instruction (stringclasses 1) | input (stringlengths 90 → 139k) | output (stringlengths 16 → 138k) | __index_level_0__ (int64 165k → 175k)
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: gss_pseudo_random (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int prf_key,
const gss_buffer_t prf_in,
ssize_t desired_output_len,
gss_buffer_t prf_out)
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
if (context_handle == GSS_C_NO_CONTEXT)
return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT;
if (prf_in == GSS_C_NO_BUFFER)
return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT;
if (prf_out == GSS_C_NO_BUFFER)
return GSS_S_CALL_INACCESSIBLE_WRITE | GSS_S_NO_CONTEXT;
prf_out->length = 0;
prf_out->value = NULL;
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
mech = gssint_get_mechanism (ctx->mech_type);
if (mech != NULL) {
if (mech->gss_pseudo_random != NULL) {
status = mech->gss_pseudo_random(minor_status,
ctx->internal_ctx_id,
prf_key,
prf_in,
desired_output_len,
prf_out);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return status;
}
return GSS_S_BAD_MECH;
}
Commit Message: Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
CWE ID: CWE-415 | gss_pseudo_random (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int prf_key,
const gss_buffer_t prf_in,
ssize_t desired_output_len,
gss_buffer_t prf_out)
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
if (context_handle == GSS_C_NO_CONTEXT)
return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT;
if (prf_in == GSS_C_NO_BUFFER)
return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT;
if (prf_out == GSS_C_NO_BUFFER)
return GSS_S_CALL_INACCESSIBLE_WRITE | GSS_S_NO_CONTEXT;
prf_out->length = 0;
prf_out->value = NULL;
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return GSS_S_NO_CONTEXT;
mech = gssint_get_mechanism (ctx->mech_type);
if (mech != NULL) {
if (mech->gss_pseudo_random != NULL) {
status = mech->gss_pseudo_random(minor_status,
ctx->internal_ctx_id,
prf_key,
prf_in,
desired_output_len,
prf_out);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return status;
}
return GSS_S_BAD_MECH;
}
| 168,018 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: BluetoothDeviceChooserController::~BluetoothDeviceChooserController() {
if (scanning_start_time_) {
RecordScanningDuration(base::TimeTicks::Now() -
scanning_start_time_.value());
}
if (chooser_) {
DCHECK(!error_callback_.is_null());
error_callback_.Run(blink::mojom::WebBluetoothResult::CHOOSER_CANCELLED);
}
}
Commit Message: bluetooth: Implement getAvailability()
This change implements the getAvailability() method for
navigator.bluetooth as defined in the specification.
Bug: 707640
Change-Id: I9e9b3e7f8ea7f259e975f71cb6d9570e5f04b479
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1651516
Reviewed-by: Chris Harrelson <[email protected]>
Reviewed-by: Giovanni Ortuño Urquidi <[email protected]>
Reviewed-by: Kinuko Yasuda <[email protected]>
Commit-Queue: Ovidio de Jesús Ruiz-Henríquez <[email protected]>
Auto-Submit: Ovidio de Jesús Ruiz-Henríquez <[email protected]>
Cr-Commit-Position: refs/heads/master@{#688987}
CWE ID: CWE-119 | BluetoothDeviceChooserController::~BluetoothDeviceChooserController() {
if (scanning_start_time_) {
RecordScanningDuration(base::TimeTicks::Now() -
scanning_start_time_.value());
}
if (chooser_) {
DCHECK(!error_callback_.is_null());
error_callback_.Run(WebBluetoothResult::CHOOSER_CANCELLED);
}
}
| 172,446 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void FetchContext::DispatchWillSendRequest(unsigned long,
ResourceRequest&,
const ResourceResponse&,
const FetchInitiatorInfo&) {}
Commit Message: DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resoure type into the DispatchWillSendRequest
instrumenation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Commit-Queue: Andrey Lushnikov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507936}
CWE ID: CWE-119 | void FetchContext::DispatchWillSendRequest(unsigned long,
ResourceRequest&,
const ResourceResponse&,
Resource::Type,
const FetchInitiatorInfo&) {}
| 172,477 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void RegisterPropertiesHandler(
void* object, const ImePropertyList& prop_list) {
if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) {
LOG(ERROR) << "Not on UI thread";
return;
}
InputMethodLibraryImpl* input_method_library =
static_cast<InputMethodLibraryImpl*>(object);
input_method_library->RegisterProperties(prop_list);
}
Commit Message: Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | static void RegisterPropertiesHandler(
// IBusController override.
virtual void OnRegisterImeProperties(
const input_method::ImePropertyList& prop_list) {
if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) {
LOG(ERROR) << "Not on UI thread";
return;
}
RegisterProperties(prop_list);
}
| 170,502 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PassRefPtr<RTCSessionDescriptionDescriptor> RTCPeerConnectionHandlerDummy::localDescription()
{
return 0;
}
Commit Message: Unreviewed, rolling out r127612, r127660, and r127664.
http://trac.webkit.org/changeset/127612
http://trac.webkit.org/changeset/127660
http://trac.webkit.org/changeset/127664
https://bugs.webkit.org/show_bug.cgi?id=95920
Source/Platform:
* Platform.gypi:
* chromium/public/WebRTCPeerConnectionHandler.h:
(WebKit):
(WebRTCPeerConnectionHandler):
* chromium/public/WebRTCVoidRequest.h: Removed.
Source/WebCore:
* CMakeLists.txt:
* GNUmakefile.list.am:
* Modules/mediastream/RTCErrorCallback.h:
(WebCore):
(RTCErrorCallback):
* Modules/mediastream/RTCErrorCallback.idl:
* Modules/mediastream/RTCPeerConnection.cpp:
(WebCore::RTCPeerConnection::createOffer):
* Modules/mediastream/RTCPeerConnection.h:
(WebCore):
(RTCPeerConnection):
* Modules/mediastream/RTCPeerConnection.idl:
* Modules/mediastream/RTCSessionDescriptionCallback.h:
(WebCore):
(RTCSessionDescriptionCallback):
* Modules/mediastream/RTCSessionDescriptionCallback.idl:
* Modules/mediastream/RTCSessionDescriptionRequestImpl.cpp:
(WebCore::RTCSessionDescriptionRequestImpl::create):
(WebCore::RTCSessionDescriptionRequestImpl::RTCSessionDescriptionRequestImpl):
(WebCore::RTCSessionDescriptionRequestImpl::requestSucceeded):
(WebCore::RTCSessionDescriptionRequestImpl::requestFailed):
(WebCore::RTCSessionDescriptionRequestImpl::clear):
* Modules/mediastream/RTCSessionDescriptionRequestImpl.h:
(RTCSessionDescriptionRequestImpl):
* Modules/mediastream/RTCVoidRequestImpl.cpp: Removed.
* Modules/mediastream/RTCVoidRequestImpl.h: Removed.
* WebCore.gypi:
* platform/chromium/support/WebRTCVoidRequest.cpp: Removed.
* platform/mediastream/RTCPeerConnectionHandler.cpp:
(RTCPeerConnectionHandlerDummy):
(WebCore::RTCPeerConnectionHandlerDummy::RTCPeerConnectionHandlerDummy):
* platform/mediastream/RTCPeerConnectionHandler.h:
(WebCore):
(WebCore::RTCPeerConnectionHandler::~RTCPeerConnectionHandler):
(RTCPeerConnectionHandler):
(WebCore::RTCPeerConnectionHandler::RTCPeerConnectionHandler):
* platform/mediastream/RTCVoidRequest.h: Removed.
* platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.cpp:
* platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.h:
(RTCPeerConnectionHandlerChromium):
Tools:
* DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.cpp:
(MockWebRTCPeerConnectionHandler::SuccessCallbackTask::SuccessCallbackTask):
(MockWebRTCPeerConnectionHandler::SuccessCallbackTask::runIfValid):
(MockWebRTCPeerConnectionHandler::FailureCallbackTask::FailureCallbackTask):
(MockWebRTCPeerConnectionHandler::FailureCallbackTask::runIfValid):
(MockWebRTCPeerConnectionHandler::createOffer):
* DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.h:
(MockWebRTCPeerConnectionHandler):
(SuccessCallbackTask):
(FailureCallbackTask):
LayoutTests:
* fast/mediastream/RTCPeerConnection-createOffer.html:
* fast/mediastream/RTCPeerConnection-localDescription-expected.txt: Removed.
* fast/mediastream/RTCPeerConnection-localDescription.html: Removed.
* fast/mediastream/RTCPeerConnection-remoteDescription-expected.txt: Removed.
* fast/mediastream/RTCPeerConnection-remoteDescription.html: Removed.
git-svn-id: svn://svn.chromium.org/blink/trunk@127679 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | PassRefPtr<RTCSessionDescriptionDescriptor> RTCPeerConnectionHandlerDummy::localDescription()
| 170,347 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: usage(int iExitCode)
{
char word[32];
sprintf( word, getJobActionString(mode) );
fprintf( stderr, "Usage: %s [options] [constraints]\n", MyName );
fprintf( stderr, " where [options] is zero or more of:\n" );
fprintf( stderr, " -help Display this message and exit\n" );
fprintf( stderr, " -version Display version information and exit\n" );
fprintf( stderr, " -name schedd_name Connect to the given schedd\n" );
fprintf( stderr, " -pool hostname Use the given central manager to find daemons\n" );
fprintf( stderr, " -addr <ip:port> Connect directly to the given \"sinful string\"\n" );
if( mode == JA_REMOVE_JOBS || mode == JA_REMOVE_X_JOBS ) {
fprintf( stderr, " -reason reason Use the given RemoveReason\n");
} else if( mode == JA_RELEASE_JOBS ) {
fprintf( stderr, " -reason reason Use the given ReleaseReason\n");
} else if( mode == JA_HOLD_JOBS ) {
fprintf( stderr, " -reason reason Use the given HoldReason\n");
fprintf( stderr, " -subcode number Set HoldReasonSubCode\n");
}
if( mode == JA_REMOVE_JOBS || mode == JA_REMOVE_X_JOBS ) {
fprintf( stderr,
" -forcex Force the immediate local removal of jobs in the X state\n"
" (only affects jobs already being removed)\n" );
}
if( mode == JA_VACATE_JOBS || mode == JA_VACATE_FAST_JOBS ) {
fprintf( stderr,
" -fast Use a fast vacate (hardkill)\n" );
}
fprintf( stderr, " and where [constraints] is one of:\n" );
fprintf( stderr, " cluster.proc %s the given job\n", word );
fprintf( stderr, " cluster %s the given cluster of jobs\n", word );
fprintf( stderr, " user %s all jobs owned by user\n", word );
fprintf( stderr, " -constraint expr %s all jobs matching the boolean expression\n", word );
fprintf( stderr, " -all %s all jobs "
"(cannot be used with other constraints)\n", word );
exit( iExitCode );
}
Commit Message:
CWE ID: CWE-134 | usage(int iExitCode)
{
char word[32];
sprintf( word, "%s", getJobActionString(mode) );
fprintf( stderr, "Usage: %s [options] [constraints]\n", MyName );
fprintf( stderr, " where [options] is zero or more of:\n" );
fprintf( stderr, " -help Display this message and exit\n" );
fprintf( stderr, " -version Display version information and exit\n" );
fprintf( stderr, " -name schedd_name Connect to the given schedd\n" );
fprintf( stderr, " -pool hostname Use the given central manager to find daemons\n" );
fprintf( stderr, " -addr <ip:port> Connect directly to the given \"sinful string\"\n" );
if( mode == JA_REMOVE_JOBS || mode == JA_REMOVE_X_JOBS ) {
fprintf( stderr, " -reason reason Use the given RemoveReason\n");
} else if( mode == JA_RELEASE_JOBS ) {
fprintf( stderr, " -reason reason Use the given ReleaseReason\n");
} else if( mode == JA_HOLD_JOBS ) {
fprintf( stderr, " -reason reason Use the given HoldReason\n");
fprintf( stderr, " -subcode number Set HoldReasonSubCode\n");
}
if( mode == JA_REMOVE_JOBS || mode == JA_REMOVE_X_JOBS ) {
fprintf( stderr,
" -forcex Force the immediate local removal of jobs in the X state\n"
" (only affects jobs already being removed)\n" );
}
if( mode == JA_VACATE_JOBS || mode == JA_VACATE_FAST_JOBS ) {
fprintf( stderr,
" -fast Use a fast vacate (hardkill)\n" );
}
fprintf( stderr, " and where [constraints] is one of:\n" );
fprintf( stderr, " cluster.proc %s the given job\n", word );
fprintf( stderr, " cluster %s the given cluster of jobs\n", word );
fprintf( stderr, " user %s all jobs owned by user\n", word );
fprintf( stderr, " -constraint expr %s all jobs matching the boolean expression\n", word );
fprintf( stderr, " -all %s all jobs "
"(cannot be used with other constraints)\n", word );
exit( iExitCode );
}
| 165,375 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CuePoint::Load(IMkvReader* pReader)
{
if (m_timecode >= 0) //already loaded
return;
assert(m_track_positions == NULL);
assert(m_track_positions_count == 0);
long long pos_ = -m_timecode;
const long long element_start = pos_;
long long stop;
{
long len;
const long long id = ReadUInt(pReader, pos_, len);
assert(id == 0x3B); //CuePoint ID
if (id != 0x3B)
return;
pos_ += len; //consume ID
const long long size = ReadUInt(pReader, pos_, len);
assert(size >= 0);
pos_ += len; //consume Size field
stop = pos_ + size;
}
const long long element_size = stop - element_start;
long long pos = pos_;
while (pos < stop)
{
long len;
const long long id = ReadUInt(pReader, pos, len);
assert(id >= 0); //TODO
assert((pos + len) <= stop);
pos += len; //consume ID
const long long size = ReadUInt(pReader, pos, len);
assert(size >= 0);
assert((pos + len) <= stop);
pos += len; //consume Size field
assert((pos + size) <= stop);
if (id == 0x33) //CueTime ID
m_timecode = UnserializeUInt(pReader, pos, size);
else if (id == 0x37) //CueTrackPosition(s) ID
++m_track_positions_count;
pos += size; //consume payload
assert(pos <= stop);
}
assert(m_timecode >= 0);
assert(m_track_positions_count > 0);
m_track_positions = new TrackPosition[m_track_positions_count];
TrackPosition* p = m_track_positions;
pos = pos_;
while (pos < stop)
{
long len;
const long long id = ReadUInt(pReader, pos, len);
assert(id >= 0); //TODO
assert((pos + len) <= stop);
pos += len; //consume ID
const long long size = ReadUInt(pReader, pos, len);
assert(size >= 0);
assert((pos + len) <= stop);
pos += len; //consume Size field
assert((pos + size) <= stop);
if (id == 0x37) //CueTrackPosition(s) ID
{
TrackPosition& tp = *p++;
tp.Parse(pReader, pos, size);
}
pos += size; //consume payload
assert(pos <= stop);
}
assert(size_t(p - m_track_positions) == m_track_positions_count);
m_element_start = element_start;
m_element_size = element_size;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | void CuePoint::Load(IMkvReader* pReader)
assert(m_track_positions == NULL);
assert(m_track_positions_count == 0);
long long pos_ = -m_timecode;
const long long element_start = pos_;
long long stop;
{
long len;
const long long id = ReadUInt(pReader, pos_, len);
assert(id == 0x3B); // CuePoint ID
if (id != 0x3B)
return;
pos_ += len; // consume ID
const long long size = ReadUInt(pReader, pos_, len);
assert(size >= 0);
pos_ += len; // consume Size field
// pos_ now points to start of payload
stop = pos_ + size;
}
const long long element_size = stop - element_start;
long long pos = pos_;
// First count number of track positions
while (pos < stop) {
long len;
const long long id = ReadUInt(pReader, pos, len);
assert(id >= 0); // TODO
assert((pos + len) <= stop);
pos += len; // consume ID
const long long size = ReadUInt(pReader, pos, len);
assert(size >= 0);
assert((pos + len) <= stop);
pos += len; // consume Size field
assert((pos + size) <= stop);
if (id == 0x33) // CueTime ID
m_timecode = UnserializeUInt(pReader, pos, size);
else if (id == 0x37) // CueTrackPosition(s) ID
++m_track_positions_count;
pos += size; // consume payload
assert(pos <= stop);
}
assert(m_timecode >= 0);
assert(m_track_positions_count > 0);
// os << "CuePoint::Load(cont'd): idpos=" << idpos
// << " timecode=" << m_timecode
// << endl;
m_track_positions = new TrackPosition[m_track_positions_count];
// Now parse track positions
TrackPosition* p = m_track_positions;
pos = pos_;
while (pos < stop) {
long len;
const long long id = ReadUInt(pReader, pos, len);
assert(id >= 0); // TODO
assert((pos + len) <= stop);
pos += len; // consume ID
const long long size = ReadUInt(pReader, pos, len);
assert(size >= 0);
assert((pos + len) <= stop);
pos += len; // consume Size field
assert((pos + size) <= stop);
if (id == 0x37) { // CueTrackPosition(s) ID
TrackPosition& tp = *p++;
tp.Parse(pReader, pos, size);
}
pos += size; // consume payload
assert(pos <= stop);
}
assert(size_t(p - m_track_positions) == m_track_positions_count);
m_element_start = element_start;
m_element_size = element_size;
}
| 174,395 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos,
int type, int code,
struct icmp_bxm *param)
{
struct flowi4 fl4 = {
.daddr = (param->replyopts.srr ?
param->replyopts.faddr : iph->saddr),
.saddr = saddr,
.flowi4_tos = RT_TOS(tos),
.flowi4_proto = IPPROTO_ICMP,
.fl4_icmp_type = type,
.fl4_icmp_code = code,
};
struct rtable *rt, *rt2;
int err;
security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
rt = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
if (!fl4.saddr)
fl4.saddr = rt->rt_src;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(&fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
}
Commit Message: inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos,
int type, int code,
struct icmp_bxm *param)
{
struct flowi4 fl4 = {
.daddr = (param->replyopts.opt.opt.srr ?
param->replyopts.opt.opt.faddr : iph->saddr),
.saddr = saddr,
.flowi4_tos = RT_TOS(tos),
.flowi4_proto = IPPROTO_ICMP,
.fl4_icmp_type = type,
.fl4_icmp_code = code,
};
struct rtable *rt, *rt2;
int err;
security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
rt = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
if (!fl4.saddr)
fl4.saddr = rt->rt_src;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(&fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
}
| 165,553 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int ParseWave64HeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t total_samples = 0, infilesize;
Wave64ChunkHeader chunk_header;
Wave64FileHeader filehdr;
WaveHeader WaveHeader;
int format_chunk = 0;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&filehdr, fourcc, 4);
if (!DoReadFile (infile, ((char *) &filehdr) + 4, sizeof (Wave64FileHeader) - 4, &bcount) ||
bcount != sizeof (Wave64FileHeader) - 4 || memcmp (filehdr.ckID, riff_guid, sizeof (riff_guid)) ||
memcmp (filehdr.formType, wave_guid, sizeof (wave_guid))) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &filehdr, sizeof (filehdr))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackLittleEndianToNative (&filehdr, Wave64ChunkHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
filehdr.ckSize && filehdr.ckSize + 1 && filehdr.ckSize != infilesize) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
#endif
while (1) {
if (!DoReadFile (infile, &chunk_header, sizeof (Wave64ChunkHeader), &bcount) ||
bcount != sizeof (Wave64ChunkHeader)) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &chunk_header, sizeof (Wave64ChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackLittleEndianToNative (&chunk_header, Wave64ChunkHeaderFormat);
chunk_header.ckSize -= sizeof (chunk_header);
if (!memcmp (chunk_header.ckID, fmt_guid, sizeof (fmt_guid))) {
int supported = TRUE, format;
if (format_chunk++) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
chunk_header.ckSize = (chunk_header.ckSize + 7) & ~7L;
if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) ||
!DoReadFile (infile, &WaveHeader, (uint32_t) chunk_header.ckSize, &bcount) ||
bcount != chunk_header.ckSize) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &WaveHeader, (uint32_t) chunk_header.ckSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat);
if (debug_logging_mode) {
error_line ("format tag size = %d", chunk_header.ckSize);
error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d",
WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample);
error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d",
WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond);
if (chunk_header.ckSize > 16)
error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize,
WaveHeader.ValidBitsPerSample);
if (chunk_header.ckSize > 20)
error_line ("ChannelMask = %x, SubFormat = %d",
WaveHeader.ChannelMask, WaveHeader.SubFormat);
}
if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2)
config->qmode |= QMODE_ADOBE_MODE;
format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ?
WaveHeader.SubFormat : WaveHeader.FormatTag;
config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ?
WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample;
if (format != 1 && format != 3)
supported = FALSE;
if (format == 3 && config->bits_per_sample != 32)
supported = FALSE;
if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 ||
WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 ||
WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 ||
WaveHeader.BlockAlign % WaveHeader.NumChannels)
supported = FALSE;
if (config->bits_per_sample < 1 || config->bits_per_sample > 32)
supported = FALSE;
if (!supported) {
error_line ("%s is an unsupported .W64 format!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (chunk_header.ckSize < 40) {
if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) {
if (WaveHeader.NumChannels <= 2)
config->channel_mask = 0x5 - WaveHeader.NumChannels;
else if (WaveHeader.NumChannels <= 18)
config->channel_mask = (1 << WaveHeader.NumChannels) - 1;
else
config->channel_mask = 0x3ffff;
}
}
else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this W64 file already has channel order information!");
return WAVPACK_SOFT_ERROR;
}
else if (WaveHeader.ChannelMask)
config->channel_mask = WaveHeader.ChannelMask;
if (format == 3)
config->float_norm_exp = 127;
else if ((config->qmode & QMODE_ADOBE_MODE) &&
WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) {
if (WaveHeader.BitsPerSample == 24)
config->float_norm_exp = 127 + 23;
else if (WaveHeader.BitsPerSample == 32)
config->float_norm_exp = 127 + 15;
}
if (debug_logging_mode) {
if (config->float_norm_exp == 127)
error_line ("data format: normalized 32-bit floating point");
else
error_line ("data format: %d-bit integers stored in %d byte(s)",
config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels);
}
}
else if (!memcmp (chunk_header.ckID, data_guid, sizeof (data_guid))) { // on the data chunk, get size and exit loop
if (!WaveHeader.NumChannels) { // make sure we saw "fmt" chunk
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if ((config->qmode & QMODE_IGNORE_LENGTH) || chunk_header.ckSize <= 0) {
config->qmode |= QMODE_IGNORE_LENGTH;
if (infilesize && DoGetFilePosition (infile) != -1)
total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign;
else
total_samples = -1;
}
else {
if (infilesize && infilesize - chunk_header.ckSize > 16777216) {
error_line ("this .W64 file has over 16 MB of extra RIFF data, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
total_samples = chunk_header.ckSize / WaveHeader.BlockAlign;
if (!total_samples) {
error_line ("this .W64 file has no audio samples, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
if (total_samples > MAX_WAVPACK_SAMPLES) {
error_line ("%s has too many samples for WavPack!", infilename);
return WAVPACK_SOFT_ERROR;
}
}
config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels;
config->num_channels = WaveHeader.NumChannels;
config->sample_rate = WaveHeader.SampleRate;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (chunk_header.ckSize + 7) & ~7L;
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2],
chunk_header.ckID [3], chunk_header.ckSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
Commit Message: issue #68: clear WaveHeader at start to prevent uninitialized read
CWE ID: CWE-665 | int ParseWave64HeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t total_samples = 0, infilesize;
Wave64ChunkHeader chunk_header;
Wave64FileHeader filehdr;
WaveHeader WaveHeader;
int format_chunk = 0;
uint32_t bcount;
CLEAR (WaveHeader);
infilesize = DoGetFileSize (infile);
memcpy (&filehdr, fourcc, 4);
if (!DoReadFile (infile, ((char *) &filehdr) + 4, sizeof (Wave64FileHeader) - 4, &bcount) ||
bcount != sizeof (Wave64FileHeader) - 4 || memcmp (filehdr.ckID, riff_guid, sizeof (riff_guid)) ||
memcmp (filehdr.formType, wave_guid, sizeof (wave_guid))) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &filehdr, sizeof (filehdr))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackLittleEndianToNative (&filehdr, Wave64ChunkHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
filehdr.ckSize && filehdr.ckSize + 1 && filehdr.ckSize != infilesize) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
#endif
while (1) {
if (!DoReadFile (infile, &chunk_header, sizeof (Wave64ChunkHeader), &bcount) ||
bcount != sizeof (Wave64ChunkHeader)) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &chunk_header, sizeof (Wave64ChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackLittleEndianToNative (&chunk_header, Wave64ChunkHeaderFormat);
chunk_header.ckSize -= sizeof (chunk_header);
if (!memcmp (chunk_header.ckID, fmt_guid, sizeof (fmt_guid))) {
int supported = TRUE, format;
if (format_chunk++) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
chunk_header.ckSize = (chunk_header.ckSize + 7) & ~7L;
if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) ||
!DoReadFile (infile, &WaveHeader, (uint32_t) chunk_header.ckSize, &bcount) ||
bcount != chunk_header.ckSize) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &WaveHeader, (uint32_t) chunk_header.ckSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat);
if (debug_logging_mode) {
error_line ("format tag size = %d", chunk_header.ckSize);
error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d",
WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample);
error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d",
WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond);
if (chunk_header.ckSize > 16)
error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize,
WaveHeader.ValidBitsPerSample);
if (chunk_header.ckSize > 20)
error_line ("ChannelMask = %x, SubFormat = %d",
WaveHeader.ChannelMask, WaveHeader.SubFormat);
}
if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2)
config->qmode |= QMODE_ADOBE_MODE;
format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ?
WaveHeader.SubFormat : WaveHeader.FormatTag;
config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ?
WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample;
if (format != 1 && format != 3)
supported = FALSE;
if (format == 3 && config->bits_per_sample != 32)
supported = FALSE;
if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 ||
WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 ||
WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 ||
WaveHeader.BlockAlign % WaveHeader.NumChannels)
supported = FALSE;
if (config->bits_per_sample < 1 || config->bits_per_sample > 32)
supported = FALSE;
if (!supported) {
error_line ("%s is an unsupported .W64 format!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (chunk_header.ckSize < 40) {
if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) {
if (WaveHeader.NumChannels <= 2)
config->channel_mask = 0x5 - WaveHeader.NumChannels;
else if (WaveHeader.NumChannels <= 18)
config->channel_mask = (1 << WaveHeader.NumChannels) - 1;
else
config->channel_mask = 0x3ffff;
}
}
else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this W64 file already has channel order information!");
return WAVPACK_SOFT_ERROR;
}
else if (WaveHeader.ChannelMask)
config->channel_mask = WaveHeader.ChannelMask;
if (format == 3)
config->float_norm_exp = 127;
else if ((config->qmode & QMODE_ADOBE_MODE) &&
WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) {
if (WaveHeader.BitsPerSample == 24)
config->float_norm_exp = 127 + 23;
else if (WaveHeader.BitsPerSample == 32)
config->float_norm_exp = 127 + 15;
}
if (debug_logging_mode) {
if (config->float_norm_exp == 127)
error_line ("data format: normalized 32-bit floating point");
else
error_line ("data format: %d-bit integers stored in %d byte(s)",
config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels);
}
}
else if (!memcmp (chunk_header.ckID, data_guid, sizeof (data_guid))) { // on the data chunk, get size and exit loop
if (!WaveHeader.NumChannels) { // make sure we saw "fmt" chunk
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if ((config->qmode & QMODE_IGNORE_LENGTH) || chunk_header.ckSize <= 0) {
config->qmode |= QMODE_IGNORE_LENGTH;
if (infilesize && DoGetFilePosition (infile) != -1)
total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign;
else
total_samples = -1;
}
else {
if (infilesize && infilesize - chunk_header.ckSize > 16777216) {
error_line ("this .W64 file has over 16 MB of extra RIFF data, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
total_samples = chunk_header.ckSize / WaveHeader.BlockAlign;
if (!total_samples) {
error_line ("this .W64 file has no audio samples, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
if (total_samples > MAX_WAVPACK_SAMPLES) {
error_line ("%s has too many samples for WavPack!", infilename);
return WAVPACK_SOFT_ERROR;
}
}
config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels;
config->num_channels = WaveHeader.NumChannels;
config->sample_rate = WaveHeader.SampleRate;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (chunk_header.ckSize + 7) & ~7L;
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2],
chunk_header.ckID [3], chunk_header.ckSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
| 169,461 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int peer_recv_callback(rdpTransport* transport, wStream* s, void* extra)
{
freerdp_peer* client = (freerdp_peer*) extra;
rdpRdp* rdp = client->context->rdp;
switch (rdp->state)
{
case CONNECTION_STATE_INITIAL:
if (!rdp_server_accept_nego(rdp, s))
return -1;
if (rdp->nego->selected_protocol & PROTOCOL_NLA)
{
sspi_CopyAuthIdentity(&client->identity, &(rdp->nego->transport->credssp->identity));
IFCALLRET(client->Logon, client->authenticated, client, &client->identity, TRUE);
credssp_free(rdp->nego->transport->credssp);
}
else
{
IFCALLRET(client->Logon, client->authenticated, client, &client->identity, FALSE);
}
break;
case CONNECTION_STATE_NEGO:
if (!rdp_server_accept_mcs_connect_initial(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_CONNECT:
if (!rdp_server_accept_mcs_erect_domain_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_ERECT_DOMAIN:
if (!rdp_server_accept_mcs_attach_user_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_ATTACH_USER:
if (!rdp_server_accept_mcs_channel_join_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_CHANNEL_JOIN:
if (rdp->settings->DisableEncryption)
{
if (!rdp_server_accept_client_keys(rdp, s))
return -1;
break;
}
rdp->state = CONNECTION_STATE_ESTABLISH_KEYS;
/* FALLTHROUGH */
case CONNECTION_STATE_ESTABLISH_KEYS:
if (!rdp_server_accept_client_info(rdp, s))
return -1;
IFCALL(client->Capabilities, client);
if (!rdp_send_demand_active(rdp))
return -1;
break;
case CONNECTION_STATE_LICENSE:
if (!rdp_server_accept_confirm_active(rdp, s))
{
/**
* During reactivation sequence the client might sent some input or channel data
* before receiving the Deactivate All PDU. We need to process them as usual.
*/
Stream_SetPosition(s, 0);
return peer_recv_pdu(client, s);
}
break;
case CONNECTION_STATE_ACTIVE:
if (peer_recv_pdu(client, s) < 0)
return -1;
break;
default:
fprintf(stderr, "Invalid state %d\n", rdp->state);
return -1;
}
return 0;
}
Commit Message: nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
CWE ID: CWE-476 | static int peer_recv_callback(rdpTransport* transport, wStream* s, void* extra)
{
freerdp_peer* client = (freerdp_peer*) extra;
rdpRdp* rdp = client->context->rdp;
switch (rdp->state)
{
case CONNECTION_STATE_INITIAL:
if (!rdp_server_accept_nego(rdp, s))
return -1;
if (rdp->nego->selected_protocol & PROTOCOL_NLA)
{
sspi_CopyAuthIdentity(&client->identity, &(rdp->nego->transport->credssp->identity));
IFCALLRET(client->Logon, client->authenticated, client, &client->identity, TRUE);
credssp_free(rdp->nego->transport->credssp);
rdp->nego->transport->credssp = NULL;
}
else
{
IFCALLRET(client->Logon, client->authenticated, client, &client->identity, FALSE);
}
break;
case CONNECTION_STATE_NEGO:
if (!rdp_server_accept_mcs_connect_initial(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_CONNECT:
if (!rdp_server_accept_mcs_erect_domain_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_ERECT_DOMAIN:
if (!rdp_server_accept_mcs_attach_user_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_ATTACH_USER:
if (!rdp_server_accept_mcs_channel_join_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_CHANNEL_JOIN:
if (rdp->settings->DisableEncryption)
{
if (!rdp_server_accept_client_keys(rdp, s))
return -1;
break;
}
rdp->state = CONNECTION_STATE_ESTABLISH_KEYS;
/* FALLTHROUGH */
case CONNECTION_STATE_ESTABLISH_KEYS:
if (!rdp_server_accept_client_info(rdp, s))
return -1;
IFCALL(client->Capabilities, client);
if (!rdp_send_demand_active(rdp))
return -1;
break;
case CONNECTION_STATE_LICENSE:
if (!rdp_server_accept_confirm_active(rdp, s))
{
/**
* During reactivation sequence the client might sent some input or channel data
* before receiving the Deactivate All PDU. We need to process them as usual.
*/
Stream_SetPosition(s, 0);
return peer_recv_pdu(client, s);
}
break;
case CONNECTION_STATE_ACTIVE:
if (peer_recv_pdu(client, s) < 0)
return -1;
break;
default:
fprintf(stderr, "Invalid state %d\n", rdp->state);
return -1;
}
return 0;
}
| 167,600 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static MagickBooleanType ConcatenateImages(int argc,char **argv,
ExceptionInfo *exception )
{
FILE
*input,
*output;
int
c;
register ssize_t
i;
if (ExpandFilenames(&argc,&argv) == MagickFalse)
ThrowFileException(exception,ResourceLimitError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
output=fopen_utf8(argv[argc-1],"wb");
if (output == (FILE *) NULL) {
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",argv[argc-1]);
return(MagickFalse);
}
for (i=2; i < (ssize_t) (argc-1); i++) {
#if 0
fprintf(stderr, "DEBUG: Concatenate Image: \"%s\"\n", argv[i]);
#endif
input=fopen_utf8(argv[i],"rb");
if (input == (FILE *) NULL) {
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",argv[i]);
continue;
}
for (c=fgetc(input); c != EOF; c=fgetc(input))
(void) fputc((char) c,output);
(void) fclose(input);
(void) remove_utf8(argv[i]);
}
(void) fclose(output);
return(MagickTrue);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/196
CWE ID: CWE-20 | static MagickBooleanType ConcatenateImages(int argc,char **argv,
ExceptionInfo *exception )
{
FILE
*input,
*output;
MagickBooleanType
status;
int
c;
register ssize_t
i;
if (ExpandFilenames(&argc,&argv) == MagickFalse)
ThrowFileException(exception,ResourceLimitError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
output=fopen_utf8(argv[argc-1],"wb");
if (output == (FILE *) NULL)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
argv[argc-1]);
return(MagickFalse);
}
status=MagickTrue;
for (i=2; i < (ssize_t) (argc-1); i++)
{
input=fopen_utf8(argv[i],"rb");
if (input == (FILE *) NULL)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",argv[i]);
continue;
}
for (c=fgetc(input); c != EOF; c=fgetc(input))
if (fputc((char) c,output) != c)
status=MagickFalse;
(void) fclose(input);
(void) remove_utf8(argv[i]);
}
(void) fclose(output);
return(status);
}
| 168,628 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void TabletModeWindowManager::ForgetWindow(aura::Window* window,
bool destroyed) {
added_windows_.erase(window);
window->RemoveObserver(this);
WindowToState::iterator it = window_state_map_.find(window);
if (it == window_state_map_.end())
return;
if (destroyed) {
window_state_map_.erase(it);
} else {
it->second->LeaveTabletMode(wm::GetWindowState(it->first));
DCHECK(!IsTrackingWindow(window));
}
}
Commit Message: Fix the crash after clamshell -> tablet transition in overview mode.
This CL just reverted some changes that were made in
https://chromium-review.googlesource.com/c/chromium/src/+/1658955. In
that CL, we changed the clamshell <-> tablet transition when clamshell
split view mode is enabled, however, we should keep the old behavior
unchanged if the feature is not enabled, i.e., overview should be ended
if it's active before the transition. Otherwise, it will cause a nullptr
dereference crash since |split_view_drag_indicators_| is not created in
clamshell overview and will be used in tablet overview.
Bug: 982507
Change-Id: I238fe9472648a446cff4ab992150658c228714dd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1705474
Commit-Queue: Xiaoqian Dai <[email protected]>
Reviewed-by: Mitsuru Oshima (Slow - on/off site) <[email protected]>
Cr-Commit-Position: refs/heads/master@{#679306}
CWE ID: CWE-362 | void TabletModeWindowManager::ForgetWindow(aura::Window* window,
bool destroyed,
bool was_in_overview) {
added_windows_.erase(window);
window->RemoveObserver(this);
WindowToState::iterator it = window_state_map_.find(window);
if (it == window_state_map_.end())
return;
if (destroyed) {
window_state_map_.erase(it);
} else {
it->second->LeaveTabletMode(wm::GetWindowState(it->first), was_in_overview);
DCHECK(!IsTrackingWindow(window));
}
}
| 172,400 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
u64 connection_id)
{
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_tgt_queue *queue;
u64 association_id = nvmet_fc_getassociationid(connection_id);
u16 qid = nvmet_fc_getqueueid(connection_id);
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
queue = NULL;
spin_unlock_irqrestore(&tgtport->lock, flags);
return queue;
}
}
spin_unlock_irqrestore(&tgtport->lock, flags);
return NULL;
}
Commit Message: nvmet-fc: ensure target queue id within range.
When searching for queue id's ensure they are within the expected range.
Signed-off-by: James Smart <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
CWE ID: CWE-119 | nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
u64 connection_id)
{
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_tgt_queue *queue;
u64 association_id = nvmet_fc_getassociationid(connection_id);
u16 qid = nvmet_fc_getqueueid(connection_id);
unsigned long flags;
if (qid > NVMET_NR_QUEUES)
return NULL;
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
queue = NULL;
spin_unlock_irqrestore(&tgtport->lock, flags);
return queue;
}
}
spin_unlock_irqrestore(&tgtport->lock, flags);
return NULL;
}
| 169,859 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: long FS_FOpenFileRead(const char *filename, fileHandle_t *file, qboolean uniqueFILE)
{
searchpath_t *search;
long len;
if(!fs_searchpaths)
Com_Error(ERR_FATAL, "Filesystem call made without initialization");
for(search = fs_searchpaths; search; search = search->next)
{
len = FS_FOpenFileReadDir(filename, search, file, uniqueFILE, qfalse);
if(file == NULL)
{
if(len > 0)
return len;
}
else
{
if(len >= 0 && *file)
return len;
}
}
#ifdef FS_MISSING
if(missingFiles)
fprintf(missingFiles, "%s\n", filename);
#endif
if(file)
{
*file = 0;
return -1;
}
else
{
return 0;
}
}
Commit Message: All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
CWE ID: CWE-269 | long FS_FOpenFileRead(const char *filename, fileHandle_t *file, qboolean uniqueFILE)
{
searchpath_t *search;
long len;
qboolean isLocalConfig;
if(!fs_searchpaths)
Com_Error(ERR_FATAL, "Filesystem call made without initialization");
isLocalConfig = !strcmp(filename, "autoexec.cfg") || !strcmp(filename, Q3CONFIG_CFG);
for(search = fs_searchpaths; search; search = search->next)
{
// autoexec.cfg and wolfconfig.cfg can only be loaded outside of pk3 files.
if (isLocalConfig && search->pack)
continue;
len = FS_FOpenFileReadDir(filename, search, file, uniqueFILE, qfalse);
if(file == NULL)
{
if(len > 0)
return len;
}
else
{
if(len >= 0 && *file)
return len;
}
}
#ifdef FS_MISSING
if(missingFiles)
fprintf(missingFiles, "%s\n", filename);
#endif
if(file)
{
*file = 0;
return -1;
}
else
{
return 0;
}
}
| 170,087 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int _yr_scan_match_callback(
uint8_t* match_data,
int32_t match_length,
int flags,
void* args)
{
CALLBACK_ARGS* callback_args = (CALLBACK_ARGS*) args;
YR_STRING* string = callback_args->string;
YR_MATCH* new_match;
int result = ERROR_SUCCESS;
int tidx = callback_args->context->tidx;
size_t match_offset = match_data - callback_args->data;
match_length += callback_args->forward_matches;
if (callback_args->full_word)
{
if (flags & RE_FLAGS_WIDE)
{
if (match_offset >= 2 &&
*(match_data - 1) == 0 &&
isalnum(*(match_data - 2)))
return ERROR_SUCCESS;
if (match_offset + match_length + 1 < callback_args->data_size &&
*(match_data + match_length + 1) == 0 &&
isalnum(*(match_data + match_length)))
return ERROR_SUCCESS;
}
else
{
if (match_offset >= 1 &&
isalnum(*(match_data - 1)))
return ERROR_SUCCESS;
if (match_offset + match_length < callback_args->data_size &&
isalnum(*(match_data + match_length)))
return ERROR_SUCCESS;
}
}
if (STRING_IS_CHAIN_PART(string))
{
result = _yr_scan_verify_chained_string_match(
string,
callback_args->context,
match_data,
callback_args->data_base,
match_offset,
match_length);
}
else
{
if (string->matches[tidx].count == 0)
{
FAIL_ON_ERROR(yr_arena_write_data(
callback_args->context->matching_strings_arena,
&string,
sizeof(string),
NULL));
}
FAIL_ON_ERROR(yr_arena_allocate_memory(
callback_args->context->matches_arena,
sizeof(YR_MATCH),
(void**) &new_match));
new_match->data_length = yr_min(match_length, MAX_MATCH_DATA);
FAIL_ON_ERROR(yr_arena_write_data(
callback_args->context->matches_arena,
match_data,
new_match->data_length,
(void**) &new_match->data));
if (result == ERROR_SUCCESS)
{
new_match->base = callback_args->data_base;
new_match->offset = match_offset;
new_match->match_length = match_length;
new_match->prev = NULL;
new_match->next = NULL;
FAIL_ON_ERROR(_yr_scan_add_match_to_list(
new_match,
&string->matches[tidx],
STRING_IS_GREEDY_REGEXP(string)));
}
}
return result;
}
Commit Message: Fix buffer overrun (issue #678). Add assert for detecting this kind of issues earlier.
CWE ID: CWE-125 | int _yr_scan_match_callback(
uint8_t* match_data,
int32_t match_length,
int flags,
void* args)
{
CALLBACK_ARGS* callback_args = (CALLBACK_ARGS*) args;
YR_STRING* string = callback_args->string;
YR_MATCH* new_match;
int result = ERROR_SUCCESS;
int tidx = callback_args->context->tidx;
size_t match_offset = match_data - callback_args->data;
match_length += callback_args->forward_matches;
// make sure that match fits into the data.
assert(match_offset + match_length <= callback_args->data_size);
if (callback_args->full_word)
{
if (flags & RE_FLAGS_WIDE)
{
if (match_offset >= 2 &&
*(match_data - 1) == 0 &&
isalnum(*(match_data - 2)))
return ERROR_SUCCESS;
if (match_offset + match_length + 1 < callback_args->data_size &&
*(match_data + match_length + 1) == 0 &&
isalnum(*(match_data + match_length)))
return ERROR_SUCCESS;
}
else
{
if (match_offset >= 1 &&
isalnum(*(match_data - 1)))
return ERROR_SUCCESS;
if (match_offset + match_length < callback_args->data_size &&
isalnum(*(match_data + match_length)))
return ERROR_SUCCESS;
}
}
if (STRING_IS_CHAIN_PART(string))
{
result = _yr_scan_verify_chained_string_match(
string,
callback_args->context,
match_data,
callback_args->data_base,
match_offset,
match_length);
}
else
{
if (string->matches[tidx].count == 0)
{
FAIL_ON_ERROR(yr_arena_write_data(
callback_args->context->matching_strings_arena,
&string,
sizeof(string),
NULL));
}
FAIL_ON_ERROR(yr_arena_allocate_memory(
callback_args->context->matches_arena,
sizeof(YR_MATCH),
(void**) &new_match));
new_match->data_length = yr_min(match_length, MAX_MATCH_DATA);
FAIL_ON_ERROR(yr_arena_write_data(
callback_args->context->matches_arena,
match_data,
new_match->data_length,
(void**) &new_match->data));
if (result == ERROR_SUCCESS)
{
new_match->base = callback_args->data_base;
new_match->offset = match_offset;
new_match->match_length = match_length;
new_match->prev = NULL;
new_match->next = NULL;
FAIL_ON_ERROR(_yr_scan_add_match_to_list(
new_match,
&string->matches[tidx],
STRING_IS_GREEDY_REGEXP(string)));
}
}
return result;
}
| 168,099 |
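Note on the record above: the patched callback computes the match offset once and checks that offset plus length stays inside the scanned buffer before any byte past the match is read. A minimal sketch of that bounds-check-before-peek pattern, with hypothetical names that are not part of the YARA codebase:

#include <assert.h>
#include <ctype.h>
#include <stddef.h>

/* Return nonzero only if the byte immediately after the match exists
 * inside the scanned buffer and is alphanumeric. */
static int byte_after_match_is_alnum(const unsigned char *data,
                                     size_t data_size,
                                     size_t match_offset,
                                     size_t match_length)
{
    /* the match itself must fit into the data */
    assert(match_offset + match_length <= data_size);

    if (match_offset + match_length >= data_size)
        return 0;                         /* nothing follows the match */

    return isalnum(data[match_offset + match_length]);
}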
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: DynamicMetadataProvider::DynamicMetadataProvider(const DOMElement* e)
: AbstractMetadataProvider(e),
m_validate(XMLHelper::getAttrBool(e, false, validate)),
m_id(XMLHelper::getAttrString(e, "Dynamic", id)),
m_lock(RWLock::create()),
m_refreshDelayFactor(0.75),
m_minCacheDuration(XMLHelper::getAttrInt(e, 600, minCacheDuration)),
m_maxCacheDuration(XMLHelper::getAttrInt(e, 28800, maxCacheDuration)),
m_shutdown(false),
m_cleanupInterval(XMLHelper::getAttrInt(e, 1800, cleanupInterval)),
m_cleanupTimeout(XMLHelper::getAttrInt(e, 1800, cleanupTimeout)),
m_cleanup_wait(nullptr), m_cleanup_thread(nullptr)
{
if (m_minCacheDuration > m_maxCacheDuration) {
Category::getInstance(SAML_LOGCAT ".MetadataProvider.Dynamic").error(
"minCacheDuration setting exceeds maxCacheDuration setting, lowering to match it"
);
m_minCacheDuration = m_maxCacheDuration;
}
const XMLCh* delay = e ? e->getAttributeNS(nullptr, refreshDelayFactor) : nullptr;
if (delay && *delay) {
auto_ptr_char temp(delay);
m_refreshDelayFactor = atof(temp.get());
if (m_refreshDelayFactor <= 0.0 || m_refreshDelayFactor >= 1.0) {
Category::getInstance(SAML_LOGCAT ".MetadataProvider.Dynamic").error(
"invalid refreshDelayFactor setting, using default"
);
m_refreshDelayFactor = 0.75;
}
}
if (m_cleanupInterval > 0) {
if (m_cleanupTimeout < 0)
m_cleanupTimeout = 0;
m_cleanup_wait = CondWait::create();
m_cleanup_thread = Thread::create(&cleanup_fn, this);
}
}
Commit Message:
CWE ID: CWE-347 | DynamicMetadataProvider::DynamicMetadataProvider(const DOMElement* e)
: AbstractMetadataProvider(e), MetadataProvider(e),
m_validate(XMLHelper::getAttrBool(e, false, validate)),
m_id(XMLHelper::getAttrString(e, "Dynamic", id)),
m_lock(RWLock::create()),
m_refreshDelayFactor(0.75),
m_minCacheDuration(XMLHelper::getAttrInt(e, 600, minCacheDuration)),
m_maxCacheDuration(XMLHelper::getAttrInt(e, 28800, maxCacheDuration)),
m_shutdown(false),
m_cleanupInterval(XMLHelper::getAttrInt(e, 1800, cleanupInterval)),
m_cleanupTimeout(XMLHelper::getAttrInt(e, 1800, cleanupTimeout)),
m_cleanup_wait(nullptr), m_cleanup_thread(nullptr)
{
if (m_minCacheDuration > m_maxCacheDuration) {
Category::getInstance(SAML_LOGCAT ".Metadata.Dynamic").error(
"minCacheDuration setting exceeds maxCacheDuration setting, lowering to match it"
);
m_minCacheDuration = m_maxCacheDuration;
}
const XMLCh* delay = e ? e->getAttributeNS(nullptr, refreshDelayFactor) : nullptr;
if (delay && *delay) {
auto_ptr_char temp(delay);
m_refreshDelayFactor = atof(temp.get());
if (m_refreshDelayFactor <= 0.0 || m_refreshDelayFactor >= 1.0) {
Category::getInstance(SAML_LOGCAT ".MetadataProvider.Dynamic").error(
"invalid refreshDelayFactor setting, using default"
);
m_refreshDelayFactor = 0.75;
}
}
if (m_cleanupInterval > 0) {
if (m_cleanupTimeout < 0)
m_cleanupTimeout = 0;
m_cleanup_wait = CondWait::create();
m_cleanup_thread = Thread::create(&cleanup_fn, this);
}
}
| 164,622 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void InspectorNetworkAgent::DidReceiveResourceResponse(
unsigned long identifier,
DocumentLoader* loader,
const ResourceResponse& response,
Resource* cached_resource) {
String request_id = IdentifiersFactory::RequestId(identifier);
bool is_not_modified = response.HttpStatusCode() == 304;
bool resource_is_empty = true;
std::unique_ptr<protocol::Network::Response> resource_response =
BuildObjectForResourceResponse(response, cached_resource,
&resource_is_empty);
InspectorPageAgent::ResourceType type =
cached_resource ? InspectorPageAgent::CachedResourceType(*cached_resource)
: InspectorPageAgent::kOtherResource;
InspectorPageAgent::ResourceType saved_type =
resources_data_->GetResourceType(request_id);
if (saved_type == InspectorPageAgent::kScriptResource ||
saved_type == InspectorPageAgent::kXHRResource ||
saved_type == InspectorPageAgent::kDocumentResource ||
saved_type == InspectorPageAgent::kFetchResource ||
saved_type == InspectorPageAgent::kEventSourceResource) {
type = saved_type;
}
if (type == InspectorPageAgent::kDocumentResource && loader &&
loader->GetSubstituteData().IsValid())
return;
if (cached_resource)
resources_data_->AddResource(request_id, cached_resource);
String frame_id = loader && loader->GetFrame()
? IdentifiersFactory::FrameId(loader->GetFrame())
: "";
String loader_id = loader ? IdentifiersFactory::LoaderId(loader) : "";
resources_data_->ResponseReceived(request_id, frame_id, response);
resources_data_->SetResourceType(request_id, type);
if (response.GetSecurityStyle() != ResourceResponse::kSecurityStyleUnknown &&
response.GetSecurityStyle() !=
ResourceResponse::kSecurityStyleUnauthenticated) {
const ResourceResponse::SecurityDetails* response_security_details =
response.GetSecurityDetails();
resources_data_->SetCertificate(request_id,
response_security_details->certificate);
}
if (resource_response && !resource_is_empty) {
Maybe<String> maybe_frame_id;
if (!frame_id.IsEmpty())
maybe_frame_id = frame_id;
GetFrontend()->responseReceived(
request_id, loader_id, MonotonicallyIncreasingTime(),
InspectorPageAgent::ResourceTypeJson(type),
std::move(resource_response), std::move(maybe_frame_id));
}
if (is_not_modified && cached_resource && cached_resource->EncodedSize())
DidReceiveData(identifier, loader, 0, cached_resource->EncodedSize());
}
Commit Message: DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs the resource type into the DispatchWillSendRequest
instrumentation. This allows us to report the accurate type in the
Network.RequestWillBeSent event, instead of "Other", which we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Commit-Queue: Andrey Lushnikov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507936}
CWE ID: CWE-119 | void InspectorNetworkAgent::DidReceiveResourceResponse(
unsigned long identifier,
DocumentLoader* loader,
const ResourceResponse& response,
Resource* cached_resource) {
String request_id = IdentifiersFactory::RequestId(identifier);
bool is_not_modified = response.HttpStatusCode() == 304;
bool resource_is_empty = true;
std::unique_ptr<protocol::Network::Response> resource_response =
BuildObjectForResourceResponse(response, cached_resource,
&resource_is_empty);
InspectorPageAgent::ResourceType type =
cached_resource
? InspectorPageAgent::ToResourceType(cached_resource->GetType())
: InspectorPageAgent::kOtherResource;
InspectorPageAgent::ResourceType saved_type =
resources_data_->GetResourceType(request_id);
if (saved_type == InspectorPageAgent::kScriptResource ||
saved_type == InspectorPageAgent::kXHRResource ||
saved_type == InspectorPageAgent::kDocumentResource ||
saved_type == InspectorPageAgent::kFetchResource ||
saved_type == InspectorPageAgent::kEventSourceResource) {
type = saved_type;
}
if (type == InspectorPageAgent::kDocumentResource && loader &&
loader->GetSubstituteData().IsValid())
return;
if (cached_resource)
resources_data_->AddResource(request_id, cached_resource);
String frame_id = loader && loader->GetFrame()
? IdentifiersFactory::FrameId(loader->GetFrame())
: "";
String loader_id = loader ? IdentifiersFactory::LoaderId(loader) : "";
resources_data_->ResponseReceived(request_id, frame_id, response);
resources_data_->SetResourceType(request_id, type);
if (response.GetSecurityStyle() != ResourceResponse::kSecurityStyleUnknown &&
response.GetSecurityStyle() !=
ResourceResponse::kSecurityStyleUnauthenticated) {
const ResourceResponse::SecurityDetails* response_security_details =
response.GetSecurityDetails();
resources_data_->SetCertificate(request_id,
response_security_details->certificate);
}
if (resource_response && !resource_is_empty) {
Maybe<String> maybe_frame_id;
if (!frame_id.IsEmpty())
maybe_frame_id = frame_id;
GetFrontend()->responseReceived(
request_id, loader_id, MonotonicallyIncreasingTime(),
InspectorPageAgent::ResourceTypeJson(type),
std::move(resource_response), std::move(maybe_frame_id));
}
if (is_not_modified && cached_resource && cached_resource->EncodedSize())
DidReceiveData(identifier, loader, 0, cached_resource->EncodedSize());
}
| 172,466 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags)
{
int result;
handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb;
bool write = flags & FAULT_FLAG_WRITE;
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
ext4_chunk_trans_blocks(inode,
PMD_SIZE / PAGE_SIZE));
}
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
result = __dax_pmd_fault(vma, addr, pmd, flags,
ext4_get_block_dax, ext4_end_io_unwritten);
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
sb_end_pagefault(sb);
}
return result;
}
Commit Message: ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
CWE ID: CWE-362 | static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags)
{
int result;
handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb;
bool write = flags & FAULT_FLAG_WRITE;
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
ext4_chunk_trans_blocks(inode,
PMD_SIZE / PAGE_SIZE));
} else
down_read(&EXT4_I(inode)->i_mmap_sem);
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
result = __dax_pmd_fault(vma, addr, pmd, flags,
ext4_get_block_dax, ext4_end_io_unwritten);
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb);
} else
up_read(&EXT4_I(inode)->i_mmap_sem);
return result;
}
| 167,488 |
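The commit message in the record above describes a reader/writer locking discipline: page faults take the new i_mmap_sem shared, while truncate and hole punching take it exclusive, so a fault can never instantiate pages over blocks that are being freed. A rough userspace illustration of that discipline using a POSIX rwlock (hypothetical structures, not the kernel API):

#include <pthread.h>

struct inode_like {
    pthread_rwlock_t mmap_sem;      /* plays the role of i_mmap_sem */
    /* ... block map, page cache, ... */
};

/* Fault path: many faults may run concurrently, so the lock is taken
 * shared; it only has to exclude block-freeing operations. */
static void fault_in_page(struct inode_like *inode, long page_index)
{
    (void) page_index;
    pthread_rwlock_rdlock(&inode->mmap_sem);
    /* look up blocks and map the page while the lock is held */
    pthread_rwlock_unlock(&inode->mmap_sem);
}

/* Hole-punch path: taken exclusive so no fault can map a page while
 * the page cache is truncated and the blocks are released. */
static void punch_hole(struct inode_like *inode, long first, long last)
{
    (void) first; (void) last;
    pthread_rwlock_wrlock(&inode->mmap_sem);
    /* truncate the page cache range, then free the blocks */
    pthread_rwlock_unlock(&inode->mmap_sem);
}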
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: Node* DOMPatchSupport::patchNode(Node* node, const String& markup, ExceptionCode& ec)
{
if (node->isDocumentNode() || (node->parentNode() && node->parentNode()->isDocumentNode())) {
patchDocument(markup);
return 0;
}
Node* previousSibling = node->previousSibling();
RefPtr<DocumentFragment> fragment = DocumentFragment::create(m_document);
fragment->parseHTML(markup, node->parentElement() ? node->parentElement() : m_document->documentElement());
ContainerNode* parentNode = node->parentNode();
Vector<OwnPtr<Digest> > oldList;
for (Node* child = parentNode->firstChild(); child; child = child->nextSibling())
oldList.append(createDigest(child, 0));
String markupCopy = markup;
markupCopy.makeLower();
Vector<OwnPtr<Digest> > newList;
for (Node* child = parentNode->firstChild(); child != node; child = child->nextSibling())
newList.append(createDigest(child, 0));
for (Node* child = fragment->firstChild(); child; child = child->nextSibling()) {
if (child->hasTagName(headTag) && !child->firstChild() && markupCopy.find("</head>") == notFound)
continue; // HTML5 parser inserts empty <head> tag whenever it parses <body>
if (child->hasTagName(bodyTag) && !child->firstChild() && markupCopy.find("</body>") == notFound)
continue; // HTML5 parser inserts empty <body> tag whenever it parses </head>
newList.append(createDigest(child, &m_unusedNodesMap));
}
for (Node* child = node->nextSibling(); child; child = child->nextSibling())
newList.append(createDigest(child, 0));
if (!innerPatchChildren(parentNode, oldList, newList, ec)) {
ec = 0;
if (!m_domEditor->replaceChild(parentNode, fragment.release(), node, ec))
return 0;
}
return previousSibling ? previousSibling->nextSibling() : parentNode->firstChild();
}
Commit Message: There are too many poorly named functions to create a fragment from markup
https://bugs.webkit.org/show_bug.cgi?id=87339
Reviewed by Eric Seidel.
Source/WebCore:
Moved all functions that create a fragment from markup to markup.h/cpp.
There should be no behavioral change.
* dom/Range.cpp:
(WebCore::Range::createContextualFragment):
* dom/Range.h: Removed createDocumentFragmentForElement.
* dom/ShadowRoot.cpp:
(WebCore::ShadowRoot::setInnerHTML):
* editing/markup.cpp:
(WebCore::createFragmentFromMarkup):
(WebCore::createFragmentForInnerOuterHTML): Renamed from createFragmentFromSource.
(WebCore::createFragmentForTransformToFragment): Moved from XSLTProcessor.
(WebCore::removeElementPreservingChildren): Moved from Range.
(WebCore::createContextualFragment): Ditto.
* editing/markup.h:
* html/HTMLElement.cpp:
(WebCore::HTMLElement::setInnerHTML):
(WebCore::HTMLElement::setOuterHTML):
(WebCore::HTMLElement::insertAdjacentHTML):
* inspector/DOMPatchSupport.cpp:
(WebCore::DOMPatchSupport::patchNode): Added a FIXME since this code should be using
one of the functions listed in markup.h
* xml/XSLTProcessor.cpp:
(WebCore::XSLTProcessor::transformToFragment):
Source/WebKit/qt:
Replace calls to Range::createDocumentFragmentForElement by calls to
createContextualDocumentFragment.
* Api/qwebelement.cpp:
(QWebElement::appendInside):
(QWebElement::prependInside):
(QWebElement::prependOutside):
(QWebElement::appendOutside):
(QWebElement::encloseContentsWith):
(QWebElement::encloseWith):
git-svn-id: svn://svn.chromium.org/blink/trunk@118414 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-264 | Node* DOMPatchSupport::patchNode(Node* node, const String& markup, ExceptionCode& ec)
{
if (node->isDocumentNode() || (node->parentNode() && node->parentNode()->isDocumentNode())) {
patchDocument(markup);
return 0;
}
Node* previousSibling = node->previousSibling();
// FIXME: This code should use one of createFragment* in markup.h
RefPtr<DocumentFragment> fragment = DocumentFragment::create(m_document);
fragment->parseHTML(markup, node->parentElement() ? node->parentElement() : m_document->documentElement());
ContainerNode* parentNode = node->parentNode();
Vector<OwnPtr<Digest> > oldList;
for (Node* child = parentNode->firstChild(); child; child = child->nextSibling())
oldList.append(createDigest(child, 0));
String markupCopy = markup;
markupCopy.makeLower();
Vector<OwnPtr<Digest> > newList;
for (Node* child = parentNode->firstChild(); child != node; child = child->nextSibling())
newList.append(createDigest(child, 0));
for (Node* child = fragment->firstChild(); child; child = child->nextSibling()) {
if (child->hasTagName(headTag) && !child->firstChild() && markupCopy.find("</head>") == notFound)
continue; // HTML5 parser inserts empty <head> tag whenever it parses <body>
if (child->hasTagName(bodyTag) && !child->firstChild() && markupCopy.find("</body>") == notFound)
continue; // HTML5 parser inserts empty <body> tag whenever it parses </head>
newList.append(createDigest(child, &m_unusedNodesMap));
}
for (Node* child = node->nextSibling(); child; child = child->nextSibling())
newList.append(createDigest(child, 0));
if (!innerPatchChildren(parentNode, oldList, newList, ec)) {
ec = 0;
if (!m_domEditor->replaceChild(parentNode, fragment.release(), node, ec))
return 0;
}
return previousSibling ? previousSibling->nextSibling() : parentNode->firstChild();
}
| 170,443 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void WorkerProcessLauncherTest::SetUp() {
task_runner_ = new AutoThreadTaskRunner(
message_loop_.message_loop_proxy(),
base::Bind(&WorkerProcessLauncherTest::QuitMainMessageLoop,
base::Unretained(this)));
exit_code_ = STILL_ACTIVE;
launcher_delegate_.reset(new MockProcessLauncherDelegate());
EXPECT_CALL(*launcher_delegate_, Send(_))
.Times(AnyNumber())
.WillRepeatedly(Return(false));
EXPECT_CALL(*launcher_delegate_, GetExitCode())
.Times(AnyNumber())
.WillRepeatedly(ReturnPointee(&exit_code_));
EXPECT_CALL(*launcher_delegate_, KillProcess(_))
.Times(AnyNumber())
.WillRepeatedly(Invoke(this, &WorkerProcessLauncherTest::KillProcess));
EXPECT_CALL(ipc_delegate_, OnMessageReceived(_))
.Times(AnyNumber())
.WillRepeatedly(Return(false));
}
Commit Message: Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process.
As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition.
BUG=134694
Review URL: https://chromiumcodereview.appspot.com/11143025
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void WorkerProcessLauncherTest::SetUp() {
task_runner_ = new AutoThreadTaskRunner(
message_loop_.message_loop_proxy(),
base::Bind(&WorkerProcessLauncherTest::QuitMainMessageLoop,
base::Unretained(this)));
launcher_delegate_.reset(new MockProcessLauncherDelegate());
EXPECT_CALL(*launcher_delegate_, Send(_))
.Times(AnyNumber())
.WillRepeatedly(Return(false));
EXPECT_CALL(*launcher_delegate_, GetProcessId())
.Times(AnyNumber())
.WillRepeatedly(ReturnPointee(&client_pid_));
EXPECT_CALL(*launcher_delegate_, IsPermanentError(_))
.Times(AnyNumber())
.WillRepeatedly(ReturnPointee(&permanent_error_));
EXPECT_CALL(*launcher_delegate_, KillProcess(_))
.Times(AnyNumber())
.WillRepeatedly(Invoke(this, &WorkerProcessLauncherTest::KillProcess));
EXPECT_CALL(ipc_delegate_, OnMessageReceived(_))
.Times(AnyNumber())
.WillRepeatedly(Return(false));
}
| 171,552 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
(void)&line;
if (e >= b && (size_t)(e - b) <= CDF_SEC_SIZE(h) * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len));
errno = EFTYPE;
return -1;
}
Commit Message: Use the proper sector size when checking stream offsets (Francisco Alonso and
Jan Kaluza at RedHat)
CWE ID: CWE-189 | cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
(void)&line;
if (e >= b && (size_t)(e - b) <= ss * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
ss * sst->sst_len, ss, sst->sst_len));
errno = EFTYPE;
return -1;
}
| 166,366 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void qrio_cpuwd_flag(bool flag)
{
u8 reason1;
void __iomem *qrio_base = (void *)CONFIG_SYS_QRIO_BASE;
reason1 = in_8(qrio_base + REASON1_OFF);
if (flag)
reason1 |= REASON1_CPUWD;
else
reason1 &= ~REASON1_CPUWD;
out_8(qrio_base + REASON1_OFF, reason1);
}
Commit Message: Merge branch '2020-01-22-master-imports'
- Re-add U8500 platform support
- Add bcm968360bg support
- Assorted Keymile fixes
- Other assorted bugfixes
CWE ID: CWE-787 | void qrio_cpuwd_flag(bool flag)
{
u8 reason1;
void __iomem *qrio_base = (void *)CONFIG_SYS_QRIO_BASE;
reason1 = in_8(qrio_base + REASON1_OFF);
if (flag)
reason1 |= REASON1_CPUWD;
else
reason1 &= ~REASON1_CPUWD;
out_8(qrio_base + REASON1_OFF, reason1);
}
| 169,624 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PS_SERIALIZER_DECODE_FUNC(php) /* {{{ */
{
const char *p, *q;
char *name;
const char *endptr = val + vallen;
zval *current;
int namelen;
int has_value;
php_unserialize_data_t var_hash;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
p = val;
while (p < endptr) {
zval **tmp;
q = p;
while (*q != PS_DELIMITER) {
if (++q >= endptr) goto break_outer_loop;
}
if (p[0] == PS_UNDEF_MARKER) {
p++;
has_value = 0;
} else {
has_value = 1;
}
namelen = q - p;
name = estrndup(p, namelen);
q++;
if (zend_hash_find(&EG(symbol_table), name, namelen + 1, (void **) &tmp) == SUCCESS) {
if ((Z_TYPE_PP(tmp) == IS_ARRAY && Z_ARRVAL_PP(tmp) == &EG(symbol_table)) || *tmp == PS(http_session_vars)) {
goto skip;
}
}
if (has_value) {
ALLOC_INIT_ZVAL(current);
if (php_var_unserialize(¤t, (const unsigned char **) &q, (const unsigned char *) endptr, &var_hash TSRMLS_CC)) {
php_set_session_var(name, namelen, current, &var_hash TSRMLS_CC);
} else {
var_push_dtor_no_addref(&var_hash, ¤t);
efree(name);
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return FAILURE;
}
var_push_dtor_no_addref(&var_hash, ¤t);
}
PS_ADD_VARL(name, namelen);
skip:
efree(name);
p = q;
}
break_outer_loop:
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return SUCCESS;
}
/* }}} */
Commit Message: Fix bug #72681 - consume data even if we're not storing them
CWE ID: CWE-74 | PS_SERIALIZER_DECODE_FUNC(php) /* {{{ */
{
const char *p, *q;
char *name;
const char *endptr = val + vallen;
zval *current;
int namelen;
int has_value;
php_unserialize_data_t var_hash;
int skip = 0;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
p = val;
while (p < endptr) {
zval **tmp;
q = p;
skip = 0;
while (*q != PS_DELIMITER) {
if (++q >= endptr) goto break_outer_loop;
}
if (p[0] == PS_UNDEF_MARKER) {
p++;
has_value = 0;
} else {
has_value = 1;
}
namelen = q - p;
name = estrndup(p, namelen);
q++;
if (zend_hash_find(&EG(symbol_table), name, namelen + 1, (void **) &tmp) == SUCCESS) {
if ((Z_TYPE_PP(tmp) == IS_ARRAY && Z_ARRVAL_PP(tmp) == &EG(symbol_table)) || *tmp == PS(http_session_vars)) {
skip = 1;
}
}
if (has_value) {
ALLOC_INIT_ZVAL(current);
if (php_var_unserialize(¤t, (const unsigned char **) &q, (const unsigned char *) endptr, &var_hash TSRMLS_CC)) {
if (!skip) {
php_set_session_var(name, namelen, current, &var_hash TSRMLS_CC);
}
} else {
var_push_dtor_no_addref(&var_hash, ¤t);
efree(name);
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return FAILURE;
}
var_push_dtor_no_addref(&var_hash, ¤t);
}
if (!skip) {
PS_ADD_VARL(name, namelen);
}
skip:
efree(name);
p = q;
}
break_outer_loop:
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return SUCCESS;
}
/* }}} */
| 166,959 |
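The pattern behind the fix above is worth calling out: a deserializer must still consume a value it decides not to store, otherwise the read cursor falls out of sync and later fields are parsed from attacker-chosen offsets. A self-contained sketch of that consume-even-when-skipping rule on a toy length-prefixed format (hypothetical format and helpers, not PHP's serializer):

#include <stddef.h>

/* Toy records: <1-byte length><payload>.  Filtered records are still
 * consumed so the cursor stays aligned with real record boundaries. */
static int parse_records(const unsigned char *p, size_t len,
                         int (*keep)(const unsigned char *payload, size_t n),
                         void (*store)(const unsigned char *payload, size_t n))
{
    size_t pos = 0;

    while (pos < len) {
        size_t n = p[pos++];             /* record length */

        if (n > len - pos)
            return -1;                   /* truncated record */

        int skip = !keep(p + pos, n);    /* decide, but do not jump away */

        if (!skip)
            store(p + pos, n);

        pos += n;                        /* always consume the payload */
    }
    return 0;
}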
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rpcomp;
snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rpcomp))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
Commit Message: crypto: user - fix info leaks in report API
Three errors resulting in kernel memory disclosure:
1/ The structures used for the netlink based crypto algorithm report API
are located on the stack. As snprintf() does not fill the remainder of
the buffer with null bytes, those stack bytes will be disclosed to users
of the API. Switch to strncpy() to fix this.
2/ crypto_report_one() does not initialize all fields of struct
crypto_user_alg. Fix this to fix the heap info leak.
3/ For the module name we should copy only as many bytes as
module_name() returns -- not as much as the destination buffer could
hold. But the current code does not and therefore copies random data
from behind the end of the module name, as the module name is always
shorter than CRYPTO_MAX_ALG_NAME.
Also switch to use strncpy() to copy the algorithm's name and
driver_name. They are strings, after all.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Steffen Klassert <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
CWE ID: CWE-310 | static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rpcomp;
strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rpcomp))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
| 166,070 |
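The difference between the two lines in that diff is easy to miss: snprintf() stops writing after the terminating NUL, so the rest of a fixed-size, stack-allocated field keeps whatever bytes were already there, and copying the whole struct to an untrusted reader leaks them. strncpy() (or a memset() plus a bounded copy) pads the destination to its full size. A minimal sketch, assuming a hypothetical report struct that is copied verbatim to the reader:

#include <stdio.h>
#include <string.h>

struct report {
    char type[64];               /* copied whole to the reader */
};

/* Leaky: only "pcomp\0" is written; the remaining bytes of type[]
 * still hold old stack contents. */
static void fill_report_leaky(struct report *r)
{
    snprintf(r->type, sizeof(r->type), "%s", "pcomp");
}

/* Safe: strncpy() pads with '\0' up to sizeof(type); the explicit
 * terminator covers the case of an overlong source string. */
static void fill_report_safe(struct report *r)
{
    strncpy(r->type, "pcomp", sizeof(r->type));
    r->type[sizeof(r->type) - 1] = '\0';
}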
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool PermissionsRequestFunction::RunImpl() {
if (!user_gesture() && !ignore_user_gesture_for_tests) {
error_ = kUserGestureRequiredError;
return false;
}
scoped_ptr<Request::Params> params(Request::Params::Create(*args_));
EXTENSION_FUNCTION_VALIDATE(params.get());
requested_permissions_ =
helpers::UnpackPermissionSet(params->permissions, &error_);
if (!requested_permissions_.get())
return false;
extensions::ExtensionPrefs* prefs =
profile()->GetExtensionService()->extension_prefs();
APIPermissionSet apis = requested_permissions_->apis();
for (APIPermissionSet::const_iterator i = apis.begin();
i != apis.end(); ++i) {
if (!i->info()->supports_optional()) {
error_ = ErrorUtils::FormatErrorMessage(
kNotWhitelistedError, i->name());
return false;
}
}
scoped_refptr<extensions::PermissionSet>
manifest_required_requested_permissions =
PermissionSet::ExcludeNotInManifestPermissions(
requested_permissions_.get());
if (!GetExtension()->optional_permission_set()->Contains(
*manifest_required_requested_permissions)) {
error_ = kNotInOptionalPermissionsError;
results_ = Request::Results::Create(false);
return false;
}
scoped_refptr<const PermissionSet> granted =
prefs->GetGrantedPermissions(GetExtension()->id());
if (granted.get() && granted->Contains(*requested_permissions_)) {
PermissionsUpdater perms_updater(profile());
perms_updater.AddPermissions(GetExtension(), requested_permissions_.get());
results_ = Request::Results::Create(true);
SendResponse(true);
return true;
}
requested_permissions_ = PermissionSet::CreateDifference(
requested_permissions_.get(), granted.get());
AddRef(); // Balanced in InstallUIProceed() / InstallUIAbort().
bool has_no_warnings = requested_permissions_->GetWarningMessages(
GetExtension()->GetType()).empty();
if (auto_confirm_for_tests == PROCEED || has_no_warnings) {
InstallUIProceed();
} else if (auto_confirm_for_tests == ABORT) {
InstallUIAbort(true);
} else {
CHECK_EQ(DO_NOT_SKIP, auto_confirm_for_tests);
install_ui_.reset(new ExtensionInstallPrompt(GetAssociatedWebContents()));
install_ui_->ConfirmPermissions(
this, GetExtension(), requested_permissions_.get());
}
return true;
}
Commit Message: Check prefs before allowing extension file access in the permissions API.
[email protected]
BUG=169632
Review URL: https://chromiumcodereview.appspot.com/11884008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176853 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-264 | bool PermissionsRequestFunction::RunImpl() {
results_ = Request::Results::Create(false);
if (!user_gesture() && !ignore_user_gesture_for_tests) {
error_ = kUserGestureRequiredError;
return false;
}
scoped_ptr<Request::Params> params(Request::Params::Create(*args_));
EXTENSION_FUNCTION_VALIDATE(params.get());
ExtensionPrefs* prefs = ExtensionSystem::Get(profile_)->extension_prefs();
requested_permissions_ =
helpers::UnpackPermissionSet(params->permissions,
prefs->AllowFileAccess(extension_->id()),
&error_);
if (!requested_permissions_.get())
return false;
APIPermissionSet apis = requested_permissions_->apis();
for (APIPermissionSet::const_iterator i = apis.begin();
i != apis.end(); ++i) {
if (!i->info()->supports_optional()) {
error_ = ErrorUtils::FormatErrorMessage(
kNotWhitelistedError, i->name());
return false;
}
}
scoped_refptr<extensions::PermissionSet>
manifest_required_requested_permissions =
PermissionSet::ExcludeNotInManifestPermissions(
requested_permissions_.get());
if (!GetExtension()->optional_permission_set()->Contains(
*manifest_required_requested_permissions)) {
error_ = kNotInOptionalPermissionsError;
results_ = Request::Results::Create(false);
return false;
}
scoped_refptr<const PermissionSet> granted =
prefs->GetGrantedPermissions(GetExtension()->id());
if (granted.get() && granted->Contains(*requested_permissions_)) {
PermissionsUpdater perms_updater(profile());
perms_updater.AddPermissions(GetExtension(), requested_permissions_.get());
results_ = Request::Results::Create(true);
SendResponse(true);
return true;
}
requested_permissions_ = PermissionSet::CreateDifference(
requested_permissions_.get(), granted.get());
AddRef(); // Balanced in InstallUIProceed() / InstallUIAbort().
bool has_no_warnings = requested_permissions_->GetWarningMessages(
GetExtension()->GetType()).empty();
if (auto_confirm_for_tests == PROCEED || has_no_warnings) {
InstallUIProceed();
} else if (auto_confirm_for_tests == ABORT) {
InstallUIAbort(true);
} else {
CHECK_EQ(DO_NOT_SKIP, auto_confirm_for_tests);
install_ui_.reset(new ExtensionInstallPrompt(GetAssociatedWebContents()));
install_ui_->ConfirmPermissions(
this, GetExtension(), requested_permissions_.get());
}
return true;
}
| 171,444 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: MagickExport MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info)
{
assert(memory_info != (MemoryInfo *) NULL);
assert(memory_info->signature == MagickSignature);
if (memory_info->blob != (void *) NULL)
switch (memory_info->type)
{
case AlignedVirtualMemory:
{
memory_info->blob=RelinquishAlignedMemory(memory_info->blob);
RelinquishMagickResource(MemoryResource,memory_info->length);
break;
}
case MapVirtualMemory:
{
(void) UnmapBlob(memory_info->blob,memory_info->length);
memory_info->blob=NULL;
RelinquishMagickResource(MapResource,memory_info->length);
if (*memory_info->filename != '\0')
(void) RelinquishUniqueFileResource(memory_info->filename);
break;
}
case UnalignedVirtualMemory:
default:
{
memory_info->blob=RelinquishMagickMemory(memory_info->blob);
break;
}
}
memory_info->signature=(~MagickSignature);
memory_info=(MemoryInfo *) RelinquishAlignedMemory(memory_info);
return(memory_info);
}
Commit Message:
CWE ID: CWE-189 | MagickExport MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info)
{
assert(memory_info != (MemoryInfo *) NULL);
assert(memory_info->signature == MagickSignature);
if (memory_info->blob != (void *) NULL)
switch (memory_info->type)
{
case AlignedVirtualMemory:
{
memory_info->blob=RelinquishAlignedMemory(memory_info->blob);
RelinquishMagickResource(MemoryResource,memory_info->length);
break;
}
case MapVirtualMemory:
{
(void) UnmapBlob(memory_info->blob,memory_info->length);
memory_info->blob=NULL;
RelinquishMagickResource(MapResource,memory_info->length);
if (*memory_info->filename != '\0')
{
(void) RelinquishUniqueFileResource(memory_info->filename);
RelinquishMagickResource(DiskResource,memory_info->length);
}
break;
}
case UnalignedVirtualMemory:
default:
{
memory_info->blob=RelinquishMagickMemory(memory_info->blob);
break;
}
}
memory_info->signature=(~MagickSignature);
memory_info=(MemoryInfo *) RelinquishAlignedMemory(memory_info);
return(memory_info);
}
| 168,860 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData,
UINT32* pDstSize, UINT32 flags)
{
int status = -1;
BYTE descriptor;
wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize);
if (!stream)
return -1;
if (Stream_GetRemainingLength(stream) < 1)
goto fail;
Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */
if (descriptor == ZGFX_SEGMENTED_SINGLE)
{
if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream)))
goto fail;
*ppDstData = NULL;
if (zgfx->OutputCount > 0)
*ppDstData = (BYTE*) malloc(zgfx->OutputCount);
if (!*ppDstData)
goto fail;
*pDstSize = zgfx->OutputCount;
CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount);
}
else if (descriptor == ZGFX_SEGMENTED_MULTIPART)
{
UINT32 segmentSize;
UINT16 segmentNumber;
UINT16 segmentCount;
UINT32 uncompressedSize;
BYTE* pConcatenated;
if (Stream_GetRemainingLength(stream) < 6)
goto fail;
Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */
Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */
if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32))
goto fail;
pConcatenated = (BYTE*) malloc(uncompressedSize);
if (!pConcatenated)
goto fail;
*ppDstData = pConcatenated;
*pDstSize = uncompressedSize;
for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++)
{
if (Stream_GetRemainingLength(stream) < sizeof(UINT32))
goto fail;
Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */
if (!zgfx_decompress_segment(zgfx, stream, segmentSize))
goto fail;
CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount);
pConcatenated += zgfx->OutputCount;
}
}
else
{
goto fail;
}
status = 1;
fail:
Stream_Free(stream, FALSE);
return status;
}
Commit Message: Fixed CVE-2018-8785
Thanks to Eyal Itkin from Check Point Software Technologies.
CWE ID: CWE-119 | int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData,
UINT32* pDstSize, UINT32 flags)
{
int status = -1;
BYTE descriptor;
wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize);
if (!stream)
return -1;
if (Stream_GetRemainingLength(stream) < 1)
goto fail;
Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */
if (descriptor == ZGFX_SEGMENTED_SINGLE)
{
if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream)))
goto fail;
*ppDstData = NULL;
if (zgfx->OutputCount > 0)
*ppDstData = (BYTE*) malloc(zgfx->OutputCount);
if (!*ppDstData)
goto fail;
*pDstSize = zgfx->OutputCount;
CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount);
}
else if (descriptor == ZGFX_SEGMENTED_MULTIPART)
{
UINT32 segmentSize;
UINT16 segmentNumber;
UINT16 segmentCount;
UINT32 uncompressedSize;
BYTE* pConcatenated;
size_t used = 0;
if (Stream_GetRemainingLength(stream) < 6)
goto fail;
Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */
Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */
if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32))
goto fail;
pConcatenated = (BYTE*) malloc(uncompressedSize);
if (!pConcatenated)
goto fail;
*ppDstData = pConcatenated;
*pDstSize = uncompressedSize;
for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++)
{
if (Stream_GetRemainingLength(stream) < sizeof(UINT32))
goto fail;
Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */
if (!zgfx_decompress_segment(zgfx, stream, segmentSize))
goto fail;
if (zgfx->OutputCount > UINT32_MAX - used)
goto fail;
if (used + zgfx->OutputCount > uncompressedSize)
goto fail;
CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount);
pConcatenated += zgfx->OutputCount;
used += zgfx->OutputCount;
}
}
else
{
goto fail;
}
status = 1;
fail:
Stream_Free(stream, FALSE);
return status;
}
| 169,294 |
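The fix above adds the two checks every multi-part decompressor needs before appending a segment: the running total must not wrap around, and it must not exceed the size the peer announced for the destination buffer. A minimal sketch of that append helper, with hypothetical names rather than the FreeRDP API:

#include <stdint.h>
#include <string.h>

/* Append one decompressed segment to a destination buffer whose total
 * size was announced by the peer.  Both checks run before any copy. */
static int append_segment(uint8_t *dst, uint32_t dst_size, uint32_t *used,
                          const uint8_t *seg, uint32_t seg_len)
{
    if (seg_len > UINT32_MAX - *used)     /* running total would wrap */
        return -1;

    if (*used + seg_len > dst_size)       /* would run past the buffer */
        return -1;

    memcpy(dst + *used, seg, seg_len);
    *used += seg_len;
    return 0;
}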
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
struct buffer_head *bh = NULL;
struct ext2_xattr_entry *entry;
char *end;
size_t rest = buffer_size;
int error;
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
buffer, (long)buffer_size);
down_read(&EXT2_I(inode)->xattr_sem);
error = 0;
if (!EXT2_I(inode)->i_file_acl)
goto cleanup;
ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
error = -EIO;
if (!bh)
goto cleanup;
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
end = bh->b_data + bh->b_size;
if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
"inode %ld: bad block %d", inode->i_ino,
EXT2_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
}
/* check the on-disk data structure */
entry = FIRST_ENTRY(bh);
while (!IS_LAST_ENTRY(entry)) {
struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);
if ((char *)next >= end)
goto bad_block;
entry = next;
}
if (ext2_xattr_cache_insert(bh))
ea_idebug(inode, "cache insert failed");
/* list the attribute names */
for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
entry = EXT2_XATTR_NEXT(entry)) {
const struct xattr_handler *handler =
ext2_xattr_handler(entry->e_name_index);
if (handler && (!handler->list || handler->list(dentry))) {
const char *prefix = handler->prefix ?: handler->name;
size_t prefix_len = strlen(prefix);
size_t size = prefix_len + entry->e_name_len + 1;
if (buffer) {
if (size > rest) {
error = -ERANGE;
goto cleanup;
}
memcpy(buffer, prefix, prefix_len);
buffer += prefix_len;
memcpy(buffer, entry->e_name, entry->e_name_len);
buffer += entry->e_name_len;
*buffer++ = 0;
}
rest -= size;
}
}
error = buffer_size - rest; /* total size */
cleanup:
brelse(bh);
up_read(&EXT2_I(inode)->xattr_sem);
return error;
}
Commit Message: ext2: convert to mbcache2
The conversion is generally straightforward. We convert the filesystem from
a global cache to a per-fs one. Similarly to ext4, the tricky part is that the
xattr block corresponding to a found mbcache entry can get freed before we
get the buffer lock for that block. So we have to check whether the entry is
still valid after getting the buffer lock.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
CWE ID: CWE-19 | ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
struct buffer_head *bh = NULL;
struct ext2_xattr_entry *entry;
char *end;
size_t rest = buffer_size;
int error;
struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
buffer, (long)buffer_size);
down_read(&EXT2_I(inode)->xattr_sem);
error = 0;
if (!EXT2_I(inode)->i_file_acl)
goto cleanup;
ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
error = -EIO;
if (!bh)
goto cleanup;
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
end = bh->b_data + bh->b_size;
if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
"inode %ld: bad block %d", inode->i_ino,
EXT2_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
}
/* check the on-disk data structure */
entry = FIRST_ENTRY(bh);
while (!IS_LAST_ENTRY(entry)) {
struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);
if ((char *)next >= end)
goto bad_block;
entry = next;
}
if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
ea_idebug(inode, "cache insert failed");
/* list the attribute names */
for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
entry = EXT2_XATTR_NEXT(entry)) {
const struct xattr_handler *handler =
ext2_xattr_handler(entry->e_name_index);
if (handler && (!handler->list || handler->list(dentry))) {
const char *prefix = handler->prefix ?: handler->name;
size_t prefix_len = strlen(prefix);
size_t size = prefix_len + entry->e_name_len + 1;
if (buffer) {
if (size > rest) {
error = -ERANGE;
goto cleanup;
}
memcpy(buffer, prefix, prefix_len);
buffer += prefix_len;
memcpy(buffer, entry->e_name, entry->e_name_len);
buffer += entry->e_name_len;
*buffer++ = 0;
}
rest -= size;
}
}
error = buffer_size - rest; /* total size */
cleanup:
brelse(bh);
up_read(&EXT2_I(inode)->xattr_sem);
return error;
}
| 169,981 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in )
{
SQLWCHAR *chr;
int len = 0;
if ( !in )
{
return in;
}
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
len ++;
}
chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 ));
len = 0;
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
chr[ len ] = in[ len ];
len ++;
}
chr[ len ++ ] = 0;
chr[ len ++ ] = 0;
return chr;
}
Commit Message: New Pre Source
CWE ID: CWE-119 | SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in )
{
SQLWCHAR *chr;
int len = 0;
if ( !in )
{
return NULL;
}
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
len ++;
}
chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 ));
len = 0;
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
chr[ len ] = in[ len ];
len ++;
}
chr[ len ++ ] = 0;
chr[ len ++ ] = 0;
return chr;
}
| 169,314 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: DailyDataSavingUpdate(
const char* pref_original, const char* pref_received,
PrefService* pref_service)
: pref_original_(pref_original),
pref_received_(pref_received),
original_update_(pref_service, pref_original_),
received_update_(pref_service, pref_received_) {
}
Commit Message: Added daily UMA for non-data-reduction-proxy data usage when the proxy is enabled.
BUG=325325
Review URL: https://codereview.chromium.org/106113002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@239897 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-416 | DailyDataSavingUpdate(
const char* pref_original,
const char* pref_received,
PrefService* pref_service)
: original_(pref_original, pref_service),
received_(pref_received, pref_service) {
}
| 171,322 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: tt_face_load_kern( TT_Face face,
FT_Stream stream )
{
FT_Error error;
FT_ULong table_size;
FT_Byte* p;
FT_Byte* p_limit;
FT_UInt nn, num_tables;
FT_UInt32 avail = 0, ordered = 0;
/* the kern table is optional; exit silently if it is missing */
error = face->goto_table( face, TTAG_kern, stream, &table_size );
if ( error )
goto Exit;
if ( table_size < 4 ) /* the case of a malformed table */
{
FT_ERROR(( "tt_face_load_kern:"
" kerning table is too small - ignored\n" ));
error = FT_THROW( Table_Missing );
goto Exit;
}
if ( FT_FRAME_EXTRACT( table_size, face->kern_table ) )
{
FT_ERROR(( "tt_face_load_kern:"
" could not extract kerning table\n" ));
goto Exit;
}
face->kern_table_size = table_size;
p = face->kern_table;
p_limit = p + table_size;
p += 2; /* skip version */
num_tables = FT_NEXT_USHORT( p );
if ( num_tables > 32 ) /* we only support up to 32 sub-tables */
num_tables = 32;
for ( nn = 0; nn < num_tables; nn++ )
{
FT_UInt num_pairs, length, coverage;
FT_Byte* p_next;
FT_UInt32 mask = (FT_UInt32)1UL << nn;
if ( p + 6 > p_limit )
break;
p_next = p;
p += 2; /* skip version */
length = FT_NEXT_USHORT( p );
coverage = FT_NEXT_USHORT( p );
if ( length <= 6 )
break;
p_next += length;
if ( p_next > p_limit ) /* handle broken table */
p_next = p_limit;
/* only use horizontal kerning tables */
if ( ( coverage & ~8 ) != 0x0001 ||
p + 8 > p_limit )
goto NextTable;
num_pairs = FT_NEXT_USHORT( p );
p += 6;
if ( ( p_next - p ) < 6 * (int)num_pairs ) /* handle broken count */
num_pairs = (FT_UInt)( ( p_next - p ) / 6 );
avail |= mask;
/*
* Now check whether the pairs in this table are ordered.
* We then can use binary search.
*/
if ( num_pairs > 0 )
{
FT_ULong count;
FT_ULong old_pair;
old_pair = FT_NEXT_ULONG( p );
p += 2;
for ( count = num_pairs - 1; count > 0; count-- )
{
FT_UInt32 cur_pair;
cur_pair = FT_NEXT_ULONG( p );
if ( cur_pair <= old_pair )
break;
p += 2;
old_pair = cur_pair;
}
if ( count == 0 )
ordered |= mask;
}
NextTable:
p = p_next;
}
face->num_kern_tables = nn;
face->kern_avail_bits = avail;
face->kern_order_bits = ordered;
Exit:
return error;
}
Commit Message:
CWE ID: CWE-125 | tt_face_load_kern( TT_Face face,
FT_Stream stream )
{
FT_Error error;
FT_ULong table_size;
FT_Byte* p;
FT_Byte* p_limit;
FT_UInt nn, num_tables;
FT_UInt32 avail = 0, ordered = 0;
/* the kern table is optional; exit silently if it is missing */
error = face->goto_table( face, TTAG_kern, stream, &table_size );
if ( error )
goto Exit;
if ( table_size < 4 ) /* the case of a malformed table */
{
FT_ERROR(( "tt_face_load_kern:"
" kerning table is too small - ignored\n" ));
error = FT_THROW( Table_Missing );
goto Exit;
}
if ( FT_FRAME_EXTRACT( table_size, face->kern_table ) )
{
FT_ERROR(( "tt_face_load_kern:"
" could not extract kerning table\n" ));
goto Exit;
}
face->kern_table_size = table_size;
p = face->kern_table;
p_limit = p + table_size;
p += 2; /* skip version */
num_tables = FT_NEXT_USHORT( p );
if ( num_tables > 32 ) /* we only support up to 32 sub-tables */
num_tables = 32;
for ( nn = 0; nn < num_tables; nn++ )
{
FT_UInt num_pairs, length, coverage;
FT_Byte* p_next;
FT_UInt32 mask = (FT_UInt32)1UL << nn;
if ( p + 6 > p_limit )
break;
p_next = p;
p += 2; /* skip version */
length = FT_NEXT_USHORT( p );
coverage = FT_NEXT_USHORT( p );
if ( length <= 6 + 8 )
break;
p_next += length;
if ( p_next > p_limit ) /* handle broken table */
p_next = p_limit;
/* only use horizontal kerning tables */
if ( ( coverage & ~8 ) != 0x0001 ||
p + 8 > p_limit )
goto NextTable;
num_pairs = FT_NEXT_USHORT( p );
p += 6;
if ( ( p_next - p ) < 6 * (int)num_pairs ) /* handle broken count */
num_pairs = (FT_UInt)( ( p_next - p ) / 6 );
avail |= mask;
/*
* Now check whether the pairs in this table are ordered.
* We then can use binary search.
*/
if ( num_pairs > 0 )
{
FT_ULong count;
FT_ULong old_pair;
old_pair = FT_NEXT_ULONG( p );
p += 2;
for ( count = num_pairs - 1; count > 0; count-- )
{
FT_UInt32 cur_pair;
cur_pair = FT_NEXT_ULONG( p );
if ( cur_pair <= old_pair )
break;
p += 2;
old_pair = cur_pair;
}
if ( count == 0 )
ordered |= mask;
}
NextTable:
p = p_next;
}
face->num_kern_tables = nn;
face->kern_avail_bits = avail;
face->kern_order_bits = ordered;
Exit:
return error;
}
| 164,865 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int em_call(struct x86_emulate_ctxt *ctxt)
{
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
jmp_rel(ctxt, rel);
return em_push(ctxt);
}
Commit Message: KVM: x86: Emulator fixes for eip canonical checks on near branches
Before changing rip (during jmp, call, ret, etc.), the target should be asserted
to be a canonical one, as real CPUs do. During sysret, both the target rsp and rip
should be canonical. If any of these values is noncanonical, a #GP exception
should occur. The exception to this rule are syscall and sysenter instructions
in which the assigned rip is checked during the assignment to the relevant
MSRs.
This patch fixes the emulator to behave as real CPUs do for near branches.
Far branches are handled by the next patch.
This fixes CVE-2014-3647.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
CWE ID: CWE-264 | static int em_call(struct x86_emulate_ctxt *ctxt)
{
int rc;
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt);
}
| 169,909 |
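The one-line change above works because jmp_rel() now reports whether the computed target is a canonical address, and em_call() refuses to push a return address and continue when it is not. A standalone sketch of that validate-then-commit shape for a near branch (simplified 48-bit canonical check, hypothetical names, not the KVM emulator API):

#include <stdint.h>

/* With 48-bit virtual addresses, bits 63..47 must all equal bit 47;
 * sign-extending from bit 47 and comparing detects violations. */
static int is_canonical(uint64_t addr)
{
    return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
}

/* Compute the branch target first, validate it, and only then update
 * the instruction pointer; on failure the caller raises a fault. */
static int emulate_near_branch(uint64_t *rip, int64_t rel)
{
    uint64_t target = *rip + (uint64_t)rel;

    if (!is_canonical(target))
        return -1;

    *rip = target;
    return 0;
}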
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: SPL_METHOD(DirectoryIterator, next)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern->u.dir.index++;
do {
spl_filesystem_dir_read(intern TSRMLS_CC);
} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
if (intern->file_name) {
efree(intern->file_name);
intern->file_name = NULL;
}
}
Commit Message: Fix bug #72262 - do not overflow int
CWE ID: CWE-190 | SPL_METHOD(DirectoryIterator, next)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern->u.dir.index++;
do {
spl_filesystem_dir_read(intern TSRMLS_CC);
} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
if (intern->file_name) {
efree(intern->file_name);
intern->file_name = NULL;
}
}
| 167,029 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int mbedtls_ecdsa_write_signature_restartable( mbedtls_ecdsa_context *ctx,
mbedtls_md_type_t md_alg,
const unsigned char *hash, size_t hlen,
unsigned char *sig, size_t *slen,
int (*f_rng)(void *, unsigned char *, size_t),
void *p_rng,
mbedtls_ecdsa_restart_ctx *rs_ctx )
{
int ret;
mbedtls_mpi r, s;
ECDSA_VALIDATE_RET( ctx != NULL );
ECDSA_VALIDATE_RET( hash != NULL );
ECDSA_VALIDATE_RET( sig != NULL );
ECDSA_VALIDATE_RET( slen != NULL );
mbedtls_mpi_init( &r );
mbedtls_mpi_init( &s );
#if defined(MBEDTLS_ECDSA_DETERMINISTIC)
(void) f_rng;
(void) p_rng;
MBEDTLS_MPI_CHK( ecdsa_sign_det_restartable( &ctx->grp, &r, &s, &ctx->d,
hash, hlen, md_alg, rs_ctx ) );
#else
(void) md_alg;
#if defined(MBEDTLS_ECDSA_SIGN_ALT)
MBEDTLS_MPI_CHK( mbedtls_ecdsa_sign( &ctx->grp, &r, &s, &ctx->d,
hash, hlen, f_rng, p_rng ) );
#else
MBEDTLS_MPI_CHK( ecdsa_sign_restartable( &ctx->grp, &r, &s, &ctx->d,
hash, hlen, f_rng, p_rng, rs_ctx ) );
#endif /* MBEDTLS_ECDSA_SIGN_ALT */
#endif /* MBEDTLS_ECDSA_DETERMINISTIC */
MBEDTLS_MPI_CHK( ecdsa_signature_to_asn1( &r, &s, sig, slen ) );
cleanup:
mbedtls_mpi_free( &r );
mbedtls_mpi_free( &s );
return( ret );
}
Commit Message: Merge remote-tracking branch 'upstream-restricted/pr/556' into mbedtls-2.16-restricted
CWE ID: CWE-200 | int mbedtls_ecdsa_write_signature_restartable( mbedtls_ecdsa_context *ctx,
mbedtls_md_type_t md_alg,
const unsigned char *hash, size_t hlen,
unsigned char *sig, size_t *slen,
int (*f_rng)(void *, unsigned char *, size_t),
void *p_rng,
mbedtls_ecdsa_restart_ctx *rs_ctx )
{
int ret;
mbedtls_mpi r, s;
ECDSA_VALIDATE_RET( ctx != NULL );
ECDSA_VALIDATE_RET( hash != NULL );
ECDSA_VALIDATE_RET( sig != NULL );
ECDSA_VALIDATE_RET( slen != NULL );
mbedtls_mpi_init( &r );
mbedtls_mpi_init( &s );
#if defined(MBEDTLS_ECDSA_DETERMINISTIC)
MBEDTLS_MPI_CHK( ecdsa_sign_det_restartable( &ctx->grp, &r, &s, &ctx->d,
hash, hlen, md_alg, f_rng,
p_rng, rs_ctx ) );
#else
(void) md_alg;
#if defined(MBEDTLS_ECDSA_SIGN_ALT)
MBEDTLS_MPI_CHK( mbedtls_ecdsa_sign( &ctx->grp, &r, &s, &ctx->d,
hash, hlen, f_rng, p_rng ) );
#else
/* Use the same RNG for both blinding and ephemeral key generation */
MBEDTLS_MPI_CHK( ecdsa_sign_restartable( &ctx->grp, &r, &s, &ctx->d,
hash, hlen, f_rng, p_rng, f_rng,
p_rng, rs_ctx ) );
#endif /* MBEDTLS_ECDSA_SIGN_ALT */
#endif /* MBEDTLS_ECDSA_DETERMINISTIC */
MBEDTLS_MPI_CHK( ecdsa_signature_to_asn1( &r, &s, sig, slen ) );
cleanup:
mbedtls_mpi_free( &r );
mbedtls_mpi_free( &s );
return( ret );
}
| 169,509 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: nfs_printfh(netdissect_options *ndo,
register const uint32_t *dp, const u_int len)
{
my_fsid fsid;
uint32_t ino;
const char *sfsname = NULL;
char *spacep;
if (ndo->ndo_uflag) {
u_int i;
char const *sep = "";
ND_PRINT((ndo, " fh["));
for (i=0; i<len; i++) {
ND_PRINT((ndo, "%s%x", sep, dp[i]));
sep = ":";
}
ND_PRINT((ndo, "]"));
return;
}
Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0);
if (sfsname) {
/* file system ID is ASCII, not numeric, for this server OS */
static char temp[NFSX_V3FHMAX+1];
/* Make sure string is null-terminated */
strncpy(temp, sfsname, NFSX_V3FHMAX);
temp[sizeof(temp) - 1] = '\0';
/* Remove trailing spaces */
spacep = strchr(temp, ' ');
if (spacep)
*spacep = '\0';
ND_PRINT((ndo, " fh %s/", temp));
} else {
ND_PRINT((ndo, " fh %d,%d/",
fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor));
}
if(fsid.Fsid_dev.Minor == 257)
/* Print the undecoded handle */
ND_PRINT((ndo, "%s", fsid.Opaque_Handle));
else
ND_PRINT((ndo, "%ld", (long) ino));
}
Commit Message: CVE-2017-13001/NFS: Don't copy more data than is in the file handle.
Also, put the buffer on the stack; no reason to make it static. (65
bytes isn't a lot.)
This fixes a buffer over-read discovered by Kamil Frankowicz.
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | nfs_printfh(netdissect_options *ndo,
register const uint32_t *dp, const u_int len)
{
my_fsid fsid;
uint32_t ino;
const char *sfsname = NULL;
char *spacep;
if (ndo->ndo_uflag) {
u_int i;
char const *sep = "";
ND_PRINT((ndo, " fh["));
for (i=0; i<len; i++) {
ND_PRINT((ndo, "%s%x", sep, dp[i]));
sep = ":";
}
ND_PRINT((ndo, "]"));
return;
}
Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0);
if (sfsname) {
/* file system ID is ASCII, not numeric, for this server OS */
char temp[NFSX_V3FHMAX+1];
u_int stringlen;
/* Make sure string is null-terminated */
stringlen = len;
if (stringlen > NFSX_V3FHMAX)
stringlen = NFSX_V3FHMAX;
strncpy(temp, sfsname, stringlen);
temp[stringlen] = '\0';
/* Remove trailing spaces */
spacep = strchr(temp, ' ');
if (spacep)
*spacep = '\0';
ND_PRINT((ndo, " fh %s/", temp));
} else {
ND_PRINT((ndo, " fh %d,%d/",
fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor));
}
if(fsid.Fsid_dev.Minor == 257)
/* Print the undecoded handle */
ND_PRINT((ndo, "%s", fsid.Opaque_Handle));
else
ND_PRINT((ndo, "%ld", (long) ino));
}
| 167,906 |
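
The CWE-125 fix in the record above is an instance of a general bounded-copy pattern: the copy length is clamped to both the number of bytes actually present in the source and the destination capacity, and the result is NUL-terminated explicitly. A minimal sketch of that pattern follows; the helper name is hypothetical and not part of tcpdump.

#include <string.h>

/* Copy at most src_len bytes from src into dst (capacity dst_cap, > 0),
 * clamping to whichever bound is smaller and always NUL-terminating. */
static void copy_bounded(char *dst, size_t dst_cap,
                         const char *src, size_t src_len)
{
    size_t n = (src_len < dst_cap - 1) ? src_len : dst_cap - 1;
    memcpy(dst, src, n);   /* never reads past src_len source bytes */
    dst[n] = '\0';         /* never writes past dst_cap */
}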
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void RenderViewImpl::DidFocus() {
WebFrame* main_frame = webview() ? webview()->MainFrame() : nullptr;
bool is_processing_user_gesture =
WebUserGestureIndicator::IsProcessingUserGesture(
main_frame && main_frame->IsWebLocalFrame()
? main_frame->ToWebLocalFrame()
: nullptr);
if (is_processing_user_gesture &&
!RenderThreadImpl::current()->layout_test_mode()) {
Send(new ViewHostMsg_Focus(GetRoutingID()));
}
}
Commit Message: If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <[email protected]>
Reviewed-by: Philip Jägenstedt <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#533790}
CWE ID: | void RenderViewImpl::DidFocus(blink::WebLocalFrame* calling_frame) {
WebFrame* main_frame = webview() ? webview()->MainFrame() : nullptr;
bool is_processing_user_gesture =
WebUserGestureIndicator::IsProcessingUserGesture(
main_frame && main_frame->IsWebLocalFrame()
? main_frame->ToWebLocalFrame()
: nullptr);
if (is_processing_user_gesture &&
!RenderThreadImpl::current()->layout_test_mode()) {
Send(new ViewHostMsg_Focus(GetRoutingID()));
// Tattle on the frame that called |window.focus()|.
RenderFrameImpl* calling_render_frame =
RenderFrameImpl::FromWebFrame(calling_frame);
if (calling_render_frame)
calling_render_frame->FrameDidCallFocus();
}
}
| 172,720 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: cff_charset_load( CFF_Charset charset,
FT_UInt num_glyphs,
FT_Stream stream,
FT_ULong base_offset,
FT_ULong offset,
FT_Bool invert )
{
FT_Memory memory = stream->memory;
FT_Error error = CFF_Err_Ok;
FT_UShort glyph_sid;
/* If the the offset is greater than 2, we have to parse the */
/* charset table. */
if ( offset > 2 )
{
FT_UInt j;
charset->offset = base_offset + offset;
/* Get the format of the table. */
if ( FT_STREAM_SEEK( charset->offset ) ||
FT_READ_BYTE( charset->format ) )
goto Exit;
/* Allocate memory for sids. */
if ( FT_NEW_ARRAY( charset->sids, num_glyphs ) )
goto Exit;
/* assign the .notdef glyph */
charset->sids[0] = 0;
switch ( charset->format )
{
case 0:
if ( num_glyphs > 0 )
{
if ( FT_FRAME_ENTER( ( num_glyphs - 1 ) * 2 ) )
goto Exit;
for ( j = 1; j < num_glyphs; j++ )
charset->sids[j] = FT_GET_USHORT();
FT_FRAME_EXIT();
}
/* Read the first glyph sid of the range. */
if ( FT_READ_USHORT( glyph_sid ) )
goto Exit;
/* Read the number of glyphs in the range. */
if ( charset->format == 2 )
{
if ( FT_READ_USHORT( nleft ) )
goto Exit;
}
else
{
if ( FT_READ_BYTE( nleft ) )
goto Exit;
}
/* Fill in the range of sids -- `nleft + 1' glyphs. */
for ( i = 0; j < num_glyphs && i <= nleft; i++, j++, glyph_sid++ )
charset->sids[j] = glyph_sid;
}
}
break;
default:
FT_ERROR(( "cff_charset_load: invalid table format!\n" ));
error = CFF_Err_Invalid_File_Format;
goto Exit;
}
Commit Message:
CWE ID: CWE-189 | cff_charset_load( CFF_Charset charset,
FT_UInt num_glyphs,
FT_Stream stream,
FT_ULong base_offset,
FT_ULong offset,
FT_Bool invert )
{
FT_Memory memory = stream->memory;
FT_Error error = CFF_Err_Ok;
FT_UShort glyph_sid;
/* If the the offset is greater than 2, we have to parse the */
/* charset table. */
if ( offset > 2 )
{
FT_UInt j;
charset->offset = base_offset + offset;
/* Get the format of the table. */
if ( FT_STREAM_SEEK( charset->offset ) ||
FT_READ_BYTE( charset->format ) )
goto Exit;
/* Allocate memory for sids. */
if ( FT_NEW_ARRAY( charset->sids, num_glyphs ) )
goto Exit;
/* assign the .notdef glyph */
charset->sids[0] = 0;
switch ( charset->format )
{
case 0:
if ( num_glyphs > 0 )
{
if ( FT_FRAME_ENTER( ( num_glyphs - 1 ) * 2 ) )
goto Exit;
for ( j = 1; j < num_glyphs; j++ )
{
FT_UShort sid = FT_GET_USHORT();
/* this constant is given in the CFF specification */
if ( sid < 65000 )
charset->sids[j] = sid;
else
{
FT_ERROR(( "cff_charset_load:"
" invalid SID value %d set to zero\n", sid ));
charset->sids[j] = 0;
}
}
FT_FRAME_EXIT();
}
/* Read the first glyph sid of the range. */
if ( FT_READ_USHORT( glyph_sid ) )
goto Exit;
/* Read the number of glyphs in the range. */
if ( charset->format == 2 )
{
if ( FT_READ_USHORT( nleft ) )
goto Exit;
}
else
{
if ( FT_READ_BYTE( nleft ) )
goto Exit;
}
/* Fill in the range of sids -- `nleft + 1' glyphs. */
for ( i = 0; j < num_glyphs && i <= nleft; i++, j++, glyph_sid++ )
charset->sids[j] = glyph_sid;
}
}
break;
default:
FT_ERROR(( "cff_charset_load: invalid table format!\n" ));
error = CFF_Err_Invalid_File_Format;
goto Exit;
}
| 164,743 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
int i, j, codebook_index;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
/* make sure the extradata made it */
if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE);
return -1;
}
/* load up the VQA parameters from the header */
s->vqa_version = s->avctx->extradata[0];
s->width = AV_RL16(&s->avctx->extradata[6]);
s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
s->vector_width = s->avctx->extradata[10];
s->vector_height = s->avctx->extradata[11];
s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
((s->vector_height != 2) && (s->vector_height != 4))) {
/* return without further initialization */
return -1;
}
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
/* allocate decode buffer */
s->decode_buffer_size = (s->width / s->vector_width) *
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
if (!s->decode_buffer)
goto fail;
/* initialize the solid-color vectors */
if (s->vector_height == 4) {
codebook_index = 0xFF00 * 16;
for (i = 0; i < 256; i++)
for (j = 0; j < 16; j++)
s->codebook[codebook_index++] = i;
} else {
codebook_index = 0xF00 * 8;
for (i = 0; i < 256; i++)
for (j = 0; j < 8; j++)
s->codebook[codebook_index++] = i;
}
s->next_codebook_buffer_index = 0;
s->frame.data[0] = NULL;
return 0;
fail:
av_freep(&s->codebook);
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
return AVERROR(ENOMEM);
}
Commit Message:
CWE ID: CWE-119 | static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
int i, j, codebook_index;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
/* make sure the extradata made it */
if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE);
return -1;
}
/* load up the VQA parameters from the header */
s->vqa_version = s->avctx->extradata[0];
s->width = AV_RL16(&s->avctx->extradata[6]);
s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
s->vector_width = s->avctx->extradata[10];
s->vector_height = s->avctx->extradata[11];
s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
((s->vector_height != 2) && (s->vector_height != 4))) {
/* return without further initialization */
return -1;
}
if (s->width & (s->vector_width - 1) ||
s->height & (s->vector_height - 1)) {
av_log(avctx, AV_LOG_ERROR, "Image size not multiple of block size\n");
return AVERROR_INVALIDDATA;
}
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
/* allocate decode buffer */
s->decode_buffer_size = (s->width / s->vector_width) *
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
if (!s->decode_buffer)
goto fail;
/* initialize the solid-color vectors */
if (s->vector_height == 4) {
codebook_index = 0xFF00 * 16;
for (i = 0; i < 256; i++)
for (j = 0; j < 16; j++)
s->codebook[codebook_index++] = i;
} else {
codebook_index = 0xF00 * 8;
for (i = 0; i < 256; i++)
for (j = 0; j < 8; j++)
s->codebook[codebook_index++] = i;
}
s->next_codebook_buffer_index = 0;
s->frame.data[0] = NULL;
return 0;
fail:
av_freep(&s->codebook);
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
return AVERROR(ENOMEM);
}
| 165,148 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void RenderWidgetHostViewGuest::AcceleratedSurfaceNew(int32 width_in_pixel,
int32 height_in_pixel,
uint64 surface_handle) {
NOTIMPLEMENTED();
}
Commit Message: Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void RenderWidgetHostViewGuest::AcceleratedSurfaceNew(
int32 width_in_pixel,
int32 height_in_pixel,
uint64 surface_handle,
const std::string& mailbox_name) {
NOTIMPLEMENTED();
}
| 171,392 |
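
The commit message in the record above describes an ownership hand-off: at swap time the producer publishes the texture under a mailbox name together with a surface_handle, and the consumer's ACK carries back the handle of the buffer it is releasing, or 0 if it kept the buffer and a fresh texture must be allocated. A rough, self-contained sketch of that ACK decision follows; the type and function names are invented for illustration and are not the actual Chromium IPC types.

#include <cstdint>

struct SwapAck {
  uint64_t returned_surface_handle;  // 0 == consumer kept the buffer
};

// Decide which handle backs the next frame: recycle the returned buffer,
// or hand out a brand-new one when nothing came back.
uint64_t NextBackbufferHandle(const SwapAck& ack, uint64_t* next_new_handle) {
  if (ack.returned_surface_handle == 0)
    return ++(*next_new_handle);       // allocate a new texture/handle
  return ack.returned_surface_handle;  // reuse the buffer the UI returned
}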
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void utf16_to_utf8(const char16_t* src, size_t src_len, char* dst)
{
if (src == NULL || src_len == 0 || dst == NULL) {
return;
}
const char16_t* cur_utf16 = src;
const char16_t* const end_utf16 = src + src_len;
char *cur = dst;
while (cur_utf16 < end_utf16) {
char32_t utf32;
if((*cur_utf16 & 0xFC00) == 0xD800 && (cur_utf16 + 1) < end_utf16
&& (*(cur_utf16 + 1) & 0xFC00) == 0xDC00) {
utf32 = (*cur_utf16++ - 0xD800) << 10;
utf32 |= *cur_utf16++ - 0xDC00;
utf32 += 0x10000;
} else {
utf32 = (char32_t) *cur_utf16++;
}
const size_t len = utf32_codepoint_utf8_length(utf32);
utf32_codepoint_to_utf8((uint8_t*)cur, utf32, len);
cur += len;
}
*cur = '\0';
}
Commit Message: libutils/Unicode.cpp: Correct length computation and add checks for utf16->utf8
Inconsistent behaviour between utf16_to_utf8 and utf16_to_utf8_length
is causing a heap overflow.
Correcting the length computation and adding bound checks to the
conversion functions.
Test: ran libutils_tests
Bug: 29250543
Change-Id: I6115e3357141ed245c63c6eb25fc0fd0a9a7a2bb
(cherry picked from commit c4966a363e46d2e1074d1a365e232af0dcedd6a1)
CWE ID: CWE-119 | void utf16_to_utf8(const char16_t* src, size_t src_len, char* dst, size_t dst_len)
{
if (src == NULL || src_len == 0 || dst == NULL) {
return;
}
const char16_t* cur_utf16 = src;
const char16_t* const end_utf16 = src + src_len;
char *cur = dst;
while (cur_utf16 < end_utf16) {
char32_t utf32;
if((*cur_utf16 & 0xFC00) == 0xD800 && (cur_utf16 + 1) < end_utf16
&& (*(cur_utf16 + 1) & 0xFC00) == 0xDC00) {
utf32 = (*cur_utf16++ - 0xD800) << 10;
utf32 |= *cur_utf16++ - 0xDC00;
utf32 += 0x10000;
} else {
utf32 = (char32_t) *cur_utf16++;
}
const size_t len = utf32_codepoint_utf8_length(utf32);
LOG_ALWAYS_FATAL_IF(dst_len < len, "%zu < %zu", dst_len, len);
utf32_codepoint_to_utf8((uint8_t*)cur, utf32, len);
cur += len;
dst_len -= len;
}
LOG_ALWAYS_FATAL_IF(dst_len < 1, "%zu < 1", dst_len);
*cur = '\0';
}
| 173,419 |
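
The root cause described in the commit message above is two code paths — length computation and conversion — disagreeing about how UTF-16 surrogate pairs map to UTF-8 bytes. Below is a self-contained sketch of a length computation that pairs surrogates exactly the way the converter does; the function name and the use of uint16_t are illustrative, not the actual Android libutils API.

#include <stddef.h>
#include <stdint.h>

static size_t utf16_to_utf8_byte_count(const uint16_t *src, size_t src_len)
{
    size_t bytes = 0;
    for (size_t i = 0; i < src_len; ) {
        uint32_t cp;
        if ((src[i] & 0xFC00) == 0xD800 && i + 1 < src_len &&
            (src[i + 1] & 0xFC00) == 0xDC00) {
            /* valid surrogate pair -> one code point >= 0x10000 */
            cp = 0x10000 + (((uint32_t)(src[i] - 0xD800) << 10) |
                            (uint32_t)(src[i + 1] - 0xDC00));
            i += 2;
        } else {
            cp = src[i];   /* BMP code point (or lone surrogate, kept as-is) */
            i += 1;
        }
        bytes += cp < 0x80 ? 1 : cp < 0x800 ? 2 : cp < 0x10000 ? 3 : 4;
    }
    return bytes;          /* caller still adds 1 for the trailing '\0' */
}

Whatever pairing rule the length function uses, the converter must apply the identical rule — and, as in the fixed code above, the converter should additionally check the remaining destination capacity before every write rather than trusting the precomputed length alone.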
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool HeapAllocator::backingShrink(void* address,
size_t quantizedCurrentSize,
size_t quantizedShrunkSize) {
if (!address || quantizedShrunkSize == quantizedCurrentSize)
return true;
ASSERT(quantizedShrunkSize < quantizedCurrentSize);
ThreadState* state = ThreadState::current();
if (state->sweepForbidden())
return false;
ASSERT(!state->isInGC());
ASSERT(state->isAllocationAllowed());
DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap());
BasePage* page = pageFromObject(address);
if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)
return false;
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
ASSERT(header->checkHeader());
NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();
if (quantizedCurrentSize <=
quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 &&
!arena->isObjectAllocatedAtAllocationPoint(header))
return true;
bool succeededAtAllocationPoint =
arena->shrinkObject(header, quantizedShrunkSize);
if (succeededAtAllocationPoint)
state->allocationPointAdjusted(arena->arenaIndex());
return true;
}
Commit Message: Call HeapObjectHeader::checkHeader solely for its side-effect.
This requires changing its signature. This is a preliminary stage to making it
private.
BUG=633030
Review-Url: https://codereview.chromium.org/2698673003
Cr-Commit-Position: refs/heads/master@{#460489}
CWE ID: CWE-119 | bool HeapAllocator::backingShrink(void* address,
size_t quantizedCurrentSize,
size_t quantizedShrunkSize) {
if (!address || quantizedShrunkSize == quantizedCurrentSize)
return true;
ASSERT(quantizedShrunkSize < quantizedCurrentSize);
ThreadState* state = ThreadState::current();
if (state->sweepForbidden())
return false;
ASSERT(!state->isInGC());
ASSERT(state->isAllocationAllowed());
DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap());
BasePage* page = pageFromObject(address);
if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)
return false;
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
header->checkHeader();
NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();
if (quantizedCurrentSize <=
quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 &&
!arena->isObjectAllocatedAtAllocationPoint(header))
return true;
bool succeededAtAllocationPoint =
arena->shrinkObject(header, quantizedShrunkSize);
if (succeededAtAllocationPoint)
state->allocationPointAdjusted(arena->arenaIndex());
return true;
}
| 172,707 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void MediaStreamManager::GenerateStream(
int render_process_id,
int render_frame_id,
int page_request_id,
const StreamControls& controls,
MediaDeviceSaltAndOrigin salt_and_origin,
bool user_gesture,
GenerateStreamCallback generate_stream_cb,
DeviceStoppedCallback device_stopped_cb,
DeviceChangedCallback device_changed_cb) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DVLOG(1) << "GenerateStream()";
DeviceRequest* request = new DeviceRequest(
render_process_id, render_frame_id, page_request_id, user_gesture,
MEDIA_GENERATE_STREAM, controls, std::move(salt_and_origin),
std::move(device_stopped_cb));
request->device_changed_cb = std::move(device_changed_cb);
const std::string& label = AddRequest(request);
request->generate_stream_cb = std::move(generate_stream_cb);
if (generate_stream_test_callback_) {
if (std::move(generate_stream_test_callback_).Run(controls)) {
FinalizeGenerateStream(label, request);
} else {
FinalizeRequestFailed(label, request, MEDIA_DEVICE_INVALID_STATE);
}
return;
}
base::PostTaskWithTraits(FROM_HERE, {BrowserThread::IO},
base::BindOnce(&MediaStreamManager::SetUpRequest,
base::Unretained(this), label));
}
Commit Message: Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
CWE ID: CWE-189 | void MediaStreamManager::GenerateStream(
int render_frame_id,
int requester_id,
int page_request_id,
const StreamControls& controls,
MediaDeviceSaltAndOrigin salt_and_origin,
bool user_gesture,
GenerateStreamCallback generate_stream_cb,
DeviceStoppedCallback device_stopped_cb,
DeviceChangedCallback device_changed_cb) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DVLOG(1) << "GenerateStream()";
DeviceRequest* request = new DeviceRequest(
render_process_id, render_frame_id, requester_id, page_request_id,
user_gesture, MEDIA_GENERATE_STREAM, controls, std::move(salt_and_origin),
std::move(device_stopped_cb));
request->device_changed_cb = std::move(device_changed_cb);
const std::string& label = AddRequest(request);
request->generate_stream_cb = std::move(generate_stream_cb);
if (generate_stream_test_callback_) {
if (std::move(generate_stream_test_callback_).Run(controls)) {
FinalizeGenerateStream(label, request);
} else {
FinalizeRequestFailed(label, request, MEDIA_DEVICE_INVALID_STATE);
}
return;
}
base::PostTaskWithTraits(FROM_HERE, {BrowserThread::IO},
base::BindOnce(&MediaStreamManager::SetUpRequest,
base::Unretained(this), label));
}
| 173,103 |
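
The design described in the commit message above relies on tagging every request with the id of the dispatcher host that created it, so that destroying one host (or its strong binding) cancels only the requests it owns. A generic sketch of that scoping, using invented types rather than the real content:: classes:

#include <algorithm>
#include <vector>

struct Request {
  int requester_id;     // which dispatcher host created this request
  int page_request_id;
};

// Cancel only the requests created by the given dispatcher host.
void CancelRequestsFor(std::vector<Request>& requests, int requester_id) {
  requests.erase(std::remove_if(requests.begin(), requests.end(),
                                [requester_id](const Request& r) {
                                  return r.requester_id == requester_id;
                                }),
                 requests.end());
}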
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (seglen) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = ctx->used;
if (!used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
used = min_t(unsigned long, used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
if (err < 0)
goto unlock;
if (ctx->more || used < ctx->used)
used -= used % bs;
err = -EINVAL;
if (!used)
goto free;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
af_alg_free_sg(&ctx->rsgl);
if (err)
goto unlock;
copied += used;
from += used;
seglen -= used;
skcipher_pull_sgl(sk, used);
}
}
err = 0;
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return copied ?: err;
}
Commit Message: crypto: algif - suppress sending source address information in recvmsg
The current code does not set the msg_namelen member to 0 and therefore
makes net/socket.c leak the local sockaddr_storage variable to userland
-- 128 bytes of kernel stack memory. Fix that.
Cc: <[email protected]> # 2.6.38
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
CWE ID: CWE-200 | static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
msg->msg_namelen = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (seglen) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = ctx->used;
if (!used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
used = min_t(unsigned long, used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
if (err < 0)
goto unlock;
if (ctx->more || used < ctx->used)
used -= used % bs;
err = -EINVAL;
if (!used)
goto free;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
af_alg_free_sg(&ctx->rsgl);
if (err)
goto unlock;
copied += used;
from += used;
seglen -= used;
skcipher_pull_sgl(sk, used);
}
}
err = 0;
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return copied ?: err;
}
| 166,047 |
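
The leak described in the commit message above happens because the generic socket layer copies back an address buffer whose length field the handler never initialized. For any recvmsg-style handler that does not report a peer address, the mitigation is a single assignment early in the function, as the fixed skcipher_recvmsg shows. A kernel-style skeleton of the pattern (not a buildable module; struct names follow the kernel's, everything else is elided):

static int example_recvmsg(struct socket *sock, struct msghdr *msg,
                           size_t len, int flags)
{
    /* This socket type has no source address; say so explicitly, otherwise
     * the caller may copy an uninitialized sockaddr_storage (up to 128
     * bytes of kernel stack) back to userspace. */
    msg->msg_namelen = 0;

    /* ... copy payload into the user iovecs as before ... */
    return 0;
}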
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: DevToolsUI::DevToolsUI(content::WebUI* web_ui)
: WebUIController(web_ui),
bindings_(web_ui->GetWebContents()) {
web_ui->SetBindings(0);
Profile* profile = Profile::FromWebUI(web_ui);
content::URLDataSource::Add(
profile,
new DevToolsDataSource(profile->GetRequestContext()));
}
Commit Message: [DevTools] Move sanitize url to devtools_ui.cc.
Compatibility script is not reliable enough.
BUG=653134
Review-Url: https://codereview.chromium.org/2403633002
Cr-Commit-Position: refs/heads/master@{#425814}
CWE ID: CWE-200 | DevToolsUI::DevToolsUI(content::WebUI* web_ui)
: WebUIController(web_ui) {
web_ui->SetBindings(0);
Profile* profile = Profile::FromWebUI(web_ui);
content::URLDataSource::Add(
profile,
new DevToolsDataSource(profile->GetRequestContext()));
GURL url = web_ui->GetWebContents()->GetVisibleURL();
if (url.spec() == SanitizeFrontendURL(url).spec())
bindings_.reset(new DevToolsUIBindings(web_ui->GetWebContents()));
}
| 172,510 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: EAS_BOOL WT_CheckSampleEnd (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame, EAS_BOOL update)
{
EAS_U32 endPhaseAccum;
EAS_U32 endPhaseFrac;
EAS_I32 numSamples;
EAS_BOOL done = EAS_FALSE;
/* check to see if we hit the end of the waveform this time */
/*lint -e{703} use shift for performance */
endPhaseFrac = pWTVoice->phaseFrac + (pWTIntFrame->frame.phaseIncrement << SYNTH_UPDATE_PERIOD_IN_BITS);
endPhaseAccum = pWTVoice->phaseAccum + GET_PHASE_INT_PART(endPhaseFrac);
if (endPhaseAccum >= pWTVoice->loopEnd)
{
/* calculate how far current ptr is from end */
numSamples = (EAS_I32) (pWTVoice->loopEnd - pWTVoice->phaseAccum);
/* now account for the fractional portion */
/*lint -e{703} use shift for performance */
numSamples = (EAS_I32) ((numSamples << NUM_PHASE_FRAC_BITS) - pWTVoice->phaseFrac);
if (pWTIntFrame->frame.phaseIncrement) {
pWTIntFrame->numSamples = 1 + (numSamples / pWTIntFrame->frame.phaseIncrement);
} else {
pWTIntFrame->numSamples = numSamples;
}
if (pWTIntFrame->numSamples < 0) {
ALOGE("b/26366256");
pWTIntFrame->numSamples = 0;
}
/* sound will be done this frame */
done = EAS_TRUE;
}
/* update data for off-chip synth */
if (update)
{
pWTVoice->phaseFrac = endPhaseFrac;
pWTVoice->phaseAccum = endPhaseAccum;
}
return done;
}
Commit Message: Sonivox: add SafetyNet log.
Bug: 26366256
Change-Id: Ief72e01b7cc6d87a015105af847a99d3d9b03cb0
CWE ID: CWE-119 | EAS_BOOL WT_CheckSampleEnd (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame, EAS_BOOL update)
{
EAS_U32 endPhaseAccum;
EAS_U32 endPhaseFrac;
EAS_I32 numSamples;
EAS_BOOL done = EAS_FALSE;
/* check to see if we hit the end of the waveform this time */
/*lint -e{703} use shift for performance */
endPhaseFrac = pWTVoice->phaseFrac + (pWTIntFrame->frame.phaseIncrement << SYNTH_UPDATE_PERIOD_IN_BITS);
endPhaseAccum = pWTVoice->phaseAccum + GET_PHASE_INT_PART(endPhaseFrac);
if (endPhaseAccum >= pWTVoice->loopEnd)
{
/* calculate how far current ptr is from end */
numSamples = (EAS_I32) (pWTVoice->loopEnd - pWTVoice->phaseAccum);
/* now account for the fractional portion */
/*lint -e{703} use shift for performance */
numSamples = (EAS_I32) ((numSamples << NUM_PHASE_FRAC_BITS) - pWTVoice->phaseFrac);
if (pWTIntFrame->frame.phaseIncrement) {
pWTIntFrame->numSamples = 1 + (numSamples / pWTIntFrame->frame.phaseIncrement);
} else {
pWTIntFrame->numSamples = numSamples;
}
if (pWTIntFrame->numSamples < 0) {
ALOGE("b/26366256");
android_errorWriteLog(0x534e4554, "26366256");
pWTIntFrame->numSamples = 0;
}
/* sound will be done this frame */
done = EAS_TRUE;
}
/* update data for off-chip synth */
if (update)
{
pWTVoice->phaseFrac = endPhaseFrac;
pWTVoice->phaseAccum = endPhaseAccum;
}
return done;
}
| 174,607 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: hash_foreach (gpointer key, gpointer val, gpointer user_data)
{
const char *keystr = key;
const char *valstr = val;
guint *count = user_data;
*count += (strlen (keystr) + strlen (valstr));
g_print ("%s -> %s\n", keystr, valstr);
}
Commit Message:
CWE ID: CWE-264 | hash_foreach (gpointer key, gpointer val, gpointer user_data)
| 165,084 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int scan(Scanner *s)
{
uchar *cursor = s->cur;
char *str, *ptr = NULL;
std:
s->tok = cursor;
s->len = 0;
#line 311 "ext/date/lib/parse_iso_intervals.re"
#line 291 "ext/date/lib/parse_iso_intervals.c"
{
YYCTYPE yych;
unsigned int yyaccept = 0;
static const unsigned char yybm[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
YYDEBUG(0, *YYCURSOR);
if ((YYLIMIT - YYCURSOR) < 20) YYFILL(20);
yych = *YYCURSOR;
if (yych <= ',') {
if (yych <= '\n') {
if (yych <= 0x00) goto yy9;
if (yych <= 0x08) goto yy11;
if (yych <= '\t') goto yy7;
goto yy9;
} else {
if (yych == ' ') goto yy7;
if (yych <= '+') goto yy11;
goto yy7;
}
} else {
if (yych <= 'O') {
if (yych <= '-') goto yy11;
if (yych <= '/') goto yy7;
if (yych <= '9') goto yy4;
goto yy11;
} else {
if (yych <= 'P') goto yy5;
if (yych != 'R') goto yy11;
}
}
YYDEBUG(2, *YYCURSOR);
++YYCURSOR;
if ((yych = *YYCURSOR) <= '/') goto yy3;
if (yych <= '9') goto yy98;
yy3:
YYDEBUG(3, *YYCURSOR);
#line 424 "ext/date/lib/parse_iso_intervals.re"
{
add_error(s, "Unexpected character");
goto std;
}
#line 366 "ext/date/lib/parse_iso_intervals.c"
yy4:
YYDEBUG(4, *YYCURSOR);
yyaccept = 0;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy3;
if (yych <= '9') goto yy59;
goto yy3;
yy5:
YYDEBUG(5, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy12;
if (yych == 'T') goto yy14;
yy6:
YYDEBUG(6, *YYCURSOR);
#line 351 "ext/date/lib/parse_iso_intervals.re"
{
timelib_sll nr;
int in_time = 0;
DEBUG_OUTPUT("period");
TIMELIB_INIT;
ptr++;
do {
if ( *ptr == 'T' ) {
in_time = 1;
ptr++;
}
if ( *ptr == '\0' ) {
add_error(s, "Missing expected time part");
break;
}
nr = timelib_get_unsigned_nr((char **) &ptr, 12);
switch (*ptr) {
case 'Y': s->period->y = nr; break;
case 'W': s->period->d = nr * 7; break;
case 'D': s->period->d = nr; break;
case 'H': s->period->h = nr; break;
case 'S': s->period->s = nr; break;
case 'M':
if (in_time) {
s->period->i = nr;
} else {
s->period->m = nr;
}
break;
default:
add_error(s, "Undefined period specifier");
break;
}
ptr++;
} while (*ptr);
s->have_period = 1;
TIMELIB_DEINIT;
return TIMELIB_PERIOD;
}
#line 424 "ext/date/lib/parse_iso_intervals.c"
yy7:
YYDEBUG(7, *YYCURSOR);
++YYCURSOR;
YYDEBUG(8, *YYCURSOR);
#line 413 "ext/date/lib/parse_iso_intervals.re"
{
goto std;
}
#line 433 "ext/date/lib/parse_iso_intervals.c"
yy9:
YYDEBUG(9, *YYCURSOR);
++YYCURSOR;
YYDEBUG(10, *YYCURSOR);
#line 418 "ext/date/lib/parse_iso_intervals.re"
{
s->pos = cursor; s->line++;
goto std;
}
#line 443 "ext/date/lib/parse_iso_intervals.c"
yy11:
YYDEBUG(11, *YYCURSOR);
yych = *++YYCURSOR;
goto yy3;
yy12:
YYDEBUG(12, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= 'L') {
if (yych <= '9') {
if (yych >= '0') goto yy25;
} else {
if (yych == 'D') goto yy24;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych >= 'W') goto yy26;
} else {
if (yych == 'Y') goto yy28;
}
}
yy13:
YYDEBUG(13, *YYCURSOR);
YYCURSOR = YYMARKER;
if (yyaccept <= 0) {
goto yy3;
} else {
goto yy6;
}
yy14:
YYDEBUG(14, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yybm[0+yych] & 128) {
goto yy15;
}
goto yy6;
yy15:
YYDEBUG(15, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
YYDEBUG(16, *YYCURSOR);
if (yybm[0+yych] & 128) {
goto yy15;
}
if (yych <= 'L') {
if (yych == 'H') goto yy19;
goto yy13;
} else {
if (yych <= 'M') goto yy18;
if (yych != 'S') goto yy13;
}
yy17:
YYDEBUG(17, *YYCURSOR);
yych = *++YYCURSOR;
goto yy6;
yy18:
YYDEBUG(18, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy22;
goto yy6;
yy19:
YYDEBUG(19, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych >= ':') goto yy6;
yy20:
YYDEBUG(20, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
YYDEBUG(21, *YYCURSOR);
if (yych <= 'L') {
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy20;
goto yy13;
} else {
if (yych <= 'M') goto yy18;
if (yych == 'S') goto yy17;
goto yy13;
}
yy22:
YYDEBUG(22, *YYCURSOR);
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
YYDEBUG(23, *YYCURSOR);
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy22;
if (yych == 'S') goto yy17;
goto yy13;
yy24:
YYDEBUG(24, *YYCURSOR);
yych = *++YYCURSOR;
if (yych == 'T') goto yy14;
goto yy6;
yy25:
YYDEBUG(25, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= 'L') {
if (yych <= '9') {
if (yych <= '/') goto yy13;
goto yy35;
} else {
if (yych == 'D') goto yy24;
goto yy13;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych <= 'V') goto yy13;
} else {
if (yych == 'Y') goto yy28;
goto yy13;
}
}
yy26:
YYDEBUG(26, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy33;
if (yych == 'T') goto yy14;
goto yy6;
yy27:
YYDEBUG(27, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy31;
if (yych == 'T') goto yy14;
goto yy6;
yy28:
YYDEBUG(28, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy29;
if (yych == 'T') goto yy14;
goto yy6;
yy29:
YYDEBUG(29, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
YYDEBUG(30, *YYCURSOR);
if (yych <= 'D') {
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy29;
if (yych <= 'C') goto yy13;
goto yy24;
} else {
if (yych <= 'M') {
if (yych <= 'L') goto yy13;
goto yy27;
} else {
if (yych == 'W') goto yy26;
goto yy13;
}
}
yy31:
YYDEBUG(31, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
YYDEBUG(32, *YYCURSOR);
if (yych <= 'C') {
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy31;
goto yy13;
} else {
if (yych <= 'D') goto yy24;
if (yych == 'W') goto yy26;
goto yy13;
}
yy33:
YYDEBUG(33, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
YYDEBUG(34, *YYCURSOR);
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy33;
if (yych == 'D') goto yy24;
goto yy13;
yy35:
YYDEBUG(35, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= 'L') {
if (yych <= '9') {
if (yych <= '/') goto yy13;
} else {
if (yych == 'D') goto yy24;
goto yy13;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych <= 'V') goto yy13;
goto yy26;
} else {
if (yych == 'Y') goto yy28;
goto yy13;
}
}
YYDEBUG(36, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != '-') goto yy39;
YYDEBUG(37, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy40;
if (yych <= '1') goto yy41;
goto yy13;
yy38:
YYDEBUG(38, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
yy39:
YYDEBUG(39, *YYCURSOR);
if (yych <= 'L') {
if (yych <= '9') {
if (yych <= '/') goto yy13;
goto yy38;
} else {
if (yych == 'D') goto yy24;
goto yy13;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych <= 'V') goto yy13;
goto yy26;
} else {
if (yych == 'Y') goto yy28;
goto yy13;
}
}
yy40:
YYDEBUG(40, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy42;
goto yy13;
yy41:
YYDEBUG(41, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '3') goto yy13;
yy42:
YYDEBUG(42, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != '-') goto yy13;
YYDEBUG(43, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy44;
if (yych <= '2') goto yy45;
if (yych <= '3') goto yy46;
goto yy13;
yy44:
YYDEBUG(44, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy47;
goto yy13;
yy45:
YYDEBUG(45, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy47;
goto yy13;
yy46:
YYDEBUG(46, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '2') goto yy13;
yy47:
YYDEBUG(47, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'T') goto yy13;
YYDEBUG(48, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '1') goto yy49;
if (yych <= '2') goto yy50;
goto yy13;
yy49:
YYDEBUG(49, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy51;
goto yy13;
yy50:
YYDEBUG(50, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '5') goto yy13;
yy51:
YYDEBUG(51, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(52, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(53, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(54, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(55, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(56, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(57, *YYCURSOR);
++YYCURSOR;
YYDEBUG(58, *YYCURSOR);
#line 393 "ext/date/lib/parse_iso_intervals.re"
{
DEBUG_OUTPUT("combinedrep");
TIMELIB_INIT;
s->period->y = timelib_get_unsigned_nr((char **) &ptr, 4);
ptr++;
s->period->m = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->d = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->h = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->i = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->s = timelib_get_unsigned_nr((char **) &ptr, 2);
s->have_period = 1;
TIMELIB_DEINIT;
return TIMELIB_PERIOD;
}
#line 792 "ext/date/lib/parse_iso_intervals.c"
yy59:
YYDEBUG(59, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(60, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(61, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych == '-') goto yy64;
goto yy13;
} else {
if (yych <= '0') goto yy62;
if (yych <= '1') goto yy63;
goto yy13;
}
yy62:
YYDEBUG(62, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy85;
goto yy13;
yy63:
YYDEBUG(63, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '2') goto yy85;
goto yy13;
yy64:
YYDEBUG(64, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy65;
if (yych <= '1') goto yy66;
goto yy13;
yy65:
YYDEBUG(65, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy67;
goto yy13;
yy66:
YYDEBUG(66, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '3') goto yy13;
yy67:
YYDEBUG(67, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != '-') goto yy13;
YYDEBUG(68, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy69;
if (yych <= '2') goto yy70;
if (yych <= '3') goto yy71;
goto yy13;
yy69:
YYDEBUG(69, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy72;
goto yy13;
yy70:
YYDEBUG(70, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy72;
goto yy13;
yy71:
YYDEBUG(71, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '2') goto yy13;
yy72:
YYDEBUG(72, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'T') goto yy13;
YYDEBUG(73, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '1') goto yy74;
if (yych <= '2') goto yy75;
goto yy13;
yy74:
YYDEBUG(74, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy76;
goto yy13;
yy75:
YYDEBUG(75, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '5') goto yy13;
yy76:
YYDEBUG(76, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(77, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(78, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(79, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(80, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(81, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(82, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'Z') goto yy13;
yy83:
YYDEBUG(83, *YYCURSOR);
++YYCURSOR;
YYDEBUG(84, *YYCURSOR);
#line 327 "ext/date/lib/parse_iso_intervals.re"
{
timelib_time *current;
if (s->have_date || s->have_period) {
current = s->end;
s->have_end_date = 1;
} else {
current = s->begin;
s->have_begin_date = 1;
}
DEBUG_OUTPUT("datetimebasic | datetimeextended");
TIMELIB_INIT;
current->y = timelib_get_nr((char **) &ptr, 4);
current->m = timelib_get_nr((char **) &ptr, 2);
current->d = timelib_get_nr((char **) &ptr, 2);
current->h = timelib_get_nr((char **) &ptr, 2);
current->i = timelib_get_nr((char **) &ptr, 2);
current->s = timelib_get_nr((char **) &ptr, 2);
s->have_date = 1;
TIMELIB_DEINIT;
return TIMELIB_ISO_DATE;
}
#line 944 "ext/date/lib/parse_iso_intervals.c"
yy85:
YYDEBUG(85, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy86;
if (yych <= '2') goto yy87;
if (yych <= '3') goto yy88;
goto yy13;
yy86:
YYDEBUG(86, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy89;
goto yy13;
yy87:
YYDEBUG(87, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy89;
goto yy13;
yy88:
YYDEBUG(88, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '2') goto yy13;
yy89:
YYDEBUG(89, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'T') goto yy13;
YYDEBUG(90, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '1') goto yy91;
if (yych <= '2') goto yy92;
goto yy13;
yy91:
YYDEBUG(91, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy93;
goto yy13;
yy92:
YYDEBUG(92, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '5') goto yy13;
yy93:
YYDEBUG(93, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(94, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(95, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(96, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(97, *YYCURSOR);
yych = *++YYCURSOR;
if (yych == 'Z') goto yy83;
goto yy13;
yy98:
YYDEBUG(98, *YYCURSOR);
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
YYDEBUG(99, *YYCURSOR);
if (yych <= '/') goto yy100;
if (yych <= '9') goto yy98;
yy100:
YYDEBUG(100, *YYCURSOR);
#line 316 "ext/date/lib/parse_iso_intervals.re"
{
DEBUG_OUTPUT("recurrences");
TIMELIB_INIT;
ptr++;
s->recurrences = timelib_get_unsigned_nr((char **) &ptr, 9);
TIMELIB_DEINIT;
s->have_recurrences = 1;
return TIMELIB_PERIOD;
}
#line 1032 "ext/date/lib/parse_iso_intervals.c"
}
#line 428 "ext/date/lib/parse_iso_intervals.re"
}
Commit Message:
CWE ID: CWE-119 | static int scan(Scanner *s)
{
uchar *cursor = s->cur;
char *str, *ptr = NULL;
std:
s->tok = cursor;
s->len = 0;
#line 311 "ext/date/lib/parse_iso_intervals.re"
#line 291 "ext/date/lib/parse_iso_intervals.c"
{
YYCTYPE yych;
unsigned int yyaccept = 0;
static const unsigned char yybm[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
YYDEBUG(0, *YYCURSOR);
if ((YYLIMIT - YYCURSOR) < 20) YYFILL(20);
yych = *YYCURSOR;
if (yych <= ',') {
if (yych <= '\n') {
if (yych <= 0x00) goto yy9;
if (yych <= 0x08) goto yy11;
if (yych <= '\t') goto yy7;
goto yy9;
} else {
if (yych == ' ') goto yy7;
if (yych <= '+') goto yy11;
goto yy7;
}
} else {
if (yych <= 'O') {
if (yych <= '-') goto yy11;
if (yych <= '/') goto yy7;
if (yych <= '9') goto yy4;
goto yy11;
} else {
if (yych <= 'P') goto yy5;
if (yych != 'R') goto yy11;
}
}
YYDEBUG(2, *YYCURSOR);
++YYCURSOR;
if ((yych = *YYCURSOR) <= '/') goto yy3;
if (yych <= '9') goto yy98;
yy3:
YYDEBUG(3, *YYCURSOR);
#line 424 "ext/date/lib/parse_iso_intervals.re"
{
add_error(s, "Unexpected character");
goto std;
}
#line 366 "ext/date/lib/parse_iso_intervals.c"
yy4:
YYDEBUG(4, *YYCURSOR);
yyaccept = 0;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy3;
if (yych <= '9') goto yy59;
goto yy3;
yy5:
YYDEBUG(5, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy12;
if (yych == 'T') goto yy14;
yy6:
YYDEBUG(6, *YYCURSOR);
#line 351 "ext/date/lib/parse_iso_intervals.re"
{
timelib_sll nr;
int in_time = 0;
DEBUG_OUTPUT("period");
TIMELIB_INIT;
ptr++;
do {
if ( *ptr == 'T' ) {
in_time = 1;
ptr++;
}
if ( *ptr == '\0' ) {
add_error(s, "Missing expected time part");
break;
}
nr = timelib_get_unsigned_nr((char **) &ptr, 12);
switch (*ptr) {
case 'Y': s->period->y = nr; break;
case 'W': s->period->d = nr * 7; break;
case 'D': s->period->d = nr; break;
case 'H': s->period->h = nr; break;
case 'S': s->period->s = nr; break;
case 'M':
if (in_time) {
s->period->i = nr;
} else {
s->period->m = nr;
}
break;
default:
add_error(s, "Undefined period specifier");
break;
}
ptr++;
} while (!s->errors->error_count && *ptr);
s->have_period = 1;
TIMELIB_DEINIT;
return TIMELIB_PERIOD;
}
#line 424 "ext/date/lib/parse_iso_intervals.c"
yy7:
YYDEBUG(7, *YYCURSOR);
++YYCURSOR;
YYDEBUG(8, *YYCURSOR);
#line 413 "ext/date/lib/parse_iso_intervals.re"
{
goto std;
}
#line 433 "ext/date/lib/parse_iso_intervals.c"
yy9:
YYDEBUG(9, *YYCURSOR);
++YYCURSOR;
YYDEBUG(10, *YYCURSOR);
#line 418 "ext/date/lib/parse_iso_intervals.re"
{
s->pos = cursor; s->line++;
goto std;
}
#line 443 "ext/date/lib/parse_iso_intervals.c"
yy11:
YYDEBUG(11, *YYCURSOR);
yych = *++YYCURSOR;
goto yy3;
yy12:
YYDEBUG(12, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= 'L') {
if (yych <= '9') {
if (yych >= '0') goto yy25;
} else {
if (yych == 'D') goto yy24;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych >= 'W') goto yy26;
} else {
if (yych == 'Y') goto yy28;
}
}
yy13:
YYDEBUG(13, *YYCURSOR);
YYCURSOR = YYMARKER;
if (yyaccept <= 0) {
goto yy3;
} else {
goto yy6;
}
yy14:
YYDEBUG(14, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yybm[0+yych] & 128) {
goto yy15;
}
goto yy6;
yy15:
YYDEBUG(15, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
YYDEBUG(16, *YYCURSOR);
if (yybm[0+yych] & 128) {
goto yy15;
}
if (yych <= 'L') {
if (yych == 'H') goto yy19;
goto yy13;
} else {
if (yych <= 'M') goto yy18;
if (yych != 'S') goto yy13;
}
yy17:
YYDEBUG(17, *YYCURSOR);
yych = *++YYCURSOR;
goto yy6;
yy18:
YYDEBUG(18, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy22;
goto yy6;
yy19:
YYDEBUG(19, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych >= ':') goto yy6;
yy20:
YYDEBUG(20, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
YYDEBUG(21, *YYCURSOR);
if (yych <= 'L') {
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy20;
goto yy13;
} else {
if (yych <= 'M') goto yy18;
if (yych == 'S') goto yy17;
goto yy13;
}
yy22:
YYDEBUG(22, *YYCURSOR);
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
YYDEBUG(23, *YYCURSOR);
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy22;
if (yych == 'S') goto yy17;
goto yy13;
yy24:
YYDEBUG(24, *YYCURSOR);
yych = *++YYCURSOR;
if (yych == 'T') goto yy14;
goto yy6;
yy25:
YYDEBUG(25, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= 'L') {
if (yych <= '9') {
if (yych <= '/') goto yy13;
goto yy35;
} else {
if (yych == 'D') goto yy24;
goto yy13;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych <= 'V') goto yy13;
} else {
if (yych == 'Y') goto yy28;
goto yy13;
}
}
yy26:
YYDEBUG(26, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy33;
if (yych == 'T') goto yy14;
goto yy6;
yy27:
YYDEBUG(27, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy31;
if (yych == 'T') goto yy14;
goto yy6;
yy28:
YYDEBUG(28, *YYCURSOR);
yyaccept = 1;
yych = *(YYMARKER = ++YYCURSOR);
if (yych <= '/') goto yy6;
if (yych <= '9') goto yy29;
if (yych == 'T') goto yy14;
goto yy6;
yy29:
YYDEBUG(29, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
YYDEBUG(30, *YYCURSOR);
if (yych <= 'D') {
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy29;
if (yych <= 'C') goto yy13;
goto yy24;
} else {
if (yych <= 'M') {
if (yych <= 'L') goto yy13;
goto yy27;
} else {
if (yych == 'W') goto yy26;
goto yy13;
}
}
yy31:
YYDEBUG(31, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
YYDEBUG(32, *YYCURSOR);
if (yych <= 'C') {
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy31;
goto yy13;
} else {
if (yych <= 'D') goto yy24;
if (yych == 'W') goto yy26;
goto yy13;
}
yy33:
YYDEBUG(33, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
YYDEBUG(34, *YYCURSOR);
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy33;
if (yych == 'D') goto yy24;
goto yy13;
yy35:
YYDEBUG(35, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= 'L') {
if (yych <= '9') {
if (yych <= '/') goto yy13;
} else {
if (yych == 'D') goto yy24;
goto yy13;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych <= 'V') goto yy13;
goto yy26;
} else {
if (yych == 'Y') goto yy28;
goto yy13;
}
}
YYDEBUG(36, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != '-') goto yy39;
YYDEBUG(37, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy40;
if (yych <= '1') goto yy41;
goto yy13;
yy38:
YYDEBUG(38, *YYCURSOR);
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 3) YYFILL(3);
yych = *YYCURSOR;
yy39:
YYDEBUG(39, *YYCURSOR);
if (yych <= 'L') {
if (yych <= '9') {
if (yych <= '/') goto yy13;
goto yy38;
} else {
if (yych == 'D') goto yy24;
goto yy13;
}
} else {
if (yych <= 'W') {
if (yych <= 'M') goto yy27;
if (yych <= 'V') goto yy13;
goto yy26;
} else {
if (yych == 'Y') goto yy28;
goto yy13;
}
}
yy40:
YYDEBUG(40, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy42;
goto yy13;
yy41:
YYDEBUG(41, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '3') goto yy13;
yy42:
YYDEBUG(42, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != '-') goto yy13;
YYDEBUG(43, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy44;
if (yych <= '2') goto yy45;
if (yych <= '3') goto yy46;
goto yy13;
yy44:
YYDEBUG(44, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy47;
goto yy13;
yy45:
YYDEBUG(45, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy47;
goto yy13;
yy46:
YYDEBUG(46, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '2') goto yy13;
yy47:
YYDEBUG(47, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'T') goto yy13;
YYDEBUG(48, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '1') goto yy49;
if (yych <= '2') goto yy50;
goto yy13;
yy49:
YYDEBUG(49, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy51;
goto yy13;
yy50:
YYDEBUG(50, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '5') goto yy13;
yy51:
YYDEBUG(51, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(52, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(53, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(54, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(55, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(56, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(57, *YYCURSOR);
++YYCURSOR;
YYDEBUG(58, *YYCURSOR);
#line 393 "ext/date/lib/parse_iso_intervals.re"
{
DEBUG_OUTPUT("combinedrep");
TIMELIB_INIT;
s->period->y = timelib_get_unsigned_nr((char **) &ptr, 4);
ptr++;
s->period->m = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->d = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->h = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->i = timelib_get_unsigned_nr((char **) &ptr, 2);
ptr++;
s->period->s = timelib_get_unsigned_nr((char **) &ptr, 2);
s->have_period = 1;
TIMELIB_DEINIT;
return TIMELIB_PERIOD;
}
#line 792 "ext/date/lib/parse_iso_intervals.c"
yy59:
YYDEBUG(59, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(60, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(61, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych == '-') goto yy64;
goto yy13;
} else {
if (yych <= '0') goto yy62;
if (yych <= '1') goto yy63;
goto yy13;
}
yy62:
YYDEBUG(62, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy85;
goto yy13;
yy63:
YYDEBUG(63, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '2') goto yy85;
goto yy13;
yy64:
YYDEBUG(64, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy65;
if (yych <= '1') goto yy66;
goto yy13;
yy65:
YYDEBUG(65, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy67;
goto yy13;
yy66:
YYDEBUG(66, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '3') goto yy13;
yy67:
YYDEBUG(67, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != '-') goto yy13;
YYDEBUG(68, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy69;
if (yych <= '2') goto yy70;
if (yych <= '3') goto yy71;
goto yy13;
yy69:
YYDEBUG(69, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy72;
goto yy13;
yy70:
YYDEBUG(70, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy72;
goto yy13;
yy71:
YYDEBUG(71, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '2') goto yy13;
yy72:
YYDEBUG(72, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'T') goto yy13;
YYDEBUG(73, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '1') goto yy74;
if (yych <= '2') goto yy75;
goto yy13;
yy74:
YYDEBUG(74, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy76;
goto yy13;
yy75:
YYDEBUG(75, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '5') goto yy13;
yy76:
YYDEBUG(76, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(77, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(78, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(79, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != ':') goto yy13;
YYDEBUG(80, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(81, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(82, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'Z') goto yy13;
yy83:
YYDEBUG(83, *YYCURSOR);
++YYCURSOR;
YYDEBUG(84, *YYCURSOR);
#line 327 "ext/date/lib/parse_iso_intervals.re"
{
timelib_time *current;
if (s->have_date || s->have_period) {
current = s->end;
s->have_end_date = 1;
} else {
current = s->begin;
s->have_begin_date = 1;
}
DEBUG_OUTPUT("datetimebasic | datetimeextended");
TIMELIB_INIT;
current->y = timelib_get_nr((char **) &ptr, 4);
current->m = timelib_get_nr((char **) &ptr, 2);
current->d = timelib_get_nr((char **) &ptr, 2);
current->h = timelib_get_nr((char **) &ptr, 2);
current->i = timelib_get_nr((char **) &ptr, 2);
current->s = timelib_get_nr((char **) &ptr, 2);
s->have_date = 1;
TIMELIB_DEINIT;
return TIMELIB_ISO_DATE;
}
#line 944 "ext/date/lib/parse_iso_intervals.c"
yy85:
YYDEBUG(85, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '0') goto yy86;
if (yych <= '2') goto yy87;
if (yych <= '3') goto yy88;
goto yy13;
yy86:
YYDEBUG(86, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '0') goto yy13;
if (yych <= '9') goto yy89;
goto yy13;
yy87:
YYDEBUG(87, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy89;
goto yy13;
yy88:
YYDEBUG(88, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '2') goto yy13;
yy89:
YYDEBUG(89, *YYCURSOR);
yych = *++YYCURSOR;
if (yych != 'T') goto yy13;
YYDEBUG(90, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '1') goto yy91;
if (yych <= '2') goto yy92;
goto yy13;
yy91:
YYDEBUG(91, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych <= '9') goto yy93;
goto yy13;
yy92:
YYDEBUG(92, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '5') goto yy13;
yy93:
YYDEBUG(93, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(94, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(95, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= '6') goto yy13;
YYDEBUG(96, *YYCURSOR);
yych = *++YYCURSOR;
if (yych <= '/') goto yy13;
if (yych >= ':') goto yy13;
YYDEBUG(97, *YYCURSOR);
yych = *++YYCURSOR;
if (yych == 'Z') goto yy83;
goto yy13;
yy98:
YYDEBUG(98, *YYCURSOR);
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
YYDEBUG(99, *YYCURSOR);
if (yych <= '/') goto yy100;
if (yych <= '9') goto yy98;
yy100:
YYDEBUG(100, *YYCURSOR);
#line 316 "ext/date/lib/parse_iso_intervals.re"
{
DEBUG_OUTPUT("recurrences");
TIMELIB_INIT;
ptr++;
s->recurrences = timelib_get_unsigned_nr((char **) &ptr, 9);
TIMELIB_DEINIT;
s->have_recurrences = 1;
return TIMELIB_PERIOD;
}
#line 1032 "ext/date/lib/parse_iso_intervals.c"
}
#line 428 "ext/date/lib/parse_iso_intervals.re"
}
| 164,566 |
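A note on the scanner actions above: they convert each fixed-width digit group of the ISO-8601 combined representation with timelib_get_unsigned_nr() and step over the separator characters by hand. The stand-alone sketch below only illustrates that helper's contract (read at most N digits, then advance the cursor) under assumed semantics; the function name and details are illustrative, not the real timelib implementation.

#include <cctype>
#include <iostream>
#include <string>

// Reads up to max_digits decimal digits starting at *cursor and advances the
// cursor past them; a rough analogue of what the scanner actions above expect
// from timelib_get_unsigned_nr().
unsigned long read_unsigned(const char** cursor, int max_digits) {
    unsigned long value = 0;
    for (int consumed = 0;
         consumed < max_digits && std::isdigit(static_cast<unsigned char>(**cursor));
         ++consumed) {
        value = value * 10 + static_cast<unsigned long>(**cursor - '0');
        ++*cursor;
    }
    return value;
}

int main() {
    std::string input = "2024-06-01T12:30:45";
    const char* p = input.c_str();
    unsigned long year = read_unsigned(&p, 4);
    ++p;                                        // skip the '-' separator
    unsigned long month = read_unsigned(&p, 2);
    std::cout << year << " " << month << "\n";  // prints: 2024 6
}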
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int aacDecoder_drcExtractAndMap (
HANDLE_AAC_DRC self,
HANDLE_FDK_BITSTREAM hBs,
CAacDecoderStaticChannelInfo *pAacDecoderStaticChannelInfo[],
UCHAR pceInstanceTag,
UCHAR channelMapping[], /* Channel mapping translating drcChannel index to canonical channel index */
int validChannels )
{
CDrcPayload threadBs[MAX_DRC_THREADS];
CDrcPayload *validThreadBs[MAX_DRC_THREADS];
CDrcParams *pParams;
UINT backupBsPosition;
int i, thread, validThreads = 0;
int numExcludedChns[MAX_DRC_THREADS];
FDK_ASSERT(self != NULL);
FDK_ASSERT(hBs != NULL);
FDK_ASSERT(pAacDecoderStaticChannelInfo != NULL);
pParams = &self->params;
self->numThreads = 0;
backupBsPosition = FDKgetValidBits(hBs);
for (i = 0; i < self->numPayloads && self->numThreads < MAX_DRC_THREADS; i++) {
int bitsParsed;
/* Init payload data chunk. The memclear is very important because it initializes
the most values. Without it the module wouldn't work properly or crash. */
FDKmemclear(&threadBs[self->numThreads], sizeof(CDrcPayload));
threadBs[self->numThreads].channelData.bandTop[0] = (1024 >> 2) - 1;
/* Extract payload */
bitsParsed = aacDecoder_drcParse( hBs,
&threadBs[self->numThreads],
self->drcPayloadPosition[i] );
if (bitsParsed > 0) {
self->numThreads++;
}
}
self->numPayloads = 0;
if (self->dvbAncDataAvailable)
{ /* Append a DVB heavy compression payload thread if available. */
int bitsParsed;
/* Init payload data chunk. The memclear is very important because it initializes
the most values. Without it the module wouldn't work properly or crash. */
FDKmemclear(&threadBs[self->numThreads], sizeof(CDrcPayload));
threadBs[self->numThreads].channelData.bandTop[0] = (1024 >> 2) - 1;
/* Extract payload */
bitsParsed = aacDecoder_drcReadCompression( hBs,
&threadBs[self->numThreads],
self->dvbAncDataPosition );
if (bitsParsed > 0) {
self->numThreads++;
}
}
self->dvbAncDataAvailable = 0;
/* Reset the bitbufffer */
FDKpushBiDirectional(hBs, FDKgetValidBits(hBs) - backupBsPosition);
/* calculate number of valid bits in excl_chn_mask */
/* coupling channels not supported */
/* check for valid threads */
for (thread = 0; thread < self->numThreads; thread++) {
CDrcPayload *pThreadBs = &threadBs[thread];
int numExclChns = 0;
switch ((AACDEC_DRC_PAYLOAD_TYPE)pThreadBs->channelData.drcDataType) {
default:
continue;
case MPEG_DRC_EXT_DATA:
case DVB_DRC_ANC_DATA:
break;
}
if (pThreadBs->pceInstanceTag >= 0) { /* if PCE tag present */
if (pThreadBs->pceInstanceTag != pceInstanceTag) {
continue; /* don't accept */
}
}
/* calculate number of excluded channels */
if (pThreadBs->excludedChnsMask > 0) {
INT exclMask = pThreadBs->excludedChnsMask;
int ch;
for (ch = 0; ch < validChannels; ch++) {
numExclChns += exclMask & 0x1;
exclMask >>= 1;
}
}
if (numExclChns < validChannels) {
validThreadBs[validThreads] = pThreadBs;
numExcludedChns[validThreads] = numExclChns;
validThreads++;
}
}
if (validThreads > 1) {
int ch;
/* check consistency of excl_chn_mask amongst valid DRC threads */
for (ch = 0; ch < validChannels; ch++) {
int present = 0;
for (thread = 0; thread < validThreads; thread++) {
CDrcPayload *pThreadBs = validThreadBs[thread];
/* thread applies to this channel */
if ( (pThreadBs->channelData.drcDataType == MPEG_DRC_EXT_DATA)
&& ( (numExcludedChns[thread] == 0)
|| (!(pThreadBs->excludedChnsMask & (1<<ch))) ) ) {
present++;
}
}
if (present > 1) {
return -1;
}
}
}
/* map DRC bitstream information onto DRC channel information */
for (thread = 0; thread < validThreads; thread++)
{
CDrcPayload *pThreadBs = validThreadBs[thread];
INT exclMask = pThreadBs->excludedChnsMask;
AACDEC_DRC_PAYLOAD_TYPE drcPayloadType = (AACDEC_DRC_PAYLOAD_TYPE)pThreadBs->channelData.drcDataType;
int ch;
/* last progRefLevel transmitted is the one that is used
* (but it should really only be transmitted once per block!)
*/
if (pThreadBs->progRefLevel >= 0) {
self->progRefLevel = pThreadBs->progRefLevel;
self->progRefLevelPresent = 1;
self->prlExpiryCount = 0; /* Got a new value -> Reset counter */
}
if (drcPayloadType == DVB_DRC_ANC_DATA) {
/* Announce the presentation mode of this valid thread. */
self->presMode = pThreadBs->presMode;
}
/* SCE, CPE and LFE */
for (ch = 0; ch < validChannels; ch++) {
int mapedChannel = channelMapping[ch];
if ( ((exclMask & (1<<mapedChannel)) == 0)
&& ( (drcPayloadType == MPEG_DRC_EXT_DATA)
|| ((drcPayloadType == DVB_DRC_ANC_DATA) && self->params.applyHeavyCompression)
) ) {
/* copy thread to channel */
pAacDecoderStaticChannelInfo[ch]->drcData = pThreadBs->channelData;
}
}
/* CCEs not supported by now */
}
/* Increment and check expiry counter for the program reference level: */
if ( (pParams->expiryFrame > 0)
&& (self->prlExpiryCount++ > pParams->expiryFrame) )
{ /* The program reference level is too old, so set it back to the target level. */
self->progRefLevelPresent = 0;
self->progRefLevel = pParams->targetRefLevel;
self->prlExpiryCount = 0;
}
return 0;
}
Commit Message: Fix stack corruption happening in aacDecoder_drcExtractAndMap()
In the aacDecoder_drcExtractAndMap() function, self->numThreads
can be used after having exceeded its intended max value,
MAX_DRC_THREADS, causing memory to be cleared after the
threadBs[MAX_DRC_THREADS] array.
The crash is prevented by never using self->numThreads with
a value equal to or greater than MAX_DRC_THREADS.
A proper fix will be required as there seems to be an issue as
to which entry in the threadBs array is meant to be initialized
and used.
Bug 26751339
Change-Id: I655cc40c35d4206ab72e83b2bdb751be2fe52b5a
CWE ID: CWE-119 | static int aacDecoder_drcExtractAndMap (
HANDLE_AAC_DRC self,
HANDLE_FDK_BITSTREAM hBs,
CAacDecoderStaticChannelInfo *pAacDecoderStaticChannelInfo[],
UCHAR pceInstanceTag,
UCHAR channelMapping[], /* Channel mapping translating drcChannel index to canonical channel index */
int validChannels )
{
CDrcPayload threadBs[MAX_DRC_THREADS];
CDrcPayload *validThreadBs[MAX_DRC_THREADS];
CDrcParams *pParams;
UINT backupBsPosition;
int i, thread, validThreads = 0;
int numExcludedChns[MAX_DRC_THREADS];
FDK_ASSERT(self != NULL);
FDK_ASSERT(hBs != NULL);
FDK_ASSERT(pAacDecoderStaticChannelInfo != NULL);
pParams = &self->params;
self->numThreads = 0;
backupBsPosition = FDKgetValidBits(hBs);
for (i = 0; i < self->numPayloads && self->numThreads < MAX_DRC_THREADS; i++) {
int bitsParsed;
/* Init payload data chunk. The memclear is very important because it initializes
the most values. Without it the module wouldn't work properly or crash. */
FDKmemclear(&threadBs[self->numThreads], sizeof(CDrcPayload));
threadBs[self->numThreads].channelData.bandTop[0] = (1024 >> 2) - 1;
/* Extract payload */
bitsParsed = aacDecoder_drcParse( hBs,
&threadBs[self->numThreads],
self->drcPayloadPosition[i] );
if (bitsParsed > 0) {
self->numThreads++;
}
}
self->numPayloads = 0;
if (self->numThreads >= MAX_DRC_THREADS) {
self->numThreads = MAX_DRC_THREADS - 1;
}
if (self->dvbAncDataAvailable)
{ /* Append a DVB heavy compression payload thread if available. */
int bitsParsed;
/* Init payload data chunk. The memclear is very important because it initializes
the most values. Without it the module wouldn't work properly or crash. */
FDKmemclear(&threadBs[self->numThreads], sizeof(CDrcPayload));
threadBs[self->numThreads].channelData.bandTop[0] = (1024 >> 2) - 1;
/* Extract payload */
bitsParsed = aacDecoder_drcReadCompression( hBs,
&threadBs[self->numThreads],
self->dvbAncDataPosition );
if (bitsParsed > 0) {
self->numThreads++;
}
}
self->dvbAncDataAvailable = 0;
/* Reset the bitbufffer */
FDKpushBiDirectional(hBs, FDKgetValidBits(hBs) - backupBsPosition);
/* calculate number of valid bits in excl_chn_mask */
/* coupling channels not supported */
if (self->numThreads >= MAX_DRC_THREADS) {
self->numThreads = MAX_DRC_THREADS - 1;
}
/* check for valid threads */
for (thread = 0; thread < self->numThreads; thread++) {
CDrcPayload *pThreadBs = &threadBs[thread];
int numExclChns = 0;
switch ((AACDEC_DRC_PAYLOAD_TYPE)pThreadBs->channelData.drcDataType) {
default:
continue;
case MPEG_DRC_EXT_DATA:
case DVB_DRC_ANC_DATA:
break;
}
if (pThreadBs->pceInstanceTag >= 0) { /* if PCE tag present */
if (pThreadBs->pceInstanceTag != pceInstanceTag) {
continue; /* don't accept */
}
}
/* calculate number of excluded channels */
if (pThreadBs->excludedChnsMask > 0) {
INT exclMask = pThreadBs->excludedChnsMask;
int ch;
for (ch = 0; ch < validChannels; ch++) {
numExclChns += exclMask & 0x1;
exclMask >>= 1;
}
}
if (numExclChns < validChannels) {
validThreadBs[validThreads] = pThreadBs;
numExcludedChns[validThreads] = numExclChns;
validThreads++;
}
}
if (validThreads > 1) {
int ch;
/* check consistency of excl_chn_mask amongst valid DRC threads */
for (ch = 0; ch < validChannels; ch++) {
int present = 0;
for (thread = 0; thread < validThreads; thread++) {
CDrcPayload *pThreadBs = validThreadBs[thread];
/* thread applies to this channel */
if ( (pThreadBs->channelData.drcDataType == MPEG_DRC_EXT_DATA)
&& ( (numExcludedChns[thread] == 0)
|| (!(pThreadBs->excludedChnsMask & (1<<ch))) ) ) {
present++;
}
}
if (present > 1) {
return -1;
}
}
}
/* map DRC bitstream information onto DRC channel information */
for (thread = 0; thread < validThreads; thread++)
{
CDrcPayload *pThreadBs = validThreadBs[thread];
INT exclMask = pThreadBs->excludedChnsMask;
AACDEC_DRC_PAYLOAD_TYPE drcPayloadType = (AACDEC_DRC_PAYLOAD_TYPE)pThreadBs->channelData.drcDataType;
int ch;
/* last progRefLevel transmitted is the one that is used
* (but it should really only be transmitted once per block!)
*/
if (pThreadBs->progRefLevel >= 0) {
self->progRefLevel = pThreadBs->progRefLevel;
self->progRefLevelPresent = 1;
self->prlExpiryCount = 0; /* Got a new value -> Reset counter */
}
if (drcPayloadType == DVB_DRC_ANC_DATA) {
/* Announce the presentation mode of this valid thread. */
self->presMode = pThreadBs->presMode;
}
/* SCE, CPE and LFE */
for (ch = 0; ch < validChannels; ch++) {
int mapedChannel = channelMapping[ch];
if ( ((exclMask & (1<<mapedChannel)) == 0)
&& ( (drcPayloadType == MPEG_DRC_EXT_DATA)
|| ((drcPayloadType == DVB_DRC_ANC_DATA) && self->params.applyHeavyCompression)
) ) {
/* copy thread to channel */
pAacDecoderStaticChannelInfo[ch]->drcData = pThreadBs->channelData;
}
}
/* CCEs not supported by now */
}
/* Increment and check expiry counter for the program reference level: */
if ( (pParams->expiryFrame > 0)
&& (self->prlExpiryCount++ > pParams->expiryFrame) )
{ /* The program reference level is too old, so set it back to the target level. */
self->progRefLevelPresent = 0;
self->progRefLevel = pParams->targetRefLevel;
self->prlExpiryCount = 0;
}
return 0;
}
| 173,889 |
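The essential change in the fixed function above is that self->numThreads is clamped before it can be used to index threadBs[MAX_DRC_THREADS]. The self-contained sketch below shows the same guard in isolation; kMaxThreads, Payload and append_payload are invented names for illustration, not part of the FDK AAC sources.

#include <cstddef>
#include <cstring>
#include <iostream>

constexpr std::size_t kMaxThreads = 4;  // stands in for MAX_DRC_THREADS

struct Payload { int data; };

// Appends a payload only while a free slot exists, so the memset and the
// write below can never touch memory past the end of the array.
bool append_payload(Payload (&slots)[kMaxThreads], std::size_t& count, int value) {
    if (count >= kMaxThreads)
        return false;                       // refuse instead of overflowing
    std::memset(&slots[count], 0, sizeof(Payload));
    slots[count].data = value;
    ++count;
    return true;
}

int main() {
    Payload slots[kMaxThreads];
    std::size_t count = 0;
    for (int i = 0; i < 10; ++i)
        if (!append_payload(slots, count, i))
            std::cout << "dropped payload " << i << "\n";
    std::cout << "stored " << count << " payloads\n";  // stored 4 payloads
}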
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void QuicClientPromisedInfo::OnPromiseHeaders(const SpdyHeaderBlock& headers) {
SpdyHeaderBlock::const_iterator it = headers.find(kHttp2MethodHeader);
DCHECK(it != headers.end());
if (!(it->second == "GET" || it->second == "HEAD")) {
QUIC_DVLOG(1) << "Promise for stream " << id_ << " has invalid method "
<< it->second;
Reset(QUIC_INVALID_PROMISE_METHOD);
return;
}
if (!SpdyUtils::UrlIsValid(headers)) {
QUIC_DVLOG(1) << "Promise for stream " << id_ << " has invalid URL "
<< url_;
Reset(QUIC_INVALID_PROMISE_URL);
return;
}
if (!session_->IsAuthorized(SpdyUtils::GetHostNameFromHeaderBlock(headers))) {
Reset(QUIC_UNAUTHORIZED_PROMISE_URL);
return;
}
request_headers_.reset(new SpdyHeaderBlock(headers.Clone()));
}
Commit Message: Fix Stack Buffer Overflow in QuicClientPromisedInfo::OnPromiseHeaders
BUG=777728
Cq-Include-Trybots: master.tryserver.chromium.android:android_cronet_tester;master.tryserver.chromium.mac:ios-simulator-cronet
Change-Id: I6a80db88aafdf20c7abd3847404b818565681310
Reviewed-on: https://chromium-review.googlesource.com/748425
Reviewed-by: Zhongyi Shi <[email protected]>
Commit-Queue: Ryan Hamilton <[email protected]>
Cr-Commit-Position: refs/heads/master@{#513105}
CWE ID: CWE-119 | void QuicClientPromisedInfo::OnPromiseHeaders(const SpdyHeaderBlock& headers) {
SpdyHeaderBlock::const_iterator it = headers.find(kHttp2MethodHeader);
if (it == headers.end()) {
QUIC_DVLOG(1) << "Promise for stream " << id_ << " has no method";
Reset(QUIC_INVALID_PROMISE_METHOD);
return;
}
if (!(it->second == "GET" || it->second == "HEAD")) {
QUIC_DVLOG(1) << "Promise for stream " << id_ << " has invalid method "
<< it->second;
Reset(QUIC_INVALID_PROMISE_METHOD);
return;
}
if (!SpdyUtils::UrlIsValid(headers)) {
QUIC_DVLOG(1) << "Promise for stream " << id_ << " has invalid URL "
<< url_;
Reset(QUIC_INVALID_PROMISE_URL);
return;
}
if (!session_->IsAuthorized(SpdyUtils::GetHostNameFromHeaderBlock(headers))) {
Reset(QUIC_UNAUTHORIZED_PROMISE_URL);
return;
}
request_headers_.reset(new SpdyHeaderBlock(headers.Clone()));
}
| 172,940 |
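The fix above amounts to refusing to dereference the iterator returned by headers.find() when the pseudo-header is absent. A minimal sketch of that check with a plain std::map follows; the real code uses SpdyHeaderBlock, and the header name and function here are only illustrative.

#include <iostream>
#include <map>
#include <string>

// Returns false instead of reading *end() when the ":method" entry is missing.
bool method_is_get_or_head(const std::map<std::string, std::string>& headers) {
    auto it = headers.find(":method");
    if (it == headers.end())
        return false;                   // missing header: reject, do not dereference
    return it->second == "GET" || it->second == "HEAD";
}

int main() {
    std::map<std::string, std::string> ok{{":method", "GET"}};
    std::map<std::string, std::string> missing;  // no ":method" at all
    std::cout << method_is_get_or_head(ok) << " "
              << method_is_get_or_head(missing) << "\n";  // prints: 1 0
}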
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: cib_send_plaintext(int sock, xmlNode * msg)
{
char *xml_text = dump_xml_unformatted(msg);
if (xml_text != NULL) {
int rc = 0;
char *unsent = xml_text;
int len = strlen(xml_text);
len++; /* null char */
crm_trace("Message on socket %d: size=%d", sock, len);
retry:
rc = write(sock, unsent, len);
if (rc < 0) {
switch (errno) {
case EINTR:
case EAGAIN:
crm_trace("Retry");
goto retry;
default:
crm_perror(LOG_ERR, "Could only write %d of the remaining %d bytes", rc, len);
break;
}
} else if (rc < len) {
crm_trace("Only sent %d of %d remaining bytes", rc, len);
len -= rc;
unsent += rc;
goto retry;
} else {
crm_trace("Sent %d bytes: %.100s", rc, xml_text);
}
}
free(xml_text);
return NULL;
}
Commit Message: High: core: Internal tls api improvements for reuse with future LRMD tls backend.
CWE ID: CWE-399 | static int
crm_send_plaintext(int sock, const char *buf, size_t len)
{
int rc = 0;
const char *unsent = buf;
int total_send;
if (buf == NULL) {
return -1;
}
total_send = len;
crm_trace("Message on socket %d: size=%d", sock, len);
retry:
rc = write(sock, unsent, len);
if (rc < 0) {
switch (errno) {
case EINTR:
case EAGAIN:
crm_trace("Retry");
goto retry;
default:
crm_perror(LOG_ERR, "Could only write %d of the remaining %d bytes", rc, (int) len);
break;
}
} else if (rc < len) {
crm_trace("Only sent %d of %d remaining bytes", rc, len);
len -= rc;
unsent += rc;
goto retry;
} else {
crm_trace("Sent %d bytes: %.100s", rc, buf);
}
return rc < 0 ? rc : total_send;
}
| 166,160 |
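The rewritten sender above is a "write it all" loop: it resumes after short writes and retries on EINTR/EAGAIN. Below is a hedged, self-contained POSIX version of the same loop, a sketch of the pattern rather than the Pacemaker API; a real non-blocking socket would normally poll instead of spinning on EAGAIN.

#include <cerrno>
#include <cstddef>
#include <unistd.h>

// Writes the whole buffer, advancing past short writes and retrying on
// transient errors; returns the byte count on success or -1 on a hard error.
ssize_t write_all(int fd, const char* buf, size_t len) {
    size_t sent = 0;
    while (sent < len) {
        ssize_t rc = write(fd, buf + sent, len - sent);
        if (rc < 0) {
            if (errno == EINTR || errno == EAGAIN)
                continue;                   // transient: retry the same chunk
            return -1;                      // hard error: give up
        }
        sent += static_cast<size_t>(rc);    // short write: advance and loop
    }
    return static_cast<ssize_t>(sent);
}

int main() {
    const char msg[] = "hello\n";
    return write_all(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0 ? 1 : 0;
}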
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: WebRunnerMainDelegate::WebRunnerMainDelegate(zx::channel context_channel)
: context_channel_(std::move(context_channel)) {}
Commit Message: [fuchsia] Implement browser tests for WebRunner Context service.
Tests may interact with the WebRunner FIDL services and the underlying
browser objects for end to end testing of service and browser
functionality.
* Add a browser test launcher main() for WebRunner.
* Add some simple navigation tests.
* Wire up GoBack()/GoForward() FIDL calls.
* Add embedded test server resources and initialization logic.
* Add missing deletion & notification calls to BrowserContext dtor.
* Use FIDL events for navigation state changes.
* Bug fixes:
** Move BrowserContext and Screen deletion to PostMainMessageLoopRun(),
so that they may use the MessageLoop during teardown.
** Fix Frame dtor to allow for null WindowTreeHosts (headless case)
** Fix std::move logic in Frame ctor which lead to no WebContents
observer being registered.
Bug: 871594
Change-Id: I36bcbd2436d534d366c6be4eeb54b9f9feadd1ac
Reviewed-on: https://chromium-review.googlesource.com/1164539
Commit-Queue: Kevin Marshall <[email protected]>
Reviewed-by: Wez <[email protected]>
Reviewed-by: Fabrice de Gans-Riberi <[email protected]>
Reviewed-by: Scott Violet <[email protected]>
Cr-Commit-Position: refs/heads/master@{#584155}
CWE ID: CWE-264 | WebRunnerMainDelegate::WebRunnerMainDelegate(zx::channel context_channel)
| 172,160 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: status_t OMXNodeInstance::configureVideoTunnelMode(
OMX_U32 portIndex, OMX_BOOL tunneled, OMX_U32 audioHwSync,
native_handle_t **sidebandHandle) {
Mutex::Autolock autolock(mLock);
CLOG_CONFIG(configureVideoTunnelMode, "%s:%u tun=%d sync=%u",
portString(portIndex), portIndex, tunneled, audioHwSync);
OMX_INDEXTYPE index;
OMX_STRING name = const_cast<OMX_STRING>(
"OMX.google.android.index.configureVideoTunnelMode");
OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
if (err != OMX_ErrorNone) {
CLOG_ERROR_IF(tunneled, getExtensionIndex, err, "%s", name);
return StatusFromOMXError(err);
}
ConfigureVideoTunnelModeParams tunnelParams;
InitOMXParams(&tunnelParams);
tunnelParams.nPortIndex = portIndex;
tunnelParams.bTunneled = tunneled;
tunnelParams.nAudioHwSync = audioHwSync;
err = OMX_SetParameter(mHandle, index, &tunnelParams);
if (err != OMX_ErrorNone) {
CLOG_ERROR(setParameter, err, "%s(%#x): %s:%u tun=%d sync=%u", name, index,
portString(portIndex), portIndex, tunneled, audioHwSync);
return StatusFromOMXError(err);
}
err = OMX_GetParameter(mHandle, index, &tunnelParams);
if (err != OMX_ErrorNone) {
CLOG_ERROR(getParameter, err, "%s(%#x): %s:%u tun=%d sync=%u", name, index,
portString(portIndex), portIndex, tunneled, audioHwSync);
return StatusFromOMXError(err);
}
if (sidebandHandle) {
*sidebandHandle = (native_handle_t*)tunnelParams.pSidebandWindow;
}
return OK;
}
Commit Message: DO NOT MERGE: IOMX: work against metadata buffer spoofing
- Prohibit direct set/getParam/Settings for extensions meant for
OMXNodeInstance alone. This disallows enabling metadata mode
without the knowledge of OMXNodeInstance.
- Use a backup buffer for metadata mode buffers and do not directly
share with clients.
- Disallow setting up metadata mode/tunneling/input surface
after first sendCommand.
- Disallow store-meta for input cross process.
- Disallow emptyBuffer for surface input (via IOMX).
- Fix checking for input surface.
Bug: 29422020
Change-Id: I801c77b80e703903f62e42d76fd2e76a34e4bc8e
(cherry picked from commit 7c3c2fa3e233c656fc8c2fc2a6634b3ecf8a23e8)
CWE ID: CWE-200 | status_t OMXNodeInstance::configureVideoTunnelMode(
OMX_U32 portIndex, OMX_BOOL tunneled, OMX_U32 audioHwSync,
native_handle_t **sidebandHandle) {
Mutex::Autolock autolock(mLock);
if (mSailed) {
android_errorWriteLog(0x534e4554, "29422020");
return INVALID_OPERATION;
}
CLOG_CONFIG(configureVideoTunnelMode, "%s:%u tun=%d sync=%u",
portString(portIndex), portIndex, tunneled, audioHwSync);
OMX_INDEXTYPE index;
OMX_STRING name = const_cast<OMX_STRING>(
"OMX.google.android.index.configureVideoTunnelMode");
OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
if (err != OMX_ErrorNone) {
CLOG_ERROR_IF(tunneled, getExtensionIndex, err, "%s", name);
return StatusFromOMXError(err);
}
ConfigureVideoTunnelModeParams tunnelParams;
InitOMXParams(&tunnelParams);
tunnelParams.nPortIndex = portIndex;
tunnelParams.bTunneled = tunneled;
tunnelParams.nAudioHwSync = audioHwSync;
err = OMX_SetParameter(mHandle, index, &tunnelParams);
if (err != OMX_ErrorNone) {
CLOG_ERROR(setParameter, err, "%s(%#x): %s:%u tun=%d sync=%u", name, index,
portString(portIndex), portIndex, tunneled, audioHwSync);
return StatusFromOMXError(err);
}
err = OMX_GetParameter(mHandle, index, &tunnelParams);
if (err != OMX_ErrorNone) {
CLOG_ERROR(getParameter, err, "%s(%#x): %s:%u tun=%d sync=%u", name, index,
portString(portIndex), portIndex, tunneled, audioHwSync);
return StatusFromOMXError(err);
}
if (sidebandHandle) {
*sidebandHandle = (native_handle_t*)tunnelParams.pSidebandWindow;
}
return OK;
}
| 174,131 |
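The added mSailed test means tunnel-mode configuration is rejected once the node has handled its first command. The toy class below isolates that guard-flag idiom; Node and its methods are invented stand-ins, not the real IOMX interface.

#include <iostream>

class Node {
 public:
    // The first command permanently locks down configuration.
    int sendCommand() { sailed_ = true; return 0; }

    // Mirrors the mSailed check: configuration after the first command fails.
    int configureTunnelMode(bool enabled) {
        if (sailed_) {
            std::cerr << "configure after first command rejected\n";
            return -1;                      // think INVALID_OPERATION
        }
        tunneled_ = enabled;
        return 0;
    }

 private:
    bool sailed_ = false;
    bool tunneled_ = false;
};

int main() {
    Node n;
    std::cout << n.configureTunnelMode(true) << "\n";   // 0: accepted
    n.sendCommand();
    std::cout << n.configureTunnelMode(false) << "\n";  // -1: rejected
}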
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: dump_threads(void)
{
FILE *fp;
char time_buf[26];
element e;
vrrp_t *vrrp;
char *file_name;
file_name = make_file_name("/tmp/thread_dump.dat",
"vrrp",
#if HAVE_DECL_CLONE_NEWNET
global_data->network_namespace,
#else
NULL,
#endif
global_data->instance_name);
fp = fopen(file_name, "a");
FREE(file_name);
set_time_now();
ctime_r(&time_now.tv_sec, time_buf);
fprintf(fp, "\n%.19s.%6.6ld: Thread dump\n", time_buf, time_now.tv_usec);
dump_thread_data(master, fp);
fprintf(fp, "alloc = %lu\n", master->alloc);
fprintf(fp, "\n");
LIST_FOREACH(vrrp_data->vrrp, vrrp, e) {
ctime_r(&vrrp->sands.tv_sec, time_buf);
fprintf(fp, "VRRP instance %s, sands %.19s.%6.6lu, status %s\n", vrrp->iname, time_buf, vrrp->sands.tv_usec,
vrrp->state == VRRP_STATE_INIT ? "INIT" :
vrrp->state == VRRP_STATE_BACK ? "BACKUP" :
vrrp->state == VRRP_STATE_MAST ? "MASTER" :
vrrp->state == VRRP_STATE_FAULT ? "FAULT" :
vrrp->state == VRRP_STATE_STOP ? "STOP" :
vrrp->state == VRRP_DISPATCHER ? "DISPATCHER" : "unknown");
}
fclose(fp);
}
Commit Message: When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]>
CWE ID: CWE-59 | dump_threads(void)
{
FILE *fp;
char time_buf[26];
element e;
vrrp_t *vrrp;
char *file_name;
file_name = make_file_name("/tmp/thread_dump.dat",
"vrrp",
#if HAVE_DECL_CLONE_NEWNET
global_data->network_namespace,
#else
NULL,
#endif
global_data->instance_name);
fp = fopen_safe(file_name, "a");
FREE(file_name);
set_time_now();
ctime_r(&time_now.tv_sec, time_buf);
fprintf(fp, "\n%.19s.%6.6ld: Thread dump\n", time_buf, time_now.tv_usec);
dump_thread_data(master, fp);
fprintf(fp, "alloc = %lu\n", master->alloc);
fprintf(fp, "\n");
LIST_FOREACH(vrrp_data->vrrp, vrrp, e) {
ctime_r(&vrrp->sands.tv_sec, time_buf);
fprintf(fp, "VRRP instance %s, sands %.19s.%6.6lu, status %s\n", vrrp->iname, time_buf, vrrp->sands.tv_usec,
vrrp->state == VRRP_STATE_INIT ? "INIT" :
vrrp->state == VRRP_STATE_BACK ? "BACKUP" :
vrrp->state == VRRP_STATE_MAST ? "MASTER" :
vrrp->state == VRRP_STATE_FAULT ? "FAULT" :
vrrp->state == VRRP_STATE_STOP ? "STOP" :
vrrp->state == VRRP_DISPATCHER ? "DISPATCHER" : "unknown");
}
fclose(fp);
}
| 168,993 |
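fopen_safe() is keepalived's own helper; the property the commit depends on is that the final path component is opened without following a symbolic link. The sketch below shows one plausible shape of such a wrapper built on open(2) with O_NOFOLLOW plus fdopen(); it is an assumption-laden illustration, not the actual keepalived implementation.

#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

// Opens a file for appending while refusing to follow a symlink in the last
// path component (open fails with ELOOP), then wraps the fd in a FILE*.
FILE* fopen_append_nofollow(const char* path) {
    int fd = open(path, O_WRONLY | O_CREAT | O_APPEND | O_NOFOLLOW | O_CLOEXEC, 0600);
    if (fd < 0)
        return nullptr;                     // includes the symlink case
    FILE* fp = fdopen(fd, "a");
    if (fp == nullptr)
        close(fd);
    return fp;
}

int main() {
    FILE* fp = fopen_append_nofollow("/tmp/thread_dump_example.dat");
    if (fp != nullptr) {
        std::fputs("example record\n", fp);
        std::fclose(fp);
    }
    return 0;
}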
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SpdyWriteQueue::RemovePendingWritesForStream(
const base::WeakPtr<SpdyStream>& stream) {
CHECK(!removing_writes_);
removing_writes_ = true;
RequestPriority priority = stream->priority();
CHECK_GE(priority, MINIMUM_PRIORITY);
CHECK_LE(priority, MAXIMUM_PRIORITY);
DCHECK(stream.get());
#if DCHECK_IS_ON
for (int i = MINIMUM_PRIORITY; i <= MAXIMUM_PRIORITY; ++i) {
if (priority == i)
continue;
for (std::deque<PendingWrite>::const_iterator it = queue_[i].begin();
it != queue_[i].end(); ++it) {
DCHECK_NE(it->stream.get(), stream.get());
}
}
#endif
std::deque<PendingWrite>* queue = &queue_[priority];
std::deque<PendingWrite>::iterator out_it = queue->begin();
for (std::deque<PendingWrite>::const_iterator it = queue->begin();
it != queue->end(); ++it) {
if (it->stream.get() == stream.get()) {
delete it->frame_producer;
} else {
*out_it = *it;
++out_it;
}
}
queue->erase(out_it, queue->end());
removing_writes_ = false;
}
Commit Message: These can post callbacks which re-enter into SpdyWriteQueue.
BUG=369539
Review URL: https://codereview.chromium.org/265933007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268730 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void SpdyWriteQueue::RemovePendingWritesForStream(
const base::WeakPtr<SpdyStream>& stream) {
CHECK(!removing_writes_);
removing_writes_ = true;
RequestPriority priority = stream->priority();
CHECK_GE(priority, MINIMUM_PRIORITY);
CHECK_LE(priority, MAXIMUM_PRIORITY);
DCHECK(stream.get());
#if DCHECK_IS_ON
for (int i = MINIMUM_PRIORITY; i <= MAXIMUM_PRIORITY; ++i) {
if (priority == i)
continue;
for (std::deque<PendingWrite>::const_iterator it = queue_[i].begin();
it != queue_[i].end(); ++it) {
DCHECK_NE(it->stream.get(), stream.get());
}
}
#endif
// Defer deletion until queue iteration is complete, as
// SpdyBuffer::~SpdyBuffer() can result in callbacks into SpdyWriteQueue.
std::vector<SpdyBufferProducer*> erased_buffer_producers;
std::deque<PendingWrite>* queue = &queue_[priority];
std::deque<PendingWrite>::iterator out_it = queue->begin();
for (std::deque<PendingWrite>::const_iterator it = queue->begin();
it != queue->end(); ++it) {
if (it->stream.get() == stream.get()) {
erased_buffer_producers.push_back(it->frame_producer);
} else {
*out_it = *it;
++out_it;
}
}
queue->erase(out_it, queue->end());
removing_writes_ = false;
STLDeleteElements(&erased_buffer_producers); // Invokes callbacks.
}
| 171,674 |
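The fix collects the frame producers during the queue walk and destroys them only after the walk, because their destructors can re-enter the queue. The generic sketch below isolates that "defer deletion until iteration is done" shape; Entry, Producer and remove_for_stream are invented names rather than the SPDY types.

#include <deque>
#include <memory>
#include <vector>

struct Producer {
    ~Producer() { /* in the real code this may call back into the queue */ }
};

struct Entry {
    int stream_id;
    Producer* producer;
};

// Removes every entry for stream_id; the matching producers are destroyed
// only after the queue has been rewritten, like erased_buffer_producers above.
void remove_for_stream(std::deque<Entry>& queue, int stream_id) {
    std::vector<std::unique_ptr<Producer>> doomed;   // deletion deferred here
    std::deque<Entry> kept;
    for (const Entry& e : queue) {
        if (e.stream_id == stream_id)
            doomed.emplace_back(e.producer);
        else
            kept.push_back(e);
    }
    queue.swap(kept);
    // doomed goes out of scope here, after iteration is complete.
}

int main() {
    std::deque<Entry> q{{1, new Producer}, {2, new Producer}, {1, new Producer}};
    remove_for_stream(q, 1);
    delete q.front().producer;              // clean up the surviving entry
    return static_cast<int>(q.size());      // 1
}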
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *d)
{
struct cx24116_state *state = fe->demodulator_priv;
int i, ret;
/* Dump DiSEqC message */
if (debug) {
printk(KERN_INFO "cx24116: %s(", __func__);
for (i = 0 ; i < d->msg_len ;) {
printk(KERN_INFO "0x%02x", d->msg[i]);
if (++i < d->msg_len)
printk(KERN_INFO ", ");
}
printk(") toneburst=%d\n", toneburst);
}
/* Validate length */
if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
return -EINVAL;
/* DiSEqC message */
for (i = 0; i < d->msg_len; i++)
state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
/* DiSEqC message length */
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len;
/* Command length */
state->dsec_cmd.len = CX24116_DISEQC_MSGOFS +
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN];
/* DiSEqC toneburst */
if (toneburst == CX24116_DISEQC_MESGCACHE)
/* Message is cached */
return 0;
else if (toneburst == CX24116_DISEQC_TONEOFF)
/* Message is sent without burst */
state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0;
else if (toneburst == CX24116_DISEQC_TONECACHE) {
/*
* Message is sent with derived else cached burst
*
* WRITE PORT GROUP COMMAND 38
*
* 0/A/A: E0 10 38 F0..F3
* 1/B/B: E0 10 38 F4..F7
* 2/C/A: E0 10 38 F8..FB
* 3/D/B: E0 10 38 FC..FF
*
* databyte[3]= 8421:8421
* ABCD:WXYZ
* CLR :SET
*
* WX= PORT SELECT 0..3 (X=TONEBURST)
* Y = VOLTAGE (0=13V, 1=18V)
* Z = BAND (0=LOW, 1=HIGH(22K))
*/
if (d->msg_len >= 4 && d->msg[2] == 0x38)
state->dsec_cmd.args[CX24116_DISEQC_BURST] =
((d->msg[3] & 4) >> 2);
if (debug)
dprintk("%s burst=%d\n", __func__,
state->dsec_cmd.args[CX24116_DISEQC_BURST]);
}
/* Wait for LNB ready */
ret = cx24116_wait_for_lnb(fe);
if (ret != 0)
return ret;
/* Wait for voltage/min repeat delay */
msleep(100);
/* Command */
ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
if (ret != 0)
return ret;
/*
* Wait for send
*
* Eutelsat spec:
* >15ms delay + (XXX determine if FW does this, see set_tone)
* 13.5ms per byte +
* >15ms delay +
* 12.5ms burst +
* >15ms delay (XXX determine if FW does this, see set_tone)
*/
msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) +
((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60));
return 0;
}
Commit Message: [media] cx24116: fix a buffer overflow when checking userspace params
The maximum size for a DiSEqC command is 6, according to the
userspace API. However, the code allows writing many more values:
drivers/media/dvb-frontends/cx24116.c:983 cx24116_send_diseqc_msg() error: buffer overflow 'd->msg' 6 <= 23
Cc: [email protected]
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
CWE ID: CWE-119 | static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *d)
{
struct cx24116_state *state = fe->demodulator_priv;
int i, ret;
/* Validate length */
if (d->msg_len > sizeof(d->msg))
return -EINVAL;
/* Dump DiSEqC message */
if (debug) {
printk(KERN_INFO "cx24116: %s(", __func__);
for (i = 0 ; i < d->msg_len ;) {
printk(KERN_INFO "0x%02x", d->msg[i]);
if (++i < d->msg_len)
printk(KERN_INFO ", ");
}
printk(") toneburst=%d\n", toneburst);
}
/* DiSEqC message */
for (i = 0; i < d->msg_len; i++)
state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
/* DiSEqC message length */
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len;
/* Command length */
state->dsec_cmd.len = CX24116_DISEQC_MSGOFS +
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN];
/* DiSEqC toneburst */
if (toneburst == CX24116_DISEQC_MESGCACHE)
/* Message is cached */
return 0;
else if (toneburst == CX24116_DISEQC_TONEOFF)
/* Message is sent without burst */
state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0;
else if (toneburst == CX24116_DISEQC_TONECACHE) {
/*
* Message is sent with derived else cached burst
*
* WRITE PORT GROUP COMMAND 38
*
* 0/A/A: E0 10 38 F0..F3
* 1/B/B: E0 10 38 F4..F7
* 2/C/A: E0 10 38 F8..FB
* 3/D/B: E0 10 38 FC..FF
*
* databyte[3]= 8421:8421
* ABCD:WXYZ
* CLR :SET
*
* WX= PORT SELECT 0..3 (X=TONEBURST)
* Y = VOLTAGE (0=13V, 1=18V)
* Z = BAND (0=LOW, 1=HIGH(22K))
*/
if (d->msg_len >= 4 && d->msg[2] == 0x38)
state->dsec_cmd.args[CX24116_DISEQC_BURST] =
((d->msg[3] & 4) >> 2);
if (debug)
dprintk("%s burst=%d\n", __func__,
state->dsec_cmd.args[CX24116_DISEQC_BURST]);
}
/* Wait for LNB ready */
ret = cx24116_wait_for_lnb(fe);
if (ret != 0)
return ret;
/* Wait for voltage/min repeat delay */
msleep(100);
/* Command */
ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
if (ret != 0)
return ret;
/*
* Wait for send
*
* Eutelsat spec:
* >15ms delay + (XXX determine if FW does this, see set_tone)
* 13.5ms per byte +
* >15ms delay +
* 12.5ms burst +
* >15ms delay (XXX determine if FW does this, see set_tone)
*/
msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) +
((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60));
return 0;
}
| 169,867 |
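The key move is validating the caller-supplied length against sizeof(d->msg) before anything reads the message, including the debug dump. The user-space sketch below shows that bounds check in miniature; Command and copy_command are made-up stand-ins, not the DVB structures.

#include <cstdint>
#include <cstring>
#include <iostream>

struct Command {
    uint8_t msg[6];                         // fixed-size buffer, like dvb_diseqc_master_cmd::msg
    uint8_t msg_len;                        // caller-supplied (untrusted) length
};

// Copies the message only if the claimed length fits inside the buffer.
bool copy_command(const Command& c, uint8_t (&out)[6]) {
    if (c.msg_len > sizeof(c.msg))          // validate before touching msg[i]
        return false;
    std::memcpy(out, c.msg, c.msg_len);
    return true;
}

int main() {
    Command good = {{1, 2, 3}, 3};
    Command evil = {{0}, 23};               // claims more bytes than exist
    uint8_t out[6];
    std::cout << copy_command(good, out) << " "
              << copy_command(evil, out) << "\n";  // prints: 1 0
}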
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void PlatformSensorProviderWin::SensorReaderCreated(
mojom::SensorType type,
mojo::ScopedSharedBufferMapping mapping,
const CreateSensorCallback& callback,
std::unique_ptr<PlatformSensorReaderWin> sensor_reader) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (!sensor_reader) {
callback.Run(nullptr);
return;
}
scoped_refptr<PlatformSensor> sensor = new PlatformSensorWin(
type, std::move(mapping), this, sensor_thread_->task_runner(),
std::move(sensor_reader));
callback.Run(sensor);
}
Commit Message: android: Fix sensors in device service.
This patch fixes a bug that prevented more than one sensor data
to be available at once when using the device motion/orientation
API.
The issue was introduced by this other patch [1] which fixed
some security-related issues in the way shared memory region
handles are managed throughout Chromium (more details at
https://crbug.com/789959).
The device service's sensor implementation doesn't work
correctly because it assumes it is possible to create a
writable mapping of a given shared memory region at any
time. This assumption is not correct on Android, once an
Ashmem region has been turned read-only, such mappings
are no longer possible.
To fix the implementation, this CL changes the following:
- PlatformSensor used to require moving a
mojo::ScopedSharedBufferMapping into the newly-created
instance. Said mapping being owned by and destroyed
with the PlatformSensor instance.
With this patch, the constructor instead takes a single
pointer to the corresponding SensorReadingSharedBuffer,
i.e. the area in memory where the sensor-specific
reading data is located, and can be either updated
or read-from.
Note that the PlatformSensor does not own the mapping
anymore.
- PlatformSensorProviderBase holds the *single* writable
mapping that is used to store all SensorReadingSharedBuffer
buffers. It is created just after the region itself,
and thus can be used even after the region's access
mode has been changed to read-only.
Addresses within the mapping will be passed to
PlatformSensor constructors, computed from the
mapping's base address plus a sensor-specific
offset.
The mapping is now owned by the
PlatformSensorProviderBase instance.
Note that, security-wise, nothing changes, because all
mojo::ScopedSharedBufferMapping before the patch actually
pointed to the same writable-page in memory anyway.
Since unit or integration tests didn't catch the regression
when [1] was submitted, this patch was tested manually by
running a newly-built Chrome apk in the Android emulator
and on a real device running Android O.
[1] https://chromium-review.googlesource.com/c/chromium/src/+/805238
BUG=805146
[email protected],[email protected],[email protected],[email protected]
Change-Id: I7d60a1cad278f48c361d2ece5a90de10eb082b44
Reviewed-on: https://chromium-review.googlesource.com/891180
Commit-Queue: David Turner <[email protected]>
Reviewed-by: Reilly Grant <[email protected]>
Reviewed-by: Matthew Cary <[email protected]>
Reviewed-by: Alexandr Ilin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#532607}
CWE ID: CWE-732 | void PlatformSensorProviderWin::SensorReaderCreated(
mojom::SensorType type,
SensorReadingSharedBuffer* reading_buffer,
const CreateSensorCallback& callback,
std::unique_ptr<PlatformSensorReaderWin> sensor_reader) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (!sensor_reader) {
callback.Run(nullptr);
return;
}
scoped_refptr<PlatformSensor> sensor = new PlatformSensorWin(
type, reading_buffer, this, sensor_thread_->task_runner(),
std::move(sensor_reader));
callback.Run(sensor);
}
| 172,848 |
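Per the commit message, the provider now owns a single writable region and hands each sensor a pointer at a per-type offset instead of giving each sensor its own mapping. The toy sketch below only illustrates that ownership and offset arithmetic; the names are assumptions, not the Chromium classes.

#include <array>
#include <cstddef>
#include <iostream>

struct SensorReadingSharedBuffer {
    double values[4];                       // simplified reading slot
};

constexpr std::size_t kNumSensorTypes = 8;

// One contiguous writable region holds a slot per sensor type; sensors get a
// pointer into it rather than a mapping of their own.
struct Provider {
    std::array<SensorReadingSharedBuffer, kNumSensorTypes> region{};  // owned by the provider

    SensorReadingSharedBuffer* buffer_for_type(std::size_t type) {
        return type < kNumSensorTypes ? &region[type] : nullptr;
    }
};

int main() {
    Provider provider;
    SensorReadingSharedBuffer* accel = provider.buffer_for_type(2);
    accel->values[0] = 9.81;                // a sensor writes through the shared pointer
    std::cout << provider.region[2].values[0] << "\n";  // prints: 9.81
}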
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
long ret, chg;
struct hstate *h = hstate_inode(inode);
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
* and filesystem quota without using reserves
*/
if (vm_flags & VM_NORESERVE)
return 0;
/*
* Shared mappings base their reservation on the number of pages that
* are already allocated on behalf of the file. Private mappings need
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
chg = to - from;
set_vma_resv_map(vma, resv_map);
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
if (chg < 0)
return chg;
/* There must be enough filesystem quota for the mapping */
if (hugetlb_get_quota(inode->i_mapping, chg))
return -ENOSPC;
/*
* Check enough hugepages are available for the reservation.
* Hand back the quota if there are not
*/
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugetlb_put_quota(inode->i_mapping, chg);
return ret;
}
/*
* Account for the reservations made. Shared mappings record regions
* that have reservations as they are shared by multiple VMAs.
* When the last VMA disappears, the region map says how much
* the reservation was and the page cache tells how much of
* the reservation was consumed. Private mappings are per-VMA and
* only the consumed reservations are tracked. When the VMA
* disappears, the original reservation is the VMA size and the
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
}
Commit Message: hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, had the hugepages store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <[email protected]>
Signed-off-by: David Gibson <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Paul Mackerras <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-399 | int hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
long ret, chg;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
* without using reserves
*/
if (vm_flags & VM_NORESERVE)
return 0;
/*
* Shared mappings base their reservation on the number of pages that
* are already allocated on behalf of the file. Private mappings need
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
chg = to - from;
set_vma_resv_map(vma, resv_map);
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
if (chg < 0)
return chg;
/* There must be enough pages in the subpool for the mapping */
if (hugepage_subpool_get_pages(spool, chg))
return -ENOSPC;
/*
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugepage_subpool_put_pages(spool, chg);
return ret;
}
/*
* Account for the reservations made. Shared mappings record regions
* that have reservations as they are shared by multiple VMAs.
* When the last VMA disappears, the region map says how much
* the reservation was and the page cache tells how much of
* the reservation was consumed. Private mappings are per-VMA and
* only the consumed reservations are tracked. When the VMA
* disappears, the original reservation is the VMA size and the
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
}
| 165,610 |
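The subpool the commit introduces is a finite, reference-counted pool with get/put accounting whose lifetime can outlast the inode. The user-space toy below sketches just the accounting half; it is not kernel code, and the names merely echo hugepage_subpool_{get,put}_pages().

#include <iostream>
#include <memory>

// A finite pool of pages; shared ownership keeps it alive until the last
// holder lets go, the role hugepage_subpool plays for pages and superblocks.
struct Subpool {
    long max_pages;
    long used_pages = 0;
};

// Reserve delta pages; fails without modifying the pool when over the limit.
bool subpool_get_pages(Subpool& spool, long delta) {
    if (spool.used_pages + delta > spool.max_pages)
        return false;
    spool.used_pages += delta;
    return true;
}

void subpool_put_pages(Subpool& spool, long delta) {
    spool.used_pages -= delta;
}

int main() {
    auto spool = std::make_shared<Subpool>(Subpool{4});
    std::cout << subpool_get_pages(*spool, 3) << "\n";  // 1: fits
    std::cout << subpool_get_pages(*spool, 3) << "\n";  // 0: would exceed the limit
    subpool_put_pages(*spool, 3);
    return 0;
}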
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, dccp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (dst == NULL) {
struct in6_addr *final_p, final;
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl6, opt, &final);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst))
goto out;
}
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, dccp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
__ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->opt = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
kfree_skb(ireq6->pktopts);
ireq6->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* Clone native IPv6 options from listening socket (if any)
*
* Yes, keeping reference count would be much more clever, but we make
* one more one thing there: reattach optmem to newsk.
*/
if (opt != NULL) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
dccp_sync_mss(newsk, dst_mtu(dst));
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
Commit Message: inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, dccp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (dst == NULL) {
struct in6_addr *final_p, final;
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl6, opt, &final);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst))
goto out;
}
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, dccp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
__ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
kfree_skb(ireq6->pktopts);
ireq6->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* Clone native IPv6 options from listening socket (if any)
*
* Yes, keeping reference count would be much more clever, but we make
* one more one thing there: reattach optmem to newsk.
*/
if (opt != NULL) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
dccp_sync_mss(newsk, dst_mtu(dst));
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
| 165,542 |
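The RCU change means readers take a snapshot of the options pointer while writers publish a fresh copy instead of mutating or freeing what readers may still hold. The kernel primitives are C macros, so the sketch below only approximates the read/publish idea in user space with shared_ptr atomics; it is an analogy, not the kernel mechanism.

#include <atomic>
#include <iostream>
#include <memory>
#include <string>

struct IpOptions {
    std::string data;
};

// Shared options pointer; readers snapshot it, writers swap in a new object.
std::shared_ptr<const IpOptions> g_opts = std::make_shared<IpOptions>(IpOptions{"lsrr"});

std::string read_options() {
    // Loosely plays the role of rcu_read_lock() + rcu_dereference(): the
    // snapshot stays valid even if a writer replaces g_opts right after.
    std::shared_ptr<const IpOptions> snap = std::atomic_load(&g_opts);
    return snap ? snap->data : "";
}

void write_options(std::string data) {
    auto fresh = std::make_shared<const IpOptions>(IpOptions{std::move(data)});
    std::atomic_store(&g_opts, fresh);      // "publish" a new copy for readers
}

int main() {
    write_options("timestamp");
    std::cout << read_options() << "\n";    // prints: timestamp
}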
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void Resource::LastPluginRefWasDeleted(bool instance_destroyed) {
DCHECK(resource_id_ != 0);
instance()->module()->GetCallbackTracker()->PostAbortForResource(
resource_id_);
resource_id_ = 0;
if (instance_destroyed)
instance_ = NULL;
}
Commit Message: Maintain a map of all resources in the resource tracker and clear instance back pointers when needed,
BUG=85808
Review URL: http://codereview.chromium.org/7196001
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89746 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void Resource::LastPluginRefWasDeleted() {
DCHECK(resource_id_ != 0);
instance()->module()->GetCallbackTracker()->PostAbortForResource(
resource_id_);
resource_id_ = 0;
}
| 170,413 |
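The commit for this record centralizes lifetime bookkeeping: a tracker knows every live resource and clears their instance back pointers when the owning instance goes away, so the per-resource method no longer needs an instance_destroyed flag. The sketch below shows only that shape; ResourceTracker and its members are invented for illustration and do not mirror the PPAPI classes.

#include <iostream>
#include <set>

struct Resource {
    const void* instance = nullptr;         // back pointer cleared by the tracker
};

// Knows every live resource, so back pointers can be nulled in one place
// when the owning instance is destroyed.
struct ResourceTracker {
    std::set<Resource*> resources;

    void instance_destroyed(const void* instance) {
        for (Resource* r : resources)
            if (r->instance == instance)
                r->instance = nullptr;      // nothing dangles afterwards
    }
};

int main() {
    int instance_tag = 0;
    Resource a, b;
    a.instance = &instance_tag;
    b.instance = &instance_tag;
    ResourceTracker tracker;
    tracker.resources = {&a, &b};
    tracker.instance_destroyed(&instance_tag);
    std::cout << (a.instance == nullptr && b.instance == nullptr) << "\n";  // prints: 1
}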
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: scoped_refptr<VideoFrame> CloneVideoFrameWithLayout(
const VideoFrame* const src_frame,
const VideoFrameLayout& dst_layout) {
LOG_ASSERT(src_frame->IsMappable());
LOG_ASSERT(src_frame->format() == dst_layout.format());
auto dst_frame = VideoFrame::CreateFrameWithLayout(
dst_layout, src_frame->visible_rect(), src_frame->natural_size(),
src_frame->timestamp(), false /* zero_initialize_memory*/);
if (!dst_frame) {
LOG(ERROR) << "Failed to create VideoFrame";
return nullptr;
}
const size_t num_planes = VideoFrame::NumPlanes(dst_layout.format());
LOG_ASSERT(dst_layout.planes().size() == num_planes);
LOG_ASSERT(src_frame->layout().planes().size() == num_planes);
for (size_t i = 0; i < num_planes; ++i) {
libyuv::CopyPlane(
src_frame->data(i), src_frame->layout().planes()[i].stride,
dst_frame->data(i), dst_frame->layout().planes()[i].stride,
VideoFrame::Columns(i, dst_frame->format(),
dst_frame->natural_size().width()),
VideoFrame::Rows(i, dst_frame->format(),
dst_frame->natural_size().height()));
}
return dst_frame;
}
Commit Message: media/gpu/test: ImageProcessorClient: Use bytes for width and height in libyuv::CopyPlane()
|width| is in bytes in libyuv::CopyPlane(). We formerly pass width in pixels.
This should matter when a pixel format is used whose pixel is composed of
more than one bytes.
Bug: None
Test: image_processor_test
Change-Id: I98e90be70c8d0128319172d4d19f3a8017b65d78
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1553129
Commit-Queue: Hirokazu Honda <[email protected]>
Reviewed-by: Alexandre Courbot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#648117}
CWE ID: CWE-20 | scoped_refptr<VideoFrame> CloneVideoFrameWithLayout(
const VideoFrame* const src_frame,
const VideoFrameLayout& dst_layout) {
LOG_ASSERT(src_frame->IsMappable());
LOG_ASSERT(src_frame->format() == dst_layout.format());
auto dst_frame = VideoFrame::CreateFrameWithLayout(
dst_layout, src_frame->visible_rect(), src_frame->natural_size(),
src_frame->timestamp(), false /* zero_initialize_memory*/);
if (!dst_frame) {
LOG(ERROR) << "Failed to create VideoFrame";
return nullptr;
}
const size_t num_planes = VideoFrame::NumPlanes(dst_layout.format());
LOG_ASSERT(dst_layout.planes().size() == num_planes);
LOG_ASSERT(src_frame->layout().planes().size() == num_planes);
for (size_t i = 0; i < num_planes; ++i) {
// |width| in libyuv::CopyPlane() is in bytes, not pixels.
gfx::Size plane_size = VideoFrame::PlaneSize(dst_frame->format(), i,
dst_frame->natural_size());
libyuv::CopyPlane(
src_frame->data(i), src_frame->layout().planes()[i].stride,
dst_frame->data(i), dst_frame->layout().planes()[i].stride,
plane_size.width(), plane_size.height());
}
return dst_frame;
}
| 172,397 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int skt_write(int fd, const void *p, size_t len)
{
int sent;
struct pollfd pfd;
FNLOG();
pfd.fd = fd;
pfd.events = POLLOUT;
/* poll for 500 ms */
/* send time out */
if (poll(&pfd, 1, 500) == 0)
return 0;
ts_log("skt_write", len, NULL);
if ((sent = send(fd, p, len, MSG_NOSIGNAL)) == -1)
{
ERROR("write failed with errno=%d\n", errno);
return -1;
}
return sent;
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static int skt_write(int fd, const void *p, size_t len)
{
int sent;
struct pollfd pfd;
FNLOG();
pfd.fd = fd;
pfd.events = POLLOUT;
/* poll for 500 ms */
/* send time out */
if (TEMP_FAILURE_RETRY(poll(&pfd, 1, 500)) == 0)
return 0;
ts_log("skt_write", len, NULL);
if ((sent = TEMP_FAILURE_RETRY(send(fd, p, len, MSG_NOSIGNAL))) == -1)
{
ERROR("write failed with errno=%d\n", errno);
return -1;
}
return sent;
}
| 173,429 |
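The patched skt_write() above hinges on one idiom: a blocking syscall that fails with errno == EINTR because a signal arrived should be retried, not treated as an error. A minimal standalone sketch of that retry loop follows; retry_send() is an invented helper name for illustration, while the actual fix uses the TEMP_FAILURE_RETRY macro that bionic and glibc provide in <unistd.h>.

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Retry send() whenever it is interrupted by a signal; any other failure is
 * returned to the caller unchanged. */
static ssize_t retry_send(int fd, const void *buf, size_t len, int flags)
{
    ssize_t rc;

    do {
        rc = send(fd, buf, len, flags);       /* may set errno = EINTR */
    } while (rc == -1 && errno == EINTR);     /* interrupted: try again */

    return rc;
}

Without the retry, a signal delivered at the wrong moment makes send() fail with EINTR and the write is abandoned, which is the denial-of-service window the commit message describes.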
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int ip_options_get_finish(struct net *net, struct ip_options **optp,
struct ip_options *opt, int optlen)
{
while (optlen & 3)
opt->__data[optlen++] = IPOPT_END;
opt->optlen = optlen;
if (optlen && ip_options_compile(net, opt, NULL)) {
kfree(opt);
return -EINVAL;
}
kfree(*optp);
*optp = opt;
return 0;
}
Commit Message: inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | static int ip_options_get_finish(struct net *net, struct ip_options **optp,
static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
struct ip_options_rcu *opt, int optlen)
{
while (optlen & 3)
opt->opt.__data[optlen++] = IPOPT_END;
opt->opt.optlen = optlen;
if (optlen && ip_options_compile(net, &opt->opt, NULL)) {
kfree(opt);
return -EINVAL;
}
kfree(*optp);
*optp = opt;
return 0;
}
| 165,560 |
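The commit message above describes the core of the fix: never patch the shared ip_options in place; build a complete replacement, publish the pointer, and let readers keep using whatever copy they already dereferenced until it is safe to free. The sketch below shows that copy-then-publish shape with C11 atomics so it compiles on its own; the struct name, the 40-byte bound, and the function names are invented for illustration, and the kernel itself uses rcu_assign_pointer()/rcu_dereference() with an RCU grace period rather than these primitives.

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

#define OPTS_MAX 40

struct opts {
    size_t len;
    unsigned char data[OPTS_MAX];
};

/* Shared pointer; a published opts block is never modified in place. */
static _Atomic(struct opts *) current_opts;

/* Writer: build a complete replacement and swap it in atomically.  The old
 * block is handed back so the caller frees it only once no reader can still
 * hold it, which is the role the RCU grace period plays in the kernel patch. */
static struct opts *replace_opts(const unsigned char *data, size_t len)
{
    struct opts *n;

    if (len > OPTS_MAX)
        return NULL;
    n = calloc(1, sizeof(*n));
    if (n == NULL)
        return NULL;
    n->len = len;
    memcpy(n->data, data, len);
    return atomic_exchange_explicit(&current_opts, n, memory_order_acq_rel);
}

/* Reader: one acquire load yields either NULL or a fully built snapshot, so
 * the receive/transmit paths need no lock at all. */
static size_t current_opts_len(void)
{
    struct opts *o = atomic_load_explicit(&current_opts, memory_order_acquire);

    return o != NULL ? o->len : 0;
}

The property the commit relies on is exactly this: a reader can never observe a half-updated options block, because updates happen only by swapping in a pointer to an already-complete copy.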
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: OMX_ERRORTYPE SoftAMRNBEncoder::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamAudioPortFormat:
{
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
if (formatParams->nIndex > 0) {
return OMX_ErrorNoMore;
}
formatParams->eEncoding =
(formatParams->nPortIndex == 0)
? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAmr:
{
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
if (amrParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
amrParams->nChannels = 1;
amrParams->nBitRate = mBitRate;
amrParams->eAMRBandMode = (OMX_AUDIO_AMRBANDMODETYPE)(mMode + 1);
amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF;
pcmParams->nChannels = 1;
pcmParams->nSamplingRate = kSampleRate;
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftAMRNBEncoder::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamAudioPortFormat:
{
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
if (!isValidOMXParam(formatParams)) {
return OMX_ErrorBadParameter;
}
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
if (formatParams->nIndex > 0) {
return OMX_ErrorNoMore;
}
formatParams->eEncoding =
(formatParams->nPortIndex == 0)
? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAmr:
{
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
if (!isValidOMXParam(amrParams)) {
return OMX_ErrorBadParameter;
}
if (amrParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
amrParams->nChannels = 1;
amrParams->nBitRate = mBitRate;
amrParams->eAMRBandMode = (OMX_AUDIO_AMRBANDMODETYPE)(mMode + 1);
amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (!isValidOMXParam(pcmParams)) {
return OMX_ErrorBadParameter;
}
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF;
pcmParams->nChannels = 1;
pcmParams->nSamplingRate = kSampleRate;
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
| 174,194 |
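Every branch of the fixed internalGetParameter() above now refuses to touch the caller-supplied block until isValidOMXParam() approves it. Below is a standalone sketch of that kind of size guard; the struct layout, field names, and the received_size argument are made up for illustration, and the real helper in the AOSP tree (not shown in this record) performs the equivalent check against the OMX struct's self-declared nSize field before any member is read or written.

#include <stddef.h>
#include <stdint.h>

/* Stand-in for an OMX parameter block; real OMX_* structs begin with an
 * nSize field stating how large the caller claims the block to be. */
struct port_format_params {
    uint32_t nSize;
    uint32_t nVersion;
    uint32_t nPortIndex;
    uint32_t nIndex;
    uint32_t eEncoding;
};

/* Reject the request unless the block is large enough for every field that
 * will be read or written below, the guard the isValidOMXParam() calls add. */
static int handle_get_parameter(void *params, size_t received_size)
{
    struct port_format_params *fmt = params;

    if (fmt == NULL || received_size < sizeof(*fmt) || fmt->nSize < sizeof(*fmt))
        return -1;                   /* OMX_ErrorBadParameter in the real code */

    fmt->eEncoding = 0;              /* safe: the whole struct is known to fit */
    return 0;
}

Before the fix, a caller could hand in a shorter buffer and writes to fields such as eEncoding would land past its end, which is the CWE-119 out-of-bounds condition this record is labelled with.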
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
int def_errors;
unsigned long def_mount_opts;
struct super_block *sb = vfs->mnt_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
def_errors = le16_to_cpu(es->s_errors);
if (sbi->s_sb_block != 1)
seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
if (test_opt(sb, MINIX_DF))
seq_puts(seq, ",minixdf");
if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
seq_puts(seq, ",grpid");
if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
seq_puts(seq, ",nogrpid");
if (sbi->s_resuid != EXT4_DEF_RESUID ||
le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
seq_printf(seq, ",resuid=%u", sbi->s_resuid);
}
if (sbi->s_resgid != EXT4_DEF_RESGID ||
le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
seq_printf(seq, ",resgid=%u", sbi->s_resgid);
}
if (test_opt(sb, ERRORS_RO)) {
if (def_errors == EXT4_ERRORS_PANIC ||
def_errors == EXT4_ERRORS_CONTINUE) {
seq_puts(seq, ",errors=remount-ro");
}
}
if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
seq_puts(seq, ",errors=continue");
if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
seq_puts(seq, ",errors=panic");
if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
seq_puts(seq, ",nouid32");
if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
seq_puts(seq, ",debug");
if (test_opt(sb, OLDALLOC))
seq_puts(seq, ",oldalloc");
#ifdef CONFIG_EXT4_FS_XATTR
if (test_opt(sb, XATTR_USER) &&
!(def_mount_opts & EXT4_DEFM_XATTR_USER))
seq_puts(seq, ",user_xattr");
if (!test_opt(sb, XATTR_USER) &&
(def_mount_opts & EXT4_DEFM_XATTR_USER)) {
seq_puts(seq, ",nouser_xattr");
}
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
seq_puts(seq, ",acl");
if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
seq_puts(seq, ",noacl");
#endif
if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
seq_printf(seq, ",min_batch_time=%u",
(unsigned) sbi->s_min_batch_time);
}
if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
seq_printf(seq, ",max_batch_time=%u",
(unsigned) sbi->s_min_batch_time);
}
/*
* We're changing the default of barrier mount option, so
* let's always display its mount state so it's clear what its
* status is.
*/
seq_puts(seq, ",barrier=");
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
seq_puts(seq, ",journal_async_commit");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
if (test_opt(sb, I_VERSION))
seq_puts(seq, ",i_version");
if (!test_opt(sb, DELALLOC))
seq_puts(seq, ",nodelalloc");
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
* journal mode get enabled in different ways
* So just print the value even if we didn't specify it
*/
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
seq_puts(seq, ",data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
seq_puts(seq, ",data=ordered");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
seq_puts(seq, ",data=writeback");
if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
seq_printf(seq, ",inode_readahead_blks=%u",
sbi->s_inode_readahead_blks);
if (test_opt(sb, DATA_ERR_ABORT))
seq_puts(seq, ",data_err=abort");
if (test_opt(sb, NO_AUTO_DA_ALLOC))
seq_puts(seq, ",noauto_da_alloc");
if (test_opt(sb, DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sb, NOLOAD))
seq_puts(seq, ",norecovery");
ext4_show_quota_options(seq, sb);
return 0;
}
Commit Message: ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
CWE ID: | static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
int def_errors;
unsigned long def_mount_opts;
struct super_block *sb = vfs->mnt_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
def_errors = le16_to_cpu(es->s_errors);
if (sbi->s_sb_block != 1)
seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
if (test_opt(sb, MINIX_DF))
seq_puts(seq, ",minixdf");
if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
seq_puts(seq, ",grpid");
if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
seq_puts(seq, ",nogrpid");
if (sbi->s_resuid != EXT4_DEF_RESUID ||
le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
seq_printf(seq, ",resuid=%u", sbi->s_resuid);
}
if (sbi->s_resgid != EXT4_DEF_RESGID ||
le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
seq_printf(seq, ",resgid=%u", sbi->s_resgid);
}
if (test_opt(sb, ERRORS_RO)) {
if (def_errors == EXT4_ERRORS_PANIC ||
def_errors == EXT4_ERRORS_CONTINUE) {
seq_puts(seq, ",errors=remount-ro");
}
}
if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
seq_puts(seq, ",errors=continue");
if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
seq_puts(seq, ",errors=panic");
if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
seq_puts(seq, ",nouid32");
if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
seq_puts(seq, ",debug");
if (test_opt(sb, OLDALLOC))
seq_puts(seq, ",oldalloc");
#ifdef CONFIG_EXT4_FS_XATTR
if (test_opt(sb, XATTR_USER) &&
!(def_mount_opts & EXT4_DEFM_XATTR_USER))
seq_puts(seq, ",user_xattr");
if (!test_opt(sb, XATTR_USER) &&
(def_mount_opts & EXT4_DEFM_XATTR_USER)) {
seq_puts(seq, ",nouser_xattr");
}
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
seq_puts(seq, ",acl");
if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
seq_puts(seq, ",noacl");
#endif
if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
seq_printf(seq, ",min_batch_time=%u",
(unsigned) sbi->s_min_batch_time);
}
if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
seq_printf(seq, ",max_batch_time=%u",
(unsigned) sbi->s_min_batch_time);
}
/*
* We're changing the default of barrier mount option, so
* let's always display its mount state so it's clear what its
* status is.
*/
seq_puts(seq, ",barrier=");
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
seq_puts(seq, ",journal_async_commit");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
if (test_opt(sb, I_VERSION))
seq_puts(seq, ",i_version");
if (!test_opt(sb, DELALLOC))
seq_puts(seq, ",nodelalloc");
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
* journal mode get enabled in different ways
* So just print the value even if we didn't specify it
*/
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
seq_puts(seq, ",data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
seq_puts(seq, ",data=ordered");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
seq_puts(seq, ",data=writeback");
if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
seq_printf(seq, ",inode_readahead_blks=%u",
sbi->s_inode_readahead_blks);
if (test_opt(sb, DATA_ERR_ABORT))
seq_puts(seq, ",data_err=abort");
if (test_opt(sb, NO_AUTO_DA_ALLOC))
seq_puts(seq, ",noauto_da_alloc");
if (test_opt(sb, DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sb, NOLOAD))
seq_puts(seq, ",norecovery");
if (test_opt(sb, DIOREAD_NOLOCK))
seq_puts(seq, ",dioread_nolock");
ext4_show_quota_options(seq, sb);
return 0;
}
| 167,555 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
/* do a global flush by default */
unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
preempt_disable();
if (current->active_mm != mm)
goto out;
if (!current->mm) {
leave_mm(smp_processor_id());
goto out;
}
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
base_pages_to_flush = TLB_FLUSH_ALL;
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
}
trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
if (base_pages_to_flush == TLB_FLUSH_ALL) {
start = 0UL;
end = TLB_FLUSH_ALL;
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, end);
preempt_enable();
}
Commit Message: x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
When switch_mm() activates a new PGD, it also sets a bit that
tells other CPUs that the PGD is in use so that TLB flush IPIs
will be sent. In order for that to work correctly, the bit
needs to be visible prior to loading the PGD and therefore
starting to fill the local TLB.
Document all the barriers that make this work correctly and add
a couple that were missing.
Signed-off-by: Andy Lutomirski <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
CWE ID: CWE-362 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
/* do a global flush by default */
unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
preempt_disable();
if (current->active_mm != mm) {
/* Synchronize with switch_mm. */
smp_mb();
goto out;
}
if (!current->mm) {
leave_mm(smp_processor_id());
/* Synchronize with switch_mm. */
smp_mb();
goto out;
}
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
/*
* Both branches below are implicit full barriers (MOV to CR or
* INVLPG) that synchronize with switch_mm.
*/
if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
base_pages_to_flush = TLB_FLUSH_ALL;
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
}
trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
if (base_pages_to_flush == TLB_FLUSH_ALL) {
start = 0UL;
end = TLB_FLUSH_ALL;
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, end);
preempt_enable();
}
| 167,440 |
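The commit message above is really about one ordering requirement: the CPU entering an mm must make its "I am using this mm" bit visible before it starts loading translations, and the flushing CPU must make its page-table update visible before it reads that bit; otherwise each side can miss the other and a stale TLB entry survives. The fragment below restates that store-buffering shape with C11 fences so it stands alone; the variable names are invented, and in the kernel the full barrier is smp_mb() or an instruction that implies it (the MOV-to-CR or INVLPG the added comments mention).

#include <stdatomic.h>

/* One flag per side of the race described in the commit message. */
static atomic_int cpu_is_using_mm;     /* set by the CPU switching into the mm */
static atomic_int page_tables_changed; /* set by the CPU doing the flush       */

/* switch_mm() side: publish the cpumask bit, then look for a pending update. */
static int entering_cpu(void)
{
    atomic_store_explicit(&cpu_is_using_mm, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);            /* smp_mb() stand-in */
    return atomic_load_explicit(&page_tables_changed, memory_order_relaxed);
}

/* flush_tlb_mm_range() side: publish the change, then look for interested CPUs. */
static int flushing_cpu(void)
{
    atomic_store_explicit(&page_tables_changed, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);             /* smp_mb() stand-in */
    return atomic_load_explicit(&cpu_is_using_mm, memory_order_relaxed);
}

With both fences in place it is impossible for the two functions, run concurrently from an all-zero start, to both return 0: either the entering CPU observes the new tables or the flusher observes the CPU bit and sends the IPI. Drop either fence and both loads may return 0, which is the lost-flush window the patch closes.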
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ChromeWebContentsDelegateAndroid::AddNewContents(
WebContents* source,
WebContents* new_contents,
WindowOpenDisposition disposition,
const gfx::Rect& initial_rect,
bool user_gesture,
bool* was_blocked) {
DCHECK_NE(disposition, SAVE_TO_DISK);
DCHECK_NE(disposition, CURRENT_TAB);
TabHelpers::AttachTabHelpers(new_contents);
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> obj = GetJavaDelegate(env);
AddWebContentsResult add_result =
ADD_WEB_CONTENTS_RESULT_STOP_LOAD_AND_DELETE;
if (!obj.is_null()) {
ScopedJavaLocalRef<jobject> jsource;
if (source)
jsource = source->GetJavaWebContents();
ScopedJavaLocalRef<jobject> jnew_contents;
if (new_contents)
jnew_contents = new_contents->GetJavaWebContents();
add_result = static_cast<AddWebContentsResult>(
Java_ChromeWebContentsDelegateAndroid_addNewContents(
env,
obj.obj(),
jsource.obj(),
jnew_contents.obj(),
static_cast<jint>(disposition),
NULL,
user_gesture));
}
if (was_blocked)
*was_blocked = !(add_result == ADD_WEB_CONTENTS_RESULT_PROCEED);
if (add_result == ADD_WEB_CONTENTS_RESULT_STOP_LOAD_AND_DELETE)
delete new_contents;
}
Commit Message: Revert "Load web contents after tab is created."
This reverts commit 4c55f398def3214369aefa9f2f2e8f5940d3799d.
BUG=432562
[email protected],[email protected],[email protected]
Review URL: https://codereview.chromium.org/894003005
Cr-Commit-Position: refs/heads/master@{#314469}
CWE ID: CWE-399 | void ChromeWebContentsDelegateAndroid::AddNewContents(
WebContents* source,
WebContents* new_contents,
WindowOpenDisposition disposition,
const gfx::Rect& initial_rect,
bool user_gesture,
bool* was_blocked) {
DCHECK_NE(disposition, SAVE_TO_DISK);
DCHECK_NE(disposition, CURRENT_TAB);
TabHelpers::AttachTabHelpers(new_contents);
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> obj = GetJavaDelegate(env);
bool handled = false;
if (!obj.is_null()) {
ScopedJavaLocalRef<jobject> jsource;
if (source)
jsource = source->GetJavaWebContents();
ScopedJavaLocalRef<jobject> jnew_contents;
if (new_contents)
jnew_contents = new_contents->GetJavaWebContents();
handled = Java_ChromeWebContentsDelegateAndroid_addNewContents(
env,
obj.obj(),
jsource.obj(),
jnew_contents.obj(),
static_cast<jint>(disposition),
NULL,
user_gesture);
}
if (was_blocked)
*was_blocked = !handled;
if (!handled)
delete new_contents;
}
| 171,137 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: tiffcp(TIFF* in, TIFF* out)
{
uint16 bitspersample, samplesperpixel;
uint16 input_compression, input_photometric;
copyFunc cf;
uint32 width, length;
struct cpTag* p;
CopyField(TIFFTAG_IMAGEWIDTH, width);
CopyField(TIFFTAG_IMAGELENGTH, length);
CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample);
CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel);
if (compression != (uint16)-1)
TIFFSetField(out, TIFFTAG_COMPRESSION, compression);
else
CopyField(TIFFTAG_COMPRESSION, compression);
TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression);
TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric);
if (input_compression == COMPRESSION_JPEG) {
/* Force conversion to RGB */
TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
} else if (input_photometric == PHOTOMETRIC_YCBCR) {
/* Otherwise, can't handle subsampled input */
uint16 subsamplinghor,subsamplingver;
TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING,
&subsamplinghor, &subsamplingver);
if (subsamplinghor!=1 || subsamplingver!=1) {
fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n",
TIFFFileName(in));
return FALSE;
}
}
if (compression == COMPRESSION_JPEG) {
if (input_photometric == PHOTOMETRIC_RGB &&
jpegcolormode == JPEGCOLORMODE_RGB)
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
else
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric);
}
else if (compression == COMPRESSION_SGILOG
|| compression == COMPRESSION_SGILOG24)
TIFFSetField(out, TIFFTAG_PHOTOMETRIC,
samplesperpixel == 1 ?
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV);
else if (input_compression == COMPRESSION_JPEG &&
samplesperpixel == 3 ) {
/* RGB conversion was forced above
hence the output will be of the same type */
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
}
else
CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT);
if (fillorder != 0)
TIFFSetField(out, TIFFTAG_FILLORDER, fillorder);
else
CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT);
/*
* Will copy `Orientation' tag from input image
*/
TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation);
switch (orientation) {
case ORIENTATION_BOTRIGHT:
case ORIENTATION_RIGHTBOT: /* XXX */
TIFFWarning(TIFFFileName(in), "using bottom-left orientation");
orientation = ORIENTATION_BOTLEFT;
/* fall thru... */
case ORIENTATION_LEFTBOT: /* XXX */
case ORIENTATION_BOTLEFT:
break;
case ORIENTATION_TOPRIGHT:
case ORIENTATION_RIGHTTOP: /* XXX */
default:
TIFFWarning(TIFFFileName(in), "using top-left orientation");
orientation = ORIENTATION_TOPLEFT;
/* fall thru... */
case ORIENTATION_LEFTTOP: /* XXX */
case ORIENTATION_TOPLEFT:
break;
}
TIFFSetField(out, TIFFTAG_ORIENTATION, orientation);
/*
* Choose tiles/strip for the output image according to
* the command line arguments (-tiles, -strips) and the
* structure of the input image.
*/
if (outtiled == -1)
outtiled = TIFFIsTiled(in);
if (outtiled) {
/*
* Setup output file's tile width&height. If either
* is not specified, use either the value from the
* input image or, if nothing is defined, use the
* library default.
*/
if (tilewidth == (uint32) -1)
TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth);
if (tilelength == (uint32) -1)
TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength);
TIFFDefaultTileSize(out, &tilewidth, &tilelength);
TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth);
TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength);
} else {
/*
* RowsPerStrip is left unspecified: use either the
* value from the input image or, if nothing is defined,
* use the library default.
*/
if (rowsperstrip == (uint32) 0) {
if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP,
&rowsperstrip)) {
rowsperstrip =
TIFFDefaultStripSize(out, rowsperstrip);
}
if (rowsperstrip > length && rowsperstrip != (uint32)-1)
rowsperstrip = length;
}
else if (rowsperstrip == (uint32) -1)
rowsperstrip = length;
TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip);
}
if (config != (uint16) -1)
TIFFSetField(out, TIFFTAG_PLANARCONFIG, config);
else
CopyField(TIFFTAG_PLANARCONFIG, config);
if (samplesperpixel <= 4)
CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT);
CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT);
/* SMinSampleValue & SMaxSampleValue */
switch (compression) {
case COMPRESSION_JPEG:
TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality);
TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode);
break;
case COMPRESSION_JBIG:
CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII);
CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII);
break;
case COMPRESSION_LZW:
case COMPRESSION_ADOBE_DEFLATE:
case COMPRESSION_DEFLATE:
case COMPRESSION_LZMA:
if (predictor != (uint16)-1)
TIFFSetField(out, TIFFTAG_PREDICTOR, predictor);
else
CopyField(TIFFTAG_PREDICTOR, predictor);
if (preset != -1) {
if (compression == COMPRESSION_ADOBE_DEFLATE
|| compression == COMPRESSION_DEFLATE)
TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset);
else if (compression == COMPRESSION_LZMA)
TIFFSetField(out, TIFFTAG_LZMAPRESET, preset);
}
break;
case COMPRESSION_CCITTFAX3:
case COMPRESSION_CCITTFAX4:
if (compression == COMPRESSION_CCITTFAX3) {
if (g3opts != (uint32) -1)
TIFFSetField(out, TIFFTAG_GROUP3OPTIONS,
g3opts);
else
CopyField(TIFFTAG_GROUP3OPTIONS, g3opts);
} else
CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG);
CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG);
CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG);
CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII);
break;
}
{
uint32 len32;
void** data;
if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data))
TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data);
}
{
uint16 ninks;
const char* inknames;
if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
int inknameslen = strlen(inknames) + 1;
const char* cp = inknames;
while (ninks > 1) {
cp = strchr(cp, '\0');
cp++;
inknameslen += (strlen(cp) + 1);
ninks--;
}
TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames);
}
}
}
{
unsigned short pg0, pg1;
if (pageInSeq == 1) {
if (pageNum < 0) /* only one input file */ {
if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1))
TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1);
} else
TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0);
} else {
if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) {
if (pageNum < 0) /* only one input file */
TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1);
else
TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0);
}
}
}
for (p = tags; p < &tags[NTAGS]; p++)
CopyTag(p->tag, p->count, p->type);
cf = pickCopyFunc(in, out, bitspersample, samplesperpixel);
return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE);
}
Commit Message: * tools/tiffcp.c: fix read of undefined variable in case of missing
required tags. Found on test case of MSVR 35100.
* tools/tiffcrop.c: fix read of undefined buffer in
readContigStripsIntoBuffer() due to uint16 overflow. Probably not a
security issue but I can be wrong. Reported as MSVR 35100 by Axel
Souchet from the MSRC Vulnerabilities & Mitigations team.
CWE ID: CWE-190 | tiffcp(TIFF* in, TIFF* out)
{
uint16 bitspersample, samplesperpixel = 1;
uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK;
copyFunc cf;
uint32 width, length;
struct cpTag* p;
CopyField(TIFFTAG_IMAGEWIDTH, width);
CopyField(TIFFTAG_IMAGELENGTH, length);
CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample);
CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel);
if (compression != (uint16)-1)
TIFFSetField(out, TIFFTAG_COMPRESSION, compression);
else
CopyField(TIFFTAG_COMPRESSION, compression);
TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression);
TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric);
if (input_compression == COMPRESSION_JPEG) {
/* Force conversion to RGB */
TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
} else if (input_photometric == PHOTOMETRIC_YCBCR) {
/* Otherwise, can't handle subsampled input */
uint16 subsamplinghor,subsamplingver;
TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING,
&subsamplinghor, &subsamplingver);
if (subsamplinghor!=1 || subsamplingver!=1) {
fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n",
TIFFFileName(in));
return FALSE;
}
}
if (compression == COMPRESSION_JPEG) {
if (input_photometric == PHOTOMETRIC_RGB &&
jpegcolormode == JPEGCOLORMODE_RGB)
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
else
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric);
}
else if (compression == COMPRESSION_SGILOG
|| compression == COMPRESSION_SGILOG24)
TIFFSetField(out, TIFFTAG_PHOTOMETRIC,
samplesperpixel == 1 ?
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV);
else if (input_compression == COMPRESSION_JPEG &&
samplesperpixel == 3 ) {
/* RGB conversion was forced above
hence the output will be of the same type */
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
}
else
CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT);
if (fillorder != 0)
TIFFSetField(out, TIFFTAG_FILLORDER, fillorder);
else
CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT);
/*
* Will copy `Orientation' tag from input image
*/
TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation);
switch (orientation) {
case ORIENTATION_BOTRIGHT:
case ORIENTATION_RIGHTBOT: /* XXX */
TIFFWarning(TIFFFileName(in), "using bottom-left orientation");
orientation = ORIENTATION_BOTLEFT;
/* fall thru... */
case ORIENTATION_LEFTBOT: /* XXX */
case ORIENTATION_BOTLEFT:
break;
case ORIENTATION_TOPRIGHT:
case ORIENTATION_RIGHTTOP: /* XXX */
default:
TIFFWarning(TIFFFileName(in), "using top-left orientation");
orientation = ORIENTATION_TOPLEFT;
/* fall thru... */
case ORIENTATION_LEFTTOP: /* XXX */
case ORIENTATION_TOPLEFT:
break;
}
TIFFSetField(out, TIFFTAG_ORIENTATION, orientation);
/*
* Choose tiles/strip for the output image according to
* the command line arguments (-tiles, -strips) and the
* structure of the input image.
*/
if (outtiled == -1)
outtiled = TIFFIsTiled(in);
if (outtiled) {
/*
* Setup output file's tile width&height. If either
* is not specified, use either the value from the
* input image or, if nothing is defined, use the
* library default.
*/
if (tilewidth == (uint32) -1)
TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth);
if (tilelength == (uint32) -1)
TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength);
TIFFDefaultTileSize(out, &tilewidth, &tilelength);
TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth);
TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength);
} else {
/*
* RowsPerStrip is left unspecified: use either the
* value from the input image or, if nothing is defined,
* use the library default.
*/
if (rowsperstrip == (uint32) 0) {
if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP,
&rowsperstrip)) {
rowsperstrip =
TIFFDefaultStripSize(out, rowsperstrip);
}
if (rowsperstrip > length && rowsperstrip != (uint32)-1)
rowsperstrip = length;
}
else if (rowsperstrip == (uint32) -1)
rowsperstrip = length;
TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip);
}
if (config != (uint16) -1)
TIFFSetField(out, TIFFTAG_PLANARCONFIG, config);
else
CopyField(TIFFTAG_PLANARCONFIG, config);
if (samplesperpixel <= 4)
CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT);
CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT);
/* SMinSampleValue & SMaxSampleValue */
switch (compression) {
case COMPRESSION_JPEG:
TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality);
TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode);
break;
case COMPRESSION_JBIG:
CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII);
CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII);
break;
case COMPRESSION_LZW:
case COMPRESSION_ADOBE_DEFLATE:
case COMPRESSION_DEFLATE:
case COMPRESSION_LZMA:
if (predictor != (uint16)-1)
TIFFSetField(out, TIFFTAG_PREDICTOR, predictor);
else
CopyField(TIFFTAG_PREDICTOR, predictor);
if (preset != -1) {
if (compression == COMPRESSION_ADOBE_DEFLATE
|| compression == COMPRESSION_DEFLATE)
TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset);
else if (compression == COMPRESSION_LZMA)
TIFFSetField(out, TIFFTAG_LZMAPRESET, preset);
}
break;
case COMPRESSION_CCITTFAX3:
case COMPRESSION_CCITTFAX4:
if (compression == COMPRESSION_CCITTFAX3) {
if (g3opts != (uint32) -1)
TIFFSetField(out, TIFFTAG_GROUP3OPTIONS,
g3opts);
else
CopyField(TIFFTAG_GROUP3OPTIONS, g3opts);
} else
CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG);
CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG);
CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG);
CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG);
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII);
break;
}
{
uint32 len32;
void** data;
if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data))
TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data);
}
{
uint16 ninks;
const char* inknames;
if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
int inknameslen = strlen(inknames) + 1;
const char* cp = inknames;
while (ninks > 1) {
cp = strchr(cp, '\0');
cp++;
inknameslen += (strlen(cp) + 1);
ninks--;
}
TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames);
}
}
}
{
unsigned short pg0, pg1;
if (pageInSeq == 1) {
if (pageNum < 0) /* only one input file */ {
if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1))
TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1);
} else
TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0);
} else {
if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) {
if (pageNum < 0) /* only one input file */
TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1);
else
TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0);
}
}
}
for (p = tags; p < &tags[NTAGS]; p++)
CopyTag(p->tag, p->count, p->type);
cf = pickCopyFunc(in, out, bitspersample, samplesperpixel);
return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE);
}
| 166,865 |
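The tiffcp change above is a defensive-initialisation fix: the tag lookups used there leave their output variable untouched when a tag is absent, so samplesperpixel and input_photometric now get safe defaults at their declarations instead of being read while still indeterminate. A small runnable sketch of the same pattern follows; get_tag() is a made-up stand-in for the TIFF tag lookup.

#include <stdio.h>

/* Getter in the style of the TIFF tag lookups above: it writes *value and
 * returns 1 only when the tag exists, otherwise it leaves *value alone. */
static int get_tag(int tag_present, unsigned short *value)
{
    if (!tag_present)
        return 0;
    *value = 3;
    return 1;
}

int main(void)
{
    /* Default at the declaration, as the fix does for samplesperpixel, so a
     * missing tag leaves a well-defined value instead of garbage. */
    unsigned short samples = 1;

    (void)get_tag(0 /* pretend the tag is missing */, &samples);
    printf("samples per pixel: %hu\n", samples);   /* prints 1, the default */
    return 0;
}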
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: long long Segment::ParseHeaders()
{
long long total, available;
const int status = m_pReader->Length(&total, &available);
if (status < 0) //error
return status;
assert((total < 0) || (available <= total));
const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
assert((segment_stop < 0) || (total < 0) || (segment_stop <= total));
assert((segment_stop < 0) || (m_pos <= segment_stop));
for (;;)
{
if ((total >= 0) && (m_pos >= total))
break;
if ((segment_stop >= 0) && (m_pos >= segment_stop))
break;
long long pos = m_pos;
const long long element_start = pos;
if ((pos + 1) > available)
return (pos + 1);
long len;
long long result = GetUIntLength(m_pReader, pos, len);
if (result < 0) //error
return result;
if (result > 0) //underflow (weird)
return (pos + 1);
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > available)
return pos + len;
const long long idpos = pos;
const long long id = ReadUInt(m_pReader, idpos, len);
if (id < 0) //error
return id;
if (id == 0x0F43B675) //Cluster ID
break;
pos += len; //consume ID
if ((pos + 1) > available)
return (pos + 1);
result = GetUIntLength(m_pReader, pos, len);
if (result < 0) //error
return result;
if (result > 0) //underflow (weird)
return (pos + 1);
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > available)
return pos + len;
const long long size = ReadUInt(m_pReader, pos, len);
if (size < 0) //error
return size;
pos += len; //consume length of size of element
const long long element_size = size + pos - element_start;
if ((segment_stop >= 0) && ((pos + size) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + size) > available)
return pos + size;
if (id == 0x0549A966) //Segment Info ID
{
if (m_pInfo)
return E_FILE_FORMAT_INVALID;
m_pInfo = new (std::nothrow) SegmentInfo(
this,
pos,
size,
element_start,
element_size);
if (m_pInfo == NULL)
return -1;
const long status = m_pInfo->Parse();
if (status)
return status;
}
else if (id == 0x0654AE6B) //Tracks ID
{
if (m_pTracks)
return E_FILE_FORMAT_INVALID;
m_pTracks = new (std::nothrow) Tracks(this,
pos,
size,
element_start,
element_size);
if (m_pTracks == NULL)
return -1;
const long status = m_pTracks->Parse();
if (status)
return status;
}
else if (id == 0x0C53BB6B) //Cues ID
{
if (m_pCues == NULL)
{
m_pCues = new (std::nothrow) Cues(
this,
pos,
size,
element_start,
element_size);
if (m_pCues == NULL)
return -1;
}
}
else if (id == 0x014D9B74) //SeekHead ID
{
if (m_pSeekHead == NULL)
{
m_pSeekHead = new (std::nothrow) SeekHead(
this,
pos,
size,
element_start,
element_size);
if (m_pSeekHead == NULL)
return -1;
const long status = m_pSeekHead->Parse();
if (status)
return status;
}
}
else if (id == 0x0043A770) //Chapters ID
{
if (m_pChapters == NULL)
{
m_pChapters = new (std::nothrow) Chapters(
this,
pos,
size,
element_start,
element_size);
if (m_pChapters == NULL)
return -1;
const long status = m_pChapters->Parse();
if (status)
return status;
}
}
m_pos = pos + size; //consume payload
}
assert((segment_stop < 0) || (m_pos <= segment_stop));
if (m_pInfo == NULL) //TODO: liberalize this behavior
return E_FILE_FORMAT_INVALID;
if (m_pTracks == NULL)
return E_FILE_FORMAT_INVALID;
return 0; //success
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long long Segment::ParseHeaders()
if (status)
return status;
} else if (id == 0x0C53BB6B) { // Cues ID
if (m_pCues == NULL) {
m_pCues = new (std::nothrow)
Cues(this, pos, size, element_start, element_size);
if (m_pCues == NULL)
return -1;
}
} else if (id == 0x014D9B74) { // SeekHead ID
if (m_pSeekHead == NULL) {
m_pSeekHead = new (std::nothrow)
SeekHead(this, pos, size, element_start, element_size);
if (m_pSeekHead == NULL)
return -1;
const long status = m_pSeekHead->Parse();
if (status)
return status;
}
} else if (id == 0x0043A770) { // Chapters ID
if (m_pChapters == NULL) {
m_pChapters = new (std::nothrow)
Chapters(this, pos, size, element_start, element_size);
if (m_pChapters == NULL)
return -1;
const long status = m_pChapters->Parse();
if (status)
return status;
}
}
m_pos = pos + size; // consume payload
}
assert((segment_stop < 0) || (m_pos <= segment_stop));
if (m_pInfo == NULL) // TODO: liberalize this behavior
return E_FILE_FORMAT_INVALID;
if (m_pTracks == NULL)
return E_FILE_FORMAT_INVALID;
return 0; // success
}
| 174,427 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool PrintRenderFrameHelper::PrintPagesNative(blink::WebLocalFrame* frame,
int page_count) {
const PrintMsg_PrintPages_Params& params = *print_pages_params_;
const PrintMsg_Print_Params& print_params = params.params;
std::vector<int> printed_pages = GetPrintedPages(params, page_count);
if (printed_pages.empty())
return false;
PdfMetafileSkia metafile(print_params.printed_doc_type);
CHECK(metafile.Init());
for (int page_number : printed_pages) {
PrintPageInternal(print_params, page_number, page_count, frame, &metafile,
nullptr, nullptr);
}
FinishFramePrinting();
metafile.FinishDocument();
#if defined(OS_ANDROID)
int sequence_number = -1;
base::FileDescriptor fd;
Send(new PrintHostMsg_AllocateTempFileForPrinting(routing_id(), &fd,
&sequence_number));
if (!SaveToFD(metafile, fd))
return false;
Send(new PrintHostMsg_TempFileForPrintingWritten(
routing_id(), sequence_number, printed_pages.size()));
return true;
#else
PrintHostMsg_DidPrintDocument_Params page_params;
if (!CopyMetafileDataToSharedMem(metafile,
&page_params.metafile_data_handle)) {
return false;
}
page_params.data_size = metafile.GetDataSize();
page_params.document_cookie = print_params.document_cookie;
Send(new PrintHostMsg_DidPrintDocument(routing_id(), page_params));
return true;
#endif // defined(OS_ANDROID)
}
Commit Message: Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
CWE ID: CWE-787 | bool PrintRenderFrameHelper::PrintPagesNative(blink::WebLocalFrame* frame,
int page_count) {
const PrintMsg_PrintPages_Params& params = *print_pages_params_;
const PrintMsg_Print_Params& print_params = params.params;
std::vector<int> printed_pages = GetPrintedPages(params, page_count);
if (printed_pages.empty())
return false;
PdfMetafileSkia metafile(print_params.printed_doc_type);
CHECK(metafile.Init());
for (int page_number : printed_pages) {
PrintPageInternal(print_params, page_number, page_count, frame, &metafile,
nullptr, nullptr);
}
FinishFramePrinting();
metafile.FinishDocument();
#if defined(OS_ANDROID)
int sequence_number = -1;
base::FileDescriptor fd;
Send(new PrintHostMsg_AllocateTempFileForPrinting(routing_id(), &fd,
&sequence_number));
if (!SaveToFD(metafile, fd))
return false;
Send(new PrintHostMsg_TempFileForPrintingWritten(
routing_id(), sequence_number, printed_pages.size()));
return true;
#else
PrintHostMsg_DidPrintDocument_Params page_params;
if (!CopyMetafileDataToReadOnlySharedMem(metafile,
&page_params.metafile_data_handle)) {
return false;
}
page_params.data_size = metafile.GetDataSize();
page_params.document_cookie = print_params.document_cookie;
Send(new PrintHostMsg_DidPrintDocument(routing_id(), page_params));
return true;
#endif // defined(OS_ANDROID)
}
| 172,855 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ResourceCoordinatorService::OnStart() {
ref_factory_.reset(new service_manager::ServiceContextRefFactory(
base::Bind(&service_manager::ServiceContext::RequestQuit,
base::Unretained(context()))));
ukm_recorder_ = ukm::MojoUkmRecorder::Create(context()->connector());
registry_.AddInterface(
base::Bind(&CoordinationUnitIntrospectorImpl::BindToInterface,
base::Unretained(&introspector_)));
auto page_signal_generator_impl = std::make_unique<PageSignalGeneratorImpl>();
registry_.AddInterface(
base::Bind(&PageSignalGeneratorImpl::BindToInterface,
base::Unretained(page_signal_generator_impl.get())));
coordination_unit_manager_.RegisterObserver(
std::move(page_signal_generator_impl));
coordination_unit_manager_.RegisterObserver(
std::make_unique<MetricsCollector>());
coordination_unit_manager_.RegisterObserver(
std::make_unique<IPCVolumeReporter>(
std::make_unique<base::OneShotTimer>()));
coordination_unit_manager_.OnStart(®istry_, ref_factory_.get());
coordination_unit_manager_.set_ukm_recorder(ukm_recorder_.get());
memory_instrumentation_coordinator_ =
std::make_unique<memory_instrumentation::CoordinatorImpl>(
context()->connector());
registry_.AddInterface(base::BindRepeating(
&memory_instrumentation::CoordinatorImpl::BindCoordinatorRequest,
base::Unretained(memory_instrumentation_coordinator_.get())));
tracing_agent_registry_ = std::make_unique<tracing::AgentRegistry>();
registry_.AddInterface(
base::BindRepeating(&tracing::AgentRegistry::BindAgentRegistryRequest,
base::Unretained(tracing_agent_registry_.get())));
tracing_coordinator_ = std::make_unique<tracing::Coordinator>();
registry_.AddInterface(
base::BindRepeating(&tracing::Coordinator::BindCoordinatorRequest,
base::Unretained(tracing_coordinator_.get())));
}
Commit Message: memory-infra: split up memory-infra coordinator service into two
This allows for heap profiler to use its own service with correct
capabilities and all other instances to use the existing coordinator
service.
Bug: 792028
Change-Id: I84e4ec71f5f1d00991c0516b1424ce7334bcd3cd
Reviewed-on: https://chromium-review.googlesource.com/836896
Commit-Queue: Lalit Maganti <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: oysteine <[email protected]>
Reviewed-by: Albert J. Wong <[email protected]>
Reviewed-by: Hector Dearman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#529059}
CWE ID: CWE-269 | void ResourceCoordinatorService::OnStart() {
ref_factory_.reset(new service_manager::ServiceContextRefFactory(
base::Bind(&service_manager::ServiceContext::RequestQuit,
base::Unretained(context()))));
ukm_recorder_ = ukm::MojoUkmRecorder::Create(context()->connector());
registry_.AddInterface(
base::Bind(&CoordinationUnitIntrospectorImpl::BindToInterface,
base::Unretained(&introspector_)));
auto page_signal_generator_impl = std::make_unique<PageSignalGeneratorImpl>();
registry_.AddInterface(
base::Bind(&PageSignalGeneratorImpl::BindToInterface,
base::Unretained(page_signal_generator_impl.get())));
coordination_unit_manager_.RegisterObserver(
std::move(page_signal_generator_impl));
coordination_unit_manager_.RegisterObserver(
std::make_unique<MetricsCollector>());
coordination_unit_manager_.RegisterObserver(
std::make_unique<IPCVolumeReporter>(
std::make_unique<base::OneShotTimer>()));
coordination_unit_manager_.OnStart(®istry_, ref_factory_.get());
coordination_unit_manager_.set_ukm_recorder(ukm_recorder_.get());
memory_instrumentation_coordinator_ =
std::make_unique<memory_instrumentation::CoordinatorImpl>(
context()->connector());
registry_.AddInterface(base::BindRepeating(
&memory_instrumentation::CoordinatorImpl::BindCoordinatorRequest,
base::Unretained(memory_instrumentation_coordinator_.get())));
registry_.AddInterface(base::BindRepeating(
&memory_instrumentation::CoordinatorImpl::BindHeapProfilerHelperRequest,
base::Unretained(memory_instrumentation_coordinator_.get())));
tracing_agent_registry_ = std::make_unique<tracing::AgentRegistry>();
registry_.AddInterface(
base::BindRepeating(&tracing::AgentRegistry::BindAgentRegistryRequest,
base::Unretained(tracing_agent_registry_.get())));
tracing_coordinator_ = std::make_unique<tracing::Coordinator>();
registry_.AddInterface(
base::BindRepeating(&tracing::Coordinator::BindCoordinatorRequest,
base::Unretained(tracing_coordinator_.get())));
}
| 172,919 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: long Chapters::ParseEdition(
long long pos,
long long size)
{
if (!ExpandEditionsArray())
return -1;
Edition& e = m_editions[m_editions_count++];
e.Init();
return e.Parse(m_pSegment->m_pReader, pos, size);
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long Chapters::ParseEdition(
Atom& a = m_atoms[m_atoms_count++];
a.Init();
return a.Parse(pReader, pos, size);
}
| 174,423 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void AppCache::InitializeWithDatabaseRecords(
const AppCacheDatabase::CacheRecord& cache_record,
const std::vector<AppCacheDatabase::EntryRecord>& entries,
const std::vector<AppCacheDatabase::NamespaceRecord>& intercepts,
const std::vector<AppCacheDatabase::NamespaceRecord>& fallbacks,
const std::vector<AppCacheDatabase::OnlineWhiteListRecord>& whitelists) {
DCHECK(cache_id_ == cache_record.cache_id);
online_whitelist_all_ = cache_record.online_wildcard;
update_time_ = cache_record.update_time;
for (size_t i = 0; i < entries.size(); ++i) {
const AppCacheDatabase::EntryRecord& entry = entries.at(i);
AddEntry(entry.url, AppCacheEntry(entry.flags, entry.response_id,
entry.response_size));
}
DCHECK(cache_size_ == cache_record.cache_size);
for (size_t i = 0; i < intercepts.size(); ++i)
intercept_namespaces_.push_back(intercepts.at(i).namespace_);
for (size_t i = 0; i < fallbacks.size(); ++i)
fallback_namespaces_.push_back(fallbacks.at(i).namespace_);
std::sort(intercept_namespaces_.begin(), intercept_namespaces_.end(),
SortNamespacesByLength);
std::sort(fallback_namespaces_.begin(), fallback_namespaces_.end(),
SortNamespacesByLength);
for (size_t i = 0; i < whitelists.size(); ++i) {
const AppCacheDatabase::OnlineWhiteListRecord& record = whitelists.at(i);
online_whitelist_namespaces_.push_back(
AppCacheNamespace(APPCACHE_NETWORK_NAMESPACE,
record.namespace_url,
GURL(),
record.is_pattern));
}
}
Commit Message: Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
CWE ID: CWE-200 | void AppCache::InitializeWithDatabaseRecords(
const AppCacheDatabase::CacheRecord& cache_record,
const std::vector<AppCacheDatabase::EntryRecord>& entries,
const std::vector<AppCacheDatabase::NamespaceRecord>& intercepts,
const std::vector<AppCacheDatabase::NamespaceRecord>& fallbacks,
const std::vector<AppCacheDatabase::OnlineWhiteListRecord>& whitelists) {
DCHECK_EQ(cache_id_, cache_record.cache_id);
online_whitelist_all_ = cache_record.online_wildcard;
update_time_ = cache_record.update_time;
for (size_t i = 0; i < entries.size(); ++i) {
const AppCacheDatabase::EntryRecord& entry = entries.at(i);
AddEntry(entry.url, AppCacheEntry(entry.flags, entry.response_id,
entry.response_size, entry.padding_size));
}
DCHECK_EQ(cache_size_, cache_record.cache_size);
DCHECK_EQ(padding_size_, cache_record.padding_size);
for (size_t i = 0; i < intercepts.size(); ++i)
intercept_namespaces_.push_back(intercepts.at(i).namespace_);
for (size_t i = 0; i < fallbacks.size(); ++i)
fallback_namespaces_.push_back(fallbacks.at(i).namespace_);
std::sort(intercept_namespaces_.begin(), intercept_namespaces_.end(),
SortNamespacesByLength);
std::sort(fallback_namespaces_.begin(), fallback_namespaces_.end(),
SortNamespacesByLength);
for (size_t i = 0; i < whitelists.size(); ++i) {
const AppCacheDatabase::OnlineWhiteListRecord& record = whitelists.at(i);
online_whitelist_namespaces_.push_back(
AppCacheNamespace(APPCACHE_NETWORK_NAMESPACE,
record.namespace_url,
GURL(),
record.is_pattern));
}
}
| 172,970 |
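Note on the AppCache padding record above: the fix stores a padding_size next to the real response size so quota bookkeeping no longer reveals the exact size of cross-origin responses (the CWE-200 issue). The sketch below only illustrates that idea and is not Chromium's actual algorithm; the helper name compute_padding and the 16 KiB bucket are invented for this example.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: report sizes rounded up to a coarse bucket so the
 * stored value does not disclose the exact cross-origin resource length. */
static int64_t compute_padding(int64_t response_size) {
    const int64_t bucket = 1 << 14;                        /* 16 KiB */
    int64_t padded = ((response_size + bucket - 1) / bucket) * bucket;
    return padded - response_size;
}

int main(void) {
    printf("%lld\n", (long long)compute_padding(1));       /* 16383 */
    printf("%lld\n", (long long)compute_padding(16384));   /* 0 */
    return 0;
}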
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void BrowserDevToolsAgentHost::AttachSession(DevToolsSession* session) {
session->SetBrowserOnly(true);
session->AddHandler(
base::WrapUnique(new protocol::TargetHandler(true /* browser_only */)));
if (only_discovery_)
return;
session->AddHandler(base::WrapUnique(new protocol::BrowserHandler()));
session->AddHandler(base::WrapUnique(new protocol::IOHandler(
GetIOContext())));
session->AddHandler(base::WrapUnique(new protocol::MemoryHandler()));
session->AddHandler(base::WrapUnique(new protocol::SecurityHandler()));
session->AddHandler(base::WrapUnique(new protocol::SystemInfoHandler()));
session->AddHandler(base::WrapUnique(new protocol::TetheringHandler(
socket_callback_, tethering_task_runner_)));
session->AddHandler(base::WrapUnique(new protocol::TracingHandler(
protocol::TracingHandler::Browser,
FrameTreeNode::kFrameTreeNodeInvalidId,
GetIOContext())));
}
Commit Message: [DevTools] Do not allow chrome.debugger to attach to web ui pages
If the page navigates to web ui, we force detach the debugger extension.
[email protected]
Bug: 798222
Change-Id: Idb46c2f59e839388397a8dfa6ce2e2a897698df3
Reviewed-on: https://chromium-review.googlesource.com/935961
Commit-Queue: Dmitry Gozman <[email protected]>
Reviewed-by: Devlin <[email protected]>
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Nasko Oskov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#540916}
CWE ID: CWE-20 | bool BrowserDevToolsAgentHost::AttachSession(DevToolsSession* session) {
if (session->restricted())
return false;
session->SetBrowserOnly(true);
session->AddHandler(
base::WrapUnique(new protocol::TargetHandler(true /* browser_only */)));
if (only_discovery_)
return true;
session->AddHandler(base::WrapUnique(new protocol::BrowserHandler()));
session->AddHandler(base::WrapUnique(new protocol::IOHandler(
GetIOContext())));
session->AddHandler(base::WrapUnique(new protocol::MemoryHandler()));
session->AddHandler(base::WrapUnique(new protocol::SecurityHandler()));
session->AddHandler(base::WrapUnique(new protocol::SystemInfoHandler()));
session->AddHandler(base::WrapUnique(new protocol::TetheringHandler(
socket_callback_, tethering_task_runner_)));
session->AddHandler(base::WrapUnique(new protocol::TracingHandler(
protocol::TracingHandler::Browser,
FrameTreeNode::kFrameTreeNodeInvalidId,
GetIOContext())));
return true;
}
| 173,241 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: status_t NuPlayer::GenericSource::setBuffers(
bool audio, Vector<MediaBuffer *> &buffers) {
if (mIsSecure && !audio) {
return mVideoTrack.mSource->setBuffers(buffers);
}
return INVALID_OPERATION;
}
Commit Message: MPEG4Extractor: ensure kKeyTrackID exists before creating an MPEG4Source as track.
GenericSource: return error when no track exists.
SampleIterator: make sure mSamplesPerChunk is not zero before using it as divisor.
Bug: 21657957
Bug: 23705695
Bug: 22802344
Bug: 28799341
Change-Id: I7664992ade90b935d3f255dcd43ecc2898f30b04
(cherry picked from commit 0386c91b8a910a134e5898ffa924c1b6c7560b13)
CWE ID: CWE-119 | status_t NuPlayer::GenericSource::setBuffers(
bool audio, Vector<MediaBuffer *> &buffers) {
if (mIsWidevine && !audio && mVideoTrack.mSource != NULL) {
return mVideoTrack.mSource->setBuffers(buffers);
}
return INVALID_OPERATION;
}
| 173,763 |
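The commit message in the record above also mentions making sure mSamplesPerChunk is not zero before using it as a divisor, a part of the fix that the quoted setBuffers function does not show. A minimal sketch of that guard, using invented names (chunk_for_sample, samples_per_chunk) rather than the real SampleIterator code:

#include <stdint.h>
#include <stdio.h>

/* Reject a zero divisor read from untrusted container data instead of
 * letting the division fault. */
static int chunk_for_sample(uint32_t sample, uint32_t samples_per_chunk,
                            uint32_t *chunk_out) {
    if (samples_per_chunk == 0)
        return -1;                        /* malformed sample table */
    *chunk_out = sample / samples_per_chunk;
    return 0;
}

int main(void) {
    uint32_t chunk = 0;
    printf("%d\n", chunk_for_sample(10, 0, &chunk));            /* -1 */
    printf("%d %u\n", chunk_for_sample(10, 4, &chunk), chunk);  /* 0 2 */
    return 0;
}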
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static js_Ast *memberexp(js_State *J)
{
js_Ast *a;
INCREC();
a = newexp(J);
loop:
if (jsP_accept(J, '.')) { a = EXP2(MEMBER, a, identifiername(J)); goto loop; }
if (jsP_accept(J, '[')) { a = EXP2(INDEX, a, expression(J, 0)); jsP_expect(J, ']'); goto loop; }
DECREC();
return a;
}
Commit Message:
CWE ID: CWE-674 | static js_Ast *memberexp(js_State *J)
{
js_Ast *a = newexp(J);
SAVEREC();
loop:
INCREC();
if (jsP_accept(J, '.')) { a = EXP2(MEMBER, a, identifiername(J)); goto loop; }
if (jsP_accept(J, '[')) { a = EXP2(INDEX, a, expression(J, 0)); jsP_expect(J, ']'); goto loop; }
POPREC();
return a;
}
| 165,136 |
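The record above has no commit message, but the CWE-674 tag and the SAVEREC/INCREC/POPREC macros in the fix point at uncontrolled recursion in the expression parser. The sketch below shows the general pattern of bounding parser recursion depth; the limit, the parse_nested helper and the longjmp-based bailout are illustrative assumptions, not the mujs implementation.

#include <setjmp.h>
#include <stdio.h>

#define MAX_DEPTH 100
static int depth;
static jmp_buf bail;

/* Count nesting depth on the way down and bail out instead of letting a
 * deeply nested input exhaust the C stack. */
static void parse_nested(const char *p) {
    if (++depth > MAX_DEPTH)
        longjmp(bail, 1);
    if (*p == '(')
        parse_nested(p + 1);
    --depth;
}

int main(void) {
    if (setjmp(bail)) {
        puts("input too deeply nested");
        return 1;
    }
    parse_nested("(((((");
    puts("ok");
    return 0;
}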
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int jpc_ppm_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
ppm->data = 0;
if (ms->len < 1) {
goto error;
}
if (jpc_getuint8(in, &ppm->ind)) {
goto error;
}
ppm->len = ms->len - 1;
if (ppm->len > 0) {
if (!(ppm->data = jas_malloc(ppm->len))) {
goto error;
}
if (JAS_CAST(uint, jas_stream_read(in, ppm->data, ppm->len)) != ppm->len) {
goto error;
}
} else {
ppm->data = 0;
}
return 0;
error:
jpc_ppm_destroyparms(ms);
return -1;
}
Commit Message: The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
CWE ID: CWE-190 | static int jpc_ppm_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
ppm->data = 0;
if (ms->len < 1) {
goto error;
}
if (jpc_getuint8(in, &ppm->ind)) {
goto error;
}
ppm->len = ms->len - 1;
if (ppm->len > 0) {
if (!(ppm->data = jas_malloc(ppm->len))) {
goto error;
}
if (JAS_CAST(jas_uint, jas_stream_read(in, ppm->data, ppm->len)) != ppm->len) {
goto error;
}
} else {
ppm->data = 0;
}
return 0;
error:
jpc_ppm_destroyparms(ms);
return -1;
}
| 168,717 |
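The JasPer commit message above mentions new integer-overflow checks and safe add/multiply helpers without quoting them. Below is a generic sketch of the kind of checked multiplication such helpers perform; safe_size_mul is an invented name, not the actual jas_safe_* API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Detect wrap-around before it happens instead of multiplying blindly. */
static bool safe_size_mul(size_t a, size_t b, size_t *result) {
    if (b != 0 && a > SIZE_MAX / b)
        return false;                 /* would overflow */
    *result = a * b;
    return true;
}

int main(void) {
    size_t out = 0;
    /* Fits in a 64-bit size_t, so the check passes. */
    printf("%d\n", (int)safe_size_mul((size_t)1 << 16, (size_t)1 << 16, &out));
    /* SIZE_MAX * 2 would wrap, so the helper refuses. */
    printf("%d\n", (int)safe_size_mul(SIZE_MAX, 2, &out));
    return 0;
}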
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc)
{
TIFFPredictorState* sp = PredictorState(tif);
tmsize_t stride = sp->stride;
uint32 *wp = (uint32*) cp0;
tmsize_t wc = cc/4;
assert((cc%(4*stride))==0);
if (wc > stride) {
wc -= stride;
wp += wc - 1;
do {
REPEAT4(stride, wp[stride] -= wp[0]; wp--)
wc -= stride;
} while (wc > 0);
}
}
Commit Message: * libtiff/tif_predict.h, libtiff/tif_predict.c:
Replace assertions by runtime checks to avoid assertions in debug mode,
or buffer overflows in release mode. Can happen when dealing with
unusual tile size like YCbCr with subsampling. Reported as MSVR 35105
by Axel Souchet & Vishal Chauhan from the MSRC Vulnerabilities & Mitigations
team.
CWE ID: CWE-119 | horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc)
{
TIFFPredictorState* sp = PredictorState(tif);
tmsize_t stride = sp->stride;
uint32 *wp = (uint32*) cp0;
tmsize_t wc = cc/4;
if((cc%(4*stride))!=0)
{
TIFFErrorExt(tif->tif_clientdata, "horDiff32",
"%s", "(cc%(4*stride))!=0");
return 0;
}
if (wc > stride) {
wc -= stride;
wp += wc - 1;
do {
REPEAT4(stride, wp[stride] -= wp[0]; wp--)
wc -= stride;
} while (wc > 0);
}
return 1;
}
| 166,886 |
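The libtiff record above replaces an assert() with a runtime check because assertions are compiled out under NDEBUG, leaving the out-of-bounds access reachable in release builds. A generic sketch of that pattern follows; the check_stride helper and the sizes are made up for illustration.

#include <stdio.h>

/* An assert() here would vanish in release builds; an explicit check
 * rejects the malformed buffer size instead of reading past it. */
static int check_stride(long cc, long stride) {
    if ((cc % (4 * stride)) != 0) {
        fprintf(stderr, "bad buffer size\n");
        return 0;
    }
    return 1;
}

int main(void) {
    printf("%d\n", check_stride(16, 1));  /* 1: multiple of 4*stride */
    printf("%d\n", check_stride(15, 1));  /* 0: rejected at runtime */
    return 0;
}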
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
bool from_vmentry, u32 *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control, vmcs12_exec_ctrl;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
} else {
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
}
if (from_vmentry) {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
vmcs12->vm_entry_intr_info_field);
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
vmcs12->vm_entry_exception_error_code);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmcs12->vm_entry_instruction_len);
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs12->guest_interruptibility_info);
vmx->loaded_vmcs->nmi_known_unmasked =
!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
} else {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
vmx_set_rflags(vcpu, vmcs12->guest_rflags);
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
vmcs12->guest_pending_dbg_exceptions);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
if (nested_cpu_has_xsaves(vmcs12))
vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
/* Preemption timer setting is only taken from vmcs01. */
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
exec_control |= vmcs_config.pin_based_exec_ctrl;
if (vmx->hv_deadline_tsc == -1)
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
vmx->nested.preemption_timer_expired = false;
if (nested_cpu_has_preemption_timer(vmcs12))
vmx_start_preemption_timer(vcpu);
/*
* Whether page-faults are trapped is determined by a combination of
* 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
* If enable_ept, L0 doesn't care about page faults and we should
* set all of these to L1's desires. However, if !enable_ept, L0 does
* care about (at least some) page faults, and because it is not easy
* (if at all possible?) to merge L0 and L1's desires, we simply ask
* to exit on each and every L2 page fault. This is done by setting
* MASK=MATCH=0 and (see below) EB.PF=1.
* Note that below we don't need special code to set EB.PF beyond the
* "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
* vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
* !enable_ept, EB.PF is 1, so the "or" will always be 1.
*/
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
enable_ept ? vmcs12->page_fault_error_code_mask : 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
enable_ept ? vmcs12->page_fault_error_code_match : 0);
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx->secondary_exec_control;
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_ENABLE_VMFUNC);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
~SECONDARY_EXEC_ENABLE_PML;
exec_control |= vmcs12_exec_ctrl;
}
/* All VMFUNCs are currently emulated through L0 vmexits. */
if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC)
vmcs_write64(VM_FUNCTION_CONTROL, 0);
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
vmcs_write64(EOI_EXIT_BITMAP0,
vmcs12->eoi_exit_bitmap0);
vmcs_write64(EOI_EXIT_BITMAP1,
vmcs12->eoi_exit_bitmap1);
vmcs_write64(EOI_EXIT_BITMAP2,
vmcs12->eoi_exit_bitmap2);
vmcs_write64(EOI_EXIT_BITMAP3,
vmcs12->eoi_exit_bitmap3);
vmcs_write16(GUEST_INTR_STATUS,
vmcs12->guest_intr_status);
}
/*
* Write an illegal value to APIC_ACCESS_ADDR. Later,
* nested_get_vmcs12_pages will either fix it up or
* remove the VM execution control.
*/
if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
vmcs_write64(APIC_ACCESS_ADDR, -1ull);
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
/*
* Set host-state according to L0's settings (vmcs12 is irrelevant here)
* Some constant fields are set here by vmx_set_constant_host_state().
* Other fields are different per CPU, and will be set later when
* vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
*/
vmx_set_constant_host_state(vmx);
/*
* Set the MSR load/store lists to match L0's settings.
*/
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
/*
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
* entry, but only if the current (host) sp changed from the value
* we wrote last (vmx->host_rsp). This cache is no longer relevant
* if we switch vmcs, and rather than hold a separate cache per vmcs,
* here we just force the write to happen on entry.
*/
vmx->host_rsp = 0;
exec_control = vmx_exec_control(vmx); /* L0's desires */
exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
exec_control &= ~CPU_BASED_TPR_SHADOW;
exec_control |= vmcs12->cpu_based_vm_exec_control;
/*
* Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
* nested_get_vmcs12_pages can't fix it up, the illegal value
* will result in a VM entry failure.
*/
if (exec_control & CPU_BASED_TPR_SHADOW) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
}
/*
* Merging of IO bitmap not currently supported.
* Rather, exit every time.
*/
exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
exec_control |= CPU_BASED_UNCOND_IO_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
* bitwise-or of what L1 wants to trap for L2, and what we want to
* trap. Note that CR0.TS also needs updating - we do this later.
*/
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
/* L2->L1 exit controls are emulated - the hardware exit is to L0 so
* we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
* bits are further modified by vmx_set_efer() below.
*/
vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
* emulated by vmx_set_efer(), below.
*/
vm_entry_controls_init(vmx,
(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
~VM_ENTRY_IA32E_MODE) |
(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
vcpu->arch.pat = vmcs12->guest_ia32_pat;
} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
set_cr4_guest_host_mask(vmx);
if (from_vmentry &&
vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
vcpu->arch.tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
if (enable_vpid) {
/*
* There is no direct mapping between vpid02 and vpid12, the
* vpid02 is per-vCPU for L0 and reused while the value of
* vpid12 is changed w/ one invvpid during nested vmentry.
* The vpid12 is allocated by L1 for L2, so it will not
* influence global bitmap(for vpid01 and vpid02 allocation)
* even if spawn a lot of nested vCPUs.
*/
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
}
} else {
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx_flush_tlb(vcpu);
}
}
if (enable_pml) {
/*
* Conceptually we want to copy the PML address and index from
* vmcs01 here, and then back to vmcs01 on nested vmexit. But,
* since we always flush the log on each vmexit, this happens
* to be equivalent to simply resetting the fields in vmcs02.
*/
ASSERT(vmx->pml_pg);
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
if (nested_cpu_has_ept(vmcs12)) {
if (nested_ept_init_mmu_context(vcpu)) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return 1;
}
} else if (nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmx_flush_tlb_ept_only(vcpu);
}
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
* bits which we consider mandatory enabled.
* The CR0_READ_SHADOW is what L2 should have expected to read given
* the specifications by L1; It's not enough to take
* vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we
* have more bits than L1 expected.
*/
vmx_set_cr0(vcpu, vmcs12->guest_cr0);
vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
vcpu->arch.efer = vmcs12->guest_ia32_efer;
else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
else
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
return 1;
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
/*
* L1 may access the L2's PDPTR, so save them to construct vmcs12
*/
if (enable_ept) {
vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
}
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
return 0;
}
Commit Message: kvm: nVMX: Don't allow L2 to access the hardware CR8
If L1 does not specify the "use TPR shadow" VM-execution control in
vmcs12, then L0 must specify the "CR8-load exiting" and "CR8-store
exiting" VM-execution controls in vmcs02. Failure to do so will give
the L2 VM unrestricted read/write access to the hardware CR8.
This fixes CVE-2017-12154.
Signed-off-by: Jim Mattson <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
CWE ID: | static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
bool from_vmentry, u32 *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control, vmcs12_exec_ctrl;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
} else {
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
}
if (from_vmentry) {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
vmcs12->vm_entry_intr_info_field);
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
vmcs12->vm_entry_exception_error_code);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmcs12->vm_entry_instruction_len);
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs12->guest_interruptibility_info);
vmx->loaded_vmcs->nmi_known_unmasked =
!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
} else {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
vmx_set_rflags(vcpu, vmcs12->guest_rflags);
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
vmcs12->guest_pending_dbg_exceptions);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
if (nested_cpu_has_xsaves(vmcs12))
vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
/* Preemption timer setting is only taken from vmcs01. */
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
exec_control |= vmcs_config.pin_based_exec_ctrl;
if (vmx->hv_deadline_tsc == -1)
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
vmx->nested.preemption_timer_expired = false;
if (nested_cpu_has_preemption_timer(vmcs12))
vmx_start_preemption_timer(vcpu);
/*
* Whether page-faults are trapped is determined by a combination of
* 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
* If enable_ept, L0 doesn't care about page faults and we should
* set all of these to L1's desires. However, if !enable_ept, L0 does
* care about (at least some) page faults, and because it is not easy
* (if at all possible?) to merge L0 and L1's desires, we simply ask
* to exit on each and every L2 page fault. This is done by setting
* MASK=MATCH=0 and (see below) EB.PF=1.
* Note that below we don't need special code to set EB.PF beyond the
* "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
* vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
* !enable_ept, EB.PF is 1, so the "or" will always be 1.
*/
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
enable_ept ? vmcs12->page_fault_error_code_mask : 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
enable_ept ? vmcs12->page_fault_error_code_match : 0);
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx->secondary_exec_control;
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_ENABLE_VMFUNC);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
~SECONDARY_EXEC_ENABLE_PML;
exec_control |= vmcs12_exec_ctrl;
}
/* All VMFUNCs are currently emulated through L0 vmexits. */
if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC)
vmcs_write64(VM_FUNCTION_CONTROL, 0);
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
vmcs_write64(EOI_EXIT_BITMAP0,
vmcs12->eoi_exit_bitmap0);
vmcs_write64(EOI_EXIT_BITMAP1,
vmcs12->eoi_exit_bitmap1);
vmcs_write64(EOI_EXIT_BITMAP2,
vmcs12->eoi_exit_bitmap2);
vmcs_write64(EOI_EXIT_BITMAP3,
vmcs12->eoi_exit_bitmap3);
vmcs_write16(GUEST_INTR_STATUS,
vmcs12->guest_intr_status);
}
/*
* Write an illegal value to APIC_ACCESS_ADDR. Later,
* nested_get_vmcs12_pages will either fix it up or
* remove the VM execution control.
*/
if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
vmcs_write64(APIC_ACCESS_ADDR, -1ull);
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
/*
* Set host-state according to L0's settings (vmcs12 is irrelevant here)
* Some constant fields are set here by vmx_set_constant_host_state().
* Other fields are different per CPU, and will be set later when
* vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
*/
vmx_set_constant_host_state(vmx);
/*
* Set the MSR load/store lists to match L0's settings.
*/
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
/*
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
* entry, but only if the current (host) sp changed from the value
* we wrote last (vmx->host_rsp). This cache is no longer relevant
* if we switch vmcs, and rather than hold a separate cache per vmcs,
* here we just force the write to happen on entry.
*/
vmx->host_rsp = 0;
exec_control = vmx_exec_control(vmx); /* L0's desires */
exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
exec_control &= ~CPU_BASED_TPR_SHADOW;
exec_control |= vmcs12->cpu_based_vm_exec_control;
/*
* Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
* nested_get_vmcs12_pages can't fix it up, the illegal value
* will result in a VM entry failure.
*/
if (exec_control & CPU_BASED_TPR_SHADOW) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
} else {
#ifdef CONFIG_X86_64
exec_control |= CPU_BASED_CR8_LOAD_EXITING |
CPU_BASED_CR8_STORE_EXITING;
#endif
}
/*
* Merging of IO bitmap not currently supported.
* Rather, exit every time.
*/
exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
exec_control |= CPU_BASED_UNCOND_IO_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
* bitwise-or of what L1 wants to trap for L2, and what we want to
* trap. Note that CR0.TS also needs updating - we do this later.
*/
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
/* L2->L1 exit controls are emulated - the hardware exit is to L0 so
* we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
* bits are further modified by vmx_set_efer() below.
*/
vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
* emulated by vmx_set_efer(), below.
*/
vm_entry_controls_init(vmx,
(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
~VM_ENTRY_IA32E_MODE) |
(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
vcpu->arch.pat = vmcs12->guest_ia32_pat;
} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
set_cr4_guest_host_mask(vmx);
if (from_vmentry &&
vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
vcpu->arch.tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
if (enable_vpid) {
/*
* There is no direct mapping between vpid02 and vpid12, the
* vpid02 is per-vCPU for L0 and reused while the value of
* vpid12 is changed w/ one invvpid during nested vmentry.
* The vpid12 is allocated by L1 for L2, so it will not
* influence global bitmap(for vpid01 and vpid02 allocation)
* even if spawn a lot of nested vCPUs.
*/
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
}
} else {
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx_flush_tlb(vcpu);
}
}
if (enable_pml) {
/*
* Conceptually we want to copy the PML address and index from
* vmcs01 here, and then back to vmcs01 on nested vmexit. But,
* since we always flush the log on each vmexit, this happens
* to be equivalent to simply resetting the fields in vmcs02.
*/
ASSERT(vmx->pml_pg);
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
if (nested_cpu_has_ept(vmcs12)) {
if (nested_ept_init_mmu_context(vcpu)) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return 1;
}
} else if (nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmx_flush_tlb_ept_only(vcpu);
}
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
* bits which we consider mandatory enabled.
* The CR0_READ_SHADOW is what L2 should have expected to read given
* the specifications by L1; It's not enough to take
* vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we
* have more bits than L1 expected.
*/
vmx_set_cr0(vcpu, vmcs12->guest_cr0);
vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
vcpu->arch.efer = vmcs12->guest_ia32_efer;
else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
else
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
return 1;
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
/*
* L1 may access the L2's PDPTR, so save them to construct vmcs12
*/
if (enable_ept) {
vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
}
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
return 0;
}
| 167,990 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: perform_gamma_composition_tests(png_modifier *pm, int do_background,
int expand_16)
{
png_byte colour_type = 0;
png_byte bit_depth = 0;
unsigned int palette_number = 0;
/* Skip the non-alpha cases - there is no setting of a transparency colour at
* present.
*/
while (next_format(&colour_type, &bit_depth, &palette_number, 1/*gamma*/))
if ((colour_type & PNG_COLOR_MASK_ALPHA) != 0)
{
unsigned int i, j;
/* Don't skip the i==j case here - it's relevant. */
for (i=0; i<pm->ngamma_tests; ++i) for (j=0; j<pm->ngamma_tests; ++j)
{
gamma_composition_test(pm, colour_type, bit_depth, palette_number,
pm->interlace_type, 1/pm->gammas[i], pm->gammas[j],
pm->use_input_precision, do_background, expand_16);
if (fail(pm))
return;
}
}
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | perform_gamma_composition_tests(png_modifier *pm, int do_background,
int expand_16)
{
png_byte colour_type = 0;
png_byte bit_depth = 0;
unsigned int palette_number = 0;
/* Skip the non-alpha cases - there is no setting of a transparency colour at
* present.
*
* TODO: incorrect; the palette case sets tRNS and, now RGB and gray do,
* however the palette case fails miserably so is commented out below.
*/
while (next_format(&colour_type, &bit_depth, &palette_number,
pm->test_lbg_gamma_composition, pm->test_tRNS))
if ((colour_type & PNG_COLOR_MASK_ALPHA) != 0
#if 0 /* TODO: FIXME */
/*TODO: FIXME: this should work */
|| colour_type == 3
#endif
|| (colour_type != 3 && palette_number != 0))
{
unsigned int i, j;
/* Don't skip the i==j case here - it's relevant. */
for (i=0; i<pm->ngamma_tests; ++i) for (j=0; j<pm->ngamma_tests; ++j)
{
gamma_composition_test(pm, colour_type, bit_depth, palette_number,
pm->interlace_type, 1/pm->gammas[i], pm->gammas[j],
pm->use_input_precision, do_background, expand_16);
if (fail(pm))
return;
}
}
}
| 173,679 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
struct sock *sk)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
ev->type = LLC_SAP_EV_TYPE_PDU;
ev->reason = 0;
skb->sk = sk;
llc_sap_state_process(sap, skb);
}
Commit Message: net/llc: avoid BUG_ON() in skb_orphan()
It seems nobody used LLC since linux-3.12.
Fortunately fuzzers like syzkaller still know how to run this code,
otherwise it would be no fun.
Setting skb->sk without skb->destructor leads to all kinds of
bugs, we now prefer to be very strict about it.
Ideally here we would use skb_set_owner() but this helper does not exist yet,
only CAN seems to have a private helper for that.
Fixes: 376c7311bdb6 ("net: add a temporary sanity check in skb_orphan()")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
struct sock *sk)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
ev->type = LLC_SAP_EV_TYPE_PDU;
ev->reason = 0;
skb_orphan(skb);
sock_hold(sk);
skb->sk = sk;
skb->destructor = sock_efree;
llc_sap_state_process(sap, skb);
}
| 168,349 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ftc_snode_load( FTC_SNode snode,
FTC_Manager manager,
FT_UInt gindex,
FT_ULong *asize )
{
FT_Error error;
FTC_GNode gnode = FTC_GNODE( snode );
FTC_Family family = gnode->family;
FT_Memory memory = manager->memory;
FT_Face face;
FTC_SBit sbit;
FTC_SFamilyClass clazz;
if ( (FT_UInt)(gindex - gnode->gindex) >= snode->count )
{
FT_ERROR(( "ftc_snode_load: invalid glyph index" ));
return FT_THROW( Invalid_Argument );
}
sbit = snode->sbits + ( gindex - gnode->gindex );
clazz = (FTC_SFamilyClass)family->clazz;
sbit->buffer = 0;
error = clazz->family_load_glyph( family, gindex, manager, &face );
if ( error )
goto BadGlyph;
{
FT_Int temp;
FT_GlyphSlot slot = face->glyph;
FT_Bitmap* bitmap = &slot->bitmap;
FT_Pos xadvance, yadvance; /* FT_GlyphSlot->advance.{x|y} */
if ( slot->format != FT_GLYPH_FORMAT_BITMAP )
{
FT_TRACE0(( "ftc_snode_load:"
" glyph loaded didn't return a bitmap\n" ));
goto BadGlyph;
}
/* Check that our values fit into 8-bit containers! */
/* If this is not the case, our bitmap is too large */
/* and we will leave it as `missing' with sbit.buffer = 0 */
#define CHECK_CHAR( d ) ( temp = (FT_Char)d, temp == d )
#define CHECK_BYTE( d ) ( temp = (FT_Byte)d, temp == d )
/* horizontal advance in pixels */
xadvance = ( slot->advance.x + 32 ) >> 6;
yadvance = ( slot->advance.y + 32 ) >> 6;
if ( !CHECK_BYTE( bitmap->rows ) ||
!CHECK_BYTE( bitmap->width ) ||
!CHECK_CHAR( bitmap->pitch ) ||
!CHECK_CHAR( slot->bitmap_left ) ||
!CHECK_CHAR( slot->bitmap_top ) ||
!CHECK_CHAR( xadvance ) ||
!CHECK_CHAR( yadvance ) )
{
FT_TRACE2(( "ftc_snode_load:"
" glyph too large for small bitmap cache\n"));
goto BadGlyph;
}
sbit->width = (FT_Byte)bitmap->width;
sbit->height = (FT_Byte)bitmap->rows;
sbit->pitch = (FT_Char)bitmap->pitch;
sbit->left = (FT_Char)slot->bitmap_left;
sbit->top = (FT_Char)slot->bitmap_top;
sbit->xadvance = (FT_Char)xadvance;
sbit->yadvance = (FT_Char)yadvance;
sbit->format = (FT_Byte)bitmap->pixel_mode;
sbit->max_grays = (FT_Byte)(bitmap->num_grays - 1);
/* copy the bitmap into a new buffer -- ignore error */
error = ftc_sbit_copy_bitmap( sbit, bitmap, memory );
/* now, compute size */
if ( asize )
*asize = FT_ABS( sbit->pitch ) * sbit->height;
} /* glyph loading successful */
/* ignore the errors that might have occurred -- */
/* we mark unloaded glyphs with `sbit.buffer == 0' */
/* and `width == 255', `height == 0' */
/* */
if ( error && FT_ERR_NEQ( error, Out_Of_Memory ) )
{
BadGlyph:
sbit->width = 255;
sbit->height = 0;
sbit->buffer = NULL;
error = FT_Err_Ok;
if ( asize )
*asize = 0;
}
return error;
}
Commit Message:
CWE ID: CWE-119 | ftc_snode_load( FTC_SNode snode,
FTC_Manager manager,
FT_UInt gindex,
FT_ULong *asize )
{
FT_Error error;
FTC_GNode gnode = FTC_GNODE( snode );
FTC_Family family = gnode->family;
FT_Memory memory = manager->memory;
FT_Face face;
FTC_SBit sbit;
FTC_SFamilyClass clazz;
if ( (FT_UInt)(gindex - gnode->gindex) >= snode->count )
{
FT_ERROR(( "ftc_snode_load: invalid glyph index" ));
return FT_THROW( Invalid_Argument );
}
sbit = snode->sbits + ( gindex - gnode->gindex );
clazz = (FTC_SFamilyClass)family->clazz;
sbit->buffer = 0;
error = clazz->family_load_glyph( family, gindex, manager, &face );
if ( error )
goto BadGlyph;
{
FT_Int temp;
FT_GlyphSlot slot = face->glyph;
FT_Bitmap* bitmap = &slot->bitmap;
FT_Pos xadvance, yadvance; /* FT_GlyphSlot->advance.{x|y} */
if ( slot->format != FT_GLYPH_FORMAT_BITMAP )
{
FT_TRACE0(( "ftc_snode_load:"
" glyph loaded didn't return a bitmap\n" ));
goto BadGlyph;
}
/* Check whether our values fit into 8-bit containers! */
/* If this is not the case, our bitmap is too large */
/* and we will leave it as `missing' with sbit.buffer = 0 */
#define CHECK_CHAR( d ) ( temp = (FT_Char)d, (FT_Int) temp == (FT_Int) d )
#define CHECK_BYTE( d ) ( temp = (FT_Byte)d, (FT_UInt)temp == (FT_UInt)d )
/* horizontal advance in pixels */
xadvance = ( slot->advance.x + 32 ) >> 6;
yadvance = ( slot->advance.y + 32 ) >> 6;
if ( !CHECK_BYTE( bitmap->rows ) ||
!CHECK_BYTE( bitmap->width ) ||
!CHECK_CHAR( bitmap->pitch ) ||
!CHECK_CHAR( slot->bitmap_left ) ||
!CHECK_CHAR( slot->bitmap_top ) ||
!CHECK_CHAR( xadvance ) ||
!CHECK_CHAR( yadvance ) )
{
FT_TRACE2(( "ftc_snode_load:"
" glyph too large for small bitmap cache\n"));
goto BadGlyph;
}
sbit->width = (FT_Byte)bitmap->width;
sbit->height = (FT_Byte)bitmap->rows;
sbit->pitch = (FT_Char)bitmap->pitch;
sbit->left = (FT_Char)slot->bitmap_left;
sbit->top = (FT_Char)slot->bitmap_top;
sbit->xadvance = (FT_Char)xadvance;
sbit->yadvance = (FT_Char)yadvance;
sbit->format = (FT_Byte)bitmap->pixel_mode;
sbit->max_grays = (FT_Byte)(bitmap->num_grays - 1);
/* copy the bitmap into a new buffer -- ignore error */
error = ftc_sbit_copy_bitmap( sbit, bitmap, memory );
/* now, compute size */
if ( asize )
*asize = FT_ABS( sbit->pitch ) * sbit->height;
} /* glyph loading successful */
/* ignore the errors that might have occurred -- */
/* we mark unloaded glyphs with `sbit.buffer == 0' */
/* and `width == 255', `height == 0' */
/* */
if ( error && FT_ERR_NEQ( error, Out_Of_Memory ) )
{
BadGlyph:
sbit->width = 255;
sbit->height = 0;
sbit->buffer = NULL;
error = FT_Err_Ok;
if ( asize )
*asize = 0;
}
return error;
}
| 164,851 |
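The FreeType fix above tightens the CHECK_CHAR/CHECK_BYTE macros so a glyph metric is only stored once it provably survives the narrowing cast into an 8-bit field. A standalone sketch of that round-trip test (fits_in_byte is an invented helper, not FreeType code):

#include <stdio.h>

/* A value fits in the narrow field only if casting it down and back
 * reproduces the original. */
static int fits_in_byte(int d) {
    unsigned char temp = (unsigned char)d;
    return (int)temp == d;
}

int main(void) {
    printf("%d\n", fits_in_byte(200));  /* 1 */
    printf("%d\n", fits_in_byte(300));  /* 0: would be truncated to 44 */
    return 0;
}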
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void GetCSI(const v8::FunctionCallbackInfo<v8::Value>& args) {
WebLocalFrame* frame = WebLocalFrame::frameForCurrentContext();
if (frame) {
WebDataSource* data_source = frame->dataSource();
if (data_source) {
DocumentState* document_state =
DocumentState::FromDataSource(data_source);
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Object> csi = v8::Object::New(isolate);
base::Time now = base::Time::Now();
base::Time start = document_state->request_time().is_null() ?
document_state->start_load_time() :
document_state->request_time();
base::Time onload = document_state->finish_document_load_time();
base::TimeDelta page = now - start;
csi->Set(v8::String::NewFromUtf8(isolate, "startE"),
v8::Number::New(isolate, floor(start.ToDoubleT() * 1000)));
csi->Set(v8::String::NewFromUtf8(isolate, "onloadT"),
v8::Number::New(isolate, floor(onload.ToDoubleT() * 1000)));
csi->Set(v8::String::NewFromUtf8(isolate, "pageT"),
v8::Number::New(isolate, page.InMillisecondsF()));
csi->Set(
v8::String::NewFromUtf8(isolate, "tran"),
v8::Number::New(
isolate, GetCSITransitionType(data_source->navigationType())));
args.GetReturnValue().Set(csi);
return;
}
}
args.GetReturnValue().SetNull();
return;
}
Commit Message: Cache csi info before passing it to JS setters.
JS setters invalidate the pointers frame, data_source and document_state.
BUG=590455
Review URL: https://codereview.chromium.org/1751553002
Cr-Commit-Position: refs/heads/master@{#379047}
CWE ID: | static void GetCSI(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().SetNull();
WebLocalFrame* frame = WebLocalFrame::frameForCurrentContext();
if (!frame) {
return;
}
WebDataSource* data_source = frame->dataSource();
if (!data_source) {
return;
}
DocumentState* document_state = DocumentState::FromDataSource(data_source);
if (!document_state) {
return;
}
base::Time now = base::Time::Now();
base::Time start = document_state->request_time().is_null()
? document_state->start_load_time()
: document_state->request_time();
base::Time onload = document_state->finish_document_load_time();
base::TimeDelta page = now - start;
int navigation_type = GetCSITransitionType(data_source->navigationType());
// Important: |frame|, |data_source| and |document_state| should not be
// referred to below this line, as JS setters below can invalidate these
// pointers.
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> ctx = isolate->GetCurrentContext();
v8::Local<v8::Object> csi = v8::Object::New(isolate);
if (!csi->Set(ctx, v8::String::NewFromUtf8(isolate, "startE",
v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::Number::New(isolate, floor(start.ToDoubleT() * 1000)))
.FromMaybe(false)) {
return;
}
if (!csi->Set(ctx, v8::String::NewFromUtf8(isolate, "onloadT",
v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::Number::New(isolate, floor(onload.ToDoubleT() * 1000)))
.FromMaybe(false)) {
return;
}
if (!csi->Set(ctx, v8::String::NewFromUtf8(isolate, "pageT",
v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::Number::New(isolate, page.InMillisecondsF()))
.FromMaybe(false)) {
return;
}
if (!csi->Set(ctx, v8::String::NewFromUtf8(isolate, "tran",
v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::Number::New(isolate, navigation_type))
.FromMaybe(false)) {
return;
}
args.GetReturnValue().Set(csi);
}
| 172,117 |
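The GetCSI record above is about re-entrancy: each V8 setter can run script that invalidates frame, data_source and document_state, so the fix copies every needed value before touching V8. The sketch below models that hazard with a plain C callback that frees shared state; doc_state and run_untrusted_callback are invented names, not Blink APIs.

#include <stdio.h>
#include <stdlib.h>

struct doc_state { double onload_ms; };
static struct doc_state *g_state;

/* Stand-in for a JS setter re-entering the engine: it may destroy the
 * state the caller was still pointing into. */
static void run_untrusted_callback(void) {
    free(g_state);
    g_state = NULL;
}

int main(void) {
    g_state = malloc(sizeof(*g_state));
    if (!g_state)
        return 1;
    g_state->onload_ms = 123.0;

    double cached = g_state->onload_ms;  /* copy before calling out */
    run_untrusted_callback();            /* g_state is gone now */
    printf("%.1f\n", cached);            /* safe: uses the cached copy */
    return 0;
}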
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: netdutils::Status XfrmController::ipSecSetEncapSocketOwner(const android::base::unique_fd& socket,
int newUid, uid_t callerUid) {
ALOGD("XfrmController:%s, line=%d", __FUNCTION__, __LINE__);
const int fd = socket.get();
struct stat info;
if (fstat(fd, &info)) {
return netdutils::statusFromErrno(errno, "Failed to stat socket file descriptor");
}
if (info.st_uid != callerUid) {
return netdutils::statusFromErrno(EPERM, "fchown disabled for non-owner calls");
}
if (S_ISSOCK(info.st_mode) == 0) {
return netdutils::statusFromErrno(EINVAL, "File descriptor was not a socket");
}
int optval;
socklen_t optlen;
netdutils::Status status =
getSyscallInstance().getsockopt(Fd(socket), IPPROTO_UDP, UDP_ENCAP, &optval, &optlen);
if (status != netdutils::status::ok) {
return status;
}
if (optval != UDP_ENCAP_ESPINUDP && optval != UDP_ENCAP_ESPINUDP_NON_IKE) {
return netdutils::statusFromErrno(EINVAL, "Socket did not have UDP-encap sockopt set");
}
if (fchown(fd, newUid, -1)) {
return netdutils::statusFromErrno(errno, "Failed to fchown socket file descriptor");
}
return netdutils::status::ok;
}
Commit Message: Set optlen for UDP-encap check in XfrmController
When setting the socket owner for an encap socket XfrmController will
first attempt to verify that the socket has the UDP-encap socket option
set. When doing so it would pass in an uninitialized optlen parameter
which could cause the call to not modify the option value if the optlen
happened to be too short. So for example if the stack happened to
contain a zero where optlen was located the check would fail and the
socket owner would not be changed.
Fix this by setting optlen to the size of the option value parameter.
Test: run cts -m CtsNetTestCases
BUG: 111650288
Change-Id: I57b6e9dba09c1acda71e3ec2084652e961667bd9
(cherry picked from commit fc42a105147310bd680952d4b71fe32974bd8506)
CWE ID: CWE-909 | netdutils::Status XfrmController::ipSecSetEncapSocketOwner(const android::base::unique_fd& socket,
int newUid, uid_t callerUid) {
ALOGD("XfrmController:%s, line=%d", __FUNCTION__, __LINE__);
const int fd = socket.get();
struct stat info;
if (fstat(fd, &info)) {
return netdutils::statusFromErrno(errno, "Failed to stat socket file descriptor");
}
if (info.st_uid != callerUid) {
return netdutils::statusFromErrno(EPERM, "fchown disabled for non-owner calls");
}
if (S_ISSOCK(info.st_mode) == 0) {
return netdutils::statusFromErrno(EINVAL, "File descriptor was not a socket");
}
int optval;
socklen_t optlen = sizeof(optval);
netdutils::Status status =
getSyscallInstance().getsockopt(Fd(socket), IPPROTO_UDP, UDP_ENCAP, &optval, &optlen);
if (status != netdutils::status::ok) {
return status;
}
if (optval != UDP_ENCAP_ESPINUDP && optval != UDP_ENCAP_ESPINUDP_NON_IKE) {
return netdutils::statusFromErrno(EINVAL, "Socket did not have UDP-encap sockopt set");
}
if (fchown(fd, newUid, -1)) {
return netdutils::statusFromErrno(errno, "Failed to fchown socket file descriptor");
}
return netdutils::status::ok;
}
| 174,073 |
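The XfrmController record above hinges on getsockopt()'s value-result optlen argument: the kernel reads the buffer size from *optlen before writing the option value back, so (as the commit message explains) a too-short uninitialized length can leave the option value unmodified and make the UDP_ENCAP check fail. A minimal standalone demonstration of the correct initialization; it queries SO_TYPE rather than UDP_ENCAP so it runs without netd.

#include <stdio.h>
#include <sys/socket.h>

int main(void) {
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int optval = 0;
    socklen_t optlen = sizeof(optval);   /* the initialization the bug was missing */
    if (fd >= 0 && getsockopt(fd, SOL_SOCKET, SO_TYPE, &optval, &optlen) == 0)
        printf("SO_TYPE=%d, optlen=%u\n", optval, (unsigned)optlen);
    return 0;
}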
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: string16 ExtensionGlobalError::GenerateMessageSection(
const ExtensionIdSet* extensions,
int template_message_id) {
CHECK(extensions);
CHECK(template_message_id);
string16 message;
for (ExtensionIdSet::const_iterator iter = extensions->begin();
iter != extensions->end(); ++iter) {
const Extension* e = extension_service_->GetExtensionById(*iter, true);
message += l10n_util::GetStringFUTF16(
template_message_id,
string16(ASCIIToUTF16(e->name())),
l10n_util::GetStringUTF16(IDS_SHORT_PRODUCT_NAME));
}
return message;
}
Commit Message: [i18n-fixlet] Make strings branding specific in extension code.
IDS_EXTENSIONS_UNINSTALL
IDS_EXTENSIONS_INCOGNITO_WARNING
IDS_EXTENSION_INSTALLED_HEADING
IDS_EXTENSION_ALERT_ITEM_EXTERNAL And fix a $1 $1 bug.
IDS_EXTENSION_INLINE_INSTALL_PROMPT_TITLE
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9107061
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@118018 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | string16 ExtensionGlobalError::GenerateMessageSection(
const ExtensionIdSet* extensions,
int template_message_id) {
CHECK(extensions);
CHECK(template_message_id);
string16 message;
for (ExtensionIdSet::const_iterator iter = extensions->begin();
iter != extensions->end(); ++iter) {
const Extension* e = extension_service_->GetExtensionById(*iter, true);
message += l10n_util::GetStringFUTF16(template_message_id,
string16(ASCIIToUTF16(e->name())));
}
return message;
}
| 170,980 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void Document::InitContentSecurityPolicy(
ContentSecurityPolicy* csp,
const ContentSecurityPolicy* policy_to_inherit) {
SetContentSecurityPolicy(csp ? csp : ContentSecurityPolicy::Create());
GetContentSecurityPolicy()->BindToExecutionContext(this);
if (policy_to_inherit) {
GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit);
} else if (frame_) {
Frame* inherit_from = frame_->Tree().Parent() ? frame_->Tree().Parent()
: frame_->Client()->Opener();
if (inherit_from && frame_ != inherit_from) {
DCHECK(inherit_from->GetSecurityContext() &&
inherit_from->GetSecurityContext()->GetContentSecurityPolicy());
policy_to_inherit =
inherit_from->GetSecurityContext()->GetContentSecurityPolicy();
if (url_.IsEmpty() || url_.ProtocolIsAbout() || url_.ProtocolIsData() ||
url_.ProtocolIs("blob") || url_.ProtocolIs("filesystem")) {
GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit);
}
}
}
if (policy_to_inherit && IsPluginDocument())
GetContentSecurityPolicy()->CopyPluginTypesFrom(policy_to_inherit);
}
Commit Message: Inherit CSP when self-navigating to local-scheme URL
As the linked bug example shows, we should inherit CSP when we navigate
to a local-scheme URL (even if we are in a main browsing context).
Bug: 799747
Change-Id: I8413aa8e8049461ebcf0ffbf7b04c41d1340af02
Reviewed-on: https://chromium-review.googlesource.com/c/1234337
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#597889}
CWE ID: | void Document::InitContentSecurityPolicy(
ContentSecurityPolicy* csp,
const ContentSecurityPolicy* policy_to_inherit,
const ContentSecurityPolicy* previous_document_csp) {
SetContentSecurityPolicy(csp ? csp : ContentSecurityPolicy::Create());
GetContentSecurityPolicy()->BindToExecutionContext(this);
if (policy_to_inherit) {
GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit);
} else {
if (frame_) {
Frame* inherit_from = frame_->Tree().Parent()
? frame_->Tree().Parent()
: frame_->Client()->Opener();
if (inherit_from && frame_ != inherit_from) {
DCHECK(inherit_from->GetSecurityContext() &&
inherit_from->GetSecurityContext()->GetContentSecurityPolicy());
policy_to_inherit =
inherit_from->GetSecurityContext()->GetContentSecurityPolicy();
}
}
// If we don't have an opener or parent, inherit from the previous
// document CSP.
if (!policy_to_inherit)
policy_to_inherit = previous_document_csp;
// We should inherit the relevant CSP if the document is loaded using
// a local-scheme url.
if (policy_to_inherit &&
(url_.IsEmpty() || url_.ProtocolIsAbout() || url_.ProtocolIsData() ||
url_.ProtocolIs("blob") || url_.ProtocolIs("filesystem")))
GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit);
}
if (policy_to_inherit && IsPluginDocument())
GetContentSecurityPolicy()->CopyPluginTypesFrom(policy_to_inherit);
}
| 172,615 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
OPERATION optype)
{
int i=0;
krb5_error_code st=0;
char *arg=NULL, *arg_val=NULL;
char **dptr=NULL;
unsigned int arg_val_len=0;
if (db_args) {
for (i=0; db_args[i]; ++i) {
arg = strtok_r(db_args[i], "=", &arg_val);
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
dptr = &xargs->tktpolicydn;
} else {
if (strcmp(arg, USERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL ||
xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->dn;
} else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->containerdn;
} else if (strcmp(arg, LINKDN_ARG) == 0) {
if (xargs->dn != NULL || xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->linkdn;
} else {
st = EINVAL;
k5_setmsg(context, st, _("unknown option: %s"), arg);
goto cleanup;
}
xargs->dn_from_kbd = TRUE;
if (arg_val == NULL || strlen(arg_val) == 0) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
}
if (arg_val == NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
arg_val_len = strlen(arg_val) + 1;
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
if ((st = krb5_ldap_name_to_policydn (context,
arg_val,
dptr)) != 0)
goto cleanup;
} else {
*dptr = k5memdup(arg_val, arg_val_len, &st);
if (*dptr == NULL)
goto cleanup;
}
}
}
cleanup:
return st;
}
Commit Message: Fix LDAP null deref on empty arg [CVE-2016-3119]
In the LDAP KDB module's process_db_args(), strtok_r() may return NULL
if there is an empty string in the db_args array. Check for this case
and avoid dereferencing a null pointer.
CVE-2016-3119:
In MIT krb5 1.6 and later, an authenticated attacker with permission
to modify a principal entry can cause kadmind to dereference a null
pointer by supplying an empty DB argument to the modify_principal
command, if kadmind is configured to use the LDAP KDB module.
CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:H/RL:OF/RC:ND
ticket: 8383 (new)
target_version: 1.14-next
target_version: 1.13-next
tags: pullup
CWE ID: | process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
OPERATION optype)
{
int i=0;
krb5_error_code st=0;
char *arg=NULL, *arg_val=NULL;
char **dptr=NULL;
unsigned int arg_val_len=0;
if (db_args) {
for (i=0; db_args[i]; ++i) {
arg = strtok_r(db_args[i], "=", &arg_val);
arg = (arg != NULL) ? arg : "";
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
dptr = &xargs->tktpolicydn;
} else {
if (strcmp(arg, USERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL ||
xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->dn;
} else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->containerdn;
} else if (strcmp(arg, LINKDN_ARG) == 0) {
if (xargs->dn != NULL || xargs->linkdn != NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->linkdn;
} else {
st = EINVAL;
k5_setmsg(context, st, _("unknown option: %s"), arg);
goto cleanup;
}
xargs->dn_from_kbd = TRUE;
if (arg_val == NULL || strlen(arg_val) == 0) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
}
if (arg_val == NULL) {
st = EINVAL;
k5_setmsg(context, st, _("%s option value missing"), arg);
goto cleanup;
}
arg_val_len = strlen(arg_val) + 1;
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
if ((st = krb5_ldap_name_to_policydn (context,
arg_val,
dptr)) != 0)
goto cleanup;
} else {
*dptr = k5memdup(arg_val, arg_val_len, &st);
if (*dptr == NULL)
goto cleanup;
}
}
}
cleanup:
return st;
}
| 167,379 |
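The fix in this record works because strtok_r() returns NULL when its input is empty or consists only of delimiters, and the NULL is normalized to "" before it reaches any strcmp(). A standalone sketch of that guard follows; the option name and the integer return convention are illustrative assumptions, not the krb5 KDB interface.
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>
/* Demonstrates why an unchecked strtok_r() result is dangerous: for an empty
 * input the first call returns NULL, and strcmp(NULL, ...) dereferences a
 * null pointer. The fixed code substitutes "" so later comparisons simply
 * fail to match and the option is rejected as unknown. */
static int classify_arg(char *db_arg)
{
    char *save = NULL;
    char *arg = strtok_r(db_arg, "=", &save);
    arg = (arg != NULL) ? arg : "";    /* same normalization as the fix */
    if (strcmp(arg, "tktpolicy") == 0)
        return 1;                      /* recognized option */
    return 0;                          /* unknown or empty option */
}
int main(void)
{
    char ok[] = "tktpolicy=default";
    char empty[] = "";                 /* would crash without the guard */
    printf("%d %d\n", classify_arg(ok), classify_arg(empty));
    return 0;
}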
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *restrict clone_info,CacheInfo *restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination,chunk) \
num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
status;
NexusInfo
**restrict cache_nexus,
**restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
CopyPixels(clone_info->pixels,cache_info->pixels,cache_info->columns*
cache_info->rows);
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes));
return(MagickTrue);
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,®ion,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,®ion,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
Commit Message:
CWE ID: CWE-189 | static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *restrict clone_info,CacheInfo *restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination,chunk) \
num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
status;
NexusInfo
**restrict cache_nexus,
**restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
(void) memcpy(clone_info->pixels,cache_info->pixels,cache_info->columns*
cache_info->rows*sizeof(*cache_info->pixels));
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes));
return(MagickTrue);
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,®ion,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,®ion,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
| 168,810 |
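The fixed fast path above spells out the memcpy byte count as columns*rows*sizeof(element) instead of delegating to a helper that took an element count. The sketch below shows that pattern with an explicit overflow check added for illustration; the helper name and the overflow check itself are assumptions of this sketch, not part of the ImageMagick change.
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
/* Copies a rows x columns buffer of fixed-size elements, refusing to copy if
 * the byte count would overflow size_t. Returns 0 on success, -1 on overflow. */
static int copy_pixel_buffer(void *dst, const void *src,
                             size_t columns, size_t rows, size_t elem_size)
{
    size_t count, bytes;
    if (columns != 0 && rows > SIZE_MAX / columns)
        return -1;                     /* columns*rows would overflow */
    count = columns * rows;
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return -1;                     /* count*elem_size would overflow */
    bytes = count * elem_size;
    memcpy(dst, src, bytes);
    return 0;
}
int main(void)
{
    unsigned int src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };
    if (copy_pixel_buffer(dst, src, 2, 2, sizeof(src[0])) == 0)
        printf("%u %u %u %u\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}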
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
int retval = 0;
int port_number;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
return;
}
port_number = edge_port->port->port_number;
if (edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
/* Adjust buffer length/pointer */
--urb->actual_length;
++data;
}
if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
edge_tty_recv(edge_port->port, data,
urb->actual_length);
edge_port->port->icount.rx += urb->actual_length;
}
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
Commit Message: USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <[email protected]> # 2.6.30
Signed-off-by: Johan Hovold <[email protected]>
CWE ID: CWE-191 | static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
int retval = 0;
int port_number;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
return;
}
port_number = edge_port->port->port_number;
if (urb->actual_length > 0 && edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
/* Adjust buffer length/pointer */
--urb->actual_length;
++data;
}
if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
edge_tty_recv(edge_port->port, data,
urb->actual_length);
edge_port->port->icount.rx += urb->actual_length;
}
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
| 168,189 |
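The guard added in this record prevents --urb->actual_length from wrapping a 32-bit length of zero to a huge value, which is what leaked memory past the transfer buffer. A standalone sketch of the same pattern follows; the struct-free interface and the names are illustrative assumptions, not the kernel's URB API.
#include <stdint.h>
#include <stdio.h>
/* Consumes an optional one-byte LSR status prefix from a receive buffer.
 * Without the length check, len == 0 would wrap to UINT32_MAX and the caller
 * would then read far past the end of the buffer. */
static uint32_t consume_lsr_prefix(const unsigned char **data, uint32_t len,
                                   int lsr_pending)
{
    if (len > 0 && lsr_pending) {   /* the guard added by the fix */
        (*data)++;                  /* skip the status byte */
        len--;
    }
    return len;                     /* remaining payload length */
}
int main(void)
{
    unsigned char buf[3] = { 0x10, 'h', 'i' };
    const unsigned char *p = buf;
    uint32_t remaining = consume_lsr_prefix(&p, 3, 1);
    printf("remaining=%u first=%c\n", (unsigned)remaining, *p);
    p = buf;
    remaining = consume_lsr_prefix(&p, 0, 1);   /* stays 0, no wrap-around */
    printf("remaining=%u\n", (unsigned)remaining);
    return 0;
}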
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: std::vector<GetLengthType> CSoundFile::GetLength(enmGetLengthResetMode adjustMode, GetLengthTarget target)
{
std::vector<GetLengthType> results;
GetLengthType retval;
retval.startOrder = target.startOrder;
retval.startRow = target.startRow;
const bool hasSearchTarget = target.mode != GetLengthTarget::NoTarget;
const bool adjustSamplePos = (adjustMode & eAdjustSamplePositions) == eAdjustSamplePositions;
SEQUENCEINDEX sequence = target.sequence;
if(sequence >= Order.GetNumSequences()) sequence = Order.GetCurrentSequenceIndex();
const ModSequence &orderList = Order(sequence);
GetLengthMemory memory(*this);
CSoundFile::PlayState &playState = *memory.state;
RowVisitor visitedRows(*this, sequence);
playState.m_nNextRow = playState.m_nRow = target.startRow;
playState.m_nNextOrder = playState.m_nCurrentOrder = target.startOrder;
std::bitset<MAX_EFFECTS> forbiddenCommands;
std::bitset<MAX_VOLCMDS> forbiddenVolCommands;
if(adjustSamplePos)
{
forbiddenCommands.set(CMD_ARPEGGIO); forbiddenCommands.set(CMD_PORTAMENTOUP);
forbiddenCommands.set(CMD_PORTAMENTODOWN); forbiddenCommands.set(CMD_XFINEPORTAUPDOWN);
forbiddenCommands.set(CMD_NOTESLIDEUP); forbiddenCommands.set(CMD_NOTESLIDEUPRETRIG);
forbiddenCommands.set(CMD_NOTESLIDEDOWN); forbiddenCommands.set(CMD_NOTESLIDEDOWNRETRIG);
forbiddenVolCommands.set(VOLCMD_PORTAUP); forbiddenVolCommands.set(VOLCMD_PORTADOWN);
for(CHANNELINDEX i = 0; i < GetNumChannels(); i++)
{
if(ChnSettings[i].dwFlags[CHN_MUTE]) memory.chnSettings[i].ticksToRender = GetLengthMemory::IGNORE_CHANNEL;
}
if(target.mode == GetLengthTarget::SeekPosition && target.pos.order < orderList.size())
{
const PATTERNINDEX seekPat = orderList[target.pos.order];
if(Patterns.IsValidPat(seekPat) && Patterns[seekPat].IsValidRow(target.pos.row))
{
const ModCommand *m = Patterns[seekPat].GetRow(target.pos.row);
for(CHANNELINDEX i = 0; i < GetNumChannels(); i++, m++)
{
if(m->note == NOTE_NOTECUT || m->note == NOTE_KEYOFF || (m->note == NOTE_FADE && GetNumInstruments())
|| (m->IsNote() && !m->IsPortamento()))
{
memory.chnSettings[i].ticksToRender = GetLengthMemory::IGNORE_CHANNEL;
}
}
}
}
}
uint32 oldTickDuration = 0;
for (;;)
{
if(target.mode == GetLengthTarget::SeekSeconds && memory.elapsedTime >= target.time)
{
retval.targetReached = true;
break;
}
uint32 rowDelay = 0, tickDelay = 0;
playState.m_nRow = playState.m_nNextRow;
playState.m_nCurrentOrder = playState.m_nNextOrder;
if(orderList.IsValidPat(playState.m_nCurrentOrder) && playState.m_nRow >= Patterns[orderList[playState.m_nCurrentOrder]].GetNumRows())
{
playState.m_nRow = 0;
if(m_playBehaviour[kFT2LoopE60Restart])
{
playState.m_nRow = playState.m_nNextPatStartRow;
playState.m_nNextPatStartRow = 0;
}
playState.m_nCurrentOrder = ++playState.m_nNextOrder;
}
playState.m_nPattern = playState.m_nCurrentOrder < orderList.size() ? orderList[playState.m_nCurrentOrder] : orderList.GetInvalidPatIndex();
bool positionJumpOnThisRow = false;
bool patternBreakOnThisRow = false;
bool patternLoopEndedOnThisRow = false, patternLoopStartedOnThisRow = false;
if(!Patterns.IsValidPat(playState.m_nPattern) && playState.m_nPattern != orderList.GetInvalidPatIndex() && target.mode == GetLengthTarget::SeekPosition && playState.m_nCurrentOrder == target.pos.order)
{
retval.targetReached = true;
break;
}
while(playState.m_nPattern >= Patterns.Size())
{
if((playState.m_nPattern == orderList.GetInvalidPatIndex()) || (playState.m_nCurrentOrder >= orderList.size()))
{
if(playState.m_nCurrentOrder == orderList.GetRestartPos())
break;
else
playState.m_nCurrentOrder = orderList.GetRestartPos();
} else
{
playState.m_nCurrentOrder++;
}
playState.m_nPattern = (playState.m_nCurrentOrder < orderList.size()) ? orderList[playState.m_nCurrentOrder] : orderList.GetInvalidPatIndex();
playState.m_nNextOrder = playState.m_nCurrentOrder;
if((!Patterns.IsValidPat(playState.m_nPattern)) && visitedRows.IsVisited(playState.m_nCurrentOrder, 0, true))
{
if(!hasSearchTarget || !visitedRows.GetFirstUnvisitedRow(playState.m_nNextOrder, playState.m_nRow, true))
{
break;
} else
{
retval.duration = memory.elapsedTime;
results.push_back(retval);
retval.startRow = playState.m_nRow;
retval.startOrder = playState.m_nNextOrder;
memory.Reset();
playState.m_nCurrentOrder = playState.m_nNextOrder;
playState.m_nPattern = orderList[playState.m_nCurrentOrder];
playState.m_nNextRow = playState.m_nRow;
break;
}
}
}
if(playState.m_nNextOrder == ORDERINDEX_INVALID)
{
break;
}
if(!Patterns.IsValidPat(playState.m_nPattern))
{
if(playState.m_nCurrentOrder == orderList.GetRestartPos())
{
if(!hasSearchTarget || !visitedRows.GetFirstUnvisitedRow(playState.m_nNextOrder, playState.m_nRow, true))
{
break;
} else
{
retval.duration = memory.elapsedTime;
results.push_back(retval);
retval.startRow = playState.m_nRow;
retval.startOrder = playState.m_nNextOrder;
memory.Reset();
playState.m_nNextRow = playState.m_nRow;
continue;
}
}
playState.m_nNextOrder = playState.m_nCurrentOrder + 1;
continue;
}
if(playState.m_nRow >= Patterns[playState.m_nPattern].GetNumRows())
playState.m_nRow = 0;
if(target.mode == GetLengthTarget::SeekPosition && playState.m_nCurrentOrder == target.pos.order && playState.m_nRow == target.pos.row)
{
retval.targetReached = true;
break;
}
if(visitedRows.IsVisited(playState.m_nCurrentOrder, playState.m_nRow, true))
{
if(!hasSearchTarget || !visitedRows.GetFirstUnvisitedRow(playState.m_nNextOrder, playState.m_nRow, true))
{
break;
} else
{
retval.duration = memory.elapsedTime;
results.push_back(retval);
retval.startRow = playState.m_nRow;
retval.startOrder = playState.m_nNextOrder;
memory.Reset();
playState.m_nNextRow = playState.m_nRow;
continue;
}
}
retval.endOrder = playState.m_nCurrentOrder;
retval.endRow = playState.m_nRow;
playState.m_nNextRow = playState.m_nRow + 1;
if(playState.m_nRow >= Patterns[playState.m_nPattern].GetNumRows())
{
playState.m_nRow = 0;
}
if(!playState.m_nRow)
{
for(CHANNELINDEX chn = 0; chn < GetNumChannels(); chn++)
{
memory.chnSettings[chn].patLoop = memory.elapsedTime;
memory.chnSettings[chn].patLoopSmp = playState.m_lTotalSampleCount;
}
}
ModChannel *pChn = playState.Chn;
const ModCommand *p = Patterns[playState.m_nPattern].GetpModCommand(playState.m_nRow, 0);
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, p++)
{
if(m_playBehaviour[kST3NoMutedChannels] && ChnSettings[nChn].dwFlags[CHN_MUTE]) // not even effects are processed on muted S3M channels
continue;
if(p->IsPcNote())
{
#ifndef NO_PLUGINS
if((adjustMode & eAdjust) && p->instr > 0 && p->instr <= MAX_MIXPLUGINS)
{
memory.plugParams[std::make_pair(p->instr, p->GetValueVolCol())] = p->GetValueEffectCol();
}
#endif // NO_PLUGINS
pChn[nChn].rowCommand.Clear();
continue;
}
pChn[nChn].rowCommand = *p;
switch(p->command)
{
case CMD_SPEED:
SetSpeed(playState, p->param);
break;
case CMD_TEMPO:
if(m_playBehaviour[kMODVBlankTiming])
{
if(p->param != 0) SetSpeed(playState, p->param);
}
break;
case CMD_S3MCMDEX:
if((p->param & 0xF0) == 0x60)
{
tickDelay += (p->param & 0x0F);
} else if((p->param & 0xF0) == 0xE0 && !rowDelay)
{
if(!(GetType() & MOD_TYPE_S3M) || (p->param & 0x0F) != 0)
{
rowDelay = 1 + (p->param & 0x0F);
}
}
break;
case CMD_MODCMDEX:
if((p->param & 0xF0) == 0xE0)
{
rowDelay = 1 + (p->param & 0x0F);
}
break;
}
}
if(rowDelay == 0) rowDelay = 1;
const uint32 numTicks = (playState.m_nMusicSpeed + tickDelay) * rowDelay;
const uint32 nonRowTicks = numTicks - rowDelay;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); pChn++, nChn++) if(!pChn->rowCommand.IsEmpty())
{
if(m_playBehaviour[kST3NoMutedChannels] && ChnSettings[nChn].dwFlags[CHN_MUTE]) // not even effects are processed on muted S3M channels
continue;
ModCommand::COMMAND command = pChn->rowCommand.command;
ModCommand::PARAM param = pChn->rowCommand.param;
ModCommand::NOTE note = pChn->rowCommand.note;
if (pChn->rowCommand.instr)
{
pChn->nNewIns = pChn->rowCommand.instr;
pChn->nLastNote = NOTE_NONE;
memory.chnSettings[nChn].vol = 0xFF;
}
if (pChn->rowCommand.IsNote()) pChn->nLastNote = note;
if(pChn->rowCommand.IsNote() || pChn->rowCommand.instr)
{
SAMPLEINDEX smp = 0;
if(GetNumInstruments())
{
ModInstrument *pIns;
if(pChn->nNewIns <= GetNumInstruments() && (pIns = Instruments[pChn->nNewIns]) != nullptr)
{
if(pIns->dwFlags[INS_SETPANNING])
pChn->nPan = pIns->nPan;
if(ModCommand::IsNote(note))
smp = pIns->Keyboard[note - NOTE_MIN];
}
} else
{
smp = pChn->nNewIns;
}
if(smp > 0 && smp <= GetNumSamples() && Samples[smp].uFlags[CHN_PANNING])
{
pChn->nPan = Samples[smp].nPan;
}
}
switch(pChn->rowCommand.volcmd)
{
case VOLCMD_VOLUME:
memory.chnSettings[nChn].vol = pChn->rowCommand.vol;
break;
case VOLCMD_VOLSLIDEUP:
case VOLCMD_VOLSLIDEDOWN:
if(pChn->rowCommand.vol != 0)
pChn->nOldVolParam = pChn->rowCommand.vol;
break;
}
switch(command)
{
case CMD_POSITIONJUMP:
positionJumpOnThisRow = true;
playState.m_nNextOrder = static_cast<ORDERINDEX>(CalculateXParam(playState.m_nPattern, playState.m_nRow, nChn));
playState.m_nNextPatStartRow = 0; // FT2 E60 bug
if(!patternBreakOnThisRow || (GetType() & (MOD_TYPE_MOD | MOD_TYPE_XM)))
playState.m_nNextRow = 0;
if (adjustMode & eAdjust)
{
pChn->nPatternLoopCount = 0;
pChn->nPatternLoop = 0;
}
break;
case CMD_PATTERNBREAK:
{
ROWINDEX row = PatternBreak(playState, nChn, param);
if(row != ROWINDEX_INVALID)
{
patternBreakOnThisRow = true;
playState.m_nNextRow = row;
if(!positionJumpOnThisRow)
{
playState.m_nNextOrder = playState.m_nCurrentOrder + 1;
}
if(adjustMode & eAdjust)
{
pChn->nPatternLoopCount = 0;
pChn->nPatternLoop = 0;
}
}
}
break;
case CMD_TEMPO:
if(!m_playBehaviour[kMODVBlankTiming])
{
TEMPO tempo(CalculateXParam(playState.m_nPattern, playState.m_nRow, nChn), 0);
if ((adjustMode & eAdjust) && (GetType() & (MOD_TYPE_S3M | MOD_TYPE_IT | MOD_TYPE_MPT)))
{
if (tempo.GetInt()) pChn->nOldTempo = static_cast<uint8>(tempo.GetInt()); else tempo.Set(pChn->nOldTempo);
}
if (tempo.GetInt() >= 0x20) playState.m_nMusicTempo = tempo;
else
{
TEMPO tempoDiff((tempo.GetInt() & 0x0F) * nonRowTicks, 0);
if ((tempo.GetInt() & 0xF0) == 0x10)
{
playState.m_nMusicTempo += tempoDiff;
} else
{
if(tempoDiff < playState.m_nMusicTempo)
playState.m_nMusicTempo -= tempoDiff;
else
playState.m_nMusicTempo.Set(0);
}
}
TEMPO tempoMin = GetModSpecifications().GetTempoMin(), tempoMax = GetModSpecifications().GetTempoMax();
if(m_playBehaviour[kTempoClamp]) // clamp tempo correctly in compatible mode
{
tempoMax.Set(255);
}
Limit(playState.m_nMusicTempo, tempoMin, tempoMax);
}
break;
case CMD_S3MCMDEX:
switch(param & 0xF0)
{
case 0x90:
if(param <= 0x91)
{
pChn->dwFlags.set(CHN_SURROUND, param == 0x91);
}
break;
case 0xA0:
pChn->nOldHiOffset = param & 0x0F;
break;
case 0xB0:
if (param & 0x0F)
{
patternLoopEndedOnThisRow = true;
} else
{
CHANNELINDEX firstChn = nChn, lastChn = nChn;
if(GetType() == MOD_TYPE_S3M)
{
firstChn = 0;
lastChn = GetNumChannels() - 1;
}
for(CHANNELINDEX c = firstChn; c <= lastChn; c++)
{
memory.chnSettings[c].patLoop = memory.elapsedTime;
memory.chnSettings[c].patLoopSmp = playState.m_lTotalSampleCount;
memory.chnSettings[c].patLoopStart = playState.m_nRow;
}
patternLoopStartedOnThisRow = true;
}
break;
case 0xF0:
pChn->nActiveMacro = param & 0x0F;
break;
}
break;
case CMD_MODCMDEX:
switch(param & 0xF0)
{
case 0x60:
if (param & 0x0F)
{
playState.m_nNextPatStartRow = memory.chnSettings[nChn].patLoopStart; // FT2 E60 bug
patternLoopEndedOnThisRow = true;
} else
{
patternLoopStartedOnThisRow = true;
memory.chnSettings[nChn].patLoop = memory.elapsedTime;
memory.chnSettings[nChn].patLoopSmp = playState.m_lTotalSampleCount;
memory.chnSettings[nChn].patLoopStart = playState.m_nRow;
}
break;
case 0xF0:
pChn->nActiveMacro = param & 0x0F;
break;
}
break;
case CMD_XFINEPORTAUPDOWN:
if(((param & 0xF0) == 0xA0) && !m_playBehaviour[kFT2RestrictXCommand]) pChn->nOldHiOffset = param & 0x0F;
break;
}
if (!(adjustMode & eAdjust)) continue;
switch(command)
{
case CMD_PORTAMENTOUP:
if(param)
{
if(!m_playBehaviour[kFT2PortaUpDownMemory])
pChn->nOldPortaDown = param;
pChn->nOldPortaUp = param;
}
break;
case CMD_PORTAMENTODOWN:
if(param)
{
if(!m_playBehaviour[kFT2PortaUpDownMemory])
pChn->nOldPortaUp = param;
pChn->nOldPortaDown = param;
}
break;
case CMD_TONEPORTAMENTO:
if (param) pChn->nPortamentoSlide = param << 2;
break;
case CMD_OFFSET:
if (param) pChn->oldOffset = param << 8;
break;
case CMD_VOLUMESLIDE:
case CMD_TONEPORTAVOL:
if (param) pChn->nOldVolumeSlide = param;
break;
case CMD_VOLUME:
memory.chnSettings[nChn].vol = param;
break;
case CMD_GLOBALVOLUME:
if(!(GetType() & GLOBALVOL_7BIT_FORMATS) && param < 128) param *= 2;
if(param <= 128)
{
playState.m_nGlobalVolume = param * 2;
} else if(!(GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT | MOD_TYPE_S3M)))
{
playState.m_nGlobalVolume = 256;
}
break;
case CMD_GLOBALVOLSLIDE:
if(m_playBehaviour[kPerChannelGlobalVolSlide])
{
if (param) pChn->nOldGlobalVolSlide = param; else param = pChn->nOldGlobalVolSlide;
} else
{
if (param) playState.Chn[0].nOldGlobalVolSlide = param; else param = playState.Chn[0].nOldGlobalVolSlide;
}
if (((param & 0x0F) == 0x0F) && (param & 0xF0))
{
param >>= 4;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume += param << 1;
} else if (((param & 0xF0) == 0xF0) && (param & 0x0F))
{
param = (param & 0x0F) << 1;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume -= param;
} else if (param & 0xF0)
{
param >>= 4;
param <<= 1;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume += param * nonRowTicks;
} else
{
param = (param & 0x0F) << 1;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume -= param * nonRowTicks;
}
Limit(playState.m_nGlobalVolume, 0, 256);
break;
case CMD_CHANNELVOLUME:
if (param <= 64) pChn->nGlobalVol = param;
break;
case CMD_CHANNELVOLSLIDE:
{
if (param) pChn->nOldChnVolSlide = param; else param = pChn->nOldChnVolSlide;
int32 volume = pChn->nGlobalVol;
if((param & 0x0F) == 0x0F && (param & 0xF0))
volume += (param >> 4); // Fine Up
else if((param & 0xF0) == 0xF0 && (param & 0x0F))
volume -= (param & 0x0F); // Fine Down
else if(param & 0x0F) // Down
volume -= (param & 0x0F) * nonRowTicks;
else // Up
volume += ((param & 0xF0) >> 4) * nonRowTicks;
Limit(volume, 0, 64);
pChn->nGlobalVol = volume;
}
break;
case CMD_PANNING8:
Panning(pChn, param, Pan8bit);
break;
case CMD_MODCMDEX:
if(param < 0x10)
{
for(CHANNELINDEX chn = 0; chn < GetNumChannels(); chn++)
{
playState.Chn[chn].dwFlags.set(CHN_AMIGAFILTER, !(param & 1));
}
}
MPT_FALLTHROUGH;
case CMD_S3MCMDEX:
if((param & 0xF0) == 0x80)
{
Panning(pChn, (param & 0x0F), Pan4bit);
}
break;
case CMD_VIBRATOVOL:
if (param) pChn->nOldVolumeSlide = param;
param = 0;
MPT_FALLTHROUGH;
case CMD_VIBRATO:
Vibrato(pChn, param);
break;
case CMD_FINEVIBRATO:
FineVibrato(pChn, param);
break;
case CMD_TREMOLO:
Tremolo(pChn, param);
break;
case CMD_PANBRELLO:
Panbrello(pChn, param);
break;
}
switch(pChn->rowCommand.volcmd)
{
case VOLCMD_PANNING:
Panning(pChn, pChn->rowCommand.vol, Pan6bit);
break;
case VOLCMD_VIBRATOSPEED:
if(m_playBehaviour[kFT2VolColVibrato])
pChn->nVibratoSpeed = pChn->rowCommand.vol & 0x0F;
else
Vibrato(pChn, pChn->rowCommand.vol << 4);
break;
case VOLCMD_VIBRATODEPTH:
Vibrato(pChn, pChn->rowCommand.vol);
break;
}
switch(pChn->rowCommand.command)
{
case CMD_VIBRATO:
case CMD_FINEVIBRATO:
case CMD_VIBRATOVOL:
if(adjustMode & eAdjust)
{
uint32 vibTicks = ((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) && !m_SongFlags[SONG_ITOLDEFFECTS]) ? numTicks : nonRowTicks;
uint32 inc = pChn->nVibratoSpeed * vibTicks;
if(m_playBehaviour[kITVibratoTremoloPanbrello])
inc *= 4;
pChn->nVibratoPos += static_cast<uint8>(inc);
}
break;
case CMD_TREMOLO:
if(adjustMode & eAdjust)
{
uint32 tremTicks = ((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) && !m_SongFlags[SONG_ITOLDEFFECTS]) ? numTicks : nonRowTicks;
uint32 inc = pChn->nTremoloSpeed * tremTicks;
if(m_playBehaviour[kITVibratoTremoloPanbrello])
inc *= 4;
pChn->nTremoloPos += static_cast<uint8>(inc);
}
break;
case CMD_PANBRELLO:
if(adjustMode & eAdjust)
{
pChn->nPanbrelloPos += static_cast<uint8>(pChn->nPanbrelloSpeed * (numTicks - 1));
ProcessPanbrello(pChn);
}
break;
}
}
if(GetType() == MOD_TYPE_XM && playState.m_nMusicSpeed == uint16_max)
{
break;
}
playState.m_nCurrentRowsPerBeat = m_nDefaultRowsPerBeat;
if(Patterns[playState.m_nPattern].GetOverrideSignature())
{
playState.m_nCurrentRowsPerBeat = Patterns[playState.m_nPattern].GetRowsPerBeat();
}
const uint32 tickDuration = GetTickDuration(playState);
const uint32 rowDuration = tickDuration * numTicks;
memory.elapsedTime += static_cast<double>(rowDuration) / static_cast<double>(m_MixerSettings.gdwMixingFreq);
playState.m_lTotalSampleCount += rowDuration;
if(adjustSamplePos)
{
pChn = playState.Chn;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); pChn++, nChn++)
{
if(memory.chnSettings[nChn].ticksToRender == GetLengthMemory::IGNORE_CHANNEL)
continue;
uint32 startTick = 0;
const ModCommand &m = pChn->rowCommand;
uint32 paramHi = m.param >> 4, paramLo = m.param & 0x0F;
bool porta = m.command == CMD_TONEPORTAMENTO || m.command == CMD_TONEPORTAVOL || m.volcmd == VOLCMD_TONEPORTAMENTO;
bool stopNote = patternLoopStartedOnThisRow; // It's too much trouble to keep those pattern loops in sync...
if(m.instr) pChn->proTrackerOffset = 0;
if(m.IsNote())
{
if(porta && memory.chnSettings[nChn].incChanged)
{
pChn->increment = GetChannelIncrement(pChn, pChn->nPeriod, 0);
}
int32 setPan = pChn->nPan;
pChn->nNewNote = pChn->nLastNote;
if(pChn->nNewIns != 0) InstrumentChange(pChn, pChn->nNewIns, porta);
NoteChange(pChn, m.note, porta);
memory.chnSettings[nChn].incChanged = true;
if((m.command == CMD_MODCMDEX || m.command == CMD_S3MCMDEX) && (m.param & 0xF0) == 0xD0 && paramLo < numTicks)
{
startTick = paramLo;
} else if(m.command == CMD_DELAYCUT && paramHi < numTicks)
{
startTick = paramHi;
}
if(rowDelay > 1 && startTick != 0 && (GetType() & (MOD_TYPE_S3M | MOD_TYPE_IT | MOD_TYPE_MPT)))
{
startTick += (playState.m_nMusicSpeed + tickDelay) * (rowDelay - 1);
}
if(!porta) memory.chnSettings[nChn].ticksToRender = 0;
if(m.command == CMD_PANNING8
|| ((m.command == CMD_MODCMDEX || m.command == CMD_S3MCMDEX) && paramHi == 0x8)
|| m.volcmd == VOLCMD_PANNING)
{
pChn->nPan = setPan;
}
if(m.command == CMD_OFFSET)
{
bool isExtended = false;
SmpLength offset = CalculateXParam(playState.m_nPattern, playState.m_nRow, nChn, &isExtended);
if(!isExtended)
{
offset <<= 8;
if(offset == 0) offset = pChn->oldOffset;
offset += static_cast<SmpLength>(pChn->nOldHiOffset) << 16;
}
SampleOffset(*pChn, offset);
} else if(m.command == CMD_OFFSETPERCENTAGE)
{
SampleOffset(*pChn, Util::muldiv_unsigned(pChn->nLength, m.param, 255));
} else if(m.command == CMD_REVERSEOFFSET && pChn->pModSample != nullptr)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
ReverseSampleOffset(*pChn, m.param);
startTick = playState.m_nMusicSpeed - 1;
} else if(m.volcmd == VOLCMD_OFFSET)
{
if(m.vol <= CountOf(pChn->pModSample->cues) && pChn->pModSample != nullptr)
{
SmpLength offset;
if(m.vol == 0)
offset = pChn->oldOffset;
else
offset = pChn->oldOffset = pChn->pModSample->cues[m.vol - 1];
SampleOffset(*pChn, offset);
}
}
}
if(m.note == NOTE_KEYOFF || m.note == NOTE_NOTECUT || (m.note == NOTE_FADE && GetNumInstruments())
|| ((m.command == CMD_MODCMDEX || m.command == CMD_S3MCMDEX) && (m.param & 0xF0) == 0xC0 && paramLo < numTicks)
|| (m.command == CMD_DELAYCUT && paramLo != 0 && startTick + paramLo < numTicks))
{
stopNote = true;
}
if(m.command == CMD_VOLUME)
{
pChn->nVolume = m.param * 4;
} else if(m.volcmd == VOLCMD_VOLUME)
{
pChn->nVolume = m.vol * 4;
}
if(pChn->pModSample && !stopNote)
{
if(m.command < MAX_EFFECTS)
{
if(forbiddenCommands[m.command])
{
stopNote = true;
} else if(m.command == CMD_MODCMDEX)
{
switch(m.param & 0xF0)
{
case 0x10:
case 0x20:
stopNote = true;
}
}
}
if(m.volcmd < forbiddenVolCommands.size() && forbiddenVolCommands[m.volcmd])
{
stopNote = true;
}
}
if(stopNote)
{
pChn->Stop();
memory.chnSettings[nChn].ticksToRender = 0;
} else
{
if(oldTickDuration != tickDuration && oldTickDuration != 0)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
}
switch(m.command)
{
case CMD_TONEPORTAVOL:
case CMD_VOLUMESLIDE:
case CMD_VIBRATOVOL:
if(m.param || (GetType() != MOD_TYPE_MOD))
{
for(uint32 i = 0; i < numTicks; i++)
{
pChn->isFirstTick = (i == 0);
VolumeSlide(pChn, m.param);
}
}
break;
case CMD_MODCMDEX:
if((m.param & 0x0F) || (GetType() & (MOD_TYPE_XM | MOD_TYPE_MT2)))
{
pChn->isFirstTick = true;
switch(m.param & 0xF0)
{
case 0xA0: FineVolumeUp(pChn, m.param & 0x0F, false); break;
case 0xB0: FineVolumeDown(pChn, m.param & 0x0F, false); break;
}
}
break;
case CMD_S3MCMDEX:
if(m.param == 0x9E)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
pChn->dwFlags.reset(CHN_PINGPONGFLAG);
} else if(m.param == 0x9F)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
pChn->dwFlags.set(CHN_PINGPONGFLAG);
if(!pChn->position.GetInt() && pChn->nLength && (m.IsNote() || !pChn->dwFlags[CHN_LOOP]))
{
pChn->position.Set(pChn->nLength - 1, SamplePosition::fractMax);
}
} else if((m.param & 0xF0) == 0x70)
{
}
break;
}
pChn->isFirstTick = true;
switch(m.volcmd)
{
case VOLCMD_FINEVOLUP: FineVolumeUp(pChn, m.vol, m_playBehaviour[kITVolColMemory]); break;
case VOLCMD_FINEVOLDOWN: FineVolumeDown(pChn, m.vol, m_playBehaviour[kITVolColMemory]); break;
case VOLCMD_VOLSLIDEUP:
case VOLCMD_VOLSLIDEDOWN:
{
ModCommand::VOL vol = m.vol;
if(vol == 0 && m_playBehaviour[kITVolColMemory])
{
vol = pChn->nOldVolParam;
if(vol == 0)
break;
}
if(m.volcmd == VOLCMD_VOLSLIDEUP)
vol <<= 4;
for(uint32 i = 0; i < numTicks; i++)
{
pChn->isFirstTick = (i == 0);
VolumeSlide(pChn, vol);
}
}
break;
}
if(porta)
{
uint32 portaTick = memory.chnSettings[nChn].ticksToRender + startTick + 1;
memory.chnSettings[nChn].ticksToRender += numTicks;
memory.RenderChannel(nChn, tickDuration, portaTick);
} else
{
memory.chnSettings[nChn].ticksToRender += (numTicks - startTick);
}
}
}
}
oldTickDuration = tickDuration;
if(patternLoopEndedOnThisRow
&& (!m_playBehaviour[kFT2PatternLoopWithJumps] || !(positionJumpOnThisRow || patternBreakOnThisRow))
&& (!m_playBehaviour[kITPatternLoopWithJumps] || !positionJumpOnThisRow))
{
std::map<double, int> startTimes;
pChn = playState.Chn;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, pChn++)
{
ModCommand::COMMAND command = pChn->rowCommand.command;
ModCommand::PARAM param = pChn->rowCommand.param;
if((command == CMD_S3MCMDEX && param >= 0xB1 && param <= 0xBF)
|| (command == CMD_MODCMDEX && param >= 0x61 && param <= 0x6F))
{
const double start = memory.chnSettings[nChn].patLoop;
if(!startTimes[start]) startTimes[start] = 1;
startTimes[start] = mpt::lcm(startTimes[start], 1 + (param & 0x0F));
}
}
for(const auto &i : startTimes)
{
memory.elapsedTime += (memory.elapsedTime - i.first) * (double)(i.second - 1);
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, pChn++)
{
if(memory.chnSettings[nChn].patLoop == i.first)
{
playState.m_lTotalSampleCount += (playState.m_lTotalSampleCount - memory.chnSettings[nChn].patLoopSmp) * (i.second - 1);
if(m_playBehaviour[kITPatternLoopTargetReset] || (GetType() == MOD_TYPE_S3M))
{
memory.chnSettings[nChn].patLoop = memory.elapsedTime;
memory.chnSettings[nChn].patLoopSmp = playState.m_lTotalSampleCount;
memory.chnSettings[nChn].patLoopStart = playState.m_nRow + 1;
}
break;
}
}
}
if(GetType() == MOD_TYPE_IT)
{
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++)
{
if((pChn->rowCommand.command == CMD_S3MCMDEX && pChn->rowCommand.param >= 0xB1 && pChn->rowCommand.param <= 0xBF))
{
memory.chnSettings[nChn].patLoop = memory.elapsedTime;
memory.chnSettings[nChn].patLoopSmp = playState.m_lTotalSampleCount;
}
}
}
}
}
if(adjustSamplePos)
{
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++)
{
if(memory.chnSettings[nChn].ticksToRender != GetLengthMemory::IGNORE_CHANNEL)
{
memory.RenderChannel(nChn, oldTickDuration);
}
}
}
if(retval.targetReached || target.mode == GetLengthTarget::NoTarget)
{
retval.lastOrder = playState.m_nCurrentOrder;
retval.lastRow = playState.m_nRow;
}
retval.duration = memory.elapsedTime;
results.push_back(retval);
if(adjustMode & eAdjust)
{
if(retval.targetReached || target.mode == GetLengthTarget::NoTarget)
{
m_PlayState = std::move(playState);
m_PlayState.m_nNextRow = m_PlayState.m_nRow;
m_PlayState.m_nFrameDelay = m_PlayState.m_nPatternDelay = 0;
m_PlayState.m_nTickCount = Util::MaxValueOfType(m_PlayState.m_nTickCount) - 1;
m_PlayState.m_bPositionChanged = true;
for(CHANNELINDEX n = 0; n < GetNumChannels(); n++)
{
if(m_PlayState.Chn[n].nLastNote != NOTE_NONE)
{
m_PlayState.Chn[n].nNewNote = m_PlayState.Chn[n].nLastNote;
}
if(memory.chnSettings[n].vol != 0xFF && !adjustSamplePos)
{
m_PlayState.Chn[n].nVolume = std::min(memory.chnSettings[n].vol, uint8(64)) * 4;
}
}
#ifndef NO_PLUGINS
std::bitset<MAX_MIXPLUGINS> plugSetProgram;
for(const auto ¶m : memory.plugParams)
{
PLUGINDEX plug = param.first.first - 1;
IMixPlugin *plugin = m_MixPlugins[plug].pMixPlugin;
if(plugin != nullptr)
{
if(!plugSetProgram[plug])
{
plugSetProgram.set(plug);
plugin->BeginSetProgram();
}
plugin->SetParameter(param.first.second, param.second / PlugParamValue(ModCommand::maxColumnValue));
}
}
if(plugSetProgram.any())
{
for(PLUGINDEX i = 0; i < MAX_MIXPLUGINS; i++)
{
if(plugSetProgram[i])
{
m_MixPlugins[i].pMixPlugin->EndSetProgram();
}
}
}
#endif // NO_PLUGINS
} else if(adjustMode != eAdjustOnSuccess)
{
m_PlayState.m_nMusicSpeed = m_nDefaultSpeed;
m_PlayState.m_nMusicTempo = m_nDefaultTempo;
m_PlayState.m_nGlobalVolume = m_nDefaultGlobalVolume;
}
if(sequence != Order.GetCurrentSequenceIndex())
{
Order.SetSequence(sequence);
}
visitedSongRows.Set(visitedRows);
}
return results;
}
Commit Message: [Fix] Possible out-of-bounds read when computing length of some IT files with pattern loops (OpenMPT: formats that are converted to IT, libopenmpt: IT/ITP/MO3), caught with afl-fuzz.
git-svn-id: https://source.openmpt.org/svn/openmpt/trunk/OpenMPT@10027 56274372-70c3-4bfc-bfc3-4c3a0b034d27
CWE ID: CWE-125 | std::vector<GetLengthType> CSoundFile::GetLength(enmGetLengthResetMode adjustMode, GetLengthTarget target)
{
std::vector<GetLengthType> results;
GetLengthType retval;
retval.startOrder = target.startOrder;
retval.startRow = target.startRow;
const bool hasSearchTarget = target.mode != GetLengthTarget::NoTarget;
const bool adjustSamplePos = (adjustMode & eAdjustSamplePositions) == eAdjustSamplePositions;
SEQUENCEINDEX sequence = target.sequence;
if(sequence >= Order.GetNumSequences()) sequence = Order.GetCurrentSequenceIndex();
const ModSequence &orderList = Order(sequence);
GetLengthMemory memory(*this);
CSoundFile::PlayState &playState = *memory.state;
RowVisitor visitedRows(*this, sequence);
playState.m_nNextRow = playState.m_nRow = target.startRow;
playState.m_nNextOrder = playState.m_nCurrentOrder = target.startOrder;
std::bitset<MAX_EFFECTS> forbiddenCommands;
std::bitset<MAX_VOLCMDS> forbiddenVolCommands;
if(adjustSamplePos)
{
forbiddenCommands.set(CMD_ARPEGGIO); forbiddenCommands.set(CMD_PORTAMENTOUP);
forbiddenCommands.set(CMD_PORTAMENTODOWN); forbiddenCommands.set(CMD_XFINEPORTAUPDOWN);
forbiddenCommands.set(CMD_NOTESLIDEUP); forbiddenCommands.set(CMD_NOTESLIDEUPRETRIG);
forbiddenCommands.set(CMD_NOTESLIDEDOWN); forbiddenCommands.set(CMD_NOTESLIDEDOWNRETRIG);
forbiddenVolCommands.set(VOLCMD_PORTAUP); forbiddenVolCommands.set(VOLCMD_PORTADOWN);
for(CHANNELINDEX i = 0; i < GetNumChannels(); i++)
{
if(ChnSettings[i].dwFlags[CHN_MUTE]) memory.chnSettings[i].ticksToRender = GetLengthMemory::IGNORE_CHANNEL;
}
if(target.mode == GetLengthTarget::SeekPosition && target.pos.order < orderList.size())
{
const PATTERNINDEX seekPat = orderList[target.pos.order];
if(Patterns.IsValidPat(seekPat) && Patterns[seekPat].IsValidRow(target.pos.row))
{
const ModCommand *m = Patterns[seekPat].GetRow(target.pos.row);
for(CHANNELINDEX i = 0; i < GetNumChannels(); i++, m++)
{
if(m->note == NOTE_NOTECUT || m->note == NOTE_KEYOFF || (m->note == NOTE_FADE && GetNumInstruments())
|| (m->IsNote() && !m->IsPortamento()))
{
memory.chnSettings[i].ticksToRender = GetLengthMemory::IGNORE_CHANNEL;
}
}
}
}
}
uint32 oldTickDuration = 0;
for (;;)
{
if(target.mode == GetLengthTarget::SeekSeconds && memory.elapsedTime >= target.time)
{
retval.targetReached = true;
break;
}
uint32 rowDelay = 0, tickDelay = 0;
playState.m_nRow = playState.m_nNextRow;
playState.m_nCurrentOrder = playState.m_nNextOrder;
if(orderList.IsValidPat(playState.m_nCurrentOrder) && playState.m_nRow >= Patterns[orderList[playState.m_nCurrentOrder]].GetNumRows())
{
playState.m_nRow = 0;
if(m_playBehaviour[kFT2LoopE60Restart])
{
playState.m_nRow = playState.m_nNextPatStartRow;
playState.m_nNextPatStartRow = 0;
}
playState.m_nCurrentOrder = ++playState.m_nNextOrder;
}
playState.m_nPattern = playState.m_nCurrentOrder < orderList.size() ? orderList[playState.m_nCurrentOrder] : orderList.GetInvalidPatIndex();
bool positionJumpOnThisRow = false;
bool patternBreakOnThisRow = false;
bool patternLoopEndedOnThisRow = false, patternLoopStartedOnThisRow = false;
if(!Patterns.IsValidPat(playState.m_nPattern) && playState.m_nPattern != orderList.GetInvalidPatIndex() && target.mode == GetLengthTarget::SeekPosition && playState.m_nCurrentOrder == target.pos.order)
{
retval.targetReached = true;
break;
}
while(playState.m_nPattern >= Patterns.Size())
{
if((playState.m_nPattern == orderList.GetInvalidPatIndex()) || (playState.m_nCurrentOrder >= orderList.size()))
{
if(playState.m_nCurrentOrder == orderList.GetRestartPos())
break;
else
playState.m_nCurrentOrder = orderList.GetRestartPos();
} else
{
playState.m_nCurrentOrder++;
}
playState.m_nPattern = (playState.m_nCurrentOrder < orderList.size()) ? orderList[playState.m_nCurrentOrder] : orderList.GetInvalidPatIndex();
playState.m_nNextOrder = playState.m_nCurrentOrder;
if((!Patterns.IsValidPat(playState.m_nPattern)) && visitedRows.IsVisited(playState.m_nCurrentOrder, 0, true))
{
if(!hasSearchTarget || !visitedRows.GetFirstUnvisitedRow(playState.m_nNextOrder, playState.m_nRow, true))
{
break;
} else
{
retval.duration = memory.elapsedTime;
results.push_back(retval);
retval.startRow = playState.m_nRow;
retval.startOrder = playState.m_nNextOrder;
memory.Reset();
playState.m_nCurrentOrder = playState.m_nNextOrder;
playState.m_nPattern = orderList[playState.m_nCurrentOrder];
playState.m_nNextRow = playState.m_nRow;
break;
}
}
}
if(playState.m_nNextOrder == ORDERINDEX_INVALID)
{
break;
}
if(!Patterns.IsValidPat(playState.m_nPattern))
{
if(playState.m_nCurrentOrder == orderList.GetRestartPos())
{
if(!hasSearchTarget || !visitedRows.GetFirstUnvisitedRow(playState.m_nNextOrder, playState.m_nRow, true))
{
break;
} else
{
retval.duration = memory.elapsedTime;
results.push_back(retval);
retval.startRow = playState.m_nRow;
retval.startOrder = playState.m_nNextOrder;
memory.Reset();
playState.m_nNextRow = playState.m_nRow;
continue;
}
}
playState.m_nNextOrder = playState.m_nCurrentOrder + 1;
continue;
}
if(playState.m_nRow >= Patterns[playState.m_nPattern].GetNumRows())
playState.m_nRow = 0;
if(target.mode == GetLengthTarget::SeekPosition && playState.m_nCurrentOrder == target.pos.order && playState.m_nRow == target.pos.row)
{
retval.targetReached = true;
break;
}
if(visitedRows.IsVisited(playState.m_nCurrentOrder, playState.m_nRow, true))
{
if(!hasSearchTarget || !visitedRows.GetFirstUnvisitedRow(playState.m_nNextOrder, playState.m_nRow, true))
{
break;
} else
{
retval.duration = memory.elapsedTime;
results.push_back(retval);
retval.startRow = playState.m_nRow;
retval.startOrder = playState.m_nNextOrder;
memory.Reset();
playState.m_nNextRow = playState.m_nRow;
continue;
}
}
retval.endOrder = playState.m_nCurrentOrder;
retval.endRow = playState.m_nRow;
playState.m_nNextRow = playState.m_nRow + 1;
if(playState.m_nRow >= Patterns[playState.m_nPattern].GetNumRows())
{
playState.m_nRow = 0;
}
if(!playState.m_nRow)
{
for(CHANNELINDEX chn = 0; chn < GetNumChannels(); chn++)
{
memory.chnSettings[chn].patLoop = memory.elapsedTime;
memory.chnSettings[chn].patLoopSmp = playState.m_lTotalSampleCount;
}
}
ModChannel *pChn = playState.Chn;
const ModCommand *p = Patterns[playState.m_nPattern].GetpModCommand(playState.m_nRow, 0);
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, p++)
{
if(m_playBehaviour[kST3NoMutedChannels] && ChnSettings[nChn].dwFlags[CHN_MUTE]) // not even effects are processed on muted S3M channels
continue;
if(p->IsPcNote())
{
#ifndef NO_PLUGINS
if((adjustMode & eAdjust) && p->instr > 0 && p->instr <= MAX_MIXPLUGINS)
{
memory.plugParams[std::make_pair(p->instr, p->GetValueVolCol())] = p->GetValueEffectCol();
}
#endif // NO_PLUGINS
pChn[nChn].rowCommand.Clear();
continue;
}
pChn[nChn].rowCommand = *p;
switch(p->command)
{
case CMD_SPEED:
SetSpeed(playState, p->param);
break;
case CMD_TEMPO:
if(m_playBehaviour[kMODVBlankTiming])
{
if(p->param != 0) SetSpeed(playState, p->param);
}
break;
case CMD_S3MCMDEX:
if((p->param & 0xF0) == 0x60)
{
tickDelay += (p->param & 0x0F);
} else if((p->param & 0xF0) == 0xE0 && !rowDelay)
{
if(!(GetType() & MOD_TYPE_S3M) || (p->param & 0x0F) != 0)
{
rowDelay = 1 + (p->param & 0x0F);
}
}
break;
case CMD_MODCMDEX:
if((p->param & 0xF0) == 0xE0)
{
rowDelay = 1 + (p->param & 0x0F);
}
break;
}
}
if(rowDelay == 0) rowDelay = 1;
const uint32 numTicks = (playState.m_nMusicSpeed + tickDelay) * rowDelay;
const uint32 nonRowTicks = numTicks - rowDelay;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); pChn++, nChn++) if(!pChn->rowCommand.IsEmpty())
{
if(m_playBehaviour[kST3NoMutedChannels] && ChnSettings[nChn].dwFlags[CHN_MUTE]) // not even effects are processed on muted S3M channels
continue;
ModCommand::COMMAND command = pChn->rowCommand.command;
ModCommand::PARAM param = pChn->rowCommand.param;
ModCommand::NOTE note = pChn->rowCommand.note;
if (pChn->rowCommand.instr)
{
pChn->nNewIns = pChn->rowCommand.instr;
pChn->nLastNote = NOTE_NONE;
memory.chnSettings[nChn].vol = 0xFF;
}
if (pChn->rowCommand.IsNote()) pChn->nLastNote = note;
if(pChn->rowCommand.IsNote() || pChn->rowCommand.instr)
{
SAMPLEINDEX smp = 0;
if(GetNumInstruments())
{
ModInstrument *pIns;
if(pChn->nNewIns <= GetNumInstruments() && (pIns = Instruments[pChn->nNewIns]) != nullptr)
{
if(pIns->dwFlags[INS_SETPANNING])
pChn->nPan = pIns->nPan;
if(ModCommand::IsNote(note))
smp = pIns->Keyboard[note - NOTE_MIN];
}
} else
{
smp = pChn->nNewIns;
}
if(smp > 0 && smp <= GetNumSamples() && Samples[smp].uFlags[CHN_PANNING])
{
pChn->nPan = Samples[smp].nPan;
}
}
switch(pChn->rowCommand.volcmd)
{
case VOLCMD_VOLUME:
memory.chnSettings[nChn].vol = pChn->rowCommand.vol;
break;
case VOLCMD_VOLSLIDEUP:
case VOLCMD_VOLSLIDEDOWN:
if(pChn->rowCommand.vol != 0)
pChn->nOldVolParam = pChn->rowCommand.vol;
break;
}
switch(command)
{
case CMD_POSITIONJUMP:
positionJumpOnThisRow = true;
playState.m_nNextOrder = static_cast<ORDERINDEX>(CalculateXParam(playState.m_nPattern, playState.m_nRow, nChn));
playState.m_nNextPatStartRow = 0; // FT2 E60 bug
if(!patternBreakOnThisRow || (GetType() & (MOD_TYPE_MOD | MOD_TYPE_XM)))
playState.m_nNextRow = 0;
if (adjustMode & eAdjust)
{
pChn->nPatternLoopCount = 0;
pChn->nPatternLoop = 0;
}
break;
case CMD_PATTERNBREAK:
{
ROWINDEX row = PatternBreak(playState, nChn, param);
if(row != ROWINDEX_INVALID)
{
patternBreakOnThisRow = true;
playState.m_nNextRow = row;
if(!positionJumpOnThisRow)
{
playState.m_nNextOrder = playState.m_nCurrentOrder + 1;
}
if(adjustMode & eAdjust)
{
pChn->nPatternLoopCount = 0;
pChn->nPatternLoop = 0;
}
}
}
break;
case CMD_TEMPO:
if(!m_playBehaviour[kMODVBlankTiming])
{
TEMPO tempo(CalculateXParam(playState.m_nPattern, playState.m_nRow, nChn), 0);
if ((adjustMode & eAdjust) && (GetType() & (MOD_TYPE_S3M | MOD_TYPE_IT | MOD_TYPE_MPT)))
{
if (tempo.GetInt()) pChn->nOldTempo = static_cast<uint8>(tempo.GetInt()); else tempo.Set(pChn->nOldTempo);
}
if (tempo.GetInt() >= 0x20) playState.m_nMusicTempo = tempo;
else
{
TEMPO tempoDiff((tempo.GetInt() & 0x0F) * nonRowTicks, 0);
if ((tempo.GetInt() & 0xF0) == 0x10)
{
playState.m_nMusicTempo += tempoDiff;
} else
{
if(tempoDiff < playState.m_nMusicTempo)
playState.m_nMusicTempo -= tempoDiff;
else
playState.m_nMusicTempo.Set(0);
}
}
TEMPO tempoMin = GetModSpecifications().GetTempoMin(), tempoMax = GetModSpecifications().GetTempoMax();
if(m_playBehaviour[kTempoClamp]) // clamp tempo correctly in compatible mode
{
tempoMax.Set(255);
}
Limit(playState.m_nMusicTempo, tempoMin, tempoMax);
}
break;
case CMD_S3MCMDEX:
switch(param & 0xF0)
{
case 0x90:
if(param <= 0x91)
{
pChn->dwFlags.set(CHN_SURROUND, param == 0x91);
}
break;
case 0xA0:
pChn->nOldHiOffset = param & 0x0F;
break;
case 0xB0:
if (param & 0x0F)
{
patternLoopEndedOnThisRow = true;
} else
{
CHANNELINDEX firstChn = nChn, lastChn = nChn;
if(GetType() == MOD_TYPE_S3M)
{
firstChn = 0;
lastChn = GetNumChannels() - 1;
}
for(CHANNELINDEX c = firstChn; c <= lastChn; c++)
{
memory.chnSettings[c].patLoop = memory.elapsedTime;
memory.chnSettings[c].patLoopSmp = playState.m_lTotalSampleCount;
memory.chnSettings[c].patLoopStart = playState.m_nRow;
}
patternLoopStartedOnThisRow = true;
}
break;
case 0xF0:
pChn->nActiveMacro = param & 0x0F;
break;
}
break;
case CMD_MODCMDEX:
switch(param & 0xF0)
{
case 0x60:
if (param & 0x0F)
{
playState.m_nNextPatStartRow = memory.chnSettings[nChn].patLoopStart; // FT2 E60 bug
patternLoopEndedOnThisRow = true;
} else
{
patternLoopStartedOnThisRow = true;
memory.chnSettings[nChn].patLoop = memory.elapsedTime;
memory.chnSettings[nChn].patLoopSmp = playState.m_lTotalSampleCount;
memory.chnSettings[nChn].patLoopStart = playState.m_nRow;
}
break;
case 0xF0:
pChn->nActiveMacro = param & 0x0F;
break;
}
break;
case CMD_XFINEPORTAUPDOWN:
if(((param & 0xF0) == 0xA0) && !m_playBehaviour[kFT2RestrictXCommand]) pChn->nOldHiOffset = param & 0x0F;
break;
}
if (!(adjustMode & eAdjust)) continue;
switch(command)
{
case CMD_PORTAMENTOUP:
if(param)
{
if(!m_playBehaviour[kFT2PortaUpDownMemory])
pChn->nOldPortaDown = param;
pChn->nOldPortaUp = param;
}
break;
case CMD_PORTAMENTODOWN:
if(param)
{
if(!m_playBehaviour[kFT2PortaUpDownMemory])
pChn->nOldPortaUp = param;
pChn->nOldPortaDown = param;
}
break;
case CMD_TONEPORTAMENTO:
if (param) pChn->nPortamentoSlide = param << 2;
break;
case CMD_OFFSET:
if (param) pChn->oldOffset = param << 8;
break;
case CMD_VOLUMESLIDE:
case CMD_TONEPORTAVOL:
if (param) pChn->nOldVolumeSlide = param;
break;
case CMD_VOLUME:
memory.chnSettings[nChn].vol = param;
break;
case CMD_GLOBALVOLUME:
if(!(GetType() & GLOBALVOL_7BIT_FORMATS) && param < 128) param *= 2;
if(param <= 128)
{
playState.m_nGlobalVolume = param * 2;
} else if(!(GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT | MOD_TYPE_S3M)))
{
playState.m_nGlobalVolume = 256;
}
break;
case CMD_GLOBALVOLSLIDE:
if(m_playBehaviour[kPerChannelGlobalVolSlide])
{
if (param) pChn->nOldGlobalVolSlide = param; else param = pChn->nOldGlobalVolSlide;
} else
{
if (param) playState.Chn[0].nOldGlobalVolSlide = param; else param = playState.Chn[0].nOldGlobalVolSlide;
}
if (((param & 0x0F) == 0x0F) && (param & 0xF0))
{
param >>= 4;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume += param << 1;
} else if (((param & 0xF0) == 0xF0) && (param & 0x0F))
{
param = (param & 0x0F) << 1;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume -= param;
} else if (param & 0xF0)
{
param >>= 4;
param <<= 1;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume += param * nonRowTicks;
} else
{
param = (param & 0x0F) << 1;
if (!(GetType() & GLOBALVOL_7BIT_FORMATS)) param <<= 1;
playState.m_nGlobalVolume -= param * nonRowTicks;
}
Limit(playState.m_nGlobalVolume, 0, 256);
break;
case CMD_CHANNELVOLUME:
if (param <= 64) pChn->nGlobalVol = param;
break;
case CMD_CHANNELVOLSLIDE:
{
if (param) pChn->nOldChnVolSlide = param; else param = pChn->nOldChnVolSlide;
int32 volume = pChn->nGlobalVol;
if((param & 0x0F) == 0x0F && (param & 0xF0))
volume += (param >> 4); // Fine Up
else if((param & 0xF0) == 0xF0 && (param & 0x0F))
volume -= (param & 0x0F); // Fine Down
else if(param & 0x0F) // Down
volume -= (param & 0x0F) * nonRowTicks;
else // Up
volume += ((param & 0xF0) >> 4) * nonRowTicks;
Limit(volume, 0, 64);
pChn->nGlobalVol = volume;
}
break;
case CMD_PANNING8:
Panning(pChn, param, Pan8bit);
break;
case CMD_MODCMDEX:
if(param < 0x10)
{
for(CHANNELINDEX chn = 0; chn < GetNumChannels(); chn++)
{
playState.Chn[chn].dwFlags.set(CHN_AMIGAFILTER, !(param & 1));
}
}
MPT_FALLTHROUGH;
case CMD_S3MCMDEX:
if((param & 0xF0) == 0x80)
{
Panning(pChn, (param & 0x0F), Pan4bit);
}
break;
case CMD_VIBRATOVOL:
if (param) pChn->nOldVolumeSlide = param;
param = 0;
MPT_FALLTHROUGH;
case CMD_VIBRATO:
Vibrato(pChn, param);
break;
case CMD_FINEVIBRATO:
FineVibrato(pChn, param);
break;
case CMD_TREMOLO:
Tremolo(pChn, param);
break;
case CMD_PANBRELLO:
Panbrello(pChn, param);
break;
}
switch(pChn->rowCommand.volcmd)
{
case VOLCMD_PANNING:
Panning(pChn, pChn->rowCommand.vol, Pan6bit);
break;
case VOLCMD_VIBRATOSPEED:
if(m_playBehaviour[kFT2VolColVibrato])
pChn->nVibratoSpeed = pChn->rowCommand.vol & 0x0F;
else
Vibrato(pChn, pChn->rowCommand.vol << 4);
break;
case VOLCMD_VIBRATODEPTH:
Vibrato(pChn, pChn->rowCommand.vol);
break;
}
switch(pChn->rowCommand.command)
{
case CMD_VIBRATO:
case CMD_FINEVIBRATO:
case CMD_VIBRATOVOL:
if(adjustMode & eAdjust)
{
uint32 vibTicks = ((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) && !m_SongFlags[SONG_ITOLDEFFECTS]) ? numTicks : nonRowTicks;
uint32 inc = pChn->nVibratoSpeed * vibTicks;
if(m_playBehaviour[kITVibratoTremoloPanbrello])
inc *= 4;
pChn->nVibratoPos += static_cast<uint8>(inc);
}
break;
case CMD_TREMOLO:
if(adjustMode & eAdjust)
{
uint32 tremTicks = ((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) && !m_SongFlags[SONG_ITOLDEFFECTS]) ? numTicks : nonRowTicks;
uint32 inc = pChn->nTremoloSpeed * tremTicks;
if(m_playBehaviour[kITVibratoTremoloPanbrello])
inc *= 4;
pChn->nTremoloPos += static_cast<uint8>(inc);
}
break;
case CMD_PANBRELLO:
if(adjustMode & eAdjust)
{
pChn->nPanbrelloPos += static_cast<uint8>(pChn->nPanbrelloSpeed * (numTicks - 1));
ProcessPanbrello(pChn);
}
break;
}
}
if(GetType() == MOD_TYPE_XM && playState.m_nMusicSpeed == uint16_max)
{
break;
}
playState.m_nCurrentRowsPerBeat = m_nDefaultRowsPerBeat;
if(Patterns[playState.m_nPattern].GetOverrideSignature())
{
playState.m_nCurrentRowsPerBeat = Patterns[playState.m_nPattern].GetRowsPerBeat();
}
const uint32 tickDuration = GetTickDuration(playState);
const uint32 rowDuration = tickDuration * numTicks;
memory.elapsedTime += static_cast<double>(rowDuration) / static_cast<double>(m_MixerSettings.gdwMixingFreq);
playState.m_lTotalSampleCount += rowDuration;
if(adjustSamplePos)
{
pChn = playState.Chn;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); pChn++, nChn++)
{
if(memory.chnSettings[nChn].ticksToRender == GetLengthMemory::IGNORE_CHANNEL)
continue;
uint32 startTick = 0;
const ModCommand &m = pChn->rowCommand;
uint32 paramHi = m.param >> 4, paramLo = m.param & 0x0F;
bool porta = m.command == CMD_TONEPORTAMENTO || m.command == CMD_TONEPORTAVOL || m.volcmd == VOLCMD_TONEPORTAMENTO;
bool stopNote = patternLoopStartedOnThisRow; // It's too much trouble to keep those pattern loops in sync...
if(m.instr) pChn->proTrackerOffset = 0;
if(m.IsNote())
{
if(porta && memory.chnSettings[nChn].incChanged)
{
pChn->increment = GetChannelIncrement(pChn, pChn->nPeriod, 0);
}
int32 setPan = pChn->nPan;
pChn->nNewNote = pChn->nLastNote;
if(pChn->nNewIns != 0) InstrumentChange(pChn, pChn->nNewIns, porta);
NoteChange(pChn, m.note, porta);
memory.chnSettings[nChn].incChanged = true;
if((m.command == CMD_MODCMDEX || m.command == CMD_S3MCMDEX) && (m.param & 0xF0) == 0xD0 && paramLo < numTicks)
{
startTick = paramLo;
} else if(m.command == CMD_DELAYCUT && paramHi < numTicks)
{
startTick = paramHi;
}
if(rowDelay > 1 && startTick != 0 && (GetType() & (MOD_TYPE_S3M | MOD_TYPE_IT | MOD_TYPE_MPT)))
{
startTick += (playState.m_nMusicSpeed + tickDelay) * (rowDelay - 1);
}
if(!porta) memory.chnSettings[nChn].ticksToRender = 0;
if(m.command == CMD_PANNING8
|| ((m.command == CMD_MODCMDEX || m.command == CMD_S3MCMDEX) && paramHi == 0x8)
|| m.volcmd == VOLCMD_PANNING)
{
pChn->nPan = setPan;
}
if(m.command == CMD_OFFSET)
{
bool isExtended = false;
SmpLength offset = CalculateXParam(playState.m_nPattern, playState.m_nRow, nChn, &isExtended);
if(!isExtended)
{
offset <<= 8;
if(offset == 0) offset = pChn->oldOffset;
offset += static_cast<SmpLength>(pChn->nOldHiOffset) << 16;
}
SampleOffset(*pChn, offset);
} else if(m.command == CMD_OFFSETPERCENTAGE)
{
SampleOffset(*pChn, Util::muldiv_unsigned(pChn->nLength, m.param, 255));
} else if(m.command == CMD_REVERSEOFFSET && pChn->pModSample != nullptr)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
ReverseSampleOffset(*pChn, m.param);
startTick = playState.m_nMusicSpeed - 1;
} else if(m.volcmd == VOLCMD_OFFSET)
{
if(m.vol <= CountOf(pChn->pModSample->cues) && pChn->pModSample != nullptr)
{
SmpLength offset;
if(m.vol == 0)
offset = pChn->oldOffset;
else
offset = pChn->oldOffset = pChn->pModSample->cues[m.vol - 1];
SampleOffset(*pChn, offset);
}
}
}
if(m.note == NOTE_KEYOFF || m.note == NOTE_NOTECUT || (m.note == NOTE_FADE && GetNumInstruments())
|| ((m.command == CMD_MODCMDEX || m.command == CMD_S3MCMDEX) && (m.param & 0xF0) == 0xC0 && paramLo < numTicks)
|| (m.command == CMD_DELAYCUT && paramLo != 0 && startTick + paramLo < numTicks))
{
stopNote = true;
}
if(m.command == CMD_VOLUME)
{
pChn->nVolume = m.param * 4;
} else if(m.volcmd == VOLCMD_VOLUME)
{
pChn->nVolume = m.vol * 4;
}
if(pChn->pModSample && !stopNote)
{
if(m.command < MAX_EFFECTS)
{
if(forbiddenCommands[m.command])
{
stopNote = true;
} else if(m.command == CMD_MODCMDEX)
{
switch(m.param & 0xF0)
{
case 0x10:
case 0x20:
stopNote = true;
}
}
}
if(m.volcmd < forbiddenVolCommands.size() && forbiddenVolCommands[m.volcmd])
{
stopNote = true;
}
}
if(stopNote)
{
pChn->Stop();
memory.chnSettings[nChn].ticksToRender = 0;
} else
{
if(oldTickDuration != tickDuration && oldTickDuration != 0)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
}
switch(m.command)
{
case CMD_TONEPORTAVOL:
case CMD_VOLUMESLIDE:
case CMD_VIBRATOVOL:
if(m.param || (GetType() != MOD_TYPE_MOD))
{
for(uint32 i = 0; i < numTicks; i++)
{
pChn->isFirstTick = (i == 0);
VolumeSlide(pChn, m.param);
}
}
break;
case CMD_MODCMDEX:
if((m.param & 0x0F) || (GetType() & (MOD_TYPE_XM | MOD_TYPE_MT2)))
{
pChn->isFirstTick = true;
switch(m.param & 0xF0)
{
case 0xA0: FineVolumeUp(pChn, m.param & 0x0F, false); break;
case 0xB0: FineVolumeDown(pChn, m.param & 0x0F, false); break;
}
}
break;
case CMD_S3MCMDEX:
if(m.param == 0x9E)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
pChn->dwFlags.reset(CHN_PINGPONGFLAG);
} else if(m.param == 0x9F)
{
memory.RenderChannel(nChn, oldTickDuration); // Re-sync what we've got so far
pChn->dwFlags.set(CHN_PINGPONGFLAG);
if(!pChn->position.GetInt() && pChn->nLength && (m.IsNote() || !pChn->dwFlags[CHN_LOOP]))
{
pChn->position.Set(pChn->nLength - 1, SamplePosition::fractMax);
}
} else if((m.param & 0xF0) == 0x70)
{
}
break;
}
pChn->isFirstTick = true;
switch(m.volcmd)
{
case VOLCMD_FINEVOLUP: FineVolumeUp(pChn, m.vol, m_playBehaviour[kITVolColMemory]); break;
case VOLCMD_FINEVOLDOWN: FineVolumeDown(pChn, m.vol, m_playBehaviour[kITVolColMemory]); break;
case VOLCMD_VOLSLIDEUP:
case VOLCMD_VOLSLIDEDOWN:
{
ModCommand::VOL vol = m.vol;
if(vol == 0 && m_playBehaviour[kITVolColMemory])
{
vol = pChn->nOldVolParam;
if(vol == 0)
break;
}
if(m.volcmd == VOLCMD_VOLSLIDEUP)
vol <<= 4;
for(uint32 i = 0; i < numTicks; i++)
{
pChn->isFirstTick = (i == 0);
VolumeSlide(pChn, vol);
}
}
break;
}
if(porta)
{
uint32 portaTick = memory.chnSettings[nChn].ticksToRender + startTick + 1;
memory.chnSettings[nChn].ticksToRender += numTicks;
memory.RenderChannel(nChn, tickDuration, portaTick);
} else
{
memory.chnSettings[nChn].ticksToRender += (numTicks - startTick);
}
}
}
}
oldTickDuration = tickDuration;
if(patternLoopEndedOnThisRow
&& (!m_playBehaviour[kFT2PatternLoopWithJumps] || !(positionJumpOnThisRow || patternBreakOnThisRow))
&& (!m_playBehaviour[kITPatternLoopWithJumps] || !positionJumpOnThisRow))
{
std::map<double, int> startTimes;
pChn = playState.Chn;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, pChn++)
{
ModCommand::COMMAND command = pChn->rowCommand.command;
ModCommand::PARAM param = pChn->rowCommand.param;
if((command == CMD_S3MCMDEX && param >= 0xB1 && param <= 0xBF)
|| (command == CMD_MODCMDEX && param >= 0x61 && param <= 0x6F))
{
const double start = memory.chnSettings[nChn].patLoop;
if(!startTimes[start]) startTimes[start] = 1;
startTimes[start] = mpt::lcm(startTimes[start], 1 + (param & 0x0F));
}
}
for(const auto &i : startTimes)
{
memory.elapsedTime += (memory.elapsedTime - i.first) * (double)(i.second - 1);
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, pChn++)
{
if(memory.chnSettings[nChn].patLoop == i.first)
{
playState.m_lTotalSampleCount += (playState.m_lTotalSampleCount - memory.chnSettings[nChn].patLoopSmp) * (i.second - 1);
if(m_playBehaviour[kITPatternLoopTargetReset] || (GetType() == MOD_TYPE_S3M))
{
memory.chnSettings[nChn].patLoop = memory.elapsedTime;
memory.chnSettings[nChn].patLoopSmp = playState.m_lTotalSampleCount;
memory.chnSettings[nChn].patLoopStart = playState.m_nRow + 1;
}
break;
}
}
}
if(GetType() == MOD_TYPE_IT)
{
pChn = playState.Chn;
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++, pChn++)
{
if((pChn->rowCommand.command == CMD_S3MCMDEX && pChn->rowCommand.param >= 0xB1 && pChn->rowCommand.param <= 0xBF))
{
memory.chnSettings[nChn].patLoop = memory.elapsedTime;
memory.chnSettings[nChn].patLoopSmp = playState.m_lTotalSampleCount;
}
}
}
}
}
if(adjustSamplePos)
{
for(CHANNELINDEX nChn = 0; nChn < GetNumChannels(); nChn++)
{
if(memory.chnSettings[nChn].ticksToRender != GetLengthMemory::IGNORE_CHANNEL)
{
memory.RenderChannel(nChn, oldTickDuration);
}
}
}
if(retval.targetReached || target.mode == GetLengthTarget::NoTarget)
{
retval.lastOrder = playState.m_nCurrentOrder;
retval.lastRow = playState.m_nRow;
}
retval.duration = memory.elapsedTime;
results.push_back(retval);
if(adjustMode & eAdjust)
{
if(retval.targetReached || target.mode == GetLengthTarget::NoTarget)
{
m_PlayState = std::move(playState);
m_PlayState.m_nNextRow = m_PlayState.m_nRow;
m_PlayState.m_nFrameDelay = m_PlayState.m_nPatternDelay = 0;
m_PlayState.m_nTickCount = Util::MaxValueOfType(m_PlayState.m_nTickCount) - 1;
m_PlayState.m_bPositionChanged = true;
for(CHANNELINDEX n = 0; n < GetNumChannels(); n++)
{
if(m_PlayState.Chn[n].nLastNote != NOTE_NONE)
{
m_PlayState.Chn[n].nNewNote = m_PlayState.Chn[n].nLastNote;
}
if(memory.chnSettings[n].vol != 0xFF && !adjustSamplePos)
{
m_PlayState.Chn[n].nVolume = std::min(memory.chnSettings[n].vol, uint8(64)) * 4;
}
}
#ifndef NO_PLUGINS
std::bitset<MAX_MIXPLUGINS> plugSetProgram;
for(const auto &param : memory.plugParams)
{
PLUGINDEX plug = param.first.first - 1;
IMixPlugin *plugin = m_MixPlugins[plug].pMixPlugin;
if(plugin != nullptr)
{
if(!plugSetProgram[plug])
{
plugSetProgram.set(plug);
plugin->BeginSetProgram();
}
plugin->SetParameter(param.first.second, param.second / PlugParamValue(ModCommand::maxColumnValue));
}
}
if(plugSetProgram.any())
{
for(PLUGINDEX i = 0; i < MAX_MIXPLUGINS; i++)
{
if(plugSetProgram[i])
{
m_MixPlugins[i].pMixPlugin->EndSetProgram();
}
}
}
#endif // NO_PLUGINS
} else if(adjustMode != eAdjustOnSuccess)
{
m_PlayState.m_nMusicSpeed = m_nDefaultSpeed;
m_PlayState.m_nMusicTempo = m_nDefaultTempo;
m_PlayState.m_nGlobalVolume = m_nDefaultGlobalVolume;
}
if(sequence != Order.GetCurrentSequenceIndex())
{
Order.SetSequence(sequence);
}
visitedSongRows.Set(visitedRows);
}
return results;
}
| 169,264 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool HTMLMediaElement::isAutoplayAllowedPerSettings() const {
LocalFrame* frame = document().frame();
if (!frame)
return false;
FrameLoaderClient* frameLoaderClient = frame->loader().client();
return frameLoaderClient && frameLoaderClient->allowAutoplay(false);
}
Commit Message: [Blink>Media] Allow autoplay muted on Android by default
There was a mistake causing autoplay muted is shipped on Android
but it will be disabled if the chromium embedder doesn't specify
content setting for "AllowAutoplay" preference. This CL makes the
AllowAutoplay preference true by default so that it is allowed by
embedders (including AndroidWebView) unless they explicitly
disable it.
Intent to ship:
https://groups.google.com/a/chromium.org/d/msg/blink-dev/Q1cnzNI2GpI/AL_eyUNABgAJ
BUG=689018
Review-Url: https://codereview.chromium.org/2677173002
Cr-Commit-Position: refs/heads/master@{#448423}
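A minimal sketch of the idea behind this change, using invented names rather than Blink's API: a preference lookup that falls back to a caller-supplied default. Flipping that fallback from false to true (as the fixed listing below does for allowAutoplay) means an embedder that never configures the preference now allows autoplay, while an explicit false still disables it.

#include <cstdio>
#include <map>
#include <string>

// Returns the stored preference if present, otherwise the supplied default.
bool GetPref(const std::map<std::string, bool>& prefs,
             const std::string& key, bool default_value) {
  auto it = prefs.find(key);
  return it != prefs.end() ? it->second : default_value;
}

int main() {
  std::map<std::string, bool> embedder_prefs;  // embedder configured nothing
  std::printf("%d\n", GetPref(embedder_prefs, "AllowAutoplay", true));  // 1
  embedder_prefs["AllowAutoplay"] = false;     // explicit opt-out still wins
  std::printf("%d\n", GetPref(embedder_prefs, "AllowAutoplay", true));  // 0
}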
CWE ID: CWE-119 | bool HTMLMediaElement::isAutoplayAllowedPerSettings() const {
LocalFrame* frame = document().frame();
if (!frame)
return false;
FrameLoaderClient* frameLoaderClient = frame->loader().client();
return frameLoaderClient && frameLoaderClient->allowAutoplay(true);
}
| 172,016 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: iperf_json_printf(const char *format, ...)
{
cJSON* o;
va_list argp;
const char *cp;
char name[100];
char* np;
cJSON* j;
o = cJSON_CreateObject();
if (o == NULL)
return NULL;
va_start(argp, format);
np = name;
for (cp = format; *cp != '\0'; ++cp) {
switch (*cp) {
case ' ':
break;
case ':':
*np = '\0';
break;
case '%':
++cp;
switch (*cp) {
case 'b':
j = cJSON_CreateBool(va_arg(argp, int));
break;
case 'd':
j = cJSON_CreateInt(va_arg(argp, int64_t));
break;
case 'f':
j = cJSON_CreateFloat(va_arg(argp, double));
break;
case 's':
j = cJSON_CreateString(va_arg(argp, char *));
break;
default:
return NULL;
}
if (j == NULL)
return NULL;
cJSON_AddItemToObject(o, name, j);
np = name;
break;
default:
*np++ = *cp;
break;
}
}
va_end(argp);
return o;
}
Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]>
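A hedged illustration of the API difference visible between the two listings: the bundled cJSON used above exposes cJSON_CreateInt() and cJSON_CreateFloat(), while the newer cJSON imported by the fix folds both into cJSON_CreateNumber(). The snippet below assumes a system-installed upstream cJSON (header path and the -lcjson link flag may vary) and is not iperf3 code.

#include <cstdio>
#include <cstdlib>
#include <cjson/cJSON.h>

int main() {
  cJSON* o = cJSON_CreateObject();
  if (o == nullptr) return 1;
  // One numeric constructor covers both integer and floating-point values.
  cJSON_AddItemToObject(o, "bytes", cJSON_CreateNumber(123456789.0));
  cJSON_AddItemToObject(o, "seconds", cJSON_CreateNumber(10.0));
  char* text = cJSON_PrintUnformatted(o);
  if (text != nullptr) {
    std::printf("%s\n", text);
    std::free(text);  // cJSON_PrintUnformatted allocates with malloc
  }
  cJSON_Delete(o);
  return 0;
}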
CWE ID: CWE-119 | iperf_json_printf(const char *format, ...)
{
cJSON* o;
va_list argp;
const char *cp;
char name[100];
char* np;
cJSON* j;
o = cJSON_CreateObject();
if (o == NULL)
return NULL;
va_start(argp, format);
np = name;
for (cp = format; *cp != '\0'; ++cp) {
switch (*cp) {
case ' ':
break;
case ':':
*np = '\0';
break;
case '%':
++cp;
switch (*cp) {
case 'b':
j = cJSON_CreateBool(va_arg(argp, int));
break;
case 'd':
j = cJSON_CreateNumber(va_arg(argp, int64_t));
break;
case 'f':
j = cJSON_CreateNumber(va_arg(argp, double));
break;
case 's':
j = cJSON_CreateString(va_arg(argp, char *));
break;
default:
return NULL;
}
if (j == NULL)
return NULL;
cJSON_AddItemToObject(o, name, j);
np = name;
break;
default:
*np++ = *cp;
break;
}
}
va_end(argp);
return o;
}
| 167,318 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: WORD32 ihevcd_decode(iv_obj_t *ps_codec_obj, void *pv_api_ip, void *pv_api_op)
{
WORD32 ret = IV_SUCCESS;
codec_t *ps_codec = (codec_t *)(ps_codec_obj->pv_codec_handle);
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
WORD32 proc_idx = 0;
WORD32 prev_proc_idx = 0;
/* Initialize error code */
ps_codec->i4_error_code = 0;
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size; //Restore size field
}
if(ps_codec->i4_init_done != 1)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_INIT_NOT_DONE;
return IV_FAIL;
}
if(ps_codec->u4_pic_cnt >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_NUM_FRAMES_LIMIT_REACHED;
return IV_FAIL;
}
/* If reset flag is set, flush the existing buffers */
if(ps_codec->i4_reset_flag)
{
ps_codec->i4_flush_mode = 1;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
/* In case the decoder is not in flush mode check for input buffer validity */
if(0 == ps_codec->i4_flush_mode)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= MIN_START_CODE_LEN)
{
if((WORD32)ps_dec_ip->u4_num_Bytes > 0)
ps_dec_op->u4_num_bytes_consumed = ps_dec_ip->u4_num_Bytes;
else
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
#ifdef APPLY_CONCEALMENT
{
WORD32 num_mbs;
num_mbs = (ps_codec->i4_wd * ps_codec->i4_ht + 255) >> 8;
/* Reset MB Count at the beginning of every process call */
ps_codec->mb_count = 0;
memset(ps_codec->mb_map, 0, ((num_mbs + 7) >> 3));
}
#endif
if(0 == ps_codec->i4_share_disp_buf && ps_codec->i4_header_mode == 0)
{
UWORD32 i;
if(ps_dec_ip->s_out_buffer.u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec_ip->s_out_buffer.u4_num_bufs; i++)
{
if(ps_dec_ip->s_out_buffer.pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->s_out_buffer.u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
ps_codec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_codec->u4_ts = ps_dec_ip->u4_ts;
if(ps_codec->i4_flush_mode)
{
ps_dec_op->u4_pic_wd = ps_codec->i4_disp_wd;
ps_dec_op->u4_pic_ht = ps_codec->i4_disp_ht;
ps_dec_op->u4_new_seq = 0;
ps_codec->ps_disp_buf = (pic_buf_t *)ihevc_disp_mgr_get(
(disp_mgr_t *)ps_codec->pv_disp_buf_mgr, &ps_codec->i4_disp_buf_id);
/* In case of non-shared mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
if((ps_codec->ps_disp_buf)
&& ((0 == ps_codec->i4_share_disp_buf)
|| (IV_YUV_420P
== ps_codec->e_chroma_fmt)))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[prev_proc_idx];
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
/* Output buffer check */
ret = ihevcd_check_out_buf_size(ps_codec);
RETURN_IF((ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS), ret);
/* Set remaining number of rows to be processed */
ret = ihevcd_fmt_conv(ps_codec, &ps_codec->as_process[prev_proc_idx],
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], 0,
ps_codec->i4_disp_ht);
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
if(NULL == ps_codec->ps_disp_buf)
{
/* If in flush mode and there are no more buffers to flush,
* check for the reset flag and reset the decoder */
if(ps_codec->i4_reset_flag)
{
ihevcd_init(ps_codec);
}
return (IV_FAIL);
}
return (IV_SUCCESS);
}
/* In case of shared mode, check if there is a free buffer for reconstruction */
if((0 == ps_codec->i4_header_mode) && (1 == ps_codec->i4_share_disp_buf))
{
WORD32 buf_status;
buf_status = 1;
if(ps_codec->pv_pic_buf_mgr)
buf_status = ihevc_buf_mgr_check_free((buf_mgr_t *)ps_codec->pv_pic_buf_mgr);
/* If there is no free buffer, then return with an error code */
if(0 == buf_status)
{
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return IV_FAIL;
}
}
ps_codec->i4_bytes_remaining = ps_dec_ip->u4_num_Bytes;
ps_codec->pu1_inp_bitsbuf = (UWORD8 *)ps_dec_ip->pv_stream_buffer;
ps_codec->s_parse.i4_end_of_frame = 0;
ps_codec->i4_pic_present = 0;
ps_codec->i4_slice_error = 0;
ps_codec->ps_disp_buf = NULL;
if(ps_codec->i4_num_cores > 1)
{
ithread_set_affinity(0);
}
while(MIN_START_CODE_LEN < ps_codec->i4_bytes_remaining)
{
WORD32 nal_len;
WORD32 nal_ofst;
WORD32 bits_len;
if(ps_codec->i4_slice_error)
{
slice_header_t *ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1));
WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x +
ps_slice_hdr_next->i2_ctb_y * ps_codec->s_parse.ps_sps->i2_pic_wd_in_ctb;
if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr)
ps_codec->i4_slice_error = 0;
}
if(ps_codec->pu1_bitsbuf_dynamic)
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_dynamic;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_dynamic;
}
else
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_static;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_static;
}
nal_ofst = ihevcd_nal_search_start_code(ps_codec->pu1_inp_bitsbuf,
ps_codec->i4_bytes_remaining);
ps_codec->i4_nal_ofst = nal_ofst;
{
WORD32 bytes_remaining = ps_codec->i4_bytes_remaining - nal_ofst;
bytes_remaining = MIN((UWORD32)bytes_remaining, ps_codec->u4_bitsbuf_size);
ihevcd_nal_remv_emuln_bytes(ps_codec->pu1_inp_bitsbuf + nal_ofst,
ps_codec->pu1_bitsbuf,
bytes_remaining,
&nal_len, &bits_len);
/* Decoder may read upto 8 extra bytes at the end of frame */
/* These are not used, but still set them to zero to avoid uninitialized reads */
if(bits_len < (WORD32)(ps_codec->u4_bitsbuf_size - 8))
{
memset(ps_codec->pu1_bitsbuf + bits_len, 0, 2 * sizeof(UWORD32));
}
}
/* This may be used to update the offsets for tiles and entropy sync row offsets */
ps_codec->i4_num_emln_bytes = nal_len - bits_len;
ps_codec->i4_nal_len = nal_len;
ihevcd_bits_init(&ps_codec->s_parse.s_bitstrm, ps_codec->pu1_bitsbuf,
bits_len);
ret = ihevcd_nal_unit(ps_codec);
/* If the frame is incomplete and
* the bytes remaining is zero or a header is received,
* complete the frame treating it to be in error */
if(ps_codec->i4_pic_present &&
(ps_codec->s_parse.i4_next_ctb_indx != ps_codec->s_parse.ps_sps->i4_pic_size_in_ctb))
{
if((ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN) ||
(ps_codec->i4_header_in_slice_mode))
{
slice_header_t *ps_slice_hdr_next;
ps_codec->s_parse.i4_cur_slice_idx--;
if(ps_codec->s_parse.i4_cur_slice_idx < 0)
ps_codec->s_parse.i4_cur_slice_idx = 0;
ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + ((ps_codec->s_parse.i4_cur_slice_idx + 1) & (MAX_SLICE_HDR_CNT - 1));
ps_slice_hdr_next->i2_ctb_x = 0;
ps_slice_hdr_next->i2_ctb_y = ps_codec->s_parse.ps_sps->i2_pic_ht_in_ctb;
ps_codec->i4_slice_error = 1;
continue;
}
}
if(IHEVCD_IGNORE_SLICE == ret)
{
ps_codec->s_parse.i4_cur_slice_idx = MAX(0, (ps_codec->s_parse.i4_cur_slice_idx - 1));
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
continue;
}
if((IVD_RES_CHANGED == ret) ||
(IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == ret))
{
break;
}
/* Update bytes remaining and bytes consumed and input bitstream pointer */
/* Do not consume the NAL in the following cases */
/* Slice header reached during header decode mode */
/* TODO: Next picture's slice reached */
if(ret != IHEVCD_SLICE_IN_HEADER_MODE)
{
if((0 == ps_codec->i4_slice_error) ||
(ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN))
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
}
if(ret != IHEVCD_SUCCESS)
break;
if(ps_codec->s_parse.i4_end_of_frame)
break;
}
else
{
ret = IHEVCD_SUCCESS;
break;
}
/* Allocate dynamic bitstream buffer once SPS is decoded */
if((ps_codec->u4_allocate_dynamic_done == 0) && ps_codec->i4_sps_done)
{
WORD32 ret;
ret = ihevcd_allocate_dynamic_bufs(ps_codec);
if(ret != IV_SUCCESS)
{
/* Free any dynamic buffers that are allocated */
ihevcd_free_dynamic_bufs(ps_codec);
ps_codec->i4_error_code = IVD_MEM_ALLOC_FAILED;
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IVD_MEM_ALLOC_FAILED;
return IV_FAIL;
}
}
BREAK_AFTER_SLICE_NAL();
}
if((ps_codec->u4_pic_cnt == 0) && (ret != IHEVCD_SUCCESS))
{
ps_codec->i4_error_code = ret;
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
return IV_FAIL;
}
if(1 == ps_codec->i4_pic_present)
{
WORD32 i;
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
ps_codec->i4_first_pic_done = 1;
/*TODO temporary fix: end_of_frame is checked before adding format conversion to job queue */
if(ps_codec->i4_num_cores > 1 && ps_codec->s_parse.i4_end_of_frame)
{
/* Add job queue for format conversion / frame copy for each ctb row */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
if((ps_codec->ps_disp_buf) &&
((0 == ps_codec->i4_share_disp_buf) || (IV_YUV_420P == ps_codec->e_chroma_fmt)))
{
/* If format conversion jobs were not issued in pic_init() add them here */
if((0 == ps_codec->u4_enable_fmt_conv_ahead) ||
(ps_codec->i4_disp_buf_id == ps_proc->i4_cur_pic_buf_id))
for(i = 0; i < ps_sps->i2_pic_ht_in_ctb; i++)
{
proc_job_t s_job;
IHEVCD_ERROR_T ret;
s_job.i4_cmd = CMD_FMTCONV;
s_job.i2_ctb_cnt = 0;
s_job.i2_ctb_x = 0;
s_job.i2_ctb_y = i;
s_job.i2_slice_idx = 0;
s_job.i4_tu_coeff_data_ofst = 0;
ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq,
&s_job, sizeof(proc_job_t), 1);
if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS)
return (WORD32)ret;
}
}
/* Reached end of frame : Signal terminate */
/* The terminate flag is checked only after all the jobs are dequeued */
ret = ihevcd_jobq_terminate((jobq_t *)ps_codec->s_parse.pv_proc_jobq);
while(1)
{
IHEVCD_ERROR_T ret;
proc_job_t s_job;
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
ret = ihevcd_jobq_dequeue((jobq_t *)ps_proc->pv_proc_jobq, &s_job,
sizeof(proc_job_t), 1);
if((IHEVCD_ERROR_T)IHEVCD_SUCCESS != ret)
break;
ps_proc->i4_ctb_cnt = s_job.i2_ctb_cnt;
ps_proc->i4_ctb_x = s_job.i2_ctb_x;
ps_proc->i4_ctb_y = s_job.i2_ctb_y;
ps_proc->i4_cur_slice_idx = s_job.i2_slice_idx;
if(CMD_PROCESS == s_job.i4_cmd)
{
ihevcd_init_proc_ctxt(ps_proc, s_job.i4_tu_coeff_data_ofst);
ihevcd_process(ps_proc);
}
else if(CMD_FMTCONV == s_job.i4_cmd)
{
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
WORD32 num_rows = 1 << ps_sps->i1_log2_ctb_size;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
num_rows = MIN(num_rows, (ps_codec->i4_disp_ht - (s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size)));
if(num_rows < 0)
num_rows = 0;
ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size,
num_rows);
}
}
}
/* In case of non-shared mode and while running in single core mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
else if((ps_codec->ps_disp_buf) && ((0 == ps_codec->i4_share_disp_buf) ||
(IV_YUV_420P == ps_codec->e_chroma_fmt)) &&
(ps_codec->s_parse.i4_end_of_frame))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[proc_idx];
/* Set remaining number of rows to be processed */
ps_codec->s_fmt_conv.i4_num_rows = ps_codec->i4_disp_ht
- ps_codec->s_fmt_conv.i4_cur_row;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
if(ps_codec->s_fmt_conv.i4_num_rows < 0)
ps_codec->s_fmt_conv.i4_num_rows = 0;
ret = ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
ps_codec->s_fmt_conv.i4_cur_row,
ps_codec->s_fmt_conv.i4_num_rows);
ps_codec->s_fmt_conv.i4_cur_row += ps_codec->s_fmt_conv.i4_num_rows;
}
DEBUG_DUMP_MV_MAP(ps_codec);
/* Mark MV Buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_mv_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_mv_bank_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for display */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_DISP);
/* Insert the current picture as short term reference */
ihevc_dpb_mgr_insert_ref((dpb_mgr_t *)ps_codec->pv_dpb_mgr,
ps_codec->as_process[proc_idx].ps_cur_pic,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id);
/* If a frame was displayed (in non-shared mode), then release it from display manager */
if((0 == ps_codec->i4_share_disp_buf) && (ps_codec->ps_disp_buf))
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
/* Wait for threads */
for(i = 0; i < (ps_codec->i4_num_cores - 1); i++)
{
if(ps_codec->ai4_process_thread_created[i])
{
ithread_join(ps_codec->apv_process_thread_handle[i], NULL);
ps_codec->ai4_process_thread_created[i] = 0;
}
}
DEBUG_VALIDATE_PADDED_REGION(&ps_codec->as_process[proc_idx]);
if(ps_codec->u4_pic_cnt > 0)
{
DEBUG_DUMP_PIC_PU(ps_codec);
}
DEBUG_DUMP_PIC_BUFFERS(ps_codec);
/* Increment the number of pictures decoded */
ps_codec->u4_pic_cnt++;
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
return ret;
}
Commit Message: Fix slice decrement for skipped slices
Test: run the poc with and without the patch
Bug: 63045918
Change-Id: I27804d42c55480c25303d1a5dbb43b1d86d7fa94
(cherry picked from commit 272f2c23c8ba8579adb0618b4124163b9bf086fb)
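As far as the two listings show, the behavioural difference is confined to the IHEVCD_IGNORE_SLICE branch: the fixed version no longer clamps and decrements s_parse.i4_cur_slice_idx before skipping the NAL. The toy below (invented for illustration, not decoder code) shows why that pattern is an incorrect calculation: a clamped decrement applied on every skipped item walks the index backwards until it sticks at 0, so later bookkeeping no longer refers to the slice it was meant to.

#include <algorithm>
#include <cstdio>

int main() {
  int cur_slice_idx = 3;
  for (int skipped = 0; skipped < 5; ++skipped)
    cur_slice_idx = std::max(0, cur_slice_idx - 1);  // the removed pattern
  std::printf("index after skipped slices: %d\n", cur_slice_idx);  // prints 0
}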
CWE ID: CWE-682 | WORD32 ihevcd_decode(iv_obj_t *ps_codec_obj, void *pv_api_ip, void *pv_api_op)
{
WORD32 ret = IV_SUCCESS;
codec_t *ps_codec = (codec_t *)(ps_codec_obj->pv_codec_handle);
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
WORD32 proc_idx = 0;
WORD32 prev_proc_idx = 0;
/* Initialize error code */
ps_codec->i4_error_code = 0;
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size; //Restore size field
}
if(ps_codec->i4_init_done != 1)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_INIT_NOT_DONE;
return IV_FAIL;
}
if(ps_codec->u4_pic_cnt >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_NUM_FRAMES_LIMIT_REACHED;
return IV_FAIL;
}
/* If reset flag is set, flush the existing buffers */
if(ps_codec->i4_reset_flag)
{
ps_codec->i4_flush_mode = 1;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
/* In case the decoder is not in flush mode check for input buffer validity */
if(0 == ps_codec->i4_flush_mode)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= MIN_START_CODE_LEN)
{
if((WORD32)ps_dec_ip->u4_num_Bytes > 0)
ps_dec_op->u4_num_bytes_consumed = ps_dec_ip->u4_num_Bytes;
else
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
#ifdef APPLY_CONCEALMENT
{
WORD32 num_mbs;
num_mbs = (ps_codec->i4_wd * ps_codec->i4_ht + 255) >> 8;
/* Reset MB Count at the beginning of every process call */
ps_codec->mb_count = 0;
memset(ps_codec->mb_map, 0, ((num_mbs + 7) >> 3));
}
#endif
if(0 == ps_codec->i4_share_disp_buf && ps_codec->i4_header_mode == 0)
{
UWORD32 i;
if(ps_dec_ip->s_out_buffer.u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec_ip->s_out_buffer.u4_num_bufs; i++)
{
if(ps_dec_ip->s_out_buffer.pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->s_out_buffer.u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
ps_codec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_codec->u4_ts = ps_dec_ip->u4_ts;
if(ps_codec->i4_flush_mode)
{
ps_dec_op->u4_pic_wd = ps_codec->i4_disp_wd;
ps_dec_op->u4_pic_ht = ps_codec->i4_disp_ht;
ps_dec_op->u4_new_seq = 0;
ps_codec->ps_disp_buf = (pic_buf_t *)ihevc_disp_mgr_get(
(disp_mgr_t *)ps_codec->pv_disp_buf_mgr, &ps_codec->i4_disp_buf_id);
/* In case of non-shared mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
if((ps_codec->ps_disp_buf)
&& ((0 == ps_codec->i4_share_disp_buf)
|| (IV_YUV_420P
== ps_codec->e_chroma_fmt)))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[prev_proc_idx];
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
/* Output buffer check */
ret = ihevcd_check_out_buf_size(ps_codec);
RETURN_IF((ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS), ret);
/* Set remaining number of rows to be processed */
ret = ihevcd_fmt_conv(ps_codec, &ps_codec->as_process[prev_proc_idx],
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], 0,
ps_codec->i4_disp_ht);
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
if(NULL == ps_codec->ps_disp_buf)
{
/* If in flush mode and there are no more buffers to flush,
* check for the reset flag and reset the decoder */
if(ps_codec->i4_reset_flag)
{
ihevcd_init(ps_codec);
}
return (IV_FAIL);
}
return (IV_SUCCESS);
}
/* In case of shared mode, check if there is a free buffer for reconstruction */
if((0 == ps_codec->i4_header_mode) && (1 == ps_codec->i4_share_disp_buf))
{
WORD32 buf_status;
buf_status = 1;
if(ps_codec->pv_pic_buf_mgr)
buf_status = ihevc_buf_mgr_check_free((buf_mgr_t *)ps_codec->pv_pic_buf_mgr);
/* If there is no free buffer, then return with an error code */
if(0 == buf_status)
{
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return IV_FAIL;
}
}
ps_codec->i4_bytes_remaining = ps_dec_ip->u4_num_Bytes;
ps_codec->pu1_inp_bitsbuf = (UWORD8 *)ps_dec_ip->pv_stream_buffer;
ps_codec->s_parse.i4_end_of_frame = 0;
ps_codec->i4_pic_present = 0;
ps_codec->i4_slice_error = 0;
ps_codec->ps_disp_buf = NULL;
if(ps_codec->i4_num_cores > 1)
{
ithread_set_affinity(0);
}
while(MIN_START_CODE_LEN < ps_codec->i4_bytes_remaining)
{
WORD32 nal_len;
WORD32 nal_ofst;
WORD32 bits_len;
if(ps_codec->i4_slice_error)
{
slice_header_t *ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1));
WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x +
ps_slice_hdr_next->i2_ctb_y * ps_codec->s_parse.ps_sps->i2_pic_wd_in_ctb;
if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr)
ps_codec->i4_slice_error = 0;
}
if(ps_codec->pu1_bitsbuf_dynamic)
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_dynamic;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_dynamic;
}
else
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_static;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_static;
}
nal_ofst = ihevcd_nal_search_start_code(ps_codec->pu1_inp_bitsbuf,
ps_codec->i4_bytes_remaining);
ps_codec->i4_nal_ofst = nal_ofst;
{
WORD32 bytes_remaining = ps_codec->i4_bytes_remaining - nal_ofst;
bytes_remaining = MIN((UWORD32)bytes_remaining, ps_codec->u4_bitsbuf_size);
ihevcd_nal_remv_emuln_bytes(ps_codec->pu1_inp_bitsbuf + nal_ofst,
ps_codec->pu1_bitsbuf,
bytes_remaining,
&nal_len, &bits_len);
/* Decoder may read upto 8 extra bytes at the end of frame */
/* These are not used, but still set them to zero to avoid uninitialized reads */
if(bits_len < (WORD32)(ps_codec->u4_bitsbuf_size - 8))
{
memset(ps_codec->pu1_bitsbuf + bits_len, 0, 2 * sizeof(UWORD32));
}
}
/* This may be used to update the offsets for tiles and entropy sync row offsets */
ps_codec->i4_num_emln_bytes = nal_len - bits_len;
ps_codec->i4_nal_len = nal_len;
ihevcd_bits_init(&ps_codec->s_parse.s_bitstrm, ps_codec->pu1_bitsbuf,
bits_len);
ret = ihevcd_nal_unit(ps_codec);
/* If the frame is incomplete and
* the bytes remaining is zero or a header is received,
* complete the frame treating it to be in error */
if(ps_codec->i4_pic_present &&
(ps_codec->s_parse.i4_next_ctb_indx != ps_codec->s_parse.ps_sps->i4_pic_size_in_ctb))
{
if((ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN) ||
(ps_codec->i4_header_in_slice_mode))
{
slice_header_t *ps_slice_hdr_next;
ps_codec->s_parse.i4_cur_slice_idx--;
if(ps_codec->s_parse.i4_cur_slice_idx < 0)
ps_codec->s_parse.i4_cur_slice_idx = 0;
ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + ((ps_codec->s_parse.i4_cur_slice_idx + 1) & (MAX_SLICE_HDR_CNT - 1));
ps_slice_hdr_next->i2_ctb_x = 0;
ps_slice_hdr_next->i2_ctb_y = ps_codec->s_parse.ps_sps->i2_pic_ht_in_ctb;
ps_codec->i4_slice_error = 1;
continue;
}
}
if(IHEVCD_IGNORE_SLICE == ret)
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
continue;
}
if((IVD_RES_CHANGED == ret) ||
(IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == ret))
{
break;
}
/* Update bytes remaining and bytes consumed and input bitstream pointer */
/* Do not consume the NAL in the following cases */
/* Slice header reached during header decode mode */
/* TODO: Next picture's slice reached */
if(ret != IHEVCD_SLICE_IN_HEADER_MODE)
{
if((0 == ps_codec->i4_slice_error) ||
(ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN))
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
}
if(ret != IHEVCD_SUCCESS)
break;
if(ps_codec->s_parse.i4_end_of_frame)
break;
}
else
{
ret = IHEVCD_SUCCESS;
break;
}
/* Allocate dynamic bitstream buffer once SPS is decoded */
if((ps_codec->u4_allocate_dynamic_done == 0) && ps_codec->i4_sps_done)
{
WORD32 ret;
ret = ihevcd_allocate_dynamic_bufs(ps_codec);
if(ret != IV_SUCCESS)
{
/* Free any dynamic buffers that are allocated */
ihevcd_free_dynamic_bufs(ps_codec);
ps_codec->i4_error_code = IVD_MEM_ALLOC_FAILED;
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IVD_MEM_ALLOC_FAILED;
return IV_FAIL;
}
}
BREAK_AFTER_SLICE_NAL();
}
if((ps_codec->u4_pic_cnt == 0) && (ret != IHEVCD_SUCCESS))
{
ps_codec->i4_error_code = ret;
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
return IV_FAIL;
}
if(1 == ps_codec->i4_pic_present)
{
WORD32 i;
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
ps_codec->i4_first_pic_done = 1;
/*TODO temporary fix: end_of_frame is checked before adding format conversion to job queue */
if(ps_codec->i4_num_cores > 1 && ps_codec->s_parse.i4_end_of_frame)
{
/* Add job queue for format conversion / frame copy for each ctb row */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
if((ps_codec->ps_disp_buf) &&
((0 == ps_codec->i4_share_disp_buf) || (IV_YUV_420P == ps_codec->e_chroma_fmt)))
{
/* If format conversion jobs were not issued in pic_init() add them here */
if((0 == ps_codec->u4_enable_fmt_conv_ahead) ||
(ps_codec->i4_disp_buf_id == ps_proc->i4_cur_pic_buf_id))
for(i = 0; i < ps_sps->i2_pic_ht_in_ctb; i++)
{
proc_job_t s_job;
IHEVCD_ERROR_T ret;
s_job.i4_cmd = CMD_FMTCONV;
s_job.i2_ctb_cnt = 0;
s_job.i2_ctb_x = 0;
s_job.i2_ctb_y = i;
s_job.i2_slice_idx = 0;
s_job.i4_tu_coeff_data_ofst = 0;
ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq,
&s_job, sizeof(proc_job_t), 1);
if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS)
return (WORD32)ret;
}
}
/* Reached end of frame : Signal terminate */
/* The terminate flag is checked only after all the jobs are dequeued */
ret = ihevcd_jobq_terminate((jobq_t *)ps_codec->s_parse.pv_proc_jobq);
while(1)
{
IHEVCD_ERROR_T ret;
proc_job_t s_job;
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
ret = ihevcd_jobq_dequeue((jobq_t *)ps_proc->pv_proc_jobq, &s_job,
sizeof(proc_job_t), 1);
if((IHEVCD_ERROR_T)IHEVCD_SUCCESS != ret)
break;
ps_proc->i4_ctb_cnt = s_job.i2_ctb_cnt;
ps_proc->i4_ctb_x = s_job.i2_ctb_x;
ps_proc->i4_ctb_y = s_job.i2_ctb_y;
ps_proc->i4_cur_slice_idx = s_job.i2_slice_idx;
if(CMD_PROCESS == s_job.i4_cmd)
{
ihevcd_init_proc_ctxt(ps_proc, s_job.i4_tu_coeff_data_ofst);
ihevcd_process(ps_proc);
}
else if(CMD_FMTCONV == s_job.i4_cmd)
{
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
WORD32 num_rows = 1 << ps_sps->i1_log2_ctb_size;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
num_rows = MIN(num_rows, (ps_codec->i4_disp_ht - (s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size)));
if(num_rows < 0)
num_rows = 0;
ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size,
num_rows);
}
}
}
/* In case of non-shared mode and while running in single core mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
else if((ps_codec->ps_disp_buf) && ((0 == ps_codec->i4_share_disp_buf) ||
(IV_YUV_420P == ps_codec->e_chroma_fmt)) &&
(ps_codec->s_parse.i4_end_of_frame))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[proc_idx];
/* Set remaining number of rows to be processed */
ps_codec->s_fmt_conv.i4_num_rows = ps_codec->i4_disp_ht
- ps_codec->s_fmt_conv.i4_cur_row;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
if(ps_codec->s_fmt_conv.i4_num_rows < 0)
ps_codec->s_fmt_conv.i4_num_rows = 0;
ret = ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
ps_codec->s_fmt_conv.i4_cur_row,
ps_codec->s_fmt_conv.i4_num_rows);
ps_codec->s_fmt_conv.i4_cur_row += ps_codec->s_fmt_conv.i4_num_rows;
}
DEBUG_DUMP_MV_MAP(ps_codec);
/* Mark MV Buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_mv_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_mv_bank_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for display */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_DISP);
/* Insert the current picture as short term reference */
ihevc_dpb_mgr_insert_ref((dpb_mgr_t *)ps_codec->pv_dpb_mgr,
ps_codec->as_process[proc_idx].ps_cur_pic,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id);
/* If a frame was displayed (in non-shared mode), then release it from display manager */
if((0 == ps_codec->i4_share_disp_buf) && (ps_codec->ps_disp_buf))
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
/* Wait for threads */
for(i = 0; i < (ps_codec->i4_num_cores - 1); i++)
{
if(ps_codec->ai4_process_thread_created[i])
{
ithread_join(ps_codec->apv_process_thread_handle[i], NULL);
ps_codec->ai4_process_thread_created[i] = 0;
}
}
DEBUG_VALIDATE_PADDED_REGION(&ps_codec->as_process[proc_idx]);
if(ps_codec->u4_pic_cnt > 0)
{
DEBUG_DUMP_PIC_PU(ps_codec);
}
DEBUG_DUMP_PIC_BUFFERS(ps_codec);
/* Increment the number of pictures decoded */
ps_codec->u4_pic_cnt++;
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
return ret;
}
| 173,975 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void Sp_replace_regexp(js_State *J)
{
js_Regexp *re;
const char *source, *s, *r;
js_Buffer *sb = NULL;
int n, x;
Resub m;
source = checkstring(J, 0);
re = js_toregexp(J, 1);
if (js_regexec(re->prog, source, &m, 0)) {
js_copy(J, 0);
return;
}
re->last = 0;
loop:
s = m.sub[0].sp;
n = m.sub[0].ep - m.sub[0].sp;
if (js_iscallable(J, 2)) {
js_copy(J, 2);
js_pushundefined(J);
for (x = 0; m.sub[x].sp; ++x) /* arg 0..x: substring and subexps that matched */
js_pushlstring(J, m.sub[x].sp, m.sub[x].ep - m.sub[x].sp);
js_pushnumber(J, s - source); /* arg x+2: offset within search string */
js_copy(J, 0); /* arg x+3: search string */
js_call(J, 2 + x);
r = js_tostring(J, -1);
js_putm(J, &sb, source, s);
js_puts(J, &sb, r);
js_pop(J, 1);
} else {
r = js_tostring(J, 2);
js_putm(J, &sb, source, s);
while (*r) {
if (*r == '$') {
switch (*(++r)) {
case 0: --r; /* end of string; back up */
/* fallthrough */
case '$': js_putc(J, &sb, '$'); break;
case '`': js_putm(J, &sb, source, s); break;
case '\'': js_puts(J, &sb, s + n); break;
case '&':
js_putm(J, &sb, s, s + n);
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
x = *r - '0';
if (r[1] >= '0' && r[1] <= '9')
x = x * 10 + *(++r) - '0';
if (x > 0 && x < m.nsub) {
js_putm(J, &sb, m.sub[x].sp, m.sub[x].ep);
} else {
js_putc(J, &sb, '$');
if (x > 10) {
js_putc(J, &sb, '0' + x / 10);
js_putc(J, &sb, '0' + x % 10);
} else {
js_putc(J, &sb, '0' + x);
}
}
break;
default:
js_putc(J, &sb, '$');
js_putc(J, &sb, *r);
break;
}
++r;
} else {
js_putc(J, &sb, *r++);
}
}
}
if (re->flags & JS_REGEXP_G) {
source = m.sub[0].ep;
if (n == 0) {
if (*source)
js_putc(J, &sb, *source++);
else
goto end;
}
if (!js_regexec(re->prog, source, &m, REG_NOTBOL))
goto loop;
}
end:
js_puts(J, &sb, s + n);
js_putc(J, &sb, 0);
if (js_try(J)) {
js_free(J, sb);
js_throw(J);
}
js_pushstring(J, sb ? sb->s : "");
js_endtry(J);
js_free(J, sb);
}
Commit Message: Bug 700937: Limit recursion in regexp matcher.
Also handle negative return code as an error in the JS bindings.
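The fixed listing routes both js_regexec() call sites through js_doregexec(), presumably a checking wrapper whose definition is not part of this row. A hedged sketch of the general shape of such a fix, with a toy matcher invented for illustration (MAXREC and Match() are not mujs internals):

#include <cstdio>

constexpr int MAXREC = 1024;

// Returns 1 on match, 0 on no match, -1 once the recursion limit is hit.
int Match(const char* re, const char* text, int depth) {
  if (depth > MAXREC) return -1;                 // bound the recursion
  if (*re == '\0') return 1;
  bool first = (*re == '.') ? (*text != '\0') : (*re == *text);
  return first ? Match(re + 1, text + 1, depth + 1) : 0;
}

int main() {
  int r = Match("a.c", "abc", 0);
  if (r < 0)
    std::printf("regexp engine error\n");        // an error, not merely "no match"
  else
    std::printf("match=%d\n", r);
}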
CWE ID: CWE-400 | static void Sp_replace_regexp(js_State *J)
{
js_Regexp *re;
const char *source, *s, *r;
js_Buffer *sb = NULL;
int n, x;
Resub m;
source = checkstring(J, 0);
re = js_toregexp(J, 1);
if (js_doregexec(J, re->prog, source, &m, 0)) {
js_copy(J, 0);
return;
}
re->last = 0;
loop:
s = m.sub[0].sp;
n = m.sub[0].ep - m.sub[0].sp;
if (js_iscallable(J, 2)) {
js_copy(J, 2);
js_pushundefined(J);
for (x = 0; m.sub[x].sp; ++x) /* arg 0..x: substring and subexps that matched */
js_pushlstring(J, m.sub[x].sp, m.sub[x].ep - m.sub[x].sp);
js_pushnumber(J, s - source); /* arg x+2: offset within search string */
js_copy(J, 0); /* arg x+3: search string */
js_call(J, 2 + x);
r = js_tostring(J, -1);
js_putm(J, &sb, source, s);
js_puts(J, &sb, r);
js_pop(J, 1);
} else {
r = js_tostring(J, 2);
js_putm(J, &sb, source, s);
while (*r) {
if (*r == '$') {
switch (*(++r)) {
case 0: --r; /* end of string; back up */
/* fallthrough */
case '$': js_putc(J, &sb, '$'); break;
case '`': js_putm(J, &sb, source, s); break;
case '\'': js_puts(J, &sb, s + n); break;
case '&':
js_putm(J, &sb, s, s + n);
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
x = *r - '0';
if (r[1] >= '0' && r[1] <= '9')
x = x * 10 + *(++r) - '0';
if (x > 0 && x < m.nsub) {
js_putm(J, &sb, m.sub[x].sp, m.sub[x].ep);
} else {
js_putc(J, &sb, '$');
if (x > 10) {
js_putc(J, &sb, '0' + x / 10);
js_putc(J, &sb, '0' + x % 10);
} else {
js_putc(J, &sb, '0' + x);
}
}
break;
default:
js_putc(J, &sb, '$');
js_putc(J, &sb, *r);
break;
}
++r;
} else {
js_putc(J, &sb, *r++);
}
}
}
if (re->flags & JS_REGEXP_G) {
source = m.sub[0].ep;
if (n == 0) {
if (*source)
js_putc(J, &sb, *source++);
else
goto end;
}
if (!js_doregexec(J, re->prog, source, &m, REG_NOTBOL))
goto loop;
}
end:
js_puts(J, &sb, s + n);
js_putc(J, &sb, 0);
if (js_try(J)) {
js_free(J, sb);
js_throw(J);
}
js_pushstring(J, sb ? sb->s : "");
js_endtry(J);
js_free(J, sb);
}
| 169,699 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: virtual InputMethodDescriptors* GetSupportedInputMethods() {
if (!initialized_successfully_) {
InputMethodDescriptors* result = new InputMethodDescriptors;
result->push_back(input_method::GetFallbackInputMethodDescriptor());
return result;
}
return chromeos::GetSupportedInputMethodDescriptors();
}
Commit Message: Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | virtual input_method::InputMethodDescriptors* GetSupportedInputMethods() {
if (!initialized_successfully_) {
input_method::InputMethodDescriptors* result =
new input_method::InputMethodDescriptors;
result->push_back(input_method::GetFallbackInputMethodDescriptor());
return result;
}
return input_method::GetSupportedInputMethodDescriptors();
}
| 170,492 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void copyIPv6IfDifferent(void * dest, const void * src)
{
if(dest != src) {
memcpy(dest, src, sizeof(struct in6_addr));
}
}
Commit Message: pcpserver.c: copyIPv6IfDifferent() check for NULL src argument
CWE ID: CWE-476 | static void copyIPv6IfDifferent(void * dest, const void * src)
{
if(dest != src && src != NULL) {
memcpy(dest, src, sizeof(struct in6_addr));
}
}
| 169,665 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void BinaryUploadService::IsAuthorized(AuthorizationCallback callback) {
if (!timer_.IsRunning()) {
timer_.Start(FROM_HERE, base::TimeDelta::FromHours(24), this,
&BinaryUploadService::ResetAuthorizationData);
}
if (!can_upload_data_.has_value()) {
if (!pending_validate_data_upload_request_) {
std::string dm_token = GetDMToken();
if (dm_token.empty()) {
std::move(callback).Run(false);
return;
}
pending_validate_data_upload_request_ = true;
auto request = std::make_unique<ValidateDataUploadRequest>(base::BindOnce(
&BinaryUploadService::ValidateDataUploadRequestCallback,
weakptr_factory_.GetWeakPtr()));
request->set_dm_token(dm_token);
UploadForDeepScanning(std::move(request));
}
authorization_callbacks_.push_back(std::move(callback));
return;
}
std::move(callback).Run(can_upload_data_.value());
}
Commit Message: Migrate download_protection code to new DM token class.
Migrates RetrieveDMToken calls to use the new BrowserDMToken class.
Bug: 1020296
Change-Id: Icef580e243430d73b6c1c42b273a8540277481d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1904234
Commit-Queue: Dominique Fauteux-Chapleau <[email protected]>
Reviewed-by: Tien Mai <[email protected]>
Reviewed-by: Daniel Rubery <[email protected]>
Cr-Commit-Position: refs/heads/master@{#714196}
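A sketch of the migration pattern this commit describes: replacing a raw std::string token (validated with empty()) by a small value type exposing is_valid() and value(), so call sites cannot forward an invalid token by accident. The class below is illustrative only and is not Chromium's BrowserDMToken.

#include <cstdio>
#include <string>

class DMToken {
 public:
  explicit DMToken(std::string value) : value_(std::move(value)) {}
  bool is_valid() const { return !value_.empty(); }
  const std::string& value() const { return value_; }

 private:
  std::string value_;
};

int main() {
  DMToken token("example-token");  // an empty string would be rejected below
  if (!token.is_valid()) {
    std::printf("rejected\n");
    return 1;
  }
  std::printf("token=%s\n", token.value().c_str());
}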
CWE ID: CWE-20 | void BinaryUploadService::IsAuthorized(AuthorizationCallback callback) {
if (!timer_.IsRunning()) {
timer_.Start(FROM_HERE, base::TimeDelta::FromHours(24), this,
&BinaryUploadService::ResetAuthorizationData);
}
if (!can_upload_data_.has_value()) {
if (!pending_validate_data_upload_request_) {
auto dm_token = GetDMToken();
if (!dm_token.is_valid()) {
std::move(callback).Run(false);
return;
}
pending_validate_data_upload_request_ = true;
auto request = std::make_unique<ValidateDataUploadRequest>(base::BindOnce(
&BinaryUploadService::ValidateDataUploadRequestCallback,
weakptr_factory_.GetWeakPtr()));
request->set_dm_token(dm_token.value());
UploadForDeepScanning(std::move(request));
}
authorization_callbacks_.push_back(std::move(callback));
return;
}
std::move(callback).Run(can_upload_data_.value());
}
| 172,355 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
media::VideoDecodeAccelerator::Client* client,
base::ProcessHandle renderer_process)
: client_(client),
egl_config_(NULL),
state_(kUninitialized),
pictures_requested_(false),
renderer_process_(renderer_process),
last_input_buffer_id_(-1),
inputs_before_decode_(0) {
memset(&input_stream_info_, 0, sizeof(input_stream_info_));
memset(&output_stream_info_, 0, sizeof(output_stream_info_));
}
Commit Message: Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
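The visible change in the listings is that the accelerator no longer receives or stores the renderer's base::ProcessHandle. As a loose, Windows-only sketch of what brokered handle duplication means: a privileged broker calls DuplicateHandle on behalf of a less-privileged caller, so the caller never holds the peer's process handle. The helper and its (absent) policy check are invented for illustration and are not Chromium's implementation.

#include <windows.h>
#include <cstdio>

// Broker-side helper: duplicate `source` (a handle in the broker) into the
// process identified by `target_pid`.
bool BrokerDuplicateHandle(HANDLE source, DWORD target_pid, HANDLE* out) {
  HANDLE target = OpenProcess(PROCESS_DUP_HANDLE, FALSE, target_pid);
  if (target == nullptr) return false;
  BOOL ok = DuplicateHandle(GetCurrentProcess(), source, target, out,
                            0, FALSE, DUPLICATE_SAME_ACCESS);
  CloseHandle(target);
  return ok != FALSE;
}

int main() {
  HANDLE ev = CreateEventW(nullptr, TRUE, FALSE, nullptr);
  if (ev == nullptr) return 1;
  HANDLE dup = nullptr;
  // Smoke test: duplicate into our own process.
  bool ok = BrokerDuplicateHandle(ev, GetCurrentProcessId(), &dup);
  std::printf("duplicated: %d\n", static_cast<int>(ok));
  if (dup != nullptr) CloseHandle(dup);
  CloseHandle(ev);
  return 0;
}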
CWE ID: | DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
media::VideoDecodeAccelerator::Client* client)
: client_(client),
egl_config_(NULL),
state_(kUninitialized),
pictures_requested_(false),
last_input_buffer_id_(-1),
inputs_before_decode_(0) {
memset(&input_stream_info_, 0, sizeof(input_stream_info_));
memset(&output_stream_info_, 0, sizeof(output_stream_info_));
}
| 170,940 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void smp_task_done(struct sas_task *task)
{
if (!del_timer(&task->slow_task->timer))
return;
complete(&task->slow_task->completion);
}
Commit Message: scsi: libsas: fix a race condition when smp task timeout
When the lldd is processing the complete sas task in interrupt and set the
task stat as SAS_TASK_STATE_DONE, the smp timeout timer is able to be
triggered at the same time. And smp_task_timedout() will complete the task
wheter the SAS_TASK_STATE_DONE is set or not. Then the sas task may freed
before lldd end the interrupt process. Thus a use-after-free will happen.
Fix this by calling the complete() only when SAS_TASK_STATE_DONE is not
set. And remove the check of the return value of the del_timer(). Once the
LLDD sets DONE, it must call task->done(), which will call
smp_task_done()->complete() and the task will be completed and freed
correctly.
Reported-by: chenxiang <[email protected]>
Signed-off-by: Jason Yan <[email protected]>
CC: John Garry <[email protected]>
CC: Johannes Thumshirn <[email protected]>
CC: Ewan Milne <[email protected]>
CC: Christoph Hellwig <[email protected]>
CC: Tomas Henzl <[email protected]>
CC: Dan Williams <[email protected]>
CC: Hannes Reinecke <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: John Garry <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
CWE ID: CWE-416 | static void smp_task_done(struct sas_task *task)
{
del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
| 169,782 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: Horizontal_Sweep_Drop( RAS_ARGS Short y,
FT_F26Dot6 x1,
FT_F26Dot6 x2,
PProfile left,
PProfile right )
{
Long e1, e2, pxl;
PByte bits;
Byte f1;
/* During the horizontal sweep, we only take care of drop-outs */
/* e1 + <-- pixel center */
/* | */
/* x1 ---+--> <-- contour */
/* | */
/* | */
/* x2 <--+--- <-- contour */
/* | */
/* | */
/* e2 + <-- pixel center */
e1 = CEILING( x1 );
e2 = FLOOR ( x2 );
pxl = e1;
if ( e1 > e2 )
{
Int dropOutControl = left->flags & 7;
if ( e1 == e2 + ras.precision )
{
switch ( dropOutControl )
{
case 0: /* simple drop-outs including stubs */
pxl = e2;
break;
case 4: /* smart drop-outs including stubs */
pxl = FLOOR( ( x1 + x2 - 1 ) / 2 + ras.precision_half );
break;
case 1: /* simple drop-outs excluding stubs */
case 5: /* smart drop-outs excluding stubs */
/* see Vertical_Sweep_Drop for details */
/* rightmost stub test */
if ( left->next == right &&
left->height <= 0 &&
!( left->flags & Overshoot_Top &&
x2 - x1 >= ras.precision_half ) )
return;
/* leftmost stub test */
if ( right->next == left &&
left->start == y &&
!( left->flags & Overshoot_Bottom &&
x2 - x1 >= ras.precision_half ) )
return;
if ( dropOutControl == 1 )
pxl = e2;
else
pxl = FLOOR( ( x1 + x2 - 1 ) / 2 + ras.precision_half );
break;
default: /* modes 2, 3, 6, 7 */
return; /* no drop-out control */
}
/* undocumented but confirmed: If the drop-out would result in a */
/* pixel outside of the bounding box, use the pixel inside of the */
/* bounding box instead */
if ( pxl < 0 )
pxl = e1;
else if ( TRUNC( pxl ) >= ras.target.rows )
pxl = e2;
/* check that the other pixel isn't set */
e1 = pxl == e1 ? e2 : e1;
e1 = TRUNC( e1 );
bits = ras.bTarget + ( y >> 3 );
f1 = (Byte)( 0x80 >> ( y & 7 ) );
bits -= e1 * ras.target.pitch;
if ( ras.target.pitch > 0 )
bits += ( ras.target.rows - 1 ) * ras.target.pitch;
if ( e1 >= 0 &&
e1 < ras.target.rows &&
*bits & f1 )
return;
}
else
return;
}
bits = ras.bTarget + ( y >> 3 );
f1 = (Byte)( 0x80 >> ( y & 7 ) );
e1 = TRUNC( pxl );
if ( e1 >= 0 && e1 < ras.target.rows )
{
bits -= e1 * ras.target.pitch;
if ( ras.target.pitch > 0 )
bits += ( ras.target.rows - 1 ) * ras.target.pitch;
bits[0] |= f1;
}
}
Commit Message:
CWE ID: CWE-119 | Horizontal_Sweep_Drop( RAS_ARGS Short y,
FT_F26Dot6 x1,
FT_F26Dot6 x2,
PProfile left,
PProfile right )
{
Long e1, e2, pxl;
PByte bits;
Byte f1;
/* During the horizontal sweep, we only take care of drop-outs */
/* e1 + <-- pixel center */
/* | */
/* x1 ---+--> <-- contour */
/* | */
/* | */
/* x2 <--+--- <-- contour */
/* | */
/* | */
/* e2 + <-- pixel center */
e1 = CEILING( x1 );
e2 = FLOOR ( x2 );
pxl = e1;
if ( e1 > e2 )
{
Int dropOutControl = left->flags & 7;
if ( e1 == e2 + ras.precision )
{
switch ( dropOutControl )
{
case 0: /* simple drop-outs including stubs */
pxl = e2;
break;
case 4: /* smart drop-outs including stubs */
pxl = FLOOR( ( x1 + x2 - 1 ) / 2 + ras.precision_half );
break;
case 1: /* simple drop-outs excluding stubs */
case 5: /* smart drop-outs excluding stubs */
/* see Vertical_Sweep_Drop for details */
/* rightmost stub test */
if ( left->next == right &&
left->height <= 0 &&
!( left->flags & Overshoot_Top &&
x2 - x1 >= ras.precision_half ) )
return;
/* leftmost stub test */
if ( right->next == left &&
left->start == y &&
!( left->flags & Overshoot_Bottom &&
x2 - x1 >= ras.precision_half ) )
return;
if ( dropOutControl == 1 )
pxl = e2;
else
pxl = FLOOR( ( x1 + x2 - 1 ) / 2 + ras.precision_half );
break;
default: /* modes 2, 3, 6, 7 */
return; /* no drop-out control */
}
/* undocumented but confirmed: If the drop-out would result in a */
/* pixel outside of the bounding box, use the pixel inside of the */
/* bounding box instead */
if ( pxl < 0 )
pxl = e1;
else if ( (ULong)( TRUNC( pxl ) ) >= ras.target.rows )
pxl = e2;
/* check that the other pixel isn't set */
e1 = pxl == e1 ? e2 : e1;
e1 = TRUNC( e1 );
bits = ras.bTarget + ( y >> 3 );
f1 = (Byte)( 0x80 >> ( y & 7 ) );
bits -= e1 * ras.target.pitch;
if ( ras.target.pitch > 0 )
bits += ( ras.target.rows - 1 ) * ras.target.pitch;
if ( e1 >= 0 &&
(ULong)e1 < ras.target.rows &&
*bits & f1 )
return;
}
else
return;
}
bits = ras.bTarget + ( y >> 3 );
f1 = (Byte)( 0x80 >> ( y & 7 ) );
e1 = TRUNC( pxl );
if ( e1 >= 0 && (ULong)e1 < ras.target.rows )
{
bits -= e1 * ras.target.pitch;
if ( ras.target.pitch > 0 )
bits += ( ras.target.rows - 1 ) * ras.target.pitch;
bits[0] |= f1;
}
}
| 164,852 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int v9fs_device_realize_common(V9fsState *s, Error **errp)
{
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
int i, len;
struct stat stat;
FsDriverEntry *fse;
V9fsPath path;
int rc = 1;
/* initialize pdu allocator */
QLIST_INIT(&s->free_list);
QLIST_INIT(&s->active_list);
for (i = 0; i < (MAX_REQ - 1); i++) {
QLIST_INSERT_HEAD(&s->free_list, &v->pdus[i], next);
v->pdus[i].s = s;
v->pdus[i].idx = i;
}
v9fs_path_init(&path);
fse = get_fsdev_fsentry(s->fsconf.fsdev_id);
if (!fse) {
/* We don't have a fsdev identified by fsdev_id */
error_setg(errp, "9pfs device couldn't find fsdev with the "
"id = %s",
s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
goto out;
}
if (!s->fsconf.tag) {
/* we haven't specified a mount_tag */
error_setg(errp, "fsdev with id %s needs mount_tag arguments",
s->fsconf.fsdev_id);
goto out;
}
s->ctx.export_flags = fse->export_flags;
s->ctx.fs_root = g_strdup(fse->path);
s->ctx.exops.get_st_gen = NULL;
len = strlen(s->fsconf.tag);
if (len > MAX_TAG_LEN - 1) {
error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
"maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
goto out;
}
s->tag = g_strdup(s->fsconf.tag);
s->ctx.uid = -1;
s->ops = fse->ops;
s->fid_list = NULL;
qemu_co_rwlock_init(&s->rename_lock);
if (s->ops->init(&s->ctx) < 0) {
error_setg(errp, "9pfs Failed to initialize fs-driver with id:%s"
" and export path:%s", s->fsconf.fsdev_id, s->ctx.fs_root);
goto out;
}
/*
* Check details of export path, We need to use fs driver
* call back to do that. Since we are in the init path, we don't
* use co-routines here.
*/
if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
error_setg(errp,
"error in converting name to path %s", strerror(errno));
goto out;
}
if (s->ops->lstat(&s->ctx, &path, &stat)) {
error_setg(errp, "share path %s does not exist", fse->path);
goto out;
} else if (!S_ISDIR(stat.st_mode)) {
error_setg(errp, "share path %s is not a directory", fse->path);
goto out;
}
v9fs_path_free(&path);
rc = 0;
out:
if (rc) {
g_free(s->ctx.fs_root);
g_free(s->tag);
v9fs_path_free(&path);
}
return rc;
}
Commit Message:
CWE ID: CWE-400 | int v9fs_device_realize_common(V9fsState *s, Error **errp)
{
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
int i, len;
struct stat stat;
FsDriverEntry *fse;
V9fsPath path;
int rc = 1;
/* initialize pdu allocator */
QLIST_INIT(&s->free_list);
QLIST_INIT(&s->active_list);
for (i = 0; i < (MAX_REQ - 1); i++) {
QLIST_INSERT_HEAD(&s->free_list, &v->pdus[i], next);
v->pdus[i].s = s;
v->pdus[i].idx = i;
}
v9fs_path_init(&path);
fse = get_fsdev_fsentry(s->fsconf.fsdev_id);
if (!fse) {
/* We don't have a fsdev identified by fsdev_id */
error_setg(errp, "9pfs device couldn't find fsdev with the "
"id = %s",
s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
goto out;
}
if (!s->fsconf.tag) {
/* we haven't specified a mount_tag */
error_setg(errp, "fsdev with id %s needs mount_tag arguments",
s->fsconf.fsdev_id);
goto out;
}
s->ctx.export_flags = fse->export_flags;
s->ctx.fs_root = g_strdup(fse->path);
s->ctx.exops.get_st_gen = NULL;
len = strlen(s->fsconf.tag);
if (len > MAX_TAG_LEN - 1) {
error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
"maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
goto out;
}
s->tag = g_strdup(s->fsconf.tag);
s->ctx.uid = -1;
s->ops = fse->ops;
s->fid_list = NULL;
qemu_co_rwlock_init(&s->rename_lock);
if (s->ops->init(&s->ctx) < 0) {
error_setg(errp, "9pfs Failed to initialize fs-driver with id:%s"
" and export path:%s", s->fsconf.fsdev_id, s->ctx.fs_root);
goto out;
}
/*
* Check details of export path, We need to use fs driver
* call back to do that. Since we are in the init path, we don't
* use co-routines here.
*/
if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
error_setg(errp,
"error in converting name to path %s", strerror(errno));
goto out;
}
if (s->ops->lstat(&s->ctx, &path, &stat)) {
error_setg(errp, "share path %s does not exist", fse->path);
goto out;
} else if (!S_ISDIR(stat.st_mode)) {
error_setg(errp, "share path %s is not a directory", fse->path);
goto out;
}
v9fs_path_free(&path);
rc = 0;
out:
if (rc) {
g_free(s->tag);
g_free(s->ctx.fs_root);
v9fs_path_free(&path);
}
return rc;
}
| 164,895 |