instruction (stringclasses 1) | input (stringlengths 90–139k) | output (stringlengths 16–138k) | __index_level_0__ (int64 165k–175k)
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: BrowserContextImpl::~BrowserContextImpl() {
CHECK(!otr_context_);
}
Commit Message:
CWE ID: CWE-20 | BrowserContextImpl::~BrowserContextImpl() {
| 165,417 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
loff_t size;
unsigned long len;
int ret;
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
get_block_t *get_block;
int retries = 0;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
ret = block_page_mkwrite(vma, vmf,
ext4_da_get_block_prep);
} while (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
goto out_ret;
}
lock_page(page);
size = i_size_read(inode);
/* Page got truncated from under us? */
if (page->mapping != mapping || page_offset(page) > size) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
}
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
*/
if (page_has_buffers(page)) {
if (!ext4_walk_page_buffers(NULL, page_buffers(page),
0, len, NULL,
ext4_bh_unmapped)) {
/* Wait so that we don't change page under IO */
wait_for_stable_page(page);
ret = VM_FAULT_LOCKED;
goto out;
}
}
unlock_page(page);
/* OK, we need to fill the hole... */
if (ext4_should_dioread_nolock(inode))
get_block = ext4_get_block_write;
else
get_block = ext4_get_block;
retry_alloc:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = VM_FAULT_SIGBUS;
goto out;
}
ret = block_page_mkwrite(vma, vmf, get_block);
if (!ret && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
ext4_journal_stop(handle);
goto out;
}
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_alloc;
out_ret:
ret = block_page_mkwrite_return(ret);
out:
sb_end_pagefault(inode->i_sb);
return ret;
}
Commit Message: ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
CWE ID: CWE-362 | int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
loff_t size;
unsigned long len;
int ret;
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
get_block_t *get_block;
int retries = 0;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
ret = block_page_mkwrite(vma, vmf,
ext4_da_get_block_prep);
} while (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
goto out_ret;
}
lock_page(page);
size = i_size_read(inode);
/* Page got truncated from under us? */
if (page->mapping != mapping || page_offset(page) > size) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
}
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
*/
if (page_has_buffers(page)) {
if (!ext4_walk_page_buffers(NULL, page_buffers(page),
0, len, NULL,
ext4_bh_unmapped)) {
/* Wait so that we don't change page under IO */
wait_for_stable_page(page);
ret = VM_FAULT_LOCKED;
goto out;
}
}
unlock_page(page);
/* OK, we need to fill the hole... */
if (ext4_should_dioread_nolock(inode))
get_block = ext4_get_block_write;
else
get_block = ext4_get_block;
retry_alloc:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = VM_FAULT_SIGBUS;
goto out;
}
ret = block_page_mkwrite(vma, vmf, get_block);
if (!ret && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
ext4_journal_stop(handle);
goto out;
}
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_alloc;
out_ret:
ret = block_page_mkwrite_return(ret);
out:
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
| 167,489 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CheckFakeData(uint8* audio_data, int frames_written,
double playback_rate) {
size_t length =
(frames_written * algorithm_.bytes_per_frame())
/ algorithm_.bytes_per_channel();
switch (algorithm_.bytes_per_channel()) {
case 4:
DoCheckFakeData<int32>(audio_data, length);
break;
case 2:
DoCheckFakeData<int16>(audio_data, length);
break;
case 1:
DoCheckFakeData<uint8>(audio_data, length);
break;
default:
NOTREACHED() << "Unsupported audio bit depth in crossfade.";
}
}
Commit Message: Protect AudioRendererAlgorithm from invalid step sizes.
BUG=165430
TEST=unittests and asan pass.
Review URL: https://codereview.chromium.org/11573023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@173249 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void CheckFakeData(uint8* audio_data, int frames_written,
| 171,530 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: status_t MPEG4Extractor::readMetaData() {
if (mInitCheck != NO_INIT) {
return mInitCheck;
}
off64_t offset = 0;
status_t err;
bool sawMoovOrSidx = false;
while (!(sawMoovOrSidx && (mMdatFound || mMoofFound))) {
off64_t orig_offset = offset;
err = parseChunk(&offset, 0);
if (err != OK && err != UNKNOWN_ERROR) {
break;
} else if (offset <= orig_offset) {
ALOGE("did not advance: %lld->%lld", (long long)orig_offset, (long long)offset);
err = ERROR_MALFORMED;
break;
} else if (err == UNKNOWN_ERROR) {
sawMoovOrSidx = true;
}
}
if (mInitCheck == OK) {
if (mHasVideo) {
mFileMetaData->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
} else {
mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
}
} else {
mInitCheck = err;
}
CHECK_NE(err, (status_t)NO_INIT);
uint64_t psshsize = 0;
for (size_t i = 0; i < mPssh.size(); i++) {
psshsize += 20 + mPssh[i].datalen;
}
if (psshsize > 0 && psshsize <= UINT32_MAX) {
char *buf = (char*)malloc(psshsize);
char *ptr = buf;
for (size_t i = 0; i < mPssh.size(); i++) {
memcpy(ptr, mPssh[i].uuid, 20); // uuid + length
memcpy(ptr + 20, mPssh[i].data, mPssh[i].datalen);
ptr += (20 + mPssh[i].datalen);
}
mFileMetaData->setData(kKeyPssh, 'pssh', buf, psshsize);
free(buf);
}
return mInitCheck;
}
Commit Message: Check malloc result to avoid NPD
Bug: 28471206
Change-Id: Id5d055d76893d6f53a2e524ff5f282d1ddca3345
CWE ID: CWE-20 | status_t MPEG4Extractor::readMetaData() {
if (mInitCheck != NO_INIT) {
return mInitCheck;
}
off64_t offset = 0;
status_t err;
bool sawMoovOrSidx = false;
while (!(sawMoovOrSidx && (mMdatFound || mMoofFound))) {
off64_t orig_offset = offset;
err = parseChunk(&offset, 0);
if (err != OK && err != UNKNOWN_ERROR) {
break;
} else if (offset <= orig_offset) {
ALOGE("did not advance: %lld->%lld", (long long)orig_offset, (long long)offset);
err = ERROR_MALFORMED;
break;
} else if (err == UNKNOWN_ERROR) {
sawMoovOrSidx = true;
}
}
if (mInitCheck == OK) {
if (mHasVideo) {
mFileMetaData->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
} else {
mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
}
} else {
mInitCheck = err;
}
CHECK_NE(err, (status_t)NO_INIT);
uint64_t psshsize = 0;
for (size_t i = 0; i < mPssh.size(); i++) {
psshsize += 20 + mPssh[i].datalen;
}
if (psshsize > 0 && psshsize <= UINT32_MAX) {
char *buf = (char*)malloc(psshsize);
if (!buf) {
ALOGE("b/28471206");
return NO_MEMORY;
}
char *ptr = buf;
for (size_t i = 0; i < mPssh.size(); i++) {
memcpy(ptr, mPssh[i].uuid, 20); // uuid + length
memcpy(ptr + 20, mPssh[i].data, mPssh[i].datalen);
ptr += (20 + mPssh[i].datalen);
}
mFileMetaData->setData(kKeyPssh, 'pssh', buf, psshsize);
free(buf);
}
return mInitCheck;
}
| 173,548 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: String AXNodeObject::textFromDescendants(AXObjectSet& visited,
bool recursive) const {
if (!canHaveChildren() && recursive)
return String();
StringBuilder accumulatedText;
AXObject* previous = nullptr;
AXObjectVector children;
HeapVector<Member<AXObject>> ownedChildren;
computeAriaOwnsChildren(ownedChildren);
for (AXObject* obj = rawFirstChild(); obj; obj = obj->rawNextSibling()) {
if (!axObjectCache().isAriaOwned(obj))
children.push_back(obj);
}
for (const auto& ownedChild : ownedChildren)
children.push_back(ownedChild);
for (AXObject* child : children) {
if (equalIgnoringCase(child->getAttribute(aria_hiddenAttr), "true"))
continue;
if (previous && accumulatedText.length() &&
!isHTMLSpace(accumulatedText[accumulatedText.length() - 1])) {
if (!isInSameNonInlineBlockFlow(child->getLayoutObject(),
previous->getLayoutObject()))
accumulatedText.append(' ');
}
String result;
if (child->isPresentational())
result = child->textFromDescendants(visited, true);
else
result = recursiveTextAlternative(*child, false, visited);
accumulatedText.append(result);
previous = child;
}
return accumulatedText.toString();
}
Commit Message: Switch to equalIgnoringASCIICase throughout modules/accessibility
BUG=627682
Review-Url: https://codereview.chromium.org/2793913007
Cr-Commit-Position: refs/heads/master@{#461858}
CWE ID: CWE-254 | String AXNodeObject::textFromDescendants(AXObjectSet& visited,
bool recursive) const {
if (!canHaveChildren() && recursive)
return String();
StringBuilder accumulatedText;
AXObject* previous = nullptr;
AXObjectVector children;
HeapVector<Member<AXObject>> ownedChildren;
computeAriaOwnsChildren(ownedChildren);
for (AXObject* obj = rawFirstChild(); obj; obj = obj->rawNextSibling()) {
if (!axObjectCache().isAriaOwned(obj))
children.push_back(obj);
}
for (const auto& ownedChild : ownedChildren)
children.push_back(ownedChild);
for (AXObject* child : children) {
if (equalIgnoringASCIICase(child->getAttribute(aria_hiddenAttr), "true"))
continue;
if (previous && accumulatedText.length() &&
!isHTMLSpace(accumulatedText[accumulatedText.length() - 1])) {
if (!isInSameNonInlineBlockFlow(child->getLayoutObject(),
previous->getLayoutObject()))
accumulatedText.append(' ');
}
String result;
if (child->isPresentational())
result = child->textFromDescendants(visited, true);
else
result = recursiveTextAlternative(*child, false, visited);
accumulatedText.append(result);
previous = child;
}
return accumulatedText.toString();
}
| 171,922 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
const OMX_PTR param) {
const int32_t indexFull = index;
switch (indexFull) {
case OMX_IndexParamVideoBitrate:
return internalSetBitrateParams(
(const OMX_VIDEO_PARAM_BITRATETYPE *)param);
case OMX_IndexParamVideoVp8:
return internalSetVp8Params(
(const OMX_VIDEO_PARAM_VP8TYPE *)param);
case OMX_IndexParamVideoAndroidVp8Encoder:
return internalSetAndroidVp8Params(
(const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
const OMX_PTR param) {
const int32_t indexFull = index;
switch (indexFull) {
case OMX_IndexParamVideoBitrate: {
const OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(const OMX_VIDEO_PARAM_BITRATETYPE*) param;
if (!isValidOMXParam(bitRate)) {
return OMX_ErrorBadParameter;
}
return internalSetBitrateParams(bitRate);
}
case OMX_IndexParamVideoVp8: {
const OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
(const OMX_VIDEO_PARAM_VP8TYPE*) param;
if (!isValidOMXParam(vp8Params)) {
return OMX_ErrorBadParameter;
}
return internalSetVp8Params(vp8Params);
}
case OMX_IndexParamVideoAndroidVp8Encoder: {
const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
(const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE*) param;
if (!isValidOMXParam(vp8AndroidParams)) {
return OMX_ErrorBadParameter;
}
return internalSetAndroidVp8Params(vp8AndroidParams);
}
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
}
}
| 174,214 |
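The pattern in this fix generalizes: any handler that accepts a caller-supplied, size-prefixed struct through an untyped pointer should validate the declared size before reading other fields. A minimal standalone sketch of that kind of check, in plain C with illustrative names (this is not the AOSP `isValidOMXParam` implementation):

```c
#include <stddef.h>
#include <stdint.h>

/* OMX-style params begin with an nSize field declaring the caller's
 * idea of the struct size. Reject buffers too small for the type we
 * are about to read, before touching any other field. */
typedef struct {
    uint32_t nSize;        /* first field by convention, as in OMX structs */
    uint32_t nVersion;
    uint32_t nTargetBitrate;
} example_bitrate_param;   /* illustrative stand-in type */

static int is_valid_param(const void *p, size_t expected)
{
    if (p == NULL)
        return 0;
    /* Assumes the OMX contract that the buffer holds at least the
     * leading size field; only then compare against the full type. */
    return ((const example_bitrate_param *)p)->nSize >= expected;
}

/* Usage sketch:
 *   if (!is_valid_param(param, sizeof(example_bitrate_param)))
 *       return OMX_ErrorBadParameter;
 */
```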
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
int ret;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
param.channels = channels;
param.destroy_on_close =
info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
hwname = kasprintf(GFP_KERNEL, "%.*s",
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
(char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
if (!hwname)
return -ENOMEM;
param.hwname = hwname;
}
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
else
param.use_chanctx = (param.channels > 1);
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
param.reg_alpha2 =
nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
return -EINVAL;
param.regd = hwsim_world_regdom_custom[idx];
}
ret = mac80211_hwsim_new_radio(info, &param);
kfree(hwname);
return ret;
}
Commit Message: mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl()
'hwname' is malloced in hwsim_new_radio_nl() and should be freed
before leaving from the error handling cases, otherwise it will cause
memory leak.
Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length")
Signed-off-by: Wei Yongjun <[email protected]>
Reviewed-by: Ben Hutchings <[email protected]>
Signed-off-by: Johannes Berg <[email protected]>
CWE ID: CWE-772 | static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
int ret;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
param.channels = channels;
param.destroy_on_close =
info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
hwname = kasprintf(GFP_KERNEL, "%.*s",
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
(char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
if (!hwname)
return -ENOMEM;
param.hwname = hwname;
}
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
else
param.use_chanctx = (param.channels > 1);
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
param.reg_alpha2 =
nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
kfree(hwname);
return -EINVAL;
}
param.regd = hwsim_world_regdom_custom[idx];
}
ret = mac80211_hwsim_new_radio(info, &param);
kfree(hwname);
return ret;
}
| 169,302 |
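The leak fixed here has a recurring CWE-772 shape: allocate early, then return early on a later error. A common C idiom that removes the whole class is funneling every failure through a single cleanup label. A minimal sketch under that assumption — `parse_name` and its validation step are made up for illustration; this is not the kernel code:

```c
#include <stdlib.h>
#include <string.h>

/* Centralize cleanup so no error path can leak the allocation. */
static int parse_name(const char *attr, char **out)
{
    int ret = 0;
    char *name = NULL;

    if (attr) {
        name = strdup(attr);          /* stands in for kasprintf() */
        if (!name)
            return -1;                /* nothing allocated yet */
    }

    if (name && name[0] == '\0') {    /* hypothetical validation failure */
        ret = -2;
        goto out;                     /* still reaches the single free */
    }

    *out = name;                      /* success: ownership transferred */
    return 0;

out:
    free(name);                       /* free(NULL) is a harmless no-op */
    return ret;
}
```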
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int main(int argc, char *argv[])
{
FILE *iplist = NULL;
plist_t root_node = NULL;
char *plist_out = NULL;
uint32_t size = 0;
int read_size = 0;
char *plist_entire = NULL;
struct stat filestats;
options_t *options = parse_arguments(argc, argv);
if (!options)
{
print_usage(argc, argv);
return 0;
}
iplist = fopen(options->in_file, "rb");
if (!iplist) {
free(options);
return 1;
}
stat(options->in_file, &filestats);
plist_entire = (char *) malloc(sizeof(char) * (filestats.st_size + 1));
read_size = fread(plist_entire, sizeof(char), filestats.st_size, iplist);
fclose(iplist);
if (memcmp(plist_entire, "bplist00", 8) == 0)
{
plist_from_bin(plist_entire, read_size, &root_node);
plist_to_xml(root_node, &plist_out, &size);
}
else
{
plist_from_xml(plist_entire, read_size, &root_node);
plist_to_bin(root_node, &plist_out, &size);
}
plist_free(root_node);
free(plist_entire);
if (plist_out)
{
if (options->out_file != NULL)
{
FILE *oplist = fopen(options->out_file, "wb");
if (!oplist) {
free(options);
return 1;
}
fwrite(plist_out, size, sizeof(char), oplist);
fclose(oplist);
}
else
fwrite(plist_out, size, sizeof(char), stdout);
free(plist_out);
}
else
printf("ERROR: Failed to convert input file.\n");
free(options);
return 0;
}
Commit Message: plistutil: Prevent OOB heap buffer read by checking input size
As pointed out in #87 plistutil would do a memcmp with a heap buffer
without checking the size. If the size is less than 8 it would read
beyond the bounds of this heap buffer. This commit prevents that.
CWE ID: CWE-125 | int main(int argc, char *argv[])
{
FILE *iplist = NULL;
plist_t root_node = NULL;
char *plist_out = NULL;
uint32_t size = 0;
int read_size = 0;
char *plist_entire = NULL;
struct stat filestats;
options_t *options = parse_arguments(argc, argv);
if (!options)
{
print_usage(argc, argv);
return 0;
}
iplist = fopen(options->in_file, "rb");
if (!iplist) {
free(options);
return 1;
}
stat(options->in_file, &filestats);
if (filestats.st_size < 8) {
printf("ERROR: Input file is too small to contain valid plist data.\n");
return -1;
}
plist_entire = (char *) malloc(sizeof(char) * (filestats.st_size + 1));
read_size = fread(plist_entire, sizeof(char), filestats.st_size, iplist);
fclose(iplist);
if (memcmp(plist_entire, "bplist00", 8) == 0)
{
plist_from_bin(plist_entire, read_size, &root_node);
plist_to_xml(root_node, &plist_out, &size);
}
else
{
plist_from_xml(plist_entire, read_size, &root_node);
plist_to_bin(root_node, &plist_out, &size);
}
plist_free(root_node);
free(plist_entire);
if (plist_out)
{
if (options->out_file != NULL)
{
FILE *oplist = fopen(options->out_file, "wb");
if (!oplist) {
free(options);
return 1;
}
fwrite(plist_out, size, sizeof(char), oplist);
fclose(oplist);
}
else
fwrite(plist_out, size, sizeof(char), stdout);
free(plist_out);
}
else
printf("ERROR: Failed to convert input file.\n");
free(options);
return 0;
}
| 168,398 |
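The added guard — refusing to `memcmp` against the 8-byte `bplist00` magic when fewer than 8 bytes were read — is the standard CWE-125 defense for magic-number probing. A small reusable sketch of the same idea (names are illustrative):

```c
#include <stdbool.h>
#include <string.h>

/* Only compare the magic if the buffer is actually large enough
 * to contain it; otherwise memcmp would read out of bounds. */
static bool has_magic(const char *buf, size_t len,
                      const char *magic, size_t magic_len)
{
    return len >= magic_len && memcmp(buf, magic, magic_len) == 0;
}

/* Usage sketch: has_magic(plist_entire, read_size, "bplist00", 8) */
```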
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
{
struct batadv_frag_packet *packet;
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out = NULL;
int size, hdr_size = sizeof(struct batadv_frag_packet);
/* Make sure incoming skb has non-bogus data. */
packet = (struct batadv_frag_packet *)skb->data;
size = ntohs(packet->total_size);
if (size > batadv_frag_size_limit())
goto free;
/* Remove first entry, as this is the destination for the rest of the
* fragments.
*/
entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
hlist_del(&entry->list);
skb_out = entry->skb;
kfree(entry);
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
}
/* Move the existing MAC header to just before the payload. (Override
* the fragment header.)
*/
skb_pull_rcsum(skb_out, hdr_size);
memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
skb_set_mac_header(skb_out, -ETH_HLEN);
skb_reset_network_header(skb_out);
skb_reset_transport_header(skb_out);
/* Copy the payload of the each fragment into the last skb */
hlist_for_each_entry(entry, chain, list) {
size = entry->skb->len - hdr_size;
memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
size);
}
free:
/* Locking is not needed, because 'chain' is not part of any orig. */
batadv_frag_clear_chain(chain);
return skb_out;
}
Commit Message: batman-adv: Calculate extra tail size based on queued fragments
The fragmentation code was replaced in 610bfc6bc99bc83680d190ebc69359a05fc7f605
("batman-adv: Receive fragmented packets and merge"). The new code provided a
mostly unused parameter skb for the merging function. It is used inside the
function to calculate the additionally needed skb tailroom. But instead of
increasing its own tailroom, it is only increasing the tailroom of the first
queued skb. This is not correct in some situations because the first queued
entry can be a different one than the parameter.
An observed problem was:
1. packet with size 104, total_size 1464, fragno 1 was received
- packet is queued
2. packet with size 1400, total_size 1464, fragno 0 was received
- packet is queued at the end of the list
3. enough data was received and can be given to the merge function
(1464 == (1400 - 20) + (104 - 20))
- merge functions gets 1400 byte large packet as skb argument
4. merge function gets first entry in queue (104 byte)
- stored as skb_out
5. merge function calculates the required extra tail as total_size - skb->len
- pskb_expand_head tail of skb_out with 64 bytes
6. merge function tries to squeeze the extra 1380 bytes from the second queued
skb (1400 byte aka skb parameter) in the 64 extra tail bytes of skb_out
Instead calculate the extra required tail bytes for skb_out also using skb_out
instead of using the parameter skb. The skb parameter is only used to get the
total_size from the last received packet. This is also the total_size used to
decide that all fragments were received.
Reported-by: Philipp Psurek <[email protected]>
Signed-off-by: Sven Eckelmann <[email protected]>
Acked-by: Martin Hundebøll <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-399 | batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
{
struct batadv_frag_packet *packet;
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out = NULL;
int size, hdr_size = sizeof(struct batadv_frag_packet);
/* Make sure incoming skb has non-bogus data. */
packet = (struct batadv_frag_packet *)skb->data;
size = ntohs(packet->total_size);
if (size > batadv_frag_size_limit())
goto free;
/* Remove first entry, as this is the destination for the rest of the
* fragments.
*/
entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
hlist_del(&entry->list);
skb_out = entry->skb;
kfree(entry);
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
}
/* Move the existing MAC header to just before the payload. (Override
* the fragment header.)
*/
skb_pull_rcsum(skb_out, hdr_size);
memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
skb_set_mac_header(skb_out, -ETH_HLEN);
skb_reset_network_header(skb_out);
skb_reset_transport_header(skb_out);
/* Copy the payload of the each fragment into the last skb */
hlist_for_each_entry(entry, chain, list) {
size = entry->skb->len - hdr_size;
memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
size);
}
free:
/* Locking is not needed, because 'chain' is not part of any orig. */
batadv_frag_clear_chain(chain);
return skb_out;
}
| 166,786 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
siginfo_t info;
if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
return -EFAULT;
/* Not even root can pretend to send signals from the kernel.
Nor can they impersonate a kill(), which adds source info. */
if (info.si_code >= 0)
return -EPERM;
info.si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
return kill_proc_info(sig, &info, pid);
}
Commit Message: Prevent rt_sigqueueinfo and rt_tgsigqueueinfo from spoofing the signal code
Userland should be able to trust the pid and uid of the sender of a
signal if the si_code is SI_TKILL.
Unfortunately, the kernel has historically allowed sigqueueinfo() to
send any si_code at all (as long as it was negative - to distinguish it
from kernel-generated signals like SIGILL etc), so it could spoof a
SI_TKILL with incorrect siginfo values.
Happily, it looks like glibc has always set si_code to the appropriate
SI_QUEUE, so there are probably no actual user code that ever uses
anything but the appropriate SI_QUEUE flag.
So just tighten the check for si_code (we used to allow any negative
value), and add a (one-time) warning in case there are binaries out
there that might depend on using other si_code values.
Signed-off-by: Julien Tinnes <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
siginfo_t info;
if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
return -EFAULT;
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if (info.si_code != SI_QUEUE) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info.si_code < 0);
return -EPERM;
}
info.si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
return kill_proc_info(sig, &info, pid);
}
| 166,231 |
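For context on why tightening `si_code` to `SI_QUEUE` is compatible with existing userspace: as the commit notes, glibc's `sigqueue(3)` has always set that code. A minimal hedged userspace sketch that queues a signal to itself and observes the code (POSIX APIs; error handling trimmed):

```c
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_sig(int sig, siginfo_t *info, void *ctx)
{
    (void)sig; (void)ctx;
    /* sigqueue(3) deliveries arrive with si_code == SI_QUEUE -- the
     * only userspace-originated code rt_sigqueueinfo now permits. */
    if (info->si_code == SI_QUEUE)
        write(STDOUT_FILENO, "si_code == SI_QUEUE\n", 20);
}

int main(void)
{
    struct sigaction sa;
    union sigval v;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = on_sig;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGUSR1, &sa, NULL);

    v.sival_int = 42;
    sigqueue(getpid(), SIGUSR1, v);  /* glibc fills in SI_QUEUE */
    sleep(1);                        /* give the signal time to land */
    return 0;
}
```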
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
int nsems;
int error;
error = get_undo_list(&ulp);
if (error)
return ERR_PTR(error);
rcu_read_lock();
spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid);
spin_unlock(&ulp->lock);
if (likely(un!=NULL))
goto out;
/* no undo structure around - allocate one. */
/* step 1: figure out the size of the semaphore array */
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
return ERR_CAST(sma);
}
nsems = sma->sem_nsems;
ipc_rcu_getref(sma);
rcu_read_unlock();
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
sem_putref(sma);
return ERR_PTR(-ENOMEM);
}
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
sem_unlock(sma);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
spin_lock(&ulp->lock);
/*
* step 4: check for races: did someone else allocate the undo struct?
*/
un = lookup_undo(ulp, semid);
if (un) {
kfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
new->semadj = (short *) &new[1];
new->ulp = ulp;
new->semid = semid;
assert_spin_locked(&ulp->lock);
list_add_rcu(&new->list_proc, &ulp->list_proc);
assert_spin_locked(&sma->sem_perm.lock);
list_add(&new->list_id, &sma->list_id);
un = new;
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
sem_unlock(sma);
out:
return un;
}
Commit Message: ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
vanilla Davidlohr's Davidlohr's + Davidlohr's +
threads patches rwlock patches v3 patches
10 610652 726325 1783589 2142206
20 341570 365699 1520453 1977878
30 288102 307037 1498167 2037995
40 290714 305955 1612665 2256484
50 288620 312890 1733453 2650292
60 289987 306043 1649360 2388008
70 291298 306347 1723167 2717486
80 290948 305662 1729545 2763582
90 290996 306680 1736021 2757524
100 292243 306700 1773700 3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-189 | static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
int nsems, error;
error = get_undo_list(&ulp);
if (error)
return ERR_PTR(error);
rcu_read_lock();
spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid);
spin_unlock(&ulp->lock);
if (likely(un!=NULL))
goto out;
/* no undo structure around - allocate one. */
/* step 1: figure out the size of the semaphore array */
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
return ERR_CAST(sma);
}
nsems = sma->sem_nsems;
if (!ipc_rcu_getref(sma)) {
rcu_read_unlock();
un = ERR_PTR(-EIDRM);
goto out;
}
rcu_read_unlock();
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
sem_putref(sma);
return ERR_PTR(-ENOMEM);
}
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
spin_lock(&ulp->lock);
/*
* step 4: check for races: did someone else allocate the undo struct?
*/
un = lookup_undo(ulp, semid);
if (un) {
kfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
new->semadj = (short *) &new[1];
new->ulp = ulp;
new->semid = semid;
assert_spin_locked(&ulp->lock);
list_add_rcu(&new->list_proc, &ulp->list_proc);
assert_spin_locked(&sma->sem_perm.lock);
list_add(&new->list_id, &sma->list_id);
un = new;
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
sem_unlock(sma, -1);
out:
return un;
}
| 165,970 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt,
struct rpcrdma_msg *rmsgp,
enum rpcrdma_errcode err, __be32 *va)
{
__be32 *startp = va;
*va++ = rmsgp->rm_xid;
*va++ = rmsgp->rm_vers;
*va++ = xprt->sc_fc_credits;
*va++ = rdma_error;
*va++ = cpu_to_be32(err);
if (err == ERR_VERS) {
*va++ = rpcrdma_version;
*va++ = rpcrdma_version;
}
return (int)((unsigned long)va - (unsigned long)startp);
}
Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use unitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
CWE ID: CWE-404 | int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt,
| 168,160 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
unsigned int data_sz,
vpx_codec_stream_info_t *si,
int *is_intra_only,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
int intra_only_flag = 0;
uint8_t clear_buffer[9];
if (data + data_sz <= data)
return VPX_CODEC_INVALID_PARAM;
si->is_kf = 0;
si->w = si->h = 0;
if (decrypt_cb) {
data_sz = VPXMIN(sizeof(clear_buffer), data_sz);
decrypt_cb(decrypt_state, data, clear_buffer, data_sz);
data = clear_buffer;
}
{
int show_frame;
int error_resilient;
struct vpx_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
const int frame_marker = vpx_rb_read_literal(&rb, 2);
const BITSTREAM_PROFILE profile = vp9_read_profile(&rb);
if (frame_marker != VP9_FRAME_MARKER)
return VPX_CODEC_UNSUP_BITSTREAM;
if (profile >= MAX_PROFILES)
return VPX_CODEC_UNSUP_BITSTREAM;
if ((profile >= 2 && data_sz <= 1) || data_sz < 1)
return VPX_CODEC_UNSUP_BITSTREAM;
if (vpx_rb_read_bit(&rb)) { // show an existing frame
vpx_rb_read_literal(&rb, 3); // Frame buffer to show.
return VPX_CODEC_OK;
}
if (data_sz <= 8)
return VPX_CODEC_UNSUP_BITSTREAM;
si->is_kf = !vpx_rb_read_bit(&rb);
show_frame = vpx_rb_read_bit(&rb);
error_resilient = vpx_rb_read_bit(&rb);
if (si->is_kf) {
if (!vp9_read_sync_code(&rb))
return VPX_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
vp9_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
} else {
intra_only_flag = show_frame ? 0 : vpx_rb_read_bit(&rb);
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
if (!vp9_read_sync_code(&rb))
return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
}
rb.bit_offset += REF_FRAMES; // refresh_frame_flags
vp9_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
}
}
}
if (is_intra_only != NULL)
*is_intra_only = intra_only_flag;
return VPX_CODEC_OK;
}
Commit Message: DO NOT MERGE | libvpx: cherry-pick aa1c813 from upstream
Description from upstream:
vp9: Fix potential SEGV in decoder_peek_si_internal
decoder_peek_si_internal could potentially read more bytes than
what actually exists in the input buffer. We check for the buffer
size to be at least 8, but we try to read up to 10 bytes in the
worst case. A well crafted file could thus cause a segfault.
Likely change that introduced this bug was:
https://chromium-review.googlesource.com/#/c/70439 (git hash:
7c43fb6)
Bug: 30013856
Change-Id: If556414cb5b82472d5673e045bc185cc57bb9af3
(cherry picked from commit bd57d587c2eb743c61b049add18f9fd72bf78c33)
CWE ID: CWE-119 | static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
unsigned int data_sz,
vpx_codec_stream_info_t *si,
int *is_intra_only,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
int intra_only_flag = 0;
uint8_t clear_buffer[10];
if (data + data_sz <= data)
return VPX_CODEC_INVALID_PARAM;
si->is_kf = 0;
si->w = si->h = 0;
if (decrypt_cb) {
data_sz = VPXMIN(sizeof(clear_buffer), data_sz);
decrypt_cb(decrypt_state, data, clear_buffer, data_sz);
data = clear_buffer;
}
// A maximum of 6 bits are needed to read the frame marker, profile and
// show_existing_frame.
if (data_sz < 1)
return VPX_CODEC_UNSUP_BITSTREAM;
{
int show_frame;
int error_resilient;
struct vpx_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
const int frame_marker = vpx_rb_read_literal(&rb, 2);
const BITSTREAM_PROFILE profile = vp9_read_profile(&rb);
if (frame_marker != VP9_FRAME_MARKER)
return VPX_CODEC_UNSUP_BITSTREAM;
if (profile >= MAX_PROFILES)
return VPX_CODEC_UNSUP_BITSTREAM;
if (vpx_rb_read_bit(&rb)) { // show an existing frame
// If profile is > 2 and show_existing_frame is true, then at least 1 more
// byte (6+3=9 bits) is needed.
if (profile > 2 && data_sz < 2)
return VPX_CODEC_UNSUP_BITSTREAM;
vpx_rb_read_literal(&rb, 3); // Frame buffer to show.
return VPX_CODEC_OK;
}
// For the rest of the function, a maximum of 9 more bytes are needed
// (computed by taking the maximum possible bits needed in each case). Note
// that this has to be updated if we read any more bits in this function.
if (data_sz < 10)
return VPX_CODEC_UNSUP_BITSTREAM;
si->is_kf = !vpx_rb_read_bit(&rb);
show_frame = vpx_rb_read_bit(&rb);
error_resilient = vpx_rb_read_bit(&rb);
if (si->is_kf) {
if (!vp9_read_sync_code(&rb))
return VPX_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
vp9_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
} else {
intra_only_flag = show_frame ? 0 : vpx_rb_read_bit(&rb);
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
if (!vp9_read_sync_code(&rb))
return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
}
rb.bit_offset += REF_FRAMES; // refresh_frame_flags
vp9_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
}
}
}
if (is_intra_only != NULL)
*is_intra_only = intra_only_flag;
return VPX_CODEC_OK;
}
| 173,409 |
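This fix defends by computing the worst-case header size (10 bytes) before parsing. An alternative, complementary defense is a bit reader that bounds-checks every read; a minimal sketch in C (illustrative only, not libvpx's `vpx_read_bit_buffer`):

```c
#include <stddef.h>
#include <stdint.h>

/* A bounds-checked bit reader: reading past the end returns 0
 * instead of touching out-of-range memory, which is the class of
 * bug fixed above. */
struct bit_reader {
    const uint8_t *buf;
    size_t len;       /* total bytes available */
    size_t bit_pos;   /* next bit to read */
};

static int read_bit(struct bit_reader *r)
{
    size_t byte = r->bit_pos / 8;
    int bit;

    if (byte >= r->len)
        return 0;     /* refuse to read past the end of the buffer */
    bit = (r->buf[byte] >> (7 - (r->bit_pos % 8))) & 1;
    r->bit_pos++;
    return bit;
}
```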
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: DWORD UnprivilegedProcessDelegate::GetExitCode() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
DWORD exit_code = CONTROL_C_EXIT;
if (worker_process_.IsValid()) {
if (!::GetExitCodeProcess(worker_process_, &exit_code)) {
LOG_GETLASTERROR(INFO)
<< "Failed to query the exit code of the worker process";
exit_code = CONTROL_C_EXIT;
}
}
return exit_code;
}
Commit Message: Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process.
As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition.
BUG=134694
Review URL: https://chromiumcodereview.appspot.com/11143025
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | DWORD UnprivilegedProcessDelegate::GetExitCode() {
DWORD UnprivilegedProcessDelegate::GetProcessId() const {
if (worker_process_.IsValid()) {
return ::GetProcessId(worker_process_);
} else {
return 0;
}
}
bool UnprivilegedProcessDelegate::IsPermanentError(int failure_count) const {
// Get exit code of the worker process if it is available.
DWORD exit_code = CONTROL_C_EXIT;
if (worker_process_.IsValid()) {
if (!::GetExitCodeProcess(worker_process_, &exit_code)) {
LOG_GETLASTERROR(INFO)
<< "Failed to query the exit code of the worker process";
exit_code = CONTROL_C_EXIT;
}
}
// Stop trying to restart the worker process if it exited due to
// misconfiguration.
return (kMinPermanentErrorExitCode <= exit_code &&
exit_code <= kMaxPermanentErrorExitCode);
}
| 171,545 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ResourceFetcher::DidLoadResourceFromMemoryCache(
unsigned long identifier,
Resource* resource,
const ResourceRequest& original_resource_request) {
ResourceRequest resource_request(resource->Url());
resource_request.SetFrameType(original_resource_request.GetFrameType());
resource_request.SetRequestContext(
original_resource_request.GetRequestContext());
Context().DispatchDidLoadResourceFromMemoryCache(identifier, resource_request,
resource->GetResponse());
Context().DispatchWillSendRequest(identifier, resource_request,
ResourceResponse() /* redirects */,
resource->Options().initiator_info);
Context().DispatchDidReceiveResponse(
identifier, resource->GetResponse(), resource_request.GetFrameType(),
resource_request.GetRequestContext(), resource,
FetchContext::ResourceResponseType::kFromMemoryCache);
if (resource->EncodedSize() > 0)
Context().DispatchDidReceiveData(identifier, 0, resource->EncodedSize());
Context().DispatchDidFinishLoading(
identifier, 0, 0, resource->GetResponse().DecodedBodyLength());
}
Commit Message: DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resoure type into the DispatchWillSendRequest
instrumenation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Commit-Queue: Andrey Lushnikov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507936}
CWE ID: CWE-119 | void ResourceFetcher::DidLoadResourceFromMemoryCache(
unsigned long identifier,
Resource* resource,
const ResourceRequest& original_resource_request) {
ResourceRequest resource_request(resource->Url());
resource_request.SetFrameType(original_resource_request.GetFrameType());
resource_request.SetRequestContext(
original_resource_request.GetRequestContext());
Context().DispatchDidLoadResourceFromMemoryCache(identifier, resource_request,
resource->GetResponse());
Context().DispatchWillSendRequest(
identifier, resource_request, ResourceResponse() /* redirects */,
resource->GetType(), resource->Options().initiator_info);
Context().DispatchDidReceiveResponse(
identifier, resource->GetResponse(), resource_request.GetFrameType(),
resource_request.GetRequestContext(), resource,
FetchContext::ResourceResponseType::kFromMemoryCache);
if (resource->EncodedSize() > 0)
Context().DispatchDidReceiveData(identifier, 0, resource->EncodedSize());
Context().DispatchDidFinishLoading(
identifier, 0, 0, resource->GetResponse().DecodedBodyLength());
}
| 172,478 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PHP_FUNCTION(openssl_seal)
{
zval *pubkeys, *pubkey, *sealdata, *ekeys, *iv = NULL;
HashTable *pubkeysht;
EVP_PKEY **pkeys;
zend_resource ** key_resources; /* so we know what to cleanup */
int i, len1, len2, *eksl, nkeys, iv_len;
unsigned char iv_buf[EVP_MAX_IV_LENGTH + 1], *buf = NULL, **eks;
char * data;
size_t data_len;
char *method =NULL;
size_t method_len = 0;
const EVP_CIPHER *cipher;
EVP_CIPHER_CTX *ctx;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sz/z/a/|sz/", &data, &data_len,
&sealdata, &ekeys, &pubkeys, &method, &method_len, &iv) == FAILURE) {
return;
}
pubkeysht = Z_ARRVAL_P(pubkeys);
nkeys = pubkeysht ? zend_hash_num_elements(pubkeysht) : 0;
if (!nkeys) {
php_error_docref(NULL, E_WARNING, "Fourth argument to openssl_seal() must be a non-empty array");
RETURN_FALSE;
}
PHP_OPENSSL_CHECK_SIZE_T_TO_INT(data_len, data);
if (method) {
cipher = EVP_get_cipherbyname(method);
if (!cipher) {
php_error_docref(NULL, E_WARNING, "Unknown signature algorithm.");
RETURN_FALSE;
}
} else {
cipher = EVP_rc4();
}
iv_len = EVP_CIPHER_iv_length(cipher);
if (!iv && iv_len > 0) {
php_error_docref(NULL, E_WARNING,
"Cipher algorithm requires an IV to be supplied as a sixth parameter");
RETURN_FALSE;
}
pkeys = safe_emalloc(nkeys, sizeof(*pkeys), 0);
eksl = safe_emalloc(nkeys, sizeof(*eksl), 0);
eks = safe_emalloc(nkeys, sizeof(*eks), 0);
memset(eks, 0, sizeof(*eks) * nkeys);
key_resources = safe_emalloc(nkeys, sizeof(zend_resource*), 0);
memset(key_resources, 0, sizeof(zend_resource*) * nkeys);
memset(pkeys, 0, sizeof(*pkeys) * nkeys);
/* get the public keys we are using to seal this data */
i = 0;
ZEND_HASH_FOREACH_VAL(pubkeysht, pubkey) {
pkeys[i] = php_openssl_evp_from_zval(pubkey, 1, NULL, 0, 0, &key_resources[i]);
if (pkeys[i] == NULL) {
php_error_docref(NULL, E_WARNING, "not a public key (%dth member of pubkeys)", i+1);
RETVAL_FALSE;
goto clean_exit;
}
eks[i] = emalloc(EVP_PKEY_size(pkeys[i]) + 1);
i++;
} ZEND_HASH_FOREACH_END();
ctx = EVP_CIPHER_CTX_new();
if (ctx == NULL || !EVP_EncryptInit(ctx,cipher,NULL,NULL)) {
EVP_CIPHER_CTX_free(ctx);
RETVAL_FALSE;
goto clean_exit;
}
/* allocate one byte extra to make room for \0 */
buf = emalloc(data_len + EVP_CIPHER_CTX_block_size(ctx));
EVP_CIPHER_CTX_cleanup(ctx);
if (!EVP_SealInit(ctx, cipher, eks, eksl, &iv_buf[0], pkeys, nkeys) ||
!EVP_SealUpdate(ctx, buf, &len1, (unsigned char *)data, (int)data_len) ||
!EVP_SealFinal(ctx, buf + len1, &len2)) {
RETVAL_FALSE;
efree(buf);
EVP_CIPHER_CTX_free(ctx);
goto clean_exit;
}
if (len1 + len2 > 0) {
zval_dtor(sealdata);
ZVAL_NEW_STR(sealdata, zend_string_init((char*)buf, len1 + len2, 0));
efree(buf);
zval_dtor(ekeys);
array_init(ekeys);
for (i=0; i<nkeys; i++) {
eks[i][eksl[i]] = '\0';
add_next_index_stringl(ekeys, (const char*)eks[i], eksl[i]);
efree(eks[i]);
eks[i] = NULL;
}
if (iv) {
zval_dtor(iv);
iv_buf[iv_len] = '\0';
ZVAL_NEW_STR(iv, zend_string_init((char*)iv_buf, iv_len, 0));
}
} else {
efree(buf);
}
RETVAL_LONG(len1 + len2);
EVP_CIPHER_CTX_free(ctx);
clean_exit:
for (i=0; i<nkeys; i++) {
if (key_resources[i] == NULL && pkeys[i] != NULL) {
EVP_PKEY_free(pkeys[i]);
}
if (eks[i]) {
efree(eks[i]);
}
}
efree(eks);
efree(eksl);
efree(pkeys);
efree(key_resources);
}
Commit Message:
CWE ID: CWE-754 | PHP_FUNCTION(openssl_seal)
{
zval *pubkeys, *pubkey, *sealdata, *ekeys, *iv = NULL;
HashTable *pubkeysht;
EVP_PKEY **pkeys;
zend_resource ** key_resources; /* so we know what to cleanup */
int i, len1, len2, *eksl, nkeys, iv_len;
unsigned char iv_buf[EVP_MAX_IV_LENGTH + 1], *buf = NULL, **eks;
char * data;
size_t data_len;
char *method =NULL;
size_t method_len = 0;
const EVP_CIPHER *cipher;
EVP_CIPHER_CTX *ctx;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sz/z/a/|sz/", &data, &data_len,
&sealdata, &ekeys, &pubkeys, &method, &method_len, &iv) == FAILURE) {
return;
}
pubkeysht = Z_ARRVAL_P(pubkeys);
nkeys = pubkeysht ? zend_hash_num_elements(pubkeysht) : 0;
if (!nkeys) {
php_error_docref(NULL, E_WARNING, "Fourth argument to openssl_seal() must be a non-empty array");
RETURN_FALSE;
}
PHP_OPENSSL_CHECK_SIZE_T_TO_INT(data_len, data);
if (method) {
cipher = EVP_get_cipherbyname(method);
if (!cipher) {
php_error_docref(NULL, E_WARNING, "Unknown signature algorithm.");
RETURN_FALSE;
}
} else {
cipher = EVP_rc4();
}
iv_len = EVP_CIPHER_iv_length(cipher);
if (!iv && iv_len > 0) {
php_error_docref(NULL, E_WARNING,
"Cipher algorithm requires an IV to be supplied as a sixth parameter");
RETURN_FALSE;
}
pkeys = safe_emalloc(nkeys, sizeof(*pkeys), 0);
eksl = safe_emalloc(nkeys, sizeof(*eksl), 0);
eks = safe_emalloc(nkeys, sizeof(*eks), 0);
memset(eks, 0, sizeof(*eks) * nkeys);
key_resources = safe_emalloc(nkeys, sizeof(zend_resource*), 0);
memset(key_resources, 0, sizeof(zend_resource*) * nkeys);
memset(pkeys, 0, sizeof(*pkeys) * nkeys);
/* get the public keys we are using to seal this data */
i = 0;
ZEND_HASH_FOREACH_VAL(pubkeysht, pubkey) {
pkeys[i] = php_openssl_evp_from_zval(pubkey, 1, NULL, 0, 0, &key_resources[i]);
if (pkeys[i] == NULL) {
php_error_docref(NULL, E_WARNING, "not a public key (%dth member of pubkeys)", i+1);
RETVAL_FALSE;
goto clean_exit;
}
eks[i] = emalloc(EVP_PKEY_size(pkeys[i]) + 1);
i++;
} ZEND_HASH_FOREACH_END();
ctx = EVP_CIPHER_CTX_new();
if (ctx == NULL || !EVP_EncryptInit(ctx,cipher,NULL,NULL)) {
EVP_CIPHER_CTX_free(ctx);
RETVAL_FALSE;
goto clean_exit;
}
/* allocate one byte extra to make room for \0 */
buf = emalloc(data_len + EVP_CIPHER_CTX_block_size(ctx));
EVP_CIPHER_CTX_cleanup(ctx);
if (EVP_SealInit(ctx, cipher, eks, eksl, &iv_buf[0], pkeys, nkeys) <= 0 ||
!EVP_SealUpdate(ctx, buf, &len1, (unsigned char *)data, (int)data_len) ||
!EVP_SealFinal(ctx, buf + len1, &len2)) {
RETVAL_FALSE;
efree(buf);
EVP_CIPHER_CTX_free(ctx);
goto clean_exit;
}
if (len1 + len2 > 0) {
zval_dtor(sealdata);
ZVAL_NEW_STR(sealdata, zend_string_init((char*)buf, len1 + len2, 0));
efree(buf);
zval_dtor(ekeys);
array_init(ekeys);
for (i=0; i<nkeys; i++) {
eks[i][eksl[i]] = '\0';
add_next_index_stringl(ekeys, (const char*)eks[i], eksl[i]);
efree(eks[i]);
eks[i] = NULL;
}
if (iv) {
zval_dtor(iv);
iv_buf[iv_len] = '\0';
ZVAL_NEW_STR(iv, zend_string_init((char*)iv_buf, iv_len, 0));
}
} else {
efree(buf);
}
RETVAL_LONG(len1 + len2);
EVP_CIPHER_CTX_free(ctx);
clean_exit:
for (i=0; i<nkeys; i++) {
if (key_resources[i] == NULL && pkeys[i] != NULL) {
EVP_PKEY_free(pkeys[i]);
}
if (eks[i]) {
efree(eks[i]);
}
}
efree(eks);
efree(eksl);
efree(pkeys);
efree(key_resources);
}
| 164,755 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int sctp_rcv(struct sk_buff *skb)
{
struct sock *sk;
struct sctp_association *asoc;
struct sctp_endpoint *ep = NULL;
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
struct sctp_af *af;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
if (skb_linearize(skb))
goto discard_it;
sh = sctp_hdr(skb);
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
goto discard_it;
if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
goto discard_it;
skb_pull(skb, sizeof(struct sctphdr));
/* Make sure we at least have chunk headers worth of data left. */
if (skb->len < sizeof(struct sctp_chunkhdr))
goto discard_it;
family = ipver2af(ip_hdr(skb)->version);
af = sctp_get_af_specific(family);
if (unlikely(!af))
goto discard_it;
/* Initialize local addresses for lookups. */
af->from_skb(&src, skb, 1);
af->from_skb(&dest, skb, 0);
/* If the packet is to or from a non-unicast address,
* silently discard the packet.
*
* This is not clearly defined in the RFC except in section
* 8.4 - OOTB handling. However, based on the book "Stream Control
* Transmission Protocol" 2.1, "It is important to note that the
* IP address of an SCTP transport address must be a routable
* unicast address. In other words, IP multicast addresses and
* IP broadcast addresses cannot be used in an SCTP transport
* address."
*/
if (!af->addr_valid(&src, NULL, skb) ||
!af->addr_valid(&dest, NULL, skb))
goto discard_it;
asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
if (!asoc)
ep = __sctp_rcv_lookup_endpoint(&dest);
/* Retrieve the common input handling substructure. */
rcvr = asoc ? &asoc->base : &ep->base;
sk = rcvr->sk;
/*
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
} else {
sctp_endpoint_put(ep);
ep = NULL;
}
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
rcvr = &ep->base;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets.
* An SCTP packet is called an "out of the blue" (OOTB)
* packet if it is correctly formed, i.e., passed the
* receiver's checksum check, but the receiver is not
* able to identify the association to which this
* packet belongs.
*/
if (!asoc) {
if (sctp_rcv_ootb(skb)) {
SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
goto discard_release;
}
}
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
if (!chunk)
goto discard_release;
SCTP_INPUT_CB(skb)->chunk = chunk;
/* Remember what endpoint is to handle this packet. */
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
chunk->sctp_hdr = sh;
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
/* Remember where we came from. */
chunk->transport = transport;
/* Acquire access to the sock lock. Note: We are safe from other
* bottom halves on this lock, but a user may be in the lock too,
* so check if it is busy.
*/
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
sctp_add_backlog(sk, skb);
} else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
}
sctp_bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
return 0;
discard_it:
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
kfree_skb(skb);
return 0;
discard_release:
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
goto discard_it;
}
Commit Message: sctp: Fix another socket race during accept/peeloff
There is a race between sctp_rcv() and sctp_accept() where we
have moved the association from the listening socket to the
accepted socket, but sctp_rcv() processing cached the old
socket and continues to use it.
The easy solution is to check for the socket mismatch once we've
grabbed the socket lock. If we hit a mismatch, that means we are
currently holding the lock on the listening socket, but the
association is referencing a newly accepted socket. We need to drop
the lock on the old socket and grab the lock on the new one.
A more proper solution might be to create accepted sockets when
the new association is established, similar to TCP. That would
eliminate the race for 1-to-1 style sockets, but it would still
exist for 1-to-many sockets where a user wished to peel off an
association. For now, we'll live with this easy solution as
it addresses the problem.
Reported-by: Michal Hocko <[email protected]>
Reported-by: Karsten Keil <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | int sctp_rcv(struct sk_buff *skb)
{
struct sock *sk;
struct sctp_association *asoc;
struct sctp_endpoint *ep = NULL;
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
struct sctp_af *af;
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
if (skb_linearize(skb))
goto discard_it;
sh = sctp_hdr(skb);
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
goto discard_it;
if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
goto discard_it;
skb_pull(skb, sizeof(struct sctphdr));
/* Make sure we at least have chunk headers worth of data left. */
if (skb->len < sizeof(struct sctp_chunkhdr))
goto discard_it;
family = ipver2af(ip_hdr(skb)->version);
af = sctp_get_af_specific(family);
if (unlikely(!af))
goto discard_it;
/* Initialize local addresses for lookups. */
af->from_skb(&src, skb, 1);
af->from_skb(&dest, skb, 0);
/* If the packet is to or from a non-unicast address,
* silently discard the packet.
*
* This is not clearly defined in the RFC except in section
* 8.4 - OOTB handling. However, based on the book "Stream Control
* Transmission Protocol" 2.1, "It is important to note that the
* IP address of an SCTP transport address must be a routable
* unicast address. In other words, IP multicast addresses and
* IP broadcast addresses cannot be used in an SCTP transport
* address."
*/
if (!af->addr_valid(&src, NULL, skb) ||
!af->addr_valid(&dest, NULL, skb))
goto discard_it;
asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
if (!asoc)
ep = __sctp_rcv_lookup_endpoint(&dest);
/* Retrieve the common input handling substructure. */
rcvr = asoc ? &asoc->base : &ep->base;
sk = rcvr->sk;
/*
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
} else {
sctp_endpoint_put(ep);
ep = NULL;
}
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
rcvr = &ep->base;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets.
* An SCTP packet is called an "out of the blue" (OOTB)
* packet if it is correctly formed, i.e., passed the
* receiver's checksum check, but the receiver is not
* able to identify the association to which this
* packet belongs.
*/
if (!asoc) {
if (sctp_rcv_ootb(skb)) {
SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
goto discard_release;
}
}
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
if (!chunk)
goto discard_release;
SCTP_INPUT_CB(skb)->chunk = chunk;
/* Remember what endpoint is to handle this packet. */
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
chunk->sctp_hdr = sh;
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
/* Remember where we came from. */
chunk->transport = transport;
/* Acquire access to the sock lock. Note: We are safe from other
* bottom halves on this lock, but a user may be in the lock too,
* so check if it is busy.
*/
sctp_bh_lock_sock(sk);
if (sk != rcvr->sk) {
/* Our cached sk is different from the rcvr->sk. This is
* because migrate()/accept() may have moved the association
* to a new socket and released all the sockets. So now we
* are holding a lock on the old socket while the user may
* be doing something with the new socket. Switch our view
* of the current sk.
*/
sctp_bh_unlock_sock(sk);
sk = rcvr->sk;
sctp_bh_lock_sock(sk);
}
if (sock_owned_by_user(sk)) {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
sctp_add_backlog(sk, skb);
} else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
}
sctp_bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
return 0;
discard_it:
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
kfree_skb(skb);
return 0;
discard_release:
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
goto discard_it;
}
| 166,208 |
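The pattern behind this fix generalizes: never trust a pointer cached before taking a lock; re-check it once the lock is held and, on a mismatch, move over to the current owner. Below is a minimal user-space sketch of that lock-then-revalidate shape. All names (obj, owner, lock_current_owner) are hypothetical, and the sketch omits the reference counting and memory-ordering guarantees the kernel code relies on.

#include <pthread.h>
#include <stdio.h>

struct owner {
    pthread_mutex_t lock;
};

struct obj {
    struct owner *owner;   /* may be re-pointed by a concurrent migration */
};

/* Lock the cached owner, then re-validate the cache; on a mismatch,
 * drop the stale lock and take the lock on the current owner. */
static void lock_current_owner(struct obj *o)
{
    struct owner *own = o->owner;          /* cached view, read unlocked */
    pthread_mutex_lock(&own->lock);
    while (own != o->owner) {              /* lost a race with migration */
        pthread_mutex_unlock(&own->lock);
        own = o->owner;
        pthread_mutex_lock(&own->lock);
    }
    /* ... o may now be used consistently under own->lock ... */
    pthread_mutex_unlock(&own->lock);
}

int main(void)
{
    struct owner a;
    struct obj o = { &a };
    pthread_mutex_init(&a.lock, NULL);
    lock_current_owner(&o);
    puts("locked the current owner");
    return 0;
}

The loop form is the defensive variant; the kernel fix above performs a single re-check after sctp_bh_lock_sock(sk).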
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
* Now load segment descriptors. If a fault happens at this stage
* it is handled in the context of the new task
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
Commit Message: KVM: x86: Handle errors when RIP is set during far jumps
Far jmp/call/ret may fault while loading a new RIP. Currently KVM does not
handle this case, which may result in a failed VM entry once the assignment
is done. The tricky part is that loading the new CS affects the
VMCS/VMCB state, so if we fail while loading the new RIP, we are left in an
inconsistent state. Therefore, on 64-bit this patch saves the old CS
descriptor and restores it if loading the RIP failed.
This fixes CVE-2014-3647.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
CWE ID: CWE-264 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
* Now load segment descriptors. If a fault happens at this stage
* it is handled in the context of the new task
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
cpl, true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
| 166,343 |
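The fix threads the old CS descriptor through __load_segment_descriptor so a later fault can be rolled back. As a general shape, that is snapshot-then-rollback: save the state a multi-step commit will clobber, and restore it if a later step fails. A minimal sketch follows; the struct layout and the helpers load_segment and assign_rip are stand-ins invented for illustration, not the emulator's API.

#include <stdio.h>

struct seg_desc {
    unsigned long base, limit;
};

/* Stand-in for the emulator's descriptor load (always succeeds here). */
static int load_segment(struct seg_desc *cur, const struct seg_desc *next)
{
    *cur = *next;
    return 0;
}

/* Stand-in for the RIP assignment the real code can fault on. */
static int assign_rip(unsigned long *rip, unsigned long next,
                      const struct seg_desc *cs)
{
    if (next > cs->limit)
        return -1;
    *rip = next;
    return 0;
}

/* Snapshot the state the first step clobbers; if the second step
 * fails, restore the snapshot so no half-updated state remains. */
static int far_jump(struct seg_desc *cs, const struct seg_desc *new_cs,
                    unsigned long *rip, unsigned long new_rip)
{
    struct seg_desc old_cs = *cs;
    int rc = load_segment(cs, new_cs);
    if (rc != 0)
        return rc;
    rc = assign_rip(rip, new_rip, cs);
    if (rc != 0)
        *cs = old_cs;   /* roll back to a consistent state */
    return rc;
}

int main(void)
{
    struct seg_desc cs = { 0, 0xffff }, new_cs = { 0, 0x0fff };
    unsigned long rip = 0;
    int rc = far_jump(&cs, &new_cs, &rip, 0x2000);
    printf("rc=%d cs.limit=%#lx rip=%#lx\n", rc, cs.limit, rip);
    return 0;
}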
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: GDataRootDirectory::GDataRootDirectory()
: ALLOW_THIS_IN_INITIALIZER_LIST(GDataDirectory(NULL, this)),
fake_search_directory_(new GDataDirectory(NULL, NULL)),
largest_changestamp_(0), serialized_size_(0) {
title_ = kGDataRootDirectory;
SetFileNameFromTitle();
}
Commit Message: gdata: Define the resource ID for the root directory
Per the spec, the resource ID for the root directory is defined
as "folder:root". Add the resource ID to the root directory in our
file system representation so we can look up the root directory by
the resource ID.
BUG=127697
TEST=add unit tests
Review URL: https://chromiumcodereview.appspot.com/10332253
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137928 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | GDataRootDirectory::GDataRootDirectory()
: ALLOW_THIS_IN_INITIALIZER_LIST(GDataDirectory(NULL, this)),
fake_search_directory_(new GDataDirectory(NULL, NULL)),
largest_changestamp_(0), serialized_size_(0) {
title_ = kGDataRootDirectory;
SetFileNameFromTitle();
resource_id_ = kGDataRootDirectoryResourceId;
// Add self to the map so the root directory can be looked up by the
// resource ID.
AddEntryToResourceMap(this);
}
| 170,777 |
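The fix assigns the spec-defined ID "folder:root" to the root entry and registers the entry in the resource map at construction time, so ID-based lookups can find it. A hedged C sketch of that register-on-construction pattern follows; the registry, entry type, and function names are hypothetical, and the real code uses a proper map keyed by string.

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 16

struct entry {
    const char *resource_id;
    const char *title;
};

static struct entry *resource_map[MAX_ENTRIES];

static void add_entry_to_resource_map(struct entry *e)
{
    for (int i = 0; i < MAX_ENTRIES; i++) {
        if (resource_map[i] == NULL) {
            resource_map[i] = e;
            return;
        }
    }
}

static struct entry *get_entry_by_resource_id(const char *id)
{
    for (int i = 0; i < MAX_ENTRIES; i++) {
        if (resource_map[i] && strcmp(resource_map[i]->resource_id, id) == 0)
            return resource_map[i];
    }
    return NULL;
}

int main(void)
{
    /* The root gets the well-known ID and registers itself at
     * construction time, mirroring the shape of the fix. */
    static struct entry root = { "folder:root", "drive" };
    add_entry_to_resource_map(&root);
    struct entry *hit = get_entry_by_resource_id("folder:root");
    puts(hit ? hit->title : "not found");
    return 0;
}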
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static OPJ_BOOL opj_pi_next_rpcl(opj_pi_iterator_t * pi)
{
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
goto LABEL_SKIP;
} else {
OPJ_UINT32 compno, resno;
pi->first = 0;
pi->dx = 0;
pi->dy = 0;
for (compno = 0; compno < pi->numcomps; compno++) {
comp = &pi->comps[compno];
for (resno = 0; resno < comp->numresolutions; resno++) {
OPJ_UINT32 dx, dy;
res = &comp->resolutions[resno];
dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno));
dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno));
pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx);
pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy);
}
}
}
if (!pi->tp_on) {
pi->poc.ty0 = pi->ty0;
pi->poc.tx0 = pi->tx0;
pi->poc.ty1 = pi->ty1;
pi->poc.tx1 = pi->tx1;
}
for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1; pi->resno++) {
for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1;
pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) {
for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1;
pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
OPJ_UINT32 levelno;
OPJ_INT32 trx0, try0;
OPJ_INT32 trx1, try1;
OPJ_UINT32 rpx, rpy;
OPJ_INT32 prci, prcj;
comp = &pi->comps[pi->compno];
if (pi->resno >= comp->numresolutions) {
continue;
}
res = &comp->resolutions[pi->resno];
levelno = comp->numresolutions - 1 - pi->resno;
trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno));
try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno));
trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno));
try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno));
rpx = res->pdx + levelno;
rpy = res->pdy + levelno;
if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) &&
((try0 << levelno) % (1 << rpy))))) {
continue;
}
if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) &&
((trx0 << levelno) % (1 << rpx))))) {
continue;
}
if ((res->pw == 0) || (res->ph == 0)) {
continue;
}
if ((trx0 == trx1) || (try0 == try1)) {
continue;
}
prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x,
(OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx)
- opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx);
prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y,
(OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy)
- opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy);
pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw);
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno *
pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:
;
}
}
}
}
}
return OPJ_FALSE;
}
Commit Message: Avoid division by zero in opj_pi_next_rpcl, opj_pi_next_pcrl and opj_pi_next_cprl (#938)
Fixes issues with id:000026,sig:08,src:002419,op:int32,pos:60,val:+32 and
id:000019,sig:08,src:001098,op:flip1,pos:49
CWE ID: CWE-369 | static OPJ_BOOL opj_pi_next_rpcl(opj_pi_iterator_t * pi)
{
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
goto LABEL_SKIP;
} else {
OPJ_UINT32 compno, resno;
pi->first = 0;
pi->dx = 0;
pi->dy = 0;
for (compno = 0; compno < pi->numcomps; compno++) {
comp = &pi->comps[compno];
for (resno = 0; resno < comp->numresolutions; resno++) {
OPJ_UINT32 dx, dy;
res = &comp->resolutions[resno];
dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno));
dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno));
pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx);
pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy);
}
}
}
if (!pi->tp_on) {
pi->poc.ty0 = pi->ty0;
pi->poc.tx0 = pi->tx0;
pi->poc.ty1 = pi->ty1;
pi->poc.tx1 = pi->tx1;
}
for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1; pi->resno++) {
for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1;
pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) {
for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1;
pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
OPJ_UINT32 levelno;
OPJ_INT32 trx0, try0;
OPJ_INT32 trx1, try1;
OPJ_UINT32 rpx, rpy;
OPJ_INT32 prci, prcj;
comp = &pi->comps[pi->compno];
if (pi->resno >= comp->numresolutions) {
continue;
}
res = &comp->resolutions[pi->resno];
levelno = comp->numresolutions - 1 - pi->resno;
trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno));
try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno));
trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno));
try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno));
rpx = res->pdx + levelno;
rpy = res->pdy + levelno;
/* To avoid divisions by zero / undefined behaviour on shift */
/* in below tests */
/* Fixes reading id:000026,sig:08,src:002419,op:int32,pos:60,val:+32 */
/* of https://github.com/uclouvain/openjpeg/issues/938 */
if (rpx >= 31 || ((comp->dx << rpx) >> rpx) != comp->dx ||
rpy >= 31 || ((comp->dy << rpy) >> rpy) != comp->dy) {
continue;
}
/* See ISO-15441. B.12.1.3 Resolution level-position-component-layer progression */
if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) &&
((try0 << levelno) % (1 << rpy))))) {
continue;
}
if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) &&
((trx0 << levelno) % (1 << rpx))))) {
continue;
}
if ((res->pw == 0) || (res->ph == 0)) {
continue;
}
if ((trx0 == trx1) || (try0 == try1)) {
continue;
}
prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x,
(OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx)
- opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx);
prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y,
(OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy)
- opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy);
pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw);
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno *
pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:
;
}
}
}
}
}
return OPJ_FALSE;
}
| 168,457 |
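The added guard is worth isolating: before the progression iterator uses comp->dx << rpx as a modulus, it verifies the shift is representable and lossless, since a shift past the type width (undefined behaviour) or a wrapped result of zero would make the later % divide by zero. A standalone sketch of the same check, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* A divisor d << s is only safe when the shift stays inside the type
 * width and loses no bits; otherwise it may wrap, and a wrapped value
 * of zero would make the modulus below divide by zero. */
static int shifted_divisor_ok(uint32_t d, uint32_t s)
{
    if (s >= 31)
        return 0;                    /* shift too wide for this check */
    if (((d << s) >> s) != d)
        return 0;                    /* round trip lost bits: overflow */
    return 1;
}

int main(void)
{
    uint32_t x = 97, d = 5, s;
    for (s = 4; s <= 30; s += 26) {  /* s = 4 passes, s = 30 overflows */
        if (shifted_divisor_ok(d, s))
            printf("s=%u: x %% (d<<s) = %u\n", s, x % (d << s));
        else
            printf("s=%u: skipped, divisor would overflow\n", s);
    }
    return 0;
}

With d = 5 and s = 30 the round trip loses bits, so the iteration is skipped rather than evaluated with a corrupted divisor, which matches the continue in the fixed function.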
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool scoped_pixel_buffer_object::Init(CGLContextObj cgl_context,
int size_in_bytes) {
cgl_context_ = cgl_context;
CGLContextObj CGL_MACRO_CONTEXT = cgl_context_;
glGenBuffersARB(1, &pixel_buffer_object_);
if (glGetError() == GL_NO_ERROR) {
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pixel_buffer_object_);
glBufferDataARB(GL_PIXEL_PACK_BUFFER_ARB, size_in_bytes, NULL,
GL_STREAM_READ_ARB);
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 0);
if (glGetError() != GL_NO_ERROR) {
Release();
}
} else {
cgl_context_ = NULL;
pixel_buffer_object_ = 0;
}
return pixel_buffer_object_ != 0;
}
Commit Message: Workaround for bad driver issue with NVIDIA GeForce 7300 GT on Mac 10.5.
BUG=87283
TEST=Run on a machine with NVIDIA GeForce 7300 GT on Mac 10.5 immediately after booting.
Review URL: http://codereview.chromium.org/7373018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@92651 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | bool scoped_pixel_buffer_object::Init(CGLContextObj cgl_context,
int size_in_bytes) {
// The PBO path is only done on 10.6 (SnowLeopard) and above due to
// a driver issue that was found on 10.5
// (specifically on a NVIDIA GeForce 7300 GT).
// http://crbug.com/87283
if (base::mac::IsOSLeopardOrEarlier()) {
return false;
}
cgl_context_ = cgl_context;
CGLContextObj CGL_MACRO_CONTEXT = cgl_context_;
glGenBuffersARB(1, &pixel_buffer_object_);
if (glGetError() == GL_NO_ERROR) {
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pixel_buffer_object_);
glBufferDataARB(GL_PIXEL_PACK_BUFFER_ARB, size_in_bytes, NULL,
GL_STREAM_READ_ARB);
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 0);
if (glGetError() != GL_NO_ERROR) {
Release();
}
} else {
cgl_context_ = NULL;
pixel_buffer_object_ = 0;
}
return pixel_buffer_object_ != 0;
}
| 170,317 |
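The shape of this fix is a capability gate rather than a code change: the fast path is disabled on platforms where a driver is known to misbehave, and the caller falls back when Init() returns false. A minimal sketch of version-gating, with hypothetical names standing in for base::mac::IsOSLeopardOrEarlier():

#include <stdbool.h>
#include <stdio.h>

struct os_version {
    int major, minor;
};

/* Hypothetical platform query; the real code asks the base library. */
static struct os_version current_os(void)
{
    struct os_version v = { 10, 6 };
    return v;
}

/* Gate the accelerated path off on 10.5 and earlier, where a known
 * driver defect corrupts reads; callers fall back to the slow path. */
static bool pbo_path_allowed(struct os_version v)
{
    if (v.major < 10 || (v.major == 10 && v.minor <= 5))
        return false;
    return true;
}

int main(void)
{
    puts(pbo_path_allowed(current_os()) ? "using PBO path"
                                        : "using fallback path");
    return 0;
}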
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: NativeBrowserFrame* NativeBrowserFrame::CreateNativeBrowserFrame(
BrowserFrame* browser_frame,
BrowserView* browser_view) {
if (views::Widget::IsPureViews())
return new BrowserFrameViews(browser_frame, browser_view);
return new BrowserFrameGtk(browser_frame, browser_view);
}
Commit Message: Fixed brekage when PureViews are enable but Desktop is not
[email protected]
BUG=none
TEST=chrome starts with --use-pure-views with touchui
Review URL: http://codereview.chromium.org/7210037
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91197 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | NativeBrowserFrame* NativeBrowserFrame::CreateNativeBrowserFrame(
BrowserFrame* browser_frame,
BrowserView* browser_view) {
if (views::Widget::IsPureViews() &&
views::ViewsDelegate::views_delegate->GetDefaultParentView())
return new BrowserFrameViews(browser_frame, browser_view);
return new BrowserFrameGtk(browser_frame, browser_view);
}
| 170,315 |
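The fix tightens the branch condition: the views-based frame is only usable when the delegate also supplies a default parent view, so both predicates must hold before that path is chosen. A small sketch of the two-condition dispatch, with hypothetical stand-ins for Widget::IsPureViews() and GetDefaultParentView():

#include <stdbool.h>
#include <stdio.h>

struct view;   /* opaque: only its presence matters here */

/* Hypothetical globals standing in for the delegate queries. */
static bool pure_views_enabled = true;
static struct view *default_parent_view = NULL;

static const char *pick_frame(void)
{
    /* The views frame needs a parent view to attach to, so both
     * conditions must hold before that path is selected. */
    if (pure_views_enabled && default_parent_view != NULL)
        return "views frame";
    return "gtk frame";
}

int main(void)
{
    puts(pick_frame());   /* prints "gtk frame": no parent view exists */
    return 0;
}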
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
cache_filename[MaxTextExtent],
id[MaxTextExtent],
keyword[MaxTextExtent],
*options;
const unsigned char
*p;
GeometryInfo
geometry_info;
Image
*image;
int
c;
LinkedListInfo
*profiles;
MagickBooleanType
status;
MagickOffsetType
offset;
MagickStatusType
flags;
register ssize_t
i;
size_t
depth,
length;
ssize_t
count;
StringInfo
*profile;
unsigned int
signature;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) CopyMagickString(cache_filename,image->filename,MaxTextExtent);
AppendImageFormat("cache",cache_filename);
c=ReadBlobByte(image);
if (c == EOF)
{
image=DestroyImage(image);
return((Image *) NULL);
}
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
offset=0;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
profiles=(LinkedListInfo *) NULL;
length=MaxTextExtent;
options=AcquireString((char *) NULL);
signature=GetMagickSignature((const StringInfo *) NULL);
image->depth=8;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MaxTextExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorDatabase(options,&image->background_color,
exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorDatabase(options,&image->border_color,
exception);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
image->colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
if (LocaleCompare(keyword,"error") == 0)
{
image->error.mean_error_per_pixel=StringToDouble(options,
(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MaxTextExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"magick-signature") == 0)
{
signature=(unsigned int) StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"matte-color") == 0)
{
(void) QueryColorDatabase(options,&image->matte_color,
exception);
break;
}
if (LocaleCompare(keyword,"maximum-error") == 0)
{
image->error.normalized_maximum_error=StringToDouble(
options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"mean-error") == 0)
{
image->error.normalized_mean_error=StringToDouble(options,
(char **) NULL);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"opaque") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
if ((flags & SigmaValue) != 0)
image->chromaticity.red_primary.y=geometry_info.sigma;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"MagickCache") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->compression == UndefinedCompression) || (image->columns == 0) ||
(image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (signature != GetMagickSignature((const StringInfo *) NULL))
ThrowReaderException(CacheError,"IncompatibleAPI");
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MaxTextExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MaxTextExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MaxTextExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
register unsigned char
*p;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
p=GetStringInfoDatum(profile);
count=ReadBlob(image,GetStringInfoLength(profile),p);
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
/*
Create image colormap.
*/
if (AcquireImageColormap(image,image->colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->colors != 0)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Read image colormap from file.
*/
packet_size=(size_t) (3UL*depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
if (count != (ssize_t) (packet_size*image->colors))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
p=colormap;
switch (depth)
{
default:
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Attach persistent pixel cache.
*/
status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception);
if (status == MagickFalse)
ThrowReaderException(CacheError,"UnableToPersistPixelCache");
/*
Proceed to next image.
*/
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message:
CWE ID: CWE-119 | static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
cache_filename[MaxTextExtent],
id[MaxTextExtent],
keyword[MaxTextExtent],
*options;
const unsigned char
*p;
GeometryInfo
geometry_info;
Image
*image;
int
c;
LinkedListInfo
*profiles;
MagickBooleanType
status;
MagickOffsetType
offset;
MagickStatusType
flags;
register ssize_t
i;
size_t
depth,
length;
ssize_t
count;
StringInfo
*profile;
unsigned int
signature;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) CopyMagickString(cache_filename,image->filename,MaxTextExtent);
AppendImageFormat("cache",cache_filename);
c=ReadBlobByte(image);
if (c == EOF)
{
image=DestroyImage(image);
return((Image *) NULL);
}
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
offset=0;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
profiles=(LinkedListInfo *) NULL;
length=MaxTextExtent;
options=AcquireString((char *) NULL);
signature=GetMagickSignature((const StringInfo *) NULL);
image->depth=8;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MaxTextExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorDatabase(options,&image->background_color,
exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorDatabase(options,&image->border_color,
exception);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
image->colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
if (LocaleCompare(keyword,"error") == 0)
{
image->error.mean_error_per_pixel=StringToDouble(options,
(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MaxTextExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"magick-signature") == 0)
{
signature=(unsigned int) StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"matte-color") == 0)
{
(void) QueryColorDatabase(options,&image->matte_color,
exception);
break;
}
if (LocaleCompare(keyword,"maximum-error") == 0)
{
image->error.normalized_maximum_error=StringToDouble(
options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"mean-error") == 0)
{
image->error.normalized_mean_error=StringToDouble(options,
(char **) NULL);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"opaque") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
if ((flags & SigmaValue) != 0)
image->chromaticity.red_primary.y=geometry_info.sigma;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"MagickCache") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->compression == UndefinedCompression) || (image->columns == 0) ||
(image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (signature != GetMagickSignature((const StringInfo *) NULL))
ThrowReaderException(CacheError,"IncompatibleAPI");
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MaxTextExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MaxTextExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MaxTextExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
register unsigned char
*p;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
p=GetStringInfoDatum(profile);
count=ReadBlob(image,GetStringInfoLength(profile),p);
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
/*
Create image colormap.
*/
if (AcquireImageColormap(image,image->colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->colors != 0)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Read image colormap from file.
*/
packet_size=(size_t) (3UL*depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
if (count != (ssize_t) (packet_size*image->colors))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
p=colormap;
switch (depth)
{
default:
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Attach persistent pixel cache.
*/
status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception);
if (status == MagickFalse)
ThrowReaderException(CacheError,"UnableToPersistPixelCache");
/*
Proceed to next image.
*/
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 168,583 |
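The key line in this fix is the SetImageExtent() call inserted before PersistPixelCache(): header-supplied columns and rows are validated, and the image buffers sized, before anything trusts them, which is the standard defence for this CWE-119 class. A self-contained sketch of validating attacker-controlled dimensions before allocation follows; the cap and all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define MAX_PIXELS (64UL * 1024 * 1024)   /* hypothetical resource cap */

/* Reject header-supplied dimensions before any buffer sized from them
 * exists: zero sizes are malformed, and the division keeps
 * columns * rows inside the cap without overflowing. */
static unsigned char *alloc_pixels(size_t columns, size_t rows)
{
    if (columns == 0 || rows == 0)
        return NULL;
    if (rows > MAX_PIXELS / columns)
        return NULL;
    return calloc(columns * rows, 3);     /* 3 bytes per RGB pixel */
}

int main(void)
{
    unsigned char *ok = alloc_pixels(640, 480);
    unsigned char *bad = alloc_pixels((size_t)-1, 2);  /* header lies */
    printf("ok=%s bad=%s\n", ok ? "allocated" : "rejected",
           bad ? "allocated" : "rejected");
    free(ok);
    free(bad);
    return 0;
}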
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: crypto_retrieve_X509_sans(krb5_context context,
pkinit_plg_crypto_context plgctx,
pkinit_req_crypto_context reqctx,
X509 *cert,
krb5_principal **princs_ret,
krb5_principal **upn_ret,
unsigned char ***dns_ret)
{
krb5_error_code retval = EINVAL;
char buf[DN_BUF_LEN];
int p = 0, u = 0, d = 0, ret = 0, l;
krb5_principal *princs = NULL;
krb5_principal *upns = NULL;
unsigned char **dnss = NULL;
unsigned int i, num_found = 0, num_sans = 0;
X509_EXTENSION *ext = NULL;
GENERAL_NAMES *ialt = NULL;
GENERAL_NAME *gen = NULL;
if (princs_ret != NULL)
*princs_ret = NULL;
if (upn_ret != NULL)
*upn_ret = NULL;
if (dns_ret != NULL)
*dns_ret = NULL;
if (princs_ret == NULL && upn_ret == NULL && dns_ret == NULL) {
pkiDebug("%s: nowhere to return any values!\n", __FUNCTION__);
return retval;
}
if (cert == NULL) {
pkiDebug("%s: no certificate!\n", __FUNCTION__);
return retval;
}
X509_NAME_oneline(X509_get_subject_name(cert),
buf, sizeof(buf));
pkiDebug("%s: looking for SANs in cert = %s\n", __FUNCTION__, buf);
l = X509_get_ext_by_NID(cert, NID_subject_alt_name, -1);
if (l < 0)
return 0;
if (!(ext = X509_get_ext(cert, l)) || !(ialt = X509V3_EXT_d2i(ext))) {
pkiDebug("%s: found no subject alt name extensions\n", __FUNCTION__);
retval = ENOENT;
goto cleanup;
}
num_sans = sk_GENERAL_NAME_num(ialt);
pkiDebug("%s: found %d subject alt name extension(s)\n", __FUNCTION__,
num_sans);
/* OK, we're likely returning something. Allocate return values */
if (princs_ret != NULL) {
princs = calloc(num_sans + 1, sizeof(krb5_principal));
if (princs == NULL) {
retval = ENOMEM;
goto cleanup;
}
}
if (upn_ret != NULL) {
upns = calloc(num_sans + 1, sizeof(krb5_principal));
if (upns == NULL) {
retval = ENOMEM;
goto cleanup;
}
}
if (dns_ret != NULL) {
dnss = calloc(num_sans + 1, sizeof(*dnss));
if (dnss == NULL) {
retval = ENOMEM;
goto cleanup;
}
}
for (i = 0; i < num_sans; i++) {
krb5_data name = { 0, 0, NULL };
gen = sk_GENERAL_NAME_value(ialt, i);
switch (gen->type) {
case GEN_OTHERNAME:
name.length = gen->d.otherName->value->value.sequence->length;
name.data = (char *)gen->d.otherName->value->value.sequence->data;
if (princs != NULL &&
OBJ_cmp(plgctx->id_pkinit_san,
gen->d.otherName->type_id) == 0) {
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)name.data, name.length,
"/tmp/pkinit_san");
#endif
ret = k5int_decode_krb5_principal_name(&name, &princs[p]);
if (ret) {
pkiDebug("%s: failed decoding pkinit san value\n",
__FUNCTION__);
} else {
p++;
num_found++;
}
} else if (upns != NULL &&
OBJ_cmp(plgctx->id_ms_san_upn,
gen->d.otherName->type_id) == 0) {
/* Prevent abuse of embedded null characters. */
if (memchr(name.data, '\0', name.length))
break;
ret = krb5_parse_name_flags(context, name.data,
KRB5_PRINCIPAL_PARSE_ENTERPRISE,
&upns[u]);
if (ret) {
pkiDebug("%s: failed parsing ms-upn san value\n",
__FUNCTION__);
} else {
u++;
num_found++;
}
} else {
pkiDebug("%s: unrecognized othername oid in SAN\n",
__FUNCTION__);
continue;
}
break;
case GEN_DNS:
if (dnss != NULL) {
/* Prevent abuse of embedded null characters. */
if (memchr(gen->d.dNSName->data, '\0', gen->d.dNSName->length))
break;
pkiDebug("%s: found dns name = %s\n", __FUNCTION__,
gen->d.dNSName->data);
dnss[d] = (unsigned char *)
strdup((char *)gen->d.dNSName->data);
if (dnss[d] == NULL) {
pkiDebug("%s: failed to duplicate dns name\n",
__FUNCTION__);
} else {
d++;
num_found++;
}
}
break;
default:
pkiDebug("%s: SAN type = %d expecting %d\n", __FUNCTION__,
gen->type, GEN_OTHERNAME);
}
}
sk_GENERAL_NAME_pop_free(ialt, GENERAL_NAME_free);
retval = 0;
if (princs)
*princs_ret = princs;
if (upns)
*upn_ret = upns;
if (dnss)
*dns_ret = dnss;
cleanup:
if (retval) {
if (princs != NULL) {
for (i = 0; princs[i] != NULL; i++)
krb5_free_principal(context, princs[i]);
free(princs);
}
if (upns != NULL) {
for (i = 0; upns[i] != NULL; i++)
krb5_free_principal(context, upns[i]);
free(upns);
}
if (dnss != NULL) {
for (i = 0; dnss[i] != NULL; i++)
free(dnss[i]);
free(dnss);
}
}
return retval;
}
Commit Message: Fix certauth built-in module returns
The PKINIT certauth eku module should never authoritatively authorize
a certificate, because an extended key usage does not establish a
relationship between the certificate and any specific user; it only
establishes that the certificate was created for PKINIT client
authentication. Therefore, pkinit_eku_authorize() should return
KRB5_PLUGIN_NO_HANDLE on success, not 0.
The certauth san module should pass if it does not find any SANs of
the types it can match against; the presence of other types of SANs
should not cause it to explicitly deny a certificate. Check for an
empty result from crypto_retrieve_cert_sans() in verify_client_san(),
instead of returning ENOENT from crypto_retrieve_cert_sans() when
there are no SANs at all.
ticket: 8561
CWE ID: CWE-287 | crypto_retrieve_X509_sans(krb5_context context,
pkinit_plg_crypto_context plgctx,
pkinit_req_crypto_context reqctx,
X509 *cert,
krb5_principal **princs_ret,
krb5_principal **upn_ret,
unsigned char ***dns_ret)
{
krb5_error_code retval = EINVAL;
char buf[DN_BUF_LEN];
int p = 0, u = 0, d = 0, ret = 0, l;
krb5_principal *princs = NULL;
krb5_principal *upns = NULL;
unsigned char **dnss = NULL;
unsigned int i, num_found = 0, num_sans = 0;
X509_EXTENSION *ext = NULL;
GENERAL_NAMES *ialt = NULL;
GENERAL_NAME *gen = NULL;
if (princs_ret != NULL)
*princs_ret = NULL;
if (upn_ret != NULL)
*upn_ret = NULL;
if (dns_ret != NULL)
*dns_ret = NULL;
if (princs_ret == NULL && upn_ret == NULL && dns_ret == NULL) {
pkiDebug("%s: nowhere to return any values!\n", __FUNCTION__);
return retval;
}
if (cert == NULL) {
pkiDebug("%s: no certificate!\n", __FUNCTION__);
return retval;
}
X509_NAME_oneline(X509_get_subject_name(cert),
buf, sizeof(buf));
pkiDebug("%s: looking for SANs in cert = %s\n", __FUNCTION__, buf);
l = X509_get_ext_by_NID(cert, NID_subject_alt_name, -1);
if (l < 0)
return 0;
if (!(ext = X509_get_ext(cert, l)) || !(ialt = X509V3_EXT_d2i(ext))) {
pkiDebug("%s: found no subject alt name extensions\n", __FUNCTION__);
goto cleanup;
}
num_sans = sk_GENERAL_NAME_num(ialt);
pkiDebug("%s: found %d subject alt name extension(s)\n", __FUNCTION__,
num_sans);
/* OK, we're likely returning something. Allocate return values */
if (princs_ret != NULL) {
princs = calloc(num_sans + 1, sizeof(krb5_principal));
if (princs == NULL) {
retval = ENOMEM;
goto cleanup;
}
}
if (upn_ret != NULL) {
upns = calloc(num_sans + 1, sizeof(krb5_principal));
if (upns == NULL) {
retval = ENOMEM;
goto cleanup;
}
}
if (dns_ret != NULL) {
dnss = calloc(num_sans + 1, sizeof(*dnss));
if (dnss == NULL) {
retval = ENOMEM;
goto cleanup;
}
}
for (i = 0; i < num_sans; i++) {
krb5_data name = { 0, 0, NULL };
gen = sk_GENERAL_NAME_value(ialt, i);
switch (gen->type) {
case GEN_OTHERNAME:
name.length = gen->d.otherName->value->value.sequence->length;
name.data = (char *)gen->d.otherName->value->value.sequence->data;
if (princs != NULL &&
OBJ_cmp(plgctx->id_pkinit_san,
gen->d.otherName->type_id) == 0) {
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)name.data, name.length,
"/tmp/pkinit_san");
#endif
ret = k5int_decode_krb5_principal_name(&name, &princs[p]);
if (ret) {
pkiDebug("%s: failed decoding pkinit san value\n",
__FUNCTION__);
} else {
p++;
num_found++;
}
} else if (upns != NULL &&
OBJ_cmp(plgctx->id_ms_san_upn,
gen->d.otherName->type_id) == 0) {
/* Prevent abuse of embedded null characters. */
if (memchr(name.data, '\0', name.length))
break;
ret = krb5_parse_name_flags(context, name.data,
KRB5_PRINCIPAL_PARSE_ENTERPRISE,
&upns[u]);
if (ret) {
pkiDebug("%s: failed parsing ms-upn san value\n",
__FUNCTION__);
} else {
u++;
num_found++;
}
} else {
pkiDebug("%s: unrecognized othername oid in SAN\n",
__FUNCTION__);
continue;
}
break;
case GEN_DNS:
if (dnss != NULL) {
/* Prevent abuse of embedded null characters. */
if (memchr(gen->d.dNSName->data, '\0', gen->d.dNSName->length))
break;
pkiDebug("%s: found dns name = %s\n", __FUNCTION__,
gen->d.dNSName->data);
dnss[d] = (unsigned char *)
strdup((char *)gen->d.dNSName->data);
if (dnss[d] == NULL) {
pkiDebug("%s: failed to duplicate dns name\n",
__FUNCTION__);
} else {
d++;
num_found++;
}
}
break;
default:
pkiDebug("%s: SAN type = %d expecting %d\n", __FUNCTION__,
gen->type, GEN_OTHERNAME);
}
}
sk_GENERAL_NAME_pop_free(ialt, GENERAL_NAME_free);
retval = 0;
if (princs != NULL && *princs != NULL) {
*princs_ret = princs;
princs = NULL;
}
if (upns != NULL && *upns != NULL) {
*upn_ret = upns;
upns = NULL;
}
if (dnss != NULL && *dnss != NULL) {
*dns_ret = dnss;
dnss = NULL;
}
cleanup:
for (i = 0; princs != NULL && princs[i] != NULL; i++)
krb5_free_principal(context, princs[i]);
free(princs);
for (i = 0; upns != NULL && upns[i] != NULL; i++)
krb5_free_principal(context, upns[i]);
free(upns);
for (i = 0; dnss != NULL && dnss[i] != NULL; i++)
free(dnss[i]);
free(dnss);
return retval;
}
| 170,173 |
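The fix above replaces scattered per-branch error handling with a transfer-then-cleanup idiom: each output array is handed to the caller only when it is non-empty, and the local pointer is then set to NULL so that one shared cleanup path frees whatever was not transferred. Below is a minimal standalone sketch of that idiom; the names (make_items, item_free) are illustrative stand-ins, not krb5 APIs.

#include <cstdlib>

// Hypothetical element type and destructor standing in for krb5_principal.
typedef char *item_t;
static void item_free(item_t it) { free(it); }

// Returns 0 on success. On success a non-empty, NULL-terminated array may be
// transferred to the caller via *out; everything not transferred is freed in
// the single cleanup path, mirroring the structure of the fixed function.
static int make_items(item_t **out, int want_failure)
{
    int retval = -1;
    size_t i;
    item_t *items = (item_t *) calloc(4, sizeof(item_t));

    *out = NULL;
    if (items == NULL)
        goto cleanup;
    items[0] = (item_t) malloc(8);
    if (items[0] == NULL || want_failure)
        goto cleanup;

    retval = 0;
    if (*items != NULL) {        /* transfer only a non-empty result */
        *out = items;
        items = NULL;            /* the cleanup below then frees nothing */
    }

cleanup:
    for (i = 0; items != NULL && items[i] != NULL; i++)
        item_free(items[i]);
    free(items);
    return retval;
}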
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: PaintPropertyTreeBuilderFragmentContext()
: current_effect(EffectPaintPropertyNode::Root()) {
current.clip = absolute_position.clip = fixed_position.clip =
ClipPaintPropertyNode::Root();
current.transform = absolute_position.transform = fixed_position.transform =
TransformPaintPropertyNode::Root();
current.scroll = absolute_position.scroll = fixed_position.scroll =
ScrollPaintPropertyNode::Root();
}
Commit Message: Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
CWE ID: | PaintPropertyTreeBuilderFragmentContext()
: current_effect(&EffectPaintPropertyNode::Root()) {
current.clip = absolute_position.clip = fixed_position.clip =
&ClipPaintPropertyNode::Root();
current.transform = absolute_position.transform = fixed_position.transform =
&TransformPaintPropertyNode::Root();
current.scroll = absolute_position.scroll = fixed_position.scroll =
&ScrollPaintPropertyNode::Root();
}
| 171,792 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: long long Cluster::GetTimeCode() const
{
long long pos;
long len;
const long status = Load(pos, len);
if (status < 0) //error
return status;
return m_timecode;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long long Cluster::GetTimeCode() const
{
long long pos;
long len;
const long status = Load(pos, len);
if (status < 0) // error
return status;
return m_timecode;
}
long long Cluster::GetTime() const {
const long long tc = GetTimeCode();
if (tc < 0)
return tc;
const SegmentInfo* const pInfo = m_pSegment->GetInfo();
assert(pInfo);
const long long scale = pInfo->GetTimeCodeScale();
assert(scale >= 1);
const long long t = m_timecode * scale;
return t;
}
long long Cluster::GetFirstTime() const {
const BlockEntry* pEntry;
const long status = GetFirst(pEntry);
if (status < 0) // error
return status;
if (pEntry == NULL) // empty cluster
return GetTime();
const Block* const pBlock = pEntry->GetBlock();
assert(pBlock);
return pBlock->GetTime(this);
}
long long Cluster::GetLastTime() const {
const BlockEntry* pEntry;
const long status = GetLast(pEntry);
if (status < 0) // error
return status;
if (pEntry == NULL) // empty cluster
return GetTime();
const Block* const pBlock = pEntry->GetBlock();
assert(pBlock);
return pBlock->GetTime(this);
}
long Cluster::CreateBlock(long long id,
long long pos, // absolute pos of payload
long long size, long long discard_padding) {
assert((id == 0x20) || (id == 0x23)); // BlockGroup or SimpleBlock
if (m_entries_count < 0) { // haven't parsed anything yet
assert(m_entries == NULL);
assert(m_entries_size == 0);
m_entries_size = 1024;
m_entries = new BlockEntry* [m_entries_size];
m_entries_count = 0;
} else {
assert(m_entries);
assert(m_entries_size > 0);
assert(m_entries_count <= m_entries_size);
if (m_entries_count >= m_entries_size) {
const long entries_size = 2 * m_entries_size;
BlockEntry** const entries = new BlockEntry* [entries_size];
assert(entries);
BlockEntry** src = m_entries;
BlockEntry** const src_end = src + m_entries_count;
BlockEntry** dst = entries;
while (src != src_end)
*dst++ = *src++;
delete[] m_entries;
m_entries = entries;
m_entries_size = entries_size;
}
}
if (id == 0x20) // BlockGroup ID
return CreateBlockGroup(pos, size, discard_padding);
else // SimpleBlock ID
return CreateSimpleBlock(pos, size);
}
long Cluster::CreateBlockGroup(long long start_offset, long long size,
long long discard_padding) {
assert(m_entries);
assert(m_entries_size > 0);
assert(m_entries_count >= 0);
assert(m_entries_count < m_entries_size);
IMkvReader* const pReader = m_pSegment->m_pReader;
long long pos = start_offset;
const long long stop = start_offset + size;
// For WebM files, there is a bias towards previous reference times
//(in order to support alt-ref frames, which refer back to the previous
// keyframe). Normally a 0 value is not possible, but here we tenatively
// allow 0 as the value of a reference frame, with the interpretation
// that this is a "previous" reference time.
long long prev = 1; // nonce
long long next = 0; // nonce
long long duration = -1; // really, this is unsigned
long long bpos = -1;
long long bsize = -1;
while (pos < stop) {
long len;
const long long id = ReadUInt(pReader, pos, len);
assert(id >= 0); // TODO
assert((pos + len) <= stop);
pos += len; // consume ID
const long long size = ReadUInt(pReader, pos, len);
assert(size >= 0); // TODO
assert((pos + len) <= stop);
pos += len; // consume size
if (id == 0x21) { // Block ID
if (bpos < 0) { // Block ID
bpos = pos;
bsize = size;
}
} else if (id == 0x1B) { // Duration ID
assert(size <= 8);
duration = UnserializeUInt(pReader, pos, size);
assert(duration >= 0); // TODO
} else if (id == 0x7B) { // ReferenceBlock
assert(size <= 8);
const long size_ = static_cast<long>(size);
long long time;
long status = UnserializeInt(pReader, pos, size_, time);
assert(status == 0);
if (status != 0)
return -1;
if (time <= 0) // see note above
prev = time;
else // weird
next = time;
}
pos += size; // consume payload
assert(pos <= stop);
}
assert(pos == stop);
assert(bpos >= 0);
assert(bsize >= 0);
const long idx = m_entries_count;
BlockEntry** const ppEntry = m_entries + idx;
BlockEntry*& pEntry = *ppEntry;
pEntry = new (std::nothrow)
BlockGroup(this, idx, bpos, bsize, prev, next, duration, discard_padding);
if (pEntry == NULL)
return -1; // generic error
BlockGroup* const p = static_cast<BlockGroup*>(pEntry);
const long status = p->Parse();
if (status == 0) { // success
++m_entries_count;
return 0;
}
delete pEntry;
pEntry = 0;
return status;
}
long Cluster::CreateSimpleBlock(long long st, long long sz) {
assert(m_entries);
assert(m_entries_size > 0);
assert(m_entries_count >= 0);
assert(m_entries_count < m_entries_size);
const long idx = m_entries_count;
BlockEntry** const ppEntry = m_entries + idx;
BlockEntry*& pEntry = *ppEntry;
pEntry = new (std::nothrow) SimpleBlock(this, idx, st, sz);
if (pEntry == NULL)
return -1; // generic error
SimpleBlock* const p = static_cast<SimpleBlock*>(pEntry);
const long status = p->Parse();
if (status == 0) {
++m_entries_count;
return 0;
}
delete pEntry;
pEntry = 0;
return status;
}
long Cluster::GetFirst(const BlockEntry*& pFirst) const {
if (m_entries_count <= 0) {
long long pos;
long len;
const long status = Parse(pos, len);
| 174,365 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int BlobURLRequestJob::ComputeBytesToRead() const {
int64 current_item_remaining_bytes =
item_length_list_[current_item_index_] - current_item_offset_;
int64 remaining_bytes = std::min(current_item_remaining_bytes,
remaining_bytes_);
return static_cast<int>(std::min(
static_cast<int64>(read_buf_->BytesRemaining()),
remaining_bytes));
}
Commit Message: Avoid integer overflows in BlobURLRequestJob.
BUG=169685
Review URL: https://chromiumcodereview.appspot.com/12047012
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@179154 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-189 | int BlobURLRequestJob::ComputeBytesToRead() const {
int64 current_item_length = item_length_list_[current_item_index_];
int64 item_remaining = current_item_length - current_item_offset_;
int64 buf_remaining = read_buf_->BytesRemaining();
int64 max_remaining = std::numeric_limits<int>::max();
int64 min = std::min(std::min(std::min(item_remaining,
buf_remaining),
remaining_bytes_),
max_remaining);
return static_cast<int>(min);
}
| 171,397 |
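The patched function above folds std::numeric_limits<int>::max() into the 64-bit minimum before narrowing, so the final cast can never truncate. A self-contained sketch of this clamp-then-cast pattern follows; the function and variable names are illustrative, not the Chromium API.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

// Clamp-then-cast: take the minimum of all 64-bit candidates, including
// INT_MAX itself, so the final narrowing conversion cannot overflow.
static int BytesToRead(int64_t item_remaining, int64_t buf_remaining,
                       int64_t total_remaining) {
  int64_t max_int = std::numeric_limits<int>::max();
  int64_t n = std::min(std::min(item_remaining, buf_remaining),
                       std::min(total_remaining, max_int));
  return static_cast<int>(n);  // safe: n <= INT_MAX by construction
}

int main() {
  // A 5 GiB remainder no longer wraps to a negative int.
  std::cout << BytesToRead(5LL << 30, 4096, 5LL << 30) << "\n";  // prints 4096
}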
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl, unsigned char **p,
const unsigned char *end )
{
int ret = 0;
size_t n;
if( ssl->conf->f_psk == NULL &&
( ssl->conf->psk == NULL || ssl->conf->psk_identity == NULL ||
ssl->conf->psk_identity_len == 0 || ssl->conf->psk_len == 0 ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no pre-shared key" ) );
return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
}
/*
* Receive client pre-shared key identity name
*/
if( *p + 2 > end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
n = ( (*p)[0] << 8 ) | (*p)[1];
*p += 2;
if( n < 1 || n > 65535 || *p + n > end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ssl->conf->f_psk != NULL )
{
if( ssl->conf->f_psk( ssl->conf->p_psk, ssl, *p, n ) != 0 )
ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
}
else
{
/* Identity is not a big secret since clients send it in the clear,
* but treat it carefully anyway, just in case */
if( n != ssl->conf->psk_identity_len ||
mbedtls_ssl_safer_memcmp( ssl->conf->psk_identity, *p, n ) != 0 )
{
ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
}
}
if( ret == MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY )
{
MBEDTLS_SSL_DEBUG_BUF( 3, "Unknown PSK identity", *p, n );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_UNKNOWN_PSK_IDENTITY );
return( MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY );
}
*p += n;
return( 0 );
}
Commit Message: Prevent bounds check bypass through overflow in PSK identity parsing
The check `if( *p + n > end )` in `ssl_parse_client_psk_identity` is
unsafe because `*p + n` might overflow, thus bypassing the check. As
`n` is a user-specified value up to 65K, this is relevant if the
library happens to be located in the last 65K of virtual memory.
This commit replaces the check by a safe version.
CWE ID: CWE-190 | static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl, unsigned char **p,
const unsigned char *end )
{
int ret = 0;
size_t n;
if( ssl->conf->f_psk == NULL &&
( ssl->conf->psk == NULL || ssl->conf->psk_identity == NULL ||
ssl->conf->psk_identity_len == 0 || ssl->conf->psk_len == 0 ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no pre-shared key" ) );
return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
}
/*
* Receive client pre-shared key identity name
*/
if( end - *p < 2 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
n = ( (*p)[0] << 8 ) | (*p)[1];
*p += 2;
if( n < 1 || n > 65535 || n > (size_t) ( end - *p ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ssl->conf->f_psk != NULL )
{
if( ssl->conf->f_psk( ssl->conf->p_psk, ssl, *p, n ) != 0 )
ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
}
else
{
/* Identity is not a big secret since clients send it in the clear,
* but treat it carefully anyway, just in case */
if( n != ssl->conf->psk_identity_len ||
mbedtls_ssl_safer_memcmp( ssl->conf->psk_identity, *p, n ) != 0 )
{
ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
}
}
if( ret == MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY )
{
MBEDTLS_SSL_DEBUG_BUF( 3, "Unknown PSK identity", *p, n );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_UNKNOWN_PSK_IDENTITY );
return( MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY );
}
*p += n;
return( 0 );
}
| 169,418 |
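The commit message above explains why `*p + n > end` is the wrong shape for a bounds check: with attacker-controlled n the addition can wrap (and out-of-range pointer arithmetic is undefined behavior regardless), so the length must be compared against the remaining space instead. A minimal illustration of the two forms, using plain pointers with no TLS context:

#include <cstddef>
#include <cstdio>

// Safe shape: p <= end is an invariant, so end - p cannot overflow, and
// the untrusted length n is compared against that remaining length.
static bool field_fits(const unsigned char *p, const unsigned char *end,
                       size_t n) {
  return n <= (size_t)(end - p);
}

// Rejected shape, for contrast: p + n may wrap around the address space
// when n is huge, letting the check pass spuriously; forming such a
// pointer is undefined behavior in the first place.
static bool field_fits_unsafe(const unsigned char *p,
                              const unsigned char *end, size_t n) {
  return p + n <= end;  // do not bounds-check untrusted n this way
}

int main() {
  unsigned char buf[16];
  printf("%d %d\n", field_fits(buf, buf + 16, 8),   // 1
         field_fits(buf, buf + 16, 17));            // 0
  (void)field_fits_unsafe;
  return 0;
}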
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
u8 *buf = NULL;
int err;
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
get_random_bytes(buf, slen);
seed = buf;
}
err = tfm->seed(tfm, seed, slen);
kfree(buf);
return err;
}
Commit Message: crypto: rng - Remove old low-level rng interface
Now that all rng implementations have switched over to the new
interface, we can remove the old low-level interface.
Signed-off-by: Herbert Xu <[email protected]>
CWE ID: CWE-476 | int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
u8 *buf = NULL;
int err;
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
get_random_bytes(buf, slen);
seed = buf;
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
kfree(buf);
return err;
}
| 167,732 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: validate_body_helper (DBusTypeReader *reader,
int byte_order,
dbus_bool_t walk_reader_to_end,
const unsigned char *p,
const unsigned char *end,
const unsigned char **new_p)
{
int current_type;
while ((current_type = _dbus_type_reader_get_current_type (reader)) != DBUS_TYPE_INVALID)
{
const unsigned char *a;
int alignment;
switch (current_type)
{
case DBUS_TYPE_BYTE:
++p;
break;
case DBUS_TYPE_BOOLEAN:
case DBUS_TYPE_INT16:
case DBUS_TYPE_UINT16:
case DBUS_TYPE_INT32:
case DBUS_TYPE_UINT32:
case DBUS_TYPE_UNIX_FD:
case DBUS_TYPE_INT64:
case DBUS_TYPE_UINT64:
case DBUS_TYPE_DOUBLE:
alignment = _dbus_type_get_alignment (current_type);
a = _DBUS_ALIGN_ADDRESS (p, alignment);
if (a >= end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
if (current_type == DBUS_TYPE_BOOLEAN)
{
dbus_uint32_t v = _dbus_unpack_uint32 (byte_order,
p);
if (!(v == 0 || v == 1))
return DBUS_INVALID_BOOLEAN_NOT_ZERO_OR_ONE;
}
p += alignment;
break;
case DBUS_TYPE_ARRAY:
case DBUS_TYPE_STRING:
case DBUS_TYPE_OBJECT_PATH:
{
dbus_uint32_t claimed_len;
a = _DBUS_ALIGN_ADDRESS (p, 4);
if (a + 4 > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
claimed_len = _dbus_unpack_uint32 (byte_order, p);
p += 4;
/* p may now be == end */
_dbus_assert (p <= end);
if (current_type == DBUS_TYPE_ARRAY)
{
int array_elem_type = _dbus_type_reader_get_element_type (reader);
if (!_dbus_type_is_valid (array_elem_type))
{
return DBUS_INVALID_UNKNOWN_TYPECODE;
}
alignment = _dbus_type_get_alignment (array_elem_type);
a = _DBUS_ALIGN_ADDRESS (p, alignment);
/* a may now be == end */
if (a > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
}
if (claimed_len > (unsigned long) (end - p))
return DBUS_INVALID_LENGTH_OUT_OF_BOUNDS;
if (current_type == DBUS_TYPE_OBJECT_PATH)
{
DBusString str;
_dbus_string_init_const_len (&str, p, claimed_len);
if (!_dbus_validate_path (&str, 0,
_dbus_string_get_length (&str)))
return DBUS_INVALID_BAD_PATH;
p += claimed_len;
}
else if (current_type == DBUS_TYPE_STRING)
{
DBusString str;
_dbus_string_init_const_len (&str, p, claimed_len);
if (!_dbus_string_validate_utf8 (&str, 0,
_dbus_string_get_length (&str)))
return DBUS_INVALID_BAD_UTF8_IN_STRING;
p += claimed_len;
}
else if (current_type == DBUS_TYPE_ARRAY && claimed_len > 0)
{
DBusTypeReader sub;
DBusValidity validity;
const unsigned char *array_end;
int array_elem_type;
if (claimed_len > DBUS_MAXIMUM_ARRAY_LENGTH)
return DBUS_INVALID_ARRAY_LENGTH_EXCEEDS_MAXIMUM;
/* Remember that the reader is types only, so we can't
* use it to iterate over elements. It stays the same
* for all elements.
*/
_dbus_type_reader_recurse (reader, &sub);
array_end = p + claimed_len;
array_elem_type = _dbus_type_reader_get_element_type (reader);
/* avoid recursive call to validate_body_helper if this is an array
* of fixed-size elements
*/
if (dbus_type_is_fixed (array_elem_type))
{
/* bools need to be handled differently, because they can
* have an invalid value
*/
if (array_elem_type == DBUS_TYPE_BOOLEAN)
{
dbus_uint32_t v;
alignment = _dbus_type_get_alignment (array_elem_type);
while (p < array_end)
{
v = _dbus_unpack_uint32 (byte_order, p);
if (!(v == 0 || v == 1))
return DBUS_INVALID_BOOLEAN_NOT_ZERO_OR_ONE;
p += alignment;
}
}
else
{
p = array_end;
}
}
else
{
while (p < array_end)
{
validity = validate_body_helper (&sub, byte_order, FALSE, p, end, &p);
if (validity != DBUS_VALID)
return validity;
}
}
if (p != array_end)
return DBUS_INVALID_ARRAY_LENGTH_INCORRECT;
}
/* check nul termination */
if (current_type != DBUS_TYPE_ARRAY)
{
if (*p != '\0')
return DBUS_INVALID_STRING_MISSING_NUL;
++p;
}
}
break;
case DBUS_TYPE_SIGNATURE:
{
dbus_uint32_t claimed_len;
DBusString str;
DBusValidity validity;
claimed_len = *p;
++p;
/* 1 is for nul termination */
if (claimed_len + 1 > (unsigned long) (end - p))
return DBUS_INVALID_SIGNATURE_LENGTH_OUT_OF_BOUNDS;
_dbus_string_init_const_len (&str, p, claimed_len);
validity =
_dbus_validate_signature_with_reason (&str, 0,
_dbus_string_get_length (&str));
if (validity != DBUS_VALID)
return validity;
p += claimed_len;
_dbus_assert (p < end);
if (*p != DBUS_TYPE_INVALID)
return DBUS_INVALID_SIGNATURE_MISSING_NUL;
++p;
_dbus_verbose ("p = %p end = %p claimed_len %u\n", p, end, claimed_len);
}
break;
case DBUS_TYPE_VARIANT:
{
/* 1 byte sig len, sig typecodes, align to
* contained-type-boundary, values.
*/
/* In addition to normal signature validation, we need to be sure
* the signature contains only a single (possibly container) type.
*/
dbus_uint32_t claimed_len;
DBusString sig;
DBusTypeReader sub;
DBusValidity validity;
int contained_alignment;
int contained_type;
DBusValidity reason;
claimed_len = *p;
++p;
/* + 1 for nul */
if (claimed_len + 1 > (unsigned long) (end - p))
return DBUS_INVALID_VARIANT_SIGNATURE_LENGTH_OUT_OF_BOUNDS;
_dbus_string_init_const_len (&sig, p, claimed_len);
reason = _dbus_validate_signature_with_reason (&sig, 0,
_dbus_string_get_length (&sig));
if (!(reason == DBUS_VALID))
{
if (reason == DBUS_VALIDITY_UNKNOWN_OOM_ERROR)
return reason;
else
return DBUS_INVALID_VARIANT_SIGNATURE_BAD;
}
p += claimed_len;
if (*p != DBUS_TYPE_INVALID)
return DBUS_INVALID_VARIANT_SIGNATURE_MISSING_NUL;
++p;
contained_type = _dbus_first_type_in_signature (&sig, 0);
if (contained_type == DBUS_TYPE_INVALID)
return DBUS_INVALID_VARIANT_SIGNATURE_EMPTY;
contained_alignment = _dbus_type_get_alignment (contained_type);
a = _DBUS_ALIGN_ADDRESS (p, contained_alignment);
if (a > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
_dbus_type_reader_init_types_only (&sub, &sig, 0);
_dbus_assert (_dbus_type_reader_get_current_type (&sub) != DBUS_TYPE_INVALID);
validity = validate_body_helper (&sub, byte_order, FALSE, p, end, &p);
if (validity != DBUS_VALID)
return validity;
if (_dbus_type_reader_next (&sub))
return DBUS_INVALID_VARIANT_SIGNATURE_SPECIFIES_MULTIPLE_VALUES;
_dbus_assert (_dbus_type_reader_get_current_type (&sub) == DBUS_TYPE_INVALID);
}
break;
case DBUS_TYPE_DICT_ENTRY:
case DBUS_TYPE_STRUCT:
{
DBusTypeReader sub;
DBusValidity validity;
a = _DBUS_ALIGN_ADDRESS (p, 8);
if (a > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
_dbus_type_reader_recurse (reader, &sub);
validity = validate_body_helper (&sub, byte_order, TRUE, p, end, &p);
if (validity != DBUS_VALID)
return validity;
}
break;
default:
_dbus_assert_not_reached ("invalid typecode in supposedly-validated signature");
break;
}
Commit Message:
CWE ID: CWE-399 | validate_body_helper (DBusTypeReader *reader,
int byte_order,
dbus_bool_t walk_reader_to_end,
int total_depth,
const unsigned char *p,
const unsigned char *end,
const unsigned char **new_p)
{
int current_type;
/* The spec allows arrays and structs to each nest 32, for total
* nesting of 2*32. We want to impose the same limit on "dynamic"
* value nesting (not visible in the signature) which is introduced
* by DBUS_TYPE_VARIANT.
*/
if (total_depth > (DBUS_MAXIMUM_TYPE_RECURSION_DEPTH * 2))
{
return DBUS_INVALID_NESTED_TOO_DEEPLY;
}
while ((current_type = _dbus_type_reader_get_current_type (reader)) != DBUS_TYPE_INVALID)
{
const unsigned char *a;
int alignment;
switch (current_type)
{
case DBUS_TYPE_BYTE:
++p;
break;
case DBUS_TYPE_BOOLEAN:
case DBUS_TYPE_INT16:
case DBUS_TYPE_UINT16:
case DBUS_TYPE_INT32:
case DBUS_TYPE_UINT32:
case DBUS_TYPE_UNIX_FD:
case DBUS_TYPE_INT64:
case DBUS_TYPE_UINT64:
case DBUS_TYPE_DOUBLE:
alignment = _dbus_type_get_alignment (current_type);
a = _DBUS_ALIGN_ADDRESS (p, alignment);
if (a >= end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
if (current_type == DBUS_TYPE_BOOLEAN)
{
dbus_uint32_t v = _dbus_unpack_uint32 (byte_order,
p);
if (!(v == 0 || v == 1))
return DBUS_INVALID_BOOLEAN_NOT_ZERO_OR_ONE;
}
p += alignment;
break;
case DBUS_TYPE_ARRAY:
case DBUS_TYPE_STRING:
case DBUS_TYPE_OBJECT_PATH:
{
dbus_uint32_t claimed_len;
a = _DBUS_ALIGN_ADDRESS (p, 4);
if (a + 4 > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
claimed_len = _dbus_unpack_uint32 (byte_order, p);
p += 4;
/* p may now be == end */
_dbus_assert (p <= end);
if (current_type == DBUS_TYPE_ARRAY)
{
int array_elem_type = _dbus_type_reader_get_element_type (reader);
if (!_dbus_type_is_valid (array_elem_type))
{
return DBUS_INVALID_UNKNOWN_TYPECODE;
}
alignment = _dbus_type_get_alignment (array_elem_type);
a = _DBUS_ALIGN_ADDRESS (p, alignment);
/* a may now be == end */
if (a > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
}
if (claimed_len > (unsigned long) (end - p))
return DBUS_INVALID_LENGTH_OUT_OF_BOUNDS;
if (current_type == DBUS_TYPE_OBJECT_PATH)
{
DBusString str;
_dbus_string_init_const_len (&str, p, claimed_len);
if (!_dbus_validate_path (&str, 0,
_dbus_string_get_length (&str)))
return DBUS_INVALID_BAD_PATH;
p += claimed_len;
}
else if (current_type == DBUS_TYPE_STRING)
{
DBusString str;
_dbus_string_init_const_len (&str, p, claimed_len);
if (!_dbus_string_validate_utf8 (&str, 0,
_dbus_string_get_length (&str)))
return DBUS_INVALID_BAD_UTF8_IN_STRING;
p += claimed_len;
}
else if (current_type == DBUS_TYPE_ARRAY && claimed_len > 0)
{
DBusTypeReader sub;
DBusValidity validity;
const unsigned char *array_end;
int array_elem_type;
if (claimed_len > DBUS_MAXIMUM_ARRAY_LENGTH)
return DBUS_INVALID_ARRAY_LENGTH_EXCEEDS_MAXIMUM;
/* Remember that the reader is types only, so we can't
* use it to iterate over elements. It stays the same
* for all elements.
*/
_dbus_type_reader_recurse (reader, &sub);
array_end = p + claimed_len;
array_elem_type = _dbus_type_reader_get_element_type (reader);
/* avoid recursive call to validate_body_helper if this is an array
* of fixed-size elements
*/
if (dbus_type_is_fixed (array_elem_type))
{
/* bools need to be handled differently, because they can
* have an invalid value
*/
if (array_elem_type == DBUS_TYPE_BOOLEAN)
{
dbus_uint32_t v;
alignment = _dbus_type_get_alignment (array_elem_type);
while (p < array_end)
{
v = _dbus_unpack_uint32 (byte_order, p);
if (!(v == 0 || v == 1))
return DBUS_INVALID_BOOLEAN_NOT_ZERO_OR_ONE;
p += alignment;
}
}
else
{
p = array_end;
}
}
else
{
while (p < array_end)
{
validity = validate_body_helper (&sub, byte_order, FALSE,
total_depth + 1,
p, end, &p);
if (validity != DBUS_VALID)
return validity;
}
}
if (p != array_end)
return DBUS_INVALID_ARRAY_LENGTH_INCORRECT;
}
/* check nul termination */
if (current_type != DBUS_TYPE_ARRAY)
{
if (*p != '\0')
return DBUS_INVALID_STRING_MISSING_NUL;
++p;
}
}
break;
case DBUS_TYPE_SIGNATURE:
{
dbus_uint32_t claimed_len;
DBusString str;
DBusValidity validity;
claimed_len = *p;
++p;
/* 1 is for nul termination */
if (claimed_len + 1 > (unsigned long) (end - p))
return DBUS_INVALID_SIGNATURE_LENGTH_OUT_OF_BOUNDS;
_dbus_string_init_const_len (&str, p, claimed_len);
validity =
_dbus_validate_signature_with_reason (&str, 0,
_dbus_string_get_length (&str));
if (validity != DBUS_VALID)
return validity;
p += claimed_len;
_dbus_assert (p < end);
if (*p != DBUS_TYPE_INVALID)
return DBUS_INVALID_SIGNATURE_MISSING_NUL;
++p;
_dbus_verbose ("p = %p end = %p claimed_len %u\n", p, end, claimed_len);
}
break;
case DBUS_TYPE_VARIANT:
{
/* 1 byte sig len, sig typecodes, align to
* contained-type-boundary, values.
*/
/* In addition to normal signature validation, we need to be sure
* the signature contains only a single (possibly container) type.
*/
dbus_uint32_t claimed_len;
DBusString sig;
DBusTypeReader sub;
DBusValidity validity;
int contained_alignment;
int contained_type;
DBusValidity reason;
claimed_len = *p;
++p;
/* + 1 for nul */
if (claimed_len + 1 > (unsigned long) (end - p))
return DBUS_INVALID_VARIANT_SIGNATURE_LENGTH_OUT_OF_BOUNDS;
_dbus_string_init_const_len (&sig, p, claimed_len);
reason = _dbus_validate_signature_with_reason (&sig, 0,
_dbus_string_get_length (&sig));
if (!(reason == DBUS_VALID))
{
if (reason == DBUS_VALIDITY_UNKNOWN_OOM_ERROR)
return reason;
else
return DBUS_INVALID_VARIANT_SIGNATURE_BAD;
}
p += claimed_len;
if (*p != DBUS_TYPE_INVALID)
return DBUS_INVALID_VARIANT_SIGNATURE_MISSING_NUL;
++p;
contained_type = _dbus_first_type_in_signature (&sig, 0);
if (contained_type == DBUS_TYPE_INVALID)
return DBUS_INVALID_VARIANT_SIGNATURE_EMPTY;
contained_alignment = _dbus_type_get_alignment (contained_type);
a = _DBUS_ALIGN_ADDRESS (p, contained_alignment);
if (a > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
_dbus_type_reader_init_types_only (&sub, &sig, 0);
_dbus_assert (_dbus_type_reader_get_current_type (&sub) != DBUS_TYPE_INVALID);
validity = validate_body_helper (&sub, byte_order, FALSE,
total_depth + 1,
p, end, &p);
if (validity != DBUS_VALID)
return validity;
if (_dbus_type_reader_next (&sub))
return DBUS_INVALID_VARIANT_SIGNATURE_SPECIFIES_MULTIPLE_VALUES;
_dbus_assert (_dbus_type_reader_get_current_type (&sub) == DBUS_TYPE_INVALID);
}
break;
case DBUS_TYPE_DICT_ENTRY:
case DBUS_TYPE_STRUCT:
{
DBusTypeReader sub;
DBusValidity validity;
a = _DBUS_ALIGN_ADDRESS (p, 8);
if (a > end)
return DBUS_INVALID_NOT_ENOUGH_DATA;
while (p != a)
{
if (*p != '\0')
return DBUS_INVALID_ALIGNMENT_PADDING_NOT_NUL;
++p;
}
_dbus_type_reader_recurse (reader, &sub);
validity = validate_body_helper (&sub, byte_order, TRUE,
total_depth + 1,
p, end, &p);
if (validity != DBUS_VALID)
return validity;
}
break;
default:
_dbus_assert_not_reached ("invalid typecode in supposedly-validated signature");
break;
}
| 164,886 |
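The essential change in the fixed function above is the total_depth parameter: every recursive call passes total_depth + 1, and anything nested deeper than twice the signature recursion limit is rejected, so a chain of variants can no longer drive unbounded recursion. The same guard is shown in isolation below, on a deliberately tiny made-up format (this is not the D-Bus wire format):

#include <cstdio>

enum { MAX_DEPTH = 64, OK = 0, ERR_TOO_DEEP = 1, ERR_TRUNCATED = 2 };

// Made-up nested encoding: 'v' wraps one nested value, anything else is a
// leaf. The depth counter makes input like "vvvv..." fail fast instead of
// exhausting the stack.
static int validate(const unsigned char *p, const unsigned char *end,
                    int depth, const unsigned char **new_p) {
  if (depth > MAX_DEPTH)
    return ERR_TOO_DEEP;
  if (p >= end)
    return ERR_TRUNCATED;
  if (*p == 'v')
    return validate(p + 1, end, depth + 1, new_p);
  *new_p = p + 1;  // consume one leaf
  return OK;
}

int main() {
  unsigned char evil[128];
  for (unsigned char &c : evil) c = 'v';
  const unsigned char *out;
  printf("%d\n", validate(evil, evil + sizeof(evil), 0, &out));  // 1: too deep
  return 0;
}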
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void pdf_load_pages_kids(FILE *fp, pdf_t *pdf)
{
int i, id, dummy;
char *buf, *c;
long start, sz;
start = ftell(fp);
/* Load all kids for all xref tables (versions) */
for (i=0; i<pdf->n_xrefs; i++)
{
if (pdf->xrefs[i].version && (pdf->xrefs[i].end != 0))
{
fseek(fp, pdf->xrefs[i].start, SEEK_SET);
while (SAFE_F(fp, (fgetc(fp) != 't')))
; /* Iterate to trailer */
/* Get root catalog */
sz = pdf->xrefs[i].end - ftell(fp);
buf = malloc(sz + 1);
SAFE_E(fread(buf, 1, sz, fp), sz, "Failed to load /Root.\n");
buf[sz] = '\0';
if (!(c = strstr(buf, "/Root")))
{
free(buf);
continue;
}
/* Jump to catalog (root) */
id = atoi(c + strlen("/Root") + 1);
free(buf);
buf = get_object(fp, id, &pdf->xrefs[i], NULL, &dummy);
if (!buf || !(c = strstr(buf, "/Pages")))
{
free(buf);
continue;
}
/* Start at the first Pages obj and get kids */
id = atoi(c + strlen("/Pages") + 1);
load_kids(fp, id, &pdf->xrefs[i]);
free(buf);
}
}
fseek(fp, start, SEEK_SET);
}
Commit Message: Zero and sanity check all dynamic allocs.
This addresses the memory issues in Issue #6 expressed in
calloc_some.pdf and malloc_some.pdf
CWE ID: CWE-787 | void pdf_load_pages_kids(FILE *fp, pdf_t *pdf)
{
int i, id, dummy;
char *buf, *c;
long start, sz;
start = ftell(fp);
/* Load all kids for all xref tables (versions) */
for (i=0; i<pdf->n_xrefs; i++)
{
if (pdf->xrefs[i].version && (pdf->xrefs[i].end != 0))
{
fseek(fp, pdf->xrefs[i].start, SEEK_SET);
while (SAFE_F(fp, (fgetc(fp) != 't')))
; /* Iterate to trailer */
/* Get root catalog */
sz = pdf->xrefs[i].end - ftell(fp);
buf = safe_calloc(sz + 1);
SAFE_E(fread(buf, 1, sz, fp), sz, "Failed to load /Root.\n");
buf[sz] = '\0';
if (!(c = strstr(buf, "/Root")))
{
free(buf);
continue;
}
/* Jump to catalog (root) */
id = atoi(c + strlen("/Root") + 1);
free(buf);
buf = get_object(fp, id, &pdf->xrefs[i], NULL, &dummy);
if (!buf || !(c = strstr(buf, "/Pages")))
{
free(buf);
continue;
}
/* Start at the first Pages obj and get kids */
id = atoi(c + strlen("/Pages") + 1);
load_kids(fp, id, &pdf->xrefs[i]);
free(buf);
}
}
fseek(fp, start, SEEK_SET);
}
| 169,571 |
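The fixed function above calls safe_calloc, whose definition lies outside this snippet. Below is a plausible shape for such a helper, zero-initializing, size-checked, and exiting on failure so callers never see NULL; this sketch is an assumption about its behavior, not pdfresurrect's actual code.

#include <cstdio>
#include <cstdlib>

// Assumed shape of a safe_calloc-style helper (hypothetical): reject
// non-positive or implausibly large sizes, zero the memory, and exit on
// failure so no caller ever dereferences a failed allocation.
static void *safe_calloc_sketch(long nbytes) {
  if (nbytes <= 0 || nbytes > (1L << 30)) {
    fprintf(stderr, "invalid allocation size: %ld\n", nbytes);
    exit(EXIT_FAILURE);
  }
  void *p = calloc(1, (size_t)nbytes);
  if (p == NULL) {
    perror("calloc");
    exit(EXIT_FAILURE);
  }
  return p;
}

int main() {
  char *buf = (char *)safe_calloc_sketch(128);
  buf[127] = '\0';  // memory arrives zeroed anyway
  free(buf);
  return 0;
}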
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: my_object_error_get_type (void)
{
static GType etype = 0;
if (etype == 0)
{
static const GEnumValue values[] =
{
ENUM_ENTRY (MY_OBJECT_ERROR_FOO, "Foo"),
ENUM_ENTRY (MY_OBJECT_ERROR_BAR, "Bar"),
{ 0, 0, 0 }
};
etype = g_enum_register_static ("MyObjectError", values);
}
return etype;
}
Commit Message:
CWE ID: CWE-264 | my_object_error_get_type (void)
| 165,097 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: virtual void TearDown() {
vpx_svc_release(&svc_);
delete(decoder_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | virtual void TearDown() {
ReleaseEncoder();
delete(decoder_);
}
void InitializeEncoder() {
const vpx_codec_err_t res =
vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_OK, res);
vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
vpx_codec_control(&codec_, VP9E_SET_TILE_COLUMNS, tile_columns_);
vpx_codec_control(&codec_, VP9E_SET_TILE_ROWS, tile_rows_);
codec_initialized_ = true;
}
void ReleaseEncoder() {
vpx_svc_release(&svc_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
codec_initialized_ = false;
}
void GetStatsData(std::string *const stats_buf) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
cx_pkt->data.twopass_stats.sz);
}
}
}
void Pass1EncodeNFrames(const int n, const int layers,
std::string *const stats_buf) {
vpx_codec_err_t res;
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.g_pass = VPX_RC_FIRST_PASS;
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_,
codec_enc_.g_w, codec_enc_.g_h,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
video.Next();
}
// Flush encoder and test EOS packet.
res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
ReleaseEncoder();
}
void StoreFrames(const size_t max_frame_received,
struct vpx_fixed_buf *const outputs,
size_t *const frame_received) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const size_t frame_size = cx_pkt->data.frame.sz;
EXPECT_GT(frame_size, 0U);
ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
ASSERT_LT(*frame_received, max_frame_received);
if (*frame_received == 0)
EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
outputs[*frame_received].buf = malloc(frame_size + 16);
ASSERT_TRUE(outputs[*frame_received].buf != NULL);
memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
frame_size);
outputs[*frame_received].sz = frame_size;
++(*frame_received);
}
}
}
void Pass2EncodeNFrames(std::string *const stats_buf,
const int n, const int layers,
struct vpx_fixed_buf *const outputs) {
vpx_codec_err_t res;
size_t frame_received = 0;
ASSERT_TRUE(outputs != NULL);
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.rc_target_bitrate = 500;
if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
ASSERT_TRUE(stats_buf != NULL);
ASSERT_GT(stats_buf->size(), 0U);
codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
}
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_,
codec_enc_.g_w, codec_enc_.g_h,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
video.Next();
}
// Flush encoder.
res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
video.duration(), VPX_DL_GOOD_QUALITY);
EXPECT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
EXPECT_EQ(frame_received, static_cast<size_t>(n));
ReleaseEncoder();
}
void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
int decoded_frames = 0;
int received_frames = 0;
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
const vpx_codec_err_t res_dec =
decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
inputs[i].sz);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
++decoded_frames;
DxDataIterator dec_iter = decoder_->GetDxData();
while (dec_iter.Next() != NULL) {
++received_frames;
}
}
EXPECT_EQ(decoded_frames, n);
EXPECT_EQ(received_frames, n);
}
void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
const int num_super_frames,
const int remained_spatial_layers) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(num_super_frames, 0);
ASSERT_GT(remained_spatial_layers, 0);
for (int i = 0; i < num_super_frames; ++i) {
uint32_t frame_sizes[8] = {0};
int frame_count = 0;
int frames_found = 0;
int frame;
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
vpx_codec_err_t res =
vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
inputs[i].sz, frame_sizes, &frame_count,
NULL, NULL);
ASSERT_EQ(VPX_CODEC_OK, res);
if (frame_count == 0) {
// There's no super frame but only a single frame.
ASSERT_EQ(1, remained_spatial_layers);
} else {
// Found a super frame.
uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
uint8_t *frame_start = frame_data;
for (frame = 0; frame < frame_count; ++frame) {
// Looking for a visible frame.
if (frame_data[0] & 0x02) {
++frames_found;
if (frames_found == remained_spatial_layers)
break;
}
frame_data += frame_sizes[frame];
}
ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
<< "remained_spatial_layers: " << remained_spatial_layers
<< " super_frame: " << i;
if (frame == frame_count - 1)
continue;
frame_data += frame_sizes[frame];
// We need to add one more frame for multiple frame contexts.
uint8_t marker =
static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
const uint32_t mag = ((marker >> 3) & 0x3) + 1;
const size_t index_sz = 2 + mag * frame_count;
const size_t new_index_sz = 2 + mag * (frame + 1);
marker &= 0x0f8;
marker |= frame;
// Copy existing frame sizes.
memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
new_index_sz - 2);
// New marker.
frame_data[0] = marker;
frame_data += (mag * (frame + 1) + 1);
*frame_data++ = marker;
inputs[i].sz = frame_data - frame_start;
}
}
}
void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
free(inputs[i].buf);
inputs[i].buf = NULL;
inputs[i].sz = 0;
}
}
| 174,582 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static uint32_t readU16(const uint8_t* data, size_t offset) {
return data[offset] << 8 | data[offset + 1];
}
Commit Message: Avoid integer overflows in parsing fonts
A malformed TTF can cause size calculations to overflow. This patch
checks the maximum reasonable value so that the total size fits in 32
bits. It also adds some explicit casting to avoid possible technical
undefined behavior when parsing sized unsigned values.
Bug: 25645298
Change-Id: Id4716132041a6f4f1fbb73ec4e445391cf7d9616
(cherry picked from commit 183c9ec2800baa2ce099ee260c6cbc6121cf1274)
CWE ID: CWE-19 | static uint32_t readU16(const uint8_t* data, size_t offset) {
return ((uint32_t)data[offset]) << 8 | ((uint32_t)data[offset + 1]);
}
| 173,966 |
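The one-line change above is about integer promotion: in `data[offset] << 8` the byte is promoted to signed int before shifting. For a 16-bit read that is benign, but the same pattern in a 32-bit reader (`data[offset] << 24`) can shift into the sign bit, which is undefined behavior; widening each byte to uint32_t first keeps the whole expression unsigned. A standalone illustration:

#include <cstdint>
#include <cstdio>

// Unsigned big-endian readers: every byte is widened to uint32_t before
// shifting, so no intermediate value is ever a signed int.
static uint32_t readU16(const uint8_t *d, size_t o) {
  return ((uint32_t)d[o] << 8) | (uint32_t)d[o + 1];
}
static uint32_t readU32(const uint8_t *d, size_t o) {
  return ((uint32_t)d[o] << 24) | ((uint32_t)d[o + 1] << 16) |
         ((uint32_t)d[o + 2] << 8) | (uint32_t)d[o + 3];
}

int main() {
  const uint8_t buf[] = {0xAB, 0xCD, 0xEF, 0x01};
  printf("%u %u\n", readU16(buf, 0), readU32(buf, 0));  // 43981 2882400001
  return 0;
}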
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
struct kvm *kvm;
int r;
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
r = kvm_mmu_create(vcpu);
if (r < 0)
goto fail_free_pio_data;
if (irqchip_in_kernel(kvm)) {
r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
} else
static_key_slow_inc(&kvm_no_apic_vcpu);
vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
GFP_KERNEL);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
goto fail_free_lapic;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
goto fail_free_mce_banks;
r = fx_init(vcpu);
if (r)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
return 0;
fail_free_wbinvd_dirty_mask:
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
fail_free_lapic:
kvm_free_lapic(vcpu);
fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail:
return r;
}
Commit Message: KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
There is a potential use after free issue with the handling of
MSR_KVM_SYSTEM_TIME. If the guest specifies a GPA in a movable or removable
memory such as frame buffers then KVM might continue to write to that
address even after it's removed via KVM_SET_USER_MEMORY_REGION. KVM pins
the page in memory so it's unlikely to cause an issue, but if the user
space component re-purposes the memory previously used for the guest, then
the guest will be able to corrupt that memory.
Tested: Tested against kvmclock unit test
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
CWE ID: CWE-399 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
struct kvm *kvm;
int r;
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
r = kvm_mmu_create(vcpu);
if (r < 0)
goto fail_free_pio_data;
if (irqchip_in_kernel(kvm)) {
r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
} else
static_key_slow_inc(&kvm_no_apic_vcpu);
vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
GFP_KERNEL);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
goto fail_free_lapic;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
goto fail_free_mce_banks;
r = fx_init(vcpu);
if (r)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
return 0;
fail_free_wbinvd_dirty_mask:
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
fail_free_lapic:
kvm_free_lapic(vcpu);
fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail:
return r;
}
| 166,115 |
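Only one line of the CVE-2013-1797 fix is visible in this row, the initialization of vcpu->arch.pv_time_enabled to false. Per the commit message, the rest of the patch routes kvmclock writes through a gfn_to_hva_cache and skips them while the flag is clear. A schematic of that fail-closed pattern, using simplified stand-in types rather than the kernel code:

#include <cstdint>

// Fail-closed schematic: the clock write path is enabled only after the
// guest-supplied address has been validated and cached; any failure
// disables it again, so a stale mapping is never written through.
struct pv_clock_state {
  bool pv_time_enabled = false;  // mirrors vcpu->arch.pv_time_enabled
  uint64_t cached_hva = 0;       // stand-in for a gfn_to_hva cache entry
};

static bool init_pv_time(pv_clock_state &s, uint64_t guest_addr,
                         bool addr_valid) {
  if (!addr_valid) {
    s.pv_time_enabled = false;  // fail closed on a bad MSR write
    return false;
  }
  s.cached_hva = guest_addr;
  s.pv_time_enabled = true;
  return true;
}

static void update_pv_clock(pv_clock_state &s, uint64_t clock_data) {
  if (!s.pv_time_enabled)
    return;  // never write through an unvalidated or stale mapping
  // ... write clock_data at s.cached_hva ...
  (void)clock_data;
}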
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: bool IsTraceEventArgsWhitelisted(const char* category_group_name,
const char* event_name) {
base::CStringTokenizer category_group_tokens(
category_group_name, category_group_name + strlen(category_group_name),
",");
while (category_group_tokens.GetNext()) {
const std::string& category_group_token = category_group_tokens.token();
for (int i = 0; kEventArgsWhitelist[i][0] != NULL; ++i) {
DCHECK(kEventArgsWhitelist[i][1]);
if (base::MatchPattern(category_group_token.c_str(),
kEventArgsWhitelist[i][0]) &&
base::MatchPattern(event_name, kEventArgsWhitelist[i][1])) {
return true;
}
}
}
return false;
}
Commit Message: Tracing: Add support for PII whitelisting of individual trace event arguments
R=dsinclair,shatch
BUG=546093
Review URL: https://codereview.chromium.org/1415013003
Cr-Commit-Position: refs/heads/master@{#356690}
CWE ID: CWE-399 | bool IsTraceEventArgsWhitelisted(const char* category_group_name,
bool IsTraceArgumentNameWhitelisted(const char* const* granular_filter,
const char* arg_name) {
for (int i = 0; granular_filter[i] != nullptr; ++i) {
if (base::MatchPattern(arg_name, granular_filter[i]))
return true;
}
return false;
}
bool IsTraceEventArgsWhitelisted(
const char* category_group_name,
const char* event_name,
base::trace_event::ArgumentNameFilterPredicate* arg_name_filter) {
DCHECK(arg_name_filter);
base::CStringTokenizer category_group_tokens(
category_group_name, category_group_name + strlen(category_group_name),
",");
while (category_group_tokens.GetNext()) {
const std::string& category_group_token = category_group_tokens.token();
for (int i = 0; kEventArgsWhitelist[i].category_name != nullptr; ++i) {
const WhitelistEntry& whitelist_entry = kEventArgsWhitelist[i];
DCHECK(whitelist_entry.event_name);
if (base::MatchPattern(category_group_token.c_str(),
whitelist_entry.category_name) &&
base::MatchPattern(event_name, whitelist_entry.event_name)) {
if (whitelist_entry.arg_name_filter) {
*arg_name_filter = base::Bind(&IsTraceArgumentNameWhitelisted,
whitelist_entry.arg_name_filter);
}
return true;
}
}
}
return false;
}
| 171,680 |
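The fixed version above makes the whitelist two-level: an event-level match may also yield a per-argument filter, handed back to the caller as a bound predicate. A reduced sketch of that shape using exact-match names (Chromium itself matches glob patterns via base::MatchPattern):

#include <functional>
#include <string>
#include <vector>

// Reduced two-level whitelist: match the event first, then hand back a
// predicate over argument names built from the entry's pattern table.
struct Entry {
  std::string event;
  std::vector<std::string> arg_names;  // exact names here, patterns upstream
};

using ArgFilter = std::function<bool(const std::string &)>;

static bool IsWhitelisted(const std::vector<Entry> &table,
                          const std::string &event, ArgFilter *filter) {
  for (const Entry &e : table) {
    if (e.event == event) {
      *filter = [args = e.arg_names](const std::string &name) {
        for (const std::string &a : args)
          if (a == name) return true;
        return false;
      };
      return true;
    }
  }
  return false;
}

int main() {
  std::vector<Entry> table = {{"memory_dump", {"size", "dump_id"}}};
  ArgFilter f;
  if (IsWhitelisted(table, "memory_dump", &f))
    return f("size") ? 0 : 1;  // "size" passes, anything else is filtered
  return 1;
}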
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci = {
.devnum = ps->dev->devnum,
.slow = ps->dev->speed == USB_SPEED_LOW
};
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
}
Commit Message: USB: usbfs: fix potential infoleak in devio
The stack object “ci” has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via “copy_to_user”.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: CWE-200 | static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci;
memset(&ci, 0, sizeof(ci));
ci.devnum = ps->dev->devnum;
ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
}
| 167,259 |
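The commit message above describes a classic padding leak: usbdevfs_connectinfo holds an unsigned int followed by an unsigned char, so typical ABIs add three trailing padding bytes, and a partial initializer is not required to zero them. A userspace demonstration of why the full-struct memset matters:

#include <cstdio>
#include <cstring>

struct connectinfo {    // same layout idea as usbdevfs_connectinfo
  unsigned int devnum;  // 4 bytes
  unsigned char slow;   // 1 byte, then (typically) 3 bytes of padding
};

int main() {
  printf("sizeof = %zu\n", sizeof(connectinfo));  // usually 8, not 5

  connectinfo ci;
  memset(&ci, 0, sizeof(ci));  // zeroes members *and* padding
  ci.devnum = 7;
  ci.slow = 1;
  // Copying &ci across a trust boundary is now safe: all 8 bytes are
  // defined. With an initializer like "connectinfo ci = {7, 1};" the
  // padding bytes may hold leftover stack data and would leak.
  return 0;
}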
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: bool CheckClientDownloadRequest::ShouldUploadForDlpScan() {
if (!base::FeatureList::IsEnabled(kDeepScanningOfDownloads))
return false;
int check_content_compliance = g_browser_process->local_state()->GetInteger(
prefs::kCheckContentCompliance);
if (check_content_compliance !=
CheckContentComplianceValues::CHECK_DOWNLOADS &&
check_content_compliance !=
CheckContentComplianceValues::CHECK_UPLOADS_AND_DOWNLOADS)
return false;
if (policy::BrowserDMTokenStorage::Get()->RetrieveDMToken().empty())
return false;
const base::ListValue* domains = g_browser_process->local_state()->GetList(
prefs::kURLsToCheckComplianceOfDownloadedContent);
url_matcher::URLMatcher matcher;
policy::url_util::AddAllowFilters(&matcher, domains);
return !matcher.MatchURL(item_->GetURL()).empty();
}
Commit Message: Migrate download_protection code to new DM token class.
Migrates RetrieveDMToken calls to use the new BrowserDMToken class.
Bug: 1020296
Change-Id: Icef580e243430d73b6c1c42b273a8540277481d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1904234
Commit-Queue: Dominique Fauteux-Chapleau <[email protected]>
Reviewed-by: Tien Mai <[email protected]>
Reviewed-by: Daniel Rubery <[email protected]>
Cr-Commit-Position: refs/heads/master@{#714196}
CWE ID: CWE-20 | bool CheckClientDownloadRequest::ShouldUploadForDlpScan() {
if (!base::FeatureList::IsEnabled(kDeepScanningOfDownloads))
return false;
int check_content_compliance = g_browser_process->local_state()->GetInteger(
prefs::kCheckContentCompliance);
if (check_content_compliance !=
CheckContentComplianceValues::CHECK_DOWNLOADS &&
check_content_compliance !=
CheckContentComplianceValues::CHECK_UPLOADS_AND_DOWNLOADS)
return false;
// If there's no valid DM token, the upload will fail, so we can skip
// uploading now.
if (!BrowserDMTokenStorage::Get()->RetrieveBrowserDMToken().is_valid())
return false;
const base::ListValue* domains = g_browser_process->local_state()->GetList(
prefs::kURLsToCheckComplianceOfDownloadedContent);
url_matcher::URLMatcher matcher;
policy::url_util::AddAllowFilters(&matcher, domains);
return !matcher.MatchURL(item_->GetURL()).empty();
}
| 172,356 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool SVGFEColorMatrixElement::setFilterEffectAttribute(FilterEffect* effect, const QualifiedName& attrName)
{
FEColorMatrix* colorMatrix = static_cast<FEColorMatrix*>(effect);
if (attrName == SVGNames::typeAttr)
return colorMatrix->setType(m_type->currentValue()->enumValue());
if (attrName == SVGNames::valuesAttr)
return colorMatrix->setValues(m_values->currentValue()->toFloatVector());
ASSERT_NOT_REACHED();
return false;
}
Commit Message: Explicitly enforce values size in feColorMatrix.
[email protected]
BUG=468519
Review URL: https://codereview.chromium.org/1075413002
git-svn-id: svn://svn.chromium.org/blink/trunk@193571 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | bool SVGFEColorMatrixElement::setFilterEffectAttribute(FilterEffect* effect, const QualifiedName& attrName)
{
FEColorMatrix* colorMatrix = static_cast<FEColorMatrix*>(effect);
if (attrName == SVGNames::typeAttr)
return colorMatrix->setType(m_type->currentValue()->enumValue());
if (attrName == SVGNames::valuesAttr) {
Vector<float> values = m_values->currentValue()->toFloatVector();
if (values.size() == 20)
return colorMatrix->setValues(values);
return false;
}
ASSERT_NOT_REACHED();
return false;
}
| 171,979 |
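The fix above only applies the values when exactly 20 coefficients (a 4x5 color matrix) are present. A minimal C sketch of the same guard, assuming the caller supplies a count alongside the data:

#include <stddef.h>

#define COLOR_MATRIX_VALUES 20  /* 4 rows x 5 columns */

/* Copies the matrix only when the input is exactly the expected size, so a
 * short vector can never cause an out-of-bounds read downstream. */
static int set_color_matrix(float dst[COLOR_MATRIX_VALUES],
                            const float *values, size_t count)
{
    size_t i;

    if (count != COLOR_MATRIX_VALUES)
        return 0;
    for (i = 0; i < COLOR_MATRIX_VALUES; i++)
        dst[i] = values[i];
    return 1;
}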
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: xsltStylesheetPtr XSLStyleSheet::compileStyleSheet()
{
if (m_embedded)
return xsltLoadStylesheetPI(document());
ASSERT(!m_stylesheetDocTaken);
xsltStylesheetPtr result = xsltParseStylesheetDoc(m_stylesheetDoc);
if (result)
m_stylesheetDocTaken = true;
return result;
}
Commit Message: Avoid reparsing an XSLT stylesheet after the first failure.
Certain libxslt versions appear to leave the doc in an invalid state when parsing fails. We should cache this result and avoid re-parsing.
(The test cannot be converted to text-only due to its invalid stylesheet).
[email protected],[email protected],[email protected]
BUG=271939
Review URL: https://chromiumcodereview.appspot.com/23103007
git-svn-id: svn://svn.chromium.org/blink/trunk@156248 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | xsltStylesheetPtr XSLStyleSheet::compileStyleSheet()
{
if (m_embedded)
return xsltLoadStylesheetPI(document());
// Certain libxslt versions are corrupting the xmlDoc on compilation failures -
// hence attempting to recompile after a failure is unsafe.
if (m_compilationFailed)
return 0;
ASSERT(!m_stylesheetDocTaken);
xsltStylesheetPtr result = xsltParseStylesheetDoc(m_stylesheetDoc);
if (result)
m_stylesheetDocTaken = true;
else
m_compilationFailed = true;
return result;
}
| 171,185 |
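The fix above latches the first compilation failure so a document left corrupted by the parser is never re-parsed. A sketch of the pattern with illustrative names (this is not the libxslt API):

struct stylesheet {
    void *doc;               /* may be left corrupted by a failed parse */
    int compilation_failed;  /* sticky failure flag, initially 0 */
};

/* Hypothetical parser: returns a null pointer on failure and may corrupt doc. */
extern void *parse_stylesheet_doc(void *doc);

void *compile_stylesheet_once(struct stylesheet *s)
{
    void *result;

    if (s->compilation_failed)
        return 0;                   /* re-parsing is known to be unsafe */
    result = parse_stylesheet_doc(s->doc);
    if (!result)
        s->compilation_failed = 1;  /* remember the failure */
    return result;
}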
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static inline struct keydata *get_keyptr(void)
{
struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
smp_rmb();
return keyptr;
}
Commit Message: net: Compute protocol sequence numbers and fragment IDs using MD5.
Computers have become a lot faster since we compromised on the
partial MD4 hash which we use currently for performance reasons.
MD5 is a much safer choice, and is inline with both RFC1948 and
other ISS generators (OpenBSD, Solaris, etc.)
Furthermore, only having 24-bits of the sequence number be truly
unpredictable is a very serious limitation. So the periodic
regeneration and 8-bit counter have been removed. We compute and
use a full 32-bit sequence number.
For ipv6, DCCP was found to use a 32-bit truncated initial sequence
number (it needs 43-bits) and that is fixed here as well.
Reported-by: Dan Kaminsky <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: | static inline struct keydata *get_keyptr(void)
| 165,760 |
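The commit message above replaces a truncated MD4 scheme with a full 32-bit MD5-based initial sequence number. A hedged sketch of the idea; md5_hash() and the key setup are stand-ins, not the kernel's real primitives:

#include <stdint.h>

/* Hypothetical one-shot MD5 over `words` 32-bit words. */
extern void md5_hash(uint32_t out[4], const uint32_t *in, unsigned words);

static uint32_t net_secret[4];  /* random per-boot key, assumed initialized */

uint32_t secure_tcp_isn(uint32_t saddr, uint32_t daddr,
                        uint16_t sport, uint16_t dport, uint32_t clock)
{
    uint32_t block[8] = {
        saddr, daddr,
        ((uint32_t)sport << 16) | dport,
        net_secret[0], net_secret[1], net_secret[2], net_secret[3],
        0
    };
    uint32_t out[4];

    md5_hash(out, block, 8);
    /* All 32 bits of out[0] are unpredictable to an attacker who does not
     * know the secret; adding a clock keeps the sequence space advancing,
     * as RFC 1948 recommends. */
    return out[0] + clock;
}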
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: parse_charstrings( T1_Face face,
T1_Loader loader )
{
T1_Parser parser = &loader->parser;
PS_Table code_table = &loader->charstrings;
PS_Table name_table = &loader->glyph_names;
PS_Table swap_table = &loader->swap_table;
FT_Memory memory = parser->root.memory;
FT_Error error;
PSAux_Service psaux = (PSAux_Service)face->psaux;
FT_Byte* cur;
FT_Byte* limit = parser->root.limit;
FT_Int n, num_glyphs;
FT_PtrDist len;
FT_UInt notdef_index = 0;
FT_Byte notdef_found = 0;
num_glyphs = (FT_Int)T1_ToInt( parser );
if ( num_glyphs < 0 )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* some fonts like Optima-Oblique not only define the /CharStrings */
/* array but access it also */
if ( num_glyphs == 0 || parser->root.error )
return;
/* initialize tables, leaving space for addition of .notdef, */
/* if necessary, and a few other glyphs to handle buggy */
/* fonts which have more glyphs than specified. */
/* for some non-standard fonts like `Optima' which provides */
/* different outlines depending on the resolution it is */
/* possible to get here twice */
if ( !loader->num_glyphs )
{
error = psaux->ps_table_funcs->init(
code_table, num_glyphs + 1 + TABLE_EXTEND, memory );
if ( error )
goto Fail;
error = psaux->ps_table_funcs->init(
name_table, num_glyphs + 1 + TABLE_EXTEND, memory );
if ( error )
goto Fail;
/* Initialize table for swapping index notdef_index and */
/* index 0 names and codes (if necessary). */
error = psaux->ps_table_funcs->init( swap_table, 4, memory );
if ( error )
goto Fail;
}
n = 0;
for (;;)
{
FT_Long size;
FT_Byte* base;
/* the format is simple: */
/* `/glyphname' + binary data */
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
if ( cur >= limit )
break;
/* we stop when we find a `def' or `end' keyword */
if ( cur + 3 < limit && IS_PS_DELIM( cur[3] ) )
{
if ( cur[0] == 'd' &&
cur[1] == 'e' &&
cur[2] == 'f' )
{
/* There are fonts which have this: */
/* */
/* /CharStrings 118 dict def */
/* Private begin */
/* CharStrings begin */
/* ... */
/* */
/* To catch this we ignore `def' if */
/* no charstring has actually been */
/* seen. */
if ( n )
break;
}
if ( cur[0] == 'e' &&
cur[1] == 'n' &&
cur[2] == 'd' )
break;
}
T1_Skip_PS_Token( parser );
if ( parser->root.error )
return;
if ( cur + 2 >= limit )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
cur++; /* skip `/' */
len = parser->root.cursor - cur;
if ( !read_binary_data( parser, &size, &base, IS_INCREMENTAL ) )
return;
/* for some non-standard fonts like `Optima' which provides */
/* different outlines depending on the resolution it is */
/* possible to get here twice */
if ( loader->num_glyphs )
continue;
error = T1_Add_Table( name_table, n, cur, len + 1 );
if ( error )
goto Fail;
/* add a trailing zero to the name table */
name_table->elements[n][len] = '\0';
/* record index of /.notdef */
if ( *cur == '.' &&
ft_strcmp( ".notdef",
(const char*)(name_table->elements[n]) ) == 0 )
{
notdef_index = n;
notdef_found = 1;
}
if ( face->type1.private_dict.lenIV >= 0 &&
n < num_glyphs + TABLE_EXTEND )
{
FT_Byte* temp;
if ( size <= face->type1.private_dict.lenIV )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* t1_decrypt() shouldn't write to base -- make temporary copy */
if ( FT_ALLOC( temp, size ) )
goto Fail;
FT_MEM_COPY( temp, base, size );
psaux->t1_decrypt( temp, size, 4330 );
size -= face->type1.private_dict.lenIV;
error = T1_Add_Table( code_table, n,
temp + face->type1.private_dict.lenIV, size );
FT_FREE( temp );
}
else
error = T1_Add_Table( code_table, n, base, size );
if ( error )
goto Fail;
n++;
}
}
Commit Message:
CWE ID: CWE-119 | parse_charstrings( T1_Face face,
T1_Loader loader )
{
T1_Parser parser = &loader->parser;
PS_Table code_table = &loader->charstrings;
PS_Table name_table = &loader->glyph_names;
PS_Table swap_table = &loader->swap_table;
FT_Memory memory = parser->root.memory;
FT_Error error;
PSAux_Service psaux = (PSAux_Service)face->psaux;
FT_Byte* cur;
FT_Byte* limit = parser->root.limit;
FT_Int n, num_glyphs;
FT_PtrDist len;
FT_UInt notdef_index = 0;
FT_Byte notdef_found = 0;
num_glyphs = (FT_Int)T1_ToInt( parser );
if ( num_glyphs < 0 )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* some fonts like Optima-Oblique not only define the /CharStrings */
/* array but access it also */
if ( num_glyphs == 0 || parser->root.error )
return;
/* initialize tables, leaving space for addition of .notdef, */
/* if necessary, and a few other glyphs to handle buggy */
/* fonts which have more glyphs than specified. */
/* for some non-standard fonts like `Optima' which provides */
/* different outlines depending on the resolution it is */
/* possible to get here twice */
if ( !loader->num_glyphs )
{
error = psaux->ps_table_funcs->init(
code_table, num_glyphs + 1 + TABLE_EXTEND, memory );
if ( error )
goto Fail;
error = psaux->ps_table_funcs->init(
name_table, num_glyphs + 1 + TABLE_EXTEND, memory );
if ( error )
goto Fail;
/* Initialize table for swapping index notdef_index and */
/* index 0 names and codes (if necessary). */
error = psaux->ps_table_funcs->init( swap_table, 4, memory );
if ( error )
goto Fail;
}
n = 0;
for (;;)
{
FT_Long size;
FT_Byte* base;
/* the format is simple: */
/* `/glyphname' + binary data */
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
if ( cur >= limit )
break;
/* we stop when we find a `def' or `end' keyword */
if ( cur + 3 < limit && IS_PS_DELIM( cur[3] ) )
{
if ( cur[0] == 'd' &&
cur[1] == 'e' &&
cur[2] == 'f' )
{
/* There are fonts which have this: */
/* */
/* /CharStrings 118 dict def */
/* Private begin */
/* CharStrings begin */
/* ... */
/* */
/* To catch this we ignore `def' if */
/* no charstring has actually been */
/* seen. */
if ( n )
break;
}
if ( cur[0] == 'e' &&
cur[1] == 'n' &&
cur[2] == 'd' )
break;
}
T1_Skip_PS_Token( parser );
if ( parser->root.cursor >= limit )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( parser->root.error )
return;
if ( cur + 2 >= limit )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
cur++; /* skip `/' */
len = parser->root.cursor - cur;
if ( !read_binary_data( parser, &size, &base, IS_INCREMENTAL ) )
return;
/* for some non-standard fonts like `Optima' which provides */
/* different outlines depending on the resolution it is */
/* possible to get here twice */
if ( loader->num_glyphs )
continue;
error = T1_Add_Table( name_table, n, cur, len + 1 );
if ( error )
goto Fail;
/* add a trailing zero to the name table */
name_table->elements[n][len] = '\0';
/* record index of /.notdef */
if ( *cur == '.' &&
ft_strcmp( ".notdef",
(const char*)(name_table->elements[n]) ) == 0 )
{
notdef_index = n;
notdef_found = 1;
}
if ( face->type1.private_dict.lenIV >= 0 &&
n < num_glyphs + TABLE_EXTEND )
{
FT_Byte* temp;
if ( size <= face->type1.private_dict.lenIV )
{
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* t1_decrypt() shouldn't write to base -- make temporary copy */
if ( FT_ALLOC( temp, size ) )
goto Fail;
FT_MEM_COPY( temp, base, size );
psaux->t1_decrypt( temp, size, 4330 );
size -= face->type1.private_dict.lenIV;
error = T1_Add_Table( code_table, n,
temp + face->type1.private_dict.lenIV, size );
FT_FREE( temp );
}
else
error = T1_Add_Table( code_table, n, base, size );
if ( error )
goto Fail;
n++;
}
}
| 164,857 |
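The one-line fix above (checking the cursor against the limit right after T1_Skip_PS_Token) generalizes to any tokenizer. A minimal sketch, with illustrative types in place of the FreeType ones:

struct token_parser {
    const unsigned char *cursor;
    const unsigned char *limit;
};

/* Hypothetical skip routine; may advance cursor up to (or onto) limit. */
extern void skip_token(struct token_parser *p);

/* Returns the token length, or -1 if skipping ran off the buffer, so the
 * caller never computes a length from an out-of-bounds cursor (CWE-119). */
static long token_length(struct token_parser *p, const unsigned char *start)
{
    skip_token(p);
    if (p->cursor >= p->limit)
        return -1;
    return (long)(p->cursor - start);
}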
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
{
unsigned char event_rec[EV_SZ], ev_code;
int p = 0, c, ev_size;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
if (mode == OPEN_READ)
return -EIO;
c = count;
while (c >= 4)
{
if (copy_from_user((char *) event_rec, &(buf)[p], 4))
goto out;
ev_code = event_rec[0];
if (ev_code == SEQ_FULLSIZE)
{
int err, fmt;
dev = *(unsigned short *) &event_rec[2];
if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0);
if (err < 0)
return err;
return err;
}
if (ev_code >= 128)
{
if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
{
printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
return -EINVAL;
}
ev_size = 8;
if (c < ev_size)
{
if (!seq_playing)
seq_startplay();
return count - c;
}
if (copy_from_user((char *)&event_rec[4],
&(buf)[p + 4], 4))
goto out;
}
else
{
if (seq_mode == SEQ_2)
{
printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
return -EINVAL;
}
ev_size = 4;
if (event_rec[0] != SEQ_MIDIPUTC)
obsolete_api_used = 1;
}
if (event_rec[0] == SEQ_MIDIPUTC)
{
if (!midi_opened[event_rec[2]])
{
int err, mode;
int dev = event_rec[2];
if (dev >= max_mididev || midi_devs[dev]==NULL)
{
/*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
return -ENXIO;
}
mode = translate_mode(file);
if ((err = midi_devs[dev]->open(dev, mode,
sequencer_midi_input, sequencer_midi_output)) < 0)
{
seq_reset();
printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
return err;
}
midi_opened[dev] = 1;
}
}
if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
{
int processed = count - c;
if (!seq_playing)
seq_startplay();
if (!processed && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
else
return processed;
}
p += ev_size;
c -= ev_size;
}
if (!seq_playing)
seq_startplay();
out:
return count;
}
Commit Message: sound/oss: remove offset from load_patch callbacks
Was: [PATCH] sound/oss/midi_synth: prevent underflow, use of
uninitialized value, and signedness issue
The offset passed to midi_synth_load_patch() can be essentially
arbitrary. If it's greater than the header length, this will result in
a copy_from_user(dst, src, negative_val). While this will just return
-EFAULT on x86, on other architectures this may cause memory corruption.
Additionally, the length field of the sysex_info structure may not be
initialized prior to its use. Finally, a signed comparison may result
in an unintentionally large loop.
On suggestion by Takashi Iwai, version two removes the offset argument
from the load_patch callbacks entirely, which also resolves similar
issues in opl3. Compile tested only.
v3 adjusts comments and hopefully gets copy offsets right.
Signed-off-by: Dan Rosenberg <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-189 | int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
{
unsigned char event_rec[EV_SZ], ev_code;
int p = 0, c, ev_size;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
if (mode == OPEN_READ)
return -EIO;
c = count;
while (c >= 4)
{
if (copy_from_user((char *) event_rec, &(buf)[p], 4))
goto out;
ev_code = event_rec[0];
if (ev_code == SEQ_FULLSIZE)
{
int err, fmt;
dev = *(unsigned short *) &event_rec[2];
if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0);
if (err < 0)
return err;
return err;
}
if (ev_code >= 128)
{
if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
{
printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
return -EINVAL;
}
ev_size = 8;
if (c < ev_size)
{
if (!seq_playing)
seq_startplay();
return count - c;
}
if (copy_from_user((char *)&event_rec[4],
&(buf)[p + 4], 4))
goto out;
}
else
{
if (seq_mode == SEQ_2)
{
printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
return -EINVAL;
}
ev_size = 4;
if (event_rec[0] != SEQ_MIDIPUTC)
obsolete_api_used = 1;
}
if (event_rec[0] == SEQ_MIDIPUTC)
{
if (!midi_opened[event_rec[2]])
{
int err, mode;
int dev = event_rec[2];
if (dev >= max_mididev || midi_devs[dev]==NULL)
{
/*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
return -ENXIO;
}
mode = translate_mode(file);
if ((err = midi_devs[dev]->open(dev, mode,
sequencer_midi_input, sequencer_midi_output)) < 0)
{
seq_reset();
printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
return err;
}
midi_opened[dev] = 1;
}
}
if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
{
int processed = count - c;
if (!seq_playing)
seq_startplay();
if (!processed && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
else
return processed;
}
p += ev_size;
c -= ev_size;
}
if (!seq_playing)
seq_startplay();
out:
return count;
}
| 165,894 |
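The interface change above is the whole fix: the caller passes buf + p and the remaining count instead of a base pointer plus a separately validated offset. A sketch of the resulting shape, with a hypothetical load_patch prototype:

#include <stddef.h>

/* After the change the callee only ever sees the advanced pointer and the
 * bytes that remain, so it cannot index before or past the user buffer. */
extern int load_patch(int dev, int fmt, const char *buf, size_t remaining);

int dispatch_full_size(int dev, int fmt, const char *buf,
                       size_t consumed, size_t total)
{
    if (consumed > total)   /* guard the subtraction against underflow */
        return -1;
    return load_patch(dev, fmt, buf + consumed, total - consumed);
}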
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
struct nat_entry *ne;
int err;
/* 0 nid should not be used */
if (unlikely(nid == 0))
return false;
if (build) {
/* do not add allocated nids */
ne = __lookup_nat_cache(nm_i, nid);
if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR))
return false;
}
i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
i->state = NID_NEW;
if (radix_tree_preload(GFP_NOFS)) {
kmem_cache_free(free_nid_slab, i);
return true;
}
spin_lock(&nm_i->nid_list_lock);
err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
if (err) {
kmem_cache_free(free_nid_slab, i);
return true;
}
return true;
}
Commit Message: f2fs: fix race condition in between free nid allocator/initializer
In below concurrent case, allocated nid can be loaded into free nid cache
and be allocated again.
Thread A                                   Thread B
- f2fs_create
 - f2fs_new_inode
  - alloc_nid
   - __insert_nid_to_list(ALLOC_NID_LIST)
                                           - f2fs_balance_fs_bg
                                            - build_free_nids
                                             - __build_free_nids
                                              - scan_nat_page
                                               - add_free_nid
                                                - __lookup_nat_cache
 - f2fs_add_link
  - init_inode_metadata
   - new_inode_page
    - new_node_page
     - set_node_addr
 - alloc_nid_done
  - __remove_nid_from_list(ALLOC_NID_LIST)
                                                - __insert_nid_to_list(FREE_NID_LIST)
This patch makes nat cache lookup and free nid list operation being atomical
to avoid this race condition.
Signed-off-by: Jaegeuk Kim <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
CWE ID: CWE-362 | static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *e;
struct nat_entry *ne;
int err = -EINVAL;
bool ret = false;
/* 0 nid should not be used */
if (unlikely(nid == 0))
return false;
i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
i->state = NID_NEW;
if (radix_tree_preload(GFP_NOFS))
goto err;
spin_lock(&nm_i->nid_list_lock);
if (build) {
/*
* Thread A                                   Thread B
* - f2fs_create
*  - f2fs_new_inode
*   - alloc_nid
*    - __insert_nid_to_list(ALLOC_NID_LIST)
*                                            - f2fs_balance_fs_bg
*                                             - build_free_nids
*                                              - __build_free_nids
*                                               - scan_nat_page
*                                                - add_free_nid
*                                                 - __lookup_nat_cache
*  - f2fs_add_link
*   - init_inode_metadata
*    - new_inode_page
*     - new_node_page
*      - set_node_addr
*  - alloc_nid_done
*   - __remove_nid_from_list(ALLOC_NID_LIST)
*                                                 - __insert_nid_to_list(FREE_NID_LIST)
*/
ne = __lookup_nat_cache(nm_i, nid);
if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR))
goto err_out;
e = __lookup_free_nid_list(nm_i, nid);
if (e) {
if (e->state == NID_NEW)
ret = true;
goto err_out;
}
}
ret = true;
err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
err_out:
spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
err:
if (err)
kmem_cache_free(free_nid_slab, i);
return ret;
}
| 169,379 |
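The essence of the fix above is moving the NAT-cache lookup inside the same critical section as the free-list insert. A userspace sketch with a pthread mutex in place of the kernel spinlock; the two helpers are illustrative stand-ins:

#include <pthread.h>

extern int nid_is_allocated(unsigned int nid);  /* stand-in NAT lookup */
extern int insert_free_nid(unsigned int nid);   /* stand-in list insert */

static pthread_mutex_t nid_lock = PTHREAD_MUTEX_INITIALIZER;

/* Check-and-insert under one lock: a concurrent allocator can no longer
 * slip in between the lookup and the insert (the CWE-362 window). */
int add_free_nid_atomic(unsigned int nid)
{
    int err = -1;

    pthread_mutex_lock(&nid_lock);
    if (!nid_is_allocated(nid))
        err = insert_free_nid(nid);
    pthread_mutex_unlock(&nid_lock);
    return err;
}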
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: xsltTestCompMatch(xsltTransformContextPtr ctxt, xsltCompMatchPtr comp,
xmlNodePtr node, const xmlChar *mode,
const xmlChar *modeURI) {
int i;
xsltStepOpPtr step, sel = NULL;
xsltStepStates states = {0, 0, NULL}; /* // may require backtrack */
if ((comp == NULL) || (node == NULL) || (ctxt == NULL)) {
xsltTransformError(ctxt, NULL, node,
"xsltTestCompMatch: null arg\n");
return(-1);
}
if (mode != NULL) {
if (comp->mode == NULL)
return(0);
/*
* both mode strings must be interned on the stylesheet dictionary
*/
if (comp->mode != mode)
return(0);
} else {
if (comp->mode != NULL)
return(0);
}
if (modeURI != NULL) {
if (comp->modeURI == NULL)
return(0);
/*
* both modeURI strings must be interned on the stylesheet dictionary
*/
if (comp->modeURI != modeURI)
return(0);
} else {
if (comp->modeURI != NULL)
return(0);
}
i = 0;
restart:
for (;i < comp->nbStep;i++) {
step = &comp->steps[i];
if (step->op != XSLT_OP_PREDICATE)
sel = step;
switch (step->op) {
case XSLT_OP_END:
goto found;
case XSLT_OP_ROOT:
if ((node->type == XML_DOCUMENT_NODE) ||
#ifdef LIBXML_DOCB_ENABLED
(node->type == XML_DOCB_DOCUMENT_NODE) ||
#endif
(node->type == XML_HTML_DOCUMENT_NODE))
continue;
if ((node->type == XML_ELEMENT_NODE) && (node->name[0] == ' '))
continue;
goto rollback;
case XSLT_OP_ELEM:
if (node->type != XML_ELEMENT_NODE)
goto rollback;
if (step->value == NULL)
continue;
if (step->value[0] != node->name[0])
goto rollback;
if (!xmlStrEqual(step->value, node->name))
goto rollback;
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 != NULL)
goto rollback;
} else if (node->ns->href != NULL) {
if (step->value2 == NULL)
goto rollback;
if (!xmlStrEqual(step->value2, node->ns->href))
goto rollback;
}
continue;
case XSLT_OP_ATTR:
if (node->type != XML_ATTRIBUTE_NODE)
goto rollback;
if (step->value != NULL) {
if (step->value[0] != node->name[0])
goto rollback;
if (!xmlStrEqual(step->value, node->name))
goto rollback;
}
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 != NULL)
goto rollback;
} else if (step->value2 != NULL) {
if (!xmlStrEqual(step->value2, node->ns->href))
goto rollback;
}
continue;
case XSLT_OP_PARENT:
if ((node->type == XML_DOCUMENT_NODE) ||
(node->type == XML_HTML_DOCUMENT_NODE) ||
#ifdef LIBXML_DOCB_ENABLED
(node->type == XML_DOCB_DOCUMENT_NODE) ||
#endif
(node->type == XML_NAMESPACE_DECL))
goto rollback;
node = node->parent;
if (node == NULL)
goto rollback;
if (step->value == NULL)
continue;
if (step->value[0] != node->name[0])
goto rollback;
if (!xmlStrEqual(step->value, node->name))
goto rollback;
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 != NULL)
goto rollback;
} else if (node->ns->href != NULL) {
if (step->value2 == NULL)
goto rollback;
if (!xmlStrEqual(step->value2, node->ns->href))
goto rollback;
}
continue;
case XSLT_OP_ANCESTOR:
/* TODO: implement coalescing of ANCESTOR/NODE ops */
if (step->value == NULL) {
step = &comp->steps[i+1];
if (step->op == XSLT_OP_ROOT)
goto found;
/* added NS, ID and KEY as a result of bug 168208 */
if ((step->op != XSLT_OP_ELEM) &&
(step->op != XSLT_OP_ALL) &&
(step->op != XSLT_OP_NS) &&
(step->op != XSLT_OP_ID) &&
(step->op != XSLT_OP_KEY))
goto rollback;
}
if (node == NULL)
goto rollback;
if ((node->type == XML_DOCUMENT_NODE) ||
(node->type == XML_HTML_DOCUMENT_NODE) ||
#ifdef LIBXML_DOCB_ENABLED
(node->type == XML_DOCB_DOCUMENT_NODE) ||
#endif
(node->type == XML_NAMESPACE_DECL))
goto rollback;
node = node->parent;
if ((step->op != XSLT_OP_ELEM) && step->op != XSLT_OP_ALL) {
xsltPatPushState(ctxt, &states, i, node);
continue;
}
i++;
if (step->value == NULL) {
xsltPatPushState(ctxt, &states, i - 1, node);
continue;
}
while (node != NULL) {
if ((node->type == XML_ELEMENT_NODE) &&
(step->value[0] == node->name[0]) &&
(xmlStrEqual(step->value, node->name))) {
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 == NULL)
break;
} else if (node->ns->href != NULL) {
if ((step->value2 != NULL) &&
(xmlStrEqual(step->value2, node->ns->href)))
break;
}
}
node = node->parent;
}
if (node == NULL)
goto rollback;
xsltPatPushState(ctxt, &states, i - 1, node);
continue;
case XSLT_OP_ID: {
/* TODO Handle IDs decently, must be done differently */
xmlAttrPtr id;
if (node->type != XML_ELEMENT_NODE)
goto rollback;
id = xmlGetID(node->doc, step->value);
if ((id == NULL) || (id->parent != node))
goto rollback;
break;
}
case XSLT_OP_KEY: {
xmlNodeSetPtr list;
int indx;
list = xsltGetKey(ctxt, step->value,
step->value3, step->value2);
if (list == NULL)
goto rollback;
for (indx = 0;indx < list->nodeNr;indx++)
if (list->nodeTab[indx] == node)
break;
if (indx >= list->nodeNr)
goto rollback;
break;
}
case XSLT_OP_NS:
if (node->type != XML_ELEMENT_NODE)
goto rollback;
if (node->ns == NULL) {
if (step->value != NULL)
goto rollback;
} else if (node->ns->href != NULL) {
if (step->value == NULL)
goto rollback;
if (!xmlStrEqual(step->value, node->ns->href))
goto rollback;
}
break;
case XSLT_OP_ALL:
if (node->type != XML_ELEMENT_NODE)
goto rollback;
break;
case XSLT_OP_PREDICATE: {
xmlNodePtr oldNode;
xmlDocPtr doc;
int oldCS, oldCP;
int pos = 0, len = 0;
int isRVT;
/*
* when there is cascading XSLT_OP_PREDICATE, then use a
* direct computation approach. It's not done directly
* at the beginning of the routine to filter out as much
* as possible this costly computation.
*/
if (comp->direct) {
if (states.states != NULL) {
/* Free the rollback states */
xmlFree(states.states);
}
return(xsltTestCompMatchDirect(ctxt, comp, node,
comp->nsList, comp->nsNr));
}
doc = node->doc;
if (XSLT_IS_RES_TREE_FRAG(doc))
isRVT = 1;
else
isRVT = 0;
/*
* Depending on the last selection, one may need to
* recompute contextSize and proximityPosition.
*/
oldCS = ctxt->xpathCtxt->contextSize;
oldCP = ctxt->xpathCtxt->proximityPosition;
if ((sel != NULL) &&
(sel->op == XSLT_OP_ELEM) &&
(sel->value != NULL) &&
(node->type == XML_ELEMENT_NODE) &&
(node->parent != NULL)) {
xmlNodePtr previous;
int nocache = 0;
previous = (xmlNodePtr)
XSLT_RUNTIME_EXTRA(ctxt, sel->previousExtra, ptr);
if ((previous != NULL) &&
(previous->parent == node->parent)) {
/*
* just walk back to adjust the index
*/
int indx = 0;
xmlNodePtr sibling = node;
while (sibling != NULL) {
if (sibling == previous)
break;
if ((sibling->type == XML_ELEMENT_NODE) &&
(previous->name != NULL) &&
(sibling->name != NULL) &&
(previous->name[0] == sibling->name[0]) &&
(xmlStrEqual(previous->name, sibling->name)))
{
if ((sel->value2 == NULL) ||
((sibling->ns != NULL) &&
(xmlStrEqual(sel->value2,
sibling->ns->href))))
indx++;
}
sibling = sibling->prev;
}
if (sibling == NULL) {
/* hum going backward in document order ... */
indx = 0;
sibling = node;
while (sibling != NULL) {
if (sibling == previous)
break;
if ((sibling->type == XML_ELEMENT_NODE) &&
(previous->name != NULL) &&
(sibling->name != NULL) &&
(previous->name[0] == sibling->name[0]) &&
(xmlStrEqual(previous->name, sibling->name)))
{
if ((sel->value2 == NULL) ||
((sibling->ns != NULL) &&
(xmlStrEqual(sel->value2,
sibling->ns->href))))
{
indx--;
}
}
sibling = sibling->next;
}
}
if (sibling != NULL) {
pos = XSLT_RUNTIME_EXTRA(ctxt,
sel->indexExtra, ival) + indx;
/*
* If the node is in a Value Tree we need to
* save len, but cannot cache the node!
* (bugs 153137 and 158840)
*/
if (node->doc != NULL) {
len = XSLT_RUNTIME_EXTRA(ctxt,
sel->lenExtra, ival);
if (!isRVT) {
XSLT_RUNTIME_EXTRA(ctxt,
sel->previousExtra, ptr) = node;
XSLT_RUNTIME_EXTRA(ctxt,
sel->indexExtra, ival) = pos;
}
}
} else
pos = 0;
} else {
/*
* recompute the index
*/
xmlNodePtr parent = node->parent;
xmlNodePtr siblings = NULL;
if (parent) siblings = parent->children;
while (siblings != NULL) {
if (siblings->type == XML_ELEMENT_NODE) {
if (siblings == node) {
len++;
pos = len;
} else if ((node->name != NULL) &&
(siblings->name != NULL) &&
(node->name[0] == siblings->name[0]) &&
(xmlStrEqual(node->name, siblings->name))) {
if ((sel->value2 == NULL) ||
((siblings->ns != NULL) &&
(xmlStrEqual(sel->value2,
siblings->ns->href))))
len++;
}
}
siblings = siblings->next;
}
if ((parent == NULL) || (node->doc == NULL))
nocache = 1;
else {
while (parent->parent != NULL)
parent = parent->parent;
if (((parent->type != XML_DOCUMENT_NODE) &&
(parent->type != XML_HTML_DOCUMENT_NODE)) ||
(parent != (xmlNodePtr) node->doc))
nocache = 1;
}
}
if (pos != 0) {
ctxt->xpathCtxt->contextSize = len;
ctxt->xpathCtxt->proximityPosition = pos;
/*
* If the node is in a Value Tree we cannot
* cache it !
*/
if ((!isRVT) && (node->doc != NULL) &&
(nocache == 0)) {
XSLT_RUNTIME_EXTRA(ctxt, sel->previousExtra, ptr) =
node;
XSLT_RUNTIME_EXTRA(ctxt, sel->indexExtra, ival) =
pos;
XSLT_RUNTIME_EXTRA(ctxt, sel->lenExtra, ival) =
len;
}
}
} else if ((sel != NULL) && (sel->op == XSLT_OP_ALL) &&
(node->type == XML_ELEMENT_NODE)) {
xmlNodePtr previous;
int nocache = 0;
previous = (xmlNodePtr)
XSLT_RUNTIME_EXTRA(ctxt, sel->previousExtra, ptr);
if ((previous != NULL) &&
(previous->parent == node->parent)) {
/*
* just walk back to adjust the index
*/
int indx = 0;
xmlNodePtr sibling = node;
while (sibling != NULL) {
if (sibling == previous)
break;
if (sibling->type == XML_ELEMENT_NODE)
indx++;
sibling = sibling->prev;
}
if (sibling == NULL) {
/* hum going backward in document order ... */
indx = 0;
sibling = node;
while (sibling != NULL) {
if (sibling == previous)
break;
if (sibling->type == XML_ELEMENT_NODE)
indx--;
sibling = sibling->next;
}
}
if (sibling != NULL) {
pos = XSLT_RUNTIME_EXTRA(ctxt,
sel->indexExtra, ival) + indx;
/*
* If the node is in a Value Tree we cannot
* cache it !
*/
if ((node->doc != NULL) && !isRVT) {
len = XSLT_RUNTIME_EXTRA(ctxt,
sel->lenExtra, ival);
XSLT_RUNTIME_EXTRA(ctxt,
sel->previousExtra, ptr) = node;
XSLT_RUNTIME_EXTRA(ctxt,
sel->indexExtra, ival) = pos;
}
} else
pos = 0;
} else {
/*
* recompute the index
*/
xmlNodePtr parent = node->parent;
xmlNodePtr siblings = NULL;
if (parent) siblings = parent->children;
while (siblings != NULL) {
if (siblings->type == XML_ELEMENT_NODE) {
len++;
if (siblings == node) {
pos = len;
}
}
siblings = siblings->next;
}
if ((parent == NULL) || (node->doc == NULL))
nocache = 1;
else {
while (parent->parent != NULL)
parent = parent->parent;
if (((parent->type != XML_DOCUMENT_NODE) &&
(parent->type != XML_HTML_DOCUMENT_NODE)) ||
(parent != (xmlNodePtr) node->doc))
nocache = 1;
}
}
if (pos != 0) {
ctxt->xpathCtxt->contextSize = len;
ctxt->xpathCtxt->proximityPosition = pos;
/*
* If the node is in a Value Tree we cannot
* cache it !
*/
if ((node->doc != NULL) && (nocache == 0) && !isRVT) {
XSLT_RUNTIME_EXTRA(ctxt, sel->previousExtra, ptr) =
node;
XSLT_RUNTIME_EXTRA(ctxt, sel->indexExtra, ival) =
pos;
XSLT_RUNTIME_EXTRA(ctxt, sel->lenExtra, ival) =
len;
}
}
}
oldNode = ctxt->node;
ctxt->node = node;
if (step->value == NULL)
goto wrong_index;
if (step->comp == NULL)
goto wrong_index;
if (!xsltEvalXPathPredicate(ctxt, step->comp, comp->nsList,
comp->nsNr))
goto wrong_index;
if (pos != 0) {
ctxt->xpathCtxt->contextSize = oldCS;
ctxt->xpathCtxt->proximityPosition = oldCP;
}
ctxt->node = oldNode;
break;
wrong_index:
if (pos != 0) {
ctxt->xpathCtxt->contextSize = oldCS;
ctxt->xpathCtxt->proximityPosition = oldCP;
}
ctxt->node = oldNode;
goto rollback;
}
case XSLT_OP_PI:
if (node->type != XML_PI_NODE)
goto rollback;
if (step->value != NULL) {
if (!xmlStrEqual(step->value, node->name))
goto rollback;
}
break;
case XSLT_OP_COMMENT:
if (node->type != XML_COMMENT_NODE)
goto rollback;
break;
case XSLT_OP_TEXT:
if ((node->type != XML_TEXT_NODE) &&
(node->type != XML_CDATA_SECTION_NODE))
goto rollback;
break;
case XSLT_OP_NODE:
switch (node->type) {
case XML_ELEMENT_NODE:
case XML_CDATA_SECTION_NODE:
case XML_PI_NODE:
case XML_COMMENT_NODE:
case XML_TEXT_NODE:
break;
default:
goto rollback;
}
break;
}
}
found:
if (states.states != NULL) {
/* Free the rollback states */
xmlFree(states.states);
}
return(1);
rollback:
/* got an error try to rollback */
if (states.states == NULL)
return(0);
if (states.nbstates <= 0) {
xmlFree(states.states);
return(0);
}
states.nbstates--;
i = states.states[states.nbstates].step;
node = states.states[states.nbstates].node;
#if 0
fprintf(stderr, "Pop: %d, %s\n", i, node->name);
#endif
goto restart;
}
Commit Message: Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338}
CWE ID: CWE-119 | xsltTestCompMatch(xsltTransformContextPtr ctxt, xsltCompMatchPtr comp,
xmlNodePtr matchNode, const xmlChar *mode,
const xmlChar *modeURI) {
int i;
xmlNodePtr node = matchNode;
xsltStepOpPtr step, sel = NULL;
xsltStepStates states = {0, 0, NULL}; /* // may require backtrack */
if ((comp == NULL) || (node == NULL) || (ctxt == NULL)) {
xsltTransformError(ctxt, NULL, node,
"xsltTestCompMatch: null arg\n");
return(-1);
}
if (mode != NULL) {
if (comp->mode == NULL)
return(0);
/*
* both mode strings must be interned on the stylesheet dictionary
*/
if (comp->mode != mode)
return(0);
} else {
if (comp->mode != NULL)
return(0);
}
if (modeURI != NULL) {
if (comp->modeURI == NULL)
return(0);
/*
* both modeURI strings must be interned on the stylesheet dictionary
*/
if (comp->modeURI != modeURI)
return(0);
} else {
if (comp->modeURI != NULL)
return(0);
}
i = 0;
restart:
for (;i < comp->nbStep;i++) {
step = &comp->steps[i];
if (step->op != XSLT_OP_PREDICATE)
sel = step;
switch (step->op) {
case XSLT_OP_END:
goto found;
case XSLT_OP_ROOT:
if ((node->type == XML_DOCUMENT_NODE) ||
#ifdef LIBXML_DOCB_ENABLED
(node->type == XML_DOCB_DOCUMENT_NODE) ||
#endif
(node->type == XML_HTML_DOCUMENT_NODE))
continue;
if ((node->type == XML_ELEMENT_NODE) && (node->name[0] == ' '))
continue;
goto rollback;
case XSLT_OP_ELEM:
if (node->type != XML_ELEMENT_NODE)
goto rollback;
if (step->value == NULL)
continue;
if (step->value[0] != node->name[0])
goto rollback;
if (!xmlStrEqual(step->value, node->name))
goto rollback;
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 != NULL)
goto rollback;
} else if (node->ns->href != NULL) {
if (step->value2 == NULL)
goto rollback;
if (!xmlStrEqual(step->value2, node->ns->href))
goto rollback;
}
continue;
case XSLT_OP_ATTR:
if (node->type != XML_ATTRIBUTE_NODE)
goto rollback;
if (step->value != NULL) {
if (step->value[0] != node->name[0])
goto rollback;
if (!xmlStrEqual(step->value, node->name))
goto rollback;
}
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 != NULL)
goto rollback;
} else if (step->value2 != NULL) {
if (!xmlStrEqual(step->value2, node->ns->href))
goto rollback;
}
continue;
case XSLT_OP_PARENT:
if ((node->type == XML_DOCUMENT_NODE) ||
(node->type == XML_HTML_DOCUMENT_NODE) ||
#ifdef LIBXML_DOCB_ENABLED
(node->type == XML_DOCB_DOCUMENT_NODE) ||
#endif
(node->type == XML_NAMESPACE_DECL))
goto rollback;
node = node->parent;
if (node == NULL)
goto rollback;
if (step->value == NULL)
continue;
if (step->value[0] != node->name[0])
goto rollback;
if (!xmlStrEqual(step->value, node->name))
goto rollback;
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 != NULL)
goto rollback;
} else if (node->ns->href != NULL) {
if (step->value2 == NULL)
goto rollback;
if (!xmlStrEqual(step->value2, node->ns->href))
goto rollback;
}
continue;
case XSLT_OP_ANCESTOR:
/* TODO: implement coalescing of ANCESTOR/NODE ops */
if (step->value == NULL) {
step = &comp->steps[i+1];
if (step->op == XSLT_OP_ROOT)
goto found;
/* added NS, ID and KEY as a result of bug 168208 */
if ((step->op != XSLT_OP_ELEM) &&
(step->op != XSLT_OP_ALL) &&
(step->op != XSLT_OP_NS) &&
(step->op != XSLT_OP_ID) &&
(step->op != XSLT_OP_KEY))
goto rollback;
}
if (node == NULL)
goto rollback;
if ((node->type == XML_DOCUMENT_NODE) ||
(node->type == XML_HTML_DOCUMENT_NODE) ||
#ifdef LIBXML_DOCB_ENABLED
(node->type == XML_DOCB_DOCUMENT_NODE) ||
#endif
(node->type == XML_NAMESPACE_DECL))
goto rollback;
node = node->parent;
if ((step->op != XSLT_OP_ELEM) && step->op != XSLT_OP_ALL) {
xsltPatPushState(ctxt, &states, i, node);
continue;
}
i++;
if (step->value == NULL) {
xsltPatPushState(ctxt, &states, i - 1, node);
continue;
}
while (node != NULL) {
if ((node->type == XML_ELEMENT_NODE) &&
(step->value[0] == node->name[0]) &&
(xmlStrEqual(step->value, node->name))) {
/* Namespace test */
if (node->ns == NULL) {
if (step->value2 == NULL)
break;
} else if (node->ns->href != NULL) {
if ((step->value2 != NULL) &&
(xmlStrEqual(step->value2, node->ns->href)))
break;
}
}
node = node->parent;
}
if (node == NULL)
goto rollback;
xsltPatPushState(ctxt, &states, i - 1, node);
continue;
case XSLT_OP_ID: {
/* TODO Handle IDs decently, must be done differently */
xmlAttrPtr id;
if (node->type != XML_ELEMENT_NODE)
goto rollback;
id = xmlGetID(node->doc, step->value);
if ((id == NULL) || (id->parent != node))
goto rollback;
break;
}
case XSLT_OP_KEY: {
xmlNodeSetPtr list;
int indx;
list = xsltGetKey(ctxt, step->value,
step->value3, step->value2);
if (list == NULL)
goto rollback;
for (indx = 0;indx < list->nodeNr;indx++)
if (list->nodeTab[indx] == node)
break;
if (indx >= list->nodeNr)
goto rollback;
break;
}
case XSLT_OP_NS:
if (node->type != XML_ELEMENT_NODE)
goto rollback;
if (node->ns == NULL) {
if (step->value != NULL)
goto rollback;
} else if (node->ns->href != NULL) {
if (step->value == NULL)
goto rollback;
if (!xmlStrEqual(step->value, node->ns->href))
goto rollback;
}
break;
case XSLT_OP_ALL:
if (node->type != XML_ELEMENT_NODE)
goto rollback;
break;
case XSLT_OP_PREDICATE: {
/*
* When there is cascading XSLT_OP_PREDICATE or a predicate
* after an op which hasn't been optimized yet, then use a
* direct computation approach. It's not done directly
* at the beginning of the routine to filter out as much
* as possible this costly computation.
*/
if (comp->direct) {
if (states.states != NULL) {
/* Free the rollback states */
xmlFree(states.states);
}
return(xsltTestCompMatchDirect(ctxt, comp, matchNode,
comp->nsList, comp->nsNr));
}
if (!xsltTestPredicateMatch(ctxt, comp, node, step, sel))
goto rollback;
break;
}
case XSLT_OP_PI:
if (node->type != XML_PI_NODE)
goto rollback;
if (step->value != NULL) {
if (!xmlStrEqual(step->value, node->name))
goto rollback;
}
break;
case XSLT_OP_COMMENT:
if (node->type != XML_COMMENT_NODE)
goto rollback;
break;
case XSLT_OP_TEXT:
if ((node->type != XML_TEXT_NODE) &&
(node->type != XML_CDATA_SECTION_NODE))
goto rollback;
break;
case XSLT_OP_NODE:
switch (node->type) {
case XML_ELEMENT_NODE:
case XML_CDATA_SECTION_NODE:
case XML_PI_NODE:
case XML_COMMENT_NODE:
case XML_TEXT_NODE:
break;
default:
goto rollback;
}
break;
}
}
found:
if (states.states != NULL) {
/* Free the rollback states */
xmlFree(states.states);
}
return(1);
rollback:
/* got an error try to rollback */
if (states.states == NULL)
return(0);
if (states.nbstates <= 0) {
xmlFree(states.states);
return(0);
}
states.nbstates--;
i = states.states[states.nbstates].step;
node = states.states[states.nbstates].node;
#if 0
fprintf(stderr, "Pop: %d, %s\n", i, node->name);
#endif
goto restart;
}
| 173,314 |
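The key change in the fixed version above is keeping the original match target (matchNode) separate from the working cursor, which the parent/ancestor steps rewrite while backtracking. A skeletal sketch of that separation with illustrative names:

struct xml_node;

/* Hypothetical direct-evaluation path; it must see the original target. */
extern int test_predicates_direct(struct xml_node *original);

int test_match(struct xml_node *match_node)
{
    struct xml_node *node = match_node;  /* cursor: steps may move this */

    /* ... pattern steps advance and backtrack `node` here ... */
    if (!node)
        return 0;

    /* Direct predicate evaluation uses the saved target, not the cursor,
     * so backtracking can no longer feed it a wrong node. */
    return test_predicates_direct(match_node);
}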
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void MediaStreamDispatcherHost::CancelAllRequests() {
if (!bindings_.empty())
return;
media_stream_manager_->CancelAllRequests(render_process_id_,
render_frame_id_);
}
Commit Message: Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
CWE ID: CWE-189 | void MediaStreamDispatcherHost::CancelAllRequests() {
media_stream_manager_->CancelAllRequests(render_process_id_, render_frame_id_,
requester_id_);
}
| 173,092 |
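The fix above threads a per-host requester id through every request so that cancellation is scoped to the requests this host actually created. A sketch with illustrative names (not the Chromium API):

struct dispatcher_host {
    int process_id;
    int frame_id;
    int requester_id;   /* unique per host instance */
};

/* Hypothetical manager call; cancels only requests matching all three keys. */
extern void cancel_requests(int process_id, int frame_id, int requester_id);

void cancel_all(const struct dispatcher_host *h)
{
    /* Keying on (process, frame) alone let one host cancel another host's
     * requests during teardown races; requester_id removes the collision. */
    cancel_requests(h->process_id, h->frame_id, h->requester_id);
}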
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void bt_for_each(struct blk_mq_hw_ctx *hctx,
struct blk_mq_bitmap_tags *bt, unsigned int off,
busy_iter_fn *fn, void *data, bool reserved)
{
struct request *rq;
int bit, i;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
for (bit = find_first_bit(&bm->word, bm->depth);
bit < bm->depth;
bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
if (rq->q == hctx->queue)
fn(hctx, rq, data, reserved);
}
off += (1 << bt->bits_per_word);
}
}
Commit Message: blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fields of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
[ 439.700653] Dumping ftrace buffer:
[ 439.700653] (ftrace buffer empty)
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0
[ 439.730500] Stack:
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80
[ 439.755663] Call Trace:
[ 439.755663] <IRQ>
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90
[ 439.755663] <EOI>
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e
[ 439.790911] RSP <ffff880819203da0>
[ 439.790911] CR2: 0000000000000158
[ 439.790911] ---[ end trace d40af58949325661 ]---
Cc: <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
CWE ID: CWE-362 | static void bt_for_each(struct blk_mq_hw_ctx *hctx,
struct blk_mq_bitmap_tags *bt, unsigned int off,
busy_iter_fn *fn, void *data, bool reserved)
{
struct request *rq;
int bit, i;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
for (bit = find_first_bit(&bm->word, bm->depth);
bit < bm->depth;
bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
rq = hctx->tags->rqs[off + bit];
if (rq->q == hctx->queue)
fn(hctx, rq, data, reserved);
}
off += (1 << bt->bits_per_word);
}
}
| 169,455 |
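The fix above reads tags->rqs[tag] directly because, inside the timeout iterator, a request can be freed and its tag recycled at any moment, while the rqs[] slot is kept current by the tag allocator. A trimmed sketch of the safe iteration; the structures are stand-ins for the blk-mq ones:

struct request { void *q; };

struct tag_set {
    struct request **rqs;   /* rqs[tag] updated whenever a tag is assigned */
};

typedef void (*busy_fn)(struct request *rq, void *data);

void for_each_busy_tag(struct tag_set *tags, const unsigned long *bitmap,
                       unsigned int nr_tags, busy_fn fn, void *data)
{
    const unsigned int bits = 8 * sizeof(unsigned long);
    unsigned int tag;

    for (tag = 0; tag < nr_tags; tag++) {
        if (!(bitmap[tag / bits] & (1UL << (tag % bits))))
            continue;
        /* Read the slot itself, not a tag-to-request translation that may
         * chase freed memory. */
        struct request *rq = tags->rqs[tag];
        if (rq)
            fn(rq, data);
    }
}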
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int seticc(i_ctx_t * i_ctx_p, int ncomps, ref *ICCdict, float *range_buff)
{
int code, k;
gs_color_space * pcs;
ref * pstrmval;
stream * s = 0L;
cmm_profile_t *picc_profile = NULL;
int i, expected = 0;
ref * pnameval;
static const char *const icc_std_profile_names[] = {
GSICC_STANDARD_PROFILES
};
static const char *const icc_std_profile_keys[] = {
GSICC_STANDARD_PROFILES_KEYS
};
/* verify the DataSource entry */
if (dict_find_string(ICCdict, "DataSource", &pstrmval) <= 0)
return_error(gs_error_undefined);
check_read_file(i_ctx_p, s, pstrmval);
/* build the color space object */
code = gs_cspace_build_ICC(&pcs, NULL, gs_gstate_memory(igs));
if (code < 0)
return gs_rethrow(code, "building color space object");
/* For now, dump the profile into a buffer
and obtain handle from the buffer when we need it.
We may want to change this later.
This depends to some degree on what the CMS is capable of doing.
I don't want to get bogged down on stream I/O at this point.
Note also, if we are going to be putting these into the clist we will
want to have this buffer. */
/* Check if we have the /Name entry. This is used to associate with
specs that have enumerated types to indicate sRGB sGray etc */
if (dict_find_string(ICCdict, "Name", &pnameval) > 0){
uint size = r_size(pnameval);
char *str = (char *)gs_alloc_bytes(gs_gstate_memory(igs), size+1, "seticc");
memcpy(str, (const char *)pnameval->value.bytes, size);
str[size] = 0;
/* Compare this to the standard profile names */
for (k = 0; k < GSICC_NUMBER_STANDARD_PROFILES; k++) {
if ( strcmp( str, icc_std_profile_keys[k] ) == 0 ) {
picc_profile = gsicc_get_profile_handle_file(icc_std_profile_names[k],
strlen(icc_std_profile_names[k]), gs_gstate_memory(igs));
break;
}
}
gs_free_object(gs_gstate_memory(igs), str, "seticc");
} else {
picc_profile = gsicc_profile_new(s, gs_gstate_memory(igs), NULL, 0);
if (picc_profile == NULL)
return gs_throw(gs_error_VMerror, "Creation of ICC profile failed");
/* We have to get the profile handle due to the fact that we need to know
if it has a data space that is CIELAB */
picc_profile->profile_handle =
gsicc_get_profile_handle_buffer(picc_profile->buffer,
picc_profile->buffer_size,
gs_gstate_memory(igs));
}
if (picc_profile == NULL || picc_profile->profile_handle == NULL) {
/* Free up everything, the profile is not valid. We will end up going
ahead and using a default based upon the number of components */
rc_decrement(picc_profile,"seticc");
rc_decrement(pcs,"seticc");
return -1;
}
code = gsicc_set_gscs_profile(pcs, picc_profile, gs_gstate_memory(igs));
if (code < 0) {
rc_decrement(picc_profile,"seticc");
rc_decrement(pcs,"seticc");
return code;
}
picc_profile->num_comps = ncomps;
picc_profile->data_cs =
gscms_get_profile_data_space(picc_profile->profile_handle,
picc_profile->memory);
switch (picc_profile->data_cs) {
case gsCIEXYZ:
case gsCIELAB:
case gsRGB:
expected = 3;
break;
case gsGRAY:
expected = 1;
break;
case gsCMYK:
expected = 4;
break;
case gsNCHANNEL:
case gsNAMED: /* Silence warnings */
case gsUNDEFINED: /* Silence warnings */
break;
}
if (!expected || ncomps != expected) {
rc_decrement(picc_profile,"seticc");
rc_decrement(pcs,"seticc");
return_error(gs_error_rangecheck);
}
/* Lets go ahead and get the hash code and check if we match one of the default spaces */
/* Later we may want to delay this, but for now lets go ahead and do it */
gsicc_init_hash_cs(picc_profile, igs);
/* Set the range according to the data type that is associated with the
ICC input color type. Occasionally, we will run into CIELAB to CIELAB
profiles for spot colors in PDF documents. These spot colors are typically described
as separation colors with tint transforms that go from a tint value
to a linear mapping between the CIELAB white point and the CIELAB tint
color. This results in a CIELAB value that we need to use to fill. We
need to detect this to make sure we do the proper scaling of the data. For
CIELAB images in PDF, the source is always normal 8 or 16 bit encoded data
in the range from 0 to 255 or 0 to 65535. In that case, there should not
be any encoding and decoding to CIELAB. The PDF content will not include
an ICC profile but will set the color space to \Lab. In this case, we use
our seticc_lab operation to install the LAB to LAB profile, but we detect
that we did that through the use of the is_lab flag in the profile descriptor.
We then avoid the CIELAB encode and decode */
if (picc_profile->data_cs == gsCIELAB) {
/* If the input space to this profile is CIELAB, then we need to adjust the limits */
/* See ICC spec ICC.1:2004-10 Section 6.3.4.2 and 6.4. I don't believe we need to
worry about CIEXYZ profiles or any of the other odds ones. Need to check that though
at some point. */
picc_profile->Range.ranges[0].rmin = 0.0;
picc_profile->Range.ranges[0].rmax = 100.0;
picc_profile->Range.ranges[1].rmin = -128.0;
picc_profile->Range.ranges[1].rmax = 127.0;
picc_profile->Range.ranges[2].rmin = -128.0;
picc_profile->Range.ranges[2].rmax = 127.0;
picc_profile->islab = true;
} else {
for (i = 0; i < ncomps; i++) {
picc_profile->Range.ranges[i].rmin = range_buff[2 * i];
picc_profile->Range.ranges[i].rmax = range_buff[2 * i + 1];
}
}
/* Now see if we are in an override situation. We have to wait until now
in case this is an LAB profile which we will not override */
if (gs_currentoverrideicc(igs) && picc_profile->data_cs != gsCIELAB) {
/* Free up the profile structure */
switch( picc_profile->data_cs ) {
case gsRGB:
pcs->cmm_icc_profile_data = igs->icc_manager->default_rgb;
break;
case gsGRAY:
pcs->cmm_icc_profile_data = igs->icc_manager->default_gray;
break;
case gsCMYK:
pcs->cmm_icc_profile_data = igs->icc_manager->default_cmyk;
break;
default:
break;
}
/* Have one increment from the color space. Having these tied
together is not really correct. Need to fix that. ToDo. MJV */
rc_adjust(picc_profile, -2, "seticc");
rc_increment(pcs->cmm_icc_profile_data);
}
/* Set the color space. We are done. No joint cache here... */
code = gs_setcolorspace(igs, pcs);
/* The context has taken a reference to the colorspace. We no longer need
* ours, so drop it. */
rc_decrement_only(pcs, "seticc");
/* In this case, we already have a ref count of 2 on the icc profile
one for when it was created and one for when it was set. We really
only want one here so adjust */
rc_decrement(picc_profile,"seticc");
/* Remove the ICC dict from the stack */
pop(1);
return code;
}
Commit Message:
CWE ID: CWE-704 | int seticc(i_ctx_t * i_ctx_p, int ncomps, ref *ICCdict, float *range_buff)
{
int code, k;
gs_color_space * pcs;
ref * pstrmval;
stream * s = 0L;
cmm_profile_t *picc_profile = NULL;
int i, expected = 0;
ref * pnameval;
static const char *const icc_std_profile_names[] = {
GSICC_STANDARD_PROFILES
};
static const char *const icc_std_profile_keys[] = {
GSICC_STANDARD_PROFILES_KEYS
};
/* verify the DataSource entry */
if (dict_find_string(ICCdict, "DataSource", &pstrmval) <= 0)
return_error(gs_error_undefined);
check_read_file(i_ctx_p, s, pstrmval);
/* build the color space object */
code = gs_cspace_build_ICC(&pcs, NULL, gs_gstate_memory(igs));
if (code < 0)
return gs_rethrow(code, "building color space object");
/* For now, dump the profile into a buffer
and obtain handle from the buffer when we need it.
We may want to change this later.
This depends to some degree on what the CMS is capable of doing.
I don't want to get bogged down on stream I/O at this point.
Note also, if we are going to be putting these into the clist we will
want to have this buffer. */
/* Check if we have the /Name entry. This is used to associate with
specs that have enumerated types to indicate sRGB sGray etc */
if (dict_find_string(ICCdict, "Name", &pnameval) > 0 && r_has_type(pnameval, t_string)){
uint size = r_size(pnameval);
char *str = (char *)gs_alloc_bytes(gs_gstate_memory(igs), size+1, "seticc");
memcpy(str, (const char *)pnameval->value.bytes, size);
str[size] = 0;
/* Compare this to the standard profile names */
for (k = 0; k < GSICC_NUMBER_STANDARD_PROFILES; k++) {
if ( strcmp( str, icc_std_profile_keys[k] ) == 0 ) {
picc_profile = gsicc_get_profile_handle_file(icc_std_profile_names[k],
strlen(icc_std_profile_names[k]), gs_gstate_memory(igs));
break;
}
}
gs_free_object(gs_gstate_memory(igs), str, "seticc");
} else {
picc_profile = gsicc_profile_new(s, gs_gstate_memory(igs), NULL, 0);
if (picc_profile == NULL)
return gs_throw(gs_error_VMerror, "Creation of ICC profile failed");
/* We have to get the profile handle due to the fact that we need to know
if it has a data space that is CIELAB */
picc_profile->profile_handle =
gsicc_get_profile_handle_buffer(picc_profile->buffer,
picc_profile->buffer_size,
gs_gstate_memory(igs));
}
if (picc_profile == NULL || picc_profile->profile_handle == NULL) {
/* Free up everything, the profile is not valid. We will end up going
ahead and using a default based upon the number of components */
rc_decrement(picc_profile,"seticc");
rc_decrement(pcs,"seticc");
return -1;
}
code = gsicc_set_gscs_profile(pcs, picc_profile, gs_gstate_memory(igs));
if (code < 0) {
rc_decrement(picc_profile,"seticc");
rc_decrement(pcs,"seticc");
return code;
}
picc_profile->num_comps = ncomps;
picc_profile->data_cs =
gscms_get_profile_data_space(picc_profile->profile_handle,
picc_profile->memory);
switch (picc_profile->data_cs) {
case gsCIEXYZ:
case gsCIELAB:
case gsRGB:
expected = 3;
break;
case gsGRAY:
expected = 1;
break;
case gsCMYK:
expected = 4;
break;
case gsNCHANNEL:
case gsNAMED: /* Silence warnings */
case gsUNDEFINED: /* Silence warnings */
break;
}
if (!expected || ncomps != expected) {
rc_decrement(picc_profile,"seticc");
rc_decrement(pcs,"seticc");
return_error(gs_error_rangecheck);
}
    /* Let's go ahead and get the hash code and check if we match one of the default spaces */
    /* Later we may want to delay this, but for now let's go ahead and do it */
gsicc_init_hash_cs(picc_profile, igs);
/* Set the range according to the data type that is associated with the
ICC input color type. Occasionally, we will run into CIELAB to CIELAB
profiles for spot colors in PDF documents. These spot colors are typically described
as separation colors with tint transforms that go from a tint value
to a linear mapping between the CIELAB white point and the CIELAB tint
color. This results in a CIELAB value that we need to use to fill. We
need to detect this to make sure we do the proper scaling of the data. For
CIELAB images in PDF, the source is always normal 8 or 16 bit encoded data
in the range from 0 to 255 or 0 to 65535. In that case, there should not
be any encoding and decoding to CIELAB. The PDF content will not include
an ICC profile but will set the color space to \Lab. In this case, we use
our seticc_lab operation to install the LAB to LAB profile, but we detect
that we did that through the use of the is_lab flag in the profile descriptor.
       We then avoid the CIELAB encode and decode */
if (picc_profile->data_cs == gsCIELAB) {
/* If the input space to this profile is CIELAB, then we need to adjust the limits */
/* See ICC spec ICC.1:2004-10 Section 6.3.4.2 and 6.4. I don't believe we need to
       worry about CIEXYZ profiles or any of the other odd ones. Need to check that though
at some point. */
picc_profile->Range.ranges[0].rmin = 0.0;
picc_profile->Range.ranges[0].rmax = 100.0;
picc_profile->Range.ranges[1].rmin = -128.0;
picc_profile->Range.ranges[1].rmax = 127.0;
picc_profile->Range.ranges[2].rmin = -128.0;
picc_profile->Range.ranges[2].rmax = 127.0;
picc_profile->islab = true;
} else {
for (i = 0; i < ncomps; i++) {
picc_profile->Range.ranges[i].rmin = range_buff[2 * i];
picc_profile->Range.ranges[i].rmax = range_buff[2 * i + 1];
}
}
    /* Now see if we are in an override situation. We have to wait until now
       in case this is a LAB profile which we will not override */
if (gs_currentoverrideicc(igs) && picc_profile->data_cs != gsCIELAB) {
/* Free up the profile structure */
switch( picc_profile->data_cs ) {
case gsRGB:
pcs->cmm_icc_profile_data = igs->icc_manager->default_rgb;
break;
case gsGRAY:
pcs->cmm_icc_profile_data = igs->icc_manager->default_gray;
break;
case gsCMYK:
pcs->cmm_icc_profile_data = igs->icc_manager->default_cmyk;
break;
default:
break;
}
/* Have one increment from the color space. Having these tied
together is not really correct. Need to fix that. ToDo. MJV */
rc_adjust(picc_profile, -2, "seticc");
rc_increment(pcs->cmm_icc_profile_data);
}
/* Set the color space. We are done. No joint cache here... */
code = gs_setcolorspace(igs, pcs);
/* The context has taken a reference to the colorspace. We no longer need
* ours, so drop it. */
rc_decrement_only(pcs, "seticc");
/* In this case, we already have a ref count of 2 on the icc profile
one for when it was created and one for when it was set. We really
only want one here so adjust */
rc_decrement(picc_profile,"seticc");
/* Remove the ICC dict from the stack */
pop(1);
return code;
}
| 164,634 |
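
The CWE-704 classification comes down to the `expected` guard in the fix above: the caller-supplied component count must agree with the count implied by the profile's own data color space before the profile is trusted. A minimal sketch of that guard, using illustrative names rather than Ghostscript's:

enum class DataCs { Gray, Rgb, Lab, Cmyk, Unknown };

// Channel count implied by a profile's data color space; 0 means nothing
// can be assumed and the profile must be rejected.
int expected_components(DataCs cs) {
    switch (cs) {
        case DataCs::Gray: return 1;
        case DataCs::Rgb:  return 3;
        case DataCs::Lab:  return 3;
        case DataCs::Cmyk: return 4;
        default:           return 0;
    }
}

// Mirrors the `if (!expected || ncomps != expected)` rejection in the fix.
bool profile_components_ok(DataCs cs, int ncomps) {
    int expected = expected_components(cs);
    return expected != 0 && ncomps == expected;
}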
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int cipso_v4_req_setattr(struct request_sock *req,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val = -EPERM;
unsigned char *buf = NULL;
u32 buf_len;
u32 opt_len;
struct ip_options *opt = NULL;
struct inet_request_sock *req_inet;
/* We allocate the maximum CIPSO option size here so we are probably
* being a little wasteful, but it makes our life _much_ easier later
* on and after all we are only talking about 40 bytes. */
buf_len = CIPSO_V4_OPT_LEN_MAX;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (buf == NULL) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
if (ret_val < 0)
goto req_setattr_failure;
buf_len = ret_val;
/* We can't use ip_options_get() directly because it makes a call to
* ip_options_get_alloc() which allocates memory with GFP_KERNEL and
* we won't always have CAP_NET_RAW even though we _always_ want to
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
if (opt == NULL) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
memcpy(opt->__data, buf, buf_len);
opt->optlen = opt_len;
opt->cipso = sizeof(struct iphdr);
kfree(buf);
buf = NULL;
req_inet = inet_rsk(req);
opt = xchg(&req_inet->opt, opt);
kfree(opt);
return 0;
req_setattr_failure:
kfree(buf);
kfree(opt);
return ret_val;
}
Commit Message: inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change the inet->opt pointer and free the old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | int cipso_v4_req_setattr(struct request_sock *req,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val = -EPERM;
unsigned char *buf = NULL;
u32 buf_len;
u32 opt_len;
struct ip_options_rcu *opt = NULL;
struct inet_request_sock *req_inet;
/* We allocate the maximum CIPSO option size here so we are probably
* being a little wasteful, but it makes our life _much_ easier later
* on and after all we are only talking about 40 bytes. */
buf_len = CIPSO_V4_OPT_LEN_MAX;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (buf == NULL) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
if (ret_val < 0)
goto req_setattr_failure;
buf_len = ret_val;
/* We can't use ip_options_get() directly because it makes a call to
* ip_options_get_alloc() which allocates memory with GFP_KERNEL and
* we won't always have CAP_NET_RAW even though we _always_ want to
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
if (opt == NULL) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
memcpy(opt->opt.__data, buf, buf_len);
opt->opt.optlen = opt_len;
opt->opt.cipso = sizeof(struct iphdr);
kfree(buf);
buf = NULL;
req_inet = inet_rsk(req);
opt = xchg(&req_inet->opt, opt);
if (opt)
call_rcu(&opt->rcu, opt_kfree_rcu);
return 0;
req_setattr_failure:
kfree(buf);
kfree(opt);
return ret_val;
}
| 165,548 |
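
A user-space sketch of the handoff the commit describes, with std::shared_ptr atomics standing in for RCU (the kernel instead pairs rcu_dereference() readers with rcu_assign_pointer() writers and frees the old copy only after a grace period):

#include <memory>

struct IpOptions { int optlen = 0; /* ... */ };  // stand-in for ip_options_rcu

std::shared_ptr<const IpOptions> g_opts;  // stand-in for inet->inet_opt

// Reader: takes a snapshot that stays valid even if a writer swaps now.
std::shared_ptr<const IpOptions> reader_snapshot() {
    return std::atomic_load(&g_opts);
}

// Writer: publishes the new options; the old copy is freed only after the
// last reader drops its reference, never out from under a reader.
void writer_replace(std::shared_ptr<const IpOptions> next) {
    std::atomic_store(&g_opts, std::move(next));
}

The fixed function above follows the same discipline at its call site: the old pointer returned by xchg() goes to call_rcu() rather than a direct kfree().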
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ReportPreconnectAccuracy(
const PreconnectStats& stats,
const std::map<GURL, OriginRequestSummary>& requests) {
if (stats.requests_stats.empty())
return;
int preresolve_hits_count = 0;
int preresolve_misses_count = 0;
int preconnect_hits_count = 0;
int preconnect_misses_count = 0;
for (const auto& request_stats : stats.requests_stats) {
bool hit = requests.find(request_stats.origin) != requests.end();
bool preconnect = request_stats.was_preconnected;
preresolve_hits_count += hit;
preresolve_misses_count += !hit;
preconnect_hits_count += preconnect && hit;
preconnect_misses_count += preconnect && !hit;
}
int total_preresolves = preresolve_hits_count + preresolve_misses_count;
int total_preconnects = preconnect_hits_count + preconnect_misses_count;
DCHECK_EQ(static_cast<int>(stats.requests_stats.size()),
preresolve_hits_count + preresolve_misses_count);
DCHECK_GT(total_preresolves, 0);
size_t preresolve_hits_percentage =
(100 * preresolve_hits_count) / total_preresolves;
if (total_preconnects > 0) {
size_t preconnect_hits_percentage =
(100 * preconnect_hits_count) / total_preconnects;
UMA_HISTOGRAM_PERCENTAGE(
internal::kLoadingPredictorPreconnectHitsPercentage,
preconnect_hits_percentage);
}
UMA_HISTOGRAM_PERCENTAGE(internal::kLoadingPredictorPreresolveHitsPercentage,
preresolve_hits_percentage);
UMA_HISTOGRAM_COUNTS_100(internal::kLoadingPredictorPreresolveCount,
total_preresolves);
UMA_HISTOGRAM_COUNTS_100(internal::kLoadingPredictorPreconnectCount,
total_preconnects);
}
Commit Message: Origins should be represented as url::Origin (not as GURL).
As pointed out in //docs/security/origin-vs-url.md, origins should be
represented as url::Origin (not as GURL). This CL applies this
guideline to predictor-related code and changes the type of the
following fields from GURL to url::Origin:
- OriginRequestSummary::origin
- PreconnectedRequestStats::origin
- PreconnectRequest::origin
The old code did not depend on any non-origin parts of GURL
(like path and/or query). Therefore, this CL has no intended
behavior change.
Bug: 973885
Change-Id: Idd14590b4834cb9d50c74ed747b595fe1a4ba357
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1895167
Commit-Queue: Łukasz Anforowicz <[email protected]>
Reviewed-by: Alex Ilin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#716311}
CWE ID: CWE-125 | void ReportPreconnectAccuracy(
const PreconnectStats& stats,
const std::map<url::Origin, OriginRequestSummary>& requests) {
if (stats.requests_stats.empty())
return;
int preresolve_hits_count = 0;
int preresolve_misses_count = 0;
int preconnect_hits_count = 0;
int preconnect_misses_count = 0;
for (const auto& request_stats : stats.requests_stats) {
bool hit = requests.find(request_stats.origin) != requests.end();
bool preconnect = request_stats.was_preconnected;
preresolve_hits_count += hit;
preresolve_misses_count += !hit;
preconnect_hits_count += preconnect && hit;
preconnect_misses_count += preconnect && !hit;
}
int total_preresolves = preresolve_hits_count + preresolve_misses_count;
int total_preconnects = preconnect_hits_count + preconnect_misses_count;
DCHECK_EQ(static_cast<int>(stats.requests_stats.size()),
preresolve_hits_count + preresolve_misses_count);
DCHECK_GT(total_preresolves, 0);
size_t preresolve_hits_percentage =
(100 * preresolve_hits_count) / total_preresolves;
if (total_preconnects > 0) {
size_t preconnect_hits_percentage =
(100 * preconnect_hits_count) / total_preconnects;
UMA_HISTOGRAM_PERCENTAGE(
internal::kLoadingPredictorPreconnectHitsPercentage,
preconnect_hits_percentage);
}
UMA_HISTOGRAM_PERCENTAGE(internal::kLoadingPredictorPreresolveHitsPercentage,
preresolve_hits_percentage);
UMA_HISTOGRAM_COUNTS_100(internal::kLoadingPredictorPreresolveCount,
total_preresolves);
UMA_HISTOGRAM_COUNTS_100(internal::kLoadingPredictorPreconnectCount,
total_preconnects);
}
| 172,372 |
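
The guideline's payoff is mechanical: two URLs that differ only in path or query are one origin, so a GURL-keyed map holds spurious distinct keys where an url::Origin-keyed map holds one. A small sketch against the Chromium types the fix switches to (assuming only url/gurl.h and url/origin.h):

#include "url/gurl.h"
#include "url/origin.h"

bool SameOriginDistinctUrls() {
    GURL a("https://example.com/index.html");
    GURL b("https://example.com/stats?day=1");
    // Distinct as GURLs, identical as origins.
    return a != b && url::Origin::Create(a) == url::Origin::Create(b);  // true
}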
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: buffer_add_range(int fd, struct evbuffer *evb, struct range *range)
{
char buf[BUFSIZ];
size_t n, range_sz;
ssize_t nread;
if (lseek(fd, range->start, SEEK_SET) == -1)
return (0);
range_sz = range->end - range->start + 1;
while (range_sz) {
n = MINIMUM(range_sz, sizeof(buf));
if ((nread = read(fd, buf, n)) == -1)
return (0);
evbuffer_add(evb, buf, nread);
range_sz -= nread;
}
return (1);
}
Commit Message: Reimplement httpd's support for byte ranges.
The previous implementation loaded all the output into a single output
buffer and used its size to determine the Content-Length of the body.
The new implementation calculates the body length first and writes the
individual ranges in an async way using the bufferevent mechanism.
This prevents httpd from using too much memory and applies the
watermark and throttling mechanisms to range requests.
Problem reported by Pierre Kim (pierre.kim.sec at gmail.com)
OK benno@ sunil@
CWE ID: CWE-770 | buffer_add_range(int fd, struct evbuffer *evb, struct range *range)
| 168,375 |
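
The replacement strategy the commit outlines needs the body length before the first byte is written, so each range can then be streamed through the bufferevent watermark and throttling machinery in bounded chunks. A hypothetical first pass over the ranges (inclusive bounds, mirroring struct range above):

#include <cstdint>
#include <vector>

struct ByteRange { uint64_t start; uint64_t end; };  // inclusive, like struct range

// Hypothetical helper: total Content-Length across all requested ranges,
// computed up front so nothing has to accumulate in a single evbuffer.
uint64_t ContentLength(const std::vector<ByteRange>& ranges) {
    uint64_t total = 0;
    for (const ByteRange& r : ranges)
        total += r.end - r.start + 1;
    return total;
}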
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void StorageHandler::ClearDataForOrigin(
const std::string& origin,
const std::string& storage_types,
std::unique_ptr<ClearDataForOriginCallback> callback) {
if (!process_)
return callback->sendFailure(Response::InternalError());
StoragePartition* partition = process_->GetStoragePartition();
std::vector<std::string> types = base::SplitString(
storage_types, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
std::unordered_set<std::string> set(types.begin(), types.end());
uint32_t remove_mask = 0;
if (set.count(Storage::StorageTypeEnum::Appcache))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_APPCACHE;
if (set.count(Storage::StorageTypeEnum::Cookies))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_COOKIES;
if (set.count(Storage::StorageTypeEnum::File_systems))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_FILE_SYSTEMS;
if (set.count(Storage::StorageTypeEnum::Indexeddb))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_INDEXEDDB;
if (set.count(Storage::StorageTypeEnum::Local_storage))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_LOCAL_STORAGE;
if (set.count(Storage::StorageTypeEnum::Shader_cache))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_SHADER_CACHE;
if (set.count(Storage::StorageTypeEnum::Websql))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_WEBSQL;
if (set.count(Storage::StorageTypeEnum::Service_workers))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_SERVICE_WORKERS;
if (set.count(Storage::StorageTypeEnum::Cache_storage))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_CACHE_STORAGE;
if (set.count(Storage::StorageTypeEnum::All))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_ALL;
if (!remove_mask) {
return callback->sendFailure(
Response::InvalidParams("No valid storage type specified"));
}
partition->ClearData(remove_mask,
StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL,
GURL(origin), StoragePartition::OriginMatcherFunction(),
base::Time(), base::Time::Max(),
base::BindOnce(&ClearDataForOriginCallback::sendSuccess,
std::move(callback)));
}
Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attempt to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#531157}
CWE ID: CWE-20 | void StorageHandler::ClearDataForOrigin(
const std::string& origin,
const std::string& storage_types,
std::unique_ptr<ClearDataForOriginCallback> callback) {
if (!storage_partition_)
return callback->sendFailure(Response::InternalError());
std::vector<std::string> types = base::SplitString(
storage_types, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
std::unordered_set<std::string> set(types.begin(), types.end());
uint32_t remove_mask = 0;
if (set.count(Storage::StorageTypeEnum::Appcache))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_APPCACHE;
if (set.count(Storage::StorageTypeEnum::Cookies))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_COOKIES;
if (set.count(Storage::StorageTypeEnum::File_systems))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_FILE_SYSTEMS;
if (set.count(Storage::StorageTypeEnum::Indexeddb))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_INDEXEDDB;
if (set.count(Storage::StorageTypeEnum::Local_storage))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_LOCAL_STORAGE;
if (set.count(Storage::StorageTypeEnum::Shader_cache))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_SHADER_CACHE;
if (set.count(Storage::StorageTypeEnum::Websql))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_WEBSQL;
if (set.count(Storage::StorageTypeEnum::Service_workers))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_SERVICE_WORKERS;
if (set.count(Storage::StorageTypeEnum::Cache_storage))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_CACHE_STORAGE;
if (set.count(Storage::StorageTypeEnum::All))
remove_mask |= StoragePartition::REMOVE_DATA_MASK_ALL;
if (!remove_mask) {
return callback->sendFailure(
Response::InvalidParams("No valid storage type specified"));
}
storage_partition_->ClearData(
remove_mask, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL,
GURL(origin), StoragePartition::OriginMatcherFunction(), base::Time(),
base::Time::Max(),
base::BindOnce(&ClearDataForOriginCallback::sendSuccess,
std::move(callback)));
}
| 172,770 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void BrowserPpapiHostImpl::AddInstance(
PP_Instance instance,
const PepperRendererInstanceData& renderer_instance_data) {
DCHECK(instance_map_.find(instance) == instance_map_.end());
instance_map_[instance] =
base::MakeUnique<InstanceData>(renderer_instance_data);
}
Commit Message: Validate in-process plugin instance messages.
Bug: 733548, 733549
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_site_isolation
Change-Id: Ie5572c7bcafa05399b09c44425ddd5ce9b9e4cba
Reviewed-on: https://chromium-review.googlesource.com/538908
Commit-Queue: Bill Budge <[email protected]>
Reviewed-by: Raymes Khoury <[email protected]>
Cr-Commit-Position: refs/heads/master@{#480696}
CWE ID: CWE-20 | void BrowserPpapiHostImpl::AddInstance(
PP_Instance instance,
const PepperRendererInstanceData& renderer_instance_data) {
// NOTE: 'instance' may be coming from a compromised renderer process. We
// take care here to make sure an attacker can't overwrite data for an
// existing plugin instance.
// See http://crbug.com/733548.
if (instance_map_.find(instance) == instance_map_.end()) {
instance_map_[instance] =
base::MakeUnique<InstanceData>(renderer_instance_data);
} else {
NOTREACHED();
}
}
| 172,309 |
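
The shape of this fix is worth isolating: a DCHECK compiles out of release builds, and `instance` arrives over IPC from a possibly compromised renderer, so the duplicate-key case needs a real runtime branch that refuses to overwrite. A minimal sketch with illustrative types:

#include <map>
#include <memory>

struct InstanceData {};  // illustrative stand-in

// Reject a duplicate key instead of clobbering it: overwriting would let a
// hostile sender replace the data recorded for an existing instance.
bool AddInstanceChecked(std::map<int, std::unique_ptr<InstanceData>>& instances,
                        int instance_id) {
    if (instances.find(instance_id) != instances.end())
        return false;
    instances[instance_id] = std::make_unique<InstanceData>();
    return true;
}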
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_KYE_ERGO_525V:
/* the fixups that need to be done:
* - change led usage page to button for extra buttons
* - report size 8 count 1 must be size 1 count 8 for button
* bitfield
* - change the button usage range to 4-7 for the extra
* buttons
*/
if (*rsize >= 74 &&
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
rdesc[71] == 0x75 && rdesc[72] == 0x08 &&
rdesc[73] == 0x95 && rdesc[74] == 0x01) {
hid_info(hdev,
"fixing up Kye/Genius Ergo Mouse "
"report descriptor\n");
rdesc[62] = 0x09;
rdesc[64] = 0x04;
rdesc[66] = 0x07;
rdesc[72] = 0x01;
rdesc[74] = 0x08;
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) {
rdesc = easypen_i405x_rdesc_fixed;
*rsize = sizeof(easypen_i405x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
if (*rsize == EASYPEN_M610X_RDESC_ORIG_SIZE) {
rdesc = easypen_m610x_rdesc_fixed;
*rsize = sizeof(easypen_m610x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Gila Gaming Mouse");
break;
case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
"Genius Gx Imperator Keyboard");
break;
case USB_DEVICE_ID_GENIUS_MANTICORE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Manticore Keyboard");
break;
}
return rdesc;
}
Commit Message: HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: [email protected]
Reported-by: Ben Hawkes <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
CWE ID: CWE-119 | static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_KYE_ERGO_525V:
/* the fixups that need to be done:
* - change led usage page to button for extra buttons
* - report size 8 count 1 must be size 1 count 8 for button
* bitfield
* - change the button usage range to 4-7 for the extra
* buttons
*/
if (*rsize >= 75 &&
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
rdesc[71] == 0x75 && rdesc[72] == 0x08 &&
rdesc[73] == 0x95 && rdesc[74] == 0x01) {
hid_info(hdev,
"fixing up Kye/Genius Ergo Mouse "
"report descriptor\n");
rdesc[62] = 0x09;
rdesc[64] = 0x04;
rdesc[66] = 0x07;
rdesc[72] = 0x01;
rdesc[74] = 0x08;
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) {
rdesc = easypen_i405x_rdesc_fixed;
*rsize = sizeof(easypen_i405x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
if (*rsize == EASYPEN_M610X_RDESC_ORIG_SIZE) {
rdesc = easypen_m610x_rdesc_fixed;
*rsize = sizeof(easypen_m610x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Gila Gaming Mouse");
break;
case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
"Genius Gx Imperator Keyboard");
break;
case USB_DEVICE_ID_GENIUS_MANTICORE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Manticore Keyboard");
break;
}
return rdesc;
}
| 166,371 |
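
The arithmetic behind the one-byte bump from `>= 74` to `>= 75`: the Ergo 525V branch dereferences rdesc[74], and a buffer of N bytes only has valid indices 0..N-1, so index 74 demands N >= 75 — the old check admitted a descriptor exactly one byte too short. As a standalone check:

// Index i is addressable in a buffer of size n only when n >= i + 1; the
// highest index the fixup touches is rdesc[74].
bool descriptor_large_enough(unsigned int rsize) {
    const unsigned int highest_index_read = 74;
    return rsize >= highest_index_read + 1;  // i.e. rsize >= 75
}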
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DOMHandler::SetRenderer(RenderProcessHost* process_host,
RenderFrameHostImpl* frame_host) {
host_ = frame_host;
}
Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attempt to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#531157}
CWE ID: CWE-20 | void DOMHandler::SetRenderer(RenderProcessHost* process_host,
void DOMHandler::SetRenderer(int process_host_id,
RenderFrameHostImpl* frame_host) {
host_ = frame_host;
}
| 172,745 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: status_t SoundTriggerHwService::Module::startRecognition(sound_model_handle_t handle,
const sp<IMemory>& dataMemory)
{
ALOGV("startRecognition() model handle %d", handle);
if (!captureHotwordAllowed()) {
return PERMISSION_DENIED;
}
if (dataMemory != 0 && dataMemory->pointer() == NULL) {
ALOGE("startRecognition() dataMemory is non-0 but has NULL pointer()");
return BAD_VALUE;
}
AutoMutex lock(mLock);
if (mServiceState == SOUND_TRIGGER_STATE_DISABLED) {
return INVALID_OPERATION;
}
sp<Model> model = getModel(handle);
if (model == 0) {
return BAD_VALUE;
}
if ((dataMemory == 0) ||
(dataMemory->size() < sizeof(struct sound_trigger_recognition_config))) {
return BAD_VALUE;
}
if (model->mState == Model::STATE_ACTIVE) {
return INVALID_OPERATION;
}
struct sound_trigger_recognition_config *config =
(struct sound_trigger_recognition_config *)dataMemory->pointer();
config->capture_handle = model->mCaptureIOHandle;
config->capture_device = model->mCaptureDevice;
status_t status = mHwDevice->start_recognition(mHwDevice, handle, config,
SoundTriggerHwService::recognitionCallback,
this);
if (status == NO_ERROR) {
model->mState = Model::STATE_ACTIVE;
model->mConfig = *config;
}
return status;
}
Commit Message: soundtrigger: add size check on sound model and recogntion data
Bug: 30148546
Change-Id: I082f535a853c96571887eeea37c6d41ecee7d8c0
(cherry picked from commit bb00d8f139ff51336ab3c810d35685003949bcf8)
(cherry picked from commit ef0c91518446e65533ca8bab6726a845f27c73fd)
CWE ID: CWE-264 | status_t SoundTriggerHwService::Module::startRecognition(sound_model_handle_t handle,
const sp<IMemory>& dataMemory)
{
ALOGV("startRecognition() model handle %d", handle);
if (!captureHotwordAllowed()) {
return PERMISSION_DENIED;
}
if (dataMemory == 0 || dataMemory->pointer() == NULL) {
ALOGE("startRecognition() dataMemory is 0 or has NULL pointer()");
return BAD_VALUE;
}
struct sound_trigger_recognition_config *config =
(struct sound_trigger_recognition_config *)dataMemory->pointer();
if (config->data_offset < sizeof(struct sound_trigger_recognition_config) ||
config->data_size > (UINT_MAX - config->data_offset) ||
dataMemory->size() < config->data_offset ||
config->data_size > (dataMemory->size() - config->data_offset)) {
ALOGE("startRecognition() data_size is too big");
return BAD_VALUE;
}
AutoMutex lock(mLock);
if (mServiceState == SOUND_TRIGGER_STATE_DISABLED) {
return INVALID_OPERATION;
}
sp<Model> model = getModel(handle);
if (model == 0) {
return BAD_VALUE;
}
if (model->mState == Model::STATE_ACTIVE) {
return INVALID_OPERATION;
}
config->capture_handle = model->mCaptureIOHandle;
config->capture_device = model->mCaptureDevice;
status_t status = mHwDevice->start_recognition(mHwDevice, handle, config,
SoundTriggerHwService::recognitionCallback,
this);
if (status == NO_ERROR) {
model->mState = Model::STATE_ACTIVE;
model->mConfig = *config;
}
return status;
}
| 173,400 |
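
The added validation is written so that no intermediate sum can wrap: a naive `data_offset + data_size > size()` can overflow and pass, so each bound is rearranged onto the side that cannot wrap. The same shape in isolation, with assumed names:

#include <climits>
#include <cstddef>

// Validate an (offset, size) pair against a shared-memory mapping without
// ever forming offset + size; header_size is the fixed config struct size.
bool PayloadInBounds(size_t mem_size, unsigned int data_offset,
                     unsigned int data_size, size_t header_size) {
    if (data_offset < header_size) return false;           // payload follows header
    if (data_size > UINT_MAX - data_offset) return false;  // sum would wrap
    if (mem_size < data_offset) return false;               // offset past mapping
    return data_size <= mem_size - data_offset;              // fits the remainder
}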
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: valid_length(uint8_t option, int dl, int *type)
{
const struct dhcp_opt *opt;
ssize_t sz;
if (dl == 0)
return -1;
for (opt = dhcp_opts; opt->option; opt++) {
if (opt->option != option)
continue;
if (type)
*type = opt->type;
if (opt->type == 0 ||
opt->type & (STRING | RFC3442 | RFC5969))
return 0;
sz = 0;
if (opt->type & (UINT32 | IPV4))
sz = sizeof(uint32_t);
if (opt->type & UINT16)
sz = sizeof(uint16_t);
if (opt->type & UINT8)
sz = sizeof(uint8_t);
if (opt->type & (IPV4 | ARRAY))
return dl % sz;
return (dl == sz ? 0 : -1);
}
/* unknown option, so let it pass */
return 0;
}
Commit Message: Improve length checks in DHCP Options parsing of dhcpcd.
Bug: 26461634
Change-Id: Ic4c2eb381a6819e181afc8ab13891f3fc58b7deb
CWE ID: CWE-119 | valid_length(uint8_t option, int dl, int *type)
{
const struct dhcp_opt *opt;
ssize_t sz;
if (dl == 0)
return -1;
for (opt = dhcp_opts; opt->option; opt++) {
if (opt->option != option)
continue;
if (type)
*type = opt->type;
/* The size of RFC3442 and RFC5969 options is checked at a later
* stage in the code */
if (opt->type == 0 ||
opt->type & (STRING | RFC3442 | RFC5969))
return 0;
/* The code does not use SINT16 / SINT32 together with ARRAY.
* It is however far easier to reason about the code if all
          * possible array elements are included, and also does not cost
* any additional CPU resources. sizeof(uintXX_t) ==
* sizeof(intXX_t) can be assumed. */
sz = 0;
if (opt->type & (UINT32 | SINT32 | IPV4))
sz = sizeof(uint32_t);
else if (opt->type & (UINT16 | SINT16))
sz = sizeof(uint16_t);
else if (opt->type & UINT8)
sz = sizeof(uint8_t);
if (opt->type & ARRAY) {
/* The result of modulo zero is undefined. There are no
* options defined in this file that do not match one of
* the if-clauses above, so the following is not really
* necessary. However, to avoid confusion and unexpected
* behavior if the defined options are ever extended,
* returning false here seems sensible. */
if (!sz) return -1;
return (dl % sz == 0) ? 0 : -1;
}
return (sz == dl) ? 0 : -1;
}
/* unknown option, so let it pass */
return 0;
}
| 173,900 |
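
The ARRAY branch of the fix reduces to "length is a positive multiple of the element size", with an explicit guard because `dl % 0` is undefined behavior. In isolation:

// e.g. a list of IPv4 addresses (4-byte elements) is valid at dl = 4, 8,
// 12, ... and nothing else; the guard mirrors `if (!sz) return -1;` above.
bool array_length_ok(int dl, int elem_size) {
    if (elem_size <= 0)
        return false;  // avoid undefined dl % 0
    return dl > 0 && dl % elem_size == 0;
}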
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DocumentLoader::DetachFromFrame() {
DCHECK(frame_);
fetcher_->StopFetching();
if (frame_ && !SentDidFinishLoad())
LoadFailed(ResourceError::CancelledError(Url()));
fetcher_->ClearContext();
if (!frame_)
return;
application_cache_host_->DetachFromDocumentLoader();
application_cache_host_.Clear();
service_worker_network_provider_ = nullptr;
WeakIdentifierMap<DocumentLoader>::NotifyObjectDestroyed(this);
ClearResource();
frame_ = nullptr;
}
Commit Message: Fix detach with open()ed document leaving parent loading indefinitely
Change-Id: I26c2a054b9f1e5eb076acd677e1223058825f6d6
Bug: 803416
Test: fast/loader/document-open-iframe-then-detach.html
Change-Id: I26c2a054b9f1e5eb076acd677e1223058825f6d6
Reviewed-on: https://chromium-review.googlesource.com/887298
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Nate Chapin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#532967}
CWE ID: CWE-362 | void DocumentLoader::DetachFromFrame() {
void DocumentLoader::StopLoading() {
fetcher_->StopFetching();
if (frame_ && !SentDidFinishLoad())
LoadFailed(ResourceError::CancelledError(Url()));
}
void DocumentLoader::DetachFromFrame() {
DCHECK(frame_);
StopLoading();
fetcher_->ClearContext();
if (!frame_)
return;
application_cache_host_->DetachFromDocumentLoader();
application_cache_host_.Clear();
service_worker_network_provider_ = nullptr;
WeakIdentifierMap<DocumentLoader>::NotifyObjectDestroyed(this);
ClearResource();
frame_ = nullptr;
}
| 171,851 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void UnloadController::TabReplacedAt(TabStripModel* tab_strip_model,
TabContents* old_contents,
TabContents* new_contents,
int index) {
TabDetachedImpl(old_contents);
TabAttachedImpl(new_contents->web_contents());
}
Commit Message: Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | void UnloadController::TabReplacedAt(TabStripModel* tab_strip_model,
TabContents* old_contents,
TabContents* new_contents,
int index) {
TabDetachedImpl(old_contents->web_contents());
TabAttachedImpl(new_contents->web_contents());
}
| 171,521 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void registerBlobURLTask(void* context)
{
OwnPtr<BlobRegistryContext> blobRegistryContext = adoptPtr(static_cast<BlobRegistryContext*>(context));
blobRegistry().registerBlobURL(blobRegistryContext->url, blobRegistryContext->blobData.release());
}
Commit Message: Remove BlobRegistry indirection since there is only one implementation.
BUG=
Review URL: https://chromiumcodereview.appspot.com/15851008
git-svn-id: svn://svn.chromium.org/blink/trunk@152746 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | static void registerBlobURLTask(void* context)
{
OwnPtr<BlobRegistryContext> blobRegistryContext = adoptPtr(static_cast<BlobRegistryContext*>(context));
if (WebBlobRegistry* registry = blobRegistry()) {
WebBlobData webBlobData(blobRegistryContext->blobData.release());
registry->registerBlobURL(blobRegistryContext->url, webBlobData);
}
}
| 170,687 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PHP_METHOD(Phar, isValidPharFilename)
{
char *fname;
const char *ext_str;
size_t fname_len;
int ext_len, is_executable;
zend_bool executable = 1;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|b", &fname, &fname_len, &executable) == FAILURE) {
return;
}
is_executable = executable;
RETVAL_BOOL(phar_detect_phar_fname_ext(fname, fname_len, &ext_str, &ext_len, is_executable, 2, 1) == SUCCESS);
}
Commit Message:
CWE ID: CWE-20 | PHP_METHOD(Phar, isValidPharFilename)
{
char *fname;
const char *ext_str;
size_t fname_len;
int ext_len, is_executable;
zend_bool executable = 1;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "p|b", &fname, &fname_len, &executable) == FAILURE) {
return;
}
is_executable = executable;
RETVAL_BOOL(phar_detect_phar_fname_ext(fname, fname_len, &ext_str, &ext_len, is_executable, 2, 1) == SUCCESS);
}
| 165,059 |
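
The entire fix is the one-character change from "s" to "p" in the parameter string: PHP's "p" specifier accepts only a valid path — a string with no embedded NUL bytes — where "s" accepts arbitrary bytes. A NUL smuggled into a filename would otherwise silently truncate it once it reaches C string handling. The property "p" enforces, sketched:

#include <cstddef>
#include <cstring>

// A path argument must contain no embedded NUL; this is the extra check the
// "p" specifier performs that plain "s" does not.
bool path_has_no_embedded_nul(const char* s, size_t len) {
    return std::memchr(s, '\0', len) == nullptr;
}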
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: Read(cc::mojom::CompositorFrameMetadataDataView data,
cc::CompositorFrameMetadata* out) {
out->device_scale_factor = data.device_scale_factor();
if (!data.ReadRootScrollOffset(&out->root_scroll_offset))
return false;
out->page_scale_factor = data.page_scale_factor();
if (!data.ReadScrollableViewportSize(&out->scrollable_viewport_size) ||
!data.ReadRootLayerSize(&out->root_layer_size)) {
return false;
}
out->min_page_scale_factor = data.min_page_scale_factor();
out->max_page_scale_factor = data.max_page_scale_factor();
out->root_overflow_x_hidden = data.root_overflow_x_hidden();
out->root_overflow_y_hidden = data.root_overflow_y_hidden();
out->may_contain_video = data.may_contain_video();
out->is_resourceless_software_draw_with_scroll_or_animation =
data.is_resourceless_software_draw_with_scroll_or_animation();
out->top_controls_height = data.top_controls_height();
out->top_controls_shown_ratio = data.top_controls_shown_ratio();
out->bottom_controls_height = data.bottom_controls_height();
out->bottom_controls_shown_ratio = data.bottom_controls_shown_ratio();
out->root_background_color = data.root_background_color();
out->can_activate_before_dependencies =
data.can_activate_before_dependencies();
return data.ReadSelection(&out->selection) &&
data.ReadLatencyInfo(&out->latency_info) &&
data.ReadReferencedSurfaces(&out->referenced_surfaces);
}
Commit Message: (Reland) Discard compositor frames from unloaded web content
This is a reland of https://codereview.chromium.org/2707243005/ with a
small change to fix an uninitialized memory error that fails on MSAN
bots.
BUG=672847
[email protected], [email protected]
CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2731283003
Cr-Commit-Position: refs/heads/master@{#454954}
CWE ID: CWE-362 | Read(cc::mojom::CompositorFrameMetadataDataView data,
cc::CompositorFrameMetadata* out) {
out->device_scale_factor = data.device_scale_factor();
if (!data.ReadRootScrollOffset(&out->root_scroll_offset))
return false;
out->page_scale_factor = data.page_scale_factor();
if (!data.ReadScrollableViewportSize(&out->scrollable_viewport_size) ||
!data.ReadRootLayerSize(&out->root_layer_size)) {
return false;
}
out->min_page_scale_factor = data.min_page_scale_factor();
out->max_page_scale_factor = data.max_page_scale_factor();
out->root_overflow_x_hidden = data.root_overflow_x_hidden();
out->root_overflow_y_hidden = data.root_overflow_y_hidden();
out->may_contain_video = data.may_contain_video();
out->is_resourceless_software_draw_with_scroll_or_animation =
data.is_resourceless_software_draw_with_scroll_or_animation();
out->top_controls_height = data.top_controls_height();
out->top_controls_shown_ratio = data.top_controls_shown_ratio();
out->bottom_controls_height = data.bottom_controls_height();
out->bottom_controls_shown_ratio = data.bottom_controls_shown_ratio();
out->content_source_id = data.content_source_id();
out->root_background_color = data.root_background_color();
out->can_activate_before_dependencies =
data.can_activate_before_dependencies();
return data.ReadSelection(&out->selection) &&
data.ReadLatencyInfo(&out->latency_info) &&
data.ReadReferencedSurfaces(&out->referenced_surfaces);
}
| 172,393 |
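
The MSAN failure mentioned in the reland note is the instructive part: a traits Read() writes into a caller-provided struct, so any field it skips is left uninitialized — here the fix is precisely the added content_source_id assignment. A reduced illustration with made-up field names:

#include <cstdint>

struct Metadata {
    uint32_t content_source_id;  // stays garbage if Read() forgets it
    float device_scale_factor;
};

// Every destination field assigned exactly once — the invariant the reland
// restores by adding the content_source_id line.
bool ReadMetadata(uint32_t wire_source_id, float wire_scale, Metadata* out) {
    out->content_source_id = wire_source_id;
    out->device_scale_factor = wire_scale;
    return true;
}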
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int hns_xgmac_get_sset_count(int stringset)
{
if (stringset == ETH_SS_STATS)
return ARRAY_SIZE(g_xgmac_stats_string);
return 0;
}
Commit Message: net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the
following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-119 | static int hns_xgmac_get_sset_count(int stringset)
{
if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_xgmac_stats_string);
return 0;
}
| 169,401 |
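
The ethtool contract being broken: the core sizes the strings buffer from get_sset_count() (count entries of fixed width) and then lets get_strings() fill it, so both callbacks must be driven by the same table or the fill runs past the allocation. A toy illustration of the coupled pair:

#include <array>
#include <cstddef>
#include <cstring>

static const std::array<const char*, 3> kStatNames = {
    "rx_pkts", "tx_pkts", "drops"};

int GetSsetCount() { return static_cast<int>(kStatNames.size()); }

// Writes exactly GetSsetCount() fixed-width entries, sized from the same
// table the count came from, so it cannot overrun the buffer.
void GetStrings(char* buf, size_t entry_len) {
    for (size_t i = 0; i < kStatNames.size(); ++i) {
        std::strncpy(buf + i * entry_len, kStatNames[i], entry_len - 1);
        buf[i * entry_len + entry_len - 1] = '\0';
    }
}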
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: mget(struct magic_set *ms, const unsigned char *s, struct magic *m,
size_t nbytes, size_t o, unsigned int cont_level, int mode, int text,
int flip, int recursion_level, int *printed_something,
int *need_separator, int *returnval)
{
uint32_t soffset, offset = ms->offset;
uint32_t count = m->str_range;
int rv, oneed_separator, in_type;
char *sbuf, *rbuf;
union VALUETYPE *p = &ms->ms_value;
struct mlist ml;
if (recursion_level >= 20) {
file_error(ms, 0, "recursion nesting exceeded");
return -1;
}
if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o),
(uint32_t)nbytes, count) == -1)
return -1;
if ((ms->flags & MAGIC_DEBUG) != 0) {
fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%zu, "
"nbytes=%zu, count=%u)\n", m->type, m->flag, offset, o,
nbytes, count);
mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
if (m->flag & INDIR) {
int off = m->in_offset;
if (m->in_op & FILE_OPINDIRECT) {
const union VALUETYPE *q = CAST(const union VALUETYPE *,
((const void *)(s + offset + off)));
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
off = q->b;
break;
case FILE_SHORT:
off = q->h;
break;
case FILE_BESHORT:
off = (short)((q->hs[0]<<8)|(q->hs[1]));
break;
case FILE_LESHORT:
off = (short)((q->hs[1]<<8)|(q->hs[0]));
break;
case FILE_LONG:
off = q->l;
break;
case FILE_BELONG:
case FILE_BEID3:
off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)|
(q->hl[2]<<8)|(q->hl[3]));
break;
case FILE_LEID3:
case FILE_LELONG:
off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)|
(q->hl[1]<<8)|(q->hl[0]));
break;
case FILE_MELONG:
off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)|
(q->hl[3]<<8)|(q->hl[2]));
break;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect offs=%u\n", off);
}
switch (in_type = cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
if (nbytes < offset || nbytes < (offset + 1))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->b & off;
break;
case FILE_OPOR:
offset = p->b | off;
break;
case FILE_OPXOR:
offset = p->b ^ off;
break;
case FILE_OPADD:
offset = p->b + off;
break;
case FILE_OPMINUS:
offset = p->b - off;
break;
case FILE_OPMULTIPLY:
offset = p->b * off;
break;
case FILE_OPDIVIDE:
offset = p->b / off;
break;
case FILE_OPMODULO:
offset = p->b % off;
break;
}
} else
offset = p->b;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BESHORT:
if (nbytes < offset || nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) %
off;
break;
}
} else
offset = (short)((p->hs[0]<<8)|
(p->hs[1]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LESHORT:
if (nbytes < offset || nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) %
off;
break;
}
} else
offset = (short)((p->hs[1]<<8)|
(p->hs[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_SHORT:
if (nbytes < offset || nbytes < (offset + 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->h & off;
break;
case FILE_OPOR:
offset = p->h | off;
break;
case FILE_OPXOR:
offset = p->h ^ off;
break;
case FILE_OPADD:
offset = p->h + off;
break;
case FILE_OPMINUS:
offset = p->h - off;
break;
case FILE_OPMULTIPLY:
offset = p->h * off;
break;
case FILE_OPDIVIDE:
offset = p->h / off;
break;
case FILE_OPMODULO:
offset = p->h % off;
break;
}
}
else
offset = p->h;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BELONG:
case FILE_BEID3:
if (nbytes < offset || nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LELONG:
case FILE_LEID3:
if (nbytes < offset || nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_MELONG:
if (nbytes < offset || nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LONG:
if (nbytes < offset || nbytes < (offset + 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->l & off;
break;
case FILE_OPOR:
offset = p->l | off;
break;
case FILE_OPXOR:
offset = p->l ^ off;
break;
case FILE_OPADD:
offset = p->l + off;
break;
case FILE_OPMINUS:
offset = p->l - off;
break;
case FILE_OPMULTIPLY:
offset = p->l * off;
break;
case FILE_OPDIVIDE:
offset = p->l / off;
break;
case FILE_OPMODULO:
offset = p->l % off;
break;
}
} else
offset = p->l;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
default:
break;
}
switch (in_type) {
case FILE_LEID3:
case FILE_BEID3:
offset = ((((offset >> 0) & 0x7f) << 0) |
(((offset >> 8) & 0x7f) << 7) |
(((offset >> 16) & 0x7f) << 14) |
(((offset >> 24) & 0x7f) << 21)) + 10;
break;
default:
break;
}
if (m->flag & INDIROFFADD) {
offset += ms->c.li[cont_level-1].off;
if (offset == 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr,
"indirect *zero* offset\n");
return 0;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect +offs=%u\n", offset);
}
if (mcopy(ms, p, m->type, 0, s, offset, nbytes, count) == -1)
return -1;
ms->offset = offset;
if ((ms->flags & MAGIC_DEBUG) != 0) {
mdebug(offset, (char *)(void *)p,
sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
}
/* Verify we have enough data to match magic type */
switch (m->type) {
case FILE_BYTE:
        if (nbytes < (offset + 1)) /* should always be true */
return 0;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
if (nbytes < (offset + 2))
return 0;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
if (nbytes < (offset + 4))
return 0;
break;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
if (nbytes < (offset + 8))
return 0;
break;
case FILE_STRING:
case FILE_PSTRING:
case FILE_SEARCH:
if (nbytes < (offset + m->vallen))
return 0;
break;
case FILE_REGEX:
if (nbytes < offset)
return 0;
break;
case FILE_INDIRECT:
if (nbytes < offset)
return 0;
sbuf = ms->o.buf;
soffset = ms->offset;
ms->o.buf = NULL;
ms->offset = 0;
rv = file_softmagic(ms, s + offset, nbytes - offset,
BINTEST, text);
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
rbuf = ms->o.buf;
ms->o.buf = sbuf;
ms->offset = soffset;
if (rv == 1) {
if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 &&
file_printf(ms, F(m->desc, "%u"), offset) == -1)
return -1;
if (file_printf(ms, "%s", rbuf) == -1)
return -1;
free(rbuf);
}
return rv;
case FILE_USE:
if (nbytes < offset)
return 0;
sbuf = m->value.s;
if (*sbuf == '^') {
sbuf++;
flip = !flip;
}
if (file_magicfind(ms, sbuf, &ml) == -1) {
file_error(ms, 0, "cannot find entry `%s'", sbuf);
return -1;
}
oneed_separator = *need_separator;
if (m->flag & NOSPACE)
*need_separator = 0;
rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o,
mode, text, flip, recursion_level, printed_something,
need_separator, returnval);
if (rv != 1)
*need_separator = oneed_separator;
return rv;
case FILE_NAME:
if (file_printf(ms, "%s", m->desc) == -1)
return -1;
return 1;
case FILE_DEFAULT: /* nothing to check */
case FILE_CLEAR:
default:
break;
}
if (!mconvert(ms, m, flip))
return 0;
return 1;
}
Commit Message: PR/313: Aaron Reffett: Check properly for exceeding the offset.
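
The fixed body below funnels every bound test through a single OFFSET_OOB check instead of repeating `nbytes < offset || nbytes < (offset + N)` pairs. A sketch of the overflow-safe shape such a check can take (the actual macro definition sits outside this snippet, so treat the form as assumed):

#include <cstddef>

// "want bytes at offset within avail": testing avail - offset instead of
// offset + want means nothing can wrap once avail >= offset is known.
bool offset_oob(size_t avail, size_t offset, size_t want) {
    return avail < offset || want > avail - offset;
}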
CWE ID: CWE-119 | mget(struct magic_set *ms, const unsigned char *s, struct magic *m,
size_t nbytes, size_t o, unsigned int cont_level, int mode, int text,
int flip, int recursion_level, int *printed_something,
int *need_separator, int *returnval)
{
uint32_t soffset, offset = ms->offset;
uint32_t count = m->str_range;
int rv, oneed_separator, in_type;
char *sbuf, *rbuf;
union VALUETYPE *p = &ms->ms_value;
struct mlist ml;
if (recursion_level >= 20) {
file_error(ms, 0, "recursion nesting exceeded");
return -1;
}
if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o),
(uint32_t)nbytes, count) == -1)
return -1;
if ((ms->flags & MAGIC_DEBUG) != 0) {
fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%zu, "
"nbytes=%zu, count=%u)\n", m->type, m->flag, offset, o,
nbytes, count);
mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
if (m->flag & INDIR) {
int off = m->in_offset;
if (m->in_op & FILE_OPINDIRECT) {
const union VALUETYPE *q = CAST(const union VALUETYPE *,
((const void *)(s + offset + off)));
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
off = q->b;
break;
case FILE_SHORT:
off = q->h;
break;
case FILE_BESHORT:
off = (short)((q->hs[0]<<8)|(q->hs[1]));
break;
case FILE_LESHORT:
off = (short)((q->hs[1]<<8)|(q->hs[0]));
break;
case FILE_LONG:
off = q->l;
break;
case FILE_BELONG:
case FILE_BEID3:
off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)|
(q->hl[2]<<8)|(q->hl[3]));
break;
case FILE_LEID3:
case FILE_LELONG:
off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)|
(q->hl[1]<<8)|(q->hl[0]));
break;
case FILE_MELONG:
off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)|
(q->hl[3]<<8)|(q->hl[2]));
break;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect offs=%u\n", off);
}
switch (in_type = cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
if (OFFSET_OOB(nbytes, offset, 1))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->b & off;
break;
case FILE_OPOR:
offset = p->b | off;
break;
case FILE_OPXOR:
offset = p->b ^ off;
break;
case FILE_OPADD:
offset = p->b + off;
break;
case FILE_OPMINUS:
offset = p->b - off;
break;
case FILE_OPMULTIPLY:
offset = p->b * off;
break;
case FILE_OPDIVIDE:
offset = p->b / off;
break;
case FILE_OPMODULO:
offset = p->b % off;
break;
}
} else
offset = p->b;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[0]<<8)|
(p->hs[1])) %
off;
break;
}
} else
offset = (short)((p->hs[0]<<8)|
(p->hs[1]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) &
off;
break;
case FILE_OPOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) |
off;
break;
case FILE_OPXOR:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) ^
off;
break;
case FILE_OPADD:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) +
off;
break;
case FILE_OPMINUS:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) /
off;
break;
case FILE_OPMODULO:
offset = (short)((p->hs[1]<<8)|
(p->hs[0])) %
off;
break;
}
} else
offset = (short)((p->hs[1]<<8)|
(p->hs[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_SHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->h & off;
break;
case FILE_OPOR:
offset = p->h | off;
break;
case FILE_OPXOR:
offset = p->h ^ off;
break;
case FILE_OPADD:
offset = p->h + off;
break;
case FILE_OPMINUS:
offset = p->h - off;
break;
case FILE_OPMULTIPLY:
offset = p->h * off;
break;
case FILE_OPDIVIDE:
offset = p->h / off;
break;
case FILE_OPMODULO:
offset = p->h % off;
break;
}
}
else
offset = p->h;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BELONG:
case FILE_BEID3:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[0]<<24)|
(p->hl[1]<<16)|
(p->hl[2]<<8)|
(p->hl[3]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LELONG:
case FILE_LEID3:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[3]<<24)|
(p->hl[2]<<16)|
(p->hl[1]<<8)|
(p->hl[0]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_MELONG:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) &
off;
break;
case FILE_OPOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) |
off;
break;
case FILE_OPXOR:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) ^
off;
break;
case FILE_OPADD:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) +
off;
break;
case FILE_OPMINUS:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) -
off;
break;
case FILE_OPMULTIPLY:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) *
off;
break;
case FILE_OPDIVIDE:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) /
off;
break;
case FILE_OPMODULO:
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2])) %
off;
break;
}
} else
offset = (int32_t)((p->hl[1]<<24)|
(p->hl[0]<<16)|
(p->hl[3]<<8)|
(p->hl[2]));
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LONG:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->l & off;
break;
case FILE_OPOR:
offset = p->l | off;
break;
case FILE_OPXOR:
offset = p->l ^ off;
break;
case FILE_OPADD:
offset = p->l + off;
break;
case FILE_OPMINUS:
offset = p->l - off;
break;
case FILE_OPMULTIPLY:
offset = p->l * off;
break;
case FILE_OPDIVIDE:
offset = p->l / off;
break;
case FILE_OPMODULO:
offset = p->l % off;
break;
}
} else
offset = p->l;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
default:
break;
}
switch (in_type) {
case FILE_LEID3:
case FILE_BEID3:
offset = ((((offset >> 0) & 0x7f) << 0) |
(((offset >> 8) & 0x7f) << 7) |
(((offset >> 16) & 0x7f) << 14) |
(((offset >> 24) & 0x7f) << 21)) + 10;
break;
default:
break;
}
if (m->flag & INDIROFFADD) {
offset += ms->c.li[cont_level-1].off;
if (offset == 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr,
"indirect *zero* offset\n");
return 0;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect +offs=%u\n", offset);
}
if (mcopy(ms, p, m->type, 0, s, offset, nbytes, count) == -1)
return -1;
ms->offset = offset;
if ((ms->flags & MAGIC_DEBUG) != 0) {
mdebug(offset, (char *)(void *)p,
sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
}
/* Verify we have enough data to match magic type */
switch (m->type) {
case FILE_BYTE:
if (OFFSET_OOB(nbytes, offset, 1))
return 0;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
break;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
if (OFFSET_OOB(nbytes, offset, 8))
return 0;
break;
case FILE_STRING:
case FILE_PSTRING:
case FILE_SEARCH:
if (OFFSET_OOB(nbytes, offset, m->vallen))
return 0;
break;
case FILE_REGEX:
if (OFFSET_OOB(nbytes, offset, 0))
return 0;
break;
case FILE_INDIRECT:
if (OFFSET_OOB(nbytes, offset, 0))
return 0;
sbuf = ms->o.buf;
soffset = ms->offset;
ms->o.buf = NULL;
ms->offset = 0;
rv = file_softmagic(ms, s + offset, nbytes - offset,
BINTEST, text);
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
rbuf = ms->o.buf;
ms->o.buf = sbuf;
ms->offset = soffset;
if (rv == 1) {
if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 &&
file_printf(ms, F(m->desc, "%u"), offset) == -1)
return -1;
if (file_printf(ms, "%s", rbuf) == -1)
return -1;
free(rbuf);
}
return rv;
case FILE_USE:
if (OFFSET_OOB(nbytes, offset, 0))
return 0;
sbuf = m->value.s;
if (*sbuf == '^') {
sbuf++;
flip = !flip;
}
if (file_magicfind(ms, sbuf, &ml) == -1) {
file_error(ms, 0, "cannot find entry `%s'", sbuf);
return -1;
}
oneed_separator = *need_separator;
if (m->flag & NOSPACE)
*need_separator = 0;
rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o,
mode, text, flip, recursion_level, printed_something,
need_separator, returnval);
if (rv != 1)
*need_separator = oneed_separator;
return rv;
case FILE_NAME:
if (file_printf(ms, "%s", m->desc) == -1)
return -1;
return 1;
case FILE_DEFAULT: /* nothing to check */
case FILE_CLEAR:
default:
break;
}
if (!mconvert(ms, m, flip))
return 0;
return 1;
}
| 166,423 |
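
The fix in the row above threads one idea through every arm of mget(): before a magic type is read at offset, the remaining nbytes is validated with a comparison ordered so the arithmetic cannot wrap. A minimal standalone sketch of that pattern follows; the OFFSET_OOB macro here is an assumption modeled on file's softmagic.c rather than a verbatim copy, and the buffer sizes are invented for illustration.

/*
 * Sketch only, not file(1) source. Shows why the *ordering* of the
 * bounds check matters for CWE-119: the naive "offset + want > nbytes"
 * form can wrap around uint32_t and accept a wild offset, while the
 * rearranged form subtracts only after proving offset <= nbytes.
 */
#include <stdint.h>
#include <stdio.h>

/* Naive check: the addition wraps for large offsets and passes. */
static int oob_naive(uint32_t nbytes, uint32_t offset, uint32_t want)
{
    return offset + want > nbytes;
}

/* Overflow-safe form (assumption, modeled on softmagic.c). */
#define OFFSET_OOB(n, o, i)  ((n) < (uint32_t)(o) || (i) > ((n) - (o)))

int main(void)
{
    uint32_t nbytes = 100;          /* buffer holds 100 bytes   */
    uint32_t offset = 0xfffffffcU;  /* attacker-derived offset  */
    uint32_t want   = 8;            /* bytes the match would read */

    /* offset + want wraps to 4, so the naive test says "ok". */
    printf("naive : %s\n", oob_naive(nbytes, offset, want) ? "OOB" : "ok");
    /* The rearranged test rejects the offset before any read. */
    printf("fixed : %s\n", OFFSET_OOB(nbytes, offset, want) ? "OOB" : "ok");
    return 0;
}

Compiled and run, the naive form reports the wild offset as in bounds because the 32-bit addition wraps, while the rearranged form rejects it before any memory is touched.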
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response containing just the fixed code, with no explanation. | Code: static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
Commit Message: crypto: user - fix info leaks in report API
Three errors resulting in kernel memory disclosure:
1/ The structures used for the netlink based crypto algorithm report API
are located on the stack. As snprintf() does not fill the remainder of
the buffer with null bytes, those stack bytes will be disclosed to users
of the API. Switch to strncpy() to fix this.
2/ crypto_report_one() does not initialize all fields of struct
crypto_user_alg. Fix this to fix the heap info leak.
3/ For the module name we should copy only as many bytes as
module_name() returns -- not as many as the destination buffer could
hold. But the current code does not and therefore copies random data
from behind the end of the module name, as the module name is always
shorter than CRYPTO_MAX_ALG_NAME.
Also switch to use strncpy() to copy the algorithm's name and
driver_name. They are strings, after all.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Steffen Klassert <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
CWE ID: CWE-310 | static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
strncpy(rhash.type, "shash", sizeof(rhash.type));
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
| 166,072 |
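
Since the row above turns on a single substitution, a standalone sketch of the disclosure mechanism may be useful; the struct, sizes, and 0xAA marker below are illustrative and not kernel code. snprintf() stops writing after the terminating NUL, leaving whatever was previously in the rest of the field, while strncpy() zero-fills the remainder of the destination, which is the property the fix depends on (the source strings here are short enough that strncpy's lack of guaranteed termination is not in play).

/*
 * Sketch only, not kernel source. The report struct is copied to its
 * consumer in full, so any bytes a formatter leaves untouched disclose
 * stale memory contents (the CWE-310 leak the commit describes).
 */
#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 16

struct report {
    char type[NAME_MAX_LEN];   /* copied verbatim to the consumer */
};

static void dump(const char *label, const struct report *r)
{
    printf("%-8s:", label);
    for (size_t i = 0; i < sizeof(r->type); i++)
        printf(" %02x", (unsigned char)r->type[i]);
    printf("\n");
}

int main(void)
{
    struct report r;

    /* Simulate stale stack data left behind by earlier calls. */
    memset(&r, 0xAA, sizeof(r));
    snprintf(r.type, sizeof(r.type), "%s", "shash");
    dump("snprintf", &r);      /* bytes after the NUL are still 0xAA */

    memset(&r, 0xAA, sizeof(r));
    strncpy(r.type, "shash", sizeof(r.type));
    dump("strncpy", &r);       /* remainder is zero-filled */
    return 0;
}

Running the sketch shows 0xAA bytes surviving after the snprintf() call and a zero-filled tail after strncpy(): the difference between leaking stale memory to the API's users and not.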
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response containing just the fixed code, with no explanation. | Code: static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
target_ulong pc_start)
{
int b, prefixes;
int shift;
TCGMemOp ot, aflag, dflag;
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
int rex_w, rex_r;
s->pc_start = s->pc = pc_start;
prefixes = 0;
s->override = -1;
rex_w = -1;
rex_r = 0;
#ifdef TARGET_X86_64
s->rex_x = 0;
s->rex_b = 0;
x86_64_hregs = 0;
#endif
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
next_byte:
b = cpu_ldub_code(env, s->pc);
s->pc++;
/* Collect prefixes. */
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
goto next_byte;
case 0x2e:
s->override = R_CS;
goto next_byte;
case 0x36:
s->override = R_SS;
goto next_byte;
case 0x3e:
s->override = R_DS;
goto next_byte;
case 0x26:
s->override = R_ES;
goto next_byte;
case 0x64:
s->override = R_FS;
goto next_byte;
case 0x65:
s->override = R_GS;
goto next_byte;
case 0x66:
prefixes |= PREFIX_DATA;
goto next_byte;
case 0x67:
prefixes |= PREFIX_ADR;
goto next_byte;
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
/* REX prefix */
rex_w = (b >> 3) & 1;
rex_r = (b & 0x4) << 1;
s->rex_x = (b & 0x2) << 2;
REX_B(s) = (b & 0x1) << 3;
x86_64_hregs = 1; /* select uniform byte register addressing */
goto next_byte;
}
break;
#endif
case 0xc5: /* 2-byte VEX */
case 0xc4: /* 3-byte VEX */
/* VEX prefixes cannot be used except in 32-bit mode.
Otherwise the instruction is LES or LDS. */
if (s->code32 && !s->vm86) {
static const int pp_prefix[4] = {
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
};
int vex3, vex2 = cpu_ldub_code(env, s->pc);
if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
/* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
otherwise the instruction is LES or LDS. */
break;
}
s->pc++;
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
| PREFIX_LOCK | PREFIX_DATA)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
if (x86_64_hregs) {
goto illegal_op;
}
#endif
rex_r = (~vex2 >> 4) & 8;
if (b == 0xc5) {
vex3 = vex2;
b = cpu_ldub_code(env, s->pc++);
} else {
#ifdef TARGET_X86_64
s->rex_x = (~vex2 >> 3) & 8;
s->rex_b = (~vex2 >> 2) & 8;
#endif
vex3 = cpu_ldub_code(env, s->pc++);
rex_w = (vex3 >> 7) & 1;
switch (vex2 & 0x1f) {
case 0x01: /* Implied 0f leading opcode bytes. */
b = cpu_ldub_code(env, s->pc++) | 0x100;
break;
case 0x02: /* Implied 0f 38 leading opcode bytes. */
b = 0x138;
break;
case 0x03: /* Implied 0f 3a leading opcode bytes. */
b = 0x13a;
break;
default: /* Reserved for future use. */
goto unknown_op;
}
}
s->vex_v = (~vex3 >> 3) & 0xf;
s->vex_l = (vex3 >> 2) & 1;
prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
}
break;
}
/* Post-process prefixes. */
if (CODE64(s)) {
/* In 64-bit mode, the default data size is 32-bit. Select 64-bit
data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
over 0x66 if both are present. */
dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
/* In 64-bit mode, 0x67 selects 32-bit addressing. */
aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
} else {
/* In 16/32-bit mode, 0x66 selects the opposite data size. */
if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
dflag = MO_32;
} else {
dflag = MO_16;
}
/* In 16/32-bit mode, 0x67 selects the opposite addressing. */
if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
aflag = MO_32;
} else {
aflag = MO_16;
}
}
s->prefix = prefixes;
s->aflag = aflag;
s->dflag = dflag;
/* now check op code */
reswitch:
switch(b) {
case 0x0f:
/**************************/
/* extended op code */
b = cpu_ldub_code(env, s->pc++) | 0x100;
goto reswitch;
/**************************/
/* arith & logic */
case 0x00 ... 0x05:
case 0x08 ... 0x0d:
case 0x10 ... 0x15:
case 0x18 ... 0x1d:
case 0x20 ... 0x25:
case 0x28 ... 0x2d:
case 0x30 ... 0x35:
case 0x38 ... 0x3d:
{
int op, f, val;
op = (b >> 3) & 7;
f = (b >> 1) & 3;
ot = mo_b_d(b, dflag);
switch(f) {
case 0: /* OP Ev, Gv */
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else if (op == OP_XORL && rm == reg) {
xor_zero:
/* xor reg, reg optimisation */
set_cc_op(s, CC_OP_CLR);
tcg_gen_movi_tl(cpu_T0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
} else {
opreg = rm;
}
gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
} else {
gen_op_mov_v_reg(ot, cpu_T1, rm);
}
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, OR_EAX);
break;
}
}
break;
case 0x82:
if (CODE64(s))
goto illegal_op;
case 0x80: /* GRP1 */
case 0x81:
case 0x83:
{
int val;
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (b == 0x83)
s->rip_offset = 1;
else
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = rm;
}
switch(b) {
default:
case 0x80:
case 0x81:
case 0x82:
val = insn_get(env, s, ot);
break;
case 0x83:
val = (int8_t)insn_get(env, s, MO_8);
break;
}
tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, opreg);
}
break;
/**************************/
/* inc, dec, and other misc arith */
case 0x40 ... 0x47: /* inc Gv */
ot = dflag;
gen_inc(s, ot, OR_EAX + (b & 7), 1);
break;
case 0x48 ... 0x4f: /* dec Gv */
ot = dflag;
gen_inc(s, ot, OR_EAX + (b & 7), -1);
break;
case 0xf6: /* GRP3 */
case 0xf7:
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (op == 0) {
s->rip_offset = insn_const_size(ot);
}
gen_lea_modrm(env, s, modrm);
/* For those below that handle locked memory, don't load here. */
if (!(s->prefix & PREFIX_LOCK)
|| op != 2) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* test */
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 2: /* not */
if (s->prefix & PREFIX_LOCK) {
if (mod == 3) {
goto illegal_op;
}
tcg_gen_movi_tl(cpu_T0, ~0);
tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
} else {
tcg_gen_not_tl(cpu_T0, cpu_T0);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
case 3: /* neg */
if (s->prefix & PREFIX_LOCK) {
TCGLabel *label1;
TCGv a0, t0, t1, t2;
if (mod == 3) {
goto illegal_op;
}
a0 = tcg_temp_local_new();
t0 = tcg_temp_local_new();
label1 = gen_new_label();
tcg_gen_mov_tl(a0, cpu_A0);
tcg_gen_mov_tl(t0, cpu_T0);
gen_set_label(label1);
t1 = tcg_temp_new();
t2 = tcg_temp_new();
tcg_gen_mov_tl(t2, t0);
tcg_gen_neg_tl(t1, t0);
tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
s->mem_index, ot | MO_LE);
tcg_temp_free(t1);
tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
tcg_temp_free(t2);
tcg_temp_free(a0);
tcg_gen_mov_tl(cpu_T0, t0);
tcg_temp_free(t0);
} else {
tcg_gen_neg_tl(cpu_T0, cpu_T0);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
gen_op_update_neg_cc();
set_cc_op(s, CC_OP_SUBB + ot);
break;
case 4: /* mul */
switch(ot) {
case MO_8:
gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
}
break;
case 5: /* imul */
switch(ot) {
case MO_8:
gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
}
break;
case 6: /* div */
switch(ot) {
case MO_8:
gen_helper_divb_AL(cpu_env, cpu_T0);
break;
case MO_16:
gen_helper_divw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
gen_helper_divl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
gen_helper_divq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
case 7: /* idiv */
switch(ot) {
case MO_8:
gen_helper_idivb_AL(cpu_env, cpu_T0);
break;
case MO_16:
gen_helper_idivw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
gen_helper_idivl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
gen_helper_idivq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
default:
goto unknown_op;
}
break;
case 0xfe: /* GRP4 */
case 0xff: /* GRP5 */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (op >= 2 && b == 0xfe) {
goto unknown_op;
}
if (CODE64(s)) {
if (op == 2 || op == 4) {
/* operand size for jumps is 64 bit */
ot = MO_64;
} else if (op == 3 || op == 5) {
ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
} else if (op == 6) {
/* default push size is 64 bit */
ot = mo_pushpop(s, dflag);
}
}
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if (op >= 2 && op != 3 && op != 5)
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* inc Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, 1);
break;
case 1: /* dec Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, -1);
break;
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
if (dflag == MO_16) {
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
next_eip = s->pc - s->cs_base;
tcg_gen_movi_tl(cpu_T1, next_eip);
gen_push_v(s, cpu_T1);
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 3: /* lcall Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_tl(s->pc - s->cs_base));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
gen_eob(s);
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 5: /* ljmp Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T1);
}
gen_eob(s);
break;
case 6: /* push Ev */
gen_push_v(s, cpu_T0);
break;
default:
goto unknown_op;
}
break;
case 0x84: /* test Ev, Gv */
case 0x85:
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0xa8: /* test eAX, Iv */
case 0xa9:
ot = mo_b_d(b, dflag);
val = insn_get(env, s, ot);
gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0x98: /* CWDE/CBW */
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
break;
#endif
case MO_32:
gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
break;
case MO_16:
gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
tcg_abort();
}
break;
case 0x99: /* CDQ/CWD */
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
break;
#endif
case MO_32:
gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
break;
default:
tcg_abort();
}
break;
case 0x1af: /* imul Gv, Ev */
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
} else if (b == 0x6b) {
val = (int8_t)insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T1, val);
} else {
gen_op_mov_v_reg(ot, cpu_T1, reg);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
break;
#endif
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
break;
default:
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
}
set_cc_op(s, CC_OP_MULB + ot);
break;
case 0x1c0:
case 0x1c1: /* xadd Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
gen_op_mov_v_reg(ot, cpu_T0, reg);
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, cpu_T1, rm);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, reg, cpu_T1);
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
if (s->prefix & PREFIX_LOCK) {
tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
gen_op_mov_reg_v(ot, reg, cpu_T1);
}
gen_op_update2_cc();
set_cc_op(s, CC_OP_ADDB + ot);
break;
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
TCGv oldv, newv, cmpv;
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
oldv = tcg_temp_new();
newv = tcg_temp_new();
cmpv = tcg_temp_new();
gen_op_mov_v_reg(ot, newv, reg);
tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
if (s->prefix & PREFIX_LOCK) {
if (mod == 3) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
s->mem_index, ot | MO_LE);
gen_op_mov_reg_v(ot, R_EAX, oldv);
} else {
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, oldv, rm);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, oldv, cpu_A0);
rm = 0; /* avoid warning */
}
gen_extu(ot, oldv);
gen_extu(ot, cmpv);
/* store value = (old == cmp ? new : old); */
tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
if (mod == 3) {
gen_op_mov_reg_v(ot, R_EAX, oldv);
gen_op_mov_reg_v(ot, rm, newv);
} else {
/* Perform an unconditional store cycle like physical cpu;
must be before changing accumulator to ensure
idempotency if the store faults and the instruction
is restarted */
gen_op_st_v(s, ot, newv, cpu_A0);
gen_op_mov_reg_v(ot, R_EAX, oldv);
}
}
tcg_gen_mov_tl(cpu_cc_src, oldv);
tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
set_cc_op(s, CC_OP_SUBB + ot);
tcg_temp_free(oldv);
tcg_temp_free(newv);
tcg_temp_free(cmpv);
}
break;
case 0x1c7: /* cmpxchg8b */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
#ifdef TARGET_X86_64
if (dflag == MO_64) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
}
} else
#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
}
}
set_cc_op(s, CC_OP_EFLAGS);
break;
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
gen_push_v(s, cpu_T0);
break;
case 0x58 ... 0x5f: /* pop */
ot = gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
break;
case 0x60: /* pusha */
if (CODE64(s))
goto illegal_op;
gen_pusha(s);
break;
case 0x61: /* popa */
if (CODE64(s))
goto illegal_op;
gen_popa(s);
break;
case 0x68: /* push Iv */
case 0x6a:
ot = mo_pushpop(s, dflag);
if (b == 0x68)
val = insn_get(env, s, ot);
else
val = (int8_t)insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T0, val);
gen_push_v(s, cpu_T0);
break;
case 0x8f: /* pop Ev */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
ot = gen_pop_T0(s);
if (mod == 3) {
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
rm = (modrm & 7) | REX_B(s);
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
s->popl_esp_hack = 0;
gen_pop_update(s, ot);
}
break;
case 0xc8: /* enter */
{
int level;
val = cpu_lduw_code(env, s->pc);
s->pc += 2;
level = cpu_ldub_code(env, s->pc++);
gen_enter(s, val, level);
}
break;
case 0xc9: /* leave */
gen_leave(s);
break;
case 0x06: /* push es */
case 0x0e: /* push cs */
case 0x16: /* push ss */
case 0x1e: /* push ds */
if (CODE64(s))
goto illegal_op;
gen_op_movl_T0_seg(b >> 3);
gen_push_v(s, cpu_T0);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_op_movl_T0_seg((b >> 3) & 7);
gen_push_v(s, cpu_T0);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
case 0x1f: /* pop ds */
if (CODE64(s))
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
}
}
break;
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/**************************/
/* mov */
case 0x88:
case 0x89: /* mov Gv, Ev */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
}
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T0, val);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
}
break;
case 0x8a:
case 0x8b: /* mov Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x8e: /* mov seg, Gv */
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_movl_seg_T0(s, reg);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
}
}
break;
case 0x8c: /* mov Gv, seg */
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (reg >= 6)
goto illegal_op;
gen_op_movl_T0_seg(reg);
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0x1b6: /* movzbS Gv, Eb */
case 0x1b7: /* movzwS Gv, Eb */
case 0x1be: /* movsbS Gv, Eb */
case 0x1bf: /* movswS Gv, Eb */
{
TCGMemOp d_ot;
TCGMemOp s_ot;
/* d_ot is the size of destination */
d_ot = dflag;
/* ot is the size of source */
ot = (b & 1) + MO_8;
/* s_ot is the sign+size of source */
s_ot = b & 8 ? MO_SIGN | ot : ot;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
switch (s_ot) {
case MO_UB:
tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
break;
case MO_SB:
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
break;
case MO_UW:
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
break;
default:
case MO_SW:
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
break;
}
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
}
break;
case 0x8d: /* lea */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
{
AddressParts a = gen_lea_modrm_0(env, s, modrm);
TCGv ea = gen_lea_modrm_1(a);
gen_lea_v_seg(s, s->aflag, ea, -1, -1);
gen_op_mov_reg_v(dflag, reg, cpu_A0);
}
break;
case 0xa0: /* mov EAX, Ov */
case 0xa1:
case 0xa2: /* mov Ov, EAX */
case 0xa3:
{
target_ulong offset_addr;
ot = mo_b_d(b, dflag);
switch (s->aflag) {
#ifdef TARGET_X86_64
case MO_64:
offset_addr = cpu_ldq_code(env, s->pc);
s->pc += 8;
break;
#endif
default:
offset_addr = insn_get(env, s, s->aflag);
break;
}
tcg_gen_movi_tl(cpu_A0, offset_addr);
gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
} else {
gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
}
break;
case 0xd7: /* xlat */
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
val = insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T0, val);
gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
if (dflag == MO_64) {
uint64_t tmp;
/* 64 bit case */
tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, tmp);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
ot = dflag;
val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, val);
gen_op_mov_reg_v(ot, reg, cpu_T0);
}
break;
case 0x91 ... 0x97: /* xchg R, EAX */
do_xchg_reg_eax:
ot = dflag;
reg = (b & 7) | REX_B(s);
rm = R_EAX;
goto do_xchg_reg;
case 0x86:
case 0x87: /* xchg Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
do_xchg_reg:
gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_op_mov_v_reg(ot, cpu_T1, rm);
gen_op_mov_reg_v(ot, rm, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T1);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_mov_v_reg(ot, cpu_T0, reg);
/* for xchg, lock is implicit */
tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
gen_op_mov_reg_v(ot, reg, cpu_T1);
}
break;
case 0xc4: /* les Gv */
/* In CODE64 this is VEX3; see above. */
op = R_ES;
goto do_lxx;
case 0xc5: /* lds Gv */
/* In CODE64 this is VEX2; see above. */
op = R_DS;
goto do_lxx;
case 0x1b2: /* lss Gv */
op = R_SS;
goto do_lxx;
case 0x1b4: /* lfs Gv */
op = R_FS;
goto do_lxx;
case 0x1b5: /* lgs Gv */
op = R_GS;
do_lxx:
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(ot, reg, cpu_T1);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/************************/
/* shifts */
case 0xc0:
case 0xc1:
/* shift Ev,Ib */
shift = 2;
grp2:
{
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
if (mod != 3) {
if (shift == 2) {
s->rip_offset = 1;
}
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = (modrm & 7) | REX_B(s);
}
/* simpler op */
if (shift == 0) {
gen_shift(s, op, ot, opreg, OR_ECX);
} else {
if (shift == 2) {
shift = cpu_ldub_code(env, s->pc++);
}
gen_shifti(s, op, ot, opreg, shift);
}
}
break;
case 0xd0:
case 0xd1:
/* shift Ev,1 */
shift = 1;
goto grp2;
case 0xd2:
case 0xd3:
/* shift Ev,cl */
shift = 0;
goto grp2;
case 0x1a4: /* shld imm */
op = 0;
shift = 1;
goto do_shiftd;
case 0x1a5: /* shld cl */
op = 0;
shift = 0;
goto do_shiftd;
case 0x1ac: /* shrd imm */
op = 1;
shift = 1;
goto do_shiftd;
case 0x1ad: /* shrd cl */
op = 1;
shift = 0;
do_shiftd:
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = rm;
}
gen_op_mov_v_reg(ot, cpu_T1, reg);
if (shift) {
TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
tcg_temp_free(imm);
} else {
gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
}
break;
/************************/
/* floats */
case 0xd8 ... 0xdf:
if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
/* XXX: what to do if illegal op ? */
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
gen_lea_modrm(env, s, modrm);
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
case 0x20 ... 0x27: /* fxxxl */
case 0x30 ... 0x37: /* fixxx */
{
int op1;
op1 = op & 7;
switch(op >> 4) {
case 0:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LESW);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
}
gen_helper_fp_arith_ST0_FT0(op1);
if (op1 == 3) {
/* fcomp needs pop */
gen_helper_fpop(cpu_env);
}
}
break;
case 0x08: /* flds */
case 0x0a: /* fsts */
case 0x0b: /* fstps */
case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
switch(op & 7) {
case 0:
switch(op >> 4) {
case 0:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LESW);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
}
break;
case 1:
/* XXX: the corresponding CPUID bit must be tested ! */
switch(op >> 4) {
case 1:
gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
break;
case 3:
default:
gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
}
gen_helper_fpop(cpu_env);
break;
default:
switch(op >> 4) {
case 0:
gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 1:
gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
break;
case 3:
default:
gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
}
if ((op & 7) == 3)
gen_helper_fpop(cpu_env);
break;
}
break;
case 0x0c: /* fldenv mem */
gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0d: /* fldcw mem */
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
break;
case 0x0e: /* fnstenv mem */
gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0f: /* fnstcw mem */
gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
case 0x1d: /* fldt mem */
gen_helper_fldt_ST0(cpu_env, cpu_A0);
break;
case 0x1f: /* fstpt mem */
gen_helper_fstt_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* frstor mem */
gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2e: /* fnsave mem */
gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2f: /* fnstsw mem */
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
case 0x3c: /* fbld */
gen_helper_fbld_ST0(cpu_env, cpu_A0);
break;
case 0x3e: /* fbstp */
gen_helper_fbst_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
} else {
/* register float ops */
opreg = rm;
switch(op) {
case 0x08: /* fld sti */
gen_helper_fpush(cpu_env);
gen_helper_fmov_ST0_STN(cpu_env,
tcg_const_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
gen_helper_fwait(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x0c: /* grp d9/4 */
switch(rm) {
case 0: /* fchs */
gen_helper_fchs_ST0(cpu_env);
break;
case 1: /* fabs */
gen_helper_fabs_ST0(cpu_env);
break;
case 4: /* ftst */
gen_helper_fldz_FT0(cpu_env);
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 5: /* fxam */
gen_helper_fxam_ST0(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x0d: /* grp d9/5 */
{
switch(rm) {
case 0:
gen_helper_fpush(cpu_env);
gen_helper_fld1_ST0(cpu_env);
break;
case 1:
gen_helper_fpush(cpu_env);
gen_helper_fldl2t_ST0(cpu_env);
break;
case 2:
gen_helper_fpush(cpu_env);
gen_helper_fldl2e_ST0(cpu_env);
break;
case 3:
gen_helper_fpush(cpu_env);
gen_helper_fldpi_ST0(cpu_env);
break;
case 4:
gen_helper_fpush(cpu_env);
gen_helper_fldlg2_ST0(cpu_env);
break;
case 5:
gen_helper_fpush(cpu_env);
gen_helper_fldln2_ST0(cpu_env);
break;
case 6:
gen_helper_fpush(cpu_env);
gen_helper_fldz_ST0(cpu_env);
break;
default:
goto unknown_op;
}
}
break;
case 0x0e: /* grp d9/6 */
switch(rm) {
case 0: /* f2xm1 */
gen_helper_f2xm1(cpu_env);
break;
case 1: /* fyl2x */
gen_helper_fyl2x(cpu_env);
break;
case 2: /* fptan */
gen_helper_fptan(cpu_env);
break;
case 3: /* fpatan */
gen_helper_fpatan(cpu_env);
break;
case 4: /* fxtract */
gen_helper_fxtract(cpu_env);
break;
case 5: /* fprem1 */
gen_helper_fprem1(cpu_env);
break;
case 6: /* fdecstp */
gen_helper_fdecstp(cpu_env);
break;
default:
case 7: /* fincstp */
gen_helper_fincstp(cpu_env);
break;
}
break;
case 0x0f: /* grp d9/7 */
switch(rm) {
case 0: /* fprem */
gen_helper_fprem(cpu_env);
break;
case 1: /* fyl2xp1 */
gen_helper_fyl2xp1(cpu_env);
break;
case 2: /* fsqrt */
gen_helper_fsqrt(cpu_env);
break;
case 3: /* fsincos */
gen_helper_fsincos(cpu_env);
break;
case 5: /* fscale */
gen_helper_fscale(cpu_env);
break;
case 4: /* frndint */
gen_helper_frndint(cpu_env);
break;
case 6: /* fsin */
gen_helper_fsin(cpu_env);
break;
default:
case 7: /* fcos */
gen_helper_fcos(cpu_env);
break;
}
break;
case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
{
int op1;
op1 = op & 7;
if (op >= 0x20) {
gen_helper_fp_arith_STN_ST0(op1, opreg);
if (op >= 0x30)
gen_helper_fpop(cpu_env);
} else {
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fp_arith_ST0_FT0(op1);
}
}
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x15: /* da/5 */
switch(rm) {
case 1: /* fucompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x1c:
switch(rm) {
case 0: /* feni (287 only, just do nop here) */
break;
case 1: /* fdisi (287 only, just do nop here) */
break;
case 2: /* fclex */
gen_helper_fclex(cpu_env);
break;
case 3: /* fninit */
gen_helper_fninit(cpu_env);
break;
case 4: /* fsetpm (287 only, just do nop here) */
break;
default:
goto unknown_op;
}
break;
case 0x1d: /* fucomi */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x1e: /* fcomi */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x2a: /* fst sti */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* fucom st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
break;
case 0x2d: /* fucomp st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x33: /* de/3 */
switch(rm) {
case 1: /* fcompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x3c: /* df/4 */
switch(rm) {
case 0:
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
goto unknown_op;
}
break;
case 0x3d: /* fucomip */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3e: /* fcomip */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x10 ... 0x13: /* fcmovxx */
case 0x18 ... 0x1b:
{
int op1;
TCGLabel *l1;
static const uint8_t fcmov_cc[8] = {
(JCC_B << 1),
(JCC_Z << 1),
(JCC_BE << 1),
(JCC_P << 1),
};
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
gen_set_label(l1);
}
break;
default:
goto unknown_op;
}
}
break;
/************************/
/* string ops */
case 0xa4: /* movsS */
case 0xa5:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_movs(s, ot);
}
break;
case 0xaa: /* stosS */
case 0xab:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_stos(s, ot);
}
break;
case 0xac: /* lodsS */
case 0xad:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_lods(s, ot);
}
break;
case 0xae: /* scasS */
case 0xaf:
ot = mo_b_d(b, dflag);
if (prefixes & PREFIX_REPNZ) {
gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
} else if (prefixes & PREFIX_REPZ) {
gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
} else {
gen_scas(s, ot);
}
break;
case 0xa6: /* cmpsS */
case 0xa7:
ot = mo_b_d(b, dflag);
if (prefixes & PREFIX_REPNZ) {
gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
} else if (prefixes & PREFIX_REPZ) {
gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
} else {
gen_cmps(s, ot);
}
break;
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
break;
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
break;
/************************/
/* port I/O */
case 0xe4:
case 0xe5:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xe6:
case 0xe7:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
/************************/
/* control */
case 0xc2: /* ret im */
val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
ot = gen_pop_T0(s);
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
gen_pop_update(s, ot);
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xca: /* lret im */
val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
do_lret:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
/* NOTE: keeping EIP updated is not a problem in case of
exception */
gen_op_jmp_v(cpu_T0);
/* pop selector */
gen_add_A0_im(s, 1 << dflag);
gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
gen_stack_update(s, val + (2 << dflag));
}
gen_eob(s);
break;
case 0xcb: /* lret */
val = 0;
goto do_lret;
case 0xcf: /* iret */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
if (!s->pe) {
/* real mode */
gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
set_cc_op(s, CC_OP_EFLAGS);
} else if (s->vm86) {
if (s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
}
gen_eob(s);
break;
case 0xe8: /* call im */
{
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
tcg_gen_movi_tl(cpu_T0, next_eip);
gen_push_v(s, cpu_T0);
gen_bnd_jmp(s);
gen_jmp(s, tval);
}
break;
case 0x9a: /* lcall im */
{
unsigned int selector, offset;
if (CODE64(s))
goto illegal_op;
ot = dflag;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
tcg_gen_movi_tl(cpu_T0, selector);
tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_lcall;
case 0xe9: /* jmp im */
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
tval += s->pc - s->cs_base;
if (dflag == MO_16) {
tval &= 0xffff;
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
gen_bnd_jmp(s);
gen_jmp(s, tval);
break;
case 0xea: /* ljmp im */
{
unsigned int selector, offset;
if (CODE64(s))
goto illegal_op;
ot = dflag;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
tcg_gen_movi_tl(cpu_T0, selector);
tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
tval = (int8_t)insn_get(env, s, MO_8);
tval += s->pc - s->cs_base;
if (dflag == MO_16) {
tval &= 0xffff;
}
gen_jmp(s, tval);
break;
case 0x70 ... 0x7f: /* jcc Jb */
tval = (int8_t)insn_get(env, s, MO_8);
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
do_jcc:
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
}
gen_bnd_jmp(s);
gen_jcc(s, b, tval, next_eip);
break;
case 0x190 ... 0x19f: /* setcc Gv */
modrm = cpu_ldub_code(env, s->pc++);
gen_setcc1(s, b, cpu_T0);
gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_cmovcc1(env, s, ot, b, modrm, reg);
break;
/************************/
/* flags */
case 0x9c: /* pushf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_helper_read_eflags(cpu_T0, cpu_env);
gen_push_v(s, cpu_T0);
}
break;
case 0x9d: /* popf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
ot = gen_pop_T0(s);
if (s->cpl == 0) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK |
IOPL_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK | IOPL_MASK)
& 0xffff));
}
} else {
if (s->cpl <= s->iopl) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)
& 0xffff));
}
} else {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)
& 0xffff));
}
}
}
gen_pop_update(s, ot);
set_cc_op(s, CC_OP_EFLAGS);
/* abort translation because TF/AC flag may change */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
case 0x9e: /* sahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
break;
case 0x9f: /* lahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_compute_eflags(s);
/* Note: gen_compute_eflags() only gives the condition codes */
tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
break;
case 0xf5: /* cmc */
gen_compute_eflags(s);
tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xf8: /* clc */
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
break;
case 0xf9: /* stc */
gen_compute_eflags(s);
tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
s->rip_offset = 1;
gen_lea_modrm(env, s, modrm);
if (!(s->prefix & PREFIX_LOCK)) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
/* load shift */
val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T1, val);
if (op < 4)
goto unknown_op;
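        /* Map /4../7 onto the bt/bts/btr/btc numbering used at bt_op */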
op -= 4;
goto bt_op;
case 0x1a3: /* bt Gv, Ev */
op = 0;
goto do_btx;
case 0x1ab: /* bts */
op = 1;
goto do_btx;
case 0x1b3: /* btr */
op = 2;
goto do_btx;
case 0x1bb: /* btc */
op = 3;
do_btx:
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(MO_32, cpu_T1, reg);
if (mod != 3) {
AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* Specific case: the bit index can reach beyond the operand,
               so fold the byte displacement it implies into the address. */
gen_exts(ot, cpu_T1);
tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (!(s->prefix & PREFIX_LOCK)) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
bt_op:
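        /* Reduce the bit index modulo the operand width, then build a
           single-bit mask from it. */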
tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
tcg_gen_movi_tl(cpu_tmp0, 1);
tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
if (s->prefix & PREFIX_LOCK) {
switch (op) {
case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
break;
case 1: /* bts */
tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
case 2: /* btr */
tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
default:
case 3: /* btc */
tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
}
tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
} else {
tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
switch (op) {
case 0: /* bt */
/* Data already loaded; nothing to do. */
break;
case 1: /* bts */
tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
case 2: /* btr */
tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
default:
case 3: /* btc */
tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
if (op != 0) {
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
}
/* Delay all CC updates until after the store above. Note that
C is the result of the test, Z is unchanged, and the others
are all undefined. */
switch (s->cc_op) {
case CC_OP_MULB ... CC_OP_MULQ:
case CC_OP_ADDB ... CC_OP_ADDQ:
case CC_OP_ADCB ... CC_OP_ADCQ:
case CC_OP_SUBB ... CC_OP_SUBQ:
case CC_OP_SBBB ... CC_OP_SBBQ:
case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_INCB ... CC_OP_INCQ:
case CC_OP_DECB ... CC_OP_DECQ:
case CC_OP_SHLB ... CC_OP_SHLQ:
case CC_OP_SARB ... CC_OP_SARQ:
case CC_OP_BMILGB ... CC_OP_BMILGQ:
/* Z was going to be computed from the non-zero status of CC_DST.
We can get that same Z value (and the new C value) by leaving
CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
same width. */
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
break;
default:
/* Otherwise, generate EFLAGS and replace the C bit. */
gen_compute_eflags(s);
tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
ctz32(CC_C), 1);
break;
}
break;
case 0x1bc: /* bsf / tzcnt */
case 0x1bd: /* bsr / lzcnt */
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
/* Note that lzcnt and tzcnt are in different extensions. */
if ((prefixes & PREFIX_REPZ)
&& (b & 1
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
/* For lzcnt/tzcnt, C bit is defined related to the input. */
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
} else {
/* For tzcnt, a zero input must return the operand size. */
tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
}
/* For lzcnt/tzcnt, Z bit is defined related to the result. */
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
} else {
/* For bsr/bsf, only the Z bit is defined and it is related
to the input and not the result. */
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_LOGICB + ot);
/* ??? The manual says that the output is undefined when the
input is zero, but real hardware leaves it unchanged, and
real programs appear to depend on that. Accomplish this
by passing the output as the value to return upon zero. */
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
} else {
tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
}
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
/************************/
/* bcd */
case 0x27: /* daa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_daa(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x2f: /* das */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_das(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x37: /* aaa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_aaa(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3f: /* aas */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_aas(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0xd4: /* aam */
if (CODE64(s))
goto illegal_op;
val = cpu_ldub_code(env, s->pc++);
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
gen_helper_aam(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
}
break;
case 0xd5: /* aad */
if (CODE64(s))
goto illegal_op;
val = cpu_ldub_code(env, s->pc++);
gen_helper_aad(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
/************************/
/* misc */
case 0x90: /* nop */
/* XXX: correct lock test for all insn */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
/* If REX_B is set, then this is xchg eax, r8d, not a nop. */
if (REX_B(s)) {
goto do_xchg_reg_eax;
}
if (prefixes & PREFIX_REPZ) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
s->is_jmp = DISAS_TB_JUMP;
}
break;
case 0x9b: /* fwait */
if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
(HF_MP_MASK | HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
} else {
gen_helper_fwait(cpu_env);
}
break;
case 0xcc: /* int3 */
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
val = cpu_ldub_code(env, s->pc++);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
}
break;
case 0xce: /* into */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
break;
#ifdef WANT_ICEBP
case 0xf1: /* icebp (undocumented, exits to external debugger) */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
tb_flush(CPU(x86_env_get_cpu(env)));
qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
#endif
case 0xfa: /* cli */
if (!s->vm86) {
if (s->cpl <= s->iopl) {
gen_helper_cli(cpu_env);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
} else {
if (s->iopl == 3) {
gen_helper_cli(cpu_env);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
}
break;
case 0xfb: /* sti */
if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
gen_helper_sti(cpu_env);
            /* interrupts are enabled only after the insn following sti */
gen_jmp_im(s->pc - s->cs_base);
gen_eob_inhibit_irq(s, true);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
break;
case 0x62: /* bound */
if (CODE64(s))
goto illegal_op;
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_lea_modrm(env, s, modrm);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
} else {
gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
}
break;
case 0x1c8 ... 0x1cf: /* bswap reg */
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == MO_64) {
gen_op_mov_v_reg(MO_64, cpu_T0, reg);
tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
gen_op_mov_v_reg(MO_32, cpu_T0, reg);
tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_32, reg, cpu_T0);
}
break;
case 0xd6: /* salc */
if (CODE64(s))
goto illegal_op;
gen_compute_eflags_c(s, cpu_T0);
tcg_gen_neg_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
case 0xe2: /* loop */
case 0xe3: /* jecxz */
{
TCGLabel *l1, *l2, *l3;
tval = (int8_t)insn_get(env, s, MO_8);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
}
l1 = gen_new_label();
l2 = gen_new_label();
l3 = gen_new_label();
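            /* l1: branch taken (EIP = tval), l2: common exit,
               l3: branch not taken (EIP = next_eip) */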
b &= 3;
switch(b) {
case 0: /* loopnz */
case 1: /* loopz */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jz_ecx(s->aflag, l3);
gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
break;
case 2: /* loop */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jnz_ecx(s->aflag, l1);
break;
default:
case 3: /* jcxz */
gen_op_jz_ecx(s->aflag, l1);
break;
}
gen_set_label(l3);
gen_jmp_im(next_eip);
tcg_gen_br(l2);
gen_set_label(l1);
gen_jmp_im(tval);
gen_set_label(l2);
gen_eob(s);
}
break;
case 0x130: /* wrmsr */
case 0x132: /* rdmsr */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_helper_rdmsr(cpu_env);
} else {
gen_helper_wrmsr(cpu_env);
}
}
break;
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtsc(cpu_env);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0x133: /* rdpmc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_rdpmc(cpu_env);
break;
case 0x134: /* sysenter */
        /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysenter(cpu_env);
gen_eob(s);
}
break;
case 0x135: /* sysexit */
        /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
}
break;
#ifdef TARGET_X86_64
case 0x105: /* syscall */
        /* XXX: is it usable in real mode? */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
/* TF handling for the syscall insn is different. The TF bit is checked
after the syscall insn completes. This allows #DB to not be
generated after one has entered CPL0 if TF is set in FMASK. */
gen_eob_worker(s, false, true);
break;
case 0x107: /* sysret */
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
set_cc_op(s, CC_OP_EFLAGS);
}
/* TF handling for the sysret insn is different. The TF bit is
checked after the sysret insn completes. This allows #DB to be
generated "as if" the syscall insn in userspace has just
completed. */
gen_eob_worker(s, false, true);
}
break;
#endif
case 0x1a2: /* cpuid */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_cpuid(cpu_env);
break;
case 0xf4: /* hlt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
s->is_jmp = DISAS_TB_JUMP;
}
break;
case 0x100:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
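        /* Group 6 (0F 00): sldt/str/lldt/ltr/verr/verw */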
switch(op) {
case 0: /* sldt */
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
if (!s->pe || s->vm86)
goto illegal_op;
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
break;
case 1: /* str */
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
if (!s->pe || s->vm86)
goto illegal_op;
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
break;
case 4: /* verr */
case 5: /* verw */
if (!s->pe || s->vm86)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
gen_helper_verr(cpu_env, cpu_T0);
} else {
gen_helper_verw(cpu_env, cpu_T0);
}
set_cc_op(s, CC_OP_EFLAGS);
break;
default:
goto unknown_op;
}
break;
case 0x101:
modrm = cpu_ldub_code(env, s->pc++);
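        /* Group 7 (0F 01): descriptor tables, SVM, smsw/lmsw, invlpg,
           swapgs, rdtscp */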
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* sgdt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0,
cpu_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 0xc8: /* monitor */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
gen_helper_monitor(cpu_env, cpu_A0);
break;
case 0xc9: /* mwait */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
case 0xca: /* clac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|| s->cpl != 0) {
goto illegal_op;
}
gen_helper_clac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xcb: /* stac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|| s->cpl != 0) {
goto illegal_op;
}
gen_helper_stac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(1): /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 0xd0: /* xgetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
break;
case 0xd1: /* xsetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
/* End TB because translation flags may change. */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xd8: /* VMRUN */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
tcg_const_i32(s->pc - pc_start));
tcg_gen_exit_tb(0);
s->is_jmp = DISAS_TB_JUMP;
break;
case 0xd9: /* VMMCALL */
if (!(s->flags & HF_SVME_MASK)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmmcall(cpu_env);
break;
case 0xda: /* VMLOAD */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
break;
case 0xdb: /* VMSAVE */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
break;
case 0xdc: /* STGI */
if ((!(s->flags & HF_SVME_MASK)
&& !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|| !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_stgi(cpu_env);
break;
case 0xdd: /* CLGI */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_clgi(cpu_env);
break;
case 0xde: /* SKINIT */
if ((!(s->flags & HF_SVME_MASK)
&& !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|| !s->pe) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_skinit(cpu_env);
break;
case 0xdf: /* INVLPGA */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
break;
CASE_MODRM_MEM_OP(2): /* lgdt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
break;
CASE_MODRM_MEM_OP(3): /* lidt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
break;
CASE_MODRM_OP(4): /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
if (CODE64(s)) {
mod = (modrm >> 6) & 3;
ot = (mod != 3 ? MO_16 : s->dflag);
} else {
ot = MO_16;
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0xee: /* rdpkru */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
break;
case 0xef: /* wrpkru */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
break;
CASE_MODRM_OP(6): /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_helper_lmsw(cpu_env, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(7): /* invlpg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_lea_modrm(env, s, modrm);
gen_helper_invlpg(cpu_env, cpu_A0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
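                /* Exchange the GS base with MSR_KERNELGSBASE,
                   using cpu_T0 as scratch. */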
tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
offsetof(CPUX86State, kernelgsbase));
tcg_gen_st_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, kernelgsbase));
}
break;
}
#endif
goto illegal_op;
case 0xf9: /* rdtscp */
if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtscp(cpu_env);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
default:
goto unknown_op;
}
break;
case 0x108: /* invd */
case 0x109: /* wbinvd */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
/* nothing to do */
}
break;
    case 0x63: /* arpl, or movsxd on x86_64 */
#ifdef TARGET_X86_64
if (CODE64(s)) {
int d_ot;
/* d_ot is the size of destination */
d_ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
gen_op_mov_v_reg(MO_32, cpu_T0, rm);
/* sign extend */
if (d_ot == MO_64) {
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
} else
#endif
{
TCGLabel *label1;
TCGv t0, t1, t2, a0;
if (!s->pe || s->vm86)
goto illegal_op;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
ot = MO_16;
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, t0, cpu_A0);
a0 = tcg_temp_local_new();
tcg_gen_mov_tl(a0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, t0, rm);
TCGV_UNUSED(a0);
}
gen_op_mov_v_reg(ot, t1, reg);
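            /* ARPL: if dest RPL (t0 bits 1:0) is below src RPL (t1),
               raise it to src RPL and set ZF (recorded in t2). */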
tcg_gen_andi_tl(cpu_tmp0, t0, 3);
tcg_gen_andi_tl(t1, t1, 3);
tcg_gen_movi_tl(t2, 0);
label1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
tcg_gen_andi_tl(t0, t0, ~3);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_movi_tl(t2, CC_Z);
gen_set_label(label1);
if (mod != 3) {
gen_op_st_v(s, ot, t0, a0);
tcg_temp_free(a0);
} else {
gen_op_mov_reg_v(ot, rm, t0);
}
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
}
break;
case 0x102: /* lar */
case 0x103: /* lsl */
{
TCGLabel *label1;
TCGv t0;
if (!s->pe || s->vm86)
goto illegal_op;
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
if (b == 0x102) {
gen_helper_lar(t0, cpu_env, cpu_T0);
} else {
gen_helper_lsl(t0, cpu_env, cpu_T0);
}
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
gen_op_mov_reg_v(ot, reg, t0);
gen_set_label(label1);
set_cc_op(s, CC_OP_EFLAGS);
tcg_temp_free(t0);
}
break;
case 0x118:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
if (mod == 3)
goto illegal_op;
gen_nop_modrm(env, s, modrm);
/* nothing more to do */
break;
default: /* nop (multi byte) */
gen_nop_modrm(env, s, modrm);
break;
}
break;
case 0x11a:
modrm = cpu_ldub_code(env, s->pc++);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
if (prefixes & PREFIX_REPZ) {
/* bndcl */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
} else if (prefixes & PREFIX_REPNZ) {
/* bndcu */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
TCGv_i64 notu = tcg_temp_new_i64();
tcg_gen_not_i64(notu, cpu_bndu[reg]);
gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
tcg_temp_free_i64(notu);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- from reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
goto illegal_op;
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
}
} else {
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEQ);
} else {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEUL);
}
                /* bnd registers are now in use */
gen_set_hflag(s, HF_MPX_IU_MASK);
}
} else if (mod != 3) {
/* bndldx */
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
}
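                /* Form the linear address in cpu_A0 and the index value
                   in cpu_T0 for the bound table lookup. */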
if (a.base >= 0) {
tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
} else {
tcg_gen_movi_tl(cpu_A0, 0);
}
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (a.index >= 0) {
tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
} else {
tcg_gen_movi_tl(cpu_T0, 0);
}
if (CODE64(s)) {
gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
} else {
gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
}
gen_set_hflag(s, HF_MPX_IU_MASK);
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0x11b:
modrm = cpu_ldub_code(env, s->pc++);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3 && (prefixes & PREFIX_REPZ)) {
/* bndmk */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (a.base >= 0) {
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
if (!CODE64(s)) {
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
}
} else if (a.base == -1) {
                    /* no base register: the lower bound is 0 */
tcg_gen_movi_i64(cpu_bndl[reg], 0);
} else {
/* rip-relative generates #ud */
goto illegal_op;
}
tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
if (!CODE64(s)) {
tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
                /* bnd registers are now in use */
gen_set_hflag(s, HF_MPX_IU_MASK);
break;
} else if (prefixes & PREFIX_REPNZ) {
/* bndcn */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- to reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
goto illegal_op;
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
}
} else {
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEQ);
} else {
tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEUL);
}
}
} else if (mod != 3) {
/* bndstx */
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
}
if (a.base >= 0) {
tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
} else {
tcg_gen_movi_tl(cpu_A0, 0);
}
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (a.index >= 0) {
tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
} else {
tcg_gen_movi_tl(cpu_T0, 0);
}
if (CODE64(s)) {
gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
cpu_bndl[reg], cpu_bndu[reg]);
} else {
gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
cpu_bndl[reg], cpu_bndu[reg]);
}
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
modrm = cpu_ldub_code(env, s->pc++);
gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = MO_64;
else
ot = MO_32;
if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
(s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
reg = 8;
}
switch(reg) {
case 0:
case 2:
case 3:
case 4:
case 8:
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
break;
default:
goto unknown_op;
}
}
break;
case 0x121: /* mov reg, drN */
case 0x123: /* mov drN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = MO_64;
else
ot = MO_32;
if (reg >= 8) {
goto illegal_op;
}
if (b & 2) {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
gen_op_mov_v_reg(ot, cpu_T0, rm);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
case 0x106: /* clts */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_helper_clts(cpu_env);
/* abort block because static cpu state changed */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
case 0x1c3: /* MOVNTI reg, mem */
if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
ot = mo_64_32(dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
modrm = cpu_ldub_code(env, s->pc++);
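        /* Group 15 (0F AE): fxsave/fxrstor, mxcsr, xsave family, fences,
           rd/wr fs/gs base */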
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* fxsave */
if (!(s->cpuid_features & CPUID_FXSR)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
gen_helper_fxsave(cpu_env, cpu_A0);
break;
CASE_MODRM_MEM_OP(1): /* fxrstor */
if (!(s->cpuid_features & CPUID_FXSR)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
gen_helper_fxrstor(cpu_env, cpu_A0);
break;
CASE_MODRM_MEM_OP(2): /* ldmxcsr */
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
break;
CASE_MODRM_MEM_OP(3): /* stmxcsr */
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
break;
CASE_MODRM_MEM_OP(4): /* xsave */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
break;
CASE_MODRM_MEM_OP(5): /* xrstor */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
/* XRSTOR is how MPX is enabled, which changes how
we translate. Thus we need to end the TB. */
gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
if (prefixes & PREFIX_DATA) {
/* clwb */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
goto illegal_op;
}
gen_nop_modrm(env, s, modrm);
} else {
/* xsaveopt */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
|| (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
}
break;
CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
if (prefixes & PREFIX_DATA) {
/* clflushopt */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
goto illegal_op;
}
} else {
/* clflush */
if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
|| !(s->cpuid_features & CPUID_CLFLUSH)) {
goto illegal_op;
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
if (CODE64(s)
&& (prefixes & PREFIX_REPZ)
&& !(prefixes & PREFIX_LOCK)
&& (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
TCGv base, treg, src, dst;
/* Preserve hflags bits by testing CR4 at runtime. */
tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
treg = cpu_regs[(modrm & 7) | REX_B(s)];
if (modrm & 0x10) {
/* wr*base */
dst = base, src = treg;
} else {
/* rd*base */
dst = treg, src = base;
}
if (s->dflag == MO_32) {
tcg_gen_ext32u_tl(dst, src);
} else {
tcg_gen_mov_tl(dst, src);
}
break;
}
goto unknown_op;
case 0xf8: /* sfence / pcommit */
if (prefixes & PREFIX_DATA) {
/* pcommit */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
break;
}
/* fallthru */
case 0xf9 ... 0xff: /* sfence */
if (!(s->cpuid_features & CPUID_SSE)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
break;
case 0xe8 ... 0xef: /* lfence */
if (!(s->cpuid_features & CPUID_SSE)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
break;
case 0xf0 ... 0xf7: /* mfence */
if (!(s->cpuid_features & CPUID_SSE2)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
break;
default:
goto unknown_op;
}
break;
case 0x10d: /* 3DNow! prefetch(w) */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_nop_modrm(env, s, modrm);
break;
case 0x1aa: /* rsm */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
if (!(s->flags & HF_SMM_MASK))
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_helper_rsm(cpu_env);
gen_eob(s);
break;
case 0x1b8: /* SSE4.2 popcnt */
if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
PREFIX_REPZ)
goto illegal_op;
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
goto illegal_op;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA) {
ot = MO_16;
} else {
ot = mo_64_32(dflag);
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
set_cc_op(s, CC_OP_POPCNT);
break;
case 0x10e ... 0x10f:
/* 3DNow! instructions, ignore prefixes */
s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
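        /* fall through */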
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x138 ... 0x13a:
case 0x150 ... 0x179:
case 0x17c ... 0x17f:
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
gen_sse(env, s, b, pc_start, rex_r);
break;
default:
goto unknown_op;
}
return s->pc;
illegal_op:
gen_illegal_opcode(s);
return s->pc;
unknown_op:
gen_unknown_opcode(env, s);
return s->pc;
}
Commit Message: tcg/i386: Check the size of instruction being translated
This fixes the bug: 'user-to-root privesc inside VM via bad translation
caching' reported by Jann Horn here:
https://bugs.chromium.org/p/project-zero/issues/detail?id=1122
Reviewed-by: Richard Henderson <[email protected]>
CC: Peter Maydell <[email protected]>
CC: Paolo Bonzini <[email protected]>
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Pranith Kumar <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
CWE ID: CWE-94 | static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
target_ulong pc_start)
{
int b, prefixes;
int shift;
TCGMemOp ot, aflag, dflag;
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
int rex_w, rex_r;
s->pc_start = s->pc = pc_start;
prefixes = 0;
s->override = -1;
rex_w = -1;
rex_r = 0;
#ifdef TARGET_X86_64
s->rex_x = 0;
s->rex_b = 0;
x86_64_hregs = 0;
#endif
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
next_byte:
/* x86 has an upper limit of 15 bytes for an instruction. Since we
* do not want to decode and generate IR for an illegal
* instruction, the following check limits the instruction size to
* 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
if (s->pc - pc_start > 14) {
goto illegal_op;
}
b = cpu_ldub_code(env, s->pc);
s->pc++;
/* Collect prefixes. */
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
goto next_byte;
case 0x2e:
s->override = R_CS;
goto next_byte;
case 0x36:
s->override = R_SS;
goto next_byte;
case 0x3e:
s->override = R_DS;
goto next_byte;
case 0x26:
s->override = R_ES;
goto next_byte;
case 0x64:
s->override = R_FS;
goto next_byte;
case 0x65:
s->override = R_GS;
goto next_byte;
case 0x66:
prefixes |= PREFIX_DATA;
goto next_byte;
case 0x67:
prefixes |= PREFIX_ADR;
goto next_byte;
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
/* REX prefix */
rex_w = (b >> 3) & 1;
rex_r = (b & 0x4) << 1;
s->rex_x = (b & 0x2) << 2;
REX_B(s) = (b & 0x1) << 3;
x86_64_hregs = 1; /* select uniform byte register addressing */
goto next_byte;
}
break;
#endif
case 0xc5: /* 2-byte VEX */
case 0xc4: /* 3-byte VEX */
/* VEX prefixes cannot be used except in 32-bit mode.
Otherwise the instruction is LES or LDS. */
if (s->code32 && !s->vm86) {
static const int pp_prefix[4] = {
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
};
int vex3, vex2 = cpu_ldub_code(env, s->pc);
if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
/* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
otherwise the instruction is LES or LDS. */
break;
}
s->pc++;
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
| PREFIX_LOCK | PREFIX_DATA)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
if (x86_64_hregs) {
goto illegal_op;
}
#endif
rex_r = (~vex2 >> 4) & 8;
if (b == 0xc5) {
vex3 = vex2;
b = cpu_ldub_code(env, s->pc++);
} else {
#ifdef TARGET_X86_64
s->rex_x = (~vex2 >> 3) & 8;
s->rex_b = (~vex2 >> 2) & 8;
#endif
vex3 = cpu_ldub_code(env, s->pc++);
rex_w = (vex3 >> 7) & 1;
switch (vex2 & 0x1f) {
case 0x01: /* Implied 0f leading opcode bytes. */
b = cpu_ldub_code(env, s->pc++) | 0x100;
break;
case 0x02: /* Implied 0f 38 leading opcode bytes. */
b = 0x138;
break;
case 0x03: /* Implied 0f 3a leading opcode bytes. */
b = 0x13a;
break;
default: /* Reserved for future use. */
goto unknown_op;
}
}
s->vex_v = (~vex3 >> 3) & 0xf;
s->vex_l = (vex3 >> 2) & 1;
prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
}
break;
}
/* Post-process prefixes. */
if (CODE64(s)) {
/* In 64-bit mode, the default data size is 32-bit. Select 64-bit
data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
over 0x66 if both are present. */
dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
/* In 64-bit mode, 0x67 selects 32-bit addressing. */
aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
} else {
/* In 16/32-bit mode, 0x66 selects the opposite data size. */
if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
dflag = MO_32;
} else {
dflag = MO_16;
}
/* In 16/32-bit mode, 0x67 selects the opposite addressing. */
if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
aflag = MO_32;
} else {
aflag = MO_16;
}
}
s->prefix = prefixes;
s->aflag = aflag;
s->dflag = dflag;
/* now check op code */
reswitch:
switch(b) {
case 0x0f:
/**************************/
/* extended op code */
b = cpu_ldub_code(env, s->pc++) | 0x100;
goto reswitch;
/**************************/
/* arith & logic */
case 0x00 ... 0x05:
case 0x08 ... 0x0d:
case 0x10 ... 0x15:
case 0x18 ... 0x1d:
case 0x20 ... 0x25:
case 0x28 ... 0x2d:
case 0x30 ... 0x35:
case 0x38 ... 0x3d:
{
int op, f, val;
op = (b >> 3) & 7;
f = (b >> 1) & 3;
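            /* Bits 5:3 of b select the ALU op, bits 1:0 the operand form
               (Ev,Gv / Gv,Ev / A,Iv). */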
ot = mo_b_d(b, dflag);
switch(f) {
case 0: /* OP Ev, Gv */
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else if (op == OP_XORL && rm == reg) {
xor_zero:
/* xor reg, reg optimisation */
set_cc_op(s, CC_OP_CLR);
tcg_gen_movi_tl(cpu_T0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
} else {
opreg = rm;
}
gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
} else {
gen_op_mov_v_reg(ot, cpu_T1, rm);
}
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, OR_EAX);
break;
}
}
break;
case 0x82:
if (CODE64(s))
goto illegal_op;
case 0x80: /* GRP1 */
case 0x81:
case 0x83:
{
int val;
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (b == 0x83)
s->rip_offset = 1;
else
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = rm;
}
switch(b) {
default:
case 0x80:
case 0x81:
case 0x82:
val = insn_get(env, s, ot);
break;
case 0x83:
val = (int8_t)insn_get(env, s, MO_8);
break;
}
tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, opreg);
}
break;
/**************************/
/* inc, dec, and other misc arith */
case 0x40 ... 0x47: /* inc Gv */
ot = dflag;
gen_inc(s, ot, OR_EAX + (b & 7), 1);
break;
case 0x48 ... 0x4f: /* dec Gv */
ot = dflag;
gen_inc(s, ot, OR_EAX + (b & 7), -1);
break;
case 0xf6: /* GRP3 */
case 0xf7:
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (op == 0) {
s->rip_offset = insn_const_size(ot);
}
gen_lea_modrm(env, s, modrm);
/* For those below that handle locked memory, don't load here. */
if (!(s->prefix & PREFIX_LOCK)
|| op != 2) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* test */
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 2: /* not */
if (s->prefix & PREFIX_LOCK) {
if (mod == 3) {
goto illegal_op;
}
tcg_gen_movi_tl(cpu_T0, ~0);
tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
} else {
tcg_gen_not_tl(cpu_T0, cpu_T0);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
case 3: /* neg */
if (s->prefix & PREFIX_LOCK) {
TCGLabel *label1;
TCGv a0, t0, t1, t2;
if (mod == 3) {
goto illegal_op;
}
a0 = tcg_temp_local_new();
t0 = tcg_temp_local_new();
label1 = gen_new_label();
tcg_gen_mov_tl(a0, cpu_A0);
tcg_gen_mov_tl(t0, cpu_T0);
gen_set_label(label1);
t1 = tcg_temp_new();
t2 = tcg_temp_new();
tcg_gen_mov_tl(t2, t0);
tcg_gen_neg_tl(t1, t0);
tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
s->mem_index, ot | MO_LE);
tcg_temp_free(t1);
tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
tcg_temp_free(t2);
tcg_temp_free(a0);
tcg_gen_mov_tl(cpu_T0, t0);
tcg_temp_free(t0);
} else {
tcg_gen_neg_tl(cpu_T0, cpu_T0);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
gen_op_update_neg_cc();
set_cc_op(s, CC_OP_SUBB + ot);
break;
case 4: /* mul */
switch(ot) {
case MO_8:
gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
}
break;
case 5: /* imul */
switch(ot) {
case MO_8:
gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
}
break;
case 6: /* div */
switch(ot) {
case MO_8:
gen_helper_divb_AL(cpu_env, cpu_T0);
break;
case MO_16:
gen_helper_divw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
gen_helper_divl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
gen_helper_divq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
case 7: /* idiv */
switch(ot) {
case MO_8:
gen_helper_idivb_AL(cpu_env, cpu_T0);
break;
case MO_16:
gen_helper_idivw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
gen_helper_idivl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
gen_helper_idivq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
default:
goto unknown_op;
}
break;
case 0xfe: /* GRP4 */
case 0xff: /* GRP5 */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (op >= 2 && b == 0xfe) {
goto unknown_op;
}
if (CODE64(s)) {
if (op == 2 || op == 4) {
/* operand size for jumps is 64 bit */
ot = MO_64;
} else if (op == 3 || op == 5) {
ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
} else if (op == 6) {
/* default push size is 64 bit */
ot = mo_pushpop(s, dflag);
}
}
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if (op >= 2 && op != 3 && op != 5)
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* inc Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, 1);
break;
case 1: /* dec Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, -1);
break;
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
if (dflag == MO_16) {
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
next_eip = s->pc - s->cs_base;
tcg_gen_movi_tl(cpu_T1, next_eip);
gen_push_v(s, cpu_T1);
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 3: /* lcall Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_tl(s->pc - s->cs_base));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
gen_eob(s);
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 5: /* ljmp Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T1);
}
gen_eob(s);
break;
case 6: /* push Ev */
gen_push_v(s, cpu_T0);
break;
default:
goto unknown_op;
}
break;
case 0x84: /* test Ev, Gv */
case 0x85:
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0xa8: /* test eAX, Iv */
case 0xa9:
ot = mo_b_d(b, dflag);
val = insn_get(env, s, ot);
gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0x98: /* CWDE/CBW */
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
break;
#endif
case MO_32:
gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
break;
case MO_16:
gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
tcg_abort();
}
break;
case 0x99: /* CDQ/CWD */
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
break;
#endif
case MO_32:
gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
break;
default:
tcg_abort();
}
break;
case 0x1af: /* imul Gv, Ev */
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
} else if (b == 0x6b) {
val = (int8_t)insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T1, val);
} else {
gen_op_mov_v_reg(ot, cpu_T1, reg);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
break;
#endif
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
break;
default:
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
}
set_cc_op(s, CC_OP_MULB + ot);
break;
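    /* XADD: exchange-and-add.  The destination receives old dest + src
       while the source register receives the old destination value.  */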
case 0x1c0:
case 0x1c1: /* xadd Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
gen_op_mov_v_reg(ot, cpu_T0, reg);
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, cpu_T1, rm);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, reg, cpu_T1);
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
if (s->prefix & PREFIX_LOCK) {
tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
gen_op_mov_reg_v(ot, reg, cpu_T1);
}
gen_op_update2_cc();
set_cc_op(s, CC_OP_ADDB + ot);
break;
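    /* CMPXCHG: if EAX equals the destination, the destination is
       replaced by the source register; otherwise EAX receives the
       destination.  With LOCK this must be one atomic cmpxchg.  */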
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
TCGv oldv, newv, cmpv;
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
oldv = tcg_temp_new();
newv = tcg_temp_new();
cmpv = tcg_temp_new();
gen_op_mov_v_reg(ot, newv, reg);
tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
if (s->prefix & PREFIX_LOCK) {
if (mod == 3) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
s->mem_index, ot | MO_LE);
gen_op_mov_reg_v(ot, R_EAX, oldv);
} else {
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, oldv, rm);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, oldv, cpu_A0);
rm = 0; /* avoid warning */
}
gen_extu(ot, oldv);
gen_extu(ot, cmpv);
/* store value = (old == cmp ? new : old); */
tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
if (mod == 3) {
gen_op_mov_reg_v(ot, R_EAX, oldv);
gen_op_mov_reg_v(ot, rm, newv);
} else {
/* Perform an unconditional store cycle like physical cpu;
must be before changing accumulator to ensure
idempotency if the store faults and the instruction
is restarted */
gen_op_st_v(s, ot, newv, cpu_A0);
gen_op_mov_reg_v(ot, R_EAX, oldv);
}
}
tcg_gen_mov_tl(cpu_cc_src, oldv);
tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
set_cc_op(s, CC_OP_SUBB + ot);
tcg_temp_free(oldv);
tcg_temp_free(newv);
tcg_temp_free(cmpv);
}
break;
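    /* CMPXCHG8B/16B: the 16-byte form is the same opcode with REX.W,
       gated on the CX16 CPUID bit.  The unlocked helper is used unless
       LOCK is present and other vCPUs actually run in parallel.  */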
case 0x1c7: /* cmpxchg8b */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
#ifdef TARGET_X86_64
if (dflag == MO_64) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
}
} else
#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
}
}
set_cc_op(s, CC_OP_EFLAGS);
break;
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
gen_push_v(s, cpu_T0);
break;
case 0x58 ... 0x5f: /* pop */
ot = gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
break;
case 0x60: /* pusha */
if (CODE64(s))
goto illegal_op;
gen_pusha(s);
break;
case 0x61: /* popa */
if (CODE64(s))
goto illegal_op;
gen_popa(s);
break;
case 0x68: /* push Iv */
case 0x6a:
ot = mo_pushpop(s, dflag);
if (b == 0x68)
val = insn_get(env, s, ot);
else
val = (int8_t)insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T0, val);
gen_push_v(s, cpu_T0);
break;
case 0x8f: /* pop Ev */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
ot = gen_pop_T0(s);
if (mod == 3) {
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
rm = (modrm & 7) | REX_B(s);
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
s->popl_esp_hack = 0;
gen_pop_update(s, ot);
}
break;
case 0xc8: /* enter */
{
int level;
val = cpu_lduw_code(env, s->pc);
s->pc += 2;
level = cpu_ldub_code(env, s->pc++);
gen_enter(s, val, level);
}
break;
case 0xc9: /* leave */
gen_leave(s);
break;
case 0x06: /* push es */
case 0x0e: /* push cs */
case 0x16: /* push ss */
case 0x1e: /* push ds */
if (CODE64(s))
goto illegal_op;
gen_op_movl_T0_seg(b >> 3);
gen_push_v(s, cpu_T0);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_op_movl_T0_seg((b >> 3) & 7);
gen_push_v(s, cpu_T0);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
case 0x1f: /* pop ds */
if (CODE64(s))
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
}
}
break;
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/**************************/
/* mov */
case 0x88:
case 0x89: /* mov Gv, Ev */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
}
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T0, val);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
}
break;
case 0x8a:
case 0x8b: /* mov Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x8e: /* mov seg, Gv */
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_movl_seg_T0(s, reg);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
}
}
break;
case 0x8c: /* mov Gv, seg */
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (reg >= 6)
goto illegal_op;
gen_op_movl_T0_seg(reg);
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0x1b6: /* movzbS Gv, Eb */
case 0x1b7: /* movzwS Gv, Eb */
case 0x1be: /* movsbS Gv, Eb */
case 0x1bf: /* movswS Gv, Eb */
{
TCGMemOp d_ot;
TCGMemOp s_ot;
/* d_ot is the size of destination */
d_ot = dflag;
/* ot is the size of source */
ot = (b & 1) + MO_8;
/* s_ot is the sign+size of source */
s_ot = b & 8 ? MO_SIGN | ot : ot;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
switch (s_ot) {
case MO_UB:
tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
break;
case MO_SB:
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
break;
case MO_UW:
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
break;
default:
case MO_SW:
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
break;
}
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
}
break;
case 0x8d: /* lea */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
{
AddressParts a = gen_lea_modrm_0(env, s, modrm);
TCGv ea = gen_lea_modrm_1(a);
gen_lea_v_seg(s, s->aflag, ea, -1, -1);
gen_op_mov_reg_v(dflag, reg, cpu_A0);
}
break;
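    /* A0..A3 use a direct memory offset (moffs) embedded in the
       instruction; its width follows the address size, not the
       operand size.  */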
case 0xa0: /* mov EAX, Ov */
case 0xa1:
case 0xa2: /* mov Ov, EAX */
case 0xa3:
{
target_ulong offset_addr;
ot = mo_b_d(b, dflag);
switch (s->aflag) {
#ifdef TARGET_X86_64
case MO_64:
offset_addr = cpu_ldq_code(env, s->pc);
s->pc += 8;
break;
#endif
default:
offset_addr = insn_get(env, s, s->aflag);
break;
}
tcg_gen_movi_tl(cpu_A0, offset_addr);
gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
} else {
gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
}
break;
case 0xd7: /* xlat */
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
val = insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T0, val);
gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
if (dflag == MO_64) {
uint64_t tmp;
/* 64 bit case */
tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, tmp);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
ot = dflag;
val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, val);
gen_op_mov_reg_v(ot, reg, cpu_T0);
}
break;
case 0x91 ... 0x97: /* xchg R, EAX */
do_xchg_reg_eax:
ot = dflag;
reg = (b & 7) | REX_B(s);
rm = R_EAX;
goto do_xchg_reg;
case 0x86:
case 0x87: /* xchg Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
do_xchg_reg:
gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_op_mov_v_reg(ot, cpu_T1, rm);
gen_op_mov_reg_v(ot, rm, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T1);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_mov_v_reg(ot, cpu_T0, reg);
/* for xchg, lock is implicit */
tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
gen_op_mov_reg_v(ot, reg, cpu_T1);
}
break;
case 0xc4: /* les Gv */
/* In CODE64 this is VEX3; see above. */
op = R_ES;
goto do_lxx;
case 0xc5: /* lds Gv */
/* In CODE64 this is VEX2; see above. */
op = R_DS;
goto do_lxx;
case 0x1b2: /* lss Gv */
op = R_SS;
goto do_lxx;
case 0x1b4: /* lfs Gv */
op = R_FS;
goto do_lxx;
case 0x1b5: /* lgs Gv */
op = R_GS;
do_lxx:
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(ot, reg, cpu_T1);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/************************/
/* shifts */
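    /* Three encodings share the grp2 tail below: shift == 2 takes an
       immediate count byte, shift == 1 means a count of 1, and
       shift == 0 takes the count from CL.  */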
case 0xc0:
case 0xc1:
/* shift Ev,Ib */
shift = 2;
grp2:
{
ot = mo_b_d(b, dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
if (mod != 3) {
if (shift == 2) {
s->rip_offset = 1;
}
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = (modrm & 7) | REX_B(s);
}
/* simpler op */
if (shift == 0) {
gen_shift(s, op, ot, opreg, OR_ECX);
} else {
if (shift == 2) {
shift = cpu_ldub_code(env, s->pc++);
}
gen_shifti(s, op, ot, opreg, shift);
}
}
break;
case 0xd0:
case 0xd1:
/* shift Ev,1 */
shift = 1;
goto grp2;
case 0xd2:
case 0xd3:
/* shift Ev,cl */
shift = 0;
goto grp2;
case 0x1a4: /* shld imm */
op = 0;
shift = 1;
goto do_shiftd;
case 0x1a5: /* shld cl */
op = 0;
shift = 0;
goto do_shiftd;
case 0x1ac: /* shrd imm */
op = 1;
shift = 1;
goto do_shiftd;
case 0x1ad: /* shrd cl */
op = 1;
shift = 0;
do_shiftd:
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = rm;
}
gen_op_mov_v_reg(ot, cpu_T1, reg);
if (shift) {
TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
tcg_temp_free(imm);
} else {
gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
}
break;
/************************/
/* floats */
case 0xd8 ... 0xdf:
if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
/* XXX: what to do if illegal op ? */
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
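        /* The 6-bit op index combines the low 3 opcode bits (d8..df)
           with the ModRM reg field, one entry per FPU instruction.  */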
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
gen_lea_modrm(env, s, modrm);
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
case 0x20 ... 0x27: /* fxxxl */
case 0x30 ... 0x37: /* fixxx */
{
int op1;
op1 = op & 7;
switch(op >> 4) {
case 0:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LESW);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
}
gen_helper_fp_arith_ST0_FT0(op1);
if (op1 == 3) {
/* fcomp needs pop */
gen_helper_fpop(cpu_env);
}
}
break;
case 0x08: /* flds */
case 0x0a: /* fsts */
case 0x0b: /* fstps */
case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
switch(op & 7) {
case 0:
switch(op >> 4) {
case 0:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LESW);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
}
break;
case 1:
                    /* XXX: the corresponding CPUID bit must be tested! */
switch(op >> 4) {
case 1:
gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
break;
case 3:
default:
gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
}
gen_helper_fpop(cpu_env);
break;
default:
switch(op >> 4) {
case 0:
gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 1:
gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
break;
case 3:
default:
gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
}
if ((op & 7) == 3)
gen_helper_fpop(cpu_env);
break;
}
break;
case 0x0c: /* fldenv mem */
gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0d: /* fldcw mem */
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
break;
case 0x0e: /* fnstenv mem */
gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0f: /* fnstcw mem */
gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
case 0x1d: /* fldt mem */
gen_helper_fldt_ST0(cpu_env, cpu_A0);
break;
case 0x1f: /* fstpt mem */
gen_helper_fstt_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* frstor mem */
gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2e: /* fnsave mem */
gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2f: /* fnstsw mem */
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
case 0x3c: /* fbld */
gen_helper_fbld_ST0(cpu_env, cpu_A0);
break;
case 0x3e: /* fbstp */
gen_helper_fbst_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
} else {
/* register float ops */
opreg = rm;
switch(op) {
case 0x08: /* fld sti */
gen_helper_fpush(cpu_env);
gen_helper_fmov_ST0_STN(cpu_env,
tcg_const_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
gen_helper_fwait(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x0c: /* grp d9/4 */
switch(rm) {
case 0: /* fchs */
gen_helper_fchs_ST0(cpu_env);
break;
case 1: /* fabs */
gen_helper_fabs_ST0(cpu_env);
break;
case 4: /* ftst */
gen_helper_fldz_FT0(cpu_env);
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 5: /* fxam */
gen_helper_fxam_ST0(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x0d: /* grp d9/5 */
{
switch(rm) {
case 0:
gen_helper_fpush(cpu_env);
gen_helper_fld1_ST0(cpu_env);
break;
case 1:
gen_helper_fpush(cpu_env);
gen_helper_fldl2t_ST0(cpu_env);
break;
case 2:
gen_helper_fpush(cpu_env);
gen_helper_fldl2e_ST0(cpu_env);
break;
case 3:
gen_helper_fpush(cpu_env);
gen_helper_fldpi_ST0(cpu_env);
break;
case 4:
gen_helper_fpush(cpu_env);
gen_helper_fldlg2_ST0(cpu_env);
break;
case 5:
gen_helper_fpush(cpu_env);
gen_helper_fldln2_ST0(cpu_env);
break;
case 6:
gen_helper_fpush(cpu_env);
gen_helper_fldz_ST0(cpu_env);
break;
default:
goto unknown_op;
}
}
break;
case 0x0e: /* grp d9/6 */
switch(rm) {
case 0: /* f2xm1 */
gen_helper_f2xm1(cpu_env);
break;
case 1: /* fyl2x */
gen_helper_fyl2x(cpu_env);
break;
case 2: /* fptan */
gen_helper_fptan(cpu_env);
break;
case 3: /* fpatan */
gen_helper_fpatan(cpu_env);
break;
case 4: /* fxtract */
gen_helper_fxtract(cpu_env);
break;
case 5: /* fprem1 */
gen_helper_fprem1(cpu_env);
break;
case 6: /* fdecstp */
gen_helper_fdecstp(cpu_env);
break;
default:
case 7: /* fincstp */
gen_helper_fincstp(cpu_env);
break;
}
break;
case 0x0f: /* grp d9/7 */
switch(rm) {
case 0: /* fprem */
gen_helper_fprem(cpu_env);
break;
case 1: /* fyl2xp1 */
gen_helper_fyl2xp1(cpu_env);
break;
case 2: /* fsqrt */
gen_helper_fsqrt(cpu_env);
break;
case 3: /* fsincos */
gen_helper_fsincos(cpu_env);
break;
case 5: /* fscale */
gen_helper_fscale(cpu_env);
break;
case 4: /* frndint */
gen_helper_frndint(cpu_env);
break;
case 6: /* fsin */
gen_helper_fsin(cpu_env);
break;
default:
case 7: /* fcos */
gen_helper_fcos(cpu_env);
break;
}
break;
case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
{
int op1;
op1 = op & 7;
if (op >= 0x20) {
gen_helper_fp_arith_STN_ST0(op1, opreg);
if (op >= 0x30)
gen_helper_fpop(cpu_env);
} else {
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fp_arith_ST0_FT0(op1);
}
}
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x15: /* da/5 */
switch(rm) {
case 1: /* fucompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x1c:
switch(rm) {
case 0: /* feni (287 only, just do nop here) */
break;
case 1: /* fdisi (287 only, just do nop here) */
break;
case 2: /* fclex */
gen_helper_fclex(cpu_env);
break;
case 3: /* fninit */
gen_helper_fninit(cpu_env);
break;
case 4: /* fsetpm (287 only, just do nop here) */
break;
default:
goto unknown_op;
}
break;
case 0x1d: /* fucomi */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x1e: /* fcomi */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x2a: /* fst sti */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* fucom st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
break;
case 0x2d: /* fucomp st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x33: /* de/3 */
switch(rm) {
case 1: /* fcompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x3c: /* df/4 */
switch(rm) {
case 0:
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
goto unknown_op;
}
break;
case 0x3d: /* fucomip */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3e: /* fcomip */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x10 ... 0x13: /* fcmovxx */
case 0x18 ... 0x1b:
{
int op1;
TCGLabel *l1;
static const uint8_t fcmov_cc[8] = {
(JCC_B << 1),
(JCC_Z << 1),
(JCC_BE << 1),
(JCC_P << 1),
};
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
gen_set_label(l1);
}
break;
default:
goto unknown_op;
}
}
break;
/************************/
/* string ops */
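    /* REP/REPZ/REPNZ-prefixed forms go through gen_repz_*, which wrap
       the single-step body in an explicit ECX-tested loop.  */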
case 0xa4: /* movsS */
case 0xa5:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_movs(s, ot);
}
break;
case 0xaa: /* stosS */
case 0xab:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_stos(s, ot);
}
break;
case 0xac: /* lodsS */
case 0xad:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_lods(s, ot);
}
break;
case 0xae: /* scasS */
case 0xaf:
ot = mo_b_d(b, dflag);
if (prefixes & PREFIX_REPNZ) {
gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
} else if (prefixes & PREFIX_REPZ) {
gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
} else {
gen_scas(s, ot);
}
break;
case 0xa6: /* cmpsS */
case 0xa7:
ot = mo_b_d(b, dflag);
if (prefixes & PREFIX_REPNZ) {
gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
} else if (prefixes & PREFIX_REPZ) {
gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
} else {
gen_cmps(s, ot);
}
break;
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
break;
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
break;
/************************/
/* port I/O */
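    /* gen_check_io validates I/O permission (IOPL, or the TSS I/O
       bitmap when needed) and the SVM IOIO intercept before the
       access is performed.  */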
case 0xe4:
case 0xe5:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xe6:
case 0xe7:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
/************************/
/* control */
case 0xc2: /* ret im */
val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
ot = gen_pop_T0(s);
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
gen_pop_update(s, ot);
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xca: /* lret im */
val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
do_lret:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
/* NOTE: keeping EIP updated is not a problem in case of
exception */
gen_op_jmp_v(cpu_T0);
/* pop selector */
gen_add_A0_im(s, 1 << dflag);
gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
gen_stack_update(s, val + (2 << dflag));
}
gen_eob(s);
break;
case 0xcb: /* lret */
val = 0;
goto do_lret;
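    /* IRET dispatches on CPU mode: real mode and VM86 with IOPL 3 use
       the real-mode helper, everything else the protected-mode one.  */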
case 0xcf: /* iret */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
if (!s->pe) {
/* real mode */
gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
set_cc_op(s, CC_OP_EFLAGS);
} else if (s->vm86) {
if (s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
}
gen_eob(s);
break;
case 0xe8: /* call im */
{
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
tcg_gen_movi_tl(cpu_T0, next_eip);
gen_push_v(s, cpu_T0);
gen_bnd_jmp(s);
gen_jmp(s, tval);
}
break;
case 0x9a: /* lcall im */
{
unsigned int selector, offset;
if (CODE64(s))
goto illegal_op;
ot = dflag;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
tcg_gen_movi_tl(cpu_T0, selector);
tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_lcall;
case 0xe9: /* jmp im */
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
tval += s->pc - s->cs_base;
if (dflag == MO_16) {
tval &= 0xffff;
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
gen_bnd_jmp(s);
gen_jmp(s, tval);
break;
case 0xea: /* ljmp im */
{
unsigned int selector, offset;
if (CODE64(s))
goto illegal_op;
ot = dflag;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
tcg_gen_movi_tl(cpu_T0, selector);
tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
tval = (int8_t)insn_get(env, s, MO_8);
tval += s->pc - s->cs_base;
if (dflag == MO_16) {
tval &= 0xffff;
}
gen_jmp(s, tval);
break;
case 0x70 ... 0x7f: /* jcc Jb */
tval = (int8_t)insn_get(env, s, MO_8);
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
do_jcc:
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
}
gen_bnd_jmp(s);
gen_jcc(s, b, tval, next_eip);
break;
case 0x190 ... 0x19f: /* setcc Gv */
modrm = cpu_ldub_code(env, s->pc++);
gen_setcc1(s, b, cpu_T0);
gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_cmovcc1(env, s, ot, b, modrm, reg);
break;
/************************/
/* flags */
case 0x9c: /* pushf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_helper_read_eflags(cpu_T0, cpu_env);
gen_push_v(s, cpu_T0);
}
break;
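    /* POPF: the set of writable EFLAGS bits depends on privilege --
       CPL 0 may also change IOPL and IF, CPL <= IOPL may change IF,
       and anything less privileged may change neither.  */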
case 0x9d: /* popf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
ot = gen_pop_T0(s);
if (s->cpl == 0) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK |
IOPL_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK | IOPL_MASK)
& 0xffff));
}
} else {
if (s->cpl <= s->iopl) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)
& 0xffff));
}
} else {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)
& 0xffff));
}
}
}
gen_pop_update(s, ot);
set_cc_op(s, CC_OP_EFLAGS);
/* abort translation because TF/AC flag may change */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
case 0x9e: /* sahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
break;
case 0x9f: /* lahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_compute_eflags(s);
/* Note: gen_compute_eflags() only gives the condition codes */
tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
break;
case 0xf5: /* cmc */
gen_compute_eflags(s);
tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xf8: /* clc */
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
break;
case 0xf9: /* stc */
gen_compute_eflags(s);
tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
s->rip_offset = 1;
gen_lea_modrm(env, s, modrm);
if (!(s->prefix & PREFIX_LOCK)) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
/* load shift */
val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T1, val);
if (op < 4)
goto unknown_op;
op -= 4;
goto bt_op;
case 0x1a3: /* bt Gv, Ev */
op = 0;
goto do_btx;
case 0x1ab: /* bts */
op = 1;
goto do_btx;
case 0x1b3: /* btr */
op = 2;
goto do_btx;
case 0x1bb: /* btc */
op = 3;
do_btx:
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(MO_32, cpu_T1, reg);
if (mod != 3) {
AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* memory operand: the bit offset can reach past the
               addressed unit, so fold the byte displacement into the
               effective address */
gen_exts(ot, cpu_T1);
tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (!(s->prefix & PREFIX_LOCK)) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
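    /* Common tail for all BT variants: mask the bit offset to the
       operand width, then build the single-bit mask used below.  */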
bt_op:
tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
tcg_gen_movi_tl(cpu_tmp0, 1);
tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
if (s->prefix & PREFIX_LOCK) {
switch (op) {
case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now.  */
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
break;
case 1: /* bts */
tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
case 2: /* btr */
tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
default:
case 3: /* btc */
tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
}
tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
} else {
tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
switch (op) {
case 0: /* bt */
/* Data already loaded; nothing to do. */
break;
case 1: /* bts */
tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
case 2: /* btr */
tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
default:
case 3: /* btc */
tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
if (op != 0) {
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
}
/* Delay all CC updates until after the store above. Note that
C is the result of the test, Z is unchanged, and the others
are all undefined. */
switch (s->cc_op) {
case CC_OP_MULB ... CC_OP_MULQ:
case CC_OP_ADDB ... CC_OP_ADDQ:
case CC_OP_ADCB ... CC_OP_ADCQ:
case CC_OP_SUBB ... CC_OP_SUBQ:
case CC_OP_SBBB ... CC_OP_SBBQ:
case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_INCB ... CC_OP_INCQ:
case CC_OP_DECB ... CC_OP_DECQ:
case CC_OP_SHLB ... CC_OP_SHLQ:
case CC_OP_SARB ... CC_OP_SARQ:
case CC_OP_BMILGB ... CC_OP_BMILGQ:
/* Z was going to be computed from the non-zero status of CC_DST.
We can get that same Z value (and the new C value) by leaving
CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
same width. */
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
break;
default:
/* Otherwise, generate EFLAGS and replace the C bit. */
gen_compute_eflags(s);
tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
ctz32(CC_C), 1);
break;
}
break;
case 0x1bc: /* bsf / tzcnt */
case 0x1bd: /* bsr / lzcnt */
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
/* Note that lzcnt and tzcnt are in different extensions. */
if ((prefixes & PREFIX_REPZ)
&& (b & 1
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
/* For lzcnt/tzcnt, C bit is defined related to the input. */
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
} else {
/* For tzcnt, a zero input must return the operand size. */
tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
}
/* For lzcnt/tzcnt, Z bit is defined related to the result. */
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
} else {
/* For bsr/bsf, only the Z bit is defined and it is related
to the input and not the result. */
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_LOGICB + ot);
/* ??? The manual says that the output is undefined when the
input is zero, but real hardware leaves it unchanged, and
real programs appear to depend on that. Accomplish this
by passing the output as the value to return upon zero. */
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
} else {
tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
}
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
/************************/
/* bcd */
case 0x27: /* daa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_daa(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x2f: /* das */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_das(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x37: /* aaa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_aaa(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3f: /* aas */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_aas(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0xd4: /* aam */
if (CODE64(s))
goto illegal_op;
val = cpu_ldub_code(env, s->pc++);
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
gen_helper_aam(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
}
break;
case 0xd5: /* aad */
if (CODE64(s))
goto illegal_op;
val = cpu_ldub_code(env, s->pc++);
gen_helper_aad(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
/************************/
/* misc */
case 0x90: /* nop */
/* XXX: correct lock test for all insn */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
/* If REX_B is set, then this is xchg eax, r8d, not a nop. */
if (REX_B(s)) {
goto do_xchg_reg_eax;
}
if (prefixes & PREFIX_REPZ) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
s->is_jmp = DISAS_TB_JUMP;
}
break;
case 0x9b: /* fwait */
if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
(HF_MP_MASK | HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
} else {
gen_helper_fwait(cpu_env);
}
break;
case 0xcc: /* int3 */
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
val = cpu_ldub_code(env, s->pc++);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
}
break;
case 0xce: /* into */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
break;
#ifdef WANT_ICEBP
case 0xf1: /* icebp (undocumented, exits to external debugger) */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
tb_flush(CPU(x86_env_get_cpu(env)));
qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
#endif
case 0xfa: /* cli */
if (!s->vm86) {
if (s->cpl <= s->iopl) {
gen_helper_cli(cpu_env);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
} else {
if (s->iopl == 3) {
gen_helper_cli(cpu_env);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
}
break;
case 0xfb: /* sti */
if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
gen_helper_sti(cpu_env);
            /* interrupt delivery is inhibited until after the insn following sti */
gen_jmp_im(s->pc - s->cs_base);
gen_eob_inhibit_irq(s, true);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
break;
case 0x62: /* bound */
if (CODE64(s))
goto illegal_op;
ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_lea_modrm(env, s, modrm);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
} else {
gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
}
break;
case 0x1c8 ... 0x1cf: /* bswap reg */
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == MO_64) {
gen_op_mov_v_reg(MO_64, cpu_T0, reg);
tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
gen_op_mov_v_reg(MO_32, cpu_T0, reg);
tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_32, reg, cpu_T0);
}
break;
case 0xd6: /* salc */
if (CODE64(s))
goto illegal_op;
gen_compute_eflags_c(s, cpu_T0);
tcg_gen_neg_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
case 0xe2: /* loop */
case 0xe3: /* jecxz */
{
TCGLabel *l1, *l2, *l3;
tval = (int8_t)insn_get(env, s, MO_8);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
}
l1 = gen_new_label();
l2 = gen_new_label();
l3 = gen_new_label();
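            /* l1: branch taken (jump to tval); l3: loopz/loopnz exit
               when ECX reaches zero (falls through to next_eip);
               l2: common end of the instruction.  */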
b &= 3;
switch(b) {
case 0: /* loopnz */
case 1: /* loopz */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jz_ecx(s->aflag, l3);
gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
break;
case 2: /* loop */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jnz_ecx(s->aflag, l1);
break;
default:
            case 3: /* jecxz */
gen_op_jz_ecx(s->aflag, l1);
break;
}
gen_set_label(l3);
gen_jmp_im(next_eip);
tcg_gen_br(l2);
gen_set_label(l1);
gen_jmp_im(tval);
gen_set_label(l2);
gen_eob(s);
}
break;
case 0x130: /* wrmsr */
case 0x132: /* rdmsr */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_helper_rdmsr(cpu_env);
} else {
gen_helper_wrmsr(cpu_env);
}
}
break;
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtsc(cpu_env);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0x133: /* rdpmc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_rdpmc(cpu_env);
break;
case 0x134: /* sysenter */
        /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysenter(cpu_env);
gen_eob(s);
}
break;
case 0x135: /* sysexit */
        /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
}
break;
#ifdef TARGET_X86_64
case 0x105: /* syscall */
        /* XXX: is it usable in real mode? */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
/* TF handling for the syscall insn is different. The TF bit is checked
after the syscall insn completes. This allows #DB to not be
generated after one has entered CPL0 if TF is set in FMASK. */
gen_eob_worker(s, false, true);
break;
case 0x107: /* sysret */
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
set_cc_op(s, CC_OP_EFLAGS);
}
/* TF handling for the sysret insn is different. The TF bit is
checked after the sysret insn completes. This allows #DB to be
generated "as if" the syscall insn in userspace has just
completed. */
gen_eob_worker(s, false, true);
}
break;
#endif
case 0x1a2: /* cpuid */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_cpuid(cpu_env);
break;
case 0xf4: /* hlt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
s->is_jmp = DISAS_TB_JUMP;
}
break;
case 0x100:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* sldt */
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
if (!s->pe || s->vm86)
goto illegal_op;
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
break;
case 1: /* str */
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
if (!s->pe || s->vm86)
goto illegal_op;
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
break;
case 4: /* verr */
case 5: /* verw */
if (!s->pe || s->vm86)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
gen_helper_verr(cpu_env, cpu_T0);
} else {
gen_helper_verw(cpu_env, cpu_T0);
}
set_cc_op(s, CC_OP_EFLAGS);
break;
default:
goto unknown_op;
}
break;
case 0x101:
modrm = cpu_ldub_code(env, s->pc++);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* sgdt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0,
cpu_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 0xc8: /* monitor */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
gen_helper_monitor(cpu_env, cpu_A0);
break;
case 0xc9: /* mwait */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
case 0xca: /* clac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|| s->cpl != 0) {
goto illegal_op;
}
gen_helper_clac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xcb: /* stac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|| s->cpl != 0) {
goto illegal_op;
}
gen_helper_stac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(1): /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 0xd0: /* xgetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
break;
case 0xd1: /* xsetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
/* End TB because translation flags may change. */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
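        /* ModRM values 0xd8..0xdf of 0f 01 encode the SVM instruction
           group; each checks for SVM support and most are CPL-0 only.  */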
case 0xd8: /* VMRUN */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
tcg_const_i32(s->pc - pc_start));
tcg_gen_exit_tb(0);
s->is_jmp = DISAS_TB_JUMP;
break;
case 0xd9: /* VMMCALL */
if (!(s->flags & HF_SVME_MASK)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmmcall(cpu_env);
break;
case 0xda: /* VMLOAD */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
break;
case 0xdb: /* VMSAVE */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
break;
case 0xdc: /* STGI */
if ((!(s->flags & HF_SVME_MASK)
&& !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|| !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_stgi(cpu_env);
break;
case 0xdd: /* CLGI */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_clgi(cpu_env);
break;
case 0xde: /* SKINIT */
if ((!(s->flags & HF_SVME_MASK)
&& !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|| !s->pe) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_skinit(cpu_env);
break;
case 0xdf: /* INVLPGA */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
break;
CASE_MODRM_MEM_OP(2): /* lgdt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
break;
CASE_MODRM_MEM_OP(3): /* lidt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
break;
CASE_MODRM_OP(4): /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
if (CODE64(s)) {
mod = (modrm >> 6) & 3;
ot = (mod != 3 ? MO_16 : s->dflag);
} else {
ot = MO_16;
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0xee: /* rdpkru */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
break;
case 0xef: /* wrpkru */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
break;
CASE_MODRM_OP(6): /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_helper_lmsw(cpu_env, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(7): /* invlpg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_lea_modrm(env, s, modrm);
gen_helper_invlpg(cpu_env, cpu_A0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
offsetof(CPUX86State, kernelgsbase));
tcg_gen_st_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, kernelgsbase));
}
break;
}
#endif
goto illegal_op;
case 0xf9: /* rdtscp */
if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtscp(cpu_env);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
default:
goto unknown_op;
}
break;
case 0x108: /* invd */
case 0x109: /* wbinvd */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
/* nothing to do */
}
break;
case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
if (CODE64(s)) {
int d_ot;
/* d_ot is the size of destination */
d_ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
gen_op_mov_v_reg(MO_32, cpu_T0, rm);
/* sign extend */
if (d_ot == MO_64) {
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
} else
#endif
{
TCGLabel *label1;
TCGv t0, t1, t2, a0;
if (!s->pe || s->vm86)
goto illegal_op;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
ot = MO_16;
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, t0, cpu_A0);
a0 = tcg_temp_local_new();
tcg_gen_mov_tl(a0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, t0, rm);
TCGV_UNUSED(a0);
}
gen_op_mov_v_reg(ot, t1, reg);
tcg_gen_andi_tl(cpu_tmp0, t0, 3);
tcg_gen_andi_tl(t1, t1, 3);
tcg_gen_movi_tl(t2, 0);
label1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
tcg_gen_andi_tl(t0, t0, ~3);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_movi_tl(t2, CC_Z);
gen_set_label(label1);
if (mod != 3) {
gen_op_st_v(s, ot, t0, a0);
tcg_temp_free(a0);
} else {
gen_op_mov_reg_v(ot, rm, t0);
}
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
}
break;
case 0x102: /* lar */
case 0x103: /* lsl */
{
TCGLabel *label1;
TCGv t0;
if (!s->pe || s->vm86)
goto illegal_op;
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
if (b == 0x102) {
gen_helper_lar(t0, cpu_env, cpu_T0);
} else {
gen_helper_lsl(t0, cpu_env, cpu_T0);
}
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
gen_op_mov_reg_v(ot, reg, t0);
gen_set_label(label1);
set_cc_op(s, CC_OP_EFLAGS);
tcg_temp_free(t0);
}
break;
case 0x118:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* prefetchnta */
case 1: /* prefetchnt0 */
case 2: /* prefetchnt0 */
case 3: /* prefetchnt0 */
if (mod == 3)
goto illegal_op;
gen_nop_modrm(env, s, modrm);
/* nothing more to do */
break;
default: /* nop (multi byte) */
gen_nop_modrm(env, s, modrm);
break;
}
break;
case 0x11a:
modrm = cpu_ldub_code(env, s->pc++);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
if (prefixes & PREFIX_REPZ) {
/* bndcl */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
} else if (prefixes & PREFIX_REPNZ) {
/* bndcu */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
TCGv_i64 notu = tcg_temp_new_i64();
tcg_gen_not_i64(notu, cpu_bndu[reg]);
gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
tcg_temp_free_i64(notu);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- from reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
goto illegal_op;
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
}
} else {
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEQ);
} else {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEUL);
}
/* bnd registers are now in-use */
gen_set_hflag(s, HF_MPX_IU_MASK);
}
} else if (mod != 3) {
/* bndldx */
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
}
if (a.base >= 0) {
tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
} else {
tcg_gen_movi_tl(cpu_A0, 0);
}
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (a.index >= 0) {
tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
} else {
tcg_gen_movi_tl(cpu_T0, 0);
}
if (CODE64(s)) {
gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
} else {
gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
}
gen_set_hflag(s, HF_MPX_IU_MASK);
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0x11b:
modrm = cpu_ldub_code(env, s->pc++);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3 && (prefixes & PREFIX_REPZ)) {
/* bndmk */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (a.base >= 0) {
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
if (!CODE64(s)) {
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
}
} else if (a.base == -1) {
/* no base register has lower bound of 0 */
tcg_gen_movi_i64(cpu_bndl[reg], 0);
} else {
/* rip-relative generates #ud */
goto illegal_op;
}
tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
if (!CODE64(s)) {
tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
/* bnd registers are now in-use */
gen_set_hflag(s, HF_MPX_IU_MASK);
break;
} else if (prefixes & PREFIX_REPNZ) {
/* bndcn */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- to reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
goto illegal_op;
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
}
} else {
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEQ);
} else {
tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEUL);
}
}
} else if (mod != 3) {
/* bndstx */
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
}
if (a.base >= 0) {
tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
} else {
tcg_gen_movi_tl(cpu_A0, 0);
}
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (a.index >= 0) {
tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
} else {
tcg_gen_movi_tl(cpu_T0, 0);
}
if (CODE64(s)) {
gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
cpu_bndl[reg], cpu_bndu[reg]);
} else {
gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
cpu_bndl[reg], cpu_bndu[reg]);
}
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
modrm = cpu_ldub_code(env, s->pc++);
gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = MO_64;
else
ot = MO_32;
if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
(s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
reg = 8;
}
switch(reg) {
case 0:
case 2:
case 3:
case 4:
case 8:
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
break;
default:
goto unknown_op;
}
}
break;
case 0x121: /* mov reg, drN */
case 0x123: /* mov drN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = MO_64;
else
ot = MO_32;
if (reg >= 8) {
goto illegal_op;
}
if (b & 2) {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
gen_op_mov_v_reg(ot, cpu_T0, rm);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
case 0x106: /* clts */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_helper_clts(cpu_env);
/* abort block because static cpu state changed */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
case 0x1c3: /* MOVNTI reg, mem */
if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
ot = mo_64_32(dflag);
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
modrm = cpu_ldub_code(env, s->pc++);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* fxsave */
if (!(s->cpuid_features & CPUID_FXSR)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
gen_helper_fxsave(cpu_env, cpu_A0);
break;
CASE_MODRM_MEM_OP(1): /* fxrstor */
if (!(s->cpuid_features & CPUID_FXSR)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
gen_helper_fxrstor(cpu_env, cpu_A0);
break;
CASE_MODRM_MEM_OP(2): /* ldmxcsr */
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
break;
CASE_MODRM_MEM_OP(3): /* stmxcsr */
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
break;
CASE_MODRM_MEM_OP(4): /* xsave */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
break;
CASE_MODRM_MEM_OP(5): /* xrstor */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
/* XRSTOR is how MPX is enabled, which changes how
we translate. Thus we need to end the TB. */
gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
if (prefixes & PREFIX_DATA) {
/* clwb */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
goto illegal_op;
}
gen_nop_modrm(env, s, modrm);
} else {
/* xsaveopt */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
|| (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
}
break;
CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
if (prefixes & PREFIX_DATA) {
/* clflushopt */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
goto illegal_op;
}
} else {
/* clflush */
if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
|| !(s->cpuid_features & CPUID_CLFLUSH)) {
goto illegal_op;
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
if (CODE64(s)
&& (prefixes & PREFIX_REPZ)
&& !(prefixes & PREFIX_LOCK)
&& (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
TCGv base, treg, src, dst;
/* Preserve hflags bits by testing CR4 at runtime. */
tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
treg = cpu_regs[(modrm & 7) | REX_B(s)];
if (modrm & 0x10) {
/* wr*base */
dst = base, src = treg;
} else {
/* rd*base */
dst = treg, src = base;
}
if (s->dflag == MO_32) {
tcg_gen_ext32u_tl(dst, src);
} else {
tcg_gen_mov_tl(dst, src);
}
break;
}
goto unknown_op;
case 0xf8: /* sfence / pcommit */
if (prefixes & PREFIX_DATA) {
/* pcommit */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
break;
}
/* fallthru */
case 0xf9 ... 0xff: /* sfence */
if (!(s->cpuid_features & CPUID_SSE)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
break;
case 0xe8 ... 0xef: /* lfence */
if (!(s->cpuid_features & CPUID_SSE)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
break;
case 0xf0 ... 0xf7: /* mfence */
if (!(s->cpuid_features & CPUID_SSE2)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
break;
default:
goto unknown_op;
}
break;
case 0x10d: /* 3DNow! prefetch(w) */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_nop_modrm(env, s, modrm);
break;
case 0x1aa: /* rsm */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
if (!(s->flags & HF_SMM_MASK))
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_helper_rsm(cpu_env);
gen_eob(s);
break;
case 0x1b8: /* SSE4.2 popcnt */
if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
PREFIX_REPZ)
goto illegal_op;
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
goto illegal_op;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA) {
ot = MO_16;
} else {
ot = mo_64_32(dflag);
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
set_cc_op(s, CC_OP_POPCNT);
break;
case 0x10e ... 0x10f:
/* 3DNow! instructions, ignore prefixes */
s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x138 ... 0x13a:
case 0x150 ... 0x179:
case 0x17c ... 0x17f:
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
gen_sse(env, s, b, pc_start, rex_r);
break;
default:
goto unknown_op;
}
return s->pc;
illegal_op:
gen_illegal_opcode(s);
return s->pc;
unknown_op:
gen_unknown_opcode(env, s);
return s->pc;
}
| 168,205 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: size_t compile_tree(struct filter_op **fop)
{
int i = 1;
struct filter_op *array = NULL;
struct unfold_elm *ue;
BUG_IF(tree_root == NULL);
fprintf(stdout, " Unfolding the meta-tree ");
fflush(stdout);
/* start the recursion on the tree */
unfold_blk(&tree_root);
fprintf(stdout, " done.\n\n");
/* substitute the virtual labels with real offsets */
labels_to_offsets();
/* convert the tailq into an array */
TAILQ_FOREACH(ue, &unfolded_tree, next) {
/* label == 0 means a real instruction */
if (ue->label == 0) {
SAFE_REALLOC(array, i * sizeof(struct filter_op));
memcpy(&array[i - 1], &ue->fop, sizeof(struct filter_op));
i++;
}
}
/* always append the exit function to a script */
SAFE_REALLOC(array, i * sizeof(struct filter_op));
array[i - 1].opcode = FOP_EXIT;
/* return the pointer to the array */
*fop = array;
return (i);
}
Commit Message: Exit gracefully in case of corrupted filters (Closes issue #782)
CWE ID: CWE-125 | size_t compile_tree(struct filter_op **fop)
{
int i = 1;
struct filter_op *array = NULL;
struct unfold_elm *ue;
// invalid file
if (tree_root == NULL)
return 0;
fprintf(stdout, " Unfolding the meta-tree ");
fflush(stdout);
/* start the recursion on the tree */
unfold_blk(&tree_root);
fprintf(stdout, " done.\n\n");
/* substitute the virtual labels with real offsets */
labels_to_offsets();
/* convert the tailq into an array */
TAILQ_FOREACH(ue, &unfolded_tree, next) {
/* label == 0 means a real instruction */
if (ue->label == 0) {
SAFE_REALLOC(array, i * sizeof(struct filter_op));
memcpy(&array[i - 1], &ue->fop, sizeof(struct filter_op));
i++;
}
}
/* always append the exit function to a script */
SAFE_REALLOC(array, i * sizeof(struct filter_op));
array[i - 1].opcode = FOP_EXIT;
/* return the pointer to the array */
*fop = array;
return (i);
}
| 168,336 |
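The fix pattern in the record above is worth isolating: the vulnerable `compile_tree` aborted the whole process through `BUG_IF` when a corrupted filter file produced no tree, while the patched version returns 0 so the caller can fail cleanly. Below is a minimal standalone sketch of that abort-to-graceful-return pattern, using a hypothetical `node` type and caller rather than the actual ettercap sources:

```c
#include <stddef.h>
#include <stdio.h>

struct node { struct node *left, *right; };

/* Return the element count, or 0 when no tree exists
 * (e.g. a corrupted input file produced no root). */
static size_t count_nodes(const struct node *root)
{
    if (root == NULL)           /* graceful failure instead of abort() */
        return 0;
    return 1 + count_nodes(root->left) + count_nodes(root->right);
}

int main(void)
{
    struct node *root = NULL;   /* simulate a corrupted/empty input */
    if (count_nodes(root) == 0) {
        fprintf(stderr, "invalid filter file\n");
        return 1;               /* caller exits cleanly on the sentinel */
    }
    return 0;
}
```

The sentinel is unambiguous here for the same reason it is in the record: a valid compiled filter always contains at least the appended `FOP_EXIT` instruction, so a return value of 0 can only mean failure.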
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: ProcXSendExtensionEvent(ClientPtr client)
{
int ret;
DeviceIntPtr dev;
xEvent *first;
XEventClass *list;
struct tmask tmp[EMASKSIZE];
REQUEST(xSendExtensionEventReq);
REQUEST_AT_LEAST_SIZE(xSendExtensionEventReq);
if (stuff->length !=
bytes_to_int32(sizeof(xSendExtensionEventReq)) + stuff->count +
(stuff->num_events * bytes_to_int32(sizeof(xEvent))))
return BadLength;
ret = dixLookupDevice(&dev, stuff->deviceid, client, DixWriteAccess);
if (ret != Success)
return ret;
if (stuff->num_events == 0)
return ret;
/* The client's event type must be one defined by an extension. */
first = ((xEvent *) &stuff[1]);
if (!((EXTENSION_EVENT_BASE <= first->u.u.type) &&
(first->u.u.type < lastEvent))) {
client->errorValue = first->u.u.type;
return BadValue;
}
list = (XEventClass *) (first + stuff->num_events);
return ret;
ret = (SendEvent(client, dev, stuff->destination,
stuff->propagate, (xEvent *) &stuff[1],
tmp[stuff->deviceid].mask, stuff->num_events));
return ret;
}
Commit Message:
CWE ID: CWE-119 | ProcXSendExtensionEvent(ClientPtr client)
{
int ret, i;
DeviceIntPtr dev;
xEvent *first;
XEventClass *list;
struct tmask tmp[EMASKSIZE];
REQUEST(xSendExtensionEventReq);
REQUEST_AT_LEAST_SIZE(xSendExtensionEventReq);
if (stuff->length !=
bytes_to_int32(sizeof(xSendExtensionEventReq)) + stuff->count +
(stuff->num_events * bytes_to_int32(sizeof(xEvent))))
return BadLength;
ret = dixLookupDevice(&dev, stuff->deviceid, client, DixWriteAccess);
if (ret != Success)
return ret;
if (stuff->num_events == 0)
return ret;
/* The client's event type must be one defined by an extension. */
first = ((xEvent *) &stuff[1]);
for (i = 0; i < stuff->num_events; i++) {
if (!((EXTENSION_EVENT_BASE <= first[i].u.u.type) &&
(first[i].u.u.type < lastEvent))) {
client->errorValue = first[i].u.u.type;
return BadValue;
}
}
list = (XEventClass *) (first + stuff->num_events);
return ret;
ret = (SendEvent(client, dev, stuff->destination,
stuff->propagate, (xEvent *) &stuff[1],
tmp[stuff->deviceid].mask, stuff->num_events));
return ret;
}
| 164,765 |
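The record above shows a classic partial-validation bug: the vulnerable `ProcXSendExtensionEvent` range-checked only `first->u.u.type`, while the fix loops over all `stuff->num_events` client-supplied events. Here is the validate-every-element idea as a self-contained sketch, with a hypothetical event struct and bounds standing in for `EXTENSION_EVENT_BASE` and `lastEvent`:

```c
#include <stdbool.h>
#include <stddef.h>

#define TYPE_BASE 64u   /* hypothetical, stands in for EXTENSION_EVENT_BASE */
#define TYPE_LAST 128u  /* hypothetical, stands in for lastEvent */

struct event { unsigned type; };

/* Accept the request only if EVERY element passes the range check;
 * testing events[0] alone lets out-of-range types slip through. */
static bool events_valid(const struct event *events, size_t num_events)
{
    for (size_t i = 0; i < num_events; i++) {
        if (events[i].type < TYPE_BASE || events[i].type >= TYPE_LAST)
            return false;
    }
    return true;
}
```

Failing the whole request on the first bad element mirrors the X server behavior of returning `BadValue` with `errorValue` set to the offending type.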
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static Image *ReadSGIImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixel_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
i,
x;
register unsigned char
*p;
SGIInfo
iris_info;
size_t
bytes_per_pixel,
quantum;
ssize_t
count,
y,
z;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read SGI raster header.
*/
iris_info.magic=ReadBlobMSBShort(image);
do
{
/*
Verify SGI identifier.
*/
if (iris_info.magic != 0x01DA)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.storage=(unsigned char) ReadBlobByte(image);
switch (iris_info.storage)
{
case 0x00: image->compression=NoCompression; break;
case 0x01: image->compression=RLECompression; break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
iris_info.bytes_per_pixel=(unsigned char) ReadBlobByte(image);
if ((iris_info.bytes_per_pixel == 0) || (iris_info.bytes_per_pixel > 2))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.dimension=ReadBlobMSBShort(image);
iris_info.columns=ReadBlobMSBShort(image);
iris_info.rows=ReadBlobMSBShort(image);
iris_info.depth=ReadBlobMSBShort(image);
if ((iris_info.depth == 0) || (iris_info.depth > 4))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.minimum_value=ReadBlobMSBLong(image);
iris_info.maximum_value=ReadBlobMSBLong(image);
iris_info.sans=ReadBlobMSBLong(image);
(void) ReadBlob(image,sizeof(iris_info.name),(unsigned char *)
iris_info.name);
iris_info.name[sizeof(iris_info.name)-1]='\0';
if (*iris_info.name != '\0')
(void) SetImageProperty(image,"label",iris_info.name);
iris_info.pixel_format=ReadBlobMSBLong(image);
if (iris_info.pixel_format != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,sizeof(iris_info.filler),iris_info.filler);
(void) count;
image->columns=iris_info.columns;
image->rows=iris_info.rows;
image->depth=(size_t) MagickMin(iris_info.depth,MAGICKCORE_QUANTUM_DEPTH);
if (iris_info.pixel_format == 0)
image->depth=(size_t) MagickMin((size_t) 8*
iris_info.bytes_per_pixel,MAGICKCORE_QUANTUM_DEPTH);
if (iris_info.depth < 3)
{
image->storage_class=PseudoClass;
image->colors=iris_info.bytes_per_pixel > 1 ? 65535 : 256;
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Allocate SGI pixels.
*/
bytes_per_pixel=(size_t) iris_info.bytes_per_pixel;
number_pixels=(MagickSizeType) iris_info.columns*iris_info.rows;
if ((4*bytes_per_pixel*number_pixels) != ((MagickSizeType) (size_t)
(4*bytes_per_pixel*number_pixels)))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info=AcquireVirtualMemory(iris_info.columns,iris_info.rows*4*
bytes_per_pixel*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((int) iris_info.storage != 0x01)
{
unsigned char
*scanline;
/*
Read standard image format.
*/
scanline=(unsigned char *) AcquireQuantumMemory(iris_info.columns,
bytes_per_pixel*sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
p=pixels+bytes_per_pixel*z;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
count=ReadBlob(image,bytes_per_pixel*iris_info.columns,scanline);
if (EOFBlob(image) != MagickFalse)
break;
if (bytes_per_pixel == 2)
for (x=0; x < (ssize_t) iris_info.columns; x++)
{
*p=scanline[2*x];
*(p+1)=scanline[2*x+1];
p+=8;
}
else
for (x=0; x < (ssize_t) iris_info.columns; x++)
{
*p=scanline[x];
p+=4;
}
}
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
}
else
{
MemoryInfo
*packet_info;
size_t
*runlength;
ssize_t
offset,
*offsets;
unsigned char
*packets;
unsigned int
data_order;
/*
Read runlength-encoded image format.
*/
offsets=(ssize_t *) AcquireQuantumMemory((size_t) iris_info.rows,
iris_info.depth*sizeof(*offsets));
runlength=(size_t *) AcquireQuantumMemory(iris_info.rows,
iris_info.depth*sizeof(*runlength));
packet_info=AcquireVirtualMemory((size_t) iris_info.columns+10UL,4UL*
sizeof(*packets));
if ((offsets == (ssize_t *) NULL) ||
(runlength == (size_t *) NULL) ||
(packet_info == (MemoryInfo *) NULL))
{
if (offsets == (ssize_t *) NULL)
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
if (runlength == (size_t *) NULL)
runlength=(size_t *) RelinquishMagickMemory(runlength);
if (packet_info == (MemoryInfo *) NULL)
packet_info=RelinquishVirtualMemory(packet_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
packets=(unsigned char *) GetVirtualMemoryBlob(packet_info);
for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++)
offsets[i]=(int) ReadBlobMSBLong(image);
for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++)
{
runlength[i]=ReadBlobMSBLong(image);
if (runlength[i] > (4*(size_t) iris_info.columns+10))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
/*
Check data order.
*/
offset=0;
data_order=0;
for (y=0; ((y < (ssize_t) iris_info.rows) && (data_order == 0)); y++)
for (z=0; ((z < (ssize_t) iris_info.depth) && (data_order == 0)); z++)
{
if (offsets[y+z*iris_info.rows] < offset)
data_order=1;
offset=offsets[y+z*iris_info.rows];
}
offset=(ssize_t) TellBlob(image);
if (data_order == 1)
{
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
p=pixels;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
if (offset != offsets[y+z*iris_info.rows])
{
offset=offsets[y+z*iris_info.rows];
offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET);
}
count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows],
packets);
if (EOFBlob(image) != MagickFalse)
break;
offset+=(ssize_t) runlength[y+z*iris_info.rows];
status=SGIDecode(bytes_per_pixel,(ssize_t)
(runlength[y+z*iris_info.rows]/bytes_per_pixel),packets,
1L*iris_info.columns,p+bytes_per_pixel*z);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
p+=(iris_info.columns*4*bytes_per_pixel);
}
}
}
else
{
MagickOffsetType
position;
position=TellBlob(image);
p=pixels;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
if (offset != offsets[y+z*iris_info.rows])
{
offset=offsets[y+z*iris_info.rows];
offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET);
}
count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows],
packets);
if (EOFBlob(image) != MagickFalse)
break;
offset+=(ssize_t) runlength[y+z*iris_info.rows];
status=SGIDecode(bytes_per_pixel,(ssize_t)
(runlength[y+z*iris_info.rows]/bytes_per_pixel),packets,
1L*iris_info.columns,p+bytes_per_pixel*z);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
p+=(iris_info.columns*4*bytes_per_pixel);
}
offset=(ssize_t) SeekBlob(image,position,SEEK_SET);
}
packet_info=RelinquishVirtualMemory(packet_info);
runlength=(size_t *) RelinquishMagickMemory(runlength);
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
}
/*
Initialize image structure.
*/
image->matte=iris_info.depth == 4 ? MagickTrue : MagickFalse;
image->columns=iris_info.columns;
image->rows=iris_info.rows;
/*
Convert SGI raster image to pixel packets.
*/
if (image->storage_class == DirectClass)
{
/*
Convert SGI image to DirectClass pixel packets.
*/
if (bytes_per_pixel == 2)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*8*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleShortToQuantum((unsigned short)
((*(p+0) << 8) | (*(p+1)))));
SetPixelGreen(q,ScaleShortToQuantum((unsigned short)
((*(p+2) << 8) | (*(p+3)))));
SetPixelBlue(q,ScaleShortToQuantum((unsigned short)
((*(p+4) << 8) | (*(p+5)))));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelAlpha(q,ScaleShortToQuantum((unsigned short)
((*(p+6) << 8) | (*(p+7)))));
p+=8;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*4*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum(*p));
q->green=ScaleCharToQuantum(*(p+1));
q->blue=ScaleCharToQuantum(*(p+2));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelAlpha(q,ScaleCharToQuantum(*(p+3)));
p+=4;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
/*
Create grayscale map.
*/
if (AcquireImageColormap(image,image->colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Convert SGI image to PseudoClass pixel packets.
*/
if (bytes_per_pixel == 2)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*8*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=(*p << 8);
quantum|=(*(p+1));
SetPixelIndex(indexes+x,quantum);
p+=8;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*4*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,*p);
p+=4;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image);
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
iris_info.magic=ReadBlobMSBShort(image);
if (iris_info.magic == 0x01DA)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (iris_info.magic == 0x01DA);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message:
CWE ID: CWE-119 | static Image *ReadSGIImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixel_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
i,
x;
register unsigned char
*p;
SGIInfo
iris_info;
size_t
bytes_per_pixel,
quantum;
ssize_t
count,
y,
z;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read SGI raster header.
*/
iris_info.magic=ReadBlobMSBShort(image);
do
{
/*
Verify SGI identifier.
*/
if (iris_info.magic != 0x01DA)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.storage=(unsigned char) ReadBlobByte(image);
switch (iris_info.storage)
{
case 0x00: image->compression=NoCompression; break;
case 0x01: image->compression=RLECompression; break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
iris_info.bytes_per_pixel=(unsigned char) ReadBlobByte(image);
if ((iris_info.bytes_per_pixel == 0) || (iris_info.bytes_per_pixel > 2))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.dimension=ReadBlobMSBShort(image);
iris_info.columns=ReadBlobMSBShort(image);
iris_info.rows=ReadBlobMSBShort(image);
iris_info.depth=ReadBlobMSBShort(image);
if ((iris_info.depth == 0) || (iris_info.depth > 4))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.minimum_value=ReadBlobMSBLong(image);
iris_info.maximum_value=ReadBlobMSBLong(image);
iris_info.sans=ReadBlobMSBLong(image);
(void) ReadBlob(image,sizeof(iris_info.name),(unsigned char *)
iris_info.name);
iris_info.name[sizeof(iris_info.name)-1]='\0';
if (*iris_info.name != '\0')
(void) SetImageProperty(image,"label",iris_info.name);
iris_info.pixel_format=ReadBlobMSBLong(image);
if (iris_info.pixel_format != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,sizeof(iris_info.filler),iris_info.filler);
(void) count;
image->columns=iris_info.columns;
image->rows=iris_info.rows;
image->depth=(size_t) MagickMin(iris_info.depth,MAGICKCORE_QUANTUM_DEPTH);
if (iris_info.pixel_format == 0)
image->depth=(size_t) MagickMin((size_t) 8*
iris_info.bytes_per_pixel,MAGICKCORE_QUANTUM_DEPTH);
if (iris_info.depth < 3)
{
image->storage_class=PseudoClass;
image->colors=iris_info.bytes_per_pixel > 1 ? 65535 : 256;
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Allocate SGI pixels.
*/
bytes_per_pixel=(size_t) iris_info.bytes_per_pixel;
number_pixels=(MagickSizeType) iris_info.columns*iris_info.rows;
if ((4*bytes_per_pixel*number_pixels) != ((MagickSizeType) (size_t)
(4*bytes_per_pixel*number_pixels)))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info=AcquireVirtualMemory(iris_info.columns,iris_info.rows*4*
bytes_per_pixel*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((int) iris_info.storage != 0x01)
{
unsigned char
*scanline;
/*
Read standard image format.
*/
scanline=(unsigned char *) AcquireQuantumMemory(iris_info.columns,
bytes_per_pixel*sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
p=pixels+bytes_per_pixel*z;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
count=ReadBlob(image,bytes_per_pixel*iris_info.columns,scanline);
if (EOFBlob(image) != MagickFalse)
break;
if (bytes_per_pixel == 2)
for (x=0; x < (ssize_t) iris_info.columns; x++)
{
*p=scanline[2*x];
*(p+1)=scanline[2*x+1];
p+=8;
}
else
for (x=0; x < (ssize_t) iris_info.columns; x++)
{
*p=scanline[x];
p+=4;
}
}
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
}
else
{
MemoryInfo
*packet_info;
size_t
*runlength;
ssize_t
offset,
*offsets;
unsigned char
*packets;
unsigned int
data_order;
/*
Read runlength-encoded image format.
*/
offsets=(ssize_t *) AcquireQuantumMemory((size_t) iris_info.rows,
iris_info.depth*sizeof(*offsets));
runlength=(size_t *) AcquireQuantumMemory(iris_info.rows,
iris_info.depth*sizeof(*runlength));
packet_info=AcquireVirtualMemory((size_t) iris_info.columns+10UL,4UL*
sizeof(*packets));
if ((offsets == (ssize_t *) NULL) ||
(runlength == (size_t *) NULL) ||
(packet_info == (MemoryInfo *) NULL))
{
if (offsets == (ssize_t *) NULL)
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
if (runlength == (size_t *) NULL)
runlength=(size_t *) RelinquishMagickMemory(runlength);
if (packet_info == (MemoryInfo *) NULL)
packet_info=RelinquishVirtualMemory(packet_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
packets=(unsigned char *) GetVirtualMemoryBlob(packet_info);
for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++)
offsets[i]=(int) ReadBlobMSBLong(image);
for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++)
{
runlength[i]=ReadBlobMSBLong(image);
if (runlength[i] > (4*(size_t) iris_info.columns+10))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
/*
Check data order.
*/
offset=0;
data_order=0;
for (y=0; ((y < (ssize_t) iris_info.rows) && (data_order == 0)); y++)
for (z=0; ((z < (ssize_t) iris_info.depth) && (data_order == 0)); z++)
{
if (offsets[y+z*iris_info.rows] < offset)
data_order=1;
offset=offsets[y+z*iris_info.rows];
}
offset=(ssize_t) TellBlob(image);
if (data_order == 1)
{
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
p=pixels;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
if (offset != offsets[y+z*iris_info.rows])
{
offset=offsets[y+z*iris_info.rows];
offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET);
}
count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows],
packets);
if (EOFBlob(image) != MagickFalse)
break;
offset+=(ssize_t) runlength[y+z*iris_info.rows];
status=SGIDecode(bytes_per_pixel,(ssize_t)
(runlength[y+z*iris_info.rows]/bytes_per_pixel),packets,
1L*iris_info.columns,p+bytes_per_pixel*z);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
p+=(iris_info.columns*4*bytes_per_pixel);
}
}
}
else
{
MagickOffsetType
position;
position=TellBlob(image);
p=pixels;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
if (offset != offsets[y+z*iris_info.rows])
{
offset=offsets[y+z*iris_info.rows];
offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET);
}
count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows],
packets);
if (EOFBlob(image) != MagickFalse)
break;
offset+=(ssize_t) runlength[y+z*iris_info.rows];
status=SGIDecode(bytes_per_pixel,(ssize_t)
(runlength[y+z*iris_info.rows]/bytes_per_pixel),packets,
1L*iris_info.columns,p+bytes_per_pixel*z);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
p+=(iris_info.columns*4*bytes_per_pixel);
}
offset=(ssize_t) SeekBlob(image,position,SEEK_SET);
}
packet_info=RelinquishVirtualMemory(packet_info);
runlength=(size_t *) RelinquishMagickMemory(runlength);
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
}
/*
Initialize image structure.
*/
image->matte=iris_info.depth == 4 ? MagickTrue : MagickFalse;
image->columns=iris_info.columns;
image->rows=iris_info.rows;
/*
Convert SGI raster image to pixel packets.
*/
if (image->storage_class == DirectClass)
{
/*
Convert SGI image to DirectClass pixel packets.
*/
if (bytes_per_pixel == 2)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*8*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleShortToQuantum((unsigned short)
((*(p+0) << 8) | (*(p+1)))));
SetPixelGreen(q,ScaleShortToQuantum((unsigned short)
((*(p+2) << 8) | (*(p+3)))));
SetPixelBlue(q,ScaleShortToQuantum((unsigned short)
((*(p+4) << 8) | (*(p+5)))));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelAlpha(q,ScaleShortToQuantum((unsigned short)
((*(p+6) << 8) | (*(p+7)))));
p+=8;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*4*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum(*p));
q->green=ScaleCharToQuantum(*(p+1));
q->blue=ScaleCharToQuantum(*(p+2));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelAlpha(q,ScaleCharToQuantum(*(p+3)));
p+=4;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
/*
Create grayscale map.
*/
if (AcquireImageColormap(image,image->colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Convert SGI image to PseudoClass pixel packets.
*/
if (bytes_per_pixel == 2)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*8*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=(*p << 8);
quantum|=(*(p+1));
SetPixelIndex(indexes+x,quantum);
p+=8;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*4*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,*p);
p+=4;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image);
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
iris_info.magic=ReadBlobMSBShort(image);
if (iris_info.magic == 0x01DA)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (iris_info.magic == 0x01DA);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 168,604 |
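The only functional change in the fixed `ReadSGIImage` above is the added `SetImageExtent` call, which validates the header-supplied `columns` and `rows` before any pixel buffer is sized. The sketch below captures that validate-dimensions-first idea with hypothetical limits; the real checks live inside ImageMagick's `SetImageExtent`:

```c
#include <stdint.h>
#include <stdlib.h>

#define MAX_DIM   (1u << 16)          /* hypothetical per-axis limit */
#define MAX_BYTES ((uint64_t)1 << 30) /* hypothetical 1 GiB resource cap */

/* Reject absurd header dimensions before computing a buffer size,
 * so a hostile file cannot force a wrapped or oversized allocation. */
static unsigned char *alloc_pixels(uint32_t columns, uint32_t rows,
                                   uint32_t bytes_per_pixel)
{
    if (columns == 0 || rows == 0 || columns > MAX_DIM || rows > MAX_DIM)
        return NULL;
    /* columns * rows <= 2^32 after the check above, so this
     * 64-bit product cannot wrap even for bytes_per_pixel near 2^32 */
    uint64_t need = (uint64_t)columns * rows * bytes_per_pixel;
    if (need == 0 || need > MAX_BYTES)
        return NULL;
    return malloc((size_t)need);
}
```

Doing the check once, before the decode loops run, is what lets the later `QueueAuthenticPixels` calls trust `image->columns` and `image->rows`.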
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void APIPermissionInfo::RegisterAllPermissions(
PermissionsInfo* info) {
struct PermissionRegistration {
APIPermission::ID id;
const char* name;
int flags;
int l10n_message_id;
PermissionMessage::ID message_id;
APIPermissionConstructor constructor;
} PermissionsToRegister[] = {
{ APIPermission::kBackground, "background" },
{ APIPermission::kClipboardRead, "clipboardRead", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_CLIPBOARD,
PermissionMessage::kClipboard },
{ APIPermission::kClipboardWrite, "clipboardWrite" },
{ APIPermission::kDeclarativeWebRequest, "declarativeWebRequest" },
{ APIPermission::kDownloads, "downloads", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_DOWNLOADS,
PermissionMessage::kDownloads },
{ APIPermission::kExperimental, "experimental", kFlagCannotBeOptional },
{ APIPermission::kGeolocation, "geolocation", kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_GEOLOCATION,
PermissionMessage::kGeolocation },
{ APIPermission::kNotification, "notifications" },
{ APIPermission::kUnlimitedStorage, "unlimitedStorage",
kFlagCannotBeOptional },
{ APIPermission::kAppNotifications, "appNotifications" },
{ APIPermission::kActiveTab, "activeTab" },
{ APIPermission::kAlarms, "alarms" },
{ APIPermission::kBookmark, "bookmarks", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BOOKMARKS,
PermissionMessage::kBookmarks },
{ APIPermission::kBrowsingData, "browsingData" },
{ APIPermission::kContentSettings, "contentSettings", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_CONTENT_SETTINGS,
PermissionMessage::kContentSettings },
{ APIPermission::kContextMenus, "contextMenus" },
{ APIPermission::kCookie, "cookies" },
{ APIPermission::kFileBrowserHandler, "fileBrowserHandler",
kFlagCannotBeOptional },
{ APIPermission::kFontSettings, "fontSettings", kFlagCannotBeOptional },
{ APIPermission::kHistory, "history", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BROWSING_HISTORY,
PermissionMessage::kBrowsingHistory },
{ APIPermission::kIdle, "idle" },
{ APIPermission::kInput, "input", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_INPUT,
PermissionMessage::kInput },
{ APIPermission::kManagement, "management", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_MANAGEMENT,
PermissionMessage::kManagement },
{ APIPermission::kPrivacy, "privacy", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_PRIVACY,
PermissionMessage::kPrivacy },
{ APIPermission::kStorage, "storage" },
{ APIPermission::kSyncFileSystem, "syncFileSystem" },
{ APIPermission::kTab, "tabs", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_TABS,
PermissionMessage::kTabs },
{ APIPermission::kTopSites, "topSites", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BROWSING_HISTORY,
PermissionMessage::kBrowsingHistory },
{ APIPermission::kTts, "tts", 0, kFlagCannotBeOptional },
{ APIPermission::kTtsEngine, "ttsEngine", kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_TTS_ENGINE,
PermissionMessage::kTtsEngine },
{ APIPermission::kWebNavigation, "webNavigation", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_TABS, PermissionMessage::kTabs },
{ APIPermission::kWebRequest, "webRequest" },
{ APIPermission::kWebRequestBlocking, "webRequestBlocking" },
{ APIPermission::kWebView, "webview", kFlagCannotBeOptional },
{ APIPermission::kBookmarkManagerPrivate, "bookmarkManagerPrivate",
kFlagCannotBeOptional },
{ APIPermission::kChromeosInfoPrivate, "chromeosInfoPrivate",
kFlagCannotBeOptional },
{ APIPermission::kFileBrowserHandlerInternal, "fileBrowserHandlerInternal",
kFlagCannotBeOptional },
{ APIPermission::kFileBrowserPrivate, "fileBrowserPrivate",
kFlagCannotBeOptional },
{ APIPermission::kManagedModePrivate, "managedModePrivate",
kFlagCannotBeOptional },
{ APIPermission::kMediaPlayerPrivate, "mediaPlayerPrivate",
kFlagCannotBeOptional },
{ APIPermission::kMetricsPrivate, "metricsPrivate",
kFlagCannotBeOptional },
{ APIPermission::kSystemPrivate, "systemPrivate",
kFlagCannotBeOptional },
{ APIPermission::kCloudPrintPrivate, "cloudPrintPrivate",
kFlagCannotBeOptional },
{ APIPermission::kInputMethodPrivate, "inputMethodPrivate",
kFlagCannotBeOptional },
{ APIPermission::kEchoPrivate, "echoPrivate", kFlagCannotBeOptional },
{ APIPermission::kRtcPrivate, "rtcPrivate", kFlagCannotBeOptional },
{ APIPermission::kTerminalPrivate, "terminalPrivate",
kFlagCannotBeOptional },
{ APIPermission::kWallpaperPrivate, "wallpaperPrivate",
kFlagCannotBeOptional },
{ APIPermission::kWebRequestInternal, "webRequestInternal" },
{ APIPermission::kWebSocketProxyPrivate, "webSocketProxyPrivate",
kFlagCannotBeOptional },
{ APIPermission::kWebstorePrivate, "webstorePrivate",
kFlagCannotBeOptional },
{ APIPermission::kMediaGalleriesPrivate, "mediaGalleriesPrivate",
kFlagCannotBeOptional },
{ APIPermission::kDebugger, "debugger",
kFlagImpliesFullURLAccess | kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_DEBUGGER,
PermissionMessage::kDebugger },
{ APIPermission::kDevtools, "devtools",
kFlagImpliesFullURLAccess | kFlagCannotBeOptional },
{ APIPermission::kPageCapture, "pageCapture",
kFlagImpliesFullURLAccess },
{ APIPermission::kTabCapture, "tabCapture",
kFlagImpliesFullURLAccess },
{ APIPermission::kPlugin, "plugin",
kFlagImpliesFullURLAccess | kFlagImpliesFullAccess |
kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_FULL_ACCESS,
PermissionMessage::kFullAccess },
{ APIPermission::kProxy, "proxy",
kFlagImpliesFullURLAccess | kFlagCannotBeOptional },
{ APIPermission::kSerial, "serial", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_SERIAL,
PermissionMessage::kSerial },
{ APIPermission::kSocket, "socket", kFlagCannotBeOptional, 0,
PermissionMessage::kNone, &::CreateAPIPermission<SocketPermission> },
{ APIPermission::kAppCurrentWindowInternal, "app.currentWindowInternal" },
{ APIPermission::kAppRuntime, "app.runtime" },
{ APIPermission::kAppWindow, "app.window" },
{ APIPermission::kAudioCapture, "audioCapture", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_AUDIO_CAPTURE,
PermissionMessage::kAudioCapture },
{ APIPermission::kVideoCapture, "videoCapture", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_VIDEO_CAPTURE,
PermissionMessage::kVideoCapture },
{ APIPermission::kFileSystem, "fileSystem" },
{ APIPermission::kFileSystemWrite, "fileSystem.write", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_FILE_SYSTEM_WRITE,
PermissionMessage::kFileSystemWrite },
{ APIPermission::kMediaGalleries, "mediaGalleries" },
{ APIPermission::kMediaGalleriesRead, "mediaGalleries.read" },
{ APIPermission::kMediaGalleriesAllAutoDetected,
"mediaGalleries.allAutoDetected", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_MEDIA_GALLERIES_ALL_GALLERIES,
PermissionMessage::kMediaGalleriesAllGalleries },
{ APIPermission::kPushMessaging, "pushMessaging", kFlagCannotBeOptional },
{ APIPermission::kBluetooth, "bluetooth", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BLUETOOTH,
PermissionMessage::kBluetooth },
{ APIPermission::kBluetoothDevice, "bluetoothDevice",
kFlagNone, 0, PermissionMessage::kNone,
&::CreateAPIPermission<BluetoothDevicePermission> },
{ APIPermission::kUsb, "usb", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_USB,
PermissionMessage::kUsb },
{ APIPermission::kSystemIndicator, "systemIndicator", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_SYSTEM_INDICATOR,
PermissionMessage::kSystemIndicator },
{ APIPermission::kPointerLock, "pointerLock" },
};
for (size_t i = 0; i < ARRAYSIZE_UNSAFE(PermissionsToRegister); ++i) {
const PermissionRegistration& pr = PermissionsToRegister[i];
info->RegisterPermission(
pr.id, pr.name, pr.l10n_message_id,
pr.message_id ? pr.message_id : PermissionMessage::kNone,
pr.flags,
pr.constructor);
}
info->RegisterAlias("unlimitedStorage", kOldUnlimitedStoragePermission);
info->RegisterAlias("tabs", kWindowsPermission);
}
Commit Message: DIAL (Discovery and Launch protocol) extension API skeleton.
This implements the skeleton for a new Chrome extension API for local device discovery. The API will initially be restricted to whitelisted extensions only. It will allow extensions to receive events from a DIAL service running within Chrome, which notifies them when devices are discovered on the local network.
Spec available here:
https://docs.google.com/a/google.com/document/d/14FI-VKWrsMG7pIy3trgM3ybnKS-o5TULkt8itiBNXlQ/edit
BUG=163288
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11444020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@172243 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void APIPermissionInfo::RegisterAllPermissions(
PermissionsInfo* info) {
struct PermissionRegistration {
APIPermission::ID id;
const char* name;
int flags;
int l10n_message_id;
PermissionMessage::ID message_id;
APIPermissionConstructor constructor;
} PermissionsToRegister[] = {
{ APIPermission::kBackground, "background" },
{ APIPermission::kClipboardRead, "clipboardRead", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_CLIPBOARD,
PermissionMessage::kClipboard },
{ APIPermission::kClipboardWrite, "clipboardWrite" },
{ APIPermission::kDeclarativeWebRequest, "declarativeWebRequest" },
{ APIPermission::kDownloads, "downloads", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_DOWNLOADS,
PermissionMessage::kDownloads },
{ APIPermission::kExperimental, "experimental", kFlagCannotBeOptional },
{ APIPermission::kGeolocation, "geolocation", kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_GEOLOCATION,
PermissionMessage::kGeolocation },
{ APIPermission::kNotification, "notifications" },
{ APIPermission::kUnlimitedStorage, "unlimitedStorage",
kFlagCannotBeOptional },
{ APIPermission::kAppNotifications, "appNotifications" },
{ APIPermission::kActiveTab, "activeTab" },
{ APIPermission::kAlarms, "alarms" },
{ APIPermission::kBookmark, "bookmarks", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BOOKMARKS,
PermissionMessage::kBookmarks },
{ APIPermission::kBrowsingData, "browsingData" },
{ APIPermission::kContentSettings, "contentSettings", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_CONTENT_SETTINGS,
PermissionMessage::kContentSettings },
{ APIPermission::kContextMenus, "contextMenus" },
{ APIPermission::kCookie, "cookies" },
{ APIPermission::kFileBrowserHandler, "fileBrowserHandler",
kFlagCannotBeOptional },
{ APIPermission::kFontSettings, "fontSettings", kFlagCannotBeOptional },
{ APIPermission::kHistory, "history", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BROWSING_HISTORY,
PermissionMessage::kBrowsingHistory },
{ APIPermission::kIdle, "idle" },
{ APIPermission::kInput, "input", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_INPUT,
PermissionMessage::kInput },
{ APIPermission::kManagement, "management", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_MANAGEMENT,
PermissionMessage::kManagement },
{ APIPermission::kPrivacy, "privacy", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_PRIVACY,
PermissionMessage::kPrivacy },
{ APIPermission::kStorage, "storage" },
{ APIPermission::kSyncFileSystem, "syncFileSystem" },
{ APIPermission::kTab, "tabs", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_TABS,
PermissionMessage::kTabs },
{ APIPermission::kTopSites, "topSites", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BROWSING_HISTORY,
PermissionMessage::kBrowsingHistory },
{ APIPermission::kTts, "tts", 0, kFlagCannotBeOptional },
{ APIPermission::kTtsEngine, "ttsEngine", kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_TTS_ENGINE,
PermissionMessage::kTtsEngine },
{ APIPermission::kWebNavigation, "webNavigation", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_TABS, PermissionMessage::kTabs },
{ APIPermission::kWebRequest, "webRequest" },
{ APIPermission::kWebRequestBlocking, "webRequestBlocking" },
{ APIPermission::kWebView, "webview", kFlagCannotBeOptional },
{ APIPermission::kBookmarkManagerPrivate, "bookmarkManagerPrivate",
kFlagCannotBeOptional },
{ APIPermission::kChromeosInfoPrivate, "chromeosInfoPrivate",
kFlagCannotBeOptional },
{ APIPermission::kDial, "dial", kFlagCannotBeOptional },
{ APIPermission::kFileBrowserHandlerInternal, "fileBrowserHandlerInternal",
kFlagCannotBeOptional },
{ APIPermission::kFileBrowserPrivate, "fileBrowserPrivate",
kFlagCannotBeOptional },
{ APIPermission::kManagedModePrivate, "managedModePrivate",
kFlagCannotBeOptional },
{ APIPermission::kMediaPlayerPrivate, "mediaPlayerPrivate",
kFlagCannotBeOptional },
{ APIPermission::kMetricsPrivate, "metricsPrivate",
kFlagCannotBeOptional },
{ APIPermission::kSystemPrivate, "systemPrivate",
kFlagCannotBeOptional },
{ APIPermission::kCloudPrintPrivate, "cloudPrintPrivate",
kFlagCannotBeOptional },
{ APIPermission::kInputMethodPrivate, "inputMethodPrivate",
kFlagCannotBeOptional },
{ APIPermission::kEchoPrivate, "echoPrivate", kFlagCannotBeOptional },
{ APIPermission::kRtcPrivate, "rtcPrivate", kFlagCannotBeOptional },
{ APIPermission::kTerminalPrivate, "terminalPrivate",
kFlagCannotBeOptional },
{ APIPermission::kWallpaperPrivate, "wallpaperPrivate",
kFlagCannotBeOptional },
{ APIPermission::kWebRequestInternal, "webRequestInternal" },
{ APIPermission::kWebSocketProxyPrivate, "webSocketProxyPrivate",
kFlagCannotBeOptional },
{ APIPermission::kWebstorePrivate, "webstorePrivate",
kFlagCannotBeOptional },
{ APIPermission::kMediaGalleriesPrivate, "mediaGalleriesPrivate",
kFlagCannotBeOptional },
{ APIPermission::kDebugger, "debugger",
kFlagImpliesFullURLAccess | kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_DEBUGGER,
PermissionMessage::kDebugger },
{ APIPermission::kDevtools, "devtools",
kFlagImpliesFullURLAccess | kFlagCannotBeOptional },
{ APIPermission::kPageCapture, "pageCapture",
kFlagImpliesFullURLAccess },
{ APIPermission::kTabCapture, "tabCapture",
kFlagImpliesFullURLAccess },
{ APIPermission::kPlugin, "plugin",
kFlagImpliesFullURLAccess | kFlagImpliesFullAccess |
kFlagCannotBeOptional,
IDS_EXTENSION_PROMPT_WARNING_FULL_ACCESS,
PermissionMessage::kFullAccess },
{ APIPermission::kProxy, "proxy",
kFlagImpliesFullURLAccess | kFlagCannotBeOptional },
{ APIPermission::kSerial, "serial", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_SERIAL,
PermissionMessage::kSerial },
{ APIPermission::kSocket, "socket", kFlagCannotBeOptional, 0,
PermissionMessage::kNone, &::CreateAPIPermission<SocketPermission> },
{ APIPermission::kAppCurrentWindowInternal, "app.currentWindowInternal" },
{ APIPermission::kAppRuntime, "app.runtime" },
{ APIPermission::kAppWindow, "app.window" },
{ APIPermission::kAudioCapture, "audioCapture", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_AUDIO_CAPTURE,
PermissionMessage::kAudioCapture },
{ APIPermission::kVideoCapture, "videoCapture", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_VIDEO_CAPTURE,
PermissionMessage::kVideoCapture },
{ APIPermission::kFileSystem, "fileSystem" },
{ APIPermission::kFileSystemWrite, "fileSystem.write", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_FILE_SYSTEM_WRITE,
PermissionMessage::kFileSystemWrite },
{ APIPermission::kMediaGalleries, "mediaGalleries" },
{ APIPermission::kMediaGalleriesRead, "mediaGalleries.read" },
{ APIPermission::kMediaGalleriesAllAutoDetected,
"mediaGalleries.allAutoDetected", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_MEDIA_GALLERIES_ALL_GALLERIES,
PermissionMessage::kMediaGalleriesAllGalleries },
{ APIPermission::kPushMessaging, "pushMessaging", kFlagCannotBeOptional },
{ APIPermission::kBluetooth, "bluetooth", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_BLUETOOTH,
PermissionMessage::kBluetooth },
{ APIPermission::kBluetoothDevice, "bluetoothDevice",
kFlagNone, 0, PermissionMessage::kNone,
&::CreateAPIPermission<BluetoothDevicePermission> },
{ APIPermission::kUsb, "usb", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_USB,
PermissionMessage::kUsb },
{ APIPermission::kSystemIndicator, "systemIndicator", kFlagNone,
IDS_EXTENSION_PROMPT_WARNING_SYSTEM_INDICATOR,
PermissionMessage::kSystemIndicator },
{ APIPermission::kPointerLock, "pointerLock" },
};
for (size_t i = 0; i < ARRAYSIZE_UNSAFE(PermissionsToRegister); ++i) {
const PermissionRegistration& pr = PermissionsToRegister[i];
info->RegisterPermission(
pr.id, pr.name, pr.l10n_message_id,
pr.message_id ? pr.message_id : PermissionMessage::kNone,
pr.flags,
pr.constructor);
}
info->RegisterAlias("unlimitedStorage", kOldUnlimitedStoragePermission);
info->RegisterAlias("tabs", kWindowsPermission);
}
| 171,340 |
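A note on the pattern in the record above: the fixed RegisterAllPermissions is table-driven — each permission is one row in a static array, and a single loop bounded by ARRAYSIZE_UNSAFE registers every row, so the DIAL fix reduces to adding one entry to the table. Below is a minimal, self-contained C sketch of that table-driven registration pattern; every name in it is invented for illustration, and nothing here is Chromium API.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical permission entry mirroring the static table above. */
struct perm_entry {
    int id;
    const char *name;
    unsigned flags;
};

#define FLAG_NONE         0u
#define FLAG_NOT_OPTIONAL (1u << 0)

static const struct perm_entry kPerms[] = {
    { 1, "tabs",        FLAG_NONE },
    { 2, "debugger",    FLAG_NOT_OPTIONAL },
    { 3, "geolocation", FLAG_NOT_OPTIONAL },
};

static void register_perm(const struct perm_entry *p)
{
    printf("registering %s (id=%d, flags=%#x)\n", p->name, p->id, p->flags);
}

int main(void)
{
    /* The sizeof-based bound plays the role of ARRAYSIZE_UNSAFE above. */
    for (size_t i = 0; i < sizeof(kPerms) / sizeof(kPerms[0]); ++i)
        register_perm(&kPerms[i]);
    return 0;
}

Deriving the loop bound from sizeof keeps the loop and the table from drifting apart as rows are added or removed.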
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int get_client_hello(SSL *s)
{
int i, n;
unsigned long len;
unsigned char *p;
STACK_OF(SSL_CIPHER) *cs; /* a stack of SSL_CIPHERS */
STACK_OF(SSL_CIPHER) *cl; /* the ones we want to use */
STACK_OF(SSL_CIPHER) *prio, *allow;
int z;
/*
* This is a bit of a hack to check for the correct packet type the first
* time round.
*/
if (s->state == SSL2_ST_GET_CLIENT_HELLO_A) {
s->first_packet = 1;
s->state = SSL2_ST_GET_CLIENT_HELLO_B;
}
p = (unsigned char *)s->init_buf->data;
if (s->state == SSL2_ST_GET_CLIENT_HELLO_B) {
i = ssl2_read(s, (char *)&(p[s->init_num]), 9 - s->init_num);
if (i < (9 - s->init_num))
return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i));
s->init_num = 9;
if (*(p++) != SSL2_MT_CLIENT_HELLO) {
if (p[-1] != SSL2_MT_ERROR) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_READ_WRONG_PACKET_TYPE);
} else
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_PEER_ERROR);
return (-1);
}
n2s(p, i);
if (i < s->version)
s->version = i;
n2s(p, i);
s->s2->tmp.cipher_spec_length = i;
n2s(p, i);
s->s2->tmp.session_id_length = i;
if ((i < 0) || (i > SSL_MAX_SSL_SESSION_ID_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH);
return -1;
}
n2s(p, i);
s->s2->challenge_length = i;
if ((i < SSL2_MIN_CHALLENGE_LENGTH) ||
(i > SSL2_MAX_CHALLENGE_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_INVALID_CHALLENGE_LENGTH);
return (-1);
}
s->state = SSL2_ST_GET_CLIENT_HELLO_C;
}
/* SSL2_ST_GET_CLIENT_HELLO_C */
p = (unsigned char *)s->init_buf->data;
len =
9 + (unsigned long)s->s2->tmp.cipher_spec_length +
(unsigned long)s->s2->challenge_length +
(unsigned long)s->s2->tmp.session_id_length;
if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_MESSAGE_TOO_LONG);
return -1;
}
n = (int)len - s->init_num;
i = ssl2_read(s, (char *)&(p[s->init_num]), n);
if (i != n)
return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i));
if (s->msg_callback) {
/* CLIENT-HELLO */
s->msg_callback(0, s->version, 0, p, (size_t)len, s,
s->msg_callback_arg);
}
p += 9;
/*
* get session-id before cipher stuff so we can get out session structure
* if it is cached
*/
/* session-id */
if ((s->s2->tmp.session_id_length != 0) &&
(s->s2->tmp.session_id_length != SSL2_SSL_SESSION_ID_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_BAD_SSL_SESSION_ID_LENGTH);
return (-1);
}
if (s->s2->tmp.session_id_length == 0) {
if (!ssl_get_new_session(s, 1)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
}
} else {
i = ssl_get_prev_session(s, &(p[s->s2->tmp.cipher_spec_length]),
s->s2->tmp.session_id_length, NULL);
if (i == 1) { /* previous session */
s->hit = 1;
} else if (i == -1) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
} else {
if (s->cert == NULL) {
ssl2_return_error(s, SSL2_PE_NO_CERTIFICATE);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CERTIFICATE_SET);
return (-1);
}
if (!ssl_get_new_session(s, 1)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
}
}
}
if (!s->hit) {
cs = ssl_bytes_to_cipher_list(s, p, s->s2->tmp.cipher_spec_length,
&s->session->ciphers);
if (cs == NULL)
goto mem_err;
cl = SSL_get_ciphers(s);
if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
prio = sk_SSL_CIPHER_dup(cl);
if (prio == NULL)
goto mem_err;
allow = cs;
} else {
prio = cs;
allow = cl;
}
for (z = 0; z < sk_SSL_CIPHER_num(prio); z++) {
if (sk_SSL_CIPHER_find(allow, sk_SSL_CIPHER_value(prio, z)) < 0) {
(void)sk_SSL_CIPHER_delete(prio, z);
z--;
}
}
        if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
            sk_SSL_CIPHER_free(s->session->ciphers);
            s->session->ciphers = prio;
        }
    }
    /*
     * s->session->ciphers should now have a list of ciphers that are on
     * both the client and server. This list is ordered by the order the
     * client sent the ciphers.
     */
if (s->s2->challenge_length > sizeof s->s2->challenge) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
return -1;
}
memcpy(s->s2->challenge, p, (unsigned int)s->s2->challenge_length);
return (1);
mem_err:
SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_MALLOC_FAILURE);
return (0);
}
Commit Message:
CWE ID: CWE-310 | static int get_client_hello(SSL *s)
{
int i, n;
unsigned long len;
unsigned char *p;
STACK_OF(SSL_CIPHER) *cs; /* a stack of SSL_CIPHERS */
STACK_OF(SSL_CIPHER) *cl; /* the ones we want to use */
STACK_OF(SSL_CIPHER) *prio, *allow;
int z;
/*
* This is a bit of a hack to check for the correct packet type the first
* time round.
*/
if (s->state == SSL2_ST_GET_CLIENT_HELLO_A) {
s->first_packet = 1;
s->state = SSL2_ST_GET_CLIENT_HELLO_B;
}
p = (unsigned char *)s->init_buf->data;
if (s->state == SSL2_ST_GET_CLIENT_HELLO_B) {
i = ssl2_read(s, (char *)&(p[s->init_num]), 9 - s->init_num);
if (i < (9 - s->init_num))
return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i));
s->init_num = 9;
if (*(p++) != SSL2_MT_CLIENT_HELLO) {
if (p[-1] != SSL2_MT_ERROR) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_READ_WRONG_PACKET_TYPE);
} else
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_PEER_ERROR);
return (-1);
}
n2s(p, i);
if (i < s->version)
s->version = i;
n2s(p, i);
s->s2->tmp.cipher_spec_length = i;
n2s(p, i);
s->s2->tmp.session_id_length = i;
if ((i < 0) || (i > SSL_MAX_SSL_SESSION_ID_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH);
return -1;
}
n2s(p, i);
s->s2->challenge_length = i;
if ((i < SSL2_MIN_CHALLENGE_LENGTH) ||
(i > SSL2_MAX_CHALLENGE_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_INVALID_CHALLENGE_LENGTH);
return (-1);
}
s->state = SSL2_ST_GET_CLIENT_HELLO_C;
}
/* SSL2_ST_GET_CLIENT_HELLO_C */
p = (unsigned char *)s->init_buf->data;
len =
9 + (unsigned long)s->s2->tmp.cipher_spec_length +
(unsigned long)s->s2->challenge_length +
(unsigned long)s->s2->tmp.session_id_length;
if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_MESSAGE_TOO_LONG);
return -1;
}
n = (int)len - s->init_num;
i = ssl2_read(s, (char *)&(p[s->init_num]), n);
if (i != n)
return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i));
if (s->msg_callback) {
/* CLIENT-HELLO */
s->msg_callback(0, s->version, 0, p, (size_t)len, s,
s->msg_callback_arg);
}
p += 9;
/*
* get session-id before cipher stuff so we can get out session structure
* if it is cached
*/
/* session-id */
if ((s->s2->tmp.session_id_length != 0) &&
(s->s2->tmp.session_id_length != SSL2_SSL_SESSION_ID_LENGTH)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_BAD_SSL_SESSION_ID_LENGTH);
return (-1);
}
if (s->s2->tmp.session_id_length == 0) {
if (!ssl_get_new_session(s, 1)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
}
} else {
i = ssl_get_prev_session(s, &(p[s->s2->tmp.cipher_spec_length]),
s->s2->tmp.session_id_length, NULL);
if (i == 1) { /* previous session */
s->hit = 1;
} else if (i == -1) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
} else {
if (s->cert == NULL) {
ssl2_return_error(s, SSL2_PE_NO_CERTIFICATE);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CERTIFICATE_SET);
return (-1);
}
if (!ssl_get_new_session(s, 1)) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
return (-1);
}
}
}
if (!s->hit) {
cs = ssl_bytes_to_cipher_list(s, p, s->s2->tmp.cipher_spec_length,
&s->session->ciphers);
if (cs == NULL)
goto mem_err;
cl = SSL_get_ciphers(s);
if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
prio = sk_SSL_CIPHER_dup(cl);
if (prio == NULL)
goto mem_err;
allow = cs;
} else {
prio = cs;
allow = cl;
}
/* Generate list of SSLv2 ciphers shared between client and server */
for (z = 0; z < sk_SSL_CIPHER_num(prio); z++) {
const SSL_CIPHER *cp = sk_SSL_CIPHER_value(prio, z);
if ((cp->algorithm_ssl & SSL_SSLV2) == 0 ||
sk_SSL_CIPHER_find(allow, cp) < 0) {
(void)sk_SSL_CIPHER_delete(prio, z);
z--;
}
}
        if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
            sk_SSL_CIPHER_free(s->session->ciphers);
            s->session->ciphers = prio;
        }
    }
/* Make sure we have at least one cipher in common */
if (sk_SSL_CIPHER_num(s->session->ciphers) == 0) {
ssl2_return_error(s, SSL2_PE_NO_CIPHER);
SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CIPHER_MATCH);
return -1;
}
    /*
     * s->session->ciphers should now have a list of ciphers that are on
     * both the client and server. This list is ordered by the order the
     * client sent the ciphers.
     */
if (s->s2->challenge_length > sizeof s->s2->challenge) {
ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR);
SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
return -1;
}
memcpy(s->s2->challenge, p, (unsigned int)s->s2->challenge_length);
return (1);
mem_err:
SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_MALLOC_FAILURE);
return (0);
}
| 165,321 |
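The CWE-310 fix in the record above does two things: the shared-cipher loop now drops any entry that is not SSLv2-capable, and the handshake is aborted when the intersection comes out empty. Below is a self-contained C sketch of that intersect-and-reject step, with toy types standing in for OpenSSL's — nothing in it is OpenSSL API.

#include <stdio.h>
#include <string.h>

#define ALG_SSLV2 (1u << 0)

struct cipher { const char *name; unsigned algo; };

static int in_list(const struct cipher *l, int n, const char *name)
{
    for (int i = 0; i < n; i++)
        if (strcmp(l[i].name, name) == 0)
            return 1;
    return 0;
}

/* Compact `prio` in place, keeping only SSLv2-capable ciphers the peer
 * also allows; returns the number kept. Mirrors the filtering loop and
 * the "no cipher in common" abort added in the fixed function. */
static int intersect_sslv2(struct cipher *prio, int n,
                           const struct cipher *allow, int m)
{
    int kept = 0;
    for (int i = 0; i < n; i++)
        if ((prio[i].algo & ALG_SSLV2) && in_list(allow, m, prio[i].name))
            prio[kept++] = prio[i];
    return kept;
}

int main(void)
{
    struct cipher server[] = { { "RC4-MD5", ALG_SSLV2 },
                               { "AES128-SHA", 0 /* TLS-only */ } };
    struct cipher client[] = { { "RC4-MD5", ALG_SSLV2 } };
    int n = intersect_sslv2(server, 2, client, 1);
    if (n == 0) {
        fprintf(stderr, "abort handshake: no cipher match\n");
        return 1;
    }
    printf("negotiated %s\n", server[0].name);
    return 0;
}

Compacting prio in place mirrors the effect of sk_SSL_CIPHER_delete, which shrinks the priority list as unacceptable entries are found.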
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int SeekHead::GetCount() const
{
return m_entry_count;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | int SeekHead::GetCount() const
const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const {
if (idx < 0)
return 0;
if (idx >= m_void_element_count)
return 0;
return m_void_elements + idx;
}
| 174,297 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void ncq_err(NCQTransferState *ncq_tfs)
{
IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
ide_state->error = ABRT_ERR;
ide_state->status = READY_STAT | ERR_STAT;
ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
}
Commit Message:
CWE ID: | static void ncq_err(NCQTransferState *ncq_tfs)
{
IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
ide_state->error = ABRT_ERR;
ide_state->status = READY_STAT | ERR_STAT;
ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
ncq_tfs->used = 0;
}
| 165,223 |
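The QEMU fix above is a single line — clearing the slot's used flag on the error path — but the failure mode it prevents is easy to show. Here is a toy C sketch, assuming a fixed pool of command slots; all types below are hypothetical stand-ins for QEMU's NCQTransferState.

#include <stdio.h>

#define NCQ_SLOTS 4

/* Toy transfer slot; `used` marks an in-flight command. */
struct slot { int used; int tag; };

static struct slot slots[NCQ_SLOTS];

static struct slot *slot_alloc(int tag)
{
    for (int i = 0; i < NCQ_SLOTS; i++)
        if (!slots[i].used) {
            slots[i].used = 1;
            slots[i].tag = tag;
            return &slots[i];
        }
    return NULL;  /* all slots stuck in-flight */
}

/* The fix: an error must release the slot, otherwise it stays "used"
 * forever and the pool of free slots slowly drains. */
static void slot_error(struct slot *s)
{
    fprintf(stderr, "tag %d failed\n", s->tag);
    s->used = 0;  /* the one line the QEMU patch adds, in toy form */
}

int main(void)
{
    for (int t = 0; t < 10; t++) {
        struct slot *s = slot_alloc(t);
        if (!s) { puts("slot leak: allocation failed"); return 1; }
        slot_error(s);  /* with the reset, slots stay reusable */
    }
    puts("all commands retired cleanly");
    return 0;
}

Without the reset in slot_error, the fifth command in this loop would already find no free slot.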
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: char *suhosin_decrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key, char **where TSRMLS_DC)
{
char buffer[4096];
char buffer2[4096];
int o_name_len = name_len;
char *buf = buffer, *buf2 = buffer2, *d, *d_url;
int l;
if (name_len > sizeof(buffer)-2) {
buf = estrndup(name, name_len);
} else {
memcpy(buf, name, name_len);
buf[name_len] = 0;
}
name_len = php_url_decode(buf, name_len);
normalize_varname(buf);
name_len = strlen(buf);
if (SUHOSIN_G(cookie_plainlist)) {
if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) {
decrypt_return_plain:
if (buf != buffer) {
efree(buf);
}
memcpy(*where, name, o_name_len);
*where += o_name_len;
**where = '='; *where +=1;
memcpy(*where, value, value_len);
*where += value_len;
return *where;
}
} else if (SUHOSIN_G(cookie_cryptlist)) {
if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) {
goto decrypt_return_plain;
}
}
if (strlen(value) <= sizeof(buffer2)-2) {
memcpy(buf2, value, value_len);
buf2[value_len] = 0;
} else {
buf2 = estrndup(value, value_len);
}
value_len = php_url_decode(buf2, value_len);
d = suhosin_decrypt_string(buf2, value_len, buf, name_len, key, &l, SUHOSIN_G(cookie_checkraddr) TSRMLS_CC);
if (d == NULL) {
goto skip_cookie;
}
d_url = php_url_encode(d, l, &l);
efree(d);
memcpy(*where, name, o_name_len);
*where += o_name_len;
**where = '=';*where += 1;
memcpy(*where, d_url, l);
*where += l;
efree(d_url);
skip_cookie:
if (buf != buffer) {
efree(buf);
}
if (buf2 != buffer2) {
efree(buf2);
}
return *where;
}
Commit Message: Fixed stack based buffer overflow in transparent cookie encryption (see separate advisory)
CWE ID: CWE-119 | char *suhosin_decrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key, char **where TSRMLS_DC)
{
int o_name_len = name_len;
char *buf, *buf2, *d, *d_url;
int l;
buf = estrndup(name, name_len);
name_len = php_url_decode(buf, name_len);
normalize_varname(buf);
name_len = strlen(buf);
if (SUHOSIN_G(cookie_plainlist)) {
if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) {
decrypt_return_plain:
efree(buf);
memcpy(*where, name, o_name_len);
*where += o_name_len;
**where = '='; *where +=1;
memcpy(*where, value, value_len);
*where += value_len;
return *where;
}
} else if (SUHOSIN_G(cookie_cryptlist)) {
if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) {
goto decrypt_return_plain;
}
}
buf2 = estrndup(value, value_len);
value_len = php_url_decode(buf2, value_len);
d = suhosin_decrypt_string(buf2, value_len, buf, name_len, key, &l, SUHOSIN_G(cookie_checkraddr) TSRMLS_CC);
if (d == NULL) {
goto skip_cookie;
}
d_url = php_url_encode(d, l, &l);
efree(d);
memcpy(*where, name, o_name_len);
*where += o_name_len;
**where = '=';*where += 1;
memcpy(*where, d_url, l);
*where += l;
efree(d_url);
skip_cookie:
efree(buf);
efree(buf2);
return *where;
}
| 165,649 |
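The suhosin fix above removes the 4 KB stack buffers altogether: rather than copying into buffer/buffer2 when the input appears to fit (the vulnerable version's length check compares strlen(value) where value_len was evidently intended), it unconditionally duplicates the input onto the heap with estrndup. A minimal C sketch of that always-duplicate pattern follows; dup_n is a hypothetical stand-in for estrndup.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fixed shape, as in the record above: always duplicate onto the heap,
 * so no input length can overflow a fixed-size stack buffer. */
static char *dup_n(const char *s, size_t n)
{
    char *p = malloc(n + 1);
    if (!p)
        return NULL;
    memcpy(p, s, n);
    p[n] = '\0';
    return p;
}

int main(void)
{
    const char value[] = "arbitrarily-long-attacker-controlled-cookie-value";
    char *buf = dup_n(value, sizeof(value) - 1);  /* plays estrndup's role */
    if (!buf)
        return 1;
    printf("safe copy: %s\n", buf);
    free(buf);
    return 0;
}

Trading a conditional stack copy for an unconditional heap copy also removes the two "did we allocate or not" cleanup branches at the end of the original function.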
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (pf->present)
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
if (pf->present)
pi_release(pf->pi);
}
}
Commit Message: paride/pf: Fix potential NULL pointer dereference
Syzkaller report this:
pf: pf version 1.04, major 47, cluster 64, nice 0
pf: No ATAPI disk detected
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 0 PID: 9887 Comm: syz-executor.0 Tainted: G C 5.1.0-rc3+ #8
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:pf_init+0x7af/0x1000 [pf]
Code: 46 77 d2 48 89 d8 48 c1 e8 03 80 3c 28 00 74 08 48 89 df e8 03 25 a6 d2 4c 8b 23 49 8d bc 24 80 05 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 74 05 e8 e6 24 a6 d2 49 8b bc 24 80 05 00 00 e8 79 34
RSP: 0018:ffff8881abcbf998 EFLAGS: 00010202
RAX: 00000000000000b0 RBX: ffffffffc1e4a8a8 RCX: ffffffffaec50788
RDX: 0000000000039b10 RSI: ffffc9000153c000 RDI: 0000000000000580
RBP: dffffc0000000000 R08: ffffed103ee44e59 R09: ffffed103ee44e59
R10: 0000000000000001 R11: ffffed103ee44e58 R12: 0000000000000000
R13: ffffffffc1e4b028 R14: 0000000000000000 R15: 0000000000000020
FS: 00007f1b78a91700(0000) GS:ffff8881f7200000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6d72b207f8 CR3: 00000001d5790004 CR4: 00000000007606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
? 0xffffffffc1e50000
do_one_initcall+0xbc/0x47d init/main.c:901
do_init_module+0x1b5/0x547 kernel/module.c:3456
load_module+0x6405/0x8c10 kernel/module.c:3804
__do_sys_finit_module+0x162/0x190 kernel/module.c:3898
do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f1b78a90c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003
RBP: 00007f1b78a90c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f1b78a916bc
R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004
Modules linked in: pf(+) paride gpio_tps65218 tps65218 i2c_cht_wc ati_remote dc395x act_meta_skbtcindex act_ife ife ecdh_generic rc_xbox_dvd sky81452_regulator v4l2_fwnode leds_blinkm snd_usb_hiface comedi(C) aes_ti slhc cfi_cmdset_0020 mtd cfi_util sx8654 mdio_gpio of_mdio fixed_phy mdio_bitbang libphy alcor_pci matrix_keymap hid_uclogic usbhid scsi_transport_fc videobuf2_v4l2 videobuf2_dma_sg snd_soc_pcm179x_spi snd_soc_pcm179x_codec i2c_demux_pinctrl mdev snd_indigodj isl6405 mii enc28j60 cmac adt7316_i2c(C) adt7316(C) fmc_trivial fmc nf_reject_ipv4 authenc rc_dtt200u rtc_ds1672 dvb_usb_dibusb_mc dvb_usb_dibusb_mc_common dib3000mc dibx000_common dvb_usb_dibusb_common dvb_usb dvb_core videobuf2_common videobuf2_vmalloc videobuf2_memops regulator_haptic adf7242 mac802154 ieee802154 s5h1409 da9034_ts snd_intel8x0m wmi cx24120 usbcore sdhci_cadence sdhci_pltfm sdhci mmc_core joydev i2c_algo_bit scsi_transport_iscsi iscsi_boot_sysfs ves1820 lockd grace nfs_acl auth_rpcgss sunrpc
ip_vs snd_soc_adau7002 snd_cs4281 snd_rawmidi gameport snd_opl3_lib snd_seq_device snd_hwdep snd_ac97_codec ad7418 hid_primax hid snd_soc_cs4265 snd_soc_core snd_pcm_dmaengine snd_pcm snd_timer ac97_bus snd_compress snd soundcore ti_adc108s102 eeprom_93cx6 i2c_algo_pca mlxreg_hotplug st_pressure st_sensors industrialio_triggered_buffer kfifo_buf industrialio v4l2_common videodev media snd_soc_adau_utils rc_pinnacle_grey rc_core pps_gpio leds_lm3692x nandcore ledtrig_pattern iptable_security iptable_raw iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun mousedev ppdev tpm kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel ide_pci_generic aes_x86_64 piix crypto_simd input_leds psmouse cryptd
glue_helper ide_core intel_agp serio_raw intel_gtt agpgart ata_generic i2c_piix4 pata_acpi parport_pc parport rtc_cmos floppy sch_fq_codel ip_tables x_tables sha1_ssse3 sha1_generic ipv6 [last unloaded: paride]
Dumping ftrace buffer:
(ftrace buffer empty)
---[ end trace 7a818cf5f210d79e ]---
If alloc_disk fails in pf_init_units, pf->disk will be
NULL; however, pf_detect and pf_exit do not check for this
before freeing, which may result in a NULL pointer dereference.
Also, when register_blkdev fails, blk_cleanup_queue() and
blk_mq_free_tag_set() should be called to free resources.
Reported-by: Hulk Robot <[email protected]>
Fixes: 6ce59025f118 ("paride/pf: cleanup queues when detection fails")
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
CWE ID: CWE-476 | static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
if (pf->present)
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
if (pf->present)
pi_release(pf->pi);
}
}
| 169,522 |
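The commit message above describes the classic CWE-476 shape: initialization may leave a per-unit pointer NULL, so teardown must check for that before touching it. Here is a toy C sketch of the guarded-teardown loop, with invented types in place of the pf driver's.

#include <stdio.h>
#include <stdlib.h>

#define UNITS 4

/* Toy stand-ins for the pf driver's per-unit state. */
struct disk { int dummy; };
struct unit { struct disk *disk; int present; };

static struct unit units[UNITS];

static void unit_exit(void)
{
    for (int i = 0; i < UNITS; i++) {
        struct unit *u = &units[i];
        /* The fix: skip units whose allocation failed during init;
         * dereferencing u->disk here would be the CWE-476 crash. */
        if (!u->disk)
            continue;
        if (u->present)
            printf("unit %d: del_gendisk\n", i);
        free(u->disk);  /* plays the role of queue/tagset/disk teardown */
    }
}

int main(void)
{
    units[0].disk = malloc(sizeof(struct disk));  /* init succeeded */
    if (!units[0].disk)
        return 1;
    units[0].present = 1;
    units[1].disk = NULL;                         /* alloc_disk failed */
    unit_exit();                                  /* no NULL deref */
    return 0;
}

The guard belongs in every teardown path that can run after a partially failed init, which is why the kernel patch touches both pf_detect and pf_exit.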
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 5:
/* Line 1455 of yacc.c */
#line 320 "ntp_parser.y"
{
/* I will need to incorporate much more fine grained
* error messages. The following should suffice for
* the time being.
*/
msyslog(LOG_ERR,
"syntax error in %s line %d, column %d",
ip_file->fname,
ip_file->err_line_no,
ip_file->err_col_no);
}
break;
case 19:
/* Line 1455 of yacc.c */
#line 354 "ntp_parser.y"
{
struct peer_node *my_node = create_peer_node((yyvsp[(1) - (3)].Integer), (yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue));
if (my_node)
enqueue(cfgt.peers, my_node);
}
break;
case 20:
/* Line 1455 of yacc.c */
#line 360 "ntp_parser.y"
{
struct peer_node *my_node = create_peer_node((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Address_node), NULL);
if (my_node)
enqueue(cfgt.peers, my_node);
}
break;
case 27:
/* Line 1455 of yacc.c */
#line 377 "ntp_parser.y"
{ (yyval.Address_node) = create_address_node((yyvsp[(2) - (2)].String), AF_INET); }
break;
case 28:
/* Line 1455 of yacc.c */
#line 378 "ntp_parser.y"
{ (yyval.Address_node) = create_address_node((yyvsp[(2) - (2)].String), AF_INET6); }
break;
case 29:
/* Line 1455 of yacc.c */
#line 382 "ntp_parser.y"
{ (yyval.Address_node) = create_address_node((yyvsp[(1) - (1)].String), 0); }
break;
case 30:
/* Line 1455 of yacc.c */
#line 386 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 31:
/* Line 1455 of yacc.c */
#line 387 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 32:
/* Line 1455 of yacc.c */
#line 391 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 33:
/* Line 1455 of yacc.c */
#line 392 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 34:
/* Line 1455 of yacc.c */
#line 393 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 35:
/* Line 1455 of yacc.c */
#line 394 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 36:
/* Line 1455 of yacc.c */
#line 395 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 37:
/* Line 1455 of yacc.c */
#line 396 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 38:
/* Line 1455 of yacc.c */
#line 397 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 39:
/* Line 1455 of yacc.c */
#line 398 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 40:
/* Line 1455 of yacc.c */
#line 399 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 41:
/* Line 1455 of yacc.c */
#line 400 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 42:
/* Line 1455 of yacc.c */
#line 401 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 43:
/* Line 1455 of yacc.c */
#line 402 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 44:
/* Line 1455 of yacc.c */
#line 403 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 45:
/* Line 1455 of yacc.c */
#line 404 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 46:
/* Line 1455 of yacc.c */
#line 405 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 47:
/* Line 1455 of yacc.c */
#line 415 "ntp_parser.y"
{
struct unpeer_node *my_node = create_unpeer_node((yyvsp[(2) - (2)].Address_node));
if (my_node)
enqueue(cfgt.unpeers, my_node);
}
break;
case 50:
/* Line 1455 of yacc.c */
#line 434 "ntp_parser.y"
{ cfgt.broadcastclient = 1; }
break;
case 51:
/* Line 1455 of yacc.c */
#line 436 "ntp_parser.y"
{ append_queue(cfgt.manycastserver, (yyvsp[(2) - (2)].Queue)); }
break;
case 52:
/* Line 1455 of yacc.c */
#line 438 "ntp_parser.y"
{ append_queue(cfgt.multicastclient, (yyvsp[(2) - (2)].Queue)); }
break;
case 53:
/* Line 1455 of yacc.c */
#line 449 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer))); }
break;
case 54:
/* Line 1455 of yacc.c */
#line 451 "ntp_parser.y"
{ cfgt.auth.control_key = (yyvsp[(2) - (2)].Integer); }
break;
case 55:
/* Line 1455 of yacc.c */
#line 453 "ntp_parser.y"
{
cfgt.auth.cryptosw++;
append_queue(cfgt.auth.crypto_cmd_list, (yyvsp[(2) - (2)].Queue));
}
break;
case 56:
/* Line 1455 of yacc.c */
#line 458 "ntp_parser.y"
{ cfgt.auth.keys = (yyvsp[(2) - (2)].String); }
break;
case 57:
/* Line 1455 of yacc.c */
#line 460 "ntp_parser.y"
{ cfgt.auth.keysdir = (yyvsp[(2) - (2)].String); }
break;
case 58:
/* Line 1455 of yacc.c */
#line 462 "ntp_parser.y"
{ cfgt.auth.request_key = (yyvsp[(2) - (2)].Integer); }
break;
case 59:
/* Line 1455 of yacc.c */
#line 464 "ntp_parser.y"
{ cfgt.auth.revoke = (yyvsp[(2) - (2)].Integer); }
break;
case 60:
/* Line 1455 of yacc.c */
#line 466 "ntp_parser.y"
{ cfgt.auth.trusted_key_list = (yyvsp[(2) - (2)].Queue); }
break;
case 61:
/* Line 1455 of yacc.c */
#line 468 "ntp_parser.y"
{ cfgt.auth.ntp_signd_socket = (yyvsp[(2) - (2)].String); }
break;
case 63:
/* Line 1455 of yacc.c */
#line 474 "ntp_parser.y"
{ (yyval.Queue) = create_queue(); }
break;
case 64:
/* Line 1455 of yacc.c */
#line 479 "ntp_parser.y"
{
if ((yyvsp[(2) - (2)].Attr_val) != NULL)
(yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val));
else
(yyval.Queue) = (yyvsp[(1) - (2)].Queue);
}
break;
case 65:
/* Line 1455 of yacc.c */
#line 486 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Attr_val) != NULL)
(yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val));
else
(yyval.Queue) = create_queue();
}
break;
case 66:
/* Line 1455 of yacc.c */
#line 496 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 67:
/* Line 1455 of yacc.c */
#line 498 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 68:
/* Line 1455 of yacc.c */
#line 500 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 69:
/* Line 1455 of yacc.c */
#line 502 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 70:
/* Line 1455 of yacc.c */
#line 504 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 71:
/* Line 1455 of yacc.c */
#line 506 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 72:
/* Line 1455 of yacc.c */
#line 508 "ntp_parser.y"
{
(yyval.Attr_val) = NULL;
cfgt.auth.revoke = (yyvsp[(2) - (2)].Integer);
msyslog(LOG_WARNING,
"'crypto revoke %d' is deprecated, "
"please use 'revoke %d' instead.",
cfgt.auth.revoke, cfgt.auth.revoke);
}
break;
case 73:
/* Line 1455 of yacc.c */
#line 525 "ntp_parser.y"
{ append_queue(cfgt.orphan_cmds,(yyvsp[(2) - (2)].Queue)); }
break;
case 74:
/* Line 1455 of yacc.c */
#line 529 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 75:
/* Line 1455 of yacc.c */
#line 530 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 76:
/* Line 1455 of yacc.c */
#line 535 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 77:
/* Line 1455 of yacc.c */
#line 537 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 78:
/* Line 1455 of yacc.c */
#line 539 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 79:
/* Line 1455 of yacc.c */
#line 541 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 80:
/* Line 1455 of yacc.c */
#line 543 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 81:
/* Line 1455 of yacc.c */
#line 545 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 82:
/* Line 1455 of yacc.c */
#line 547 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 83:
/* Line 1455 of yacc.c */
#line 549 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 84:
/* Line 1455 of yacc.c */
#line 551 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 85:
/* Line 1455 of yacc.c */
#line 553 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 86:
/* Line 1455 of yacc.c */
#line 555 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 87:
/* Line 1455 of yacc.c */
#line 565 "ntp_parser.y"
{ append_queue(cfgt.stats_list, (yyvsp[(2) - (2)].Queue)); }
break;
case 88:
/* Line 1455 of yacc.c */
#line 567 "ntp_parser.y"
{
if (input_from_file)
cfgt.stats_dir = (yyvsp[(2) - (2)].String);
else {
free((yyvsp[(2) - (2)].String));
yyerror("statsdir remote configuration ignored");
}
}
break;
case 89:
/* Line 1455 of yacc.c */
#line 576 "ntp_parser.y"
{
enqueue(cfgt.filegen_opts,
create_filegen_node((yyvsp[(2) - (3)].Integer), (yyvsp[(3) - (3)].Queue)));
}
break;
case 90:
/* Line 1455 of yacc.c */
#line 583 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); }
break;
case 91:
/* Line 1455 of yacc.c */
#line 584 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue(create_ival((yyvsp[(1) - (1)].Integer))); }
break;
case 100:
/* Line 1455 of yacc.c */
#line 600 "ntp_parser.y"
{
if ((yyvsp[(2) - (2)].Attr_val) != NULL)
(yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val));
else
(yyval.Queue) = (yyvsp[(1) - (2)].Queue);
}
break;
case 101:
/* Line 1455 of yacc.c */
#line 607 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Attr_val) != NULL)
(yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val));
else
(yyval.Queue) = create_queue();
}
break;
case 102:
/* Line 1455 of yacc.c */
#line 617 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String));
else {
(yyval.Attr_val) = NULL;
free((yyvsp[(2) - (2)].String));
yyerror("filegen file remote configuration ignored");
}
}
break;
case 103:
/* Line 1455 of yacc.c */
#line 627 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("filegen type remote configuration ignored");
}
}
break;
case 104:
/* Line 1455 of yacc.c */
#line 636 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("filegen link remote configuration ignored");
}
}
break;
case 105:
/* Line 1455 of yacc.c */
#line 645 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("filegen nolink remote configuration ignored");
}
}
break;
case 106:
/* Line 1455 of yacc.c */
#line 653 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 107:
/* Line 1455 of yacc.c */
#line 654 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 115:
/* Line 1455 of yacc.c */
#line 674 "ntp_parser.y"
{
append_queue(cfgt.discard_opts, (yyvsp[(2) - (2)].Queue));
}
break;
case 116:
/* Line 1455 of yacc.c */
#line 678 "ntp_parser.y"
{
append_queue(cfgt.mru_opts, (yyvsp[(2) - (2)].Queue));
}
break;
case 117:
/* Line 1455 of yacc.c */
#line 682 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node((yyvsp[(2) - (3)].Address_node), NULL, (yyvsp[(3) - (3)].Queue), ip_file->line_no));
}
break;
case 118:
/* Line 1455 of yacc.c */
#line 687 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node((yyvsp[(2) - (5)].Address_node), (yyvsp[(4) - (5)].Address_node), (yyvsp[(5) - (5)].Queue), ip_file->line_no));
}
break;
case 119:
/* Line 1455 of yacc.c */
#line 692 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(NULL, NULL, (yyvsp[(3) - (3)].Queue), ip_file->line_no));
}
break;
case 120:
/* Line 1455 of yacc.c */
#line 697 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(
create_address_node(
estrdup("0.0.0.0"),
AF_INET),
create_address_node(
estrdup("0.0.0.0"),
AF_INET),
(yyvsp[(4) - (4)].Queue),
ip_file->line_no));
}
break;
case 121:
/* Line 1455 of yacc.c */
#line 710 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(
create_address_node(
estrdup("::"),
AF_INET6),
create_address_node(
estrdup("::"),
AF_INET6),
(yyvsp[(4) - (4)].Queue),
ip_file->line_no));
}
break;
case 122:
/* Line 1455 of yacc.c */
#line 723 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(
NULL, NULL,
enqueue((yyvsp[(3) - (3)].Queue), create_ival((yyvsp[(2) - (3)].Integer))),
ip_file->line_no));
}
break;
case 123:
/* Line 1455 of yacc.c */
#line 734 "ntp_parser.y"
{ (yyval.Queue) = create_queue(); }
break;
case 124:
/* Line 1455 of yacc.c */
#line 736 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); }
break;
case 139:
/* Line 1455 of yacc.c */
#line 758 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 140:
/* Line 1455 of yacc.c */
#line 760 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 141:
/* Line 1455 of yacc.c */
#line 764 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 142:
/* Line 1455 of yacc.c */
#line 765 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 143:
/* Line 1455 of yacc.c */
#line 766 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 144:
/* Line 1455 of yacc.c */
#line 771 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 145:
/* Line 1455 of yacc.c */
#line 773 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 146:
/* Line 1455 of yacc.c */
#line 777 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 147:
/* Line 1455 of yacc.c */
#line 778 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 148:
/* Line 1455 of yacc.c */
#line 779 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 149:
/* Line 1455 of yacc.c */
#line 780 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 150:
/* Line 1455 of yacc.c */
#line 781 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 151:
/* Line 1455 of yacc.c */
#line 782 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 152:
/* Line 1455 of yacc.c */
#line 783 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 153:
/* Line 1455 of yacc.c */
#line 784 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 154:
/* Line 1455 of yacc.c */
#line 793 "ntp_parser.y"
{ enqueue(cfgt.fudge, create_addr_opts_node((yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue))); }
break;
case 155:
/* Line 1455 of yacc.c */
#line 798 "ntp_parser.y"
{ enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 156:
/* Line 1455 of yacc.c */
#line 800 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 157:
/* Line 1455 of yacc.c */
#line 804 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 158:
/* Line 1455 of yacc.c */
#line 805 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 159:
/* Line 1455 of yacc.c */
#line 806 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 160:
/* Line 1455 of yacc.c */
#line 807 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 161:
/* Line 1455 of yacc.c */
#line 808 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 162:
/* Line 1455 of yacc.c */
#line 809 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 163:
/* Line 1455 of yacc.c */
#line 810 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 164:
/* Line 1455 of yacc.c */
#line 811 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 165:
/* Line 1455 of yacc.c */
#line 820 "ntp_parser.y"
{ append_queue(cfgt.enable_opts, (yyvsp[(2) - (2)].Queue)); }
break;
case 166:
/* Line 1455 of yacc.c */
#line 822 "ntp_parser.y"
{ append_queue(cfgt.disable_opts, (yyvsp[(2) - (2)].Queue)); }
break;
case 167:
/* Line 1455 of yacc.c */
#line 827 "ntp_parser.y"
{
if ((yyvsp[(2) - (2)].Attr_val) != NULL)
(yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val));
else
(yyval.Queue) = (yyvsp[(1) - (2)].Queue);
}
break;
case 168:
/* Line 1455 of yacc.c */
#line 834 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Attr_val) != NULL)
(yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val));
else
(yyval.Queue) = create_queue();
}
break;
case 169:
/* Line 1455 of yacc.c */
#line 843 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 170:
/* Line 1455 of yacc.c */
#line 844 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 171:
/* Line 1455 of yacc.c */
#line 845 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 172:
/* Line 1455 of yacc.c */
#line 846 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 173:
/* Line 1455 of yacc.c */
#line 847 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 174:
/* Line 1455 of yacc.c */
#line 848 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 175:
/* Line 1455 of yacc.c */
#line 850 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("enable/disable stats remote configuration ignored");
}
}
break;
case 176:
/* Line 1455 of yacc.c */
#line 865 "ntp_parser.y"
{ append_queue(cfgt.tinker, (yyvsp[(2) - (2)].Queue)); }
break;
case 177:
/* Line 1455 of yacc.c */
#line 869 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 178:
/* Line 1455 of yacc.c */
#line 870 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 179:
/* Line 1455 of yacc.c */
#line 874 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 180:
/* Line 1455 of yacc.c */
#line 875 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 181:
/* Line 1455 of yacc.c */
#line 876 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 182:
/* Line 1455 of yacc.c */
#line 877 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 183:
/* Line 1455 of yacc.c */
#line 878 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 184:
/* Line 1455 of yacc.c */
#line 879 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 185:
/* Line 1455 of yacc.c */
#line 880 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 187:
/* Line 1455 of yacc.c */
#line 891 "ntp_parser.y"
{
if (curr_include_level >= MAXINCLUDELEVEL) {
fprintf(stderr, "getconfig: Maximum include file level exceeded.\n");
msyslog(LOG_ERR, "getconfig: Maximum include file level exceeded.");
}
else {
fp[curr_include_level + 1] = F_OPEN(FindConfig((yyvsp[(2) - (3)].String)), "r");
if (fp[curr_include_level + 1] == NULL) {
fprintf(stderr, "getconfig: Couldn't open <%s>\n", FindConfig((yyvsp[(2) - (3)].String)));
msyslog(LOG_ERR, "getconfig: Couldn't open <%s>", FindConfig((yyvsp[(2) - (3)].String)));
}
else
ip_file = fp[++curr_include_level];
}
}
break;
case 188:
/* Line 1455 of yacc.c */
#line 907 "ntp_parser.y"
{
while (curr_include_level != -1)
FCLOSE(fp[curr_include_level--]);
}
break;
case 189:
/* Line 1455 of yacc.c */
#line 913 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double))); }
break;
case 190:
/* Line 1455 of yacc.c */
#line 915 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer))); }
break;
case 191:
/* Line 1455 of yacc.c */
#line 917 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double))); }
break;
case 192:
/* Line 1455 of yacc.c */
#line 919 "ntp_parser.y"
{ /* Null action, possibly all null parms */ }
break;
case 193:
/* Line 1455 of yacc.c */
#line 921 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); }
break;
case 194:
/* Line 1455 of yacc.c */
#line 924 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); }
break;
case 195:
/* Line 1455 of yacc.c */
#line 926 "ntp_parser.y"
{
if (input_from_file)
enqueue(cfgt.vars,
create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)));
else {
free((yyvsp[(2) - (2)].String));
yyerror("logfile remote configuration ignored");
}
}
break;
case 196:
/* Line 1455 of yacc.c */
#line 937 "ntp_parser.y"
{ append_queue(cfgt.logconfig, (yyvsp[(2) - (2)].Queue)); }
break;
case 197:
/* Line 1455 of yacc.c */
#line 939 "ntp_parser.y"
{ append_queue(cfgt.phone, (yyvsp[(2) - (2)].Queue)); }
break;
case 198:
/* Line 1455 of yacc.c */
#line 941 "ntp_parser.y"
{
if (input_from_file)
enqueue(cfgt.vars,
create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)));
else {
free((yyvsp[(2) - (2)].String));
yyerror("saveconfigdir remote configuration ignored");
}
}
break;
case 199:
/* Line 1455 of yacc.c */
#line 951 "ntp_parser.y"
{ enqueue(cfgt.setvar, (yyvsp[(2) - (2)].Set_var)); }
break;
case 200:
/* Line 1455 of yacc.c */
#line 953 "ntp_parser.y"
{ enqueue(cfgt.trap, create_addr_opts_node((yyvsp[(2) - (2)].Address_node), NULL)); }
break;
case 201:
/* Line 1455 of yacc.c */
#line 955 "ntp_parser.y"
{ enqueue(cfgt.trap, create_addr_opts_node((yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue))); }
break;
case 202:
/* Line 1455 of yacc.c */
#line 957 "ntp_parser.y"
{ append_queue(cfgt.ttl, (yyvsp[(2) - (2)].Queue)); }
break;
case 203:
/* Line 1455 of yacc.c */
#line 959 "ntp_parser.y"
{ enqueue(cfgt.qos, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); }
break;
case 204:
/* Line 1455 of yacc.c */
#line 964 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval(T_Driftfile, (yyvsp[(1) - (1)].String))); }
break;
case 205:
/* Line 1455 of yacc.c */
#line 966 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_dval(T_WanderThreshold, (yyvsp[(2) - (2)].Double)));
enqueue(cfgt.vars, create_attr_sval(T_Driftfile, (yyvsp[(1) - (2)].String))); }
break;
case 206:
/* Line 1455 of yacc.c */
#line 969 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval(T_Driftfile, "\0")); }
break;
case 207:
/* Line 1455 of yacc.c */
#line 974 "ntp_parser.y"
{ (yyval.Set_var) = create_setvar_node((yyvsp[(1) - (4)].String), (yyvsp[(3) - (4)].String), (yyvsp[(4) - (4)].Integer)); }
break;
case 208:
/* Line 1455 of yacc.c */
#line 976 "ntp_parser.y"
{ (yyval.Set_var) = create_setvar_node((yyvsp[(1) - (3)].String), (yyvsp[(3) - (3)].String), 0); }
break;
case 209:
/* Line 1455 of yacc.c */
#line 981 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 210:
/* Line 1455 of yacc.c */
#line 982 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 211:
/* Line 1455 of yacc.c */
#line 986 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 212:
/* Line 1455 of yacc.c */
#line 987 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_pval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Address_node)); }
break;
case 213:
/* Line 1455 of yacc.c */
#line 991 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 214:
/* Line 1455 of yacc.c */
#line 992 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 215:
/* Line 1455 of yacc.c */
#line 997 "ntp_parser.y"
{
char prefix = (yyvsp[(1) - (1)].String)[0];
char *type = (yyvsp[(1) - (1)].String) + 1;
if (prefix != '+' && prefix != '-' && prefix != '=') {
yyerror("Logconfig prefix is not '+', '-' or '='\n");
}
else
(yyval.Attr_val) = create_attr_sval(prefix, estrdup(type));
YYFREE((yyvsp[(1) - (1)].String));
}
break;
case 216:
/* Line 1455 of yacc.c */
#line 1012 "ntp_parser.y"
{
enqueue(cfgt.nic_rules,
create_nic_rule_node((yyvsp[(3) - (3)].Integer), NULL, (yyvsp[(2) - (3)].Integer)));
}
break;
case 217:
/* Line 1455 of yacc.c */
#line 1017 "ntp_parser.y"
{
enqueue(cfgt.nic_rules,
create_nic_rule_node(0, (yyvsp[(3) - (3)].String), (yyvsp[(2) - (3)].Integer)));
}
break;
case 227:
/* Line 1455 of yacc.c */
#line 1048 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); }
break;
case 228:
/* Line 1455 of yacc.c */
#line 1049 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue(create_ival((yyvsp[(1) - (1)].Integer))); }
break;
case 229:
/* Line 1455 of yacc.c */
#line 1054 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 230:
/* Line 1455 of yacc.c */
#line 1056 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 231:
/* Line 1455 of yacc.c */
#line 1061 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival('i', (yyvsp[(1) - (1)].Integer)); }
break;
case 233:
/* Line 1455 of yacc.c */
#line 1067 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_shorts('-', (yyvsp[(2) - (5)].Integer), (yyvsp[(4) - (5)].Integer)); }
break;
case 234:
/* Line 1455 of yacc.c */
#line 1071 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_pval((yyvsp[(2) - (2)].String))); }
break;
case 235:
/* Line 1455 of yacc.c */
#line 1072 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue(create_pval((yyvsp[(1) - (1)].String))); }
break;
case 236:
/* Line 1455 of yacc.c */
#line 1076 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Address_node)); }
break;
case 237:
/* Line 1455 of yacc.c */
#line 1077 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Address_node)); }
break;
case 238:
/* Line 1455 of yacc.c */
#line 1082 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Integer) != 0 && (yyvsp[(1) - (1)].Integer) != 1) {
yyerror("Integer value is not boolean (0 or 1). Assuming 1");
(yyval.Integer) = 1;
}
else
(yyval.Integer) = (yyvsp[(1) - (1)].Integer);
}
break;
case 239:
/* Line 1455 of yacc.c */
#line 1090 "ntp_parser.y"
{ (yyval.Integer) = 1; }
break;
case 240:
/* Line 1455 of yacc.c */
#line 1091 "ntp_parser.y"
{ (yyval.Integer) = 0; }
break;
case 241:
/* Line 1455 of yacc.c */
#line 1095 "ntp_parser.y"
{ (yyval.Double) = (double)(yyvsp[(1) - (1)].Integer); }
break;
case 243:
/* Line 1455 of yacc.c */
#line 1106 "ntp_parser.y"
{
cfgt.sim_details = create_sim_node((yyvsp[(3) - (5)].Queue), (yyvsp[(4) - (5)].Queue));
/* Reset the old_config_style variable */
old_config_style = 1;
}
break;
case 244:
/* Line 1455 of yacc.c */
#line 1120 "ntp_parser.y"
{ old_config_style = 0; }
break;
case 245:
/* Line 1455 of yacc.c */
#line 1124 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (3)].Queue), (yyvsp[(2) - (3)].Attr_val)); }
break;
case 246:
/* Line 1455 of yacc.c */
#line 1125 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (2)].Attr_val)); }
break;
case 247:
/* Line 1455 of yacc.c */
#line 1129 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 248:
/* Line 1455 of yacc.c */
#line 1130 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 249:
/* Line 1455 of yacc.c */
#line 1134 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Sim_server)); }
break;
case 250:
/* Line 1455 of yacc.c */
#line 1135 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Sim_server)); }
break;
case 251:
/* Line 1455 of yacc.c */
#line 1140 "ntp_parser.y"
{ (yyval.Sim_server) = create_sim_server((yyvsp[(1) - (5)].Address_node), (yyvsp[(3) - (5)].Double), (yyvsp[(4) - (5)].Queue)); }
break;
case 252:
/* Line 1455 of yacc.c */
#line 1144 "ntp_parser.y"
{ (yyval.Double) = (yyvsp[(3) - (4)].Double); }
break;
case 253:
/* Line 1455 of yacc.c */
#line 1148 "ntp_parser.y"
{ (yyval.Address_node) = (yyvsp[(3) - (3)].Address_node); }
break;
case 254:
/* Line 1455 of yacc.c */
#line 1152 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Sim_script)); }
break;
case 255:
/* Line 1455 of yacc.c */
#line 1153 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Sim_script)); }
break;
case 256:
/* Line 1455 of yacc.c */
#line 1158 "ntp_parser.y"
{ (yyval.Sim_script) = create_sim_script_info((yyvsp[(3) - (6)].Double), (yyvsp[(5) - (6)].Queue)); }
break;
case 257:
/* Line 1455 of yacc.c */
#line 1162 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (3)].Queue), (yyvsp[(2) - (3)].Attr_val)); }
break;
case 258:
/* Line 1455 of yacc.c */
#line 1163 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (2)].Attr_val)); }
break;
case 259:
/* Line 1455 of yacc.c */
#line 1168 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 260:
/* Line 1455 of yacc.c */
#line 1170 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 261:
/* Line 1455 of yacc.c */
#line 1172 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 262:
/* Line 1455 of yacc.c */
#line 1174 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 263:
/* Line 1455 of yacc.c */
#line 1176 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
/* Line 1455 of yacc.c */
#line 3826 "ntp_parser.c"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
Commit Message: [Bug 1593] ntpd abort in free() with logconfig syntax error.
CWE ID: CWE-20 | yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 5:
/* Line 1455 of yacc.c */
#line 320 "ntp_parser.y"
{
/* I will need to incorporate much more fine grained
* error messages. The following should suffice for
* the time being.
*/
msyslog(LOG_ERR,
"syntax error in %s line %d, column %d",
ip_file->fname,
ip_file->err_line_no,
ip_file->err_col_no);
}
break;
case 19:
/* Line 1455 of yacc.c */
#line 354 "ntp_parser.y"
{
struct peer_node *my_node = create_peer_node((yyvsp[(1) - (3)].Integer), (yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue));
if (my_node)
enqueue(cfgt.peers, my_node);
}
break;
case 20:
/* Line 1455 of yacc.c */
#line 360 "ntp_parser.y"
{
struct peer_node *my_node = create_peer_node((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Address_node), NULL);
if (my_node)
enqueue(cfgt.peers, my_node);
}
break;
case 27:
/* Line 1455 of yacc.c */
#line 377 "ntp_parser.y"
{ (yyval.Address_node) = create_address_node((yyvsp[(2) - (2)].String), AF_INET); }
break;
case 28:
/* Line 1455 of yacc.c */
#line 378 "ntp_parser.y"
{ (yyval.Address_node) = create_address_node((yyvsp[(2) - (2)].String), AF_INET6); }
break;
case 29:
/* Line 1455 of yacc.c */
#line 382 "ntp_parser.y"
{ (yyval.Address_node) = create_address_node((yyvsp[(1) - (1)].String), 0); }
break;
case 30:
/* Line 1455 of yacc.c */
#line 386 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 31:
/* Line 1455 of yacc.c */
#line 387 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 32:
/* Line 1455 of yacc.c */
#line 391 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 33:
/* Line 1455 of yacc.c */
#line 392 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 34:
/* Line 1455 of yacc.c */
#line 393 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 35:
/* Line 1455 of yacc.c */
#line 394 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 36:
/* Line 1455 of yacc.c */
#line 395 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 37:
/* Line 1455 of yacc.c */
#line 396 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 38:
/* Line 1455 of yacc.c */
#line 397 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 39:
/* Line 1455 of yacc.c */
#line 398 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 40:
/* Line 1455 of yacc.c */
#line 399 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 41:
/* Line 1455 of yacc.c */
#line 400 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 42:
/* Line 1455 of yacc.c */
#line 401 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 43:
/* Line 1455 of yacc.c */
#line 402 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 44:
/* Line 1455 of yacc.c */
#line 403 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 45:
/* Line 1455 of yacc.c */
#line 404 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 46:
/* Line 1455 of yacc.c */
#line 405 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 47:
/* Line 1455 of yacc.c */
#line 415 "ntp_parser.y"
{
struct unpeer_node *my_node = create_unpeer_node((yyvsp[(2) - (2)].Address_node));
if (my_node)
enqueue(cfgt.unpeers, my_node);
}
break;
case 50:
/* Line 1455 of yacc.c */
#line 434 "ntp_parser.y"
{ cfgt.broadcastclient = 1; }
break;
case 51:
/* Line 1455 of yacc.c */
#line 436 "ntp_parser.y"
{ append_queue(cfgt.manycastserver, (yyvsp[(2) - (2)].Queue)); }
break;
case 52:
/* Line 1455 of yacc.c */
#line 438 "ntp_parser.y"
{ append_queue(cfgt.multicastclient, (yyvsp[(2) - (2)].Queue)); }
break;
case 53:
/* Line 1455 of yacc.c */
#line 449 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer))); }
break;
case 54:
/* Line 1455 of yacc.c */
#line 451 "ntp_parser.y"
{ cfgt.auth.control_key = (yyvsp[(2) - (2)].Integer); }
break;
case 55:
/* Line 1455 of yacc.c */
#line 453 "ntp_parser.y"
{
cfgt.auth.cryptosw++;
append_queue(cfgt.auth.crypto_cmd_list, (yyvsp[(2) - (2)].Queue));
}
break;
case 56:
/* Line 1455 of yacc.c */
#line 458 "ntp_parser.y"
{ cfgt.auth.keys = (yyvsp[(2) - (2)].String); }
break;
case 57:
/* Line 1455 of yacc.c */
#line 460 "ntp_parser.y"
{ cfgt.auth.keysdir = (yyvsp[(2) - (2)].String); }
break;
case 58:
/* Line 1455 of yacc.c */
#line 462 "ntp_parser.y"
{ cfgt.auth.request_key = (yyvsp[(2) - (2)].Integer); }
break;
case 59:
/* Line 1455 of yacc.c */
#line 464 "ntp_parser.y"
{ cfgt.auth.revoke = (yyvsp[(2) - (2)].Integer); }
break;
case 60:
/* Line 1455 of yacc.c */
#line 466 "ntp_parser.y"
{ cfgt.auth.trusted_key_list = (yyvsp[(2) - (2)].Queue); }
break;
case 61:
/* Line 1455 of yacc.c */
#line 468 "ntp_parser.y"
{ cfgt.auth.ntp_signd_socket = (yyvsp[(2) - (2)].String); }
break;
case 63:
/* Line 1455 of yacc.c */
#line 474 "ntp_parser.y"
{ (yyval.Queue) = create_queue(); }
break;
case 64:
/* Line 1455 of yacc.c */
#line 479 "ntp_parser.y"
{
if ((yyvsp[(2) - (2)].Attr_val) != NULL)
(yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val));
else
(yyval.Queue) = (yyvsp[(1) - (2)].Queue);
}
break;
case 65:
/* Line 1455 of yacc.c */
#line 486 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Attr_val) != NULL)
(yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val));
else
(yyval.Queue) = create_queue();
}
break;
case 66:
/* Line 1455 of yacc.c */
#line 496 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 67:
/* Line 1455 of yacc.c */
#line 498 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 68:
/* Line 1455 of yacc.c */
#line 500 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 69:
/* Line 1455 of yacc.c */
#line 502 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 70:
/* Line 1455 of yacc.c */
#line 504 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 71:
/* Line 1455 of yacc.c */
#line 506 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 72:
/* Line 1455 of yacc.c */
#line 508 "ntp_parser.y"
{
(yyval.Attr_val) = NULL;
cfgt.auth.revoke = (yyvsp[(2) - (2)].Integer);
msyslog(LOG_WARNING,
"'crypto revoke %d' is deprecated, "
"please use 'revoke %d' instead.",
cfgt.auth.revoke, cfgt.auth.revoke);
}
break;
case 73:
/* Line 1455 of yacc.c */
#line 525 "ntp_parser.y"
{ append_queue(cfgt.orphan_cmds,(yyvsp[(2) - (2)].Queue)); }
break;
case 74:
/* Line 1455 of yacc.c */
#line 529 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 75:
/* Line 1455 of yacc.c */
#line 530 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 76:
/* Line 1455 of yacc.c */
#line 535 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 77:
/* Line 1455 of yacc.c */
#line 537 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 78:
/* Line 1455 of yacc.c */
#line 539 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 79:
/* Line 1455 of yacc.c */
#line 541 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 80:
/* Line 1455 of yacc.c */
#line 543 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 81:
/* Line 1455 of yacc.c */
#line 545 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 82:
/* Line 1455 of yacc.c */
#line 547 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 83:
/* Line 1455 of yacc.c */
#line 549 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 84:
/* Line 1455 of yacc.c */
#line 551 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 85:
/* Line 1455 of yacc.c */
#line 553 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 86:
/* Line 1455 of yacc.c */
#line 555 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); }
break;
case 87:
/* Line 1455 of yacc.c */
#line 565 "ntp_parser.y"
{ append_queue(cfgt.stats_list, (yyvsp[(2) - (2)].Queue)); }
break;
case 88:
/* Line 1455 of yacc.c */
#line 567 "ntp_parser.y"
{
if (input_from_file)
cfgt.stats_dir = (yyvsp[(2) - (2)].String);
else {
free((yyvsp[(2) - (2)].String));
yyerror("statsdir remote configuration ignored");
}
}
break;
case 89:
/* Line 1455 of yacc.c */
#line 576 "ntp_parser.y"
{
enqueue(cfgt.filegen_opts,
create_filegen_node((yyvsp[(2) - (3)].Integer), (yyvsp[(3) - (3)].Queue)));
}
break;
case 90:
/* Line 1455 of yacc.c */
#line 583 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); }
break;
case 91:
/* Line 1455 of yacc.c */
#line 584 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue(create_ival((yyvsp[(1) - (1)].Integer))); }
break;
case 100:
/* Line 1455 of yacc.c */
#line 600 "ntp_parser.y"
{
if ((yyvsp[(2) - (2)].Attr_val) != NULL)
(yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val));
else
(yyval.Queue) = (yyvsp[(1) - (2)].Queue);
}
break;
case 101:
/* Line 1455 of yacc.c */
#line 607 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Attr_val) != NULL)
(yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val));
else
(yyval.Queue) = create_queue();
}
break;
case 102:
/* Line 1455 of yacc.c */
#line 617 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String));
else {
(yyval.Attr_val) = NULL;
free((yyvsp[(2) - (2)].String));
yyerror("filegen file remote configuration ignored");
}
}
break;
case 103:
/* Line 1455 of yacc.c */
#line 627 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("filegen type remote configuration ignored");
}
}
break;
case 104:
/* Line 1455 of yacc.c */
#line 636 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("filegen link remote configuration ignored");
}
}
break;
case 105:
/* Line 1455 of yacc.c */
#line 645 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("filegen nolink remote configuration ignored");
}
}
break;
case 106:
/* Line 1455 of yacc.c */
#line 653 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 107:
/* Line 1455 of yacc.c */
#line 654 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 115:
/* Line 1455 of yacc.c */
#line 674 "ntp_parser.y"
{
append_queue(cfgt.discard_opts, (yyvsp[(2) - (2)].Queue));
}
break;
case 116:
/* Line 1455 of yacc.c */
#line 678 "ntp_parser.y"
{
append_queue(cfgt.mru_opts, (yyvsp[(2) - (2)].Queue));
}
break;
case 117:
/* Line 1455 of yacc.c */
#line 682 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node((yyvsp[(2) - (3)].Address_node), NULL, (yyvsp[(3) - (3)].Queue), ip_file->line_no));
}
break;
case 118:
/* Line 1455 of yacc.c */
#line 687 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node((yyvsp[(2) - (5)].Address_node), (yyvsp[(4) - (5)].Address_node), (yyvsp[(5) - (5)].Queue), ip_file->line_no));
}
break;
case 119:
/* Line 1455 of yacc.c */
#line 692 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(NULL, NULL, (yyvsp[(3) - (3)].Queue), ip_file->line_no));
}
break;
case 120:
/* Line 1455 of yacc.c */
#line 697 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(
create_address_node(
estrdup("0.0.0.0"),
AF_INET),
create_address_node(
estrdup("0.0.0.0"),
AF_INET),
(yyvsp[(4) - (4)].Queue),
ip_file->line_no));
}
break;
case 121:
/* Line 1455 of yacc.c */
#line 710 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(
create_address_node(
estrdup("::"),
AF_INET6),
create_address_node(
estrdup("::"),
AF_INET6),
(yyvsp[(4) - (4)].Queue),
ip_file->line_no));
}
break;
case 122:
/* Line 1455 of yacc.c */
#line 723 "ntp_parser.y"
{
enqueue(cfgt.restrict_opts,
create_restrict_node(
NULL, NULL,
enqueue((yyvsp[(3) - (3)].Queue), create_ival((yyvsp[(2) - (3)].Integer))),
ip_file->line_no));
}
break;
case 123:
/* Line 1455 of yacc.c */
#line 734 "ntp_parser.y"
{ (yyval.Queue) = create_queue(); }
break;
case 124:
/* Line 1455 of yacc.c */
#line 736 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); }
break;
case 139:
/* Line 1455 of yacc.c */
#line 758 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 140:
/* Line 1455 of yacc.c */
#line 760 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 141:
/* Line 1455 of yacc.c */
#line 764 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 142:
/* Line 1455 of yacc.c */
#line 765 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 143:
/* Line 1455 of yacc.c */
#line 766 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 144:
/* Line 1455 of yacc.c */
#line 771 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 145:
/* Line 1455 of yacc.c */
#line 773 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 146:
/* Line 1455 of yacc.c */
#line 777 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 147:
/* Line 1455 of yacc.c */
#line 778 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 148:
/* Line 1455 of yacc.c */
#line 779 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 149:
/* Line 1455 of yacc.c */
#line 780 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 150:
/* Line 1455 of yacc.c */
#line 781 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 151:
/* Line 1455 of yacc.c */
#line 782 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 152:
/* Line 1455 of yacc.c */
#line 783 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 153:
/* Line 1455 of yacc.c */
#line 784 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 154:
/* Line 1455 of yacc.c */
#line 793 "ntp_parser.y"
{ enqueue(cfgt.fudge, create_addr_opts_node((yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue))); }
break;
case 155:
/* Line 1455 of yacc.c */
#line 798 "ntp_parser.y"
{ enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 156:
/* Line 1455 of yacc.c */
#line 800 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 157:
/* Line 1455 of yacc.c */
#line 804 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 158:
/* Line 1455 of yacc.c */
#line 805 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 159:
/* Line 1455 of yacc.c */
#line 806 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 160:
/* Line 1455 of yacc.c */
#line 807 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); }
break;
case 161:
/* Line 1455 of yacc.c */
#line 808 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 162:
/* Line 1455 of yacc.c */
#line 809 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 163:
/* Line 1455 of yacc.c */
#line 810 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 164:
/* Line 1455 of yacc.c */
#line 811 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 165:
/* Line 1455 of yacc.c */
#line 820 "ntp_parser.y"
{ append_queue(cfgt.enable_opts, (yyvsp[(2) - (2)].Queue)); }
break;
case 166:
/* Line 1455 of yacc.c */
#line 822 "ntp_parser.y"
{ append_queue(cfgt.disable_opts, (yyvsp[(2) - (2)].Queue)); }
break;
case 167:
/* Line 1455 of yacc.c */
#line 827 "ntp_parser.y"
{
if ((yyvsp[(2) - (2)].Attr_val) != NULL)
(yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val));
else
(yyval.Queue) = (yyvsp[(1) - (2)].Queue);
}
break;
case 168:
/* Line 1455 of yacc.c */
#line 834 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Attr_val) != NULL)
(yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val));
else
(yyval.Queue) = create_queue();
}
break;
case 169:
/* Line 1455 of yacc.c */
#line 843 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 170:
/* Line 1455 of yacc.c */
#line 844 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 171:
/* Line 1455 of yacc.c */
#line 845 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 172:
/* Line 1455 of yacc.c */
#line 846 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 173:
/* Line 1455 of yacc.c */
#line 847 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 174:
/* Line 1455 of yacc.c */
#line 848 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); }
break;
case 175:
/* Line 1455 of yacc.c */
#line 850 "ntp_parser.y"
{
if (input_from_file)
(yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer));
else {
(yyval.Attr_val) = NULL;
yyerror("enable/disable stats remote configuration ignored");
}
}
break;
case 176:
/* Line 1455 of yacc.c */
#line 865 "ntp_parser.y"
{ append_queue(cfgt.tinker, (yyvsp[(2) - (2)].Queue)); }
break;
case 177:
/* Line 1455 of yacc.c */
#line 869 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 178:
/* Line 1455 of yacc.c */
#line 870 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 179:
/* Line 1455 of yacc.c */
#line 874 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 180:
/* Line 1455 of yacc.c */
#line 875 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 181:
/* Line 1455 of yacc.c */
#line 876 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 182:
/* Line 1455 of yacc.c */
#line 877 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 183:
/* Line 1455 of yacc.c */
#line 878 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 184:
/* Line 1455 of yacc.c */
#line 879 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 185:
/* Line 1455 of yacc.c */
#line 880 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); }
break;
case 187:
/* Line 1455 of yacc.c */
#line 891 "ntp_parser.y"
{
if (curr_include_level >= MAXINCLUDELEVEL) {
fprintf(stderr, "getconfig: Maximum include file level exceeded.\n");
msyslog(LOG_ERR, "getconfig: Maximum include file level exceeded.");
}
else {
fp[curr_include_level + 1] = F_OPEN(FindConfig((yyvsp[(2) - (3)].String)), "r");
if (fp[curr_include_level + 1] == NULL) {
fprintf(stderr, "getconfig: Couldn't open <%s>\n", FindConfig((yyvsp[(2) - (3)].String)));
msyslog(LOG_ERR, "getconfig: Couldn't open <%s>", FindConfig((yyvsp[(2) - (3)].String)));
}
else
ip_file = fp[++curr_include_level];
}
}
break;
case 188:
/* Line 1455 of yacc.c */
#line 907 "ntp_parser.y"
{
while (curr_include_level != -1)
FCLOSE(fp[curr_include_level--]);
}
break;
case 189:
/* Line 1455 of yacc.c */
#line 913 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double))); }
break;
case 190:
/* Line 1455 of yacc.c */
#line 915 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer))); }
break;
case 191:
/* Line 1455 of yacc.c */
#line 917 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double))); }
break;
case 192:
/* Line 1455 of yacc.c */
#line 919 "ntp_parser.y"
{ /* Null action, possibly all null parms */ }
break;
case 193:
/* Line 1455 of yacc.c */
#line 921 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); }
break;
case 194:
/* Line 1455 of yacc.c */
#line 924 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); }
break;
case 195:
/* Line 1455 of yacc.c */
#line 926 "ntp_parser.y"
{
if (input_from_file)
enqueue(cfgt.vars,
create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)));
else {
free((yyvsp[(2) - (2)].String));
yyerror("logfile remote configuration ignored");
}
}
break;
case 196:
/* Line 1455 of yacc.c */
#line 937 "ntp_parser.y"
{ append_queue(cfgt.logconfig, (yyvsp[(2) - (2)].Queue)); }
break;
case 197:
/* Line 1455 of yacc.c */
#line 939 "ntp_parser.y"
{ append_queue(cfgt.phone, (yyvsp[(2) - (2)].Queue)); }
break;
case 198:
/* Line 1455 of yacc.c */
#line 941 "ntp_parser.y"
{
if (input_from_file)
enqueue(cfgt.vars,
create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)));
else {
free((yyvsp[(2) - (2)].String));
yyerror("saveconfigdir remote configuration ignored");
}
}
break;
case 199:
/* Line 1455 of yacc.c */
#line 951 "ntp_parser.y"
{ enqueue(cfgt.setvar, (yyvsp[(2) - (2)].Set_var)); }
break;
case 200:
/* Line 1455 of yacc.c */
#line 953 "ntp_parser.y"
{ enqueue(cfgt.trap, create_addr_opts_node((yyvsp[(2) - (2)].Address_node), NULL)); }
break;
case 201:
/* Line 1455 of yacc.c */
#line 955 "ntp_parser.y"
{ enqueue(cfgt.trap, create_addr_opts_node((yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue))); }
break;
case 202:
/* Line 1455 of yacc.c */
#line 957 "ntp_parser.y"
{ append_queue(cfgt.ttl, (yyvsp[(2) - (2)].Queue)); }
break;
case 203:
/* Line 1455 of yacc.c */
#line 959 "ntp_parser.y"
{ enqueue(cfgt.qos, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); }
break;
case 204:
/* Line 1455 of yacc.c */
#line 964 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval(T_Driftfile, (yyvsp[(1) - (1)].String))); }
break;
case 205:
/* Line 1455 of yacc.c */
#line 966 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_dval(T_WanderThreshold, (yyvsp[(2) - (2)].Double)));
enqueue(cfgt.vars, create_attr_sval(T_Driftfile, (yyvsp[(1) - (2)].String))); }
break;
case 206:
/* Line 1455 of yacc.c */
#line 969 "ntp_parser.y"
{ enqueue(cfgt.vars, create_attr_sval(T_Driftfile, "\0")); }
break;
case 207:
/* Line 1455 of yacc.c */
#line 974 "ntp_parser.y"
{ (yyval.Set_var) = create_setvar_node((yyvsp[(1) - (4)].String), (yyvsp[(3) - (4)].String), (yyvsp[(4) - (4)].Integer)); }
break;
case 208:
/* Line 1455 of yacc.c */
#line 976 "ntp_parser.y"
{ (yyval.Set_var) = create_setvar_node((yyvsp[(1) - (3)].String), (yyvsp[(3) - (3)].String), 0); }
break;
case 209:
/* Line 1455 of yacc.c */
#line 981 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 210:
/* Line 1455 of yacc.c */
#line 982 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 211:
/* Line 1455 of yacc.c */
#line 986 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); }
break;
case 212:
/* Line 1455 of yacc.c */
#line 987 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_pval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Address_node)); }
break;
case 213:
/* Line 1455 of yacc.c */
#line 991 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 214:
/* Line 1455 of yacc.c */
#line 992 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 215:
/* Line 1455 of yacc.c */
#line 997 "ntp_parser.y"
{
char prefix;
char * type;
switch ((yyvsp[(1) - (1)].String)[0]) {
case '+':
case '-':
case '=':
prefix = (yyvsp[(1) - (1)].String)[0];
type = (yyvsp[(1) - (1)].String) + 1;
break;
default:
prefix = '=';
type = (yyvsp[(1) - (1)].String);
}
(yyval.Attr_val) = create_attr_sval(prefix, estrdup(type));
YYFREE((yyvsp[(1) - (1)].String));
}
break;
case 216:
/* Line 1455 of yacc.c */
#line 1022 "ntp_parser.y"
{
enqueue(cfgt.nic_rules,
create_nic_rule_node((yyvsp[(3) - (3)].Integer), NULL, (yyvsp[(2) - (3)].Integer)));
}
break;
case 217:
/* Line 1455 of yacc.c */
#line 1027 "ntp_parser.y"
{
enqueue(cfgt.nic_rules,
create_nic_rule_node(0, (yyvsp[(3) - (3)].String), (yyvsp[(2) - (3)].Integer)));
}
break;
case 227:
/* Line 1455 of yacc.c */
#line 1058 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); }
break;
case 228:
/* Line 1455 of yacc.c */
#line 1059 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue(create_ival((yyvsp[(1) - (1)].Integer))); }
break;
case 229:
/* Line 1455 of yacc.c */
#line 1064 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); }
break;
case 230:
/* Line 1455 of yacc.c */
#line 1066 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); }
break;
case 231:
/* Line 1455 of yacc.c */
#line 1071 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_ival('i', (yyvsp[(1) - (1)].Integer)); }
break;
case 233:
/* Line 1455 of yacc.c */
#line 1077 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_shorts('-', (yyvsp[(2) - (5)].Integer), (yyvsp[(4) - (5)].Integer)); }
break;
case 234:
/* Line 1455 of yacc.c */
#line 1081 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_pval((yyvsp[(2) - (2)].String))); }
break;
case 235:
/* Line 1455 of yacc.c */
#line 1082 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue(create_pval((yyvsp[(1) - (1)].String))); }
break;
case 236:
/* Line 1455 of yacc.c */
#line 1086 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Address_node)); }
break;
case 237:
/* Line 1455 of yacc.c */
#line 1087 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Address_node)); }
break;
case 238:
/* Line 1455 of yacc.c */
#line 1092 "ntp_parser.y"
{
if ((yyvsp[(1) - (1)].Integer) != 0 && (yyvsp[(1) - (1)].Integer) != 1) {
yyerror("Integer value is not boolean (0 or 1). Assuming 1");
(yyval.Integer) = 1;
}
else
(yyval.Integer) = (yyvsp[(1) - (1)].Integer);
}
break;
case 239:
/* Line 1455 of yacc.c */
#line 1100 "ntp_parser.y"
{ (yyval.Integer) = 1; }
break;
case 240:
/* Line 1455 of yacc.c */
#line 1101 "ntp_parser.y"
{ (yyval.Integer) = 0; }
break;
case 241:
/* Line 1455 of yacc.c */
#line 1105 "ntp_parser.y"
{ (yyval.Double) = (double)(yyvsp[(1) - (1)].Integer); }
break;
case 243:
/* Line 1455 of yacc.c */
#line 1116 "ntp_parser.y"
{
cfgt.sim_details = create_sim_node((yyvsp[(3) - (5)].Queue), (yyvsp[(4) - (5)].Queue));
/* Reset the old_config_style variable */
old_config_style = 1;
}
break;
case 244:
/* Line 1455 of yacc.c */
#line 1130 "ntp_parser.y"
{ old_config_style = 0; }
break;
case 245:
/* Line 1455 of yacc.c */
#line 1134 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (3)].Queue), (yyvsp[(2) - (3)].Attr_val)); }
break;
case 246:
/* Line 1455 of yacc.c */
#line 1135 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (2)].Attr_val)); }
break;
case 247:
/* Line 1455 of yacc.c */
#line 1139 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 248:
/* Line 1455 of yacc.c */
#line 1140 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 249:
/* Line 1455 of yacc.c */
#line 1144 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Sim_server)); }
break;
case 250:
/* Line 1455 of yacc.c */
#line 1145 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Sim_server)); }
break;
case 251:
/* Line 1455 of yacc.c */
#line 1150 "ntp_parser.y"
{ (yyval.Sim_server) = create_sim_server((yyvsp[(1) - (5)].Address_node), (yyvsp[(3) - (5)].Double), (yyvsp[(4) - (5)].Queue)); }
break;
case 252:
/* Line 1455 of yacc.c */
#line 1154 "ntp_parser.y"
{ (yyval.Double) = (yyvsp[(3) - (4)].Double); }
break;
case 253:
/* Line 1455 of yacc.c */
#line 1158 "ntp_parser.y"
{ (yyval.Address_node) = (yyvsp[(3) - (3)].Address_node); }
break;
case 254:
/* Line 1455 of yacc.c */
#line 1162 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Sim_script)); }
break;
case 255:
/* Line 1455 of yacc.c */
#line 1163 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Sim_script)); }
break;
case 256:
/* Line 1455 of yacc.c */
#line 1168 "ntp_parser.y"
{ (yyval.Sim_script) = create_sim_script_info((yyvsp[(3) - (6)].Double), (yyvsp[(5) - (6)].Queue)); }
break;
case 257:
/* Line 1455 of yacc.c */
#line 1172 "ntp_parser.y"
{ (yyval.Queue) = enqueue((yyvsp[(1) - (3)].Queue), (yyvsp[(2) - (3)].Attr_val)); }
break;
case 258:
/* Line 1455 of yacc.c */
#line 1173 "ntp_parser.y"
{ (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (2)].Attr_val)); }
break;
case 259:
/* Line 1455 of yacc.c */
#line 1178 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 260:
/* Line 1455 of yacc.c */
#line 1180 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 261:
/* Line 1455 of yacc.c */
#line 1182 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 262:
/* Line 1455 of yacc.c */
#line 1184 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
case 263:
/* Line 1455 of yacc.c */
#line 1186 "ntp_parser.y"
{ (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); }
break;
/* Line 1455 of yacc.c */
#line 3836 "ntp_parser.c"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
| 168,877 |
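A note on the bison skeleton that closes the record above: the goto computation indexes the packed yytable only when a bounds test and the yycheck guard both pass, and otherwise falls back to yydefgoto. A minimal C++ sketch of that defaulted table lookup follows; all table names here are hypothetical stand-ins for the generated arrays, not the real bison symbols.

#include <cstddef>

// Hypothetical compressed transition tables, in the style of the
// generated skeleton above: `table` holds explicit entries, `check`
// records which (base, key) pair owns each slot, and `fallback`
// supplies the per-symbol default transition.
int next_state(int base, int key,
               const int *table, const int *check,
               const int *fallback, std::size_t table_len) {
    int idx = base + key;
    // Index the packed table only when the slot really belongs to
    // this (base, key) pair; otherwise use the default transition.
    if (idx >= 0 && static_cast<std::size_t>(idx) < table_len &&
        check[idx] == key)
        return table[idx];
    return fallback[key];
}

The check-before-index guard is what makes the compressed representation safe to walk: a slot is used only if check[] proves it belongs to the current state.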
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static int su3000_frontend_attach(struct dvb_usb_adapter *d)
{
u8 obuf[3] = { 0xe, 0x80, 0 };
u8 ibuf[] = { 0 };
if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
obuf[0] = 0xe;
obuf[1] = 0x02;
obuf[2] = 1;
if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
obuf[0] = 0xe;
obuf[1] = 0x83;
obuf[2] = 0;
if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
obuf[0] = 0xe;
obuf[1] = 0x83;
obuf[2] = 1;
if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
obuf[0] = 0x51;
if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
err("command 0x51 transfer failed.");
d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
&d->dev->i2c_adap);
if (d->fe_adap[0].fe == NULL)
return -EIO;
if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
&dw2104_ts2020_config,
&d->dev->i2c_adap)) {
info("Attached DS3000/TS2020!");
return 0;
}
info("Failed to attach DS3000/TS2020!");
return -EIO;
}
Commit Message: [media] dw2102: don't do DMA on stack
On Kernel 4.9, WARNINGs about doing DMA on stack are hit at
the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach().
Both were due to the use of buffers on the stack as parameters to
dvb_usb_generic_rw() and the resulting attempt to do DMA with them.
The device was non-functional as a result.
So, switch this driver over to use a buffer within the device state
structure, as has been done with other DVB-USB drivers.
Tested with TechnoTrend TT-connect S2-4600.
[[email protected]: fixed a warning at su3000_i2c_transfer() that
state var were dereferenced before check 'd']
Signed-off-by: Jonathan McDowell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
CWE ID: CWE-119 | static int su3000_frontend_attach(struct dvb_usb_adapter *d)
static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap->dev;
struct dw2102_state *state = d->priv;
mutex_lock(&d->data_mutex);
state->data[0] = 0xe;
state->data[1] = 0x80;
state->data[2] = 0;
if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
state->data[0] = 0xe;
state->data[1] = 0x02;
state->data[2] = 1;
if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
state->data[0] = 0xe;
state->data[1] = 0x83;
state->data[2] = 0;
if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
state->data[0] = 0xe;
state->data[1] = 0x83;
state->data[2] = 1;
if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
state->data[0] = 0x51;
if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
mutex_unlock(&d->data_mutex);
adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
&d->i2c_adap);
if (adap->fe_adap[0].fe == NULL)
return -EIO;
if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
&dw2104_ts2020_config,
&d->i2c_adap)) {
info("Attached DS3000/TS2020!");
return 0;
}
info("Failed to attach DS3000/TS2020!");
return -EIO;
}
| 168,225 |
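The fixed function above moves the USB transfer buffer off the stack into the driver's state structure and serializes access with a mutex. A minimal sketch of that pattern, with hypothetical names and std::mutex standing in for the kernel's data_mutex:

#include <cstdint>
#include <cstring>
#include <mutex>

struct device_state {
    std::mutex data_mutex;   // serializes users of the shared buffer
    std::uint8_t data[64];   // DMA-safe: lives in heap-allocated state,
                             // never in a caller's stack frame
};

// Hypothetical transfer helper; a real driver would hand state->data
// to the USB core, which may DMA directly from or into it.
int do_transfer(device_state *state, const std::uint8_t *cmd, std::size_t len) {
    if (len > sizeof state->data)
        return -1;
    std::lock_guard<std::mutex> lock(state->data_mutex);
    std::memcpy(state->data, cmd, len);  // stage command in the safe buffer
    // ... submit state->data to the bus here ...
    return 0;
}

Because the buffer now outlives any single call and is shared, the lock is not optional: without it two concurrent commands would scribble over each other's staging bytes.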
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void UpdateAtlas::didSwapBuffers()
{
m_areaAllocator.clear();
buildLayoutIfNeeded();
}
Commit Message: [WK2] LayerTreeCoordinator should release unused UpdatedAtlases
https://bugs.webkit.org/show_bug.cgi?id=95072
Reviewed by Jocelyn Turcotte.
Release graphic buffers that haven't been used for a while in order to save memory.
This way we can give back memory to the system when no user interaction happens
after a period of time, for example when we are in the background.
* Shared/ShareableBitmap.h:
* WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.cpp:
(WebKit::LayerTreeCoordinator::LayerTreeCoordinator):
(WebKit::LayerTreeCoordinator::beginContentUpdate):
(WebKit):
(WebKit::LayerTreeCoordinator::scheduleReleaseInactiveAtlases):
(WebKit::LayerTreeCoordinator::releaseInactiveAtlasesTimerFired):
* WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.h:
(LayerTreeCoordinator):
* WebProcess/WebPage/UpdateAtlas.cpp:
(WebKit::UpdateAtlas::UpdateAtlas):
(WebKit::UpdateAtlas::didSwapBuffers):
Don't call buildLayoutIfNeeded here. It's enought to call it in beginPaintingOnAvailableBuffer
and this way we can track whether this atlas is used with m_areaAllocator.
(WebKit::UpdateAtlas::beginPaintingOnAvailableBuffer):
* WebProcess/WebPage/UpdateAtlas.h:
(WebKit::UpdateAtlas::addTimeInactive):
(WebKit::UpdateAtlas::isInactive):
(WebKit::UpdateAtlas::isInUse):
(UpdateAtlas):
git-svn-id: svn://svn.chromium.org/blink/trunk@128473 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | void UpdateAtlas::didSwapBuffers()
{
m_areaAllocator.clear();
}
| 170,272 |
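Per the commit message above, atlases now track inactivity so a periodic timer can release buffers that have gone unused. A minimal sketch of that accounting, with hypothetical member names:

class UpdateAtlasSketch {
public:
    // Called by the periodic timer with the elapsed interval.
    void addTimeInactive(double seconds) { m_inactivityInSeconds += seconds; }
    // Any real use of the atlas resets the clock.
    void didUse() { m_inactivityInSeconds = 0; }
    bool isInactive(double cutoffInSeconds) const {
        return m_inactivityInSeconds >= cutoffInSeconds;
    }
private:
    double m_inactivityInSeconds = 0;
};

The timer then walks the atlas list and frees any atlas for which isInactive() holds, giving memory back to the system when no user interaction happens for a while.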
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: AcceleratedStaticBitmapImage::AcceleratedStaticBitmapImage(
const gpu::Mailbox& mailbox,
const gpu::SyncToken& sync_token,
unsigned texture_id,
base::WeakPtr<WebGraphicsContext3DProviderWrapper>&&
context_provider_wrapper,
IntSize mailbox_size)
: paint_image_content_id_(cc::PaintImage::GetNextContentId()) {
texture_holder_ = std::make_unique<MailboxTextureHolder>(
mailbox, sync_token, texture_id, std::move(context_provider_wrapper),
mailbox_size);
thread_checker_.DetachFromThread();
}
Commit Message: Fix *StaticBitmapImage ThreadChecker and unaccelerated SkImage destroy
- AcceleratedStaticBitmapImage was misusing ThreadChecker by having its
own detach logic. Using proper DetachThread is simpler, cleaner and
correct.
- UnacceleratedStaticBitmapImage didn't destroy the SkImage in the
proper thread, leading to GrContext/SkSp problems.
Bug: 890576
Change-Id: Ic71e7f7322b0b851774628247aa5256664bc0723
Reviewed-on: https://chromium-review.googlesource.com/c/1307775
Reviewed-by: Gabriel Charette <[email protected]>
Reviewed-by: Jeremy Roman <[email protected]>
Commit-Queue: Fernando Serboncini <[email protected]>
Cr-Commit-Position: refs/heads/master@{#604427}
CWE ID: CWE-119 | AcceleratedStaticBitmapImage::AcceleratedStaticBitmapImage(
const gpu::Mailbox& mailbox,
const gpu::SyncToken& sync_token,
unsigned texture_id,
base::WeakPtr<WebGraphicsContext3DProviderWrapper>&&
context_provider_wrapper,
IntSize mailbox_size)
: paint_image_content_id_(cc::PaintImage::GetNextContentId()) {
texture_holder_ = std::make_unique<MailboxTextureHolder>(
mailbox, sync_token, texture_id, std::move(context_provider_wrapper),
mailbox_size);
}
| 172,589 |
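The fix above removes the ad-hoc DetachFromThread() call in the constructor in favor of proper detach logic. The underlying pattern: a checker binds to the first thread that uses it, and must be explicitly detached exactly once at the hand-off point before ownership moves. A minimal hand-rolled sketch, assuming base::ThreadChecker-like semantics:

#include <thread>

class ThreadCheckerSketch {
public:
    bool CalledOnValidThread() {
        if (id_ == std::thread::id())      // detached: bind to current thread
            id_ = std::this_thread::get_id();
        return id_ == std::this_thread::get_id();
    }
    void DetachFromThread() { id_ = std::thread::id(); }
private:
    std::thread::id id_;                   // default id means "unbound"
};

Objects created on one thread and used on another call DetachFromThread() once where ownership transfers; scattering detach calls through constructors, as the old code did, defeats the check entirely.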
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static v8::Handle<v8::Value> idbKeyCallback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.idbKey");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError();
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(RefPtr<IDBKey>, key, createIDBKeyFromValue(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)));
imp->idbKey(key.get());
return v8::Handle<v8::Value>();
}
Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | static v8::Handle<v8::Value> idbKeyCallback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.idbKey");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate());
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(RefPtr<IDBKey>, key, createIDBKeyFromValue(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)));
imp->idbKey(key.get());
return v8::Handle<v8::Value>();
}
| 171,084 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static bool blit_is_unsafe(struct CirrusVGAState *s)
{
/* should be the case, see cirrus_bitblt_start */
assert(s->cirrus_blt_width > 0);
assert(s->cirrus_blt_height > 0);
if (blit_region_is_unsafe(s, s->cirrus_blt_dstpitch,
s->cirrus_blt_dstaddr & s->cirrus_addr_mask)) {
return true;
}
return false;
}
Commit Message:
CWE ID: CWE-119 | static bool blit_is_unsafe(struct CirrusVGAState *s)
{
/* should be the case, see cirrus_bitblt_start */
assert(s->cirrus_blt_width > 0);
assert(s->cirrus_blt_height > 0);
if (s->cirrus_blt_width > CIRRUS_BLTBUFSIZE) {
return true;
}
if (blit_region_is_unsafe(s, s->cirrus_blt_dstpitch,
s->cirrus_blt_dstaddr & s->cirrus_addr_mask)) {
return true;
}
return false;
}
| 164,894 |
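The fixed blit_is_unsafe() above adds an explicit width bound against the staging buffer before the finer-grained region check. A minimal sketch of validating a guest-controlled blit against a fixed-size buffer; the sizes and names here are hypothetical, not the real Cirrus constants:

#include <cstddef>

constexpr std::size_t kBltBufSize = 8192;  // stand-in for CIRRUS_BLTBUFSIZE

// Reject any blit whose per-row width cannot fit the staging buffer,
// before doing the address-range arithmetic.
bool blit_params_unsafe(std::size_t width, std::size_t height,
                        std::size_t pitch, std::size_t vram_size,
                        std::size_t start) {
    if (width == 0 || height == 0) return true;
    if (width > kBltBufSize) return true;          // the added check
    // Last byte touched: start of the last row plus one row's width.
    // A production check would also guard this multiplication
    // against overflow.
    std::size_t last = start + pitch * (height - 1);
    if (last > vram_size || vram_size - last < width) return true;
    return false;
}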
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: string DecodeFile(const string& filename, int num_threads) {
libvpx_test::WebMVideoSource video(filename);
video.Init();
vpx_codec_dec_cfg_t cfg = {0};
cfg.threads = num_threads;
libvpx_test::VP9Decoder decoder(cfg, 0);
libvpx_test::MD5 md5;
for (video.Begin(); video.cxdata(); video.Next()) {
const vpx_codec_err_t res =
decoder.DecodeFrame(video.cxdata(), video.frame_size());
if (res != VPX_CODEC_OK) {
EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
break;
}
libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
const vpx_image_t *img = NULL;
while ((img = dec_iter.Next())) {
md5.Add(img);
}
}
return string(md5.Get());
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | string DecodeFile(const string& filename, int num_threads) {
libvpx_test::WebMVideoSource video(filename);
video.Init();
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.threads = num_threads;
libvpx_test::VP9Decoder decoder(cfg, 0);
libvpx_test::MD5 md5;
for (video.Begin(); video.cxdata(); video.Next()) {
const vpx_codec_err_t res =
decoder.DecodeFrame(video.cxdata(), video.frame_size());
if (res != VPX_CODEC_OK) {
EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
break;
}
libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
const vpx_image_t *img = NULL;
while ((img = dec_iter.Next())) {
md5.Add(img);
}
}
return string(md5.Get());
}
| 174,598 |
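The one-line fix above replaces brace-initialization of the decoder config with C++ value-initialization. A minimal sketch of the distinction, using a hypothetical struct: for aggregates both forms zero every member, but = T() also works for non-aggregates and sidesteps missing-field-initializer warnings.

struct DecCfg { unsigned threads; unsigned width; unsigned height; };

int main() {
    DecCfg a = {0};       // aggregate init: remaining members zeroed
    DecCfg b = DecCfg();  // value-init: every member zeroed, no warning
    return a.threads + b.threads;   // both read back as 0
}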
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int ssl_get_prev_session(SSL *s, unsigned char *session_id, int len,
const unsigned char *limit)
{
/* This is used only by servers. */
SSL_SESSION *ret = NULL;
int fatal = 0;
int try_session_cache = 1;
#ifndef OPENSSL_NO_TLSEXT
int r;
#endif
if (session_id + len > limit) {
fatal = 1;
goto err;
}
if (len == 0)
try_session_cache = 0;
#ifndef OPENSSL_NO_TLSEXT
/* sets s->tlsext_ticket_expected */
r = tls1_process_ticket(s, session_id, len, limit, &ret);
switch (r) {
case -1: /* Error during processing */
fatal = 1;
goto err;
case 0: /* No ticket found */
case 1: /* Zero length ticket found */
break; /* Ok to carry on processing session id. */
case 2: /* Ticket found but not decrypted. */
case 3: /* Ticket decrypted, *ret has been set. */
try_session_cache = 0;
break;
default:
abort();
}
#endif
if (try_session_cache &&
ret == NULL &&
!(s->session_ctx->session_cache_mode &
SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) {
SSL_SESSION data;
data.ssl_version = s->version;
data.session_id_length = len;
if (len == 0)
return 0;
memcpy(data.session_id, session_id, len);
CRYPTO_r_lock(CRYPTO_LOCK_SSL_CTX);
ret = lh_SSL_SESSION_retrieve(s->session_ctx->sessions, &data);
if (ret != NULL) {
/* don't allow other threads to steal it: */
CRYPTO_add(&ret->references, 1, CRYPTO_LOCK_SSL_SESSION);
}
CRYPTO_r_unlock(CRYPTO_LOCK_SSL_CTX);
if (ret == NULL)
s->session_ctx->stats.sess_miss++;
}
if (try_session_cache &&
ret == NULL && s->session_ctx->get_session_cb != NULL) {
int copy = 1;
        if ((ret = s->session_ctx->get_session_cb(s, session_id, len, &copy))) {
s->session_ctx->stats.sess_cb_hit++;
/*
* Increment reference count now if the session callback asks us
* to do so (note that if the session structures returned by the
* callback are shared between threads, it must handle the
* reference count itself [i.e. copy == 0], or things won't be
* thread-safe).
*/
if (copy)
CRYPTO_add(&ret->references, 1, CRYPTO_LOCK_SSL_SESSION);
/*
* Add the externally cached session to the internal cache as
* well if and only if we are supposed to.
*/
if (!
(s->session_ctx->session_cache_mode &
SSL_SESS_CACHE_NO_INTERNAL_STORE))
/*
* The following should not return 1, otherwise, things are
* very strange
*/
SSL_CTX_add_session(s->session_ctx, ret);
}
}
if (ret == NULL)
goto err;
/* Now ret is non-NULL and we own one of its reference counts. */
if (ret->sid_ctx_length != s->sid_ctx_length
|| memcmp(ret->sid_ctx, s->sid_ctx, ret->sid_ctx_length)) {
/*
* We have the session requested by the client, but we don't want to
* use it in this context.
*/
goto err; /* treat like cache miss */
}
if ((s->verify_mode & SSL_VERIFY_PEER) && s->sid_ctx_length == 0) {
/*
* We can't be sure if this session is being used out of context,
* which is especially important for SSL_VERIFY_PEER. The application
* should have used SSL[_CTX]_set_session_id_context. For this error
* case, we generate an error instead of treating the event like a
* cache miss (otherwise it would be easy for applications to
* effectively disable the session cache by accident without anyone
* noticing).
*/
SSLerr(SSL_F_SSL_GET_PREV_SESSION,
SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED);
fatal = 1;
goto err;
}
if (ret->cipher == NULL) {
unsigned char buf[5], *p;
unsigned long l;
p = buf;
l = ret->cipher_id;
l2n(l, p);
if ((ret->ssl_version >> 8) >= SSL3_VERSION_MAJOR)
ret->cipher = ssl_get_cipher_by_char(s, &(buf[2]));
else
ret->cipher = ssl_get_cipher_by_char(s, &(buf[1]));
if (ret->cipher == NULL)
goto err;
}
if (ret->timeout < (long)(time(NULL) - ret->time)) { /* timeout */
s->session_ctx->stats.sess_timeout++;
if (try_session_cache) {
/* session was from the cache, so remove it */
SSL_CTX_remove_session(s->session_ctx, ret);
}
goto err;
}
s->session_ctx->stats.sess_hit++;
if (s->session != NULL)
SSL_SESSION_free(s->session);
s->session = ret;
s->verify_result = s->session->verify_result;
return 1;
err:
if (ret != NULL) {
SSL_SESSION_free(ret);
#ifndef OPENSSL_NO_TLSEXT
if (!try_session_cache) {
/*
* The session was from a ticket, so we should issue a ticket for
* the new session
*/
s->tlsext_ticket_expected = 1;
}
#endif
}
if (fatal)
return -1;
else
return 0;
}
Commit Message:
CWE ID: CWE-190 | int ssl_get_prev_session(SSL *s, unsigned char *session_id, int len,
const unsigned char *limit)
{
/* This is used only by servers. */
SSL_SESSION *ret = NULL;
int fatal = 0;
int try_session_cache = 1;
#ifndef OPENSSL_NO_TLSEXT
int r;
#endif
if (limit - session_id < len) {
fatal = 1;
goto err;
}
if (len == 0)
try_session_cache = 0;
#ifndef OPENSSL_NO_TLSEXT
/* sets s->tlsext_ticket_expected */
r = tls1_process_ticket(s, session_id, len, limit, &ret);
switch (r) {
case -1: /* Error during processing */
fatal = 1;
goto err;
case 0: /* No ticket found */
case 1: /* Zero length ticket found */
break; /* Ok to carry on processing session id. */
case 2: /* Ticket found but not decrypted. */
case 3: /* Ticket decrypted, *ret has been set. */
try_session_cache = 0;
break;
default:
abort();
}
#endif
if (try_session_cache &&
ret == NULL &&
!(s->session_ctx->session_cache_mode &
SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) {
SSL_SESSION data;
data.ssl_version = s->version;
data.session_id_length = len;
if (len == 0)
return 0;
memcpy(data.session_id, session_id, len);
CRYPTO_r_lock(CRYPTO_LOCK_SSL_CTX);
ret = lh_SSL_SESSION_retrieve(s->session_ctx->sessions, &data);
if (ret != NULL) {
/* don't allow other threads to steal it: */
CRYPTO_add(&ret->references, 1, CRYPTO_LOCK_SSL_SESSION);
}
CRYPTO_r_unlock(CRYPTO_LOCK_SSL_CTX);
if (ret == NULL)
s->session_ctx->stats.sess_miss++;
}
if (try_session_cache &&
ret == NULL && s->session_ctx->get_session_cb != NULL) {
int copy = 1;
        if ((ret = s->session_ctx->get_session_cb(s, session_id, len, &copy))) {
s->session_ctx->stats.sess_cb_hit++;
/*
* Increment reference count now if the session callback asks us
* to do so (note that if the session structures returned by the
* callback are shared between threads, it must handle the
* reference count itself [i.e. copy == 0], or things won't be
* thread-safe).
*/
if (copy)
CRYPTO_add(&ret->references, 1, CRYPTO_LOCK_SSL_SESSION);
/*
* Add the externally cached session to the internal cache as
* well if and only if we are supposed to.
*/
if (!
(s->session_ctx->session_cache_mode &
SSL_SESS_CACHE_NO_INTERNAL_STORE))
/*
* The following should not return 1, otherwise, things are
* very strange
*/
SSL_CTX_add_session(s->session_ctx, ret);
}
}
if (ret == NULL)
goto err;
/* Now ret is non-NULL and we own one of its reference counts. */
if (ret->sid_ctx_length != s->sid_ctx_length
|| memcmp(ret->sid_ctx, s->sid_ctx, ret->sid_ctx_length)) {
/*
* We have the session requested by the client, but we don't want to
* use it in this context.
*/
goto err; /* treat like cache miss */
}
if ((s->verify_mode & SSL_VERIFY_PEER) && s->sid_ctx_length == 0) {
/*
* We can't be sure if this session is being used out of context,
* which is especially important for SSL_VERIFY_PEER. The application
* should have used SSL[_CTX]_set_session_id_context. For this error
* case, we generate an error instead of treating the event like a
* cache miss (otherwise it would be easy for applications to
* effectively disable the session cache by accident without anyone
* noticing).
*/
SSLerr(SSL_F_SSL_GET_PREV_SESSION,
SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED);
fatal = 1;
goto err;
}
if (ret->cipher == NULL) {
unsigned char buf[5], *p;
unsigned long l;
p = buf;
l = ret->cipher_id;
l2n(l, p);
if ((ret->ssl_version >> 8) >= SSL3_VERSION_MAJOR)
ret->cipher = ssl_get_cipher_by_char(s, &(buf[2]));
else
ret->cipher = ssl_get_cipher_by_char(s, &(buf[1]));
if (ret->cipher == NULL)
goto err;
}
if (ret->timeout < (long)(time(NULL) - ret->time)) { /* timeout */
s->session_ctx->stats.sess_timeout++;
if (try_session_cache) {
/* session was from the cache, so remove it */
SSL_CTX_remove_session(s->session_ctx, ret);
}
goto err;
}
s->session_ctx->stats.sess_hit++;
if (s->session != NULL)
SSL_SESSION_free(s->session);
s->session = ret;
s->verify_result = s->session->verify_result;
return 1;
err:
if (ret != NULL) {
SSL_SESSION_free(ret);
#ifndef OPENSSL_NO_TLSEXT
if (!try_session_cache) {
/*
* The session was from a ticket, so we should issue a ticket for
* the new session
*/
s->tlsext_ticket_expected = 1;
}
#endif
}
if (fatal)
return -1;
else
return 0;
}
| 165,201 |
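The CWE-190 fix above rewrites session_id + len > limit as limit - session_id < len. The first form can wrap the pointer past the end of the address space when len is attacker-influenced, making an out-of-bounds read compare as "in bounds"; the second only subtracts pointers already known to satisfy session_id <= limit. A minimal sketch of the idiom:

#include <cstddef>

// Safe bounds check for a field of `len` bytes starting at `p`
// inside a buffer ending at `limit`. Precondition: p <= limit.
bool field_fits(const unsigned char *p, std::size_t len,
                const unsigned char *limit) {
    // BAD:  p + len > limit   -- p + len may wrap and compare as valid.
    // GOOD: compare the remaining space against the requested length.
    return static_cast<std::size_t>(limit - p) >= len;
}

Given the precondition, limit - p is never negative, so the subtraction is well defined and no wrap-around is possible.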
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
current_thread_info()->kern_una_insn = insn;
orig_asi = asi = decode_asi(insn, regs);
/* If this is a {get,put}_user() on an unaligned userspace pointer,
* just signal a fault and do not log the event.
*/
if (asi == ASI_AIUS) {
kernel_mna_trap_fault(0);
return;
}
log_unaligned(regs);
if (!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel "
"at <%016lx>.\n", regs->tpc);
unaligned_panic("Kernel does fpu/atomic "
"unaligned load/store.", regs);
kernel_mna_trap_fault(0);
} else {
unsigned long addr, *reg_addr;
int err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
case ASI_AIUSL:
case ASI_PL:
case ASI_SL:
case ASI_PNFL:
case ASI_SNFL:
asi &= ~0x08;
break;
}
switch (dir) {
case load:
reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
err = do_int_load(reg_addr, size,
(unsigned long *) addr,
decode_signedness(insn), asi);
if (likely(!err) && unlikely(asi != orig_asi)) {
unsigned long val_in = *reg_addr;
switch (size) {
case 2:
val_in = swab16(val_in);
break;
case 4:
val_in = swab32(val_in);
break;
case 8:
val_in = swab64(val_in);
break;
case 16:
default:
BUG();
break;
}
*reg_addr = val_in;
}
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs,
asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (unlikely(err))
kernel_mna_trap_fault(1);
else
advance(regs);
}
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
CWE ID: CWE-399 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
current_thread_info()->kern_una_insn = insn;
orig_asi = asi = decode_asi(insn, regs);
/* If this is a {get,put}_user() on an unaligned userspace pointer,
* just signal a fault and do not log the event.
*/
if (asi == ASI_AIUS) {
kernel_mna_trap_fault(0);
return;
}
log_unaligned(regs);
if (!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel "
"at <%016lx>.\n", regs->tpc);
unaligned_panic("Kernel does fpu/atomic "
"unaligned load/store.", regs);
kernel_mna_trap_fault(0);
} else {
unsigned long addr, *reg_addr;
int err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
case ASI_AIUSL:
case ASI_PL:
case ASI_SL:
case ASI_PNFL:
case ASI_SNFL:
asi &= ~0x08;
break;
}
switch (dir) {
case load:
reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
err = do_int_load(reg_addr, size,
(unsigned long *) addr,
decode_signedness(insn), asi);
if (likely(!err) && unlikely(asi != orig_asi)) {
unsigned long val_in = *reg_addr;
switch (size) {
case 2:
val_in = swab16(val_in);
break;
case 4:
val_in = swab32(val_in);
break;
case 8:
val_in = swab64(val_in);
break;
case 16:
default:
BUG();
break;
}
*reg_addr = val_in;
}
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs,
asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (unlikely(err))
kernel_mna_trap_fault(1);
else
advance(regs);
}
}
| 165,812 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void ProcessStateChangesUnifiedPlan(
WebRtcSetDescriptionObserver::States states) {
DCHECK_EQ(sdp_semantics_, webrtc::SdpSemantics::kUnifiedPlan);
handler_->OnModifyTransceivers(
std::move(states.transceiver_states),
action_ == PeerConnectionTracker::ACTION_SET_REMOTE_DESCRIPTION);
}
Commit Message: Check weak pointers in RTCPeerConnectionHandler::WebRtcSetDescriptionObserverImpl
Bug: 912074
Change-Id: I8ba86751f5d5bf12db51520f985ef0d3dae63ed8
Reviewed-on: https://chromium-review.googlesource.com/c/1411916
Commit-Queue: Guido Urdaneta <[email protected]>
Reviewed-by: Henrik Boström <[email protected]>
Cr-Commit-Position: refs/heads/master@{#622945}
CWE ID: CWE-416 | void ProcessStateChangesUnifiedPlan(
WebRtcSetDescriptionObserver::States states) {
DCHECK_EQ(sdp_semantics_, webrtc::SdpSemantics::kUnifiedPlan);
if (handler_) {
handler_->OnModifyTransceivers(
std::move(states.transceiver_states),
action_ == PeerConnectionTracker::ACTION_SET_REMOTE_DESCRIPTION);
}
}
| 173,075 |
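The fix above guards the dereference of handler_, a base::WeakPtr that may have been invalidated between posting the callback and running it. A minimal sketch of the same lifetime pattern, using std::weak_ptr as a standard-library stand-in for Chromium's weak pointers:

#include <memory>

struct Handler { void OnModifyTransceivers() {} };

// Hypothetical deferred callback holding a weak reference to its target.
void process_state_changes(const std::weak_ptr<Handler>& weak_handler) {
    // lock() yields a strong pointer only if the target is still alive,
    // turning a potential use-after-free into a clean no-op.
    if (auto handler = weak_handler.lock())
        handler->OnModifyTransceivers();
}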
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void FrameLoader::Trace(blink::Visitor* visitor) {
visitor->Trace(frame_);
visitor->Trace(progress_tracker_);
visitor->Trace(document_loader_);
visitor->Trace(provisional_document_loader_);
}
Commit Message: Inherit the navigation initiator when navigating instead of the parent/opener
Spec PR: https://github.com/w3c/webappsec-csp/pull/358
Bug: 905301, 894228, 836148
Change-Id: I43ada2266d42d1cd56dbe3c6dd89d115e878a83a
Reviewed-on: https://chromium-review.googlesource.com/c/1314633
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Mike West <[email protected]>
Cr-Commit-Position: refs/heads/master@{#610850}
CWE ID: CWE-20 | void FrameLoader::Trace(blink::Visitor* visitor) {
visitor->Trace(frame_);
visitor->Trace(progress_tracker_);
visitor->Trace(document_loader_);
visitor->Trace(provisional_document_loader_);
visitor->Trace(last_origin_document_);
}
| 173,059 |
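The fix above adds the newly introduced member to Trace(), which the garbage collector uses to discover reachable objects; any GC-managed field omitted from Trace() can be collected while still referenced. A minimal sketch of the invariant, with a hypothetical visitor and member type:

struct Visitor { template <class T> void Trace(const T&) {} };
struct Member {};   // stand-in for a GC-managed handle like blink::Member<T>

struct FrameLoaderSketch {
    Member frame_, document_loader_, last_origin_document_;
    void Trace(Visitor* visitor) {
        visitor->Trace(frame_);
        visitor->Trace(document_loader_);
        visitor->Trace(last_origin_document_);  // every GC-ed field, no exceptions
    }
};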
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: LazyBackgroundPageNativeHandler::LazyBackgroundPageNativeHandler(
ScriptContext* context)
: ObjectBackedNativeHandler(context) {
RouteFunction(
"IncrementKeepaliveCount",
base::Bind(&LazyBackgroundPageNativeHandler::IncrementKeepaliveCount,
base::Unretained(this)));
RouteFunction(
"DecrementKeepaliveCount",
base::Bind(&LazyBackgroundPageNativeHandler::DecrementKeepaliveCount,
base::Unretained(this)));
}
Commit Message: [Extensions] Expand bindings access checks
BUG=601149
BUG=601073
Review URL: https://codereview.chromium.org/1866103002
Cr-Commit-Position: refs/heads/master@{#387710}
CWE ID: CWE-284 | LazyBackgroundPageNativeHandler::LazyBackgroundPageNativeHandler(
ScriptContext* context)
: ObjectBackedNativeHandler(context) {
RouteFunction(
"IncrementKeepaliveCount", "tts",
base::Bind(&LazyBackgroundPageNativeHandler::IncrementKeepaliveCount,
base::Unretained(this)));
RouteFunction(
"DecrementKeepaliveCount", "tts",
base::Bind(&LazyBackgroundPageNativeHandler::DecrementKeepaliveCount,
base::Unretained(this)));
}
| 172,250 |
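The fix above passes a feature name ("tts") into RouteFunction so the bindings layer can verify the calling context is allowed to use the API before dispatching. A minimal sketch of feature-gated dispatch; the registry and availability check here are hypothetical, not the real extensions bindings API:

#include <functional>
#include <map>
#include <string>
#include <utility>

struct Context {
    bool HasFeature(const std::string& f) const { return f == "tts"; }
};

class Router {
public:
    struct Route { std::string feature; std::function<void()> fn; };

    void RouteFunction(const std::string& name, const std::string& feature,
                       std::function<void()> fn) {
        routes_[name] = Route{feature, std::move(fn)};
    }
    bool Invoke(const Context& ctx, const std::string& name) {
        auto it = routes_.find(name);
        if (it == routes_.end()) return false;
        if (!ctx.HasFeature(it->second.feature)) return false;  // access check
        it->second.fn();
        return true;
    }
private:
    std::map<std::string, Route> routes_;
};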
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int ff_mms_asf_header_parser(MMSContext *mms)
{
uint8_t *p = mms->asf_header;
uint8_t *end;
int flags, stream_id;
mms->stream_num = 0;
if (mms->asf_header_size < sizeof(ff_asf_guid) * 2 + 22 ||
memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (invalid ASF header, size=%d)\n",
mms->asf_header_size);
return AVERROR_INVALIDDATA;
}
end = mms->asf_header + mms->asf_header_size;
p += sizeof(ff_asf_guid) + 14;
while(end - p >= sizeof(ff_asf_guid) + 8) {
uint64_t chunksize;
if (!memcmp(p, ff_asf_data_header, sizeof(ff_asf_guid))) {
chunksize = 50; // see Reference [2] section 5.1
} else {
chunksize = AV_RL64(p + sizeof(ff_asf_guid));
}
if (!chunksize || chunksize > end - p) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (header chunksize %"PRId64" is invalid)\n",
chunksize);
return AVERROR_INVALIDDATA;
}
if (!memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
/* read packet size */
if (end - p > sizeof(ff_asf_guid) * 2 + 68) {
mms->asf_packet_len = AV_RL32(p + sizeof(ff_asf_guid) * 2 + 64);
if (mms->asf_packet_len <= 0 || mms->asf_packet_len > sizeof(mms->in_buffer)) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (too large pkt_len %d)\n",
mms->asf_packet_len);
return AVERROR_INVALIDDATA;
}
}
} else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) {
flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24);
stream_id = flags & 0x7F;
if (mms->stream_num < MMS_MAX_STREAMS &&
46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) {
mms->streams = av_fast_realloc(mms->streams,
&mms->nb_streams_allocated,
(mms->stream_num + 1) * sizeof(MMSStream));
if (!mms->streams)
return AVERROR(ENOMEM);
mms->streams[mms->stream_num].id = stream_id;
mms->stream_num++;
} else {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (too many A/V streams)\n");
return AVERROR_INVALIDDATA;
}
} else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) {
if (end - p >= 88) {
int stream_count = AV_RL16(p + 84), ext_len_count = AV_RL16(p + 86);
uint64_t skip_bytes = 88;
while (stream_count--) {
if (end - p < skip_bytes + 4) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (next stream name length is not in the buffer)\n");
return AVERROR_INVALIDDATA;
}
skip_bytes += 4 + AV_RL16(p + skip_bytes + 2);
}
while (ext_len_count--) {
if (end - p < skip_bytes + 22) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (next extension system info length is not in the buffer)\n");
return AVERROR_INVALIDDATA;
}
skip_bytes += 22 + AV_RL32(p + skip_bytes + 18);
}
if (end - p < skip_bytes) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (the last extension system info length is invalid)\n");
return AVERROR_INVALIDDATA;
}
if (chunksize - skip_bytes > 24)
chunksize = skip_bytes;
}
} else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) {
chunksize = 46; // see references [2] section 3.4. This should be set 46.
}
p += chunksize;
}
return 0;
}
Commit Message: avformat/mms: Add missing chunksize check
Fixes: out of array read
Fixes: mms-crash-01b6c5d85f9d9f40f4e879896103e9f5b222816a
Found-by: Paul Ch <[email protected]>
1st hunk by Paul Ch <[email protected]>
Tested-by: Paul Ch <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
CWE ID: CWE-125 | int ff_mms_asf_header_parser(MMSContext *mms)
{
uint8_t *p = mms->asf_header;
uint8_t *end;
int flags, stream_id;
mms->stream_num = 0;
if (mms->asf_header_size < sizeof(ff_asf_guid) * 2 + 22 ||
memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (invalid ASF header, size=%d)\n",
mms->asf_header_size);
return AVERROR_INVALIDDATA;
}
end = mms->asf_header + mms->asf_header_size;
p += sizeof(ff_asf_guid) + 14;
while(end - p >= sizeof(ff_asf_guid) + 8) {
uint64_t chunksize;
if (!memcmp(p, ff_asf_data_header, sizeof(ff_asf_guid))) {
chunksize = 50; // see Reference [2] section 5.1
} else {
chunksize = AV_RL64(p + sizeof(ff_asf_guid));
}
if (!chunksize || chunksize > end - p) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (header chunksize %"PRId64" is invalid)\n",
chunksize);
return AVERROR_INVALIDDATA;
}
if (!memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
/* read packet size */
if (end - p > sizeof(ff_asf_guid) * 2 + 68) {
mms->asf_packet_len = AV_RL32(p + sizeof(ff_asf_guid) * 2 + 64);
if (mms->asf_packet_len <= 0 || mms->asf_packet_len > sizeof(mms->in_buffer)) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (too large pkt_len %d)\n",
mms->asf_packet_len);
return AVERROR_INVALIDDATA;
}
}
} else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) {
if (end - p >= (sizeof(ff_asf_guid) * 3 + 26)) {
flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24);
stream_id = flags & 0x7F;
//The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size,
//we can calculate the packet size by stream_num.
//Please see function send_stream_selection_request().
if (mms->stream_num < MMS_MAX_STREAMS &&
46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) {
mms->streams = av_fast_realloc(mms->streams,
&mms->nb_streams_allocated,
(mms->stream_num + 1) * sizeof(MMSStream));
if (!mms->streams)
return AVERROR(ENOMEM);
mms->streams[mms->stream_num].id = stream_id;
mms->stream_num++;
} else {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (too many A/V streams)\n");
return AVERROR_INVALIDDATA;
}
}
} else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) {
if (end - p >= 88) {
int stream_count = AV_RL16(p + 84), ext_len_count = AV_RL16(p + 86);
uint64_t skip_bytes = 88;
while (stream_count--) {
if (end - p < skip_bytes + 4) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (next stream name length is not in the buffer)\n");
return AVERROR_INVALIDDATA;
}
skip_bytes += 4 + AV_RL16(p + skip_bytes + 2);
}
while (ext_len_count--) {
if (end - p < skip_bytes + 22) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (next extension system info length is not in the buffer)\n");
return AVERROR_INVALIDDATA;
}
skip_bytes += 22 + AV_RL32(p + skip_bytes + 18);
}
if (end - p < skip_bytes) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (the last extension system info length is invalid)\n");
return AVERROR_INVALIDDATA;
}
if (chunksize - skip_bytes > 24)
chunksize = skip_bytes;
}
} else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) {
chunksize = 46; // see references [2] section 3.4. This should be set 46.
if (chunksize > end - p) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (header chunksize %"PRId64" is invalid)\n",
chunksize);
return AVERROR_INVALIDDATA;
}
}
p += chunksize;
}
return 0;
}
| 168,927 |
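The parser fix above adds end - p guards before fixed-offset reads and re-validates chunksize on every path. The general shape: never trust a length field until it has been checked against the bytes actually remaining. A minimal chunk-walk sketch with a hypothetical TLV layout of [4-byte type][4-byte little-endian size][payload]:

#include <cstddef>
#include <cstdint>

int walk_chunks(const std::uint8_t *p, const std::uint8_t *end) {
    while (end - p >= 8) {                       // room for one header?
        std::uint32_t size =  p[4] | (p[5] << 8) | (p[6] << 16)
                           | (static_cast<std::uint32_t>(p[7]) << 24);
        if (size < 8 || size > static_cast<std::size_t>(end - p))
            return -1;                           // corrupt length, stop
        // ... inspect payload at p + 8, length size - 8 ...
        p += size;                               // advance only after validation
    }
    return 0;
}

The size < 8 test also guarantees forward progress, so a zero-length chunk cannot spin the loop forever.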
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int jp2_box_put(jp2_box_t *box, jas_stream_t *out)
{
jas_stream_t *tmpstream;
bool extlen;
bool dataflag;
tmpstream = 0;
dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA));
if (dataflag) {
if (!(tmpstream = jas_stream_memopen(0, 0))) {
goto error;
}
if (box->ops->putdata) {
if ((*box->ops->putdata)(box, tmpstream)) {
goto error;
}
}
box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false);
jas_stream_rewind(tmpstream);
}
extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0;
if (jp2_putuint32(out, extlen ? 1 : box->len)) {
goto error;
}
if (jp2_putuint32(out, box->type)) {
goto error;
}
if (extlen) {
if (jp2_putuint64(out, box->len)) {
goto error;
}
}
if (dataflag) {
if (jas_stream_copy(out, tmpstream, box->len - JP2_BOX_HDRLEN(false))) {
goto error;
}
jas_stream_close(tmpstream);
}
return 0;
error:
if (tmpstream) {
jas_stream_close(tmpstream);
}
return -1;
}
Commit Message: Fixed bugs due to uninitialized data in the JP2 decoder.
Also, added some comments marking I/O stream interfaces that probably
need to be changed (in the long term) to fix integer overflow problems.
CWE ID: CWE-476 | int jp2_box_put(jp2_box_t *box, jas_stream_t *out)
{
jas_stream_t *tmpstream;
bool extlen;
bool dataflag;
tmpstream = 0;
dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA));
if (dataflag) {
if (!(tmpstream = jas_stream_memopen(0, 0))) {
goto error;
}
if (box->ops->putdata) {
if ((*box->ops->putdata)(box, tmpstream)) {
goto error;
}
}
box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false);
jas_stream_rewind(tmpstream);
}
extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0;
if (jp2_putuint32(out, extlen ? 1 : box->len)) {
goto error;
}
if (jp2_putuint32(out, box->type)) {
goto error;
}
if (extlen) {
if (jp2_putuint64(out, box->len)) {
goto error;
}
}
if (dataflag) {
if (jas_stream_copy(out, tmpstream, box->len -
JP2_BOX_HDRLEN(false))) {
jas_eprintf("cannot copy box data\n");
goto error;
}
jas_stream_close(tmpstream);
}
return 0;
error:
if (tmpstream) {
jas_stream_close(tmpstream);
}
return -1;
}
| 168,319 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: ieee802_15_4_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
u_int caplen = h->caplen;
int hdrlen;
uint16_t fc;
uint8_t seq;
if (caplen < 3) {
ND_PRINT((ndo, "[|802.15.4] %x", caplen));
return caplen;
}
fc = EXTRACT_LE_16BITS(p);
hdrlen = extract_header_length(fc);
seq = EXTRACT_LE_8BITS(p + 2);
p += 3;
caplen -= 3;
ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[fc & 0x7]));
if (ndo->ndo_vflag)
ND_PRINT((ndo,"seq %02x ", seq));
if (hdrlen == -1) {
ND_PRINT((ndo,"invalid! "));
return caplen;
}
if (!ndo->ndo_vflag) {
p+= hdrlen;
caplen -= hdrlen;
} else {
uint16_t panid = 0;
switch ((fc >> 10) & 0x3) {
case 0x00:
ND_PRINT((ndo,"none "));
break;
case 0x01:
ND_PRINT((ndo,"reserved destination addressing mode"));
return 0;
case 0x02:
panid = EXTRACT_LE_16BITS(p);
p += 2;
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
break;
case 0x03:
panid = EXTRACT_LE_16BITS(p);
p += 2;
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
break;
}
ND_PRINT((ndo,"< "));
switch ((fc >> 14) & 0x3) {
case 0x00:
ND_PRINT((ndo,"none "));
break;
case 0x01:
ND_PRINT((ndo,"reserved source addressing mode"));
return 0;
case 0x02:
if (!(fc & (1 << 6))) {
panid = EXTRACT_LE_16BITS(p);
p += 2;
}
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
break;
case 0x03:
if (!(fc & (1 << 6))) {
panid = EXTRACT_LE_16BITS(p);
p += 2;
}
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
break;
}
caplen -= hdrlen;
}
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
return 0;
}
Commit Message: CVE-2017-13000/IEEE 802.15.4: Add more bounds checks.
While we're at it, add a bunch of macros for the frame control field's
subfields, have the reserved frame types show the frame type value, use
the same code path for processing source and destination addresses
regardless of whether -v was specified (just leave out the addresses in
non-verbose mode), and return the header length in all cases.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add tests using the capture files supplied by the reporter(s).
CWE ID: CWE-125 | ieee802_15_4_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
u_int caplen = h->caplen;
u_int hdrlen;
uint16_t fc;
uint8_t seq;
uint16_t panid = 0;
if (caplen < 3) {
ND_PRINT((ndo, "[|802.15.4]"));
return caplen;
}
hdrlen = 3;
fc = EXTRACT_LE_16BITS(p);
seq = EXTRACT_LE_8BITS(p + 2);
p += 3;
caplen -= 3;
ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)]));
if (ndo->ndo_vflag)
ND_PRINT((ndo,"seq %02x ", seq));
/*
* Destination address and PAN ID, if present.
*/
switch (FC_DEST_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (fc & FC_PAN_ID_COMPRESSION) {
/*
* PAN ID compression; this requires that both
* the source and destination addresses be present,
* but the destination address is missing.
*/
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved destination addressing mode"));
return hdrlen;
case FC_ADDRESSING_MODE_SHORT:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p + 2)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p + 2)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"< "));
/*
* Source address and PAN ID, if present.
*/
switch (FC_SRC_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved source addressing mode"));
return 0;
case FC_ADDRESSING_MODE_SHORT:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
return hdrlen;
}
| 170,030 |
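The rewritten printer above threads two counters through the parse: caplen, the bytes still available, and hdrlen, the bytes consumed so far, checking and updating both before every variable-length field. A minimal sketch of that consume-and-track discipline, with hypothetical field widths:

#include <cstddef>

// Consume `n` bytes if available; report truncation otherwise.
bool take(const unsigned char *&p, std::size_t &caplen,
          std::size_t &hdrlen, std::size_t n) {
    if (caplen < n) return false;   // caller prints a truncation marker
                                    // and returns hdrlen, the bytes parsed
    p += n; caplen -= n; hdrlen += n;
    return true;
}

std::size_t parse_addresses(const unsigned char *p, std::size_t caplen) {
    std::size_t hdrlen = 0;
    if (!take(p, caplen, hdrlen, 2)) return hdrlen;  // PAN id
    if (!take(p, caplen, hdrlen, 8)) return hdrlen;  // 64-bit address
    return hdrlen;
}

Returning hdrlen on every truncation path, rather than a bare 0, is exactly what the fixed printer does so callers still know how much header was parsed.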
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void MediaStreamDevicesController::Accept(bool update_content_setting) {
if (content_settings_)
content_settings_->OnMediaStreamAllowed();
NotifyUIRequestAccepted();
content::MediaStreamDevices devices;
if (microphone_requested_ || webcam_requested_) {
switch (request_.request_type) {
case content::MEDIA_OPEN_DEVICE:
MediaCaptureDevicesDispatcher::GetInstance()->GetRequestedDevice(
request_.requested_device_id,
request_.audio_type == content::MEDIA_DEVICE_AUDIO_CAPTURE,
request_.video_type == content::MEDIA_DEVICE_VIDEO_CAPTURE,
&devices);
break;
case content::MEDIA_DEVICE_ACCESS:
case content::MEDIA_GENERATE_STREAM:
case content::MEDIA_ENUMERATE_DEVICES:
MediaCaptureDevicesDispatcher::GetInstance()->
GetDefaultDevicesForProfile(profile_,
microphone_requested_,
webcam_requested_,
&devices);
break;
}
if (update_content_setting && IsSchemeSecure() && !devices.empty())
SetPermission(true);
}
scoped_ptr<content::MediaStreamUI> ui;
if (!devices.empty()) {
ui = MediaCaptureDevicesDispatcher::GetInstance()->
GetMediaStreamCaptureIndicator()->RegisterMediaStream(
web_contents_, devices);
}
content::MediaResponseCallback cb = callback_;
callback_.Reset();
cb.Run(devices, ui.Pass());
}
Commit Message: Make the content setting for webcam/mic sticky for Pepper requests.
This makes the content setting sticky for webcam/mic requests from Pepper from non-https origins.
BUG=249335
[email protected], [email protected]
Review URL: https://codereview.chromium.org/17060006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@206479 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-264 | void MediaStreamDevicesController::Accept(bool update_content_setting) {
if (content_settings_)
content_settings_->OnMediaStreamAllowed();
NotifyUIRequestAccepted();
content::MediaStreamDevices devices;
if (microphone_requested_ || webcam_requested_) {
switch (request_.request_type) {
case content::MEDIA_OPEN_DEVICE:
MediaCaptureDevicesDispatcher::GetInstance()->GetRequestedDevice(
request_.requested_device_id,
request_.audio_type == content::MEDIA_DEVICE_AUDIO_CAPTURE,
request_.video_type == content::MEDIA_DEVICE_VIDEO_CAPTURE,
&devices);
break;
case content::MEDIA_DEVICE_ACCESS:
case content::MEDIA_GENERATE_STREAM:
case content::MEDIA_ENUMERATE_DEVICES:
MediaCaptureDevicesDispatcher::GetInstance()->
GetDefaultDevicesForProfile(profile_,
microphone_requested_,
webcam_requested_,
&devices);
break;
}
// TODO(raymes): We currently set the content permission for non-https
// websites for Pepper requests as well. This is temporary and should be
// removed.
if (update_content_setting) {
if ((IsSchemeSecure() && !devices.empty()) ||
request_.request_type == content::MEDIA_OPEN_DEVICE) {
SetPermission(true);
}
}
}
scoped_ptr<content::MediaStreamUI> ui;
if (!devices.empty()) {
ui = MediaCaptureDevicesDispatcher::GetInstance()->
GetMediaStreamCaptureIndicator()->RegisterMediaStream(
web_contents_, devices);
}
content::MediaResponseCallback cb = callback_;
callback_.Reset();
cb.Run(devices, ui.Pass());
}
| 171,312 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
Commit Message: KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
If the guest sets the GPA of the time_page so that the request to update the
time straddles a page then KVM will write onto an incorrect page. The
write is done by using kmap atomic to get a pointer to the page for the time
structure and then performing a memcpy to that page starting at an offset
that the guest controls. Well behaved guests always provide a 32-byte aligned
address, however a malicious guest could use this to corrupt host kernel
memory.
Tested: Tested against kvmclock unit test.
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
CWE ID: CWE-119 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
if (vcpu->arch.time_offset &
(sizeof(struct pvclock_vcpu_time_info) - 1))
break;
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
| 166,120 |
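The alignment check added in this fix ensures a kvmclock structure write can never straddle a page boundary: the guest-controlled offset must be a multiple of sizeof(struct pvclock_vcpu_time_info). A minimal sketch of the same power-of-two mask test, using an invented 32-byte stand-in for KVM's real structure:

#include <stddef.h>

/* Illustrative stand-in for pvclock_vcpu_time_info: 32 bytes, a power of two. */
struct time_info_sketch {
    unsigned long long tsc_timestamp;
    unsigned char pad[24];
};

/* Returns 1 when a write of the structure at guest_offset cannot cross a
 * page boundary (offset is size-aligned and the size is a power of two). */
static int offset_is_safe(unsigned long guest_offset)
{
    return (guest_offset & (sizeof(struct time_info_sketch) - 1)) == 0;
}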
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: void btsock_l2cap_signaled(int fd, int flags, uint32_t user_id)
{
l2cap_socket *sock;
char drop_it = FALSE;
/* We use MSG_DONTWAIT when sending data to JAVA, hence it can be accepted to hold the lock. */
pthread_mutex_lock(&state_lock);
sock = btsock_l2cap_find_by_id_l(user_id);
if (sock) {
if ((flags & SOCK_THREAD_FD_RD) && !sock->server) {
if (sock->connected) {
int size = 0;
if (!(flags & SOCK_THREAD_FD_EXCEPTION) || (ioctl(sock->our_fd, FIONREAD, &size)
== 0 && size)) {
uint8_t *buffer = osi_malloc(L2CAP_MAX_SDU_LENGTH);
/* Apparently we hijack the req_id (UINT32) to pass the pointer to the buffer to
* the write complete callback, which call a free... wonder if this works on a
* 64 bit platform? */
if (buffer != NULL) {
/* The socket is created with SOCK_SEQPACKET, hence we read one message at
* the time. The maximum size of a message is allocated to ensure data is
* not lost. This is okay to do as Android uses virtual memory, hence even
* if we only use a fraction of the memory it should not block for others
* to use the memory. As the definition of ioctl(FIONREAD) do not clearly
* define what value will be returned if multiple messages are written to
* the socket before any message is read from the socket, we could
* potentially risk to allocate way more memory than needed. One of the use
* cases for this socket is obex where multiple 64kbyte messages are
* typically written to the socket in a tight loop, hence we risk the ioctl
* will return the total amount of data in the buffer, which could be
* multiple 64kbyte chunks.
* UPDATE: As bluedroid cannot handle 64kbyte buffers, the size is reduced
* to around 8kbyte - and using malloc for buffer allocation here seems to
* be wrong
* UPDATE: Since we are responsible for freeing the buffer in the
* write_complete_ind, it is OK to use malloc. */
int count = recv(fd, buffer, L2CAP_MAX_SDU_LENGTH,
MSG_NOSIGNAL | MSG_DONTWAIT);
APPL_TRACE_DEBUG("btsock_l2cap_signaled - %d bytes received from socket",
count);
if (sock->fixed_chan) {
if(BTA_JvL2capWriteFixed(sock->channel, (BD_ADDR*)&sock->addr,
(UINT32)buffer, btsock_l2cap_cbk, buffer, count,
(void *)user_id) != BTA_JV_SUCCESS) {
on_l2cap_write_fixed_done(buffer, user_id);
}
} else {
if(BTA_JvL2capWrite(sock->handle, (UINT32)buffer, buffer, count,
(void *)user_id) != BTA_JV_SUCCESS) {
on_l2cap_write_done(buffer, user_id);
}
}
} else {
APPL_TRACE_ERROR("Unable to allocate memory for data packet from JAVA...")
}
}
} else
drop_it = TRUE;
}
if (flags & SOCK_THREAD_FD_WR) {
if (flush_incoming_que_on_wr_signal_l(sock) && sock->connected)
btsock_thread_add_fd(pth, sock->our_fd, BTSOCK_L2CAP, SOCK_THREAD_FD_WR, sock->id);
}
if (drop_it || (flags & SOCK_THREAD_FD_EXCEPTION)) {
int size = 0;
if (drop_it || ioctl(sock->our_fd, FIONREAD, &size) != 0 || size == 0)
btsock_l2cap_free_l(sock);
}
}
pthread_mutex_unlock(&state_lock);
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | void btsock_l2cap_signaled(int fd, int flags, uint32_t user_id)
{
l2cap_socket *sock;
char drop_it = FALSE;
/* We use MSG_DONTWAIT when sending data to JAVA, hence it can be accepted to hold the lock. */
pthread_mutex_lock(&state_lock);
sock = btsock_l2cap_find_by_id_l(user_id);
if (sock) {
if ((flags & SOCK_THREAD_FD_RD) && !sock->server) {
if (sock->connected) {
int size = 0;
if (!(flags & SOCK_THREAD_FD_EXCEPTION) || (TEMP_FAILURE_RETRY(ioctl(sock->our_fd, FIONREAD, &size))
== 0 && size)) {
uint8_t *buffer = osi_malloc(L2CAP_MAX_SDU_LENGTH);
/* Apparently we hijack the req_id (UINT32) to pass the pointer to the buffer to
* the write complete callback, which call a free... wonder if this works on a
* 64 bit platform? */
if (buffer != NULL) {
/* The socket is created with SOCK_SEQPACKET, hence we read one message at
* the time. The maximum size of a message is allocated to ensure data is
* not lost. This is okay to do as Android uses virtual memory, hence even
* if we only use a fraction of the memory it should not block for others
* to use the memory. As the definition of ioctl(FIONREAD) do not clearly
* define what value will be returned if multiple messages are written to
* the socket before any message is read from the socket, we could
* potentially risk to allocate way more memory than needed. One of the use
* cases for this socket is obex where multiple 64kbyte messages are
* typically written to the socket in a tight loop, hence we risk the ioctl
* will return the total amount of data in the buffer, which could be
* multiple 64kbyte chunks.
* UPDATE: As bluedroid cannot handle 64kbyte buffers, the size is reduced
* to around 8kbyte - and using malloc for buffer allocation here seems to
* be wrong
* UPDATE: Since we are responsible for freeing the buffer in the
* write_complete_ind, it is OK to use malloc. */
int count = TEMP_FAILURE_RETRY(recv(fd, buffer, L2CAP_MAX_SDU_LENGTH,
MSG_NOSIGNAL | MSG_DONTWAIT));
APPL_TRACE_DEBUG("btsock_l2cap_signaled - %d bytes received from socket",
count);
if (sock->fixed_chan) {
if(BTA_JvL2capWriteFixed(sock->channel, (BD_ADDR*)&sock->addr,
(UINT32)buffer, btsock_l2cap_cbk, buffer, count,
(void *)user_id) != BTA_JV_SUCCESS) {
on_l2cap_write_fixed_done(buffer, user_id);
}
} else {
if(BTA_JvL2capWrite(sock->handle, (UINT32)buffer, buffer, count,
(void *)user_id) != BTA_JV_SUCCESS) {
on_l2cap_write_done(buffer, user_id);
}
}
} else {
APPL_TRACE_ERROR("Unable to allocate memory for data packet from JAVA...")
}
}
} else
drop_it = TRUE;
}
if (flags & SOCK_THREAD_FD_WR) {
if (flush_incoming_que_on_wr_signal_l(sock) && sock->connected)
btsock_thread_add_fd(pth, sock->our_fd, BTSOCK_L2CAP, SOCK_THREAD_FD_WR, sock->id);
}
if (drop_it || (flags & SOCK_THREAD_FD_EXCEPTION)) {
int size = 0;
if (drop_it || TEMP_FAILURE_RETRY(ioctl(sock->our_fd, FIONREAD, &size)) != 0 || size == 0)
btsock_l2cap_free_l(sock);
}
}
pthread_mutex_unlock(&state_lock);
}
| 173,453 |
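The fix in this entry wraps ioctl() and recv() in TEMP_FAILURE_RETRY so a signal delivered mid-call cannot surface as a spurious -1/EINTR failure and tear down the socket. The glibc/bionic macro behaves roughly like this sketch (a GCC statement expression; the name RETRY_ON_EINTR is invented):

#include <errno.h>

/* Re-issue the expression while it fails with EINTR, mirroring the
 * semantics of TEMP_FAILURE_RETRY. Requires GCC/Clang statement
 * expressions; not portable ISO C. */
#define RETRY_ON_EINTR(exp)                  \
    ({ long _rc;                             \
       do { _rc = (long)(exp); }             \
       while (_rc == -1L && errno == EINTR); \
       _rc; })

/* usage: int n = (int)RETRY_ON_EINTR(recv(fd, buf, sizeof(buf), 0)); */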
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: GLboolean WebGLRenderingContextBase::isShader(WebGLShader* shader) {
if (!shader || isContextLost())
return 0;
return ContextGL()->IsShader(shader->Object());
}
Commit Message: Validate all incoming WebGLObjects.
A few entry points were missing the correct validation.
Tested with improved conformance tests in
https://github.com/KhronosGroup/WebGL/pull/2654 .
Bug: 848914
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: Ib98a61cc5bf378d1b3338b04acd7e1bc4c2fe008
Reviewed-on: https://chromium-review.googlesource.com/1086718
Reviewed-by: Kai Ninomiya <[email protected]>
Reviewed-by: Antoine Labour <[email protected]>
Commit-Queue: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#565016}
CWE ID: CWE-119 | GLboolean WebGLRenderingContextBase::isShader(WebGLShader* shader) {
if (!shader || isContextLost() || !shader->Validate(ContextGroup(), this))
return 0;
return ContextGL()->IsShader(shader->Object());
}
| 173,132 |
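The one-line fix adds an ownership check so a WebGLShader from another context group is rejected before its handle reaches the driver. A hedged sketch of that kind of validation in plain C, with invented types standing in for Blink's:

#include <stddef.h>

struct context_group;                      /* opaque share group */
struct gl_object {
    const struct context_group *group;     /* group that created the object */
    int deleted;                           /* marked-for-deletion flag */
    unsigned int handle;                   /* driver-level object name */
};

/* Only objects created in the caller's share group, and not yet deleted,
 * may be passed through to the GL backend. */
static int object_valid_for(const struct gl_object *obj,
                            const struct context_group *group)
{
    return obj != NULL && obj->group == group && !obj->deleted;
}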
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: bool AppCacheDatabase::InsertEntry(const EntryRecord* record) {
if (!LazyOpen(kCreateIfNeeded))
return false;
static const char kSql[] =
"INSERT INTO Entries (cache_id, url, flags, response_id, response_size)"
" VALUES(?, ?, ?, ?, ?)";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt64(0, record->cache_id);
statement.BindString(1, record->url.spec());
statement.BindInt(2, record->flags);
statement.BindInt64(3, record->response_id);
statement.BindInt64(4, record->response_size);
return statement.Run();
}
Commit Message: Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
CWE ID: CWE-200 | bool AppCacheDatabase::InsertEntry(const EntryRecord* record) {
if (!LazyOpen(kCreateIfNeeded))
return false;
static const char kSql[] =
"INSERT INTO Entries (cache_id, url, flags, response_id, response_size, "
"padding_size)"
" VALUES(?, ?, ?, ?, ?, ?)";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt64(0, record->cache_id);
statement.BindString(1, record->url.spec());
statement.BindInt(2, record->flags);
statement.BindInt64(3, record->response_id);
DCHECK_GE(record->response_size, 0);
statement.BindInt64(4, record->response_size);
DCHECK_GE(record->padding_size, 0);
statement.BindInt64(5, record->padding_size);
return statement.Run();
}
| 172,980 |
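The new padding_size column exists so cross-origin entries can report an inflated size and keep the true response size unobservable (the CWE-200 leak). Chromium derives the padding deterministically; the scheme below is only an illustrative stand-in, not the browser's actual algorithm:

#include <stdint.h>

/* Deterministic padding: the same (secret, url) pair always yields the
 * same value, so repeated loads of one resource leak nothing new.
 * FNV-1a mixing; max_pad bounds the padding range. */
static uint64_t padding_for(const char *url, uint64_t secret, uint64_t max_pad)
{
    uint64_t h = 1469598103934665603ULL ^ secret;   /* FNV offset basis */
    for (const unsigned char *p = (const unsigned char *)url; *p; ++p)
        h = (h ^ *p) * 1099511628211ULL;            /* FNV prime */
    return h % (max_pad + 1);
}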
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
{
long ret;
struct cros_ec_command u_cmd;
struct cros_ec_command *s_cmd;
if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
return -EFAULT;
if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
(u_cmd.insize > EC_MAX_MSG_BYTES))
return -EINVAL;
s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
ret = -EFAULT;
goto exit;
}
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
return ret;
}
Commit Message: platform/chrome: cros_ec_dev - double fetch bug in ioctl
We verify "u_cmd.outsize" and "u_cmd.insize" but we need to make sure
that those values have not changed between the two copy_from_user()
calls. Otherwise it could lead to a buffer overflow.
Additionally, cros_ec_cmd_xfer() can set s_cmd->insize to a lower value.
We should use the new smaller value so we don't copy too much data to
the user.
Reported-by: Pengfei Wang <[email protected]>
Fixes: a841178445bb ('mfd: cros_ec: Use a zero-length array for command data')
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Tested-by: Gwendal Grignou <[email protected]>
Cc: <[email protected]> # v4.2+
Signed-off-by: Olof Johansson <[email protected]>
CWE ID: CWE-362 | static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
{
long ret;
struct cros_ec_command u_cmd;
struct cros_ec_command *s_cmd;
if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
return -EFAULT;
if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
(u_cmd.insize > EC_MAX_MSG_BYTES))
return -EINVAL;
s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
ret = -EFAULT;
goto exit;
}
if (u_cmd.outsize != s_cmd->outsize ||
u_cmd.insize != s_cmd->insize) {
ret = -EINVAL;
goto exit;
}
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
return ret;
}
| 167,017 |
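This is the classic double-fetch shape: the header is copied from userspace once to size the allocation and again as part of the full command, and a racing thread can change it in between. The fix re-compares both size fields after the second copy; a stripped-down sketch of that check, with a hypothetical struct cmd_hdr:

#include <errno.h>
#include <stdint.h>

struct cmd_hdr {
    uint32_t outsize;
    uint32_t insize;
    /* outsize bytes of payload follow in the full command */
};

/* Compare the header captured by the first copy against the header
 * embedded in the second, full copy; a mismatch means userspace
 * modified the buffer between the two fetches. */
static int sizes_unchanged(const struct cmd_hdr *first,
                           const struct cmd_hdr *second)
{
    return (first->outsize == second->outsize &&
            first->insize  == second->insize) ? 0 : -EINVAL;
}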
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: static int php_stream_temp_seek(php_stream *stream, off_t offset, int whence, off_t *newoffs TSRMLS_DC)
{
php_stream_temp_data *ts = (php_stream_temp_data*)stream->abstract;
int ret;
assert(ts != NULL);
if (!ts->innerstream) {
*newoffs = -1;
return -1;
}
ret = php_stream_seek(ts->innerstream, offset, whence);
*newoffs = php_stream_tell(ts->innerstream);
stream->eof = ts->innerstream->eof;
return ret;
}
Commit Message:
CWE ID: CWE-20 | static int php_stream_temp_seek(php_stream *stream, off_t offset, int whence, off_t *newoffs TSRMLS_DC)
{
php_stream_temp_data *ts = (php_stream_temp_data*)stream->abstract;
int ret;
assert(ts != NULL);
if (!ts->innerstream) {
*newoffs = -1;
return -1;
}
ret = php_stream_seek(ts->innerstream, offset, whence);
*newoffs = php_stream_tell(ts->innerstream);
stream->eof = ts->innerstream->eof;
return ret;
}
| 165,481 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: static Image *ExtractPostscript(Image *image,const ImageInfo *image_info,
MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception)
{
char
postscript_file[MaxTextExtent];
const MagicInfo
*magic_info;
FILE
*ps_file;
ImageInfo
*clone_info;
Image
*image2;
unsigned char
magick[2*MaxTextExtent];
if ((clone_info=CloneImageInfo(image_info)) == NULL)
return(image);
clone_info->blob=(void *) NULL;
clone_info->length=0;
/* Obtain temporary file */
(void) AcquireUniqueFilename(postscript_file);
ps_file=fopen_utf8(postscript_file,"wb");
if (ps_file == (FILE *) NULL)
goto FINISH;
/* Copy postscript to temporary file */
(void) SeekBlob(image,PS_Offset,SEEK_SET);
(void) ReadBlob(image, 2*MaxTextExtent, magick);
(void) SeekBlob(image,PS_Offset,SEEK_SET);
while(PS_Size-- > 0)
{
(void) fputc(ReadBlobByte(image),ps_file);
}
(void) fclose(ps_file);
/* Detect file format - Check magic.mgk configuration file. */
magic_info=GetMagicInfo(magick,2*MaxTextExtent,exception);
if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL;
/* printf("Detected:%s \n",magic_info->name); */
if(exception->severity != UndefinedException) goto FINISH_UNL;
if(magic_info->name == (char *) NULL) goto FINISH_UNL;
(void) CopyMagickMemory(clone_info->magick,magic_info->name,MaxTextExtent);
/* Read nested image */
/*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/
FormatLocaleString(clone_info->filename,MaxTextExtent,"%s",postscript_file);
image2=ReadImage(clone_info,exception);
if (!image2)
goto FINISH_UNL;
/*
Replace current image with new image while copying base image
attributes.
*/
(void) CopyMagickMemory(image2->filename,image->filename,MaxTextExtent);
(void) CopyMagickMemory(image2->magick_filename,image->magick_filename,MaxTextExtent);
(void) CopyMagickMemory(image2->magick,image->magick,MaxTextExtent);
image2->depth=image->depth;
DestroyBlob(image2);
image2->blob=ReferenceBlob(image->blob);
if ((image->rows == 0) || (image->columns == 0))
DeleteImageFromList(&image);
AppendImageToList(&image,image2);
FINISH_UNL:
(void) RelinquishUniqueFileResource(postscript_file);
FINISH:
DestroyImageInfo(clone_info);
return(image);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/122
CWE ID: CWE-125 | static Image *ExtractPostscript(Image *image,const ImageInfo *image_info,
MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception)
{
char
postscript_file[MaxTextExtent];
const MagicInfo
*magic_info;
FILE
*ps_file;
ImageInfo
*clone_info;
Image
*image2;
unsigned char
magick[2*MaxTextExtent];
if ((clone_info=CloneImageInfo(image_info)) == NULL)
return(image);
clone_info->blob=(void *) NULL;
clone_info->length=0;
/* Obtain temporary file */
(void) AcquireUniqueFilename(postscript_file);
ps_file=fopen_utf8(postscript_file,"wb");
if (ps_file == (FILE *) NULL)
goto FINISH;
/* Copy postscript to temporary file */
(void) SeekBlob(image,PS_Offset,SEEK_SET);
(void) ReadBlob(image, 2*MaxTextExtent, magick);
(void) SeekBlob(image,PS_Offset,SEEK_SET);
while(PS_Size-- > 0)
{
(void) fputc(ReadBlobByte(image),ps_file);
}
(void) fclose(ps_file);
/* Detect file format - Check magic.mgk configuration file. */
magic_info=GetMagicInfo(magick,2*MaxTextExtent,exception);
if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL;
/* printf("Detected:%s \n",magic_info->name); */
if(exception->severity != UndefinedException) goto FINISH_UNL;
if(magic_info->name == (char *) NULL) goto FINISH_UNL;
(void) strncpy(clone_info->magick,magic_info->name,MaxTextExtent);
/* Read nested image */
/*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/
FormatLocaleString(clone_info->filename,MaxTextExtent,"%s",postscript_file);
image2=ReadImage(clone_info,exception);
if (!image2)
goto FINISH_UNL;
/*
Replace current image with new image while copying base image
attributes.
*/
(void) CopyMagickMemory(image2->filename,image->filename,MaxTextExtent);
(void) CopyMagickMemory(image2->magick_filename,image->magick_filename,MaxTextExtent);
(void) CopyMagickMemory(image2->magick,image->magick,MaxTextExtent);
image2->depth=image->depth;
DestroyBlob(image2);
image2->blob=ReferenceBlob(image->blob);
if ((image->rows == 0) || (image->columns == 0))
DeleteImageFromList(&image);
AppendImageToList(&image,image2);
FINISH_UNL:
(void) RelinquishUniqueFileResource(postscript_file);
FINISH:
DestroyImageInfo(clone_info);
return(image);
}
| 168,802 |
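The fix here replaces a fixed-length CopyMagickMemory of MaxTextExtent bytes with strncpy, because magic_info->name is a NUL-terminated string that may be much shorter than the destination, and copying the full buffer length reads past its end (the CWE-125 read). The general pattern, with an illustrative buffer size:

#include <string.h>

#define NAME_MAX_LEN 4096   /* stands in for MaxTextExtent */

/* Copy a NUL-terminated name into a fixed buffer without reading past
 * the end of the source; strncpy stops at the NUL but does not always
 * terminate the destination, so terminate it explicitly. */
static void copy_name(char dst[NAME_MAX_LEN], const char *name)
{
    strncpy(dst, name, NAME_MAX_LEN - 1);
    dst[NAME_MAX_LEN - 1] = '\0';
}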
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: static int mct_u232_port_probe(struct usb_serial_port *port)
{
struct mct_u232_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
priv->read_urb = port->serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
Commit Message: USB: mct_u232: add sanity checking in probe
An attack using the lack of sanity checking in probe is known. This
patch checks for the existence of a second port.
CVE-2016-3136
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
[johan: add error message ]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: | static int mct_u232_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
/* check first to simplify error handling */
if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
dev_err(&port->dev, "expected endpoint missing\n");
return -ENODEV;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
| 167,361 |
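The pattern in this fix is to validate, at probe time, every descriptor-derived resource the driver will later dereference, since a crafted USB device can simply omit an endpoint. A simplified sketch with stand-in structures:

#include <stddef.h>

struct port_sketch   { void *interrupt_in_urb; };
struct serial_sketch { struct port_sketch *port[2]; };

/* Fail probe early if the second interrupt-in endpoint the driver
 * relies on is absent, instead of dereferencing NULL later on. */
static int required_endpoints_present(const struct serial_sketch *serial)
{
    return serial->port[1] != NULL &&
           serial->port[1]->interrupt_in_urb != NULL;
}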
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation. | Code: retrieve_url (struct url * orig_parsed, const char *origurl, char **file,
char **newloc, const char *refurl, int *dt, bool recursive,
struct iri *iri, bool register_status)
{
uerr_t result;
char *url;
bool location_changed;
bool iri_fallbacked = 0;
int dummy;
char *mynewloc, *proxy;
struct url *u = orig_parsed, *proxy_url;
int up_error_code; /* url parse error code */
char *local_file = NULL;
int redirection_count = 0;
bool method_suspended = false;
char *saved_body_data = NULL;
char *saved_method = NULL;
char *saved_body_file_name = NULL;
/* If dt is NULL, use local storage. */
if (!dt)
{
dt = &dummy;
dummy = 0;
}
url = xstrdup (origurl);
if (newloc)
*newloc = NULL;
if (file)
*file = NULL;
if (!refurl)
refurl = opt.referer;
redirected:
/* (also for IRI fallbacking) */
result = NOCONERROR;
mynewloc = NULL;
xfree(local_file);
proxy_url = NULL;
proxy = getproxy (u);
if (proxy)
{
struct iri *pi = iri_new ();
set_uri_encoding (pi, opt.locale, true);
pi->utf8_encode = false;
/* Parse the proxy URL. */
proxy_url = url_parse (proxy, &up_error_code, pi, true);
if (!proxy_url)
{
char *error = url_error (proxy, up_error_code);
logprintf (LOG_NOTQUIET, _("Error parsing proxy URL %s: %s.\n"),
proxy, error);
xfree (url);
xfree (error);
xfree (proxy);
iri_free (pi);
RESTORE_METHOD;
result = PROXERR;
goto bail;
}
if (proxy_url->scheme != SCHEME_HTTP && proxy_url->scheme != u->scheme)
{
logprintf (LOG_NOTQUIET, _("Error in proxy URL %s: Must be HTTP.\n"), proxy);
url_free (proxy_url);
xfree (url);
xfree (proxy);
iri_free (pi);
RESTORE_METHOD;
result = PROXERR;
goto bail;
}
iri_free(pi);
xfree (proxy);
}
if (u->scheme == SCHEME_HTTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_HTTPS
#endif
|| (proxy_url && proxy_url->scheme == SCHEME_HTTP))
{
#ifdef HAVE_HSTS
#ifdef TESTING
/* we don't link against main.o when we're testing */
hsts_store_t hsts_store = NULL;
#else
extern hsts_store_t hsts_store;
#endif
if (opt.hsts && hsts_store)
{
if (hsts_match (hsts_store, u))
logprintf (LOG_VERBOSE, "URL transformed to HTTPS due to an HSTS policy\n");
}
#endif
result = http_loop (u, orig_parsed, &mynewloc, &local_file, refurl, dt,
proxy_url, iri);
}
else if (u->scheme == SCHEME_FTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_FTPS
#endif
)
{
/* If this is a redirection, temporarily turn off opt.ftp_glob
and opt.recursive, both being undesirable when following
redirects. */
bool oldrec = recursive, glob = opt.ftp_glob;
if (redirection_count)
oldrec = glob = false;
result = ftp_loop (u, &local_file, dt, proxy_url, recursive, glob);
recursive = oldrec;
/* There is a possibility of having HTTP being redirected to
according to the suffix. The HTML suffixes are `.html',
`.htm' and a few others, case-insensitive. */
if (redirection_count && local_file && (u->scheme == SCHEME_FTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_FTPS
#endif
))
{
if (has_html_suffix_p (local_file))
*dt |= TEXTHTML;
}
}
if (proxy_url)
{
url_free (proxy_url);
proxy_url = NULL;
}
location_changed = (result == NEWLOCATION || result == NEWLOCATION_KEEP_POST);
if (location_changed)
{
char *construced_newloc;
struct url *newloc_parsed;
assert (mynewloc != NULL);
xfree (local_file);
/* The HTTP specs only allow absolute URLs to appear in
redirects, but a ton of boneheaded webservers and CGIs out
there break the rules and use relative URLs, and popular
browsers are lenient about this, so wget should be too. */
construced_newloc = uri_merge (url, mynewloc);
xfree (mynewloc);
mynewloc = construced_newloc;
#ifdef ENABLE_IRI
/* Reset UTF-8 encoding state, set the URI encoding and reset
the content encoding. */
iri->utf8_encode = opt.enable_iri;
if (opt.encoding_remote)
set_uri_encoding (iri, opt.encoding_remote, true);
set_content_encoding (iri, NULL);
xfree (iri->orig_url);
#endif
/* Now, see if this new location makes sense. */
newloc_parsed = url_parse (mynewloc, &up_error_code, iri, true);
if (!newloc_parsed)
{
char *error = url_error (mynewloc, up_error_code);
logprintf (LOG_NOTQUIET, "%s: %s.\n", escnonprint_uri (mynewloc),
error);
if (orig_parsed != u)
{
url_free (u);
}
xfree (url);
xfree (mynewloc);
xfree (error);
RESTORE_METHOD;
goto bail;
}
/* Now mynewloc will become newloc_parsed->url, because if the
Location contained relative paths like .././something, we
don't want that propagating as url. */
xfree (mynewloc);
mynewloc = xstrdup (newloc_parsed->url);
/* Check for max. number of redirections. */
if (++redirection_count > opt.max_redirect)
{
logprintf (LOG_NOTQUIET, _("%d redirections exceeded.\n"),
opt.max_redirect);
url_free (newloc_parsed);
if (orig_parsed != u)
{
url_free (u);
}
xfree (url);
xfree (mynewloc);
RESTORE_METHOD;
result = WRONGCODE;
goto bail;
}
xfree (url);
url = mynewloc;
if (orig_parsed != u)
{
url_free (u);
}
u = newloc_parsed;
/* If we're being redirected from POST, and we received a
redirect code different than 307, we don't want to POST
again. Many requests answer POST with a redirection to an
index page; that redirection is clearly a GET. We "suspend"
POST data for the duration of the redirections, and restore
it when we're done.
RFC2616 HTTP/1.1 introduces code 307 Temporary Redirect
specifically to preserve the method of the request.
*/
if (result != NEWLOCATION_KEEP_POST && !method_suspended)
SUSPEND_METHOD;
goto redirected;
}
else
{
xfree(mynewloc);
}
/* Try to not encode in UTF-8 if fetching failed */
if (!(*dt & RETROKF) && iri->utf8_encode)
{
iri->utf8_encode = false;
if (orig_parsed != u)
{
url_free (u);
}
u = url_parse (origurl, NULL, iri, true);
if (u)
{
DEBUGP (("[IRI fallbacking to non-utf8 for %s\n", quote (url)));
xfree (url);
url = xstrdup (u->url);
iri_fallbacked = 1;
goto redirected;
}
else
DEBUGP (("[Couldn't fallback to non-utf8 for %s\n", quote (url)));
}
if (local_file && u && (*dt & RETROKF || opt.content_on_error))
{
register_download (u->url, local_file);
if (!opt.spider && redirection_count && 0 != strcmp (origurl, u->url))
register_redirection (origurl, u->url);
if (*dt & TEXTHTML)
register_html (local_file);
if (*dt & TEXTCSS)
register_css (local_file);
}
if (file)
*file = local_file ? local_file : NULL;
else
xfree (local_file);
if (orig_parsed != u)
{
url_free (u);
}
if (redirection_count || iri_fallbacked)
{
if (newloc)
*newloc = url;
else
xfree (url);
}
else
{
if (newloc)
*newloc = NULL;
xfree (url);
}
RESTORE_METHOD;
bail:
if (register_status)
inform_exit_status (result);
return result;
}
Commit Message:
CWE ID: CWE-254 | retrieve_url (struct url * orig_parsed, const char *origurl, char **file,
char **newloc, const char *refurl, int *dt, bool recursive,
struct iri *iri, bool register_status)
{
uerr_t result;
char *url;
bool location_changed;
bool iri_fallbacked = 0;
int dummy;
char *mynewloc, *proxy;
struct url *u = orig_parsed, *proxy_url;
int up_error_code; /* url parse error code */
char *local_file = NULL;
int redirection_count = 0;
bool method_suspended = false;
char *saved_body_data = NULL;
char *saved_method = NULL;
char *saved_body_file_name = NULL;
/* If dt is NULL, use local storage. */
if (!dt)
{
dt = &dummy;
dummy = 0;
}
url = xstrdup (origurl);
if (newloc)
*newloc = NULL;
if (file)
*file = NULL;
if (!refurl)
refurl = opt.referer;
redirected:
/* (also for IRI fallbacking) */
result = NOCONERROR;
mynewloc = NULL;
xfree(local_file);
proxy_url = NULL;
proxy = getproxy (u);
if (proxy)
{
struct iri *pi = iri_new ();
set_uri_encoding (pi, opt.locale, true);
pi->utf8_encode = false;
/* Parse the proxy URL. */
proxy_url = url_parse (proxy, &up_error_code, pi, true);
if (!proxy_url)
{
char *error = url_error (proxy, up_error_code);
logprintf (LOG_NOTQUIET, _("Error parsing proxy URL %s: %s.\n"),
proxy, error);
xfree (url);
xfree (error);
xfree (proxy);
iri_free (pi);
RESTORE_METHOD;
result = PROXERR;
goto bail;
}
if (proxy_url->scheme != SCHEME_HTTP && proxy_url->scheme != u->scheme)
{
logprintf (LOG_NOTQUIET, _("Error in proxy URL %s: Must be HTTP.\n"), proxy);
url_free (proxy_url);
xfree (url);
xfree (proxy);
iri_free (pi);
RESTORE_METHOD;
result = PROXERR;
goto bail;
}
iri_free(pi);
xfree (proxy);
}
if (u->scheme == SCHEME_HTTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_HTTPS
#endif
|| (proxy_url && proxy_url->scheme == SCHEME_HTTP))
{
#ifdef HAVE_HSTS
#ifdef TESTING
/* we don't link against main.o when we're testing */
hsts_store_t hsts_store = NULL;
#else
extern hsts_store_t hsts_store;
#endif
if (opt.hsts && hsts_store)
{
if (hsts_match (hsts_store, u))
logprintf (LOG_VERBOSE, "URL transformed to HTTPS due to an HSTS policy\n");
}
#endif
result = http_loop (u, orig_parsed, &mynewloc, &local_file, refurl, dt,
proxy_url, iri);
}
else if (u->scheme == SCHEME_FTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_FTPS
#endif
)
{
/* If this is a redirection, temporarily turn off opt.ftp_glob
and opt.recursive, both being undesirable when following
redirects. */
bool oldrec = recursive, glob = opt.ftp_glob;
if (redirection_count)
oldrec = glob = false;
result = ftp_loop (u, orig_parsed, &local_file, dt, proxy_url,
recursive, glob);
recursive = oldrec;
/* There is a possibility of having HTTP being redirected to
according to the suffix. The HTML suffixes are `.html',
`.htm' and a few others, case-insensitive. */
if (redirection_count && local_file && (u->scheme == SCHEME_FTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_FTPS
#endif
))
{
if (has_html_suffix_p (local_file))
*dt |= TEXTHTML;
}
}
if (proxy_url)
{
url_free (proxy_url);
proxy_url = NULL;
}
location_changed = (result == NEWLOCATION || result == NEWLOCATION_KEEP_POST);
if (location_changed)
{
char *construced_newloc;
struct url *newloc_parsed;
assert (mynewloc != NULL);
xfree (local_file);
/* The HTTP specs only allow absolute URLs to appear in
redirects, but a ton of boneheaded webservers and CGIs out
there break the rules and use relative URLs, and popular
browsers are lenient about this, so wget should be too. */
construced_newloc = uri_merge (url, mynewloc);
xfree (mynewloc);
mynewloc = construced_newloc;
#ifdef ENABLE_IRI
/* Reset UTF-8 encoding state, set the URI encoding and reset
the content encoding. */
iri->utf8_encode = opt.enable_iri;
if (opt.encoding_remote)
set_uri_encoding (iri, opt.encoding_remote, true);
set_content_encoding (iri, NULL);
xfree (iri->orig_url);
#endif
/* Now, see if this new location makes sense. */
newloc_parsed = url_parse (mynewloc, &up_error_code, iri, true);
if (!newloc_parsed)
{
char *error = url_error (mynewloc, up_error_code);
logprintf (LOG_NOTQUIET, "%s: %s.\n", escnonprint_uri (mynewloc),
error);
if (orig_parsed != u)
{
url_free (u);
}
xfree (url);
xfree (mynewloc);
xfree (error);
RESTORE_METHOD;
goto bail;
}
/* Now mynewloc will become newloc_parsed->url, because if the
Location contained relative paths like .././something, we
don't want that propagating as url. */
xfree (mynewloc);
mynewloc = xstrdup (newloc_parsed->url);
/* Check for max. number of redirections. */
if (++redirection_count > opt.max_redirect)
{
logprintf (LOG_NOTQUIET, _("%d redirections exceeded.\n"),
opt.max_redirect);
url_free (newloc_parsed);
if (orig_parsed != u)
{
url_free (u);
}
xfree (url);
xfree (mynewloc);
RESTORE_METHOD;
result = WRONGCODE;
goto bail;
}
xfree (url);
url = mynewloc;
if (orig_parsed != u)
{
url_free (u);
}
u = newloc_parsed;
/* If we're being redirected from POST, and we received a
redirect code different than 307, we don't want to POST
again. Many requests answer POST with a redirection to an
index page; that redirection is clearly a GET. We "suspend"
POST data for the duration of the redirections, and restore
it when we're done.
RFC2616 HTTP/1.1 introduces code 307 Temporary Redirect
specifically to preserve the method of the request.
*/
if (result != NEWLOCATION_KEEP_POST && !method_suspended)
SUSPEND_METHOD;
goto redirected;
}
else
{
xfree(mynewloc);
}
/* Try to not encode in UTF-8 if fetching failed */
if (!(*dt & RETROKF) && iri->utf8_encode)
{
iri->utf8_encode = false;
if (orig_parsed != u)
{
url_free (u);
}
u = url_parse (origurl, NULL, iri, true);
if (u)
{
DEBUGP (("[IRI fallbacking to non-utf8 for %s\n", quote (url)));
xfree (url);
url = xstrdup (u->url);
iri_fallbacked = 1;
goto redirected;
}
else
DEBUGP (("[Couldn't fallback to non-utf8 for %s\n", quote (url)));
}
if (local_file && u && (*dt & RETROKF || opt.content_on_error))
{
register_download (u->url, local_file);
if (!opt.spider && redirection_count && 0 != strcmp (origurl, u->url))
register_redirection (origurl, u->url);
if (*dt & TEXTHTML)
register_html (local_file);
if (*dt & TEXTCSS)
register_css (local_file);
}
if (file)
*file = local_file ? local_file : NULL;
else
xfree (local_file);
if (orig_parsed != u)
{
url_free (u);
}
if (redirection_count || iri_fallbacked)
{
if (newloc)
*newloc = url;
else
xfree (url);
}
else
{
if (newloc)
*newloc = NULL;
xfree (url);
}
RESTORE_METHOD;
bail:
if (register_status)
inform_exit_status (result);
return result;
}
| 165,016 |
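The change threads orig_parsed through to ftp_loop so that, after an HTTP-to-FTP redirect, the local file name is derived from the URL the user requested rather than one the redirecting server chose. A hedged sketch of that decision; the trust_server_names flag mirrors wget's option of the same name, but the helper itself is invented:

#include <stddef.h>
#include <string.h>

/* Decide which URL may name the local file. Unless the user explicitly
 * opted in, a redirect target never gets to pick the on-disk name. */
static const char *name_source(const char *original_url,
                               const char *redirected_url,
                               int trust_server_names)
{
    if (redirected_url == NULL || strcmp(original_url, redirected_url) == 0)
        return original_url;            /* no redirect happened */
    return trust_server_names ? redirected_url : original_url;
}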