Columns:
instruction: string, 1 distinct value
input: string, lengths 90 to 139k
output: string, lengths 16 to 138k
__index_level_0__: int64, values 165k to 175k
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct bnep_connlist_req cl; struct bnep_connadd_req ca; struct bnep_conndel_req cd; struct bnep_conninfo ci; struct socket *nsock; void __user *argp = (void __user *)arg; int err; BT_DBG("cmd %x arg %lx", cmd, arg); switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } err = bnep_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return bnep_del_connection(&cd); case BNEPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case BNEPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = bnep_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; default: return -EINVAL; } return 0; } Commit Message: Bluetooth: bnep: fix buffer overflow Struct ca is copied from userspace. It is not checked whether the "device" field is NULL terminated. This potentially leads to BUG() inside of alloc_netdev_mqs() and/or information leak by creating a device with a name made of contents of kernel stack. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: Gustavo F. Padovan <[email protected]> CWE ID: CWE-20
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct bnep_connlist_req cl; struct bnep_connadd_req ca; struct bnep_conndel_req cd; struct bnep_conninfo ci; struct socket *nsock; void __user *argp = (void __user *)arg; int err; BT_DBG("cmd %x arg %lx", cmd, arg); switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } ca.device[sizeof(ca.device)-1] = 0; err = bnep_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return bnep_del_connection(&cd); case BNEPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case BNEPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = bnep_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; default: return -EINVAL; } return 0; }
165,897
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; } return rdesc; } Commit Message: HID: fix a couple of off-by-ones There are a few very theoretical off-by-one bugs in report descriptor size checking when performing a pre-parsing fixup. Fix those. Cc: [email protected] Reported-by: Ben Hawkes <[email protected]> Reviewed-by: Benjamin Tissoires <[email protected]> Signed-off-by: Jiri Kosina <[email protected]> CWE ID: CWE-119
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; } return rdesc; }
166,370
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: char* _multi_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc( len + 2 ); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; chr[ len ++ ] = '\0'; return chr; } Commit Message: New Pre Source CWE ID: CWE-119
char* _multi_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc( len + 2 ); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; chr[ len ++ ] = '\0'; return chr; }
169,313
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: _prolog_error(batch_job_launch_msg_t *req, int rc) { char *err_name_ptr, err_name[256], path_name[MAXPATHLEN]; char *fmt_char; int fd; if (req->std_err || req->std_out) { if (req->std_err) strncpy(err_name, req->std_err, sizeof(err_name)); else strncpy(err_name, req->std_out, sizeof(err_name)); if ((fmt_char = strchr(err_name, (int) '%')) && (fmt_char[1] == 'j') && !strchr(fmt_char+1, (int) '%')) { char tmp_name[256]; fmt_char[1] = 'u'; snprintf(tmp_name, sizeof(tmp_name), err_name, req->job_id); strncpy(err_name, tmp_name, sizeof(err_name)); } } else { snprintf(err_name, sizeof(err_name), "slurm-%u.out", req->job_id); } err_name_ptr = err_name; if (err_name_ptr[0] == '/') snprintf(path_name, MAXPATHLEN, "%s", err_name_ptr); else if (req->work_dir) snprintf(path_name, MAXPATHLEN, "%s/%s", req->work_dir, err_name_ptr); else snprintf(path_name, MAXPATHLEN, "/%s", err_name_ptr); if ((fd = open(path_name, (O_CREAT|O_APPEND|O_WRONLY), 0644)) == -1) { error("Unable to open %s: %s", path_name, slurm_strerror(errno)); return; } snprintf(err_name, sizeof(err_name), "Error running slurm prolog: %d\n", WEXITSTATUS(rc)); safe_write(fd, err_name, strlen(err_name)); if (fchown(fd, (uid_t) req->uid, (gid_t) req->gid) == -1) { snprintf(err_name, sizeof(err_name), "Couldn't change fd owner to %u:%u: %m\n", req->uid, req->gid); } rwfail: close(fd); } Commit Message: Fix security issue in _prolog_error(). Fix security issue caused by insecure file path handling triggered by the failure of a Prolog script. To exploit this a user needs to anticipate or cause the Prolog to fail for their job. (This commit is slightly different from the fix to the 15.08 branch.) CVE-2016-10030. CWE ID: CWE-284
_prolog_error(batch_job_launch_msg_t *req, int rc) { char *err_name_ptr, err_name[256], path_name[MAXPATHLEN]; char *fmt_char; int fd; if (req->std_err || req->std_out) { if (req->std_err) strncpy(err_name, req->std_err, sizeof(err_name)); else strncpy(err_name, req->std_out, sizeof(err_name)); if ((fmt_char = strchr(err_name, (int) '%')) && (fmt_char[1] == 'j') && !strchr(fmt_char+1, (int) '%')) { char tmp_name[256]; fmt_char[1] = 'u'; snprintf(tmp_name, sizeof(tmp_name), err_name, req->job_id); strncpy(err_name, tmp_name, sizeof(err_name)); } } else { snprintf(err_name, sizeof(err_name), "slurm-%u.out", req->job_id); } err_name_ptr = err_name; if (err_name_ptr[0] == '/') snprintf(path_name, MAXPATHLEN, "%s", err_name_ptr); else if (req->work_dir) snprintf(path_name, MAXPATHLEN, "%s/%s", req->work_dir, err_name_ptr); else snprintf(path_name, MAXPATHLEN, "/%s", err_name_ptr); if ((fd = _open_as_other(path_name, req)) == -1) { error("Unable to open %s: Permission denied", path_name); return; } snprintf(err_name, sizeof(err_name), "Error running slurm prolog: %d\n", WEXITSTATUS(rc)); safe_write(fd, err_name, strlen(err_name)); if (fchown(fd, (uid_t) req->uid, (gid_t) req->gid) == -1) { snprintf(err_name, sizeof(err_name), "Couldn't change fd owner to %u:%u: %m\n", req->uid, req->gid); } rwfail: close(fd); }
168,646
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PassRefPtr<DocumentFragment> Range::createContextualFragment(const String& markup, ExceptionCode& ec) { if (!m_start.container()) { ec = INVALID_STATE_ERR; return 0; } Node* element = m_start.container()->isElementNode() ? m_start.container() : m_start.container()->parentNode(); if (!element || !element->isHTMLElement()) { ec = NOT_SUPPORTED_ERR; return 0; } RefPtr<DocumentFragment> fragment = createDocumentFragmentForElement(markup, toElement(element), AllowScriptingContentAndDoNotMarkAlreadyStarted); if (!fragment) { ec = NOT_SUPPORTED_ERR; return 0; } return fragment.release(); } Commit Message: There are too many poorly named functions to create a fragment from markup https://bugs.webkit.org/show_bug.cgi?id=87339 Reviewed by Eric Seidel. Source/WebCore: Moved all functions that create a fragment from markup to markup.h/cpp. There should be no behavioral change. * dom/Range.cpp: (WebCore::Range::createContextualFragment): * dom/Range.h: Removed createDocumentFragmentForElement. * dom/ShadowRoot.cpp: (WebCore::ShadowRoot::setInnerHTML): * editing/markup.cpp: (WebCore::createFragmentFromMarkup): (WebCore::createFragmentForInnerOuterHTML): Renamed from createFragmentFromSource. (WebCore::createFragmentForTransformToFragment): Moved from XSLTProcessor. (WebCore::removeElementPreservingChildren): Moved from Range. (WebCore::createContextualFragment): Ditto. * editing/markup.h: * html/HTMLElement.cpp: (WebCore::HTMLElement::setInnerHTML): (WebCore::HTMLElement::setOuterHTML): (WebCore::HTMLElement::insertAdjacentHTML): * inspector/DOMPatchSupport.cpp: (WebCore::DOMPatchSupport::patchNode): Added a FIXME since this code should be using one of the functions listed in markup.h * xml/XSLTProcessor.cpp: (WebCore::XSLTProcessor::transformToFragment): Source/WebKit/qt: Replace calls to Range::createDocumentFragmentForElement by calls to createContextualDocumentFragment. * Api/qwebelement.cpp: (QWebElement::appendInside): (QWebElement::prependInside): (QWebElement::prependOutside): (QWebElement::appendOutside): (QWebElement::encloseContentsWith): (QWebElement::encloseWith): git-svn-id: svn://svn.chromium.org/blink/trunk@118414 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-264
PassRefPtr<DocumentFragment> Range::createContextualFragment(const String& markup, ExceptionCode& ec) { if (!m_start.container()) { ec = INVALID_STATE_ERR; return 0; } Node* element = m_start.container()->isElementNode() ? m_start.container() : m_start.container()->parentNode(); if (!element || !element->isHTMLElement()) { ec = NOT_SUPPORTED_ERR; return 0; } RefPtr<DocumentFragment> fragment = WebCore::createContextualFragment(markup, toElement(element), AllowScriptingContentAndDoNotMarkAlreadyStarted); if (!fragment) { ec = NOT_SUPPORTED_ERR; return 0; } return fragment.release(); }
170,434
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr; struct kvm_regs *regs = vcpu_gp_regs(vcpu); int nr_regs = sizeof(*regs) / sizeof(__u32); __uint128_t tmp; void *valp = &tmp; u64 off; int err = 0; /* Our ID is an index into the kvm_regs struct. */ off = core_reg_offset_from_id(reg->id); if (off >= nr_regs || (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) return -ENOENT; if (validate_core_offset(reg)) return -EINVAL; if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) return -EINVAL; if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) { err = -EFAULT; goto out; } if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK; switch (mode) { case PSR_AA32_MODE_USR: case PSR_AA32_MODE_FIQ: case PSR_AA32_MODE_IRQ: case PSR_AA32_MODE_SVC: case PSR_AA32_MODE_ABT: case PSR_AA32_MODE_UND: case PSR_MODE_EL0t: case PSR_MODE_EL1t: case PSR_MODE_EL1h: break; default: err = -EINVAL; goto out; } } memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); out: return err; } Commit Message: arm64: KVM: Sanitize PSTATE.M when being set from userspace Not all execution modes are valid for a guest, and some of them depend on what the HW actually supports. Let's verify that what userspace provides is compatible with both the VM settings and the HW capabilities. Cc: <[email protected]> Fixes: 0d854a60b1d7 ("arm64: KVM: enable initialization of a 32bit vcpu") Reviewed-by: Christoffer Dall <[email protected]> Reviewed-by: Mark Rutland <[email protected]> Reviewed-by: Dave Martin <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Signed-off-by: Will Deacon <[email protected]> CWE ID: CWE-20
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr; struct kvm_regs *regs = vcpu_gp_regs(vcpu); int nr_regs = sizeof(*regs) / sizeof(__u32); __uint128_t tmp; void *valp = &tmp; u64 off; int err = 0; /* Our ID is an index into the kvm_regs struct. */ off = core_reg_offset_from_id(reg->id); if (off >= nr_regs || (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) return -ENOENT; if (validate_core_offset(reg)) return -EINVAL; if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) return -EINVAL; if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) { err = -EFAULT; goto out; } if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK; switch (mode) { case PSR_AA32_MODE_USR: if (!system_supports_32bit_el0()) return -EINVAL; break; case PSR_AA32_MODE_FIQ: case PSR_AA32_MODE_IRQ: case PSR_AA32_MODE_SVC: case PSR_AA32_MODE_ABT: case PSR_AA32_MODE_UND: if (!vcpu_el1_is_32bit(vcpu)) return -EINVAL; break; case PSR_MODE_EL0t: case PSR_MODE_EL1t: case PSR_MODE_EL1h: if (vcpu_el1_is_32bit(vcpu)) return -EINVAL; break; default: err = -EINVAL; goto out; } } memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); out: return err; }
170,159
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: my_object_stringify (MyObject *obj, GValue *value, char **ret, GError **error) { GValue valstr = {0, }; g_value_init (&valstr, G_TYPE_STRING); if (!g_value_transform (value, &valstr)) { g_set_error (error, MY_OBJECT_ERROR, MY_OBJECT_ERROR_FOO, "couldn't transform value"); return FALSE; } *ret = g_value_dup_string (&valstr); g_value_unset (&valstr); return TRUE; } Commit Message: CWE ID: CWE-264
my_object_stringify (MyObject *obj, GValue *value, char **ret, GError **error)
165,122
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ScreenPositionController::ConvertHostPointToRelativeToRootWindow( aura::Window* root_window, const aura::Window::Windows& root_windows, gfx::Point* point, aura::Window** target_root) { DCHECK(!root_window->parent()); gfx::Point point_in_root(*point); root_window->GetHost()->ConvertPointFromHost(&point_in_root); *target_root = root_window; *point = point_in_root; #if defined(USE_X11) || defined(USE_OZONE) if (!root_window->GetHost()->GetBounds().Contains(*point)) { gfx::Point location_in_native(point_in_root); root_window->GetHost()->ConvertPointToNativeScreen(&location_in_native); for (size_t i = 0; i < root_windows.size(); ++i) { aura::WindowTreeHost* host = root_windows[i]->GetHost(); const gfx::Rect native_bounds = host->GetBounds(); if (native_bounds.Contains(location_in_native)) { *target_root = root_windows[i]; *point = location_in_native; host->ConvertPointFromNativeScreen(point); break; } } } #else NOTIMPLEMENTED(); #endif } Commit Message: Use the host coordinate when comparing to host window bounds. I somehow overlooked this and the test was not strict enough to catch this. BUG=521919 TEST=Updated ScreenPositionControllerTest.ConvertHostPointToScreenHiDPI so that it fails without the patch. Review URL: https://codereview.chromium.org/1293373002 Cr-Commit-Position: refs/heads/master@{#344186} CWE ID: CWE-399
void ScreenPositionController::ConvertHostPointToRelativeToRootWindow( aura::Window* root_window, const aura::Window::Windows& root_windows, gfx::Point* point, aura::Window** target_root) { DCHECK(!root_window->parent()); gfx::Point point_in_root(*point); root_window->GetHost()->ConvertPointFromHost(&point_in_root); #if defined(USE_X11) || defined(USE_OZONE) gfx::Rect host_bounds(root_window->GetHost()->GetBounds().size()); if (!host_bounds.Contains(*point)) { gfx::Point location_in_native(point_in_root); root_window->GetHost()->ConvertPointToNativeScreen(&location_in_native); for (size_t i = 0; i < root_windows.size(); ++i) { aura::WindowTreeHost* host = root_windows[i]->GetHost(); const gfx::Rect native_bounds = host->GetBounds(); if (native_bounds.Contains(location_in_native)) { *target_root = root_windows[i]; *point = location_in_native; host->ConvertPointFromNativeScreen(point); return; } } } #endif *target_root = root_window; *point = point_in_root; }
171,711
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static RList *r_bin_wasm_get_global_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmGlobalEntry *ptr = NULL; int buflen = bin->buf->length; if (sec->payload_data + 32 > buflen) { return NULL; } if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && len < buflen && r < count) { if (!(ptr = R_NEW0 (RBinWasmGlobalEntry))) { return ret; } if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, (ut8*)&ptr->content_type, &i))) { goto beach; } if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, &ptr->mutability, &i))) { goto beach; } if (len + 8 > buflen || !(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { goto beach; } r_list_append (ret, ptr); r++; } return ret; beach: free (ptr); return ret; } Commit Message: Fix crash in fuzzed wasm r2_hoobr_consume_init_expr CWE ID: CWE-125
static RList *r_bin_wasm_get_global_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmGlobalEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; int buflen = bin->buf->length - (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && len < buflen && r < count) { if (!(ptr = R_NEW0 (RBinWasmGlobalEntry))) { return ret; } if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, (ut8*)&ptr->content_type, &i))) { goto beach; } if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, &ptr->mutability, &i))) { goto beach; } if (len + 8 > buflen || !(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { goto beach; } r_list_append (ret, ptr); r++; } return ret; beach: free (ptr); return ret; }
168,253
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: my_object_many_stringify (MyObject *obj, GHashTable /* char * -> GValue * */ *vals, GHashTable /* char * -> GValue * */ **ret, GError **error) { *ret = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, unset_and_free_gvalue); g_hash_table_foreach (vals, hash_foreach_stringify, *ret); return TRUE; } Commit Message: CWE ID: CWE-264
my_object_many_stringify (MyObject *obj, GHashTable /* char * -> GValue * */ *vals, GHashTable /* char * -> GValue * */ **ret, GError **error)
165,112
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: _archive_write_data(struct archive *_a, const void *buff, size_t s) { struct archive_write *a = (struct archive_write *)_a; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_DATA, "archive_write_data"); archive_clear_error(&a->archive); return ((a->format_write_data)(a, buff, s)); } Commit Message: Limit write requests to at most INT_MAX. This prevents a certain common programming error (passing -1 to write) from leading to other problems deeper in the library. CWE ID: CWE-189
_archive_write_data(struct archive *_a, const void *buff, size_t s) { struct archive_write *a = (struct archive_write *)_a; const size_t max_write = INT_MAX; archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC, ARCHIVE_STATE_DATA, "archive_write_data"); /* In particular, this catches attempts to pass negative values. */ if (s > max_write) s = max_write; archive_clear_error(&a->archive); return ((a->format_write_data)(a, buff, s)); }
166,176
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { unsigned int i, j, startoff, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; int ret; char *buf_start; if (*total < sizeof(struct ebt_entries)) return -EINVAL; if (!entry->bitmask) { *total -= sizeof(struct ebt_entries); return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); } if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) return -EINVAL; startoff = state->buf_user_offset; /* pull in most part of ebt_entry, it does not need to be changed. */ ret = ebt_buf_add(state, entry, offsetof(struct ebt_entry, watchers_offset)); if (ret < 0) return ret; offsets[0] = sizeof(struct ebt_entry); /* matches come first */ memcpy(&offsets[1], &entry->watchers_offset, sizeof(offsets) - sizeof(offsets[0])); if (state->buf_kern_start) { buf_start = state->buf_kern_start + state->buf_kern_offset; offsets_update = (unsigned int *) buf_start; } ret = ebt_buf_add(state, &offsets[1], sizeof(offsets) - sizeof(offsets[0])); if (ret < 0) return ret; buf_start = (char *) entry; /* 0: matches offset, always follows ebt_entry. * 1: watchers offset, from ebt_entry structure * 2: target offset, from ebt_entry structure * 3: next ebt_entry offset, from ebt_entry structure * * offsets are relative to beginning of struct ebt_entry (i.e., 0). */ for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; char *buf = buf_start + offsets[i]; if (offsets[i] > offsets[j]) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; size = offsets[j] - offsets[i]; ret = ebt_size_mwt(match32, size, i, state, base); if (ret < 0) return ret; new_offset += ret; if (offsets_update && new_offset) { pr_debug("change offset %d to %d\n", offsets_update[i], offsets[j] + new_offset); offsets_update[i] = offsets[j] + new_offset; } } if (state->buf_kern_start == NULL) { unsigned int offset = buf_start - (char *) base; ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); if (ret < 0) return ret; } startoff = state->buf_user_offset - startoff; if (WARN_ON(*total < startoff)) return -EINVAL; *total -= startoff; return 0; } Commit Message: netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets We need to make sure the offsets are not out of range of the total size. Also check that they are in ascending order. The WARN_ON triggered by syzkaller (it sets panic_on_warn) is changed to also bail out, no point in continuing parsing. Briefly tested with simple ruleset of -A INPUT --limit 1/s' --log plus jump to custom chains using 32bit ebtables binary. Reported-by: <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]> CWE ID: CWE-787
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { unsigned int i, j, startoff, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; int ret; char *buf_start; if (*total < sizeof(struct ebt_entries)) return -EINVAL; if (!entry->bitmask) { *total -= sizeof(struct ebt_entries); return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); } if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) return -EINVAL; startoff = state->buf_user_offset; /* pull in most part of ebt_entry, it does not need to be changed. */ ret = ebt_buf_add(state, entry, offsetof(struct ebt_entry, watchers_offset)); if (ret < 0) return ret; offsets[0] = sizeof(struct ebt_entry); /* matches come first */ memcpy(&offsets[1], &entry->watchers_offset, sizeof(offsets) - sizeof(offsets[0])); if (state->buf_kern_start) { buf_start = state->buf_kern_start + state->buf_kern_offset; offsets_update = (unsigned int *) buf_start; } ret = ebt_buf_add(state, &offsets[1], sizeof(offsets) - sizeof(offsets[0])); if (ret < 0) return ret; buf_start = (char *) entry; /* 0: matches offset, always follows ebt_entry. * 1: watchers offset, from ebt_entry structure * 2: target offset, from ebt_entry structure * 3: next ebt_entry offset, from ebt_entry structure * * offsets are relative to beginning of struct ebt_entry (i.e., 0). */ for (i = 0; i < 4 ; ++i) { if (offsets[i] >= *total) return -EINVAL; if (i == 0) continue; if (offsets[i-1] > offsets[i]) return -EINVAL; } for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; char *buf = buf_start + offsets[i]; if (offsets[i] > offsets[j]) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; size = offsets[j] - offsets[i]; ret = ebt_size_mwt(match32, size, i, state, base); if (ret < 0) return ret; new_offset += ret; if (offsets_update && new_offset) { pr_debug("change offset %d to %d\n", offsets_update[i], offsets[j] + new_offset); offsets_update[i] = offsets[j] + new_offset; } } if (state->buf_kern_start == NULL) { unsigned int offset = buf_start - (char *) base; ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); if (ret < 0) return ret; } startoff = state->buf_user_offset - startoff; if (WARN_ON(*total < startoff)) return -EINVAL; *total -= startoff; return 0; }
169,358
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const Cluster* Segment::FindCluster(long long time_ns) const { if ((m_clusters == NULL) || (m_clusterCount <= 0)) return &m_eos; { Cluster* const pCluster = m_clusters[0]; assert(pCluster); assert(pCluster->m_index == 0); if (time_ns <= pCluster->GetTime()) return pCluster; } long i = 0; long j = m_clusterCount; while (i < j) { const long k = i + (j - i) / 2; assert(k < m_clusterCount); Cluster* const pCluster = m_clusters[k]; assert(pCluster); assert(pCluster->m_index == k); const long long t = pCluster->GetTime(); if (t <= time_ns) i = k + 1; else j = k; assert(i <= j); } assert(i == j); assert(i > 0); assert(i <= m_clusterCount); const long k = i - 1; Cluster* const pCluster = m_clusters[k]; assert(pCluster); assert(pCluster->m_index == k); assert(pCluster->GetTime() <= time_ns); return pCluster; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
const Cluster* Segment::FindCluster(long long time_ns) const { if ((m_clusters == NULL) || (m_clusterCount <= 0)) return &m_eos; { Cluster* const pCluster = m_clusters[0]; assert(pCluster); assert(pCluster->m_index == 0); if (time_ns <= pCluster->GetTime()) return pCluster; } // Binary search of cluster array long i = 0; long j = m_clusterCount; while (i < j) { // INVARIANT: //[0, i) <= time_ns //[i, j) ? //[j, m_clusterCount) > time_ns const long k = i + (j - i) / 2; assert(k < m_clusterCount); Cluster* const pCluster = m_clusters[k]; assert(pCluster); assert(pCluster->m_index == k); const long long t = pCluster->GetTime(); if (t <= time_ns) i = k + 1; else j = k; assert(i <= j); } assert(i == j); assert(i > 0); assert(i <= m_clusterCount); const long k = i - 1; Cluster* const pCluster = m_clusters[k]; assert(pCluster); assert(pCluster->m_index == k); assert(pCluster->GetTime() <= time_ns); return pCluster; }
174,279
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void GCInfoTable::Resize() { static const int kGcInfoZapValue = 0x33; const size_t kInitialSize = 512; size_t new_size = gc_info_table_size_ ? 2 * gc_info_table_size_ : kInitialSize; DCHECK(new_size < GCInfoTable::kMaxIndex); g_gc_info_table = reinterpret_cast<GCInfo const**>(WTF::Partitions::FastRealloc( g_gc_info_table, new_size * sizeof(GCInfo), "GCInfo")); DCHECK(g_gc_info_table); memset(reinterpret_cast<uint8_t*>(g_gc_info_table) + gc_info_table_size_ * sizeof(GCInfo), kGcInfoZapValue, (new_size - gc_info_table_size_) * sizeof(GCInfo)); gc_info_table_size_ = new_size; } Commit Message: [oilpan] Fix GCInfoTable for multiple threads Previously, grow and access from different threads could lead to a race on the table backing; see bug. - Rework the table to work on an existing reservation. - Commit upon growing, avoiding any copies. Drive-by: Fix over-allocation of table. Bug: chromium:841280 Change-Id: I329cb6f40091e14e8c05334ba1104a9440c31d43 Reviewed-on: https://chromium-review.googlesource.com/1061525 Commit-Queue: Michael Lippautz <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Cr-Commit-Position: refs/heads/master@{#560434} CWE ID: CWE-362
void GCInfoTable::Resize() { const size_t new_limit = (limit_) ? 2 * limit_ : ComputeInitialTableLimit(); const size_t old_committed_size = limit_ * kEntrySize; const size_t new_committed_size = new_limit * kEntrySize; CHECK(table_); CHECK_EQ(0u, new_committed_size % base::kPageAllocationGranularity); CHECK_GE(MaxTableSize(), limit_ * kEntrySize); // Recommitting and zapping assumes byte-addressable storage. uint8_t* const current_table_end = reinterpret_cast<uint8_t*>(table_) + old_committed_size; const size_t table_size_delta = new_committed_size - old_committed_size; // Commit the new size and allow read/write. // TODO(ajwong): SetSystemPagesAccess should be part of RecommitSystemPages to // avoid having two calls here. bool ok = base::SetSystemPagesAccess(current_table_end, table_size_delta, base::PageReadWrite); CHECK(ok); ok = base::RecommitSystemPages(current_table_end, table_size_delta, base::PageReadWrite); CHECK(ok); // Zap unused values., memset(current_table_end, kGcInfoZapValue, table_size_delta); limit_ = new_limit; }
173,136
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xmlDocPtr soap_xmlParseFile(const char *filename TSRMLS_DC) { xmlParserCtxtPtr ctxt = NULL; xmlDocPtr ret; zend_bool old_allow_url_fopen; /* xmlInitParser(); */ old_allow_url_fopen = PG(allow_url_fopen); PG(allow_url_fopen) = 1; ctxt = xmlCreateFileParserCtxt(filename); PG(allow_url_fopen) = old_allow_url_fopen; if (ctxt) { ctxt->keepBlanks = 0; ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace; ctxt->sax->comment = soap_Comment; ctxt->sax->warning = NULL; /*ctxt->sax->fatalError = NULL;*/ xmlParseDocument(ctxt); if (ctxt->wellFormed) { ret = ctxt->myDoc; if (ret->URL == NULL && ctxt->directory != NULL) { ret->URL = xmlCharStrdup(ctxt->directory); } } else { ret = NULL; xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL; } xmlFreeParserCtxt(ctxt); } else { ret = NULL; } /* xmlCleanupParser(); */ if (ret) { cleanup_xml_node((xmlNodePtr)ret); } return ret; } Commit Message: CWE ID: CWE-200
xmlDocPtr soap_xmlParseFile(const char *filename TSRMLS_DC) { xmlParserCtxtPtr ctxt = NULL; xmlDocPtr ret; zend_bool old_allow_url_fopen; /* xmlInitParser(); */ old_allow_url_fopen = PG(allow_url_fopen); PG(allow_url_fopen) = 1; ctxt = xmlCreateFileParserCtxt(filename); PG(allow_url_fopen) = old_allow_url_fopen; if (ctxt) { ctxt->keepBlanks = 0; ctxt->options -= XML_PARSE_DTDLOAD; ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace; ctxt->sax->comment = soap_Comment; ctxt->sax->warning = NULL; /*ctxt->sax->fatalError = NULL;*/ xmlParseDocument(ctxt); if (ctxt->wellFormed) { ret = ctxt->myDoc; if (ret->URL == NULL && ctxt->directory != NULL) { ret->URL = xmlCharStrdup(ctxt->directory); } } else { ret = NULL; xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL; } xmlFreeParserCtxt(ctxt); } else { ret = NULL; } /* xmlCleanupParser(); */ if (ret) { cleanup_xml_node((xmlNodePtr)ret); } return ret; }
164,727
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int find_low_bit(unsigned int x) { int i; for(i=0;i<=31;i++) { if(x&(1<<i)) return i; } return 0; } Commit Message: Trying to fix some invalid left shift operations Fixes issue #16 CWE ID: CWE-682
static int find_low_bit(unsigned int x) { int i; for(i=0;i<=31;i++) { if(x&(1U<<(unsigned int)i)) return i; } return 0; }
168,195
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; const char* disp_loc_name = NULL; int disp_loc_name_len = 0; int free_loc_name = 0; UChar* disp_name = NULL; int32_t disp_name_len = 0; char* mod_loc_name = NULL; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; char* utf8value = NULL; int utf8value_len = 0; char* msg = NULL; int grOffset = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &loc_name, &loc_name_len , &disp_loc_name ,&disp_loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_display_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len > ULOC_FULLNAME_CAPACITY) { /* See bug 67397: overlong locale names cause trouble in uloc_getDisplayName */ spprintf(&msg , 0, "locale_get_display_%s : name too long", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } if( strcmp(tag_name, DISP_NAME) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ mod_loc_name = getPreferredTag( loc_name ); } else { /* Since Grandfathered, no value, do nothing, retutn NULL */ RETURN_FALSE; } } } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name==NULL ){ mod_loc_name = estrdup( loc_name ); } /* Check if disp_loc_name passed , if not use default locale */ if( !disp_loc_name){ disp_loc_name = estrdup(intl_locale_get_default(TSRMLS_C)); free_loc_name = 1; } /* Get the disp_value for the given locale */ do{ disp_name = erealloc( disp_name , buflen * sizeof(UChar) ); disp_name_len = buflen; if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ buflen = uloc_getDisplayLanguage ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getDisplayScript ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getDisplayCountry ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getDisplayVariant ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , DISP_NAME)==0 ){ buflen = uloc_getDisplayName ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } /* U_STRING_NOT_TERMINATED_WARNING is admissible here; don't look for it */ if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; continue; } spprintf(&msg, 0, "locale_get_display_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); if( disp_name){ efree( disp_name ); } if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } RETURN_FALSE; } } while( buflen > disp_name_len ); if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } /* Convert display locale name from UTF-16 to UTF-8. 
*/ intl_convert_utf16_to_utf8( &utf8value, &utf8value_len, disp_name, buflen, &status ); efree( disp_name ); if( U_FAILURE( status ) ) { spprintf(&msg, 0, "locale_get_display_%s :error converting display name for %s to UTF-8", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } RETVAL_STRINGL( utf8value, utf8value_len , FALSE); } Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read CWE ID: CWE-125
static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; const char* disp_loc_name = NULL; int disp_loc_name_len = 0; int free_loc_name = 0; UChar* disp_name = NULL; int32_t disp_name_len = 0; char* mod_loc_name = NULL; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; char* utf8value = NULL; int utf8value_len = 0; char* msg = NULL; int grOffset = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &loc_name, &loc_name_len , &disp_loc_name ,&disp_loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_display_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len > ULOC_FULLNAME_CAPACITY) { /* See bug 67397: overlong locale names cause trouble in uloc_getDisplayName */ spprintf(&msg , 0, "locale_get_display_%s : name too long", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } if( strcmp(tag_name, DISP_NAME) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ mod_loc_name = getPreferredTag( loc_name ); } else { /* Since Grandfathered, no value, do nothing, retutn NULL */ RETURN_FALSE; } } } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name==NULL ){ mod_loc_name = estrdup( loc_name ); } /* Check if disp_loc_name passed , if not use default locale */ if( !disp_loc_name){ disp_loc_name = estrdup(intl_locale_get_default(TSRMLS_C)); free_loc_name = 1; } /* Get the disp_value for the given locale */ do{ disp_name = erealloc( disp_name , buflen * sizeof(UChar) ); disp_name_len = buflen; if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ buflen = uloc_getDisplayLanguage ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getDisplayScript ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getDisplayCountry ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getDisplayVariant ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , DISP_NAME)==0 ){ buflen = uloc_getDisplayName ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } /* U_STRING_NOT_TERMINATED_WARNING is admissible here; don't look for it */ if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; continue; } spprintf(&msg, 0, "locale_get_display_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); if( disp_name){ efree( disp_name ); } if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } RETURN_FALSE; } } while( buflen > disp_name_len ); if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } /* Convert display locale name from UTF-16 to UTF-8. 
*/ intl_convert_utf16_to_utf8( &utf8value, &utf8value_len, disp_name, buflen, &status ); efree( disp_name ); if( U_FAILURE( status ) ) { spprintf(&msg, 0, "locale_get_display_%s :error converting display name for %s to UTF-8", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } RETVAL_STRINGL( utf8value, utf8value_len , FALSE); }
167,204
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: FrameView::FrameView(LocalFrame* frame) : m_frame(frame) , m_canHaveScrollbars(true) , m_slowRepaintObjectCount(0) , m_hasPendingLayout(false) , m_layoutSubtreeRoot(0) , m_inSynchronousPostLayout(false) , m_postLayoutTasksTimer(this, &FrameView::postLayoutTimerFired) , m_updateWidgetsTimer(this, &FrameView::updateWidgetsTimerFired) , m_isTransparent(false) , m_baseBackgroundColor(Color::white) , m_mediaType("screen") , m_overflowStatusDirty(true) , m_viewportRenderer(0) , m_wasScrolledByUser(false) , m_inProgrammaticScroll(false) , m_safeToPropagateScrollToParent(true) , m_isTrackingPaintInvalidations(false) , m_scrollCorner(nullptr) , m_hasSoftwareFilters(false) , m_visibleContentScaleFactor(1) , m_inputEventsScaleFactorForEmulation(1) , m_layoutSizeFixedToFrameSize(true) , m_didScrollTimer(this, &FrameView::didScrollTimerFired) { ASSERT(m_frame); init(); if (!m_frame->isMainFrame()) return; ScrollableArea::setVerticalScrollElasticity(ScrollElasticityAllowed); ScrollableArea::setHorizontalScrollElasticity(ScrollElasticityAllowed); } Commit Message: Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea. updateWidgetPositions() can destroy the render tree, so it should never be called from inside RenderLayerScrollableArea. Leaving it there allows for the potential of use-after-free bugs. BUG=402407 [email protected] Review URL: https://codereview.chromium.org/490473003 git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-416
FrameView::FrameView(LocalFrame* frame) : m_frame(frame) , m_canHaveScrollbars(true) , m_slowRepaintObjectCount(0) , m_hasPendingLayout(false) , m_layoutSubtreeRoot(0) , m_inSynchronousPostLayout(false) , m_postLayoutTasksTimer(this, &FrameView::postLayoutTimerFired) , m_updateWidgetsTimer(this, &FrameView::updateWidgetsTimerFired) , m_isTransparent(false) , m_baseBackgroundColor(Color::white) , m_mediaType("screen") , m_overflowStatusDirty(true) , m_viewportRenderer(0) , m_wasScrolledByUser(false) , m_inProgrammaticScroll(false) , m_safeToPropagateScrollToParent(true) , m_isTrackingPaintInvalidations(false) , m_scrollCorner(nullptr) , m_hasSoftwareFilters(false) , m_visibleContentScaleFactor(1) , m_inputEventsScaleFactorForEmulation(1) , m_layoutSizeFixedToFrameSize(true) , m_didScrollTimer(this, &FrameView::didScrollTimerFired) , m_needsUpdateWidgetPositions(false) { ASSERT(m_frame); init(); if (!m_frame->isMainFrame()) return; ScrollableArea::setVerticalScrollElasticity(ScrollElasticityAllowed); ScrollableArea::setHorizontalScrollElasticity(ScrollElasticityAllowed); }
171,635
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: MYSQLND_METHOD(mysqlnd_conn_data, set_server_option)(MYSQLND_CONN_DATA * const conn, enum_mysqlnd_server_option option TSRMLS_DC) { size_t this_func = STRUCT_OFFSET(struct st_mysqlnd_conn_data_methods, set_server_option); zend_uchar buffer[2]; enum_func_status ret = FAIL; DBG_ENTER("mysqlnd_conn_data::set_server_option"); if (PASS == conn->m->local_tx_start(conn, this_func TSRMLS_CC)) { int2store(buffer, (unsigned int) option); ret = conn->m->simple_command(conn, COM_SET_OPTION, buffer, sizeof(buffer), PROT_EOF_PACKET, FALSE, TRUE TSRMLS_CC); conn->m->local_tx_end(conn, this_func, ret TSRMLS_CC); } DBG_RETURN(ret); } Commit Message: CWE ID: CWE-284
MYSQLND_METHOD(mysqlnd_conn_data, set_server_option)(MYSQLND_CONN_DATA * const conn, enum_mysqlnd_server_option option TSRMLS_DC) { size_t this_func = STRUCT_OFFSET(struct st_mysqlnd_conn_data_methods, set_server_option); zend_uchar buffer[2]; enum_func_status ret = FAIL; DBG_ENTER("mysqlnd_conn_data::set_server_option"); if (PASS == conn->m->local_tx_start(conn, this_func TSRMLS_CC)) { int2store(buffer, (unsigned int) option); ret = conn->m->simple_command(conn, COM_SET_OPTION, buffer, sizeof(buffer), PROT_EOF_PACKET, FALSE, TRUE TSRMLS_CC); conn->m->local_tx_end(conn, this_func, ret TSRMLS_CC); } DBG_RETURN(ret); }
165,274
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, Image *image) { const char *mode, *option; CompressionType compression; EndianType endian_type; MagickBooleanType debug, status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t imageListLength; ssize_t y; TIFF *tiff; TIFFInfo tiff_info; uint16 bits_per_sample, compress_tag, endian, photometric, predictor; unsigned char *pixels; /* Open TIFF file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) SetMagickThreadValue(tiff_exception,&image->exception); endian_type=UndefinedEndian; option=GetImageOption(image_info,"tiff:endian"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian_type=MSBEndian; if (LocaleNCompare(option,"lsb",3) == 0) endian_type=LSBEndian;; } switch (endian_type) { case LSBEndian: mode="wl"; break; case MSBEndian: mode="wb"; break; default: mode="w"; break; } #if defined(TIFF_VERSION_BIG) if (LocaleCompare(image_info->magick,"TIFF64") == 0) switch (endian_type) { case LSBEndian: mode="wl8"; break; case MSBEndian: mode="wb8"; break; default: mode="w8"; break; } #endif tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) return(MagickFalse); if (image->exception.severity > ErrorException) { TIFFClose(tiff); return(MagickFalse); } (void) DeleteImageProfile(image,"tiff:37724"); scene=0; debug=IsEventLogging(); (void) debug; imageListLength=GetImageListLength(image); do { /* Initialize TIFF fields. 
*/ if ((image_info->type != UndefinedType) && (image_info->type != OptimizeType)) (void) SetImageType(image,image_info->type); compression=UndefinedCompression; if (image->compression != JPEGCompression) compression=image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; switch (compression) { case FaxCompression: case Group4Compression: { (void) SetImageType(image,BilevelType); (void) SetImageDepth(image,1); break; } case JPEGCompression: { (void) SetImageStorageClass(image,DirectClass); (void) SetImageDepth(image,8); break; } default: break; } quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if ((image->storage_class != PseudoClass) && (image->depth >= 32) && (quantum_info->format == UndefinedQuantumFormat) && (IsHighDynamicRangeImage(image,&image->exception) != MagickFalse)) { status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { quantum_info=DestroyQuantumInfo(quantum_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } } if ((LocaleCompare(image_info->magick,"PTIF") == 0) && (GetPreviousImageInList(image) != (Image *) NULL)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); if ((image->columns != (uint32) image->columns) || (image->rows != (uint32) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); (void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows); (void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns); switch (compression) { case FaxCompression: { compress_tag=COMPRESSION_CCITTFAX3; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } case Group4Compression: { compress_tag=COMPRESSION_CCITTFAX4; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } #if defined(COMPRESSION_JBIG) case JBIG1Compression: { compress_tag=COMPRESSION_JBIG; break; } #endif case JPEGCompression: { compress_tag=COMPRESSION_JPEG; break; } #if defined(COMPRESSION_LZMA) case LZMACompression: { compress_tag=COMPRESSION_LZMA; break; } #endif case LZWCompression: { compress_tag=COMPRESSION_LZW; break; } case RLECompression: { compress_tag=COMPRESSION_PACKBITS; break; } #if defined(COMPRESSION_WEBP) case WebPCompression: { compress_tag=COMPRESSION_WEBP; break; } #endif case ZipCompression: { compress_tag=COMPRESSION_ADOBE_DEFLATE; break; } #if defined(COMPRESSION_ZSTD) case ZstdCompression: { compress_tag=COMPRESSION_ZSTD; break; } #endif case NoCompression: default: { compress_tag=COMPRESSION_NONE; break; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; } #else switch (compress_tag) { #if defined(CCITT_SUPPORT) case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: #endif #if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT) case COMPRESSION_JPEG: #endif #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: #endif #if defined(LZW_SUPPORT) case COMPRESSION_LZW: #endif #if 
defined(PACKBITS_SUPPORT) case COMPRESSION_PACKBITS: #endif #if defined(ZIP_SUPPORT) case COMPRESSION_ADOBE_DEFLATE: #endif case COMPRESSION_NONE: break; default: { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; break; } } #endif if (image->colorspace == CMYKColorspace) { photometric=PHOTOMETRIC_SEPARATED; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4); (void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK); } else { /* Full color TIFF raster. */ if (image->colorspace == LabColorspace) { photometric=PHOTOMETRIC_CIELAB; EncodeLabImage(image,&image->exception); } else if (image->colorspace == YCbCrColorspace) { photometric=PHOTOMETRIC_YCBCR; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1); (void) SetImageStorageClass(image,DirectClass); (void) SetImageDepth(image,8); } else photometric=PHOTOMETRIC_RGB; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3); if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType)) { if ((image_info->type != PaletteType) && (SetImageGray(image,&image->exception) != MagickFalse)) { photometric=(uint16) (quantum_info->min_is_white != MagickFalse ? PHOTOMETRIC_MINISWHITE : PHOTOMETRIC_MINISBLACK); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); if ((image->depth == 1) && (image->matte == MagickFalse)) SetImageMonochrome(image,&image->exception); } else if (image->storage_class == PseudoClass) { size_t depth; /* Colormapped TIFF raster. */ (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); photometric=PHOTOMETRIC_PALETTE; depth=1; while ((GetQuantumRange(depth)+1) < image->colors) depth<<=1; status=SetQuantumDepth(image,quantum_info,depth); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian); if ((compress_tag == COMPRESSION_CCITTFAX3) || (compress_tag == COMPRESSION_CCITTFAX4)) { if ((photometric != PHOTOMETRIC_MINISWHITE) && (photometric != PHOTOMETRIC_MINISBLACK)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } } option=GetImageOption(image_info,"tiff:fill-order"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian=FILLORDER_MSB2LSB; if (LocaleNCompare(option,"lsb",3) == 0) endian=FILLORDER_LSB2MSB; } (void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag); (void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian); (void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth); if (image->matte != MagickFalse) { uint16 extra_samples, sample_info[1], samples_per_pixel; /* TIFF has a matte channel. 
*/ extra_samples=1; sample_info[0]=EXTRASAMPLE_UNASSALPHA; option=GetImageOption(image_info,"tiff:alpha"); if (option != (const char *) NULL) { if (LocaleCompare(option,"associated") == 0) sample_info[0]=EXTRASAMPLE_ASSOCALPHA; else if (LocaleCompare(option,"unspecified") == 0) sample_info[0]=EXTRASAMPLE_UNSPECIFIED; } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1); (void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples, &sample_info); if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA) SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); } (void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric); switch (quantum_info->format) { case FloatingPointQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP); (void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum); (void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum); break; } case SignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT); break; } case UnsignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT); break; } default: break; } (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); if (photometric == PHOTOMETRIC_RGB) if ((image_info->interlace == PlaneInterlace) || (image_info->interlace == PartitionInterlace)) (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE); predictor=0; switch (compress_tag) { case COMPRESSION_JPEG: { #if defined(JPEG_SUPPORT) if (image_info->quality != UndefinedCompressionQuality) (void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality); (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW); if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { const char *value; (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB); if (image->colorspace == YCbCrColorspace) { const char *sampling_factor; GeometryInfo geometry_info; MagickStatusType flags; sampling_factor=(const char *) NULL; value=GetImageProperty(image,"jpeg:sampling-factor"); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor != (const char *) NULL) { flags=ParseGeometry(sampling_factor,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16) geometry_info.rho,(uint16) geometry_info.sigma); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (bits_per_sample == 12) (void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT); #endif break; } case COMPRESSION_ADOBE_DEFLATE: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } case COMPRESSION_CCITTFAX3: { /* Byte-aligned EOL. 
*/ (void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4); break; } case COMPRESSION_CCITTFAX4: break; #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: { if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } #endif case COMPRESSION_LZW: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; break; } #if defined(WEBP_SUPPORT) && defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_WEBP_LEVEL,mage_info->quality); if (image_info->quality >= 100) (void) TIFFSetField(tiff,TIFFTAG_WEBP_LOSSLESS,1); break; } #endif #if defined(ZSTD_SUPPORT) && defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZSTD_LEVEL,22*image_info->quality/ 100.0); break; } #endif default: break; } option=GetImageOption(image_info,"tiff:predictor"); if (option != (const char * ) NULL) predictor=(size_t) strtol(option,(char **) NULL,10); if (predictor != 0) (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,predictor); if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0)) { unsigned short units; /* Set image resolution. */ units=RESUNIT_NONE; if (image->units == PixelsPerInchResolution) units=RESUNIT_INCH; if (image->units == PixelsPerCentimeterResolution) units=RESUNIT_CENTIMETER; (void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units); (void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->x_resolution); (void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->y_resolution); if ((image->page.x < 0) || (image->page.y < 0)) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"TIFF: negative image positions unsupported","%s", image->filename); if ((image->page.x > 0) && (image->x_resolution > 0.0)) { /* Set horizontal image position. */ (void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/ image->x_resolution); } if ((image->page.y > 0) && (image->y_resolution > 0.0)) { /* Set vertical image position. */ (void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/ image->y_resolution); } } if (image->chromaticity.white_point.x != 0.0) { float chromaticity[6]; /* Set image chromaticity. 
*/ chromaticity[0]=(float) image->chromaticity.red_primary.x; chromaticity[1]=(float) image->chromaticity.red_primary.y; chromaticity[2]=(float) image->chromaticity.green_primary.x; chromaticity[3]=(float) image->chromaticity.green_primary.y; chromaticity[4]=(float) image->chromaticity.blue_primary.x; chromaticity[5]=(float) image->chromaticity.blue_primary.y; (void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity); chromaticity[0]=(float) image->chromaticity.white_point.x; chromaticity[1]=(float) image->chromaticity.white_point.y; (void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity); } if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (image_info->adjoin != MagickFalse) && (imageListLength > 1)) { (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); if (image->scene != 0) (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene, imageListLength); } if (image->orientation != UndefinedOrientation) (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation); else (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); (void) TIFFSetProfiles(tiff,image); { uint16 page, pages; page=(uint16) scene; pages=(uint16) imageListLength; if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (image_info->adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } (void) TIFFSetProperties(tiff,image_info,image); DisableMSCWarning(4127) if (0) RestoreMSCWarning (void) TIFFSetEXIFProperties(tiff,image); /* Write image scanlines. */ if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); quantum_info->endian=LSBEndian; pixels=GetQuantumPixels(quantum_info); tiff_info.scanline=GetQuantumPixels(quantum_info); switch (photometric) { case PHOTOMETRIC_CIELAB: case PHOTOMETRIC_YCBCR: case PHOTOMETRIC_RGB: { /* RGB TIFF image. */ switch (image_info->interlace) { case NoInterlace: default: { quantum_type=RGBQuantum; if (image->matte != MagickFalse) quantum_type=RGBAQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } break; } case PlaneInterlace: case PartitionInterlace: { /* Plane interlacing: RRRRRR...GGGGGG...BBBBBB... 
*/ for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,100,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GreenQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,200,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,BlueQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,300,400); if (status == MagickFalse) break; } if (image->matte != MagickFalse) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,AlphaQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,400,400); if (status == MagickFalse) break; } break; } } break; } case PHOTOMETRIC_SEPARATED: { /* CMYK TIFF image. */ quantum_type=CMYKQuantum; if (image->matte != MagickFalse) quantum_type=CMYKAQuantum; if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case PHOTOMETRIC_PALETTE: { uint16 *blue, *green, *red; /* Colormapped TIFF image. */ red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red)); green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green)); blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue)); if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) || (blue == (uint16 *) NULL)) { if (red != (uint16 *) NULL) red=(uint16 *) RelinquishMagickMemory(red); if (green != (uint16 *) NULL) green=(uint16 *) RelinquishMagickMemory(green); if (blue != (uint16 *) NULL) blue=(uint16 *) RelinquishMagickMemory(blue); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize TIFF colormap. 
*/ (void) memset(red,0,65536*sizeof(*red)); (void) memset(green,0,65536*sizeof(*green)); (void) memset(blue,0,65536*sizeof(*blue)); for (i=0; i < (ssize_t) image->colors; i++) { red[i]=ScaleQuantumToShort(image->colormap[i].red); green[i]=ScaleQuantumToShort(image->colormap[i].green); blue[i]=ScaleQuantumToShort(image->colormap[i].blue); } (void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue); red=(uint16 *) RelinquishMagickMemory(red); green=(uint16 *) RelinquishMagickMemory(green); blue=(uint16 *) RelinquishMagickMemory(blue); } default: { /* Convert PseudoClass packets to contiguous grayscale scanlines. */ quantum_type=IndexQuantum; if (image->matte != MagickFalse) { if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayAlphaQuantum; else quantum_type=IndexAlphaQuantum; } else if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } } quantum_info=DestroyQuantumInfo(quantum_info); if (image->colorspace == LabColorspace) DecodeLabImage(image,&image->exception); DestroyTIFFInfo(&tiff_info); if (image->exception.severity > ErrorException) break; DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) RestoreMSCWarning TIFFPrintDirectory(tiff,stdout,MagickFalse); (void) TIFFWriteDirectory(tiff); image=SyncNextImageInList(image); if (image == (Image *) NULL) break; status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); TIFFClose(tiff); return(image->exception.severity > ErrorException ? MagickFalse : MagickTrue); } Commit Message: https://github.com/ImageMagick/ImageMagick/issues/1560 CWE ID: CWE-125
static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, Image *image) { const char *mode, *option; CompressionType compression; EndianType endian_type; MagickBooleanType debug, status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t imageListLength; ssize_t y; TIFF *tiff; TIFFInfo tiff_info; uint16 bits_per_sample, compress_tag, endian, photometric, predictor; unsigned char *pixels; /* Open TIFF file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) SetMagickThreadValue(tiff_exception,&image->exception); endian_type=UndefinedEndian; option=GetImageOption(image_info,"tiff:endian"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian_type=MSBEndian; if (LocaleNCompare(option,"lsb",3) == 0) endian_type=LSBEndian;; } switch (endian_type) { case LSBEndian: mode="wl"; break; case MSBEndian: mode="wb"; break; default: mode="w"; break; } #if defined(TIFF_VERSION_BIG) if (LocaleCompare(image_info->magick,"TIFF64") == 0) switch (endian_type) { case LSBEndian: mode="wl8"; break; case MSBEndian: mode="wb8"; break; default: mode="w8"; break; } #endif tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) return(MagickFalse); if (image->exception.severity > ErrorException) { TIFFClose(tiff); return(MagickFalse); } (void) DeleteImageProfile(image,"tiff:37724"); scene=0; debug=IsEventLogging(); (void) debug; imageListLength=GetImageListLength(image); do { /* Initialize TIFF fields. 
*/ if ((image_info->type != UndefinedType) && (image_info->type != OptimizeType)) (void) SetImageType(image,image_info->type); compression=UndefinedCompression; if (image->compression != JPEGCompression) compression=image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; switch (compression) { case FaxCompression: case Group4Compression: { (void) SetImageType(image,BilevelType); (void) SetImageDepth(image,1); break; } case JPEGCompression: { (void) SetImageStorageClass(image,DirectClass); (void) SetImageDepth(image,8); break; } default: break; } quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if ((image->storage_class != PseudoClass) && (image->depth >= 32) && (quantum_info->format == UndefinedQuantumFormat) && (IsHighDynamicRangeImage(image,&image->exception) != MagickFalse)) { status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { quantum_info=DestroyQuantumInfo(quantum_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } } if ((LocaleCompare(image_info->magick,"PTIF") == 0) && (GetPreviousImageInList(image) != (Image *) NULL)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); if ((image->columns != (uint32) image->columns) || (image->rows != (uint32) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); (void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows); (void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns); switch (compression) { case FaxCompression: { compress_tag=COMPRESSION_CCITTFAX3; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } case Group4Compression: { compress_tag=COMPRESSION_CCITTFAX4; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } #if defined(COMPRESSION_JBIG) case JBIG1Compression: { compress_tag=COMPRESSION_JBIG; break; } #endif case JPEGCompression: { compress_tag=COMPRESSION_JPEG; break; } #if defined(COMPRESSION_LZMA) case LZMACompression: { compress_tag=COMPRESSION_LZMA; break; } #endif case LZWCompression: { compress_tag=COMPRESSION_LZW; break; } case RLECompression: { compress_tag=COMPRESSION_PACKBITS; break; } #if defined(COMPRESSION_WEBP) case WebPCompression: { compress_tag=COMPRESSION_WEBP; break; } #endif case ZipCompression: { compress_tag=COMPRESSION_ADOBE_DEFLATE; break; } #if defined(COMPRESSION_ZSTD) case ZstdCompression: { compress_tag=COMPRESSION_ZSTD; break; } #endif case NoCompression: default: { compress_tag=COMPRESSION_NONE; break; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; } #else switch (compress_tag) { #if defined(CCITT_SUPPORT) case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: #endif #if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT) case COMPRESSION_JPEG: #endif #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: #endif #if defined(LZW_SUPPORT) case COMPRESSION_LZW: #endif #if 
defined(PACKBITS_SUPPORT) case COMPRESSION_PACKBITS: #endif #if defined(ZIP_SUPPORT) case COMPRESSION_ADOBE_DEFLATE: #endif case COMPRESSION_NONE: break; default: { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; break; } } #endif if (image->colorspace == CMYKColorspace) { photometric=PHOTOMETRIC_SEPARATED; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4); (void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK); } else { /* Full color TIFF raster. */ if (image->colorspace == LabColorspace) { photometric=PHOTOMETRIC_CIELAB; EncodeLabImage(image,&image->exception); } else if (image->colorspace == YCbCrColorspace) { photometric=PHOTOMETRIC_YCBCR; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1); (void) SetImageStorageClass(image,DirectClass); (void) SetImageDepth(image,8); } else photometric=PHOTOMETRIC_RGB; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3); if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType)) { if ((image_info->type != PaletteType) && (SetImageGray(image,&image->exception) != MagickFalse)) { photometric=(uint16) (quantum_info->min_is_white != MagickFalse ? PHOTOMETRIC_MINISWHITE : PHOTOMETRIC_MINISBLACK); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); if ((image->depth == 1) && (image->matte == MagickFalse)) SetImageMonochrome(image,&image->exception); } else if (image->storage_class == PseudoClass) { size_t depth; /* Colormapped TIFF raster. */ (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); photometric=PHOTOMETRIC_PALETTE; depth=1; while ((GetQuantumRange(depth)+1) < image->colors) depth<<=1; status=SetQuantumDepth(image,quantum_info,depth); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian); if ((compress_tag == COMPRESSION_CCITTFAX3) || (compress_tag == COMPRESSION_CCITTFAX4)) { if ((photometric != PHOTOMETRIC_MINISWHITE) && (photometric != PHOTOMETRIC_MINISBLACK)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } } option=GetImageOption(image_info,"tiff:fill-order"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian=FILLORDER_MSB2LSB; if (LocaleNCompare(option,"lsb",3) == 0) endian=FILLORDER_LSB2MSB; } (void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag); (void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian); (void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth); if (image->matte != MagickFalse) { uint16 extra_samples, sample_info[1], samples_per_pixel; /* TIFF has a matte channel. 
*/ extra_samples=1; sample_info[0]=EXTRASAMPLE_UNASSALPHA; option=GetImageOption(image_info,"tiff:alpha"); if (option != (const char *) NULL) { if (LocaleCompare(option,"associated") == 0) sample_info[0]=EXTRASAMPLE_ASSOCALPHA; else if (LocaleCompare(option,"unspecified") == 0) sample_info[0]=EXTRASAMPLE_UNSPECIFIED; } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1); (void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples, &sample_info); if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA) SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); } (void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric); switch (quantum_info->format) { case FloatingPointQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP); (void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum); (void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum); break; } case SignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT); break; } case UnsignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT); break; } default: break; } (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); if (photometric == PHOTOMETRIC_RGB) if ((image_info->interlace == PlaneInterlace) || (image_info->interlace == PartitionInterlace)) (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE); predictor=0; switch (compress_tag) { case COMPRESSION_JPEG: { #if defined(JPEG_SUPPORT) if (image_info->quality != UndefinedCompressionQuality) (void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality); (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW); if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { const char *value; (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB); if (image->colorspace == YCbCrColorspace) { const char *sampling_factor; GeometryInfo geometry_info; MagickStatusType flags; sampling_factor=(const char *) NULL; value=GetImageProperty(image,"jpeg:sampling-factor"); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor != (const char *) NULL) { flags=ParseGeometry(sampling_factor,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16) geometry_info.rho,(uint16) geometry_info.sigma); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (bits_per_sample == 12) (void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT); #endif break; } case COMPRESSION_ADOBE_DEFLATE: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } case COMPRESSION_CCITTFAX3: { /* Byte-aligned EOL. 
*/ (void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4); break; } case COMPRESSION_CCITTFAX4: break; #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: { if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } #endif case COMPRESSION_LZW: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; break; } #if defined(WEBP_SUPPORT) && defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_WEBP_LEVEL,mage_info->quality); if (image_info->quality >= 100) (void) TIFFSetField(tiff,TIFFTAG_WEBP_LOSSLESS,1); break; } #endif #if defined(ZSTD_SUPPORT) && defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZSTD_LEVEL,22*image_info->quality/ 100.0); break; } #endif default: break; } option=GetImageOption(image_info,"tiff:predictor"); if (option != (const char * ) NULL) predictor=(size_t) strtol(option,(char **) NULL,10); if (predictor != 0) (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,predictor); if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0)) { unsigned short units; /* Set image resolution. */ units=RESUNIT_NONE; if (image->units == PixelsPerInchResolution) units=RESUNIT_INCH; if (image->units == PixelsPerCentimeterResolution) units=RESUNIT_CENTIMETER; (void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units); (void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->x_resolution); (void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->y_resolution); if ((image->page.x < 0) || (image->page.y < 0)) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"TIFF: negative image positions unsupported","%s", image->filename); if ((image->page.x > 0) && (image->x_resolution > 0.0)) { /* Set horizontal image position. */ (void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/ image->x_resolution); } if ((image->page.y > 0) && (image->y_resolution > 0.0)) { /* Set vertical image position. */ (void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/ image->y_resolution); } } if (image->chromaticity.white_point.x != 0.0) { float chromaticity[6]; /* Set image chromaticity. 
*/ chromaticity[0]=(float) image->chromaticity.red_primary.x; chromaticity[1]=(float) image->chromaticity.red_primary.y; chromaticity[2]=(float) image->chromaticity.green_primary.x; chromaticity[3]=(float) image->chromaticity.green_primary.y; chromaticity[4]=(float) image->chromaticity.blue_primary.x; chromaticity[5]=(float) image->chromaticity.blue_primary.y; (void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity); chromaticity[0]=(float) image->chromaticity.white_point.x; chromaticity[1]=(float) image->chromaticity.white_point.y; (void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity); } if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (image_info->adjoin != MagickFalse) && (imageListLength > 1)) { (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); if (image->scene != 0) (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene, imageListLength); } if (image->orientation != UndefinedOrientation) (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation); else (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); (void) TIFFSetProfiles(tiff,image); { uint16 page, pages; page=(uint16) scene; pages=(uint16) imageListLength; if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (image_info->adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } (void) TIFFSetProperties(tiff,image_info,image); DisableMSCWarning(4127) if (0) RestoreMSCWarning (void) TIFFSetEXIFProperties(tiff,image); /* Write image scanlines. */ if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); quantum_info->endian=LSBEndian; pixels=GetQuantumPixels(quantum_info); tiff_info.scanline=GetQuantumPixels(quantum_info); switch (photometric) { case PHOTOMETRIC_CIELAB: case PHOTOMETRIC_YCBCR: case PHOTOMETRIC_RGB: { /* RGB TIFF image. */ switch (image_info->interlace) { case NoInterlace: default: { quantum_type=RGBQuantum; if (image->matte != MagickFalse) quantum_type=RGBAQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } break; } case PlaneInterlace: case PartitionInterlace: { /* Plane interlacing: RRRRRR...GGGGGG...BBBBBB... 
*/ for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,100,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GreenQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,200,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,BlueQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,300,400); if (status == MagickFalse) break; } if (image->matte != MagickFalse) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,AlphaQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,400,400); if (status == MagickFalse) break; } break; } } break; } case PHOTOMETRIC_SEPARATED: { /* CMYK TIFF image. */ quantum_type=CMYKQuantum; if (image->matte != MagickFalse) quantum_type=CMYKAQuantum; if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case PHOTOMETRIC_PALETTE: { uint16 *blue, *green, *red; /* Colormapped TIFF image. */ red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red)); green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green)); blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue)); if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) || (blue == (uint16 *) NULL)) { if (red != (uint16 *) NULL) red=(uint16 *) RelinquishMagickMemory(red); if (green != (uint16 *) NULL) green=(uint16 *) RelinquishMagickMemory(green); if (blue != (uint16 *) NULL) blue=(uint16 *) RelinquishMagickMemory(blue); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize TIFF colormap. 
*/ (void) memset(red,0,65536*sizeof(*red)); (void) memset(green,0,65536*sizeof(*green)); (void) memset(blue,0,65536*sizeof(*blue)); for (i=0; i < (ssize_t) image->colors; i++) { red[i]=ScaleQuantumToShort(image->colormap[i].red); green[i]=ScaleQuantumToShort(image->colormap[i].green); blue[i]=ScaleQuantumToShort(image->colormap[i].blue); } (void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue); red=(uint16 *) RelinquishMagickMemory(red); green=(uint16 *) RelinquishMagickMemory(green); blue=(uint16 *) RelinquishMagickMemory(blue); } default: { /* Convert PseudoClass packets to contiguous grayscale scanlines. */ quantum_type=IndexQuantum; if (image->matte != MagickFalse) { if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayAlphaQuantum; else quantum_type=IndexAlphaQuantum; } else if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } } quantum_info=DestroyQuantumInfo(quantum_info); if (image->colorspace == LabColorspace) DecodeLabImage(image,&image->exception); DestroyTIFFInfo(&tiff_info); DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) RestoreMSCWarning TIFFPrintDirectory(tiff,stdout,MagickFalse); if (TIFFWriteDirectory(tiff) == 0) { status=MagickFalse; break; } image=SyncNextImageInList(image); if (image == (Image *) NULL) break; status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); TIFFClose(tiff); return(status); }
169,553
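The visible change in the fixed WriteTIFFImage above is confined to the end of the per-image loop: instead of casting the result of TIFFWriteDirectory() to void and inferring success from the exception severity afterwards, the fix tests the return value and propagates failure through status. A minimal sketch of that pattern follows; write_pages and its pages parameter are placeholders invented for the sketch, while TIFFWriteDirectory() is the real libtiff call, which returns 1 on success and 0 on failure.

#include <stdio.h>
#include <tiffio.h>

/* Sketch: write each page and leave the multi-page loop as soon as the
 * directory write fails, instead of ignoring the return value. */
static int write_pages(TIFF *tiff, int pages)
{
    int status = 1;                      /* stands in for MagickTrue */
    for (int page = 0; page < pages; page++) {
        /* ... per-page TIFFSetField()/scanline writes would happen here ... */
        if (TIFFWriteDirectory(tiff) == 0) {
            fprintf(stderr, "directory write failed on page %d\n", page);
            status = 0;                  /* stands in for MagickFalse */
            break;
        }
    }
    return status;
}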
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RunCoeffCheck() { ACMRandom rnd(ACMRandom::DeterministicSeed()); const int count_test_block = 5000; DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs); DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs); DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs); for (int i = 0; i < count_test_block; ++i) { for (int j = 0; j < kNumCoeffs; ++j) input_block[j] = rnd.Rand8() - rnd.Rand8(); fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_); REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_)); for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(output_block[j], output_ref_block[j]); } } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void RunCoeffCheck() { ACMRandom rnd(ACMRandom::DeterministicSeed()); const int count_test_block = 5000; DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]); DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]); DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]); for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-mask_, mask_]. for (int j = 0; j < kNumCoeffs; ++j) input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_); ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_)); for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(output_block[j], output_ref_block[j]); } }
174,548
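Beyond the DECLARE_ALIGNED and tran_low_t type changes, the functional change in the fixed RunCoeffCheck is the input generator: (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_) keeps every test coefficient inside [-mask_, mask_] for the configured bit depth, where the old rnd.Rand8() - rnd.Rand8() ignored the depth entirely. The short standalone sketch below shows only that bounding arithmetic; bit_depth, the assumption that the mask is formed as (1 << bit_depth) - 1, and the use of plain rand() instead of libvpx's ACMRandom are all simplifications for illustration.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int bit_depth = 8;
    const int mask = (1 << bit_depth) - 1;      /* 255 for 8-bit samples */
    for (int i = 0; i < 5; i++) {
        int a = rand() & mask;                  /* each term lands in [0, mask] */
        int b = rand() & mask;
        printf("sample %d = %d\n", i, a - b);   /* difference lies in [-mask, mask] */
    }
    return 0;
}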
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length, uint32 width, uint16 spp, struct dump_opts *dump) { int i, j, bytes_per_sample, bytes_per_pixel, shift_width, result = 1; int32 bytes_read = 0; uint16 bps, nstrips, planar, strips_per_sample; uint32 src_rowsize, dst_rowsize, rows_processed, rps; uint32 rows_this_strip = 0; tsample_t s; tstrip_t strip; tsize_t scanlinesize = TIFFScanlineSize(in); tsize_t stripsize = TIFFStripSize(in); unsigned char *srcbuffs[MAX_SAMPLES]; unsigned char *buff = NULL; unsigned char *dst = NULL; if (obuf == NULL) { TIFFError("readSeparateStripsIntoBuffer","Invalid buffer argument"); return (0); } memset (srcbuffs, '\0', sizeof(srcbuffs)); TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); TIFFGetFieldDefaulted(in, TIFFTAG_PLANARCONFIG, &planar); TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps); if (rps > length) rps = length; bytes_per_sample = (bps + 7) / 8; bytes_per_pixel = ((bps * spp) + 7) / 8; if (bytes_per_pixel < (bytes_per_sample + 1)) shift_width = bytes_per_pixel; else shift_width = bytes_per_sample + 1; src_rowsize = ((bps * width) + 7) / 8; dst_rowsize = ((bps * width * spp) + 7) / 8; dst = obuf; if ((dump->infile != NULL) && (dump->level == 3)) { dump_info (dump->infile, dump->format, "", "Image width %d, length %d, Scanline size, %4d bytes", width, length, scanlinesize); dump_info (dump->infile, dump->format, "", "Bits per sample %d, Samples per pixel %d, Shift width %d", bps, spp, shift_width); } /* Libtiff seems to assume/require that data for separate planes are * written one complete plane after another and not interleaved in any way. * Multiple scanlines and possibly strips of the same plane must be * written before data for any other plane. */ nstrips = TIFFNumberOfStrips(in); strips_per_sample = nstrips /spp; for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) { srcbuffs[s] = NULL; buff = _TIFFmalloc(stripsize); if (!buff) { TIFFError ("readSeparateStripsIntoBuffer", "Unable to allocate strip read buffer for sample %d", s); for (i = 0; i < s; i++) _TIFFfree (srcbuffs[i]); return 0; } srcbuffs[s] = buff; } rows_processed = 0; for (j = 0; (j < strips_per_sample) && (result == 1); j++) { for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) { buff = srcbuffs[s]; strip = (s * strips_per_sample) + j; bytes_read = TIFFReadEncodedStrip (in, strip, buff, stripsize); rows_this_strip = bytes_read / src_rowsize; if (bytes_read < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read strip %lu for sample %d", (unsigned long) strip, s + 1); result = 0; break; } #ifdef DEVELMODE TIFFError("", "Strip %2d, read %5d bytes for %4d scanlines, shift width %d", strip, bytes_read, rows_this_strip, shift_width); #endif } if (rps > rows_this_strip) rps = rows_this_strip; dst = obuf + (dst_rowsize * rows_processed); if ((bps % 8) == 0) { if (combineSeparateSamplesBytes (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } } else { switch (shift_width) { case 1: if (combineSeparateSamples8bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; case 2: if (combineSeparateSamples16bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; case 3: if (combineSeparateSamples24bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; case 4: case 5: case 6: case 7: case 8: if (combineSeparateSamples32bits (srcbuffs, 
dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; default: TIFFError ("readSeparateStripsIntoBuffer", "Unsupported bit depth: %d", bps); result = 0; break; } } if ((rows_processed + rps) > length) { rows_processed = length; rps = length - rows_processed; } else rows_processed += rps; } /* free any buffers allocated for each plane or scanline and * any temporary buffers */ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) { buff = srcbuffs[s]; if (buff != NULL) _TIFFfree(buff); } return (result); } /* end readSeparateStripsIntoBuffer */ Commit Message: * tools/tiffcp.c: fix read of undefined variable in case of missing required tags. Found on test case of MSVR 35100. * tools/tiffcrop.c: fix read of undefined buffer in readContigStripsIntoBuffer() due to uint16 overflow. Probably not a security issue but I can be wrong. Reported as MSVR 35100 by Axel Souchet from the MSRC Vulnerabilities & Mitigations team. CWE ID: CWE-190
static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length, uint32 width, uint16 spp, struct dump_opts *dump) { int i, bytes_per_sample, bytes_per_pixel, shift_width, result = 1; uint32 j; int32 bytes_read = 0; uint16 bps, planar; uint32 nstrips; uint32 strips_per_sample; uint32 src_rowsize, dst_rowsize, rows_processed, rps; uint32 rows_this_strip = 0; tsample_t s; tstrip_t strip; tsize_t scanlinesize = TIFFScanlineSize(in); tsize_t stripsize = TIFFStripSize(in); unsigned char *srcbuffs[MAX_SAMPLES]; unsigned char *buff = NULL; unsigned char *dst = NULL; if (obuf == NULL) { TIFFError("readSeparateStripsIntoBuffer","Invalid buffer argument"); return (0); } memset (srcbuffs, '\0', sizeof(srcbuffs)); TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); TIFFGetFieldDefaulted(in, TIFFTAG_PLANARCONFIG, &planar); TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps); if (rps > length) rps = length; bytes_per_sample = (bps + 7) / 8; bytes_per_pixel = ((bps * spp) + 7) / 8; if (bytes_per_pixel < (bytes_per_sample + 1)) shift_width = bytes_per_pixel; else shift_width = bytes_per_sample + 1; src_rowsize = ((bps * width) + 7) / 8; dst_rowsize = ((bps * width * spp) + 7) / 8; dst = obuf; if ((dump->infile != NULL) && (dump->level == 3)) { dump_info (dump->infile, dump->format, "", "Image width %d, length %d, Scanline size, %4d bytes", width, length, scanlinesize); dump_info (dump->infile, dump->format, "", "Bits per sample %d, Samples per pixel %d, Shift width %d", bps, spp, shift_width); } /* Libtiff seems to assume/require that data for separate planes are * written one complete plane after another and not interleaved in any way. * Multiple scanlines and possibly strips of the same plane must be * written before data for any other plane. */ nstrips = TIFFNumberOfStrips(in); strips_per_sample = nstrips /spp; for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) { srcbuffs[s] = NULL; buff = _TIFFmalloc(stripsize); if (!buff) { TIFFError ("readSeparateStripsIntoBuffer", "Unable to allocate strip read buffer for sample %d", s); for (i = 0; i < s; i++) _TIFFfree (srcbuffs[i]); return 0; } srcbuffs[s] = buff; } rows_processed = 0; for (j = 0; (j < strips_per_sample) && (result == 1); j++) { for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) { buff = srcbuffs[s]; strip = (s * strips_per_sample) + j; bytes_read = TIFFReadEncodedStrip (in, strip, buff, stripsize); rows_this_strip = bytes_read / src_rowsize; if (bytes_read < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read strip %lu for sample %d", (unsigned long) strip, s + 1); result = 0; break; } #ifdef DEVELMODE TIFFError("", "Strip %2d, read %5d bytes for %4d scanlines, shift width %d", strip, bytes_read, rows_this_strip, shift_width); #endif } if (rps > rows_this_strip) rps = rows_this_strip; dst = obuf + (dst_rowsize * rows_processed); if ((bps % 8) == 0) { if (combineSeparateSamplesBytes (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } } else { switch (shift_width) { case 1: if (combineSeparateSamples8bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; case 2: if (combineSeparateSamples16bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; case 3: if (combineSeparateSamples24bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; case 4: case 5: case 6: case 7: case 8: if 
(combineSeparateSamples32bits (srcbuffs, dst, width, rps, spp, bps, dump->infile, dump->format, dump->level)) { result = 0; break; } break; default: TIFFError ("readSeparateStripsIntoBuffer", "Unsupported bit depth: %d", bps); result = 0; break; } } if ((rows_processed + rps) > length) { rows_processed = length; rps = length - rows_processed; } else rows_processed += rps; } /* free any buffers allocated for each plane or scanline and * any temporary buffers */ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) { buff = srcbuffs[s]; if (buff != NULL) _TIFFfree(buff); } return (result); } /* end readSeparateStripsIntoBuffer */
166,867
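The tiffcrop fix is essentially a widening of integer types: nstrips and strips_per_sample become uint32 and the loop counter j becomes uint32, because TIFFNumberOfStrips() yields a 32-bit strip count and storing it in a 16-bit variable silently truncates once an image has more than 65535 strips, the uint16 overflow named in the commit message. A tiny worked example of that truncation, with 70000 chosen arbitrarily as the strip count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t nstrips32 = 70000;                 /* a strip count a large TIFF can reach */
    uint16_t nstrips16 = (uint16_t) nstrips32;  /* the old, too-narrow variable */
    /* 70000 mod 65536 == 4464, so a loop bounded by nstrips16 would visit only
     * 4464 of the 70000 strips and downstream size math would be wrong. */
    printf("uint32: %u, truncated uint16: %u\n",
           (unsigned) nstrips32, (unsigned) nstrips16);
    return 0;
}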
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ikev1_vid_print(netdissect_options *ndo, u_char tpay _U_, const struct isakmp_gen *ext, u_int item_len _U_, const u_char *ep _U_, uint32_t phase _U_, uint32_t doi _U_, uint32_t proto _U_, int depth _U_) { struct isakmp_gen e; ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_VID))); ND_TCHECK(*ext); UNALIGNED_MEMCPY(&e, ext, sizeof(e)); ND_PRINT((ndo," len=%d", ntohs(e.len) - 4)); if (2 < ndo->ndo_vflag && 4 < ntohs(e.len)) { ND_PRINT((ndo," ")); if (!rawprint(ndo, (const uint8_t *)(ext + 1), ntohs(e.len) - 4)) goto trunc; } return (const u_char *)ext + ntohs(e.len); trunc: ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_VID))); return NULL; } Commit Message: CVE-2017-13690/IKEv2: Fix some bounds checks. Use a pointer of the correct type in ND_TCHECK(), or use ND_TCHECK2() and provide the correct length. While we're at it, remove the blank line between some checks and the UNALIGNED_MEMCPY()s they protect. Also, note the places where we print the entire payload. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s). CWE ID: CWE-125
ikev1_vid_print(netdissect_options *ndo, u_char tpay _U_, const struct isakmp_gen *ext, u_int item_len _U_, const u_char *ep _U_, uint32_t phase _U_, uint32_t doi _U_, uint32_t proto _U_, int depth _U_) { struct isakmp_gen e; ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_VID))); ND_TCHECK(*ext); UNALIGNED_MEMCPY(&e, ext, sizeof(e)); ND_PRINT((ndo," len=%d", ntohs(e.len) - 4)); if (2 < ndo->ndo_vflag && 4 < ntohs(e.len)) { /* Print the entire payload in hex */ ND_PRINT((ndo," ")); if (!rawprint(ndo, (const uint8_t *)(ext + 1), ntohs(e.len) - 4)) goto trunc; } return (const u_char *)ext + ntohs(e.len); trunc: ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_VID))); return NULL; }
167,795
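In the ikev1_vid_print record the only textual difference is a comment, but the commit message describes the real CVE-2017-13690 pattern: bounds checks that verified too few bytes (ND_TCHECK on a pointer of the wrong type) were replaced with checks covering the correct length before the payload is printed. The sketch below is a generic, library-free restatement of that idea rather than tcpdump's actual macros; in_capture, print_payload and the [|truncated] marker are inventions for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return 1 only if the whole range [p, p+len) lies inside the capture. */
static int in_capture(const uint8_t *p, size_t len,
                      const uint8_t *start, const uint8_t *end)
{
    return p >= start && p <= end && len <= (size_t)(end - p);
}

static void print_payload(const uint8_t *payload, size_t claimed_len,
                          const uint8_t *start, const uint8_t *end)
{
    if (!in_capture(payload, claimed_len, start, end)) {
        puts("[|truncated]");            /* bail out instead of reading past the end */
        return;
    }
    for (size_t i = 0; i < claimed_len; i++)
        printf("%02x", payload[i]);
    putchar('\n');
}

int main(void)
{
    const uint8_t pkt[] = { 0xde, 0xad, 0xbe, 0xef };
    print_payload(pkt, 4, pkt, pkt + sizeof(pkt));   /* within bounds: printed */
    print_payload(pkt, 64, pkt, pkt + sizeof(pkt));  /* claimed length too big: rejected */
    return 0;
}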
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int store_xauthority(void) { fs_build_mnt_dir(); char *src; char *dest = RUN_XAUTHORITY_FILE; FILE *fp = fopen(dest, "w"); if (fp) { fprintf(fp, "\n"); SET_PERMS_STREAM(fp, getuid(), getgid(), 0600); fclose(fp); } if (asprintf(&src, "%s/.Xauthority", cfg.homedir) == -1) errExit("asprintf"); struct stat s; if (stat(src, &s) == 0) { if (is_link(src)) { fprintf(stderr, "Error: invalid .Xauthority file\n"); exit(1); } pid_t child = fork(); if (child < 0) errExit("fork"); if (child == 0) { drop_privs(0); int rv = copy_file(src, dest); if (rv) fprintf(stderr, "Warning: cannot transfer .Xauthority in private home directory\n"); else { fs_logger2("clone", dest); } _exit(0); } waitpid(child, NULL, 0); if (chown(dest, getuid(), getgid()) == -1) errExit("fchown"); if (chmod(dest, 0600) == -1) errExit("fchmod"); return 1; // file copied } return 0; } Commit Message: security fix CWE ID: CWE-269
static int store_xauthority(void) { fs_build_mnt_dir(); char *src; char *dest = RUN_XAUTHORITY_FILE; // create an empty file as root, and change ownership to user FILE *fp = fopen(dest, "w"); if (fp) { fprintf(fp, "\n"); SET_PERMS_STREAM(fp, getuid(), getgid(), 0600); fclose(fp); } if (asprintf(&src, "%s/.Xauthority", cfg.homedir) == -1) errExit("asprintf"); struct stat s; if (stat(src, &s) == 0) { if (is_link(src)) { fprintf(stderr, "Warning: invalid .Xauthority file\n"); return 0; } copy_file_as_user(src, dest, getuid(), getgid(), 0600); // regular user fs_logger2("clone", dest); return 1; // file copied } return 0; }
170,100
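The firejail fix collapses the fork/drop_privs/copy_file sequence into a single copy_file_as_user() call and turns the symlink case into a warning instead of a hard exit, so the .Xauthority copy never runs with root privileges. copy_file_as_user() is firejail's own helper and its body is not shown in this record; the sketch below is only a generic POSIX illustration of the same idea, where copy_as_user is a made-up name and delegating to cp stands in for whatever copy routine the real helper uses.

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Copy src to dest with the invoking user's privileges, never root's. */
static int copy_as_user(const char *src, const char *dest,
                        uid_t uid, gid_t gid)
{
    pid_t child = fork();
    if (child < 0)
        return -1;
    if (child == 0) {
        /* Drop the group first, then the user; abort the child if either fails. */
        if (setgid(gid) != 0 || setuid(uid) != 0)
            _exit(1);
        execlp("cp", "cp", "--", src, dest, (char *) NULL);
        _exit(1);                        /* exec failed */
    }
    int wstatus = 0;
    if (waitpid(child, &wstatus, 0) < 0)
        return -1;
    return (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) ? 0 : -1;
}

Dropping the group before the user matters here: once setuid() has succeeded, the process can no longer change its gid, so the reverse order could leave the child with root's group.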
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int nbd_negotiate_read(QIOChannel *ioc, void *buffer, size_t size) { ssize_t ret; guint watch; assert(qemu_in_coroutine()); /* Negotiation are always in main loop. */ watch = qio_channel_add_watch(ioc, G_IO_IN, nbd_negotiate_continue, qemu_coroutine_self(), NULL); ret = nbd_read(ioc, buffer, size, NULL); g_source_remove(watch); return ret; } Commit Message: CWE ID: CWE-20
static int nbd_negotiate_read(QIOChannel *ioc, void *buffer, size_t size)
165,454
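This record gives no commit message and its output is cut off after the fixed function's signature, so the concrete QEMU change cannot be reconstructed from the record alone; the only firm anchor is the CWE-20 (improper input validation) label. The sketch below is therefore a deliberately generic illustration of that class of hardening in a read path, with read_option and MAX_OPT_LEN invented for the example and no claim that they correspond to the NBD protocol or to QEMU's fix.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_OPT_LEN 4096u    /* hypothetical cap chosen for this sketch */

/* Validate a peer-supplied length against both a hard cap and the bytes
 * actually available before copying anything. */
static bool read_option(const uint8_t *buf, size_t available,
                        uint32_t claimed_len, uint8_t *out, size_t out_size)
{
    if (claimed_len > MAX_OPT_LEN)
        return false;                    /* reject oversized requests up front */
    if (claimed_len > available || claimed_len > out_size)
        return false;                    /* never read or write past a buffer */
    memcpy(out, buf, claimed_len);
    return true;
}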
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadVIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_ntscRGB 1 #define VFF_CM_NONE 0 #define VFF_DEP_DECORDER 0x4 #define VFF_DEP_NSORDER 0x8 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MAPTYP_2_BYTE 2 #define VFF_MAPTYP_4_BYTE 4 #define VFF_MAPTYP_FLOAT 5 #define VFF_MAPTYP_DOUBLE 7 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_MS_SHARED 3 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 #define VFF_TYP_2_BYTE 2 #define VFF_TYP_4_BYTE 4 #define VFF_TYP_FLOAT 5 #define VFF_TYP_DOUBLE 9 typedef struct _ViffInfo { unsigned char identifier, file_type, release, version, machine_dependency, reserve[3]; char comment[512]; unsigned int rows, columns, subrows; int x_offset, y_offset; float x_bits_per_pixel, y_bits_per_pixel; unsigned int location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; double min_value, scale_factor, value; Image *image; int bit; MagickBooleanType status; MagickSizeType number_pixels; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; register unsigned char *p; size_t bytes_per_pixel, max_packets, quantum; ssize_t count, y; unsigned char *pixels; unsigned long lsb_first; ViffInfo viff_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read VIFF header (1024 bytes). */ count=ReadBlob(image,1,&viff_info.identifier); do { /* Verify VIFF identifier. */ if ((count != 1) || ((unsigned char) viff_info.identifier != 0xab)) ThrowReaderException(CorruptImageError,"NotAVIFFImage"); /* Initialize VIFF image. 
*/ (void) ReadBlob(image,sizeof(viff_info.file_type),&viff_info.file_type); (void) ReadBlob(image,sizeof(viff_info.release),&viff_info.release); (void) ReadBlob(image,sizeof(viff_info.version),&viff_info.version); (void) ReadBlob(image,sizeof(viff_info.machine_dependency), &viff_info.machine_dependency); (void) ReadBlob(image,sizeof(viff_info.reserve),viff_info.reserve); (void) ReadBlob(image,512,(unsigned char *) viff_info.comment); viff_info.comment[511]='\0'; if (strlen(viff_info.comment) > 4) (void) SetImageProperty(image,"comment",viff_info.comment); if ((viff_info.machine_dependency == VFF_DEP_DECORDER) || (viff_info.machine_dependency == VFF_DEP_NSORDER)) image->endian=LSBEndian; else image->endian=MSBEndian; viff_info.rows=ReadBlobLong(image); viff_info.columns=ReadBlobLong(image); viff_info.subrows=ReadBlobLong(image); viff_info.x_offset=ReadBlobSignedLong(image); viff_info.y_offset=ReadBlobSignedLong(image); viff_info.x_bits_per_pixel=(float) ReadBlobLong(image); viff_info.y_bits_per_pixel=(float) ReadBlobLong(image); viff_info.location_type=ReadBlobLong(image); viff_info.location_dimension=ReadBlobLong(image); viff_info.number_of_images=ReadBlobLong(image); viff_info.number_data_bands=ReadBlobLong(image); viff_info.data_storage_type=ReadBlobLong(image); viff_info.data_encode_scheme=ReadBlobLong(image); viff_info.map_scheme=ReadBlobLong(image); viff_info.map_storage_type=ReadBlobLong(image); viff_info.map_rows=ReadBlobLong(image); viff_info.map_columns=ReadBlobLong(image); viff_info.map_subrows=ReadBlobLong(image); viff_info.map_enable=ReadBlobLong(image); viff_info.maps_per_cycle=ReadBlobLong(image); viff_info.color_space_model=ReadBlobLong(image); for (i=0; i < 420; i++) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); image->columns=viff_info.rows; image->rows=viff_info.columns; image->depth=viff_info.x_bits_per_pixel <= 8 ? 8UL : MAGICKCORE_QUANTUM_DEPTH; /* Verify that we can read this VIFF image. 
*/ number_pixels=(MagickSizeType) viff_info.columns*viff_info.rows; if (number_pixels != (size_t) number_pixels) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (number_pixels == 0) ThrowReaderException(CoderError,"ImageColumnOrRowSizeIsNotSupported"); if ((viff_info.number_data_bands < 1) || (viff_info.number_data_bands > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((viff_info.data_storage_type != VFF_TYP_BIT) && (viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.data_storage_type != VFF_TYP_2_BYTE) && (viff_info.data_storage_type != VFF_TYP_4_BYTE) && (viff_info.data_storage_type != VFF_TYP_FLOAT) && (viff_info.data_storage_type != VFF_TYP_DOUBLE)) ThrowReaderException(CoderError,"DataStorageTypeIsNotSupported"); if (viff_info.data_encode_scheme != VFF_DES_RAW) ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); if ((viff_info.map_storage_type != VFF_MAPTYP_NONE) && (viff_info.map_storage_type != VFF_MAPTYP_1_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_2_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_4_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_FLOAT) && (viff_info.map_storage_type != VFF_MAPTYP_DOUBLE)) ThrowReaderException(CoderError,"MapStorageTypeIsNotSupported"); if ((viff_info.color_space_model != VFF_CM_NONE) && (viff_info.color_space_model != VFF_CM_ntscRGB) && (viff_info.color_space_model != VFF_CM_genericRGB)) ThrowReaderException(CoderError,"ColorspaceModelIsNotSupported"); if (viff_info.location_type != VFF_LOC_IMPLICIT) ThrowReaderException(CoderError,"LocationTypeIsNotSupported"); if (viff_info.number_of_images != 1) ThrowReaderException(CoderError,"NumberOfImagesIsNotSupported"); if (viff_info.map_rows == 0) viff_info.map_scheme=VFF_MS_NONE; switch ((int) viff_info.map_scheme) { case VFF_MS_NONE: { if (viff_info.number_data_bands < 3) { /* Create linear color ramp. */ if (viff_info.data_storage_type == VFF_TYP_BIT) image->colors=2; else if (viff_info.data_storage_type == VFF_MAPTYP_1_BYTE) image->colors=256UL; else image->colors=image->depth <= 8 ? 256UL : 65536UL; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } break; } case VFF_MS_ONEPERBAND: case VFF_MS_SHARED: { unsigned char *viff_colormap; /* Allocate VIFF colormap. */ switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_1_BYTE: bytes_per_pixel=1; break; case VFF_MAPTYP_2_BYTE: bytes_per_pixel=2; break; case VFF_MAPTYP_4_BYTE: bytes_per_pixel=4; break; case VFF_MAPTYP_FLOAT: bytes_per_pixel=4; break; case VFF_MAPTYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } image->colors=viff_info.map_columns; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (viff_info.map_rows > (viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Read VIFF raster colormap. 
*/ (void) ReadBlob(image,bytes_per_pixel*image->colors*viff_info.map_rows, viff_colormap); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: { MSBOrderShort(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } case VFF_MAPTYP_4_BYTE: case VFF_MAPTYP_FLOAT: { MSBOrderLong(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } default: break; } for (i=0; i < (ssize_t) (viff_info.map_rows*image->colors); i++) { switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: value=1.0*((short *) viff_colormap)[i]; break; case VFF_MAPTYP_4_BYTE: value=1.0*((int *) viff_colormap)[i]; break; case VFF_MAPTYP_FLOAT: value=((float *) viff_colormap)[i]; break; case VFF_MAPTYP_DOUBLE: value=((double *) viff_colormap)[i]; break; default: value=1.0*viff_colormap[i]; break; } if (i < (ssize_t) image->colors) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) value); image->colormap[i].green=ScaleCharToQuantum((unsigned char) value); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) value); } else if (i < (ssize_t) (2*image->colors)) image->colormap[i % image->colors].green=ScaleCharToQuantum( (unsigned char) value); else if (i < (ssize_t) (3*image->colors)) image->colormap[i % image->colors].blue=ScaleCharToQuantum( (unsigned char) value); } viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); break; } default: ThrowReaderException(CoderError,"ColormapTypeNotSupported"); } /* Initialize image structure. */ image->matte=viff_info.number_data_bands == 4 ? MagickTrue : MagickFalse; image->storage_class= (viff_info.number_data_bands < 3 ? PseudoClass : DirectClass); image->columns=viff_info.rows; image->rows=viff_info.columns; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Allocate VIFF pixels. 
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: bytes_per_pixel=2; break; case VFF_TYP_4_BYTE: bytes_per_pixel=4; break; case VFF_TYP_FLOAT: bytes_per_pixel=4; break; case VFF_TYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } if (viff_info.data_storage_type == VFF_TYP_BIT) { if (CheckMemoryOverflow((image->columns+7UL) >> 3UL,image->rows) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=((image->columns+7UL) >> 3UL)*image->rows; } else { if (CheckMemoryOverflow(number_pixels,viff_info.number_data_bands) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=(size_t) (number_pixels*viff_info.number_data_bands); } pixels=(unsigned char *) AcquireQuantumMemory(MagickMax(number_pixels, max_packets),bytes_per_pixel*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,bytes_per_pixel*max_packets,pixels); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: { MSBOrderShort(pixels,bytes_per_pixel*max_packets); break; } case VFF_TYP_4_BYTE: case VFF_TYP_FLOAT: { MSBOrderLong(pixels,bytes_per_pixel*max_packets); break; } default: break; } min_value=0.0; scale_factor=1.0; if ((viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.map_scheme == VFF_MS_NONE)) { double max_value; /* Determine scale factor. */ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[0]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[0]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[0]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[0]; break; default: value=1.0*pixels[0]; break; } max_value=value; min_value=value; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (value > max_value) max_value=value; else if (value < min_value) min_value=value; } if ((min_value == 0) && (max_value == 0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(MagickRealType) QuantumRange/min_value; min_value=0; } else scale_factor=(MagickRealType) QuantumRange/(max_value-min_value); } /* Convert pixels to Quantum size. */ p=(unsigned char *) pixels; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (viff_info.map_scheme == VFF_MS_NONE) { value=(value-min_value)*scale_factor; if (value > QuantumRange) value=QuantumRange; else if (value < 0) value=0; } *p=(unsigned char) ((Quantum) value); p++; } /* Convert VIFF raster image to pixel packets. */ p=(unsigned char *) pixels; if (viff_info.data_storage_type == VFF_TYP_BIT) { /* Convert bitmap scanline. 
*/ if (image->storage_class != PseudoClass) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) (image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(q,quantum == 0 ? 0 : QuantumRange); SetPixelGreen(q,quantum == 0 ? 0 : QuantumRange); SetPixelBlue(q,quantum == 0 ? 0 : QuantumRange); if (image->storage_class == PseudoClass) SetPixelIndex(indexes+x+bit,quantum); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (int) (image->columns % 8); bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(q,quantum == 0 ? 0 : QuantumRange); SetPixelGreen(q,quantum == 0 ? 0 : QuantumRange); SetPixelBlue(q,quantum == 0 ? 0 : QuantumRange); if (image->storage_class == PseudoClass) SetPixelIndex(indexes+x+bit,quantum); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->storage_class == PseudoClass) for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*p++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else { /* Convert DirectColor scanline. */ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(*p)); SetPixelGreen(q,ScaleCharToQuantum(*(p+number_pixels))); SetPixelBlue(q,ScaleCharToQuantum(*(p+2*number_pixels))); if (image->colors != 0) { ssize_t index; index=(ssize_t) GetPixelRed(q); SetPixelRed(q,image->colormap[(ssize_t) ConstrainColormapIndex(image,index)].red); index=(ssize_t) GetPixelGreen(q); SetPixelGreen(q,image->colormap[(ssize_t) ConstrainColormapIndex(image,index)].green); index=(ssize_t) GetPixelRed(q); SetPixelBlue(q,image->colormap[(ssize_t) ConstrainColormapIndex(image,index)].blue); } SetPixelOpacity(q,image->matte != MagickFalse ? QuantumRange- ScaleCharToQuantum(*(p+number_pixels*3)) : OpaqueOpacity); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (image->storage_class == PseudoClass) (void) SyncImage(image); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; count=ReadBlob(image,1,&viff_info.identifier); if ((count != 0) && (viff_info.identifier == 0xab)) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (viff_info.identifier == 0xab)); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: Suspend exception processing if there are too many exceptions CWE ID: CWE-119
static Image *ReadVIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_ntscRGB 1 #define VFF_CM_NONE 0 #define VFF_DEP_DECORDER 0x4 #define VFF_DEP_NSORDER 0x8 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MAPTYP_2_BYTE 2 #define VFF_MAPTYP_4_BYTE 4 #define VFF_MAPTYP_FLOAT 5 #define VFF_MAPTYP_DOUBLE 7 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_MS_SHARED 3 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 #define VFF_TYP_2_BYTE 2 #define VFF_TYP_4_BYTE 4 #define VFF_TYP_FLOAT 5 #define VFF_TYP_DOUBLE 9 typedef struct _ViffInfo { unsigned char identifier, file_type, release, version, machine_dependency, reserve[3]; char comment[512]; unsigned int rows, columns, subrows; int x_offset, y_offset; float x_bits_per_pixel, y_bits_per_pixel; unsigned int location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; double min_value, scale_factor, value; Image *image; int bit; MagickBooleanType status; MagickSizeType number_pixels; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; register unsigned char *p; size_t bytes_per_pixel, max_packets, quantum; ssize_t count, y; unsigned char *pixels; unsigned long lsb_first; ViffInfo viff_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read VIFF header (1024 bytes). */ count=ReadBlob(image,1,&viff_info.identifier); do { /* Verify VIFF identifier. */ if ((count != 1) || ((unsigned char) viff_info.identifier != 0xab)) ThrowReaderException(CorruptImageError,"NotAVIFFImage"); /* Initialize VIFF image. 
*/ (void) ReadBlob(image,sizeof(viff_info.file_type),&viff_info.file_type); (void) ReadBlob(image,sizeof(viff_info.release),&viff_info.release); (void) ReadBlob(image,sizeof(viff_info.version),&viff_info.version); (void) ReadBlob(image,sizeof(viff_info.machine_dependency), &viff_info.machine_dependency); (void) ReadBlob(image,sizeof(viff_info.reserve),viff_info.reserve); (void) ReadBlob(image,512,(unsigned char *) viff_info.comment); viff_info.comment[511]='\0'; if (strlen(viff_info.comment) > 4) (void) SetImageProperty(image,"comment",viff_info.comment); if ((viff_info.machine_dependency == VFF_DEP_DECORDER) || (viff_info.machine_dependency == VFF_DEP_NSORDER)) image->endian=LSBEndian; else image->endian=MSBEndian; viff_info.rows=ReadBlobLong(image); viff_info.columns=ReadBlobLong(image); viff_info.subrows=ReadBlobLong(image); viff_info.x_offset=ReadBlobSignedLong(image); viff_info.y_offset=ReadBlobSignedLong(image); viff_info.x_bits_per_pixel=(float) ReadBlobLong(image); viff_info.y_bits_per_pixel=(float) ReadBlobLong(image); viff_info.location_type=ReadBlobLong(image); viff_info.location_dimension=ReadBlobLong(image); viff_info.number_of_images=ReadBlobLong(image); viff_info.number_data_bands=ReadBlobLong(image); viff_info.data_storage_type=ReadBlobLong(image); viff_info.data_encode_scheme=ReadBlobLong(image); viff_info.map_scheme=ReadBlobLong(image); viff_info.map_storage_type=ReadBlobLong(image); viff_info.map_rows=ReadBlobLong(image); viff_info.map_columns=ReadBlobLong(image); viff_info.map_subrows=ReadBlobLong(image); viff_info.map_enable=ReadBlobLong(image); viff_info.maps_per_cycle=ReadBlobLong(image); viff_info.color_space_model=ReadBlobLong(image); for (i=0; i < 420; i++) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); image->columns=viff_info.rows; image->rows=viff_info.columns; image->depth=viff_info.x_bits_per_pixel <= 8 ? 8UL : MAGICKCORE_QUANTUM_DEPTH; /* Verify that we can read this VIFF image. 
*/ number_pixels=(MagickSizeType) viff_info.columns*viff_info.rows; if (number_pixels != (size_t) number_pixels) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (number_pixels == 0) ThrowReaderException(CoderError,"ImageColumnOrRowSizeIsNotSupported"); if ((viff_info.number_data_bands < 1) || (viff_info.number_data_bands > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((viff_info.data_storage_type != VFF_TYP_BIT) && (viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.data_storage_type != VFF_TYP_2_BYTE) && (viff_info.data_storage_type != VFF_TYP_4_BYTE) && (viff_info.data_storage_type != VFF_TYP_FLOAT) && (viff_info.data_storage_type != VFF_TYP_DOUBLE)) ThrowReaderException(CoderError,"DataStorageTypeIsNotSupported"); if (viff_info.data_encode_scheme != VFF_DES_RAW) ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); if ((viff_info.map_storage_type != VFF_MAPTYP_NONE) && (viff_info.map_storage_type != VFF_MAPTYP_1_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_2_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_4_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_FLOAT) && (viff_info.map_storage_type != VFF_MAPTYP_DOUBLE)) ThrowReaderException(CoderError,"MapStorageTypeIsNotSupported"); if ((viff_info.color_space_model != VFF_CM_NONE) && (viff_info.color_space_model != VFF_CM_ntscRGB) && (viff_info.color_space_model != VFF_CM_genericRGB)) ThrowReaderException(CoderError,"ColorspaceModelIsNotSupported"); if (viff_info.location_type != VFF_LOC_IMPLICIT) ThrowReaderException(CoderError,"LocationTypeIsNotSupported"); if (viff_info.number_of_images != 1) ThrowReaderException(CoderError,"NumberOfImagesIsNotSupported"); if (viff_info.map_rows == 0) viff_info.map_scheme=VFF_MS_NONE; switch ((int) viff_info.map_scheme) { case VFF_MS_NONE: { if (viff_info.number_data_bands < 3) { /* Create linear color ramp. */ if (viff_info.data_storage_type == VFF_TYP_BIT) image->colors=2; else if (viff_info.data_storage_type == VFF_MAPTYP_1_BYTE) image->colors=256UL; else image->colors=image->depth <= 8 ? 256UL : 65536UL; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } break; } case VFF_MS_ONEPERBAND: case VFF_MS_SHARED: { unsigned char *viff_colormap; /* Allocate VIFF colormap. */ switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_1_BYTE: bytes_per_pixel=1; break; case VFF_MAPTYP_2_BYTE: bytes_per_pixel=2; break; case VFF_MAPTYP_4_BYTE: bytes_per_pixel=4; break; case VFF_MAPTYP_FLOAT: bytes_per_pixel=4; break; case VFF_MAPTYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } image->colors=viff_info.map_columns; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (viff_info.map_rows > (viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Read VIFF raster colormap. 
*/ (void) ReadBlob(image,bytes_per_pixel*image->colors*viff_info.map_rows, viff_colormap); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: { MSBOrderShort(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } case VFF_MAPTYP_4_BYTE: case VFF_MAPTYP_FLOAT: { MSBOrderLong(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } default: break; } for (i=0; i < (ssize_t) (viff_info.map_rows*image->colors); i++) { switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: value=1.0*((short *) viff_colormap)[i]; break; case VFF_MAPTYP_4_BYTE: value=1.0*((int *) viff_colormap)[i]; break; case VFF_MAPTYP_FLOAT: value=((float *) viff_colormap)[i]; break; case VFF_MAPTYP_DOUBLE: value=((double *) viff_colormap)[i]; break; default: value=1.0*viff_colormap[i]; break; } if (i < (ssize_t) image->colors) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) value); image->colormap[i].green=ScaleCharToQuantum((unsigned char) value); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) value); } else if (i < (ssize_t) (2*image->colors)) image->colormap[i % image->colors].green=ScaleCharToQuantum( (unsigned char) value); else if (i < (ssize_t) (3*image->colors)) image->colormap[i % image->colors].blue=ScaleCharToQuantum( (unsigned char) value); } viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); break; } default: ThrowReaderException(CoderError,"ColormapTypeNotSupported"); } /* Initialize image structure. */ image->matte=viff_info.number_data_bands == 4 ? MagickTrue : MagickFalse; image->storage_class= (viff_info.number_data_bands < 3 ? PseudoClass : DirectClass); image->columns=viff_info.rows; image->rows=viff_info.columns; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Allocate VIFF pixels. 
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: bytes_per_pixel=2; break; case VFF_TYP_4_BYTE: bytes_per_pixel=4; break; case VFF_TYP_FLOAT: bytes_per_pixel=4; break; case VFF_TYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } if (viff_info.data_storage_type == VFF_TYP_BIT) { if (HeapOverflowSanityCheck((image->columns+7UL) >> 3UL,image->rows) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=((image->columns+7UL) >> 3UL)*image->rows; } else { if (HeapOverflowSanityCheck(number_pixels,viff_info.number_data_bands) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=(size_t) (number_pixels*viff_info.number_data_bands); } pixels=(unsigned char *) AcquireQuantumMemory(MagickMax(number_pixels, max_packets),bytes_per_pixel*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,bytes_per_pixel*max_packets,pixels); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: { MSBOrderShort(pixels,bytes_per_pixel*max_packets); break; } case VFF_TYP_4_BYTE: case VFF_TYP_FLOAT: { MSBOrderLong(pixels,bytes_per_pixel*max_packets); break; } default: break; } min_value=0.0; scale_factor=1.0; if ((viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.map_scheme == VFF_MS_NONE)) { double max_value; /* Determine scale factor. */ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[0]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[0]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[0]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[0]; break; default: value=1.0*pixels[0]; break; } max_value=value; min_value=value; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (value > max_value) max_value=value; else if (value < min_value) min_value=value; } if ((min_value == 0) && (max_value == 0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(MagickRealType) QuantumRange/min_value; min_value=0; } else scale_factor=(MagickRealType) QuantumRange/(max_value-min_value); } /* Convert pixels to Quantum size. */ p=(unsigned char *) pixels; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (viff_info.map_scheme == VFF_MS_NONE) { value=(value-min_value)*scale_factor; if (value > QuantumRange) value=QuantumRange; else if (value < 0) value=0; } *p=(unsigned char) ((Quantum) value); p++; } /* Convert VIFF raster image to pixel packets. */ p=(unsigned char *) pixels; if (viff_info.data_storage_type == VFF_TYP_BIT) { /* Convert bitmap scanline. 
*/ if (image->storage_class != PseudoClass) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) (image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(q,quantum == 0 ? 0 : QuantumRange); SetPixelGreen(q,quantum == 0 ? 0 : QuantumRange); SetPixelBlue(q,quantum == 0 ? 0 : QuantumRange); if (image->storage_class == PseudoClass) SetPixelIndex(indexes+x+bit,quantum); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (int) (image->columns % 8); bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(q,quantum == 0 ? 0 : QuantumRange); SetPixelGreen(q,quantum == 0 ? 0 : QuantumRange); SetPixelBlue(q,quantum == 0 ? 0 : QuantumRange); if (image->storage_class == PseudoClass) SetPixelIndex(indexes+x+bit,quantum); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->storage_class == PseudoClass) for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*p++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else { /* Convert DirectColor scanline. */ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(*p)); SetPixelGreen(q,ScaleCharToQuantum(*(p+number_pixels))); SetPixelBlue(q,ScaleCharToQuantum(*(p+2*number_pixels))); if (image->colors != 0) { ssize_t index; index=(ssize_t) GetPixelRed(q); SetPixelRed(q,image->colormap[(ssize_t) ConstrainColormapIndex(image,index)].red); index=(ssize_t) GetPixelGreen(q); SetPixelGreen(q,image->colormap[(ssize_t) ConstrainColormapIndex(image,index)].green); index=(ssize_t) GetPixelRed(q); SetPixelBlue(q,image->colormap[(ssize_t) ConstrainColormapIndex(image,index)].blue); } SetPixelOpacity(q,image->matte != MagickFalse ? QuantumRange- ScaleCharToQuantum(*(p+number_pixels*3)) : OpaqueOpacity); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (image->storage_class == PseudoClass) (void) SyncImage(image); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; count=ReadBlob(image,1,&viff_info.identifier); if ((count != 0) && (viff_info.identifier == 0xab)) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (viff_info.identifier == 0xab)); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,540
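Note on the ReadVIFFImage fix in the record above (hedged): the patched reader swaps CheckMemoryOverflow() for HeapOverflowSanityCheck() before its AcquireQuantumMemory() calls. Neither helper's body appears in this row, so the sketch below is only a minimal stand-in for the kind of size_t multiplication guard such a check implies for CWE-119; the name would_overflow and its exact semantics are assumptions, not ImageMagick code.

    #include <stdint.h>

    /* Illustrative guard: reject a count*quantum product that would wrap
       around size_t and yield an undersized heap buffer. */
    static int would_overflow(size_t count, size_t quantum)
    {
      if (count == 0 || quantum == 0)
        return 0;                 /* a zero-sized request cannot wrap */
      return count > SIZE_MAX / quantum;
    }

A caller in the reader's position would test the same operand pairs the patch checks, such as (number_pixels, viff_info.number_data_bands) or the bitmap case's ((columns+7)>>3, rows), and raise a ResourceLimitError before allocating.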
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static PixelChannels **AcquirePixelThreadSet(const Image *image) { PixelChannels **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(PixelChannels **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (PixelChannels **) NULL) return((PixelChannels **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { register ssize_t j; pixels[i]=(PixelChannels *) AcquireQuantumMemory(image->columns, sizeof(**pixels)); if (pixels[i] == (PixelChannels *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) image->columns; j++) { register ssize_t k; for (k=0; k < MaxPixelChannels; k++) pixels[i][j].channel[k]=0.0; } } return(pixels); } Commit Message: https://github.com/ImageMagick/ImageMagick/issues/1586 CWE ID: CWE-119
static PixelChannels **AcquirePixelThreadSet(const Image *images) { const Image *next; PixelChannels **pixels; register ssize_t i; size_t columns, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(PixelChannels **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (PixelChannels **) NULL) return((PixelChannels **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); columns=images->columns; for (next=images; next != (Image *) NULL; next=next->next) columns=MagickMax(next->columns,columns); for (i=0; i < (ssize_t) number_threads; i++) { register ssize_t j; pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels)); if (pixels[i] == (PixelChannels *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) columns; j++) { register ssize_t k; for (k=0; k < MaxPixelChannels; k++) pixels[i][j].channel[k]=0.0; } } return(pixels); }
170,205
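Note on the AcquirePixelThreadSet fix above: the CWE-119 overflow came from sizing every per-thread row buffer by a single image's columns even though the caller can hand in an image list whose later frames are wider, so the fixed function scans the whole list with MagickMax() before allocating. The toy below (plain C, not ImageMagick code; the struct and function names are invented for illustration) isolates that sizing decision.

    #include <stddef.h>

    struct toy_image { size_t columns; struct toy_image *next; };

    /* Mirror of the fix: size shared scratch buffers by the widest frame
       in the list, not by the first frame alone. */
    static size_t max_columns(const struct toy_image *images)
    {
      size_t columns = (images != NULL) ? images->columns : 0;
      for (const struct toy_image *next = images; next != NULL; next = next->next)
        if (next->columns > columns)
          columns = next->columns;
      return columns;
    }

Allocating columns*sizeof(**pixels) per thread from this maximum keeps writes for any frame inside the buffer, which is exactly the property the original per-image sizing lost.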
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: cff_decoder_parse_charstrings( CFF_Decoder* decoder, FT_Byte* charstring_base, FT_ULong charstring_len ) { FT_Error error; CFF_Decoder_Zone* zone; FT_Byte* ip; FT_Byte* limit; CFF_Builder* builder = &decoder->builder; FT_Pos x, y; FT_Fixed seed; FT_Fixed* stack; FT_Int charstring_type = decoder->cff->top_font.font_dict.charstring_type; T2_Hints_Funcs hinter; /* set default width */ decoder->num_hints = 0; decoder->read_width = 1; /* compute random seed from stack address of parameter */ seed = (FT_Fixed)( ( (FT_PtrDist)(char*)&seed ^ (FT_PtrDist)(char*)&decoder ^ (FT_PtrDist)(char*)&charstring_base ) & FT_ULONG_MAX ) ; seed = ( seed ^ ( seed >> 10 ) ^ ( seed >> 20 ) ) & 0xFFFFL; if ( seed == 0 ) seed = 0x7384; /* initialize the decoder */ decoder->top = decoder->stack; decoder->zone = decoder->zones; zone = decoder->zones; stack = decoder->top; hinter = (T2_Hints_Funcs)builder->hints_funcs; builder->path_begun = 0; zone->base = charstring_base; limit = zone->limit = charstring_base + charstring_len; ip = zone->cursor = zone->base; error = CFF_Err_Ok; x = builder->pos_x; y = builder->pos_y; /* begin hints recording session, if any */ if ( hinter ) hinter->open( hinter->hints ); /* now execute loop */ while ( ip < limit ) { CFF_Operator op; FT_Byte v; /********************************************************************/ /* */ /* Decode operator or operand */ /* */ v = *ip++; if ( v >= 32 || v == 28 ) { FT_Int shift = 16; FT_Int32 val; /* this is an operand, push it on the stack */ if ( v == 28 ) { if ( ip + 1 >= limit ) goto Syntax_Error; val = (FT_Short)( ( (FT_Short)ip[0] << 8 ) | ip[1] ); ip += 2; } else if ( v < 247 ) val = (FT_Int32)v - 139; else if ( v < 251 ) { if ( ip >= limit ) goto Syntax_Error; val = ( (FT_Int32)v - 247 ) * 256 + *ip++ + 108; } else if ( v < 255 ) { if ( ip >= limit ) goto Syntax_Error; val = -( (FT_Int32)v - 251 ) * 256 - *ip++ - 108; } else { if ( ip + 3 >= limit ) goto Syntax_Error; val = ( (FT_Int32)ip[0] << 24 ) | ( (FT_Int32)ip[1] << 16 ) | ( (FT_Int32)ip[2] << 8 ) | ip[3]; ip += 4; if ( charstring_type == 2 ) shift = 0; } if ( decoder->top - stack >= CFF_MAX_OPERANDS ) goto Stack_Overflow; val <<= shift; *decoder->top++ = val; #ifdef FT_DEBUG_LEVEL_TRACE if ( !( val & 0xFFFFL ) ) FT_TRACE4(( " %ld", (FT_Int32)( val >> 16 ) )); else FT_TRACE4(( " %.2f", val / 65536.0 )); #endif } else { /* The specification says that normally arguments are to be taken */ /* from the bottom of the stack. However, this seems not to be */ /* correct, at least for Acroread 7.0.8 on GNU/Linux: It pops the */ /* arguments similar to a PS interpreter. 
*/ FT_Fixed* args = decoder->top; FT_Int num_args = (FT_Int)( args - decoder->stack ); FT_Int req_args; /* find operator */ op = cff_op_unknown; switch ( v ) { case 1: op = cff_op_hstem; break; case 3: op = cff_op_vstem; break; case 4: op = cff_op_vmoveto; break; case 5: op = cff_op_rlineto; break; case 6: op = cff_op_hlineto; break; case 7: op = cff_op_vlineto; break; case 8: op = cff_op_rrcurveto; break; case 9: op = cff_op_closepath; break; case 10: op = cff_op_callsubr; break; case 11: op = cff_op_return; break; case 12: { if ( ip >= limit ) goto Syntax_Error; v = *ip++; switch ( v ) { case 0: op = cff_op_dotsection; break; case 1: /* this is actually the Type1 vstem3 operator */ op = cff_op_vstem; break; case 2: /* this is actually the Type1 hstem3 operator */ op = cff_op_hstem; break; case 3: op = cff_op_and; break; case 4: op = cff_op_or; break; case 5: op = cff_op_not; break; case 6: op = cff_op_seac; break; case 7: op = cff_op_sbw; break; case 8: op = cff_op_store; break; case 9: op = cff_op_abs; break; case 10: op = cff_op_add; break; case 11: op = cff_op_sub; break; case 12: op = cff_op_div; break; case 13: op = cff_op_load; break; case 14: op = cff_op_neg; break; case 15: op = cff_op_eq; break; case 16: op = cff_op_callothersubr; break; case 17: op = cff_op_pop; break; case 18: op = cff_op_drop; break; case 20: op = cff_op_put; break; case 21: op = cff_op_get; break; case 22: op = cff_op_ifelse; break; case 23: op = cff_op_random; break; case 24: op = cff_op_mul; break; case 26: op = cff_op_sqrt; break; case 27: op = cff_op_dup; break; case 28: op = cff_op_exch; break; case 29: op = cff_op_index; break; case 30: op = cff_op_roll; break; case 33: op = cff_op_setcurrentpoint; break; case 34: op = cff_op_hflex; break; case 35: op = cff_op_flex; break; case 36: op = cff_op_hflex1; break; case 37: op = cff_op_flex1; break; default: /* decrement ip for syntax error message */ ip--; } } break; case 13: op = cff_op_hsbw; break; case 14: op = cff_op_endchar; break; case 16: op = cff_op_blend; break; case 18: op = cff_op_hstemhm; break; case 19: op = cff_op_hintmask; break; case 20: op = cff_op_cntrmask; break; case 21: op = cff_op_rmoveto; break; case 22: op = cff_op_hmoveto; break; case 23: op = cff_op_vstemhm; break; case 24: op = cff_op_rcurveline; break; case 25: op = cff_op_rlinecurve; break; case 26: op = cff_op_vvcurveto; break; case 27: op = cff_op_hhcurveto; break; case 29: op = cff_op_callgsubr; break; case 30: op = cff_op_vhcurveto; break; case 31: op = cff_op_hvcurveto; break; default: break; } if ( op == cff_op_unknown ) goto Syntax_Error; /* check arguments */ req_args = cff_argument_counts[op]; if ( req_args & CFF_COUNT_CHECK_WIDTH ) { if ( num_args > 0 && decoder->read_width ) { /* If `nominal_width' is non-zero, the number is really a */ /* difference against `nominal_width'. Else, the number here */ /* is truly a width, not a difference against `nominal_width'. */ /* If the font does not set `nominal_width', then */ /* `nominal_width' defaults to zero, and so we can set */ /* `glyph_width' to `nominal_width' plus number on the stack */ /* -- for either case. */ FT_Int set_width_ok; switch ( op ) { case cff_op_hmoveto: case cff_op_vmoveto: set_width_ok = num_args & 2; break; case cff_op_hstem: case cff_op_vstem: case cff_op_hstemhm: case cff_op_vstemhm: case cff_op_rmoveto: case cff_op_hintmask: case cff_op_cntrmask: set_width_ok = num_args & 1; break; case cff_op_endchar: /* If there is a width specified for endchar, we either have */ /* 1 argument or 5 arguments. 
We like to argue. */ set_width_ok = ( num_args == 5 ) || ( num_args == 1 ); break; default: set_width_ok = 0; break; } if ( set_width_ok ) { decoder->glyph_width = decoder->nominal_width + ( stack[0] >> 16 ); if ( decoder->width_only ) { /* we only want the advance width; stop here */ break; } /* Consumed an argument. */ num_args--; } } decoder->read_width = 0; req_args = 0; } req_args &= 0x000F; if ( num_args < req_args ) goto Stack_Underflow; args -= req_args; num_args -= req_args; /* At this point, `args' points to the first argument of the */ /* operand in case `req_args' isn't zero. Otherwise, we have */ /* to adjust `args' manually. */ /* Note that we only pop arguments from the stack which we */ /* really need and can digest so that we can continue in case */ /* of superfluous stack elements. */ switch ( op ) { case cff_op_hstem: case cff_op_vstem: case cff_op_hstemhm: case cff_op_vstemhm: /* the number of arguments is always even here */ FT_TRACE4(( op == cff_op_hstem ? " hstem\n" : ( op == cff_op_vstem ? " vstem\n" : ( op == cff_op_hstemhm ? " hstemhm\n" : " vstemhm\n" ) ) )); if ( hinter ) hinter->stems( hinter->hints, ( op == cff_op_hstem || op == cff_op_hstemhm ), num_args / 2, args - ( num_args & ~1 ) ); decoder->num_hints += num_args / 2; args = stack; break; case cff_op_hintmask: case cff_op_cntrmask: FT_TRACE4(( op == cff_op_hintmask ? " hintmask" : " cntrmask" )); /* implement vstem when needed -- */ /* the specification doesn't say it, but this also works */ /* with the 'cntrmask' operator */ /* */ if ( num_args > 0 ) { if ( hinter ) hinter->stems( hinter->hints, 0, num_args / 2, args - ( num_args & ~1 ) ); decoder->num_hints += num_args / 2; } if ( hinter ) { if ( op == cff_op_hintmask ) hinter->hintmask( hinter->hints, builder->current->n_points, decoder->num_hints, ip ); else hinter->counter( hinter->hints, decoder->num_hints, ip ); } #ifdef FT_DEBUG_LEVEL_TRACE { FT_UInt maskbyte; FT_TRACE4(( " (maskbytes: " )); for ( maskbyte = 0; maskbyte < (FT_UInt)(( decoder->num_hints + 7 ) >> 3); maskbyte++, ip++ ) FT_TRACE4(( "0x%02X", *ip )); FT_TRACE4(( ")\n" )); } #else ip += ( decoder->num_hints + 7 ) >> 3; #endif if ( ip >= limit ) goto Syntax_Error; args = stack; break; case cff_op_rmoveto: FT_TRACE4(( " rmoveto\n" )); cff_builder_close_contour( builder ); builder->path_begun = 0; x += args[-2]; y += args[-1]; args = stack; break; case cff_op_vmoveto: FT_TRACE4(( " vmoveto\n" )); cff_builder_close_contour( builder ); builder->path_begun = 0; y += args[-1]; args = stack; break; case cff_op_hmoveto: FT_TRACE4(( " hmoveto\n" )); cff_builder_close_contour( builder ); builder->path_begun = 0; x += args[-1]; args = stack; break; case cff_op_rlineto: FT_TRACE4(( " rlineto\n" )); if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, num_args / 2 ) ) goto Fail; if ( num_args < 2 ) goto Stack_Underflow; args -= num_args & ~1; while ( args < decoder->top ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 1 ); args += 2; } args = stack; break; case cff_op_hlineto: case cff_op_vlineto: { FT_Int phase = ( op == cff_op_hlineto ); FT_TRACE4(( op == cff_op_hlineto ? 
" hlineto\n" : " vlineto\n" )); if ( num_args < 1 ) goto Stack_Underflow; if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, num_args ) ) goto Fail; args = stack; while ( args < decoder->top ) { if ( phase ) x += args[0]; else y += args[0]; if ( cff_builder_add_point1( builder, x, y ) ) goto Fail; args++; phase ^= 1; } args = stack; } break; case cff_op_rrcurveto: { FT_Int nargs; FT_TRACE4(( " rrcurveto\n" )); if ( num_args < 6 ) goto Stack_Underflow; nargs = num_args - num_args % 6; if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, nargs / 2 ) ) goto Fail; args -= nargs; while ( args < decoder->top ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); x += args[4]; y += args[5]; cff_builder_add_point( builder, x, y, 1 ); args += 6; } args = stack; } break; case cff_op_vvcurveto: { FT_Int nargs; FT_TRACE4(( " vvcurveto\n" )); if ( num_args < 4 ) goto Stack_Underflow; /* if num_args isn't of the form 4n or 4n+1, */ /* we reduce it to 4n+1 */ nargs = num_args - num_args % 4; if ( num_args - nargs > 0 ) nargs += 1; if ( cff_builder_start_point( builder, x, y ) ) goto Fail; args -= nargs; if ( nargs & 1 ) { x += args[0]; args++; nargs--; } if ( check_points( builder, 3 * ( nargs / 4 ) ) ) goto Fail; while ( args < decoder->top ) { y += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); y += args[3]; cff_builder_add_point( builder, x, y, 1 ); args += 4; } args = stack; } break; case cff_op_hhcurveto: { FT_Int nargs; FT_TRACE4(( " hhcurveto\n" )); if ( num_args < 4 ) goto Stack_Underflow; /* if num_args isn't of the form 4n or 4n+1, */ /* we reduce it to 4n+1 */ nargs = num_args - num_args % 4; if ( num_args - nargs > 0 ) nargs += 1; if ( cff_builder_start_point( builder, x, y ) ) goto Fail; args -= nargs; if ( nargs & 1 ) { y += args[0]; args++; nargs--; } if ( check_points( builder, 3 * ( nargs / 4 ) ) ) goto Fail; while ( args < decoder->top ) { x += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); x += args[3]; cff_builder_add_point( builder, x, y, 1 ); args += 4; } args = stack; } break; case cff_op_vhcurveto: case cff_op_hvcurveto: { FT_Int phase; FT_Int nargs; FT_TRACE4(( op == cff_op_vhcurveto ? 
" vhcurveto\n" : " hvcurveto\n" )); if ( cff_builder_start_point( builder, x, y ) ) goto Fail; if ( num_args < 4 ) goto Stack_Underflow; /* if num_args isn't of the form 8n, 8n+1, 8n+4, or 8n+5, */ /* we reduce it to the largest one which fits */ nargs = num_args - num_args % 4; if ( num_args - nargs > 0 ) nargs += 1; args -= nargs; if ( check_points( builder, ( nargs / 4 ) * 3 ) ) goto Stack_Underflow; phase = ( op == cff_op_hvcurveto ); while ( nargs >= 4 ) { nargs -= 4; if ( phase ) { x += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); y += args[3]; if ( nargs == 1 ) x += args[4]; cff_builder_add_point( builder, x, y, 1 ); } else { y += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); x += args[3]; if ( nargs == 1 ) y += args[4]; cff_builder_add_point( builder, x, y, 1 ); } args += 4; phase ^= 1; } args = stack; } break; case cff_op_rlinecurve: { FT_Int num_lines; FT_Int nargs; FT_TRACE4(( " rlinecurve\n" )); if ( num_args < 8 ) goto Stack_Underflow; nargs = num_args & ~1; num_lines = ( nargs - 6 ) / 2; if ( cff_builder_start_point( builder, x, y ) || check_points( builder, num_lines + 3 ) ) goto Fail; args -= nargs; /* first, add the line segments */ while ( num_lines > 0 ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 1 ); args += 2; num_lines--; } /* then the curve */ x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); x += args[4]; y += args[5]; cff_builder_add_point( builder, x, y, 1 ); args = stack; } break; case cff_op_rcurveline: { FT_Int num_curves; FT_Int nargs; FT_TRACE4(( " rcurveline\n" )); if ( num_args < 8 ) goto Stack_Underflow; nargs = num_args - 2; nargs = nargs - nargs % 6 + 2; num_curves = ( nargs - 2 ) / 6; if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, num_curves * 3 + 2 ) ) goto Fail; args -= nargs; /* first, add the curves */ while ( num_curves > 0 ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); x += args[4]; y += args[5]; cff_builder_add_point( builder, x, y, 1 ); args += 6; num_curves--; } /* then the final line */ x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 1 ); args = stack; } break; case cff_op_hflex1: { FT_Pos start_y; FT_TRACE4(( " hflex1\n" )); /* adding five more points: 4 control points, 1 on-curve point */ /* -- make sure we have enough space for the start point if it */ /* needs to be added */ if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; /* record the starting point's y position for later use */ start_y = y; /* first control point */ x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); /* second control point */ x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); /* join point; on curve, with y-value the same as the last */ /* control point's y-value */ x += args[4]; cff_builder_add_point( builder, x, y, 1 ); /* third control point, with y-value the same as the join */ /* point's y-value */ x += args[5]; cff_builder_add_point( builder, x, y, 0 ); /* fourth control point */ x += args[6]; y += args[7]; cff_builder_add_point( builder, x, y, 0 ); /* ending point, with y-value the same as the start */ x += args[8]; y = start_y; cff_builder_add_point( builder, x, y, 1 ); args = 
stack; break; } case cff_op_hflex: { FT_Pos start_y; FT_TRACE4(( " hflex\n" )); /* adding six more points; 4 control points, 2 on-curve points */ if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; /* record the starting point's y-position for later use */ start_y = y; /* first control point */ x += args[0]; cff_builder_add_point( builder, x, y, 0 ); /* second control point */ x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); /* join point; on curve, with y-value the same as the last */ /* control point's y-value */ x += args[3]; cff_builder_add_point( builder, x, y, 1 ); /* third control point, with y-value the same as the join */ /* point's y-value */ x += args[4]; cff_builder_add_point( builder, x, y, 0 ); /* fourth control point */ x += args[5]; y = start_y; cff_builder_add_point( builder, x, y, 0 ); /* ending point, with y-value the same as the start point's */ /* y-value -- we don't add this point, though */ x += args[6]; cff_builder_add_point( builder, x, y, 1 ); args = stack; break; } case cff_op_flex1: { FT_Pos start_x, start_y; /* record start x, y values for */ /* alter use */ FT_Fixed dx = 0, dy = 0; /* used in horizontal/vertical */ /* algorithm below */ FT_Int horizontal, count; FT_Fixed* temp; FT_TRACE4(( " flex1\n" )); /* adding six more points; 4 control points, 2 on-curve points */ if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; /* record the starting point's x, y position for later use */ start_x = x; start_y = y; /* XXX: figure out whether this is supposed to be a horizontal */ /* or vertical flex; the Type 2 specification is vague... */ temp = args; /* grab up to the last argument */ for ( count = 5; count > 0; count-- ) { dx += temp[0]; dy += temp[1]; temp += 2; } if ( dx < 0 ) dx = -dx; if ( dy < 0 ) dy = -dy; /* strange test, but here it is... */ horizontal = ( dx > dy ); for ( count = 5; count > 0; count-- ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, (FT_Bool)( count == 3 ) ); args += 2; } /* is last operand an x- or y-delta? */ if ( horizontal ) { x += args[0]; y = start_y; } else { x = start_x; y += args[0]; } cff_builder_add_point( builder, x, y, 1 ); args = stack; break; } case cff_op_flex: { FT_UInt count; FT_TRACE4(( " flex\n" )); if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; for ( count = 6; count > 0; count-- ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, (FT_Bool)( count == 4 || count == 1 ) ); args += 2; } args = stack; } break; case cff_op_seac: FT_TRACE4(( " seac\n" )); error = cff_operator_seac( decoder, args[0], args[1], args[2], (FT_Int)( args[3] >> 16 ), (FT_Int)( args[4] >> 16 ) ); /* add current outline to the glyph slot */ FT_GlyphLoader_Add( builder->loader ); /* return now! */ FT_TRACE4(( "\n" )); return error; case cff_op_endchar: FT_TRACE4(( " endchar\n" )); /* We are going to emulate the seac operator. */ if ( num_args >= 4 ) { /* Save glyph width so that the subglyphs don't overwrite it. 
*/ FT_Pos glyph_width = decoder->glyph_width; error = cff_operator_seac( decoder, 0L, args[-4], args[-3], (FT_Int)( args[-2] >> 16 ), (FT_Int)( args[-1] >> 16 ) ); decoder->glyph_width = glyph_width; } else { if ( !error ) error = CFF_Err_Ok; cff_builder_close_contour( builder ); /* close hints recording session */ if ( hinter ) { if ( hinter->close( hinter->hints, builder->current->n_points ) ) goto Syntax_Error; /* apply hints to the loaded glyph outline now */ hinter->apply( hinter->hints, builder->current, (PSH_Globals)builder->hints_globals, decoder->hint_mode ); } /* add current outline to the glyph slot */ FT_GlyphLoader_Add( builder->loader ); } /* return now! */ FT_TRACE4(( "\n" )); return error; case cff_op_abs: FT_TRACE4(( " abs\n" )); if ( args[0] < 0 ) args[0] = -args[0]; args++; break; case cff_op_add: FT_TRACE4(( " add\n" )); args[0] += args[1]; args++; break; case cff_op_sub: FT_TRACE4(( " sub\n" )); args[0] -= args[1]; args++; break; case cff_op_div: FT_TRACE4(( " div\n" )); args[0] = FT_DivFix( args[0], args[1] ); args++; break; case cff_op_neg: FT_TRACE4(( " neg\n" )); args[0] = -args[0]; args++; break; case cff_op_random: { FT_Fixed Rand; FT_TRACE4(( " rand\n" )); Rand = seed; if ( Rand >= 0x8000L ) Rand++; args[0] = Rand; seed = FT_MulFix( seed, 0x10000L - seed ); if ( seed == 0 ) seed += 0x2873; args++; } break; case cff_op_mul: FT_TRACE4(( " mul\n" )); args[0] = FT_MulFix( args[0], args[1] ); args++; break; case cff_op_sqrt: FT_TRACE4(( " sqrt\n" )); if ( args[0] > 0 ) { FT_Int count = 9; FT_Fixed root = args[0]; FT_Fixed new_root; for (;;) { new_root = ( root + FT_DivFix( args[0], root ) + 1 ) >> 1; if ( new_root == root || count <= 0 ) break; root = new_root; } args[0] = new_root; } else args[0] = 0; args++; break; case cff_op_drop: /* nothing */ FT_TRACE4(( " drop\n" )); break; case cff_op_exch: { FT_Fixed tmp; FT_TRACE4(( " exch\n" )); tmp = args[0]; args[0] = args[1]; args[1] = tmp; args += 2; } break; case cff_op_index: { FT_Int idx = (FT_Int)( args[0] >> 16 ); FT_TRACE4(( " index\n" )); if ( idx < 0 ) idx = 0; else if ( idx > num_args - 2 ) idx = num_args - 2; args[0] = args[-( idx + 1 )]; args++; } break; case cff_op_roll: { FT_Int count = (FT_Int)( args[0] >> 16 ); FT_Int idx = (FT_Int)( args[1] >> 16 ); FT_TRACE4(( " roll\n" )); if ( count <= 0 ) count = 1; args -= count; if ( args < stack ) goto Stack_Underflow; if ( idx >= 0 ) { while ( idx > 0 ) { FT_Fixed tmp = args[count - 1]; FT_Int i; for ( i = count - 2; i >= 0; i-- ) args[i + 1] = args[i]; args[0] = tmp; idx--; } } else { while ( idx < 0 ) { FT_Fixed tmp = args[0]; FT_Int i; for ( i = 0; i < count - 1; i++ ) args[i] = args[i + 1]; args[count - 1] = tmp; idx++; } } args += count; } break; case cff_op_dup: FT_TRACE4(( " dup\n" )); args[1] = args[0]; args += 2; break; case cff_op_put: { FT_Fixed val = args[0]; FT_Int idx = (FT_Int)( args[1] >> 16 ); FT_TRACE4(( " put\n" )); if ( idx >= 0 && idx < CFF_MAX_TRANS_ELEMENTS ) decoder->buildchar[idx] = val; } break; case cff_op_get: { FT_Int idx = (FT_Int)( args[0] >> 16 ); FT_Fixed val = 0; FT_TRACE4(( " get\n" )); if ( idx >= 0 && idx < CFF_MAX_TRANS_ELEMENTS ) val = decoder->buildchar[idx]; args[0] = val; args++; } break; case cff_op_store: FT_TRACE4(( " store\n")); goto Unimplemented; case cff_op_load: FT_TRACE4(( " load\n" )); goto Unimplemented; case cff_op_dotsection: /* this operator is deprecated and ignored by the parser */ FT_TRACE4(( " dotsection\n" )); break; case cff_op_closepath: /* this is an invalid Type 2 operator; however, there */ /* 
exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " closepath (invalid op)\n" )); args = stack; break; case cff_op_hsbw: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " hsbw (invalid op)\n" )); decoder->glyph_width = decoder->nominal_width + ( args[1] >> 16 ); decoder->builder.left_bearing.x = args[0]; decoder->builder.left_bearing.y = 0; x = decoder->builder.pos_x + args[0]; y = decoder->builder.pos_y; args = stack; break; case cff_op_sbw: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " sbw (invalid op)\n" )); decoder->glyph_width = decoder->nominal_width + ( args[2] >> 16 ); decoder->builder.left_bearing.x = args[0]; decoder->builder.left_bearing.y = args[1]; x = decoder->builder.pos_x + args[0]; y = decoder->builder.pos_y + args[1]; args = stack; break; case cff_op_setcurrentpoint: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " setcurrentpoint (invalid op)\n" )); x = decoder->builder.pos_x + args[0]; y = decoder->builder.pos_y + args[1]; args = stack; break; case cff_op_callothersubr: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " callothersubr (invalid op)\n" )); /* subsequent `pop' operands should add the arguments, */ /* this is the implementation described for `unknown' other */ /* subroutines in the Type1 spec. */ args -= 2 + ( args[-2] >> 16 ); break; case cff_op_pop: /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " pop (invalid op)\n" )); args++; break; case cff_op_and: { FT_Fixed cond = args[0] && args[1]; FT_TRACE4(( " and\n" )); args[0] = cond ? 0x10000L : 0; args++; } break; case cff_op_or: { FT_Fixed cond = args[0] || args[1]; FT_TRACE4(( " or\n" )); args[0] = cond ? 0x10000L : 0; args++; } break; case cff_op_eq: { FT_Fixed cond = !args[0]; FT_TRACE4(( " eq\n" )); args[0] = cond ? 
0x10000L : 0; args++; } break; case cff_op_ifelse: { FT_Fixed cond = ( args[2] <= args[3] ); FT_TRACE4(( " ifelse\n" )); if ( !cond ) args[0] = args[1]; args++; } break; case cff_op_callsubr: { FT_UInt idx = (FT_UInt)( ( args[0] >> 16 ) + decoder->locals_bias ); FT_TRACE4(( " callsubr(%d)\n", idx )); if ( idx >= decoder->num_locals ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invalid local subr index\n" )); goto Syntax_Error; } if ( zone - decoder->zones >= CFF_MAX_SUBRS_CALLS ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " too many nested subrs\n" )); goto Syntax_Error; } zone->cursor = ip; /* save current instruction pointer */ zone++; zone->base = decoder->locals[idx]; zone->limit = decoder->locals[idx + 1]; zone->cursor = zone->base; if ( !zone->base || zone->limit == zone->base ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invoking empty subrs\n" )); goto Syntax_Error; } decoder->zone = zone; ip = zone->base; limit = zone->limit; } break; case cff_op_callgsubr: { FT_UInt idx = (FT_UInt)( ( args[0] >> 16 ) + decoder->globals_bias ); FT_TRACE4(( " callgsubr(%d)\n", idx )); if ( idx >= decoder->num_globals ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invalid global subr index\n" )); goto Syntax_Error; } if ( zone - decoder->zones >= CFF_MAX_SUBRS_CALLS ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " too many nested subrs\n" )); goto Syntax_Error; } zone->cursor = ip; /* save current instruction pointer */ zone++; zone->base = decoder->globals[idx]; zone->limit = decoder->globals[idx + 1]; zone->cursor = zone->base; if ( !zone->base || zone->limit == zone->base ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invoking empty subrs\n" )); goto Syntax_Error; } decoder->zone = zone; ip = zone->base; limit = zone->limit; } break; case cff_op_return: FT_TRACE4(( " return\n" )); if ( decoder->zone <= decoder->zones ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " unexpected return\n" )); goto Syntax_Error; } decoder->zone--; zone = decoder->zone; ip = zone->cursor; limit = zone->limit; break; default: Unimplemented: FT_ERROR(( "Unimplemented opcode: %d", ip[-1] )); if ( ip[-1] == 12 ) FT_ERROR(( " %d", ip[0] )); FT_ERROR(( "\n" )); return CFF_Err_Unimplemented_Feature; } decoder->top = args; } /* general operator processing */ } /* while ip < limit */ FT_TRACE4(( "..end..\n\n" )); Fail: return error; Syntax_Error: FT_TRACE4(( "cff_decoder_parse_charstrings: syntax error\n" )); return CFF_Err_Invalid_File_Format; Stack_Underflow: FT_TRACE4(( "cff_decoder_parse_charstrings: stack underflow\n" )); return CFF_Err_Too_Few_Arguments; Stack_Overflow: FT_TRACE4(( "cff_decoder_parse_charstrings: stack overflow\n" )); return CFF_Err_Stack_Overflow; } Commit Message: CWE ID: CWE-189
cff_decoder_parse_charstrings( CFF_Decoder* decoder, FT_Byte* charstring_base, FT_ULong charstring_len ) { FT_Error error; CFF_Decoder_Zone* zone; FT_Byte* ip; FT_Byte* limit; CFF_Builder* builder = &decoder->builder; FT_Pos x, y; FT_Fixed seed; FT_Fixed* stack; FT_Int charstring_type = decoder->cff->top_font.font_dict.charstring_type; T2_Hints_Funcs hinter; /* set default width */ decoder->num_hints = 0; decoder->read_width = 1; /* compute random seed from stack address of parameter */ seed = (FT_Fixed)( ( (FT_PtrDist)(char*)&seed ^ (FT_PtrDist)(char*)&decoder ^ (FT_PtrDist)(char*)&charstring_base ) & FT_ULONG_MAX ) ; seed = ( seed ^ ( seed >> 10 ) ^ ( seed >> 20 ) ) & 0xFFFFL; if ( seed == 0 ) seed = 0x7384; /* initialize the decoder */ decoder->top = decoder->stack; decoder->zone = decoder->zones; zone = decoder->zones; stack = decoder->top; hinter = (T2_Hints_Funcs)builder->hints_funcs; builder->path_begun = 0; zone->base = charstring_base; limit = zone->limit = charstring_base + charstring_len; ip = zone->cursor = zone->base; error = CFF_Err_Ok; x = builder->pos_x; y = builder->pos_y; /* begin hints recording session, if any */ if ( hinter ) hinter->open( hinter->hints ); /* now execute loop */ while ( ip < limit ) { CFF_Operator op; FT_Byte v; /********************************************************************/ /* */ /* Decode operator or operand */ /* */ v = *ip++; if ( v >= 32 || v == 28 ) { FT_Int shift = 16; FT_Int32 val; /* this is an operand, push it on the stack */ if ( v == 28 ) { if ( ip + 1 >= limit ) goto Syntax_Error; val = (FT_Short)( ( (FT_Short)ip[0] << 8 ) | ip[1] ); ip += 2; } else if ( v < 247 ) val = (FT_Int32)v - 139; else if ( v < 251 ) { if ( ip >= limit ) goto Syntax_Error; val = ( (FT_Int32)v - 247 ) * 256 + *ip++ + 108; } else if ( v < 255 ) { if ( ip >= limit ) goto Syntax_Error; val = -( (FT_Int32)v - 251 ) * 256 - *ip++ - 108; } else { if ( ip + 3 >= limit ) goto Syntax_Error; val = ( (FT_Int32)ip[0] << 24 ) | ( (FT_Int32)ip[1] << 16 ) | ( (FT_Int32)ip[2] << 8 ) | ip[3]; ip += 4; if ( charstring_type == 2 ) shift = 0; } if ( decoder->top - stack >= CFF_MAX_OPERANDS ) goto Stack_Overflow; val <<= shift; *decoder->top++ = val; #ifdef FT_DEBUG_LEVEL_TRACE if ( !( val & 0xFFFFL ) ) FT_TRACE4(( " %ld", (FT_Int32)( val >> 16 ) )); else FT_TRACE4(( " %.2f", val / 65536.0 )); #endif } else { /* The specification says that normally arguments are to be taken */ /* from the bottom of the stack. However, this seems not to be */ /* correct, at least for Acroread 7.0.8 on GNU/Linux: It pops the */ /* arguments similar to a PS interpreter. 
*/ FT_Fixed* args = decoder->top; FT_Int num_args = (FT_Int)( args - decoder->stack ); FT_Int req_args; /* find operator */ op = cff_op_unknown; switch ( v ) { case 1: op = cff_op_hstem; break; case 3: op = cff_op_vstem; break; case 4: op = cff_op_vmoveto; break; case 5: op = cff_op_rlineto; break; case 6: op = cff_op_hlineto; break; case 7: op = cff_op_vlineto; break; case 8: op = cff_op_rrcurveto; break; case 9: op = cff_op_closepath; break; case 10: op = cff_op_callsubr; break; case 11: op = cff_op_return; break; case 12: { if ( ip >= limit ) goto Syntax_Error; v = *ip++; switch ( v ) { case 0: op = cff_op_dotsection; break; case 1: /* this is actually the Type1 vstem3 operator */ op = cff_op_vstem; break; case 2: /* this is actually the Type1 hstem3 operator */ op = cff_op_hstem; break; case 3: op = cff_op_and; break; case 4: op = cff_op_or; break; case 5: op = cff_op_not; break; case 6: op = cff_op_seac; break; case 7: op = cff_op_sbw; break; case 8: op = cff_op_store; break; case 9: op = cff_op_abs; break; case 10: op = cff_op_add; break; case 11: op = cff_op_sub; break; case 12: op = cff_op_div; break; case 13: op = cff_op_load; break; case 14: op = cff_op_neg; break; case 15: op = cff_op_eq; break; case 16: op = cff_op_callothersubr; break; case 17: op = cff_op_pop; break; case 18: op = cff_op_drop; break; case 20: op = cff_op_put; break; case 21: op = cff_op_get; break; case 22: op = cff_op_ifelse; break; case 23: op = cff_op_random; break; case 24: op = cff_op_mul; break; case 26: op = cff_op_sqrt; break; case 27: op = cff_op_dup; break; case 28: op = cff_op_exch; break; case 29: op = cff_op_index; break; case 30: op = cff_op_roll; break; case 33: op = cff_op_setcurrentpoint; break; case 34: op = cff_op_hflex; break; case 35: op = cff_op_flex; break; case 36: op = cff_op_hflex1; break; case 37: op = cff_op_flex1; break; default: /* decrement ip for syntax error message */ ip--; } } break; case 13: op = cff_op_hsbw; break; case 14: op = cff_op_endchar; break; case 16: op = cff_op_blend; break; case 18: op = cff_op_hstemhm; break; case 19: op = cff_op_hintmask; break; case 20: op = cff_op_cntrmask; break; case 21: op = cff_op_rmoveto; break; case 22: op = cff_op_hmoveto; break; case 23: op = cff_op_vstemhm; break; case 24: op = cff_op_rcurveline; break; case 25: op = cff_op_rlinecurve; break; case 26: op = cff_op_vvcurveto; break; case 27: op = cff_op_hhcurveto; break; case 29: op = cff_op_callgsubr; break; case 30: op = cff_op_vhcurveto; break; case 31: op = cff_op_hvcurveto; break; default: break; } if ( op == cff_op_unknown ) goto Syntax_Error; /* check arguments */ req_args = cff_argument_counts[op]; if ( req_args & CFF_COUNT_CHECK_WIDTH ) { if ( num_args > 0 && decoder->read_width ) { /* If `nominal_width' is non-zero, the number is really a */ /* difference against `nominal_width'. Else, the number here */ /* is truly a width, not a difference against `nominal_width'. */ /* If the font does not set `nominal_width', then */ /* `nominal_width' defaults to zero, and so we can set */ /* `glyph_width' to `nominal_width' plus number on the stack */ /* -- for either case. */ FT_Int set_width_ok; switch ( op ) { case cff_op_hmoveto: case cff_op_vmoveto: set_width_ok = num_args & 2; break; case cff_op_hstem: case cff_op_vstem: case cff_op_hstemhm: case cff_op_vstemhm: case cff_op_rmoveto: case cff_op_hintmask: case cff_op_cntrmask: set_width_ok = num_args & 1; break; case cff_op_endchar: /* If there is a width specified for endchar, we either have */ /* 1 argument or 5 arguments. 
We like to argue. */ set_width_ok = ( num_args == 5 ) || ( num_args == 1 ); break; default: set_width_ok = 0; break; } if ( set_width_ok ) { decoder->glyph_width = decoder->nominal_width + ( stack[0] >> 16 ); if ( decoder->width_only ) { /* we only want the advance width; stop here */ break; } /* Consumed an argument. */ num_args--; } } decoder->read_width = 0; req_args = 0; } req_args &= 0x000F; if ( num_args < req_args ) goto Stack_Underflow; args -= req_args; num_args -= req_args; /* At this point, `args' points to the first argument of the */ /* operand in case `req_args' isn't zero. Otherwise, we have */ /* to adjust `args' manually. */ /* Note that we only pop arguments from the stack which we */ /* really need and can digest so that we can continue in case */ /* of superfluous stack elements. */ switch ( op ) { case cff_op_hstem: case cff_op_vstem: case cff_op_hstemhm: case cff_op_vstemhm: /* the number of arguments is always even here */ FT_TRACE4(( op == cff_op_hstem ? " hstem\n" : ( op == cff_op_vstem ? " vstem\n" : ( op == cff_op_hstemhm ? " hstemhm\n" : " vstemhm\n" ) ) )); if ( hinter ) hinter->stems( hinter->hints, ( op == cff_op_hstem || op == cff_op_hstemhm ), num_args / 2, args - ( num_args & ~1 ) ); decoder->num_hints += num_args / 2; args = stack; break; case cff_op_hintmask: case cff_op_cntrmask: FT_TRACE4(( op == cff_op_hintmask ? " hintmask" : " cntrmask" )); /* implement vstem when needed -- */ /* the specification doesn't say it, but this also works */ /* with the 'cntrmask' operator */ /* */ if ( num_args > 0 ) { if ( hinter ) hinter->stems( hinter->hints, 0, num_args / 2, args - ( num_args & ~1 ) ); decoder->num_hints += num_args / 2; } if ( hinter ) { if ( op == cff_op_hintmask ) hinter->hintmask( hinter->hints, builder->current->n_points, decoder->num_hints, ip ); else hinter->counter( hinter->hints, decoder->num_hints, ip ); } #ifdef FT_DEBUG_LEVEL_TRACE { FT_UInt maskbyte; FT_TRACE4(( " (maskbytes: " )); for ( maskbyte = 0; maskbyte < (FT_UInt)(( decoder->num_hints + 7 ) >> 3); maskbyte++, ip++ ) FT_TRACE4(( "0x%02X", *ip )); FT_TRACE4(( ")\n" )); } #else ip += ( decoder->num_hints + 7 ) >> 3; #endif if ( ip >= limit ) goto Syntax_Error; args = stack; break; case cff_op_rmoveto: FT_TRACE4(( " rmoveto\n" )); cff_builder_close_contour( builder ); builder->path_begun = 0; x += args[-2]; y += args[-1]; args = stack; break; case cff_op_vmoveto: FT_TRACE4(( " vmoveto\n" )); cff_builder_close_contour( builder ); builder->path_begun = 0; y += args[-1]; args = stack; break; case cff_op_hmoveto: FT_TRACE4(( " hmoveto\n" )); cff_builder_close_contour( builder ); builder->path_begun = 0; x += args[-1]; args = stack; break; case cff_op_rlineto: FT_TRACE4(( " rlineto\n" )); if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, num_args / 2 ) ) goto Fail; if ( num_args < 2 ) goto Stack_Underflow; args -= num_args & ~1; while ( args < decoder->top ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 1 ); args += 2; } args = stack; break; case cff_op_hlineto: case cff_op_vlineto: { FT_Int phase = ( op == cff_op_hlineto ); FT_TRACE4(( op == cff_op_hlineto ? 
" hlineto\n" : " vlineto\n" )); if ( num_args < 1 ) goto Stack_Underflow; if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, num_args ) ) goto Fail; args = stack; while ( args < decoder->top ) { if ( phase ) x += args[0]; else y += args[0]; if ( cff_builder_add_point1( builder, x, y ) ) goto Fail; args++; phase ^= 1; } args = stack; } break; case cff_op_rrcurveto: { FT_Int nargs; FT_TRACE4(( " rrcurveto\n" )); if ( num_args < 6 ) goto Stack_Underflow; nargs = num_args - num_args % 6; if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, nargs / 2 ) ) goto Fail; args -= nargs; while ( args < decoder->top ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); x += args[4]; y += args[5]; cff_builder_add_point( builder, x, y, 1 ); args += 6; } args = stack; } break; case cff_op_vvcurveto: { FT_Int nargs; FT_TRACE4(( " vvcurveto\n" )); if ( num_args < 4 ) goto Stack_Underflow; /* if num_args isn't of the form 4n or 4n+1, */ /* we reduce it to 4n+1 */ nargs = num_args - num_args % 4; if ( num_args - nargs > 0 ) nargs += 1; if ( cff_builder_start_point( builder, x, y ) ) goto Fail; args -= nargs; if ( nargs & 1 ) { x += args[0]; args++; nargs--; } if ( check_points( builder, 3 * ( nargs / 4 ) ) ) goto Fail; while ( args < decoder->top ) { y += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); y += args[3]; cff_builder_add_point( builder, x, y, 1 ); args += 4; } args = stack; } break; case cff_op_hhcurveto: { FT_Int nargs; FT_TRACE4(( " hhcurveto\n" )); if ( num_args < 4 ) goto Stack_Underflow; /* if num_args isn't of the form 4n or 4n+1, */ /* we reduce it to 4n+1 */ nargs = num_args - num_args % 4; if ( num_args - nargs > 0 ) nargs += 1; if ( cff_builder_start_point( builder, x, y ) ) goto Fail; args -= nargs; if ( nargs & 1 ) { y += args[0]; args++; nargs--; } if ( check_points( builder, 3 * ( nargs / 4 ) ) ) goto Fail; while ( args < decoder->top ) { x += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); x += args[3]; cff_builder_add_point( builder, x, y, 1 ); args += 4; } args = stack; } break; case cff_op_vhcurveto: case cff_op_hvcurveto: { FT_Int phase; FT_Int nargs; FT_TRACE4(( op == cff_op_vhcurveto ? 
" vhcurveto\n" : " hvcurveto\n" )); if ( cff_builder_start_point( builder, x, y ) ) goto Fail; if ( num_args < 4 ) goto Stack_Underflow; /* if num_args isn't of the form 8n, 8n+1, 8n+4, or 8n+5, */ /* we reduce it to the largest one which fits */ nargs = num_args - num_args % 4; if ( num_args - nargs > 0 ) nargs += 1; args -= nargs; if ( check_points( builder, ( nargs / 4 ) * 3 ) ) goto Stack_Underflow; phase = ( op == cff_op_hvcurveto ); while ( nargs >= 4 ) { nargs -= 4; if ( phase ) { x += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); y += args[3]; if ( nargs == 1 ) x += args[4]; cff_builder_add_point( builder, x, y, 1 ); } else { y += args[0]; cff_builder_add_point( builder, x, y, 0 ); x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); x += args[3]; if ( nargs == 1 ) y += args[4]; cff_builder_add_point( builder, x, y, 1 ); } args += 4; phase ^= 1; } args = stack; } break; case cff_op_rlinecurve: { FT_Int num_lines; FT_Int nargs; FT_TRACE4(( " rlinecurve\n" )); if ( num_args < 8 ) goto Stack_Underflow; nargs = num_args & ~1; num_lines = ( nargs - 6 ) / 2; if ( cff_builder_start_point( builder, x, y ) || check_points( builder, num_lines + 3 ) ) goto Fail; args -= nargs; /* first, add the line segments */ while ( num_lines > 0 ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 1 ); args += 2; num_lines--; } /* then the curve */ x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); x += args[4]; y += args[5]; cff_builder_add_point( builder, x, y, 1 ); args = stack; } break; case cff_op_rcurveline: { FT_Int num_curves; FT_Int nargs; FT_TRACE4(( " rcurveline\n" )); if ( num_args < 8 ) goto Stack_Underflow; nargs = num_args - 2; nargs = nargs - nargs % 6 + 2; num_curves = ( nargs - 2 ) / 6; if ( cff_builder_start_point ( builder, x, y ) || check_points( builder, num_curves * 3 + 2 ) ) goto Fail; args -= nargs; /* first, add the curves */ while ( num_curves > 0 ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); x += args[4]; y += args[5]; cff_builder_add_point( builder, x, y, 1 ); args += 6; num_curves--; } /* then the final line */ x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 1 ); args = stack; } break; case cff_op_hflex1: { FT_Pos start_y; FT_TRACE4(( " hflex1\n" )); /* adding five more points: 4 control points, 1 on-curve point */ /* -- make sure we have enough space for the start point if it */ /* needs to be added */ if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; /* record the starting point's y position for later use */ start_y = y; /* first control point */ x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, 0 ); /* second control point */ x += args[2]; y += args[3]; cff_builder_add_point( builder, x, y, 0 ); /* join point; on curve, with y-value the same as the last */ /* control point's y-value */ x += args[4]; cff_builder_add_point( builder, x, y, 1 ); /* third control point, with y-value the same as the join */ /* point's y-value */ x += args[5]; cff_builder_add_point( builder, x, y, 0 ); /* fourth control point */ x += args[6]; y += args[7]; cff_builder_add_point( builder, x, y, 0 ); /* ending point, with y-value the same as the start */ x += args[8]; y = start_y; cff_builder_add_point( builder, x, y, 1 ); args = 
stack; break; } case cff_op_hflex: { FT_Pos start_y; FT_TRACE4(( " hflex\n" )); /* adding six more points; 4 control points, 2 on-curve points */ if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; /* record the starting point's y-position for later use */ start_y = y; /* first control point */ x += args[0]; cff_builder_add_point( builder, x, y, 0 ); /* second control point */ x += args[1]; y += args[2]; cff_builder_add_point( builder, x, y, 0 ); /* join point; on curve, with y-value the same as the last */ /* control point's y-value */ x += args[3]; cff_builder_add_point( builder, x, y, 1 ); /* third control point, with y-value the same as the join */ /* point's y-value */ x += args[4]; cff_builder_add_point( builder, x, y, 0 ); /* fourth control point */ x += args[5]; y = start_y; cff_builder_add_point( builder, x, y, 0 ); /* ending point, with y-value the same as the start point's */ /* y-value -- we don't add this point, though */ x += args[6]; cff_builder_add_point( builder, x, y, 1 ); args = stack; break; } case cff_op_flex1: { FT_Pos start_x, start_y; /* record start x, y values for */ /* alter use */ FT_Fixed dx = 0, dy = 0; /* used in horizontal/vertical */ /* algorithm below */ FT_Int horizontal, count; FT_Fixed* temp; FT_TRACE4(( " flex1\n" )); /* adding six more points; 4 control points, 2 on-curve points */ if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; /* record the starting point's x, y position for later use */ start_x = x; start_y = y; /* XXX: figure out whether this is supposed to be a horizontal */ /* or vertical flex; the Type 2 specification is vague... */ temp = args; /* grab up to the last argument */ for ( count = 5; count > 0; count-- ) { dx += temp[0]; dy += temp[1]; temp += 2; } if ( dx < 0 ) dx = -dx; if ( dy < 0 ) dy = -dy; /* strange test, but here it is... */ horizontal = ( dx > dy ); for ( count = 5; count > 0; count-- ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, (FT_Bool)( count == 3 ) ); args += 2; } /* is last operand an x- or y-delta? */ if ( horizontal ) { x += args[0]; y = start_y; } else { x = start_x; y += args[0]; } cff_builder_add_point( builder, x, y, 1 ); args = stack; break; } case cff_op_flex: { FT_UInt count; FT_TRACE4(( " flex\n" )); if ( cff_builder_start_point( builder, x, y ) || check_points( builder, 6 ) ) goto Fail; for ( count = 6; count > 0; count-- ) { x += args[0]; y += args[1]; cff_builder_add_point( builder, x, y, (FT_Bool)( count == 4 || count == 1 ) ); args += 2; } args = stack; } break; case cff_op_seac: FT_TRACE4(( " seac\n" )); error = cff_operator_seac( decoder, args[0], args[1], args[2], (FT_Int)( args[3] >> 16 ), (FT_Int)( args[4] >> 16 ) ); /* add current outline to the glyph slot */ FT_GlyphLoader_Add( builder->loader ); /* return now! */ FT_TRACE4(( "\n" )); return error; case cff_op_endchar: FT_TRACE4(( " endchar\n" )); /* We are going to emulate the seac operator. */ if ( num_args >= 4 ) { /* Save glyph width so that the subglyphs don't overwrite it. 
*/ FT_Pos glyph_width = decoder->glyph_width; error = cff_operator_seac( decoder, 0L, args[-4], args[-3], (FT_Int)( args[-2] >> 16 ), (FT_Int)( args[-1] >> 16 ) ); decoder->glyph_width = glyph_width; } else { if ( !error ) error = CFF_Err_Ok; cff_builder_close_contour( builder ); /* close hints recording session */ if ( hinter ) { if ( hinter->close( hinter->hints, builder->current->n_points ) ) goto Syntax_Error; /* apply hints to the loaded glyph outline now */ hinter->apply( hinter->hints, builder->current, (PSH_Globals)builder->hints_globals, decoder->hint_mode ); } /* add current outline to the glyph slot */ FT_GlyphLoader_Add( builder->loader ); } /* return now! */ FT_TRACE4(( "\n" )); return error; case cff_op_abs: FT_TRACE4(( " abs\n" )); if ( args[0] < 0 ) args[0] = -args[0]; args++; break; case cff_op_add: FT_TRACE4(( " add\n" )); args[0] += args[1]; args++; break; case cff_op_sub: FT_TRACE4(( " sub\n" )); args[0] -= args[1]; args++; break; case cff_op_div: FT_TRACE4(( " div\n" )); args[0] = FT_DivFix( args[0], args[1] ); args++; break; case cff_op_neg: FT_TRACE4(( " neg\n" )); args[0] = -args[0]; args++; break; case cff_op_random: { FT_Fixed Rand; FT_TRACE4(( " rand\n" )); Rand = seed; if ( Rand >= 0x8000L ) Rand++; args[0] = Rand; seed = FT_MulFix( seed, 0x10000L - seed ); if ( seed == 0 ) seed += 0x2873; args++; } break; case cff_op_mul: FT_TRACE4(( " mul\n" )); args[0] = FT_MulFix( args[0], args[1] ); args++; break; case cff_op_sqrt: FT_TRACE4(( " sqrt\n" )); if ( args[0] > 0 ) { FT_Int count = 9; FT_Fixed root = args[0]; FT_Fixed new_root; for (;;) { new_root = ( root + FT_DivFix( args[0], root ) + 1 ) >> 1; if ( new_root == root || count <= 0 ) break; root = new_root; } args[0] = new_root; } else args[0] = 0; args++; break; case cff_op_drop: /* nothing */ FT_TRACE4(( " drop\n" )); break; case cff_op_exch: { FT_Fixed tmp; FT_TRACE4(( " exch\n" )); tmp = args[0]; args[0] = args[1]; args[1] = tmp; args += 2; } break; case cff_op_index: { FT_Int idx = (FT_Int)( args[0] >> 16 ); FT_TRACE4(( " index\n" )); if ( idx < 0 ) idx = 0; else if ( idx > num_args - 2 ) idx = num_args - 2; args[0] = args[-( idx + 1 )]; args++; } break; case cff_op_roll: { FT_Int count = (FT_Int)( args[0] >> 16 ); FT_Int idx = (FT_Int)( args[1] >> 16 ); FT_TRACE4(( " roll\n" )); if ( count <= 0 ) count = 1; args -= count; if ( args < stack ) goto Stack_Underflow; if ( idx >= 0 ) { while ( idx > 0 ) { FT_Fixed tmp = args[count - 1]; FT_Int i; for ( i = count - 2; i >= 0; i-- ) args[i + 1] = args[i]; args[0] = tmp; idx--; } } else { while ( idx < 0 ) { FT_Fixed tmp = args[0]; FT_Int i; for ( i = 0; i < count - 1; i++ ) args[i] = args[i + 1]; args[count - 1] = tmp; idx++; } } args += count; } break; case cff_op_dup: FT_TRACE4(( " dup\n" )); args[1] = args[0]; args += 2; break; case cff_op_put: { FT_Fixed val = args[0]; FT_Int idx = (FT_Int)( args[1] >> 16 ); FT_TRACE4(( " put\n" )); if ( idx >= 0 && idx < CFF_MAX_TRANS_ELEMENTS ) decoder->buildchar[idx] = val; } break; case cff_op_get: { FT_Int idx = (FT_Int)( args[0] >> 16 ); FT_Fixed val = 0; FT_TRACE4(( " get\n" )); if ( idx >= 0 && idx < CFF_MAX_TRANS_ELEMENTS ) val = decoder->buildchar[idx]; args[0] = val; args++; } break; case cff_op_store: FT_TRACE4(( " store\n")); goto Unimplemented; case cff_op_load: FT_TRACE4(( " load\n" )); goto Unimplemented; case cff_op_dotsection: /* this operator is deprecated and ignored by the parser */ FT_TRACE4(( " dotsection\n" )); break; case cff_op_closepath: /* this is an invalid Type 2 operator; however, there */ /* 
exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " closepath (invalid op)\n" )); args = stack; break; case cff_op_hsbw: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " hsbw (invalid op)\n" )); decoder->glyph_width = decoder->nominal_width + ( args[1] >> 16 ); decoder->builder.left_bearing.x = args[0]; decoder->builder.left_bearing.y = 0; x = decoder->builder.pos_x + args[0]; y = decoder->builder.pos_y; args = stack; break; case cff_op_sbw: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " sbw (invalid op)\n" )); decoder->glyph_width = decoder->nominal_width + ( args[2] >> 16 ); decoder->builder.left_bearing.x = args[0]; decoder->builder.left_bearing.y = args[1]; x = decoder->builder.pos_x + args[0]; y = decoder->builder.pos_y + args[1]; args = stack; break; case cff_op_setcurrentpoint: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " setcurrentpoint (invalid op)\n" )); x = decoder->builder.pos_x + args[0]; y = decoder->builder.pos_y + args[1]; args = stack; break; case cff_op_callothersubr: /* this is an invalid Type 2 operator; however, there */ /* exist fonts which are incorrectly converted from probably */ /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " callothersubr (invalid op)\n" )); /* subsequent `pop' operands should add the arguments, */ /* this is the implementation described for `unknown' other */ /* subroutines in the Type1 spec. */ args -= 2 + ( args[-2] >> 16 ); if ( args < stack ) goto Stack_Underflow; break; case cff_op_pop: /* Type 1 to CFF, and some parsers seem to accept it */ FT_TRACE4(( " pop (invalid op)\n" )); args++; break; case cff_op_and: { FT_Fixed cond = args[0] && args[1]; FT_TRACE4(( " and\n" )); args[0] = cond ? 0x10000L : 0; args++; } break; case cff_op_or: { FT_Fixed cond = args[0] || args[1]; FT_TRACE4(( " or\n" )); args[0] = cond ? 0x10000L : 0; args++; } break; case cff_op_eq: { FT_Fixed cond = !args[0]; FT_TRACE4(( " eq\n" )); args[0] = cond ? 
0x10000L : 0; args++; } break; case cff_op_ifelse: { FT_Fixed cond = ( args[2] <= args[3] ); FT_TRACE4(( " ifelse\n" )); if ( !cond ) args[0] = args[1]; args++; } break; case cff_op_callsubr: { FT_UInt idx = (FT_UInt)( ( args[0] >> 16 ) + decoder->locals_bias ); FT_TRACE4(( " callsubr(%d)\n", idx )); if ( idx >= decoder->num_locals ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invalid local subr index\n" )); goto Syntax_Error; } if ( zone - decoder->zones >= CFF_MAX_SUBRS_CALLS ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " too many nested subrs\n" )); goto Syntax_Error; } zone->cursor = ip; /* save current instruction pointer */ zone++; zone->base = decoder->locals[idx]; zone->limit = decoder->locals[idx + 1]; zone->cursor = zone->base; if ( !zone->base || zone->limit == zone->base ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invoking empty subrs\n" )); goto Syntax_Error; } decoder->zone = zone; ip = zone->base; limit = zone->limit; } break; case cff_op_callgsubr: { FT_UInt idx = (FT_UInt)( ( args[0] >> 16 ) + decoder->globals_bias ); FT_TRACE4(( " callgsubr(%d)\n", idx )); if ( idx >= decoder->num_globals ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invalid global subr index\n" )); goto Syntax_Error; } if ( zone - decoder->zones >= CFF_MAX_SUBRS_CALLS ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " too many nested subrs\n" )); goto Syntax_Error; } zone->cursor = ip; /* save current instruction pointer */ zone++; zone->base = decoder->globals[idx]; zone->limit = decoder->globals[idx + 1]; zone->cursor = zone->base; if ( !zone->base || zone->limit == zone->base ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " invoking empty subrs\n" )); goto Syntax_Error; } decoder->zone = zone; ip = zone->base; limit = zone->limit; } break; case cff_op_return: FT_TRACE4(( " return\n" )); if ( decoder->zone <= decoder->zones ) { FT_ERROR(( "cff_decoder_parse_charstrings:" " unexpected return\n" )); goto Syntax_Error; } decoder->zone--; zone = decoder->zone; ip = zone->cursor; limit = zone->limit; break; default: Unimplemented: FT_ERROR(( "Unimplemented opcode: %d", ip[-1] )); if ( ip[-1] == 12 ) FT_ERROR(( " %d", ip[0] )); FT_ERROR(( "\n" )); return CFF_Err_Unimplemented_Feature; } decoder->top = args; } /* general operator processing */ } /* while ip < limit */ FT_TRACE4(( "..end..\n\n" )); Fail: return error; Syntax_Error: FT_TRACE4(( "cff_decoder_parse_charstrings: syntax error\n" )); return CFF_Err_Invalid_File_Format; Stack_Underflow: FT_TRACE4(( "cff_decoder_parse_charstrings: stack underflow\n" )); return CFF_Err_Too_Few_Arguments; Stack_Overflow: FT_TRACE4(( "cff_decoder_parse_charstrings: stack overflow\n" )); return CFF_Err_Stack_Overflow; }
165,008
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ikev1_n_print(netdissect_options *ndo, u_char tpay _U_, const struct isakmp_gen *ext, u_int item_len, const u_char *ep, uint32_t phase _U_, uint32_t doi0 _U_, uint32_t proto0 _U_, int depth _U_) { const struct ikev1_pl_n *p; struct ikev1_pl_n n; const u_char *cp; const u_char *ep2; uint32_t doi; uint32_t proto; static const char *notify_error_str[] = { NULL, "INVALID-PAYLOAD-TYPE", "DOI-NOT-SUPPORTED", "SITUATION-NOT-SUPPORTED", "INVALID-COOKIE", "INVALID-MAJOR-VERSION", "INVALID-MINOR-VERSION", "INVALID-EXCHANGE-TYPE", "INVALID-FLAGS", "INVALID-MESSAGE-ID", "INVALID-PROTOCOL-ID", "INVALID-SPI", "INVALID-TRANSFORM-ID", "ATTRIBUTES-NOT-SUPPORTED", "NO-PROPOSAL-CHOSEN", "BAD-PROPOSAL-SYNTAX", "PAYLOAD-MALFORMED", "INVALID-KEY-INFORMATION", "INVALID-ID-INFORMATION", "INVALID-CERT-ENCODING", "INVALID-CERTIFICATE", "CERT-TYPE-UNSUPPORTED", "INVALID-CERT-AUTHORITY", "INVALID-HASH-INFORMATION", "AUTHENTICATION-FAILED", "INVALID-SIGNATURE", "ADDRESS-NOTIFICATION", "NOTIFY-SA-LIFETIME", "CERTIFICATE-UNAVAILABLE", "UNSUPPORTED-EXCHANGE-TYPE", "UNEQUAL-PAYLOAD-LENGTHS", }; static const char *ipsec_notify_error_str[] = { "RESERVED", }; static const char *notify_status_str[] = { "CONNECTED", }; static const char *ipsec_notify_status_str[] = { "RESPONDER-LIFETIME", "REPLAY-STATUS", "INITIAL-CONTACT", }; /* NOTE: these macro must be called with x in proper range */ /* 0 - 8191 */ #define NOTIFY_ERROR_STR(x) \ STR_OR_ID((x), notify_error_str) /* 8192 - 16383 */ #define IPSEC_NOTIFY_ERROR_STR(x) \ STR_OR_ID((u_int)((x) - 8192), ipsec_notify_error_str) /* 16384 - 24575 */ #define NOTIFY_STATUS_STR(x) \ STR_OR_ID((u_int)((x) - 16384), notify_status_str) /* 24576 - 32767 */ #define IPSEC_NOTIFY_STATUS_STR(x) \ STR_OR_ID((u_int)((x) - 24576), ipsec_notify_status_str) ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_N))); p = (const struct ikev1_pl_n *)ext; ND_TCHECK(*p); UNALIGNED_MEMCPY(&n, ext, sizeof(n)); doi = ntohl(n.doi); proto = n.prot_id; if (doi != 1) { ND_PRINT((ndo," doi=%d", doi)); ND_PRINT((ndo," proto=%d", proto)); if (ntohs(n.type) < 8192) ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 16384) ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); else if (ntohs(n.type) < 24576) ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type)))); else ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); if (n.spi_size) { ND_PRINT((ndo," spi=")); if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size)) goto trunc; } return (const u_char *)(p + 1) + n.spi_size; } ND_PRINT((ndo," doi=ipsec")); ND_PRINT((ndo," proto=%s", PROTOIDSTR(proto))); if (ntohs(n.type) < 8192) ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 16384) ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 24576) ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type)))); else if (ntohs(n.type) < 32768) ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_STATUS_STR(ntohs(n.type)))); else ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); if (n.spi_size) { ND_PRINT((ndo," spi=")); if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size)) goto trunc; } cp = (const u_char *)(p + 1) + n.spi_size; ep2 = (const u_char *)p + item_len; if (cp < ep) { switch (ntohs(n.type)) { case IPSECDOI_NTYPE_RESPONDER_LIFETIME: { const struct attrmap *map = oakley_t_map; size_t nmap = sizeof(oakley_t_map)/sizeof(oakley_t_map[0]); ND_PRINT((ndo," attrs=(")); while (cp < ep && cp < ep2) { cp = ikev1_attrmap_print(ndo, cp, (ep < ep2) ? 
ep : ep2, map, nmap); } ND_PRINT((ndo,")")); break; } case IPSECDOI_NTYPE_REPLAY_STATUS: ND_PRINT((ndo," status=(")); ND_PRINT((ndo,"replay detection %sabled", EXTRACT_32BITS(cp) ? "en" : "dis")); ND_PRINT((ndo,")")); break; default: /* * XXX - fill in more types here; see, for example, * draft-ietf-ipsec-notifymsg-04. */ if (ndo->ndo_vflag > 3) { ND_PRINT((ndo," data=(")); if (!rawprint(ndo, (const uint8_t *)(cp), ep - cp)) goto trunc; ND_PRINT((ndo,")")); } else { if (!ike_show_somedata(ndo, cp, ep)) goto trunc; } break; } } return (const u_char *)ext + item_len; trunc: ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_N))); return NULL; } Commit Message: CVE-2017-13039/IKEv1: Do more bounds checking. Have ikev1_attrmap_print() and ikev1_attr_print() do full bounds checking, and return null on a bounds overflow. Have their callers check for a null return. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't be rejected as an invalid capture. CWE ID: CWE-125
ikev1_n_print(netdissect_options *ndo, u_char tpay _U_, const struct isakmp_gen *ext, u_int item_len, const u_char *ep, uint32_t phase _U_, uint32_t doi0 _U_, uint32_t proto0 _U_, int depth _U_) { const struct ikev1_pl_n *p; struct ikev1_pl_n n; const u_char *cp; const u_char *ep2; uint32_t doi; uint32_t proto; static const char *notify_error_str[] = { NULL, "INVALID-PAYLOAD-TYPE", "DOI-NOT-SUPPORTED", "SITUATION-NOT-SUPPORTED", "INVALID-COOKIE", "INVALID-MAJOR-VERSION", "INVALID-MINOR-VERSION", "INVALID-EXCHANGE-TYPE", "INVALID-FLAGS", "INVALID-MESSAGE-ID", "INVALID-PROTOCOL-ID", "INVALID-SPI", "INVALID-TRANSFORM-ID", "ATTRIBUTES-NOT-SUPPORTED", "NO-PROPOSAL-CHOSEN", "BAD-PROPOSAL-SYNTAX", "PAYLOAD-MALFORMED", "INVALID-KEY-INFORMATION", "INVALID-ID-INFORMATION", "INVALID-CERT-ENCODING", "INVALID-CERTIFICATE", "CERT-TYPE-UNSUPPORTED", "INVALID-CERT-AUTHORITY", "INVALID-HASH-INFORMATION", "AUTHENTICATION-FAILED", "INVALID-SIGNATURE", "ADDRESS-NOTIFICATION", "NOTIFY-SA-LIFETIME", "CERTIFICATE-UNAVAILABLE", "UNSUPPORTED-EXCHANGE-TYPE", "UNEQUAL-PAYLOAD-LENGTHS", }; static const char *ipsec_notify_error_str[] = { "RESERVED", }; static const char *notify_status_str[] = { "CONNECTED", }; static const char *ipsec_notify_status_str[] = { "RESPONDER-LIFETIME", "REPLAY-STATUS", "INITIAL-CONTACT", }; /* NOTE: these macro must be called with x in proper range */ /* 0 - 8191 */ #define NOTIFY_ERROR_STR(x) \ STR_OR_ID((x), notify_error_str) /* 8192 - 16383 */ #define IPSEC_NOTIFY_ERROR_STR(x) \ STR_OR_ID((u_int)((x) - 8192), ipsec_notify_error_str) /* 16384 - 24575 */ #define NOTIFY_STATUS_STR(x) \ STR_OR_ID((u_int)((x) - 16384), notify_status_str) /* 24576 - 32767 */ #define IPSEC_NOTIFY_STATUS_STR(x) \ STR_OR_ID((u_int)((x) - 24576), ipsec_notify_status_str) ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_N))); p = (const struct ikev1_pl_n *)ext; ND_TCHECK(*p); UNALIGNED_MEMCPY(&n, ext, sizeof(n)); doi = ntohl(n.doi); proto = n.prot_id; if (doi != 1) { ND_PRINT((ndo," doi=%d", doi)); ND_PRINT((ndo," proto=%d", proto)); if (ntohs(n.type) < 8192) ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 16384) ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); else if (ntohs(n.type) < 24576) ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type)))); else ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); if (n.spi_size) { ND_PRINT((ndo," spi=")); if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size)) goto trunc; } return (const u_char *)(p + 1) + n.spi_size; } ND_PRINT((ndo," doi=ipsec")); ND_PRINT((ndo," proto=%s", PROTOIDSTR(proto))); if (ntohs(n.type) < 8192) ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 16384) ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 24576) ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type)))); else if (ntohs(n.type) < 32768) ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_STATUS_STR(ntohs(n.type)))); else ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); if (n.spi_size) { ND_PRINT((ndo," spi=")); if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size)) goto trunc; } cp = (const u_char *)(p + 1) + n.spi_size; ep2 = (const u_char *)p + item_len; if (cp < ep) { switch (ntohs(n.type)) { case IPSECDOI_NTYPE_RESPONDER_LIFETIME: { const struct attrmap *map = oakley_t_map; size_t nmap = sizeof(oakley_t_map)/sizeof(oakley_t_map[0]); ND_PRINT((ndo," attrs=(")); while (cp < ep && cp < ep2) { cp = ikev1_attrmap_print(ndo, cp, ep2, map, nmap); if (cp == NULL) { 
ND_PRINT((ndo,")")); goto trunc; } } ND_PRINT((ndo,")")); break; } case IPSECDOI_NTYPE_REPLAY_STATUS: ND_PRINT((ndo," status=(")); ND_PRINT((ndo,"replay detection %sabled", EXTRACT_32BITS(cp) ? "en" : "dis")); ND_PRINT((ndo,")")); break; default: /* * XXX - fill in more types here; see, for example, * draft-ietf-ipsec-notifymsg-04. */ if (ndo->ndo_vflag > 3) { ND_PRINT((ndo," data=(")); if (!rawprint(ndo, (const uint8_t *)(cp), ep - cp)) goto trunc; ND_PRINT((ndo,")")); } else { if (!ike_show_somedata(ndo, cp, ep)) goto trunc; } break; } } return (const u_char *)ext + item_len; trunc: ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_N))); return NULL; }
167,841
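The tcpdump fix in the record above follows a common parser-hardening pattern: the attribute printer does its own bounds checking against the end of the captured data and returns NULL on truncation, and every caller stops its loop when it sees NULL. The C sketch below is a minimal, self-contained illustration of that pattern, assuming a hypothetical tlv_print() helper and a simple type/length/value layout; it is not tcpdump code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical TLV printer: returns the pointer just past the attribute,
 * or NULL when the attribute would read past `end` (truncated capture). */
static const uint8_t *tlv_print(const uint8_t *cp, const uint8_t *end)
{
    size_t len;

    if (end - cp < 2)                     /* need type and length bytes */
        return NULL;
    len = cp[1];
    if ((size_t)(end - cp - 2) < len)     /* value would overrun the buffer */
        return NULL;
    printf("attr type=%u len=%zu\n", (unsigned)cp[0], len);
    return cp + 2 + len;
}

/* The caller honors the NULL return instead of walking past the buffer,
 * mirroring the `if (cp == NULL) goto trunc;` added by the fix. */
static void dump_attrs(const uint8_t *cp, const uint8_t *end)
{
    while (cp < end) {
        cp = tlv_print(cp, end);
        if (cp == NULL) {
            printf("[truncated]\n");
            return;
        }
    }
}

int main(void)
{
    const uint8_t pkt[] = { 0x01, 0x02, 0xaa, 0xbb,   /* complete attribute */
                            0x03, 0x10, 0xcc };       /* claims 16 bytes, only 1 present */
    dump_attrs(pkt, pkt + sizeof(pkt));
    return 0;
}

Running this prints the first attribute and then "[truncated]" for the second, instead of reading past the end of the packet buffer.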
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WasmCompileStreamingImpl(const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Isolate* isolate = args.GetIsolate(); ScriptState* script_state = ScriptState::ForRelevantRealm(args); v8::Local<v8::Function> compile_callback = v8::Function::New(isolate, CompileFromResponseCallback); V8SetReturnValue(args, ScriptPromise::Cast(script_state, args[0]) .Then(compile_callback) .V8Value()); } Commit Message: [wasm] Use correct bindings APIs Use ScriptState::ForCurrentRealm in static methods, instead of ForRelevantRealm(). Bug: chromium:788453 Change-Id: I63bd25e3f5a4e8d7cbaff945da8df0d71aa65527 Reviewed-on: https://chromium-review.googlesource.com/795096 Commit-Queue: Mircea Trofin <[email protected]> Reviewed-by: Yuki Shiino <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Cr-Commit-Position: refs/heads/master@{#520174} CWE ID: CWE-79
void WasmCompileStreamingImpl(const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Isolate* isolate = args.GetIsolate(); ScriptState* script_state = ScriptState::ForCurrentRealm(args); v8::Local<v8::Function> compile_callback = v8::Function::New(isolate, CompileFromResponseCallback); V8SetReturnValue(args, ScriptPromise::Cast(script_state, args[0]) .Then(compile_callback) .V8Value()); }
172,939
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void LauncherView::CalculateIdealBounds(IdealBounds* bounds) { int available_size = primary_axis_coordinate(width(), height()); if (!available_size) return; int x = primary_axis_coordinate(kLeadingInset, 0); int y = primary_axis_coordinate(0, kLeadingInset); for (int i = 0; i < view_model_->view_size(); ++i) { view_model_->set_ideal_bounds(i, gfx::Rect( x, y, kLauncherPreferredSize, kLauncherPreferredSize)); x = primary_axis_coordinate(x + kLauncherPreferredSize + kButtonSpacing, 0); y = primary_axis_coordinate(0, y + kLauncherPreferredSize + kButtonSpacing); } if (view_model_->view_size() > 0) { view_model_->set_ideal_bounds(0, gfx::Rect(gfx::Size( primary_axis_coordinate(kLeadingInset + kLauncherPreferredSize, kLauncherPreferredSize), primary_axis_coordinate(kLauncherPreferredSize, kLeadingInset + kLauncherPreferredSize)))); } bounds->overflow_bounds.set_size( gfx::Size(kLauncherPreferredSize, kLauncherPreferredSize)); last_visible_index_ = DetermineLastVisibleIndex( available_size - kLeadingInset - kLauncherPreferredSize - kButtonSpacing - kLauncherPreferredSize); int app_list_index = view_model_->view_size() - 1; bool show_overflow = (last_visible_index_ + 1 < app_list_index); for (int i = 0; i < view_model_->view_size(); ++i) { view_model_->view_at(i)->SetVisible( i == app_list_index || i <= last_visible_index_); } overflow_button_->SetVisible(show_overflow); if (show_overflow) { DCHECK_NE(0, view_model_->view_size()); if (last_visible_index_ == -1) { x = primary_axis_coordinate(kLeadingInset, 0); y = primary_axis_coordinate(0, kLeadingInset); } else { x = primary_axis_coordinate( view_model_->ideal_bounds(last_visible_index_).right(), 0); y = primary_axis_coordinate(0, view_model_->ideal_bounds(last_visible_index_).bottom()); } gfx::Rect app_list_bounds = view_model_->ideal_bounds(app_list_index); app_list_bounds.set_x(x); app_list_bounds.set_y(y); view_model_->set_ideal_bounds(app_list_index, app_list_bounds); x = primary_axis_coordinate(x + kLauncherPreferredSize + kButtonSpacing, 0); y = primary_axis_coordinate(0, y + kLauncherPreferredSize + kButtonSpacing); bounds->overflow_bounds.set_x(x); bounds->overflow_bounds.set_y(y); } } Commit Message: ash: Add launcher overflow bubble. - Host a LauncherView in bubble to display overflown items; - Mouse wheel and two-finger scroll to scroll the LauncherView in bubble in case overflow bubble is overflown; - Fit bubble when items are added/removed; - Keep launcher bar on screen when the bubble is shown; BUG=128054 TEST=Verify launcher overflown items are in a bubble instead of menu. Review URL: https://chromiumcodereview.appspot.com/10659003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146460 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
void LauncherView::CalculateIdealBounds(IdealBounds* bounds) { int available_size = primary_axis_coordinate(width(), height()); if (!available_size) return; int x = primary_axis_coordinate(leading_inset(), 0); int y = primary_axis_coordinate(0, leading_inset()); for (int i = 0; i < view_model_->view_size(); ++i) { if (i < first_visible_index_) { view_model_->set_ideal_bounds(i, gfx::Rect(x, y, 0, 0)); continue; } view_model_->set_ideal_bounds(i, gfx::Rect( x, y, kLauncherPreferredSize, kLauncherPreferredSize)); x = primary_axis_coordinate(x + kLauncherPreferredSize + kButtonSpacing, 0); y = primary_axis_coordinate(0, y + kLauncherPreferredSize + kButtonSpacing); } int app_list_index = view_model_->view_size() - 1; if (is_overflow_mode()) { last_visible_index_ = app_list_index - 1; for (int i = 0; i < view_model_->view_size(); ++i) { view_model_->view_at(i)->SetVisible( i >= first_visible_index_ && i <= last_visible_index_); } return; } if (view_model_->view_size() > 0) { view_model_->set_ideal_bounds(0, gfx::Rect(gfx::Size( primary_axis_coordinate(leading_inset() + kLauncherPreferredSize, kLauncherPreferredSize), primary_axis_coordinate(kLauncherPreferredSize, leading_inset() + kLauncherPreferredSize)))); } bounds->overflow_bounds.set_size( gfx::Size(kLauncherPreferredSize, kLauncherPreferredSize)); last_visible_index_ = DetermineLastVisibleIndex( available_size - leading_inset() - kLauncherPreferredSize - kButtonSpacing - kLauncherPreferredSize); bool show_overflow = (last_visible_index_ + 1 < app_list_index); for (int i = 0; i < view_model_->view_size(); ++i) { view_model_->view_at(i)->SetVisible( i == app_list_index || i <= last_visible_index_); } overflow_button_->SetVisible(show_overflow); if (show_overflow) { DCHECK_NE(0, view_model_->view_size()); if (last_visible_index_ == -1) { x = primary_axis_coordinate(leading_inset(), 0); y = primary_axis_coordinate(0, leading_inset()); } else { x = primary_axis_coordinate( view_model_->ideal_bounds(last_visible_index_).right(), 0); y = primary_axis_coordinate(0, view_model_->ideal_bounds(last_visible_index_).bottom()); } gfx::Rect app_list_bounds = view_model_->ideal_bounds(app_list_index); bounds->overflow_bounds.set_x(x); bounds->overflow_bounds.set_y(y); x = primary_axis_coordinate(x + kLauncherPreferredSize + kButtonSpacing, 0); y = primary_axis_coordinate(0, y + kLauncherPreferredSize + kButtonSpacing); app_list_bounds.set_x(x); app_list_bounds.set_y(y); view_model_->set_ideal_bounds(app_list_index, app_list_bounds); } else { if (overflow_bubble_.get()) overflow_bubble_->Hide(); } }
170,888
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static MagickBooleanType WriteINLINEImage(const ImageInfo *image_info, Image *image) { char *base64, message[MaxTextExtent]; const MagickInfo *magick_info; ExceptionInfo *exception; Image *write_image; ImageInfo *write_info; MagickBooleanType status; size_t blob_length, encode_length; unsigned char *blob; /* Convert image to base64-encoding. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); write_info=CloneImageInfo(image_info); (void) SetImageInfo(write_info,1,exception); if (LocaleCompare(write_info->magick,"INLINE") == 0) (void) CopyMagickString(write_info->magick,image->magick,MaxTextExtent); magick_info=GetMagickInfo(write_info->magick,exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickMimeType(magick_info) == (const char *) NULL)) ThrowWriterException(CorruptImageError,"ImageTypeNotSupported"); (void) CopyMagickString(image->filename,write_info->filename,MaxTextExtent); blob_length=2048; write_image=CloneImage(image,0,0,MagickTrue,exception); if (write_image == (Image *) NULL) { write_info=DestroyImageInfo(write_info); return(MagickTrue); } blob=(unsigned char *) ImageToBlob(write_info,write_image,&blob_length, exception); write_image=DestroyImage(write_image); write_info=DestroyImageInfo(write_info); if (blob == (unsigned char *) NULL) return(MagickFalse); encode_length=0; base64=Base64Encode(blob,blob_length,&encode_length); blob=(unsigned char *) RelinquishMagickMemory(blob); if (base64 == (char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Write base64-encoded image. */ status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) { base64=DestroyString(base64); return(status); } (void) FormatLocaleString(message,MaxTextExtent,"data:%s;base64,", GetMagickMimeType(magick_info)); (void) WriteBlobString(image,message); (void) WriteBlobString(image,base64); base64=DestroyString(base64); return(MagickTrue); } Commit Message: https://github.com/ImageMagick/ImageMagick/issues/572 CWE ID: CWE-772
static MagickBooleanType WriteINLINEImage(const ImageInfo *image_info, Image *image) { char *base64, message[MaxTextExtent]; const MagickInfo *magick_info; ExceptionInfo *exception; Image *write_image; ImageInfo *write_info; MagickBooleanType status; size_t blob_length, encode_length; unsigned char *blob; /* Convert image to base64-encoding. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); write_info=CloneImageInfo(image_info); (void) SetImageInfo(write_info,1,exception); if (LocaleCompare(write_info->magick,"INLINE") == 0) (void) CopyMagickString(write_info->magick,image->magick,MaxTextExtent); magick_info=GetMagickInfo(write_info->magick,exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickMimeType(magick_info) == (const char *) NULL)) { write_info=DestroyImageInfo(write_info); ThrowWriterException(CorruptImageError,"ImageTypeNotSupported"); } (void) CopyMagickString(image->filename,write_info->filename,MaxTextExtent); blob_length=2048; write_image=CloneImage(image,0,0,MagickTrue,exception); if (write_image == (Image *) NULL) { write_info=DestroyImageInfo(write_info); return(MagickTrue); } blob=(unsigned char *) ImageToBlob(write_info,write_image,&blob_length, exception); write_image=DestroyImage(write_image); write_info=DestroyImageInfo(write_info); if (blob == (unsigned char *) NULL) return(MagickFalse); encode_length=0; base64=Base64Encode(blob,blob_length,&encode_length); blob=(unsigned char *) RelinquishMagickMemory(blob); if (base64 == (char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Write base64-encoded image. */ status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) { base64=DestroyString(base64); return(status); } (void) FormatLocaleString(message,MaxTextExtent,"data:%s;base64,", GetMagickMimeType(magick_info)); (void) WriteBlobString(image,message); (void) WriteBlobString(image,base64); base64=DestroyString(base64); return(MagickTrue); }
167,971
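The ImageMagick change in the record above is a missing-release fix (CWE-772): write_info is cloned near the top of the writer and has to be destroyed on the early error path as well, not only on success. Below is a minimal sketch of the same discipline in plain C using a single cleanup label; the load_config()/free_config() helpers are hypothetical stand-ins, not ImageMagick APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct config { char *name; };

/* Hypothetical helpers standing in for clone/destroy pairs such as
 * CloneImageInfo()/DestroyImageInfo(). */
static struct config *load_config(const char *name)
{
    struct config *c = malloc(sizeof(*c));
    if (c == NULL)
        return NULL;
    c->name = strdup(name);
    if (c->name == NULL) {
        free(c);
        return NULL;
    }
    return c;
}

static void free_config(struct config *c)
{
    if (c != NULL) {
        free(c->name);
        free(c);
    }
}

/* Every exit path after the allocation goes through `done`, so the
 * config is released on errors as well as on success. */
static int write_with_config(const char *name, int simulate_error)
{
    int rc = -1;
    struct config *c = load_config(name);
    if (c == NULL)
        return -1;
    if (simulate_error)            /* the kind of path the original code leaked on */
        goto done;
    printf("writing with %s\n", c->name);
    rc = 0;
done:
    free_config(c);
    return rc;
}

int main(void)
{
    write_with_config("demo", 0);  /* success path */
    write_with_config("demo", 1);  /* error path still frees the config */
    return 0;
}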
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHP_FUNCTION(mdecrypt_generic) { zval *mcryptind; char *data; int data_len; php_mcrypt *pm; char* data_s; int block_size, data_size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &mcryptind, &data, &data_len) == FAILURE) { return; } ZEND_FETCH_RESOURCE(pm, php_mcrypt * , &mcryptind, -1, "MCrypt", le_mcrypt); PHP_MCRYPT_INIT_CHECK if (data_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "An empty string was passed"); RETURN_FALSE } /* Check blocksize */ if (mcrypt_enc_is_block_mode(pm->td) == 1) { /* It's a block algorithm */ block_size = mcrypt_enc_get_block_size(pm->td); data_size = (((data_len - 1) / block_size) + 1) * block_size; data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } else { /* It's not a block algorithm */ data_size = data_len; data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } mdecrypt_generic(pm->td, data_s, data_size); RETVAL_STRINGL(data_s, data_size, 1); efree(data_s); } Commit Message: Fix bug #72455: Heap Overflow due to integer overflows CWE ID: CWE-190
PHP_FUNCTION(mdecrypt_generic) { zval *mcryptind; char *data; int data_len; php_mcrypt *pm; char* data_s; int block_size, data_size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &mcryptind, &data, &data_len) == FAILURE) { return; } ZEND_FETCH_RESOURCE(pm, php_mcrypt * , &mcryptind, -1, "MCrypt", le_mcrypt); PHP_MCRYPT_INIT_CHECK if (data_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "An empty string was passed"); RETURN_FALSE } /* Check blocksize */ if (mcrypt_enc_is_block_mode(pm->td) == 1) { /* It's a block algorithm */ block_size = mcrypt_enc_get_block_size(pm->td); data_size = (((data_len - 1) / block_size) + 1) * block_size; if (data_size <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Integer overflow in data size"); RETURN_FALSE; } data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } else { /* It's not a block algorithm */ data_size = data_len; data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } mdecrypt_generic(pm->td, data_s, data_size); RETVAL_STRINGL(data_s, data_size, 1); efree(data_s); }
167,092
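The PHP fix in the record above guards the block-size rounding (((data_len - 1) / block_size) + 1) * block_size by rejecting a non-positive result after the fact. A stricter variant checks before the multiplication so the overflow never happens; the sketch below is a generic C illustration of that idea, not code from the PHP mcrypt extension.

#include <limits.h>
#include <stdio.h>

/* Round len up to a multiple of block_size, refusing to overflow.
 * Returns 0 on success, -1 if the padded size would not fit in an int
 * (the condition the PHP fix only detects after the multiplication). */
static int padded_size(int len, int block_size, int *out)
{
    int blocks;

    if (len <= 0 || block_size <= 0)
        return -1;
    blocks = (len - 1) / block_size + 1;
    if (blocks > INT_MAX / block_size)   /* check before multiplying */
        return -1;
    *out = blocks * block_size;
    return 0;
}

int main(void)
{
    int size;

    if (padded_size(1000, 16, &size) == 0)
        printf("padded to %d\n", size);            /* prints 1008 */
    if (padded_size(INT_MAX - 3, 16, &size) != 0)
        printf("rejected: would overflow\n");
    return 0;
}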
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, indx, 0, data, size, 500); } Commit Message: rtl8150: Use heap buffers for all register access Allocating USB buffers on the stack is not portable, and no longer works on x86_64 (with VMAP_STACK enabled as per default). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-119
static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) { void *buf; int ret; buf = kmemdup(data, size, GFP_NOIO); if (!buf) return -ENOMEM; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, indx, 0, buf, size, 500); kfree(buf); return ret; }
168,215
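The rtl8150 record above moves the usb_control_msg() transfer buffer off the stack and onto the heap, because the USB core may hand that buffer to DMA and an on-stack buffer (especially with VMAP_STACK) is not DMA-safe. As a companion illustration, here is what a read-side helper might look like under the same pattern. This is a sketch meant to live inside the driver, not a standalone program, and the RTL8150_REQ_GET_REGS / RTL8150_REQT_READ constants are assumed by analogy with the write-side macros shown in the record rather than taken from the patch.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

/* Hypothetical read-side counterpart to the fixed set_registers() above.
 * The transfer buffer is heap memory from kmalloc(), never a stack
 * variable, and the result is copied back to the caller afterwards. */
static int get_registers(rtl8150_t *dev, u16 indx, u16 size, void *data)
{
	void *buf;
	int ret;

	buf = kmalloc(size, GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
			      indx, 0, buf, size, 500);
	if (ret > 0 && ret <= size)
		memcpy(data, buf, ret);
	kfree(buf);
	return ret;
}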
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ChromeMockRenderThread::OnMsgOpenChannelToExtension( int routing_id, const std::string& source_extension_id, const std::string& target_extension_id, const std::string& channel_name, int* port_id) { *port_id = 0; } Commit Message: Print preview: Use an ID instead of memory pointer string in WebUI. BUG=144051 Review URL: https://chromiumcodereview.appspot.com/10870003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@153342 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-200
void ChromeMockRenderThread::OnMsgOpenChannelToExtension( int routing_id, const std::string& source_extension_id, const std::string& target_extension_id, const std::string& channel_name, int* port_id) { *port_id = 0; }
170,852
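The commit message in the record above replaces a memory-pointer string exposed to WebUI with an ID, which is the standard remedy for this class of information exposure: untrusted front-end code should only ever see opaque handles, never raw addresses. The C sketch below illustrates a tiny handle table of that sort; it is a generic illustration with made-up names, not Chromium code.

#include <stddef.h>
#include <stdio.h>

#define MAX_HANDLES 16

struct object { int value; };

static struct object *g_table[MAX_HANDLES];

/* Register an object and hand out a small integer ID instead of its address. */
static int handle_register(struct object *obj)
{
    for (int id = 0; id < MAX_HANDLES; id++) {
        if (g_table[id] == NULL) {
            g_table[id] = obj;
            return id;
        }
    }
    return -1;                       /* table full */
}

/* Resolve an ID coming back from the untrusted side, validating it first. */
static struct object *handle_lookup(int id)
{
    if (id < 0 || id >= MAX_HANDLES)
        return NULL;
    return g_table[id];
}

int main(void)
{
    struct object obj = { 42 };
    int id = handle_register(&obj);

    /* Only `id` crosses the trust boundary; the pointer value never does. */
    printf("exported id=%d\n", id);

    struct object *back = handle_lookup(id);
    if (back != NULL)
        printf("value=%d\n", back->value);
    return 0;
}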
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 496 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 861 "ext/standard/var_unserializer.re" { return 0; } #line 558 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 855 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 607 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 708 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 785 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 699 "ext/standard/var_unserializer.re" { if (!var_hash) return 0; INIT_PZVAL(*rval); return object_common2(UNSERIALIZE_PASSTHRU, object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR)); } #line 819 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 678 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 861 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 643 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 917 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 610 
"ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 971 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 600 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1069 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 585 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 
1; } #line 1143 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 558 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1197 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 551 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1212 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 544 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1222 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 521 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1268 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 500 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1312 "ext/standard/var_unserializer.c" } #line 863 "ext/standard/var_unserializer.re" return 0; } Commit Message: Fix bug #73825 - Heap out of bounds read on unserialize in finish_nested_data() CWE ID: CWE-125
PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 501 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 875 "ext/standard/var_unserializer.re" { return 0; } #line 563 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 869 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 612 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 717 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (elements < 0) { efree(class_name); return 0; } if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 795 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 704 "ext/standard/var_unserializer.re" { long elements; if (!var_hash) return 0; INIT_PZVAL(*rval); elements = object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR); if (elements < 0) { return 0; } return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 833 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 683 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 875 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 648 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 931 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if 
(yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 615 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 985 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 605 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1083 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 590 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, 
php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 1; } #line 1157 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 563 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1211 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 556 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1226 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 549 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1236 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 526 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1282 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 505 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1326 "ext/standard/var_unserializer.c" } #line 877 "ext/standard/var_unserializer.re" return 0; }
168,515
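The patch recorded in the row above (PHP bug #73825, heap out-of-bounds read in finish_nested_data()) works by refusing to continue whenever object_common1() reports a negative element count, so the nested-data walker is never entered with a bogus count. Below is a minimal stand-alone sketch of that guard, assuming a hypothetical parse_count() helper and input buffer in place of the PHP unserializer internals; it is an illustration of the pattern, not the PHP code itself.

#include <stdio.h>
#include <limits.h>

/* Hypothetical helper: parse a decimal element count from the input.
   Returns -1 on malformed input, mirroring the "negative means error" idiom. */
static long parse_count(const char *p, const char *end)
{
    long n = 0;
    if (p >= end || *p < '0' || *p > '9')
        return -1;
    while (p < end && *p >= '0' && *p <= '9') {
        if (n > (LONG_MAX - (*p - '0')) / 10)
            return -1;                    /* would overflow */
        n = n * 10 + (*p - '0');
        p++;
    }
    return n;
}

int main(void)
{
    const char buf[] = "12:";
    long elements = parse_count(buf, buf + sizeof(buf) - 1);

    /* The guard added by the fix: bail out before any allocation or
       iteration depends on a count that failed to parse. */
    if (elements < 0) {
        fprintf(stderr, "reject malformed count\n");
        return 1;
    }
    printf("elements = %ld\n", elements);
    return 0;
}

The point is simply that the count is validated before anything downstream trusts it.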
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: int cJSON_GetArraySize( cJSON *array ) { cJSON *c = array->child; int i = 0; while ( c ) { ++i; c = c->next; } return i; } Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a malformed JSON string was passed on the control channel. This issue, present in the cJSON library, was already fixed upstream, so was addressed here in iperf3 by importing a newer version of cJSON (plus local ESnet modifications). Discovered and reported by Dave McDaniel, Cisco Talos. Based on a patch by @dopheide-esnet, with input from @DaveGamble. Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001, CVE-2016-4303 (cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40) Signed-off-by: Bruce A. Mah <[email protected]> CWE ID: CWE-119
int cJSON_GetArraySize( cJSON *array )
167,287
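The cJSON row above describes heap corruption reachable through malformed JSON on the iperf3 control channel; its recorded output is truncated to the function signature, and the commit note says the real remediation was importing a newer cJSON. The sketch below is therefore only a generic illustration of the bounds-checked copy discipline such parser fixes rely on, with invented names (copy_token, MAX_TOKEN), not the upstream patch.

#include <stdio.h>
#include <string.h>

#define MAX_TOKEN 64   /* hypothetical fixed-size destination buffer */

/* Copy a parsed token only if it fits, and always NUL-terminate.
   Returns 0 on success, -1 if the token would overflow the destination. */
static int copy_token(char *dst, size_t dst_size, const char *src, size_t src_len)
{
    if (dst_size == 0 || src_len >= dst_size)
        return -1;                    /* refuse instead of overflowing */
    memcpy(dst, src, src_len);
    dst[src_len] = '\0';
    return 0;
}

int main(void)
{
    char token[MAX_TOKEN];
    const char *input = "\"control-channel-string\"";

    if (copy_token(token, sizeof(token), input, strlen(input)) == 0)
        printf("token: %s\n", token);
    else
        fprintf(stderr, "token too long, rejected\n");
    return 0;
}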
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int get_client_master_key(SSL *s) { int is_export, i, n, keya, ek; unsigned long len; unsigned char *p; const SSL_CIPHER *cp; const EVP_CIPHER *c; const EVP_MD *md; p = (unsigned char *)s->init_buf->data; if (s->state == SSL2_ST_GET_CLIENT_MASTER_KEY_A) { i = ssl2_read(s, (char *)&(p[s->init_num]), 10 - s->init_num); if (i < (10 - s->init_num)) return (ssl2_part_read(s, SSL_F_GET_CLIENT_MASTER_KEY, i)); s->init_num = 10; if (*(p++) != SSL2_MT_CLIENT_MASTER_KEY) { if (p[-1] != SSL2_MT_ERROR) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_READ_WRONG_PACKET_TYPE); } else SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_PEER_ERROR); return (-1); } cp = ssl2_get_cipher_by_char(p); if (cp == NULL) { ssl2_return_error(s, SSL2_PE_NO_CIPHER); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_CIPHER_MATCH); return (-1); } s->session->cipher = cp; p += 3; n2s(p, i); s->s2->tmp.clear = i; n2s(p, i); s->s2->tmp.enc = i; n2s(p, i); if (i > SSL_MAX_KEY_ARG_LENGTH) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_KEY_ARG_TOO_LONG); return -1; } s->session->key_arg_length = i; s->state = SSL2_ST_GET_CLIENT_MASTER_KEY_B; } /* SSL2_ST_GET_CLIENT_MASTER_KEY_B */ p = (unsigned char *)s->init_buf->data; if (s->init_buf->length < SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, ERR_R_INTERNAL_ERROR); return -1; } keya = s->session->key_arg_length; len = 10 + (unsigned long)s->s2->tmp.clear + (unsigned long)s->s2->tmp.enc + (unsigned long)keya; if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_MESSAGE_TOO_LONG); return -1; } n = (int)len - s->init_num; i = ssl2_read(s, (char *)&(p[s->init_num]), n); if (i != n) return (ssl2_part_read(s, SSL_F_GET_CLIENT_MASTER_KEY, i)); if (s->msg_callback) { /* CLIENT-MASTER-KEY */ s->msg_callback(0, s->version, 0, p, (size_t)len, s, s->msg_callback_arg); } p += 10; memcpy(s->session->key_arg, &(p[s->s2->tmp.clear + s->s2->tmp.enc]), (unsigned int)keya); if (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_PRIVATEKEY); return (-1); } i = ssl_rsa_private_decrypt(s->cert, s->s2->tmp.enc, &(p[s->s2->tmp.clear]), &(p[s->s2->tmp.clear]), (s->s2->ssl2_rollback) ? RSA_SSLV23_PADDING : RSA_PKCS1_PADDING); is_export = SSL_C_IS_EXPORT(s->session->cipher); (s->s2->ssl2_rollback) ? 
RSA_SSLV23_PADDING : RSA_PKCS1_PADDING); is_export = SSL_C_IS_EXPORT(s->session->cipher); if (!ssl_cipher_get_evp(s->session, &c, &md, NULL, NULL, NULL)) { ssl2_return_error(s, SSL2_PE_NO_CIPHER); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_PROBLEMS_MAPPING_CIPHER_FUNCTIONS); return (0); } else ek = 5; /* bad decrypt */ # if 1 /* * If a bad decrypt, continue with protocol but with a random master * secret (Bleichenbacher attack) */ if ((i < 0) || ((!is_export && (i != EVP_CIPHER_key_length(c))) || (is_export && ((i != ek) || (s->s2->tmp.clear + (unsigned int)i != (unsigned int) EVP_CIPHER_key_length(c)))))) { ERR_clear_error(); if (is_export) i = ek; else i = EVP_CIPHER_key_length(c); if (RAND_pseudo_bytes(p, i) <= 0) return 0; } # else unsigned long len; unsigned char *p; STACK_OF(SSL_CIPHER) *cs; /* a stack of SSL_CIPHERS */ STACK_OF(SSL_CIPHER) *cl; /* the ones we want to use */ STACK_OF(SSL_CIPHER) *prio, *allow; int z; /* * This is a bit of a hack to check for the correct packet type the first * time round. */ if (s->state == SSL2_ST_GET_CLIENT_HELLO_A) { s->first_packet = 1; s->state = SSL2_ST_GET_CLIENT_HELLO_B; } # endif if (is_export) i += s->s2->tmp.clear; if (i > SSL_MAX_MASTER_KEY_LENGTH) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); if (*(p++) != SSL2_MT_CLIENT_HELLO) { if (p[-1] != SSL2_MT_ERROR) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_READ_WRONG_PACKET_TYPE); } else SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_PEER_ERROR); return (-1); } n2s(p, i); if (i < s->version) s->version = i; n2s(p, i); s->s2->tmp.cipher_spec_length = i; n2s(p, i); s->s2->tmp.session_id_length = i; n2s(p, i); s->s2->challenge_length = i; if ((i < SSL2_MIN_CHALLENGE_LENGTH) || (i > SSL2_MAX_CHALLENGE_LENGTH)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_INVALID_CHALLENGE_LENGTH); return (-1); } s->state = SSL2_ST_GET_CLIENT_HELLO_C; } /* SSL2_ST_GET_CLIENT_HELLO_C */ p = (unsigned char *)s->init_buf->data; len = 9 + (unsigned long)s->s2->tmp.cipher_spec_length + (unsigned long)s->s2->challenge_length + (unsigned long)s->s2->tmp.session_id_length; if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_MESSAGE_TOO_LONG); return -1; } n = (int)len - s->init_num; i = ssl2_read(s, (char *)&(p[s->init_num]), n); if (i != n) return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i)); if (s->msg_callback) { /* CLIENT-HELLO */ s->msg_callback(0, s->version, 0, p, (size_t)len, s, s->msg_callback_arg); } p += 9; /* * get session-id before cipher stuff so we can get out session structure * if it is cached */ /* session-id */ if ((s->s2->tmp.session_id_length != 0) && (s->s2->tmp.session_id_length != SSL2_SSL_SESSION_ID_LENGTH)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_BAD_SSL_SESSION_ID_LENGTH); return (-1); } if (s->s2->tmp.session_id_length == 0) { if (!ssl_get_new_session(s, 1)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); return (-1); } } else { i = ssl_get_prev_session(s, &(p[s->s2->tmp.cipher_spec_length]), s->s2->tmp.session_id_length, NULL); if (i == 1) { /* previous session */ s->hit = 1; } else if (i == -1) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); return (-1); } else { if (s->cert == NULL) { ssl2_return_error(s, SSL2_PE_NO_CERTIFICATE); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CERTIFICATE_SET); return (-1); } if (!ssl_get_new_session(s, 1)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); return 
(-1); } } } if (!s->hit) { cs = ssl_bytes_to_cipher_list(s, p, s->s2->tmp.cipher_spec_length, &s->session->ciphers); if (cs == NULL) goto mem_err; cl = SSL_get_ciphers(s); if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { prio = sk_SSL_CIPHER_dup(cl); if (prio == NULL) goto mem_err; allow = cs; } else { prio = cs; allow = cl; } for (z = 0; z < sk_SSL_CIPHER_num(prio); z++) { if (sk_SSL_CIPHER_find(allow, sk_SSL_CIPHER_value(prio, z)) < 0) { (void)sk_SSL_CIPHER_delete(prio, z); z--; } } if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { sk_SSL_CIPHER_free(s->session->ciphers); s->session->ciphers = prio; } /* * s->session->ciphers should now have a list of ciphers that are on * both the client and server. This list is ordered by the order the * client sent the ciphers or in the order of the server's preference * if SSL_OP_CIPHER_SERVER_PREFERENCE was set. */ } p += s->s2->tmp.cipher_spec_length; /* done cipher selection */ /* session id extracted already */ p += s->s2->tmp.session_id_length; /* challenge */ if (s->s2->challenge_length > sizeof s->s2->challenge) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); return -1; } memcpy(s->s2->challenge, p, (unsigned int)s->s2->challenge_length); return (1); mem_err: SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_MALLOC_FAILURE); return (0); } Commit Message: CWE ID: CWE-20
static int get_client_master_key(SSL *s) { int is_export, i, n, keya, ek; unsigned long len; unsigned char *p; const SSL_CIPHER *cp; const EVP_CIPHER *c; const EVP_MD *md; p = (unsigned char *)s->init_buf->data; if (s->state == SSL2_ST_GET_CLIENT_MASTER_KEY_A) { i = ssl2_read(s, (char *)&(p[s->init_num]), 10 - s->init_num); if (i < (10 - s->init_num)) return (ssl2_part_read(s, SSL_F_GET_CLIENT_MASTER_KEY, i)); s->init_num = 10; if (*(p++) != SSL2_MT_CLIENT_MASTER_KEY) { if (p[-1] != SSL2_MT_ERROR) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_READ_WRONG_PACKET_TYPE); } else SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_PEER_ERROR); return (-1); } cp = ssl2_get_cipher_by_char(p); if (cp == NULL) { ssl2_return_error(s, SSL2_PE_NO_CIPHER); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_CIPHER_MATCH); return (-1); } s->session->cipher = cp; p += 3; n2s(p, i); s->s2->tmp.clear = i; n2s(p, i); s->s2->tmp.enc = i; n2s(p, i); if (i > SSL_MAX_KEY_ARG_LENGTH) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_KEY_ARG_TOO_LONG); return -1; } s->session->key_arg_length = i; s->state = SSL2_ST_GET_CLIENT_MASTER_KEY_B; } /* SSL2_ST_GET_CLIENT_MASTER_KEY_B */ p = (unsigned char *)s->init_buf->data; if (s->init_buf->length < SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, ERR_R_INTERNAL_ERROR); return -1; } keya = s->session->key_arg_length; len = 10 + (unsigned long)s->s2->tmp.clear + (unsigned long)s->s2->tmp.enc + (unsigned long)keya; if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_MESSAGE_TOO_LONG); return -1; } n = (int)len - s->init_num; i = ssl2_read(s, (char *)&(p[s->init_num]), n); if (i != n) return (ssl2_part_read(s, SSL_F_GET_CLIENT_MASTER_KEY, i)); if (s->msg_callback) { /* CLIENT-MASTER-KEY */ s->msg_callback(0, s->version, 0, p, (size_t)len, s, s->msg_callback_arg); } p += 10; memcpy(s->session->key_arg, &(p[s->s2->tmp.clear + s->s2->tmp.enc]), (unsigned int)keya); if (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_PRIVATEKEY); return (-1); } is_export = SSL_C_IS_EXPORT(s->session->cipher); (s->s2->ssl2_rollback) ? RSA_SSLV23_PADDING : RSA_PKCS1_PADDING); is_export = SSL_C_IS_EXPORT(s->session->cipher); if (!ssl_cipher_get_evp(s->session, &c, &md, NULL, NULL, NULL)) { ssl2_return_error(s, SSL2_PE_NO_CIPHER); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_PROBLEMS_MAPPING_CIPHER_FUNCTIONS); return (0); } else ek = 5; /* * The format of the CLIENT-MASTER-KEY message is * 1 byte message type * 3 bytes cipher * 2-byte clear key length (stored in s->s2->tmp.clear) * 2-byte encrypted key length (stored in s->s2->tmp.enc) * 2-byte key args length (IV etc) * clear key * encrypted key * key args * * If the cipher is an export cipher, then the encrypted key bytes * are a fixed portion of the total key (5 or 8 bytes). The size of * this portion is in |ek|. If the cipher is not an export cipher, * then the entire key material is encrypted (i.e., clear key length * must be zero). 
*/ if ((!is_export && s->s2->tmp.clear != 0) || (is_export && s->s2->tmp.clear + ek != EVP_CIPHER_key_length(c))) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY,SSL_R_BAD_LENGTH); return -1; } /* * The encrypted blob must decrypt to the encrypted portion of the key. * Decryption can't be expanding, so if we don't have enough encrypted * bytes to fit the key in the buffer, stop now. */ if ((is_export && s->s2->tmp.enc < ek) || (!is_export && s->s2->tmp.enc < EVP_CIPHER_key_length(c))) { ssl2_return_error(s,SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_MASTER_KEY,SSL_R_LENGTH_TOO_SHORT); return -1; } i = ssl_rsa_private_decrypt(s->cert, s->s2->tmp.enc, &(p[s->s2->tmp.clear]), &(p[s->s2->tmp.clear]), (s->s2->ssl2_rollback) ? RSA_SSLV23_PADDING : RSA_PKCS1_PADDING); /* bad decrypt */ # if 1 /* * If a bad decrypt, continue with protocol but with a random master * secret (Bleichenbacher attack) */ if ((i < 0) || ((!is_export && i != EVP_CIPHER_key_length(c)) || (is_export && i != ek))) { ERR_clear_error(); if (is_export) i = ek; else i = EVP_CIPHER_key_length(c); if (RAND_pseudo_bytes(&p[s->s2->tmp.clear], i) <= 0) return 0; } # else unsigned long len; unsigned char *p; STACK_OF(SSL_CIPHER) *cs; /* a stack of SSL_CIPHERS */ STACK_OF(SSL_CIPHER) *cl; /* the ones we want to use */ STACK_OF(SSL_CIPHER) *prio, *allow; int z; /* * This is a bit of a hack to check for the correct packet type the first * time round. */ if (s->state == SSL2_ST_GET_CLIENT_HELLO_A) { s->first_packet = 1; s->state = SSL2_ST_GET_CLIENT_HELLO_B; } # endif if (is_export) i = EVP_CIPHER_key_length(c); if (i > SSL_MAX_MASTER_KEY_LENGTH) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); if (*(p++) != SSL2_MT_CLIENT_HELLO) { if (p[-1] != SSL2_MT_ERROR) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_READ_WRONG_PACKET_TYPE); } else SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_PEER_ERROR); return (-1); } n2s(p, i); if (i < s->version) s->version = i; n2s(p, i); s->s2->tmp.cipher_spec_length = i; n2s(p, i); s->s2->tmp.session_id_length = i; n2s(p, i); s->s2->challenge_length = i; if ((i < SSL2_MIN_CHALLENGE_LENGTH) || (i > SSL2_MAX_CHALLENGE_LENGTH)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_INVALID_CHALLENGE_LENGTH); return (-1); } s->state = SSL2_ST_GET_CLIENT_HELLO_C; } /* SSL2_ST_GET_CLIENT_HELLO_C */ p = (unsigned char *)s->init_buf->data; len = 9 + (unsigned long)s->s2->tmp.cipher_spec_length + (unsigned long)s->s2->challenge_length + (unsigned long)s->s2->tmp.session_id_length; if (len > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_MESSAGE_TOO_LONG); return -1; } n = (int)len - s->init_num; i = ssl2_read(s, (char *)&(p[s->init_num]), n); if (i != n) return (ssl2_part_read(s, SSL_F_GET_CLIENT_HELLO, i)); if (s->msg_callback) { /* CLIENT-HELLO */ s->msg_callback(0, s->version, 0, p, (size_t)len, s, s->msg_callback_arg); } p += 9; /* * get session-id before cipher stuff so we can get out session structure * if it is cached */ /* session-id */ if ((s->s2->tmp.session_id_length != 0) && (s->s2->tmp.session_id_length != SSL2_SSL_SESSION_ID_LENGTH)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_BAD_SSL_SESSION_ID_LENGTH); return (-1); } if (s->s2->tmp.session_id_length == 0) { if (!ssl_get_new_session(s, 1)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); return (-1); } } else { i = 
ssl_get_prev_session(s, &(p[s->s2->tmp.cipher_spec_length]), s->s2->tmp.session_id_length, NULL); if (i == 1) { /* previous session */ s->hit = 1; } else if (i == -1) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); return (-1); } else { if (s->cert == NULL) { ssl2_return_error(s, SSL2_PE_NO_CERTIFICATE); SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CERTIFICATE_SET); return (-1); } if (!ssl_get_new_session(s, 1)) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); return (-1); } } } if (!s->hit) { cs = ssl_bytes_to_cipher_list(s, p, s->s2->tmp.cipher_spec_length, &s->session->ciphers); if (cs == NULL) goto mem_err; cl = SSL_get_ciphers(s); if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { prio = sk_SSL_CIPHER_dup(cl); if (prio == NULL) goto mem_err; allow = cs; } else { prio = cs; allow = cl; } for (z = 0; z < sk_SSL_CIPHER_num(prio); z++) { if (sk_SSL_CIPHER_find(allow, sk_SSL_CIPHER_value(prio, z)) < 0) { (void)sk_SSL_CIPHER_delete(prio, z); z--; } } if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { sk_SSL_CIPHER_free(s->session->ciphers); s->session->ciphers = prio; } /* * s->session->ciphers should now have a list of ciphers that are on * both the client and server. This list is ordered by the order the * client sent the ciphers or in the order of the server's preference * if SSL_OP_CIPHER_SERVER_PREFERENCE was set. */ } p += s->s2->tmp.cipher_spec_length; /* done cipher selection */ /* session id extracted already */ p += s->s2->tmp.session_id_length; /* challenge */ if (s->s2->challenge_length > sizeof s->s2->challenge) { ssl2_return_error(s, SSL2_PE_UNDEFINED_ERROR); SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); return -1; } memcpy(s->s2->challenge, p, (unsigned int)s->s2->challenge_length); return (1); mem_err: SSLerr(SSL_F_GET_CLIENT_HELLO, ERR_R_MALLOC_FAILURE); return (0); }
164,802
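The fixed get_client_master_key() in the row above adds explicit validation of the client-supplied clear and encrypted key lengths against the negotiated cipher: the clear part must be zero for non-export ciphers, the clear part plus the fixed encrypted portion must equal the key length for export ciphers, and the ciphertext must be long enough to hold the expected decrypted key. The sketch below restates that length discipline with assumed parameter names (cipher_key_len, ek) rather than the real OpenSSL structures.

#include <stdio.h>

/* Hypothetical parameters: total key length required by the cipher and the
   fixed encrypted portion (ek) used by export ciphers. Returns 0 if the
   client-supplied lengths are consistent, -1 otherwise. */
static int validate_master_key_lengths(int is_export, int clear_len,
                                       int enc_len, int cipher_key_len, int ek)
{
    /* Non-export ciphers: the whole key must arrive encrypted. */
    if (!is_export && clear_len != 0)
        return -1;
    /* Export ciphers: clear part plus fixed encrypted part must equal the key. */
    if (is_export && clear_len + ek != cipher_key_len)
        return -1;
    /* The ciphertext must be large enough to hold the expected plaintext. */
    if (enc_len < (is_export ? ek : cipher_key_len))
        return -1;
    return 0;
}

int main(void)
{
    /* 40-bit export cipher: 11 clear bytes + 5 secret bytes = 16-byte key. */
    printf("export ok:   %d\n", validate_master_key_lengths(1, 11, 128, 16, 5));
    /* Client claims 200 clear bytes for a non-export cipher: rejected. */
    printf("bogus clear: %d\n", validate_master_key_lengths(0, 200, 128, 16, 5));
    return 0;
}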
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static double abserr(PNG_CONST png_modifier *pm, int in_depth, int out_depth) { /* Absolute error permitted in linear values - affected by the bit depth of * the calculations. */ if (pm->assume_16_bit_calculations || (pm->calculations_use_input_precision ? in_depth : out_depth) == 16) return pm->maxabs16; else return pm->maxabs8; } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
static double abserr(const png_modifier *pm, int in_depth, int out_depth) { /* Absolute error permitted in linear values - affected by the bit depth of * the calculations. */ if (pm->assume_16_bit_calculations || (pm->calculations_use_input_precision ? in_depth : out_depth) == 16) return pm->maxabs16; else return pm->maxabs8; }
173,603
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool AppCacheBackendImpl::SelectCacheForSharedWorker( int host_id, int64 appcache_id) { AppCacheHost* host = GetHost(host_id); if (!host || host->was_select_cache_called()) return false; host->SelectCacheForSharedWorker(appcache_id); return true; } Commit Message: Fix possible map::end() dereference in AppCacheUpdateJob triggered by a compromised renderer. BUG=551044 Review URL: https://codereview.chromium.org/1418783005 Cr-Commit-Position: refs/heads/master@{#358815} CWE ID:
bool AppCacheBackendImpl::SelectCacheForSharedWorker( int host_id, int64 appcache_id) { AppCacheHost* host = GetHost(host_id); if (!host) return false; return host->SelectCacheForSharedWorker(appcache_id); }
171,737
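The AppCache row above fixes a map::end() dereference reachable from a compromised renderer; the recorded change makes SelectCacheForSharedWorker() propagate the host's own validation result instead of unconditionally reporting success. The sketch below shows the general "validate the lookup before using it" pattern in plain C, with an invented host table and find_host() helper standing in for the Chromium map.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct host {
    int id;
    bool cache_selected;
};

/* Invented fixed-size table standing in for the browser-side host map. */
static struct host hosts[] = { { 1, false }, { 2, true } };

static struct host *find_host(int host_id)
{
    for (size_t i = 0; i < sizeof(hosts) / sizeof(hosts[0]); i++)
        if (hosts[i].id == host_id)
            return &hosts[i];
    return NULL;                      /* unknown id from an untrusted caller */
}

static bool select_cache(int host_id)
{
    struct host *h = find_host(host_id);
    if (h == NULL)                    /* never dereference a failed lookup */
        return false;
    if (h->cache_selected)            /* state check lives with the data */
        return false;
    h->cache_selected = true;
    return true;
}

int main(void)
{
    printf("select(1): %d\n", select_cache(1));   /* succeeds */
    printf("select(9): %d\n", select_cache(9));   /* unknown id, rejected */
    return 0;
}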
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 
0) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. 
*/ length=(MagickSizeType) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } Commit Message: Fix improper cast that could cause an overflow as demonstrated in #347. CWE ID: CWE-119
ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || 
(LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); }
168,401
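The fixed ReadPSDLayers above refuses any layer whose channel count exceeds MaxPSDChannels before it fills the fixed-size channel_info array. Below is a minimal sketch of that check-the-count-before-filling pattern; MAX_CHANNELS, read_u16, channel_t and layer_t are hypothetical stand-ins, not the ImageMagick API.

#include <stdint.h>
#include <stdio.h>

#define MAX_CHANNELS 56                    /* hypothetical cap, like MaxPSDChannels */

typedef struct {
    uint16_t type;
    uint32_t size;
} channel_t;

typedef struct {
    uint16_t  channels;                    /* count taken from the file */
    channel_t channel_info[MAX_CHANNELS];  /* fixed-size array being filled */
} layer_t;

/* Read a big-endian 16-bit value; returns 0 at EOF, which is enough for a sketch. */
static uint16_t read_u16(FILE *fp)
{
    int hi = fgetc(fp), lo = fgetc(fp);
    if (hi == EOF || lo == EOF)
        return 0;
    return (uint16_t)((hi << 8) | lo);
}

/* Validate the on-disk channel count before indexing channel_info, the same
 * shape as the MaximumChannelsExceeded rejection in the code above. */
static int read_layer_channels(FILE *fp, layer_t *layer)
{
    layer->channels = read_u16(fp);
    if (layer->channels == 0 || layer->channels > MAX_CHANNELS)
        return -1;                         /* corrupt header: refuse, do not overflow */

    for (uint16_t i = 0; i < layer->channels; i++) {
        uint16_t hi, lo;
        layer->channel_info[i].type = read_u16(fp);
        hi = read_u16(fp);
        lo = read_u16(fp);
        layer->channel_info[i].size = ((uint32_t)hi << 16) | lo;
    }
    return 0;
}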
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: OMX_ERRORTYPE SoftFlacEncoder::internalSetParameter( OMX_INDEXTYPE index, const OMX_PTR params) { switch (index) { case OMX_IndexParamAudioPcm: { ALOGV("SoftFlacEncoder::internalSetParameter(OMX_IndexParamAudioPcm)"); OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = (OMX_AUDIO_PARAM_PCMMODETYPE *)params; if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) { ALOGE("SoftFlacEncoder::internalSetParameter() Error #1"); return OMX_ErrorUndefined; } if (pcmParams->nChannels < 1 || pcmParams->nChannels > 2) { return OMX_ErrorUndefined; } mNumChannels = pcmParams->nChannels; mSampleRate = pcmParams->nSamplingRate; ALOGV("will encode %d channels at %dHz", mNumChannels, mSampleRate); return configureEncoder(); } case OMX_IndexParamStandardComponentRole: { ALOGV("SoftFlacEncoder::internalSetParameter(OMX_IndexParamStandardComponentRole)"); const OMX_PARAM_COMPONENTROLETYPE *roleParams = (const OMX_PARAM_COMPONENTROLETYPE *)params; if (strncmp((const char *)roleParams->cRole, "audio_encoder.flac", OMX_MAX_STRINGNAME_SIZE - 1)) { ALOGE("SoftFlacEncoder::internalSetParameter(OMX_IndexParamStandardComponentRole)" "error"); return OMX_ErrorUndefined; } return OMX_ErrorNone; } case OMX_IndexParamAudioFlac: { OMX_AUDIO_PARAM_FLACTYPE *flacParams = (OMX_AUDIO_PARAM_FLACTYPE *)params; mCompressionLevel = flacParams->nCompressionLevel; // range clamping done inside encoder return OMX_ErrorNone; } case OMX_IndexParamPortDefinition: { OMX_PARAM_PORTDEFINITIONTYPE *defParams = (OMX_PARAM_PORTDEFINITIONTYPE *)params; if (defParams->nPortIndex == 0) { if (defParams->nBufferSize > kMaxInputBufferSize) { ALOGE("Input buffer size must be at most %d bytes", kMaxInputBufferSize); return OMX_ErrorUnsupportedSetting; } } } default: ALOGV("SoftFlacEncoder::internalSetParameter(default)"); return SimpleSoftOMXComponent::internalSetParameter(index, params); } } Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access Bug: 27207275 Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d CWE ID: CWE-119
OMX_ERRORTYPE SoftFlacEncoder::internalSetParameter( OMX_INDEXTYPE index, const OMX_PTR params) { switch (index) { case OMX_IndexParamAudioPcm: { ALOGV("SoftFlacEncoder::internalSetParameter(OMX_IndexParamAudioPcm)"); OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = (OMX_AUDIO_PARAM_PCMMODETYPE *)params; if (!isValidOMXParam(pcmParams)) { return OMX_ErrorBadParameter; } if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) { ALOGE("SoftFlacEncoder::internalSetParameter() Error #1"); return OMX_ErrorUndefined; } if (pcmParams->nChannels < 1 || pcmParams->nChannels > 2) { return OMX_ErrorUndefined; } mNumChannels = pcmParams->nChannels; mSampleRate = pcmParams->nSamplingRate; ALOGV("will encode %d channels at %dHz", mNumChannels, mSampleRate); return configureEncoder(); } case OMX_IndexParamStandardComponentRole: { ALOGV("SoftFlacEncoder::internalSetParameter(OMX_IndexParamStandardComponentRole)"); const OMX_PARAM_COMPONENTROLETYPE *roleParams = (const OMX_PARAM_COMPONENTROLETYPE *)params; if (!isValidOMXParam(roleParams)) { return OMX_ErrorBadParameter; } if (strncmp((const char *)roleParams->cRole, "audio_encoder.flac", OMX_MAX_STRINGNAME_SIZE - 1)) { ALOGE("SoftFlacEncoder::internalSetParameter(OMX_IndexParamStandardComponentRole)" "error"); return OMX_ErrorUndefined; } return OMX_ErrorNone; } case OMX_IndexParamAudioFlac: { OMX_AUDIO_PARAM_FLACTYPE *flacParams = (OMX_AUDIO_PARAM_FLACTYPE *)params; if (!isValidOMXParam(flacParams)) { return OMX_ErrorBadParameter; } mCompressionLevel = flacParams->nCompressionLevel; // range clamping done inside encoder return OMX_ErrorNone; } case OMX_IndexParamPortDefinition: { OMX_PARAM_PORTDEFINITIONTYPE *defParams = (OMX_PARAM_PORTDEFINITIONTYPE *)params; if (!isValidOMXParam(defParams)) { return OMX_ErrorBadParameter; } if (defParams->nPortIndex == 0) { if (defParams->nBufferSize > kMaxInputBufferSize) { ALOGE("Input buffer size must be at most %d bytes", kMaxInputBufferSize); return OMX_ErrorUnsupportedSetting; } } } default: ALOGV("SoftFlacEncoder::internalSetParameter(default)"); return SimpleSoftOMXComponent::internalSetParameter(index, params); } }
174,204
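The fix above adds isValidOMXParam() checks before each cast of the caller-supplied params pointer; that helper's definition is not part of the record, so the sketch below only illustrates the underlying idea. It assumes a hypothetical header type whose first field self-reports the blob's size (OMX-style structs carry such an nSize field) and assumes the transport already guarantees at least that header is readable.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical parameter header: the caller fills in the size of the
 * structure it actually allocated. */
typedef struct {
    uint32_t nSize;
} param_header_t;

typedef struct {
    uint32_t nSize;
    uint32_t nPortIndex;
    uint32_t nChannels;
    uint32_t nSamplingRate;
} pcm_params_t;

/* Reject any parameter blob whose declared size is smaller than the type we
 * are about to interpret it as; reading past it would be the out-of-bounds
 * access the commit message is guarding against. */
static int param_is_valid(const void *params, size_t expected_size)
{
    const param_header_t *hdr;

    if (params == NULL)
        return 0;
    hdr = (const param_header_t *)params;
    return hdr->nSize >= expected_size;
}

/* Usage: validate before casting and dereferencing the full struct. */
static int set_pcm_params(const void *params)
{
    const pcm_params_t *pcm;

    if (!param_is_valid(params, sizeof(pcm_params_t)))
        return -1;                       /* analogous to OMX_ErrorBadParameter */

    pcm = (const pcm_params_t *)params;
    return (pcm->nChannels >= 1 && pcm->nChannels <= 2) ? 0 : -1;
}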
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static UINT drdynvc_process_capability_request(drdynvcPlugin* drdynvc, int Sp, int cbChId, wStream* s) { UINT status; if (!drdynvc) return CHANNEL_RC_BAD_INIT_HANDLE; WLog_Print(drdynvc->log, WLOG_TRACE, "capability_request Sp=%d cbChId=%d", Sp, cbChId); Stream_Seek(s, 1); /* pad */ Stream_Read_UINT16(s, drdynvc->version); /* RDP8 servers offer version 3, though Microsoft forgot to document it * in their early documents. It behaves the same as version 2. */ if ((drdynvc->version == 2) || (drdynvc->version == 3)) { Stream_Read_UINT16(s, drdynvc->PriorityCharge0); Stream_Read_UINT16(s, drdynvc->PriorityCharge1); Stream_Read_UINT16(s, drdynvc->PriorityCharge2); Stream_Read_UINT16(s, drdynvc->PriorityCharge3); } status = drdynvc_send_capability_response(drdynvc); drdynvc->state = DRDYNVC_STATE_READY; return status; } Commit Message: Fix for #4866: Added additional length checks CWE ID:
static UINT drdynvc_process_capability_request(drdynvcPlugin* drdynvc, int Sp, int cbChId, wStream* s) { UINT status; if (!drdynvc) return CHANNEL_RC_BAD_INIT_HANDLE; if (Stream_GetRemainingLength(s) < 3) return ERROR_INVALID_DATA; WLog_Print(drdynvc->log, WLOG_TRACE, "capability_request Sp=%d cbChId=%d", Sp, cbChId); Stream_Seek(s, 1); /* pad */ Stream_Read_UINT16(s, drdynvc->version); /* RDP8 servers offer version 3, though Microsoft forgot to document it * in their early documents. It behaves the same as version 2. */ if ((drdynvc->version == 2) || (drdynvc->version == 3)) { if (Stream_GetRemainingLength(s) < 8) return ERROR_INVALID_DATA; Stream_Read_UINT16(s, drdynvc->PriorityCharge0); Stream_Read_UINT16(s, drdynvc->PriorityCharge1); Stream_Read_UINT16(s, drdynvc->PriorityCharge2); Stream_Read_UINT16(s, drdynvc->PriorityCharge3); } status = drdynvc_send_capability_response(drdynvc); drdynvc->state = DRDYNVC_STATE_READY; return status; }
168,934
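The patch adds Stream_GetRemainingLength() checks before each group of fixed-size reads from the wStream. Below is a self-contained sketch of the same discipline, using a hypothetical stream_t byte cursor rather than FreeRDP's API; the field names follow the capability request being parsed.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical byte cursor standing in for FreeRDP's wStream. */
typedef struct {
    const uint8_t *data;
    size_t length;
    size_t pos;
} stream_t;

static size_t stream_remaining(const stream_t *s)
{
    return s->length - s->pos;
}

/* Little-endian 16-bit read; the caller has already checked the length. */
static uint16_t stream_read_u16(stream_t *s)
{
    uint16_t v = (uint16_t)(s->data[s->pos] | (s->data[s->pos + 1] << 8));
    s->pos += 2;
    return v;
}

/* Mirrors the fix: verify the remaining length before consuming the pad
 * byte plus version, and again before the four priority fields. */
static int process_capability_request(stream_t *s, uint16_t *version)
{
    if (stream_remaining(s) < 3)
        return -1;                      /* ERROR_INVALID_DATA analogue */

    s->pos += 1;                        /* pad */
    *version = stream_read_u16(s);

    if (*version == 2 || *version == 3) {
        if (stream_remaining(s) < 8)
            return -1;
        for (int i = 0; i < 4; i++)
            (void)stream_read_u16(s);   /* PriorityCharge0..3 */
    }
    return 0;
}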
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply) { uint8_t buf[NBD_REPLY_SIZE]; uint32_t magic; ssize_t ret; ret = read_sync(ioc, buf, sizeof(buf)); if (ret < 0) { return ret; } if (ret != sizeof(buf)) { LOG("read failed"); return -EINVAL; } /* Reply [ 0 .. 3] magic (NBD_REPLY_MAGIC) [ 4 .. 7] error (0 == no error) [ 7 .. 15] handle */ magic = ldl_be_p(buf); reply->error = ldl_be_p(buf + 4); reply->handle = ldq_be_p(buf + 8); reply->error = nbd_errno_to_system_errno(reply->error); if (reply->error == ESHUTDOWN) { /* This works even on mingw which lacks a native ESHUTDOWN */ LOG("server shutting down"); return -EINVAL; } TRACE("Got reply: { magic = 0x%" PRIx32 ", .error = % " PRId32 ", handle = %" PRIu64" }", magic, reply->error, reply->handle); if (magic != NBD_REPLY_MAGIC) { LOG("invalid magic (got 0x%" PRIx32 ")", magic); return -EINVAL; } return 0; } Commit Message: CWE ID: CWE-20
ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply) { uint8_t buf[NBD_REPLY_SIZE]; uint32_t magic; ssize_t ret; ret = read_sync(ioc, buf, sizeof(buf)); if (ret <= 0) { return ret; } if (ret != sizeof(buf)) { LOG("read failed"); return -EINVAL; } /* Reply [ 0 .. 3] magic (NBD_REPLY_MAGIC) [ 4 .. 7] error (0 == no error) [ 7 .. 15] handle */ magic = ldl_be_p(buf); reply->error = ldl_be_p(buf + 4); reply->handle = ldq_be_p(buf + 8); reply->error = nbd_errno_to_system_errno(reply->error); if (reply->error == ESHUTDOWN) { /* This works even on mingw which lacks a native ESHUTDOWN */ LOG("server shutting down"); return -EINVAL; } TRACE("Got reply: { magic = 0x%" PRIx32 ", .error = % " PRId32 ", handle = %" PRIu64" }", magic, reply->error, reply->handle); if (magic != NBD_REPLY_MAGIC) { LOG("invalid magic (got 0x%" PRIx32 ")", magic); return -EINVAL; } return 0; }
165,450
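The one-character change above (ret < 0 became ret <= 0) lets a zero return from read_sync, an orderly disconnect, propagate to the caller instead of being reported as a bogus protocol error. Below is a sketch of the three-way read contract that makes such a check meaningful, built on plain POSIX read() with hypothetical names; it is not QEMU's read_sync.

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Read exactly `len` bytes from fd. Returns len on success, 0 on a clean
 * EOF before any byte was read, and a negative errno on error: the same
 * three outcomes the fixed nbd_receive_reply separates with `ret <= 0`. */
static ssize_t read_exact(int fd, void *buf, size_t len)
{
    size_t done = 0;

    while (done < len) {
        ssize_t n = read(fd, (char *)buf + done, len - done);
        if (n < 0) {
            if (errno == EINTR)
                continue;
            return -errno;
        }
        if (n == 0)                      /* peer closed the connection */
            return done == 0 ? 0 : -EIO; /* EOF mid-record is still an error */
        done += (size_t)n;
    }
    return (ssize_t)done;
}

/* Caller sketch: pass EOF (0) and errors (<0) through unchanged, and only
 * parse the reply once the full fixed-size header has arrived. */
static int receive_reply(int fd, unsigned char *hdr, size_t hdr_len)
{
    ssize_t ret = read_exact(fd, hdr, hdr_len);

    if (ret <= 0)
        return (int)ret;                 /* 0 = orderly shutdown, <0 = error */
    if ((size_t)ret != hdr_len)
        return -EINVAL;                  /* defensive; read_exact never does this */
    return 1;                            /* header is ready to be parsed */
}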
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int isofs_read_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct isofs_sb_info *sbi = ISOFS_SB(sb); unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); unsigned long block; int high_sierra = sbi->s_high_sierra; struct buffer_head *bh = NULL; struct iso_directory_record *de; struct iso_directory_record *tmpde = NULL; unsigned int de_len; unsigned long offset; struct iso_inode_info *ei = ISOFS_I(inode); int ret = -EIO; block = ei->i_iget5_block; bh = sb_bread(inode->i_sb, block); if (!bh) goto out_badread; offset = ei->i_iget5_offset; de = (struct iso_directory_record *) (bh->b_data + offset); de_len = *(unsigned char *) de; if (offset + de_len > bufsize) { int frag1 = bufsize - offset; tmpde = kmalloc(de_len, GFP_KERNEL); if (tmpde == NULL) { printk(KERN_INFO "%s: out of memory\n", __func__); ret = -ENOMEM; goto fail; } memcpy(tmpde, bh->b_data + offset, frag1); brelse(bh); bh = sb_bread(inode->i_sb, ++block); if (!bh) goto out_badread; memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1); de = tmpde; } inode->i_ino = isofs_get_ino(ei->i_iget5_block, ei->i_iget5_offset, ISOFS_BUFFER_BITS(inode)); /* Assume it is a normal-format file unless told otherwise */ ei->i_file_format = isofs_file_normal; if (de->flags[-high_sierra] & 2) { if (sbi->s_dmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFDIR | sbi->s_dmode; else inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; set_nlink(inode, 1); /* * Set to 1. We know there are 2, but * the find utility tries to optimize * if it is 2, and it screws up. It is * easier to give 1 which tells find to * do it the hard way. */ } else { if (sbi->s_fmode != ISOFS_INVALID_MODE) { inode->i_mode = S_IFREG | sbi->s_fmode; } else { /* * Set default permissions: r-x for all. The disc * could be shared with DOS machines so virtually * anything could be a valid executable. */ inode->i_mode = S_IFREG | S_IRUGO | S_IXUGO; } set_nlink(inode, 1); } inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; inode->i_blocks = 0; ei->i_format_parm[0] = 0; ei->i_format_parm[1] = 0; ei->i_format_parm[2] = 0; ei->i_section_size = isonum_733(de->size); if (de->flags[-high_sierra] & 0x80) { ret = isofs_read_level3_size(inode); if (ret < 0) goto fail; ret = -EIO; } else { ei->i_next_section_block = 0; ei->i_next_section_offset = 0; inode->i_size = isonum_733(de->size); } /* * Some dipshit decided to store some other bit of information * in the high byte of the file length. Truncate size in case * this CDROM was mounted with the cruft option. 
*/ if (sbi->s_cruft) inode->i_size &= 0x00ffffff; if (de->interleave[0]) { printk(KERN_DEBUG "ISOFS: Interleaved files not (yet) supported.\n"); inode->i_size = 0; } /* I have no idea what file_unit_size is used for, so we will flag it for now */ if (de->file_unit_size[0] != 0) { printk(KERN_DEBUG "ISOFS: File unit size != 0 for ISO file (%ld).\n", inode->i_ino); } /* I have no idea what other flag bits are used for, so we will flag it for now */ #ifdef DEBUG if((de->flags[-high_sierra] & ~2)!= 0){ printk(KERN_DEBUG "ISOFS: Unusual flag settings for ISO file " "(%ld %x).\n", inode->i_ino, de->flags[-high_sierra]); } #endif inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = iso_date(de->date, high_sierra); inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0; ei->i_first_extent = (isonum_733(de->extent) + isonum_711(de->ext_attr_length)); /* Set the number of blocks for stat() - should be done before RR */ inode->i_blocks = (inode->i_size + 511) >> 9; /* * Now test for possible Rock Ridge extensions which will override * some of these numbers in the inode structure. */ if (!high_sierra) { parse_rock_ridge_inode(de, inode); /* if we want uid/gid set, override the rock ridge setting */ if (sbi->s_uid_set) inode->i_uid = sbi->s_uid; if (sbi->s_gid_set) inode->i_gid = sbi->s_gid; } /* Now set final access rights if overriding rock ridge setting */ if (S_ISDIR(inode->i_mode) && sbi->s_overriderockperm && sbi->s_dmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFDIR | sbi->s_dmode; if (S_ISREG(inode->i_mode) && sbi->s_overriderockperm && sbi->s_fmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFREG | sbi->s_fmode; /* Install the inode operations vector */ if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; switch (ei->i_file_format) { #ifdef CONFIG_ZISOFS case isofs_file_compressed: inode->i_data.a_ops = &zisofs_aops; break; #endif default: inode->i_data.a_ops = &isofs_aops; break; } } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &isofs_dir_inode_operations; inode->i_fop = &isofs_dir_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &page_symlink_inode_operations; inode->i_data.a_ops = &isofs_symlink_aops; } else /* XXX - parse_rock_ridge_inode() had already set i_rdev. */ init_special_inode(inode, inode->i_mode, inode->i_rdev); ret = 0; out: kfree(tmpde); if (bh) brelse(bh); return ret; out_badread: printk(KERN_WARNING "ISOFS: unable to read i-node block\n"); fail: goto out; } Commit Message: isofs: Fix unbounded recursion when processing relocated directories We did not check relocated directory in any way when processing Rock Ridge 'CL' tag. Thus a corrupted isofs image can possibly have a CL entry pointing to another CL entry leading to possibly unbounded recursion in kernel code and thus stack overflow or deadlocks (if there is a loop created from CL entries). Fix the problem by not allowing CL entry to point to a directory entry with CL entry (such use makes no good sense anyway) and by checking whether CL entry doesn't point to itself. CC: [email protected] Reported-by: Chris Evans <[email protected]> Signed-off-by: Jan Kara <[email protected]> CWE ID: CWE-20
static int isofs_read_inode(struct inode *inode) static int isofs_read_inode(struct inode *inode, int relocated) { struct super_block *sb = inode->i_sb; struct isofs_sb_info *sbi = ISOFS_SB(sb); unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); unsigned long block; int high_sierra = sbi->s_high_sierra; struct buffer_head *bh = NULL; struct iso_directory_record *de; struct iso_directory_record *tmpde = NULL; unsigned int de_len; unsigned long offset; struct iso_inode_info *ei = ISOFS_I(inode); int ret = -EIO; block = ei->i_iget5_block; bh = sb_bread(inode->i_sb, block); if (!bh) goto out_badread; offset = ei->i_iget5_offset; de = (struct iso_directory_record *) (bh->b_data + offset); de_len = *(unsigned char *) de; if (offset + de_len > bufsize) { int frag1 = bufsize - offset; tmpde = kmalloc(de_len, GFP_KERNEL); if (tmpde == NULL) { printk(KERN_INFO "%s: out of memory\n", __func__); ret = -ENOMEM; goto fail; } memcpy(tmpde, bh->b_data + offset, frag1); brelse(bh); bh = sb_bread(inode->i_sb, ++block); if (!bh) goto out_badread; memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1); de = tmpde; } inode->i_ino = isofs_get_ino(ei->i_iget5_block, ei->i_iget5_offset, ISOFS_BUFFER_BITS(inode)); /* Assume it is a normal-format file unless told otherwise */ ei->i_file_format = isofs_file_normal; if (de->flags[-high_sierra] & 2) { if (sbi->s_dmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFDIR | sbi->s_dmode; else inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; set_nlink(inode, 1); /* * Set to 1. We know there are 2, but * the find utility tries to optimize * if it is 2, and it screws up. It is * easier to give 1 which tells find to * do it the hard way. */ } else { if (sbi->s_fmode != ISOFS_INVALID_MODE) { inode->i_mode = S_IFREG | sbi->s_fmode; } else { /* * Set default permissions: r-x for all. The disc * could be shared with DOS machines so virtually * anything could be a valid executable. */ inode->i_mode = S_IFREG | S_IRUGO | S_IXUGO; } set_nlink(inode, 1); } inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; inode->i_blocks = 0; ei->i_format_parm[0] = 0; ei->i_format_parm[1] = 0; ei->i_format_parm[2] = 0; ei->i_section_size = isonum_733(de->size); if (de->flags[-high_sierra] & 0x80) { ret = isofs_read_level3_size(inode); if (ret < 0) goto fail; ret = -EIO; } else { ei->i_next_section_block = 0; ei->i_next_section_offset = 0; inode->i_size = isonum_733(de->size); } /* * Some dipshit decided to store some other bit of information * in the high byte of the file length. Truncate size in case * this CDROM was mounted with the cruft option. 
*/ if (sbi->s_cruft) inode->i_size &= 0x00ffffff; if (de->interleave[0]) { printk(KERN_DEBUG "ISOFS: Interleaved files not (yet) supported.\n"); inode->i_size = 0; } /* I have no idea what file_unit_size is used for, so we will flag it for now */ if (de->file_unit_size[0] != 0) { printk(KERN_DEBUG "ISOFS: File unit size != 0 for ISO file (%ld).\n", inode->i_ino); } /* I have no idea what other flag bits are used for, so we will flag it for now */ #ifdef DEBUG if((de->flags[-high_sierra] & ~2)!= 0){ printk(KERN_DEBUG "ISOFS: Unusual flag settings for ISO file " "(%ld %x).\n", inode->i_ino, de->flags[-high_sierra]); } #endif inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = iso_date(de->date, high_sierra); inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0; ei->i_first_extent = (isonum_733(de->extent) + isonum_711(de->ext_attr_length)); /* Set the number of blocks for stat() - should be done before RR */ inode->i_blocks = (inode->i_size + 511) >> 9; /* * Now test for possible Rock Ridge extensions which will override * some of these numbers in the inode structure. */ if (!high_sierra) { parse_rock_ridge_inode(de, inode, relocated); /* if we want uid/gid set, override the rock ridge setting */ if (sbi->s_uid_set) inode->i_uid = sbi->s_uid; if (sbi->s_gid_set) inode->i_gid = sbi->s_gid; } /* Now set final access rights if overriding rock ridge setting */ if (S_ISDIR(inode->i_mode) && sbi->s_overriderockperm && sbi->s_dmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFDIR | sbi->s_dmode; if (S_ISREG(inode->i_mode) && sbi->s_overriderockperm && sbi->s_fmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFREG | sbi->s_fmode; /* Install the inode operations vector */ if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; switch (ei->i_file_format) { #ifdef CONFIG_ZISOFS case isofs_file_compressed: inode->i_data.a_ops = &zisofs_aops; break; #endif default: inode->i_data.a_ops = &isofs_aops; break; } } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &isofs_dir_inode_operations; inode->i_fop = &isofs_dir_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &page_symlink_inode_operations; inode->i_data.a_ops = &isofs_symlink_aops; } else /* XXX - parse_rock_ridge_inode() had already set i_rdev. */ init_special_inode(inode, inode->i_mode, inode->i_rdev); ret = 0; out: kfree(tmpde); if (bh) brelse(bh); return ret; out_badread: printk(KERN_WARNING "ISOFS: unable to read i-node block\n"); fail: goto out; }
166,269
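The commit message describes a CL relocation entry that points at another CL entry, so each lookup recurses into the next with no bound. The sketch below shows the shape of the fix, threading a relocated flag so at most one relocation hop is ever followed and a self-referencing entry is rejected; the types and the tiny in-file record table are hypothetical stand-ins, not the isofs structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical directory record that may carry a CL-style relocation. */
typedef struct {
    uint32_t block;                 /* where this record lives */
    int      has_relocation;        /* non-zero if a CL entry is present */
    uint32_t relocation_block;      /* target of the CL entry */
} dir_rec;

/* Tiny in-file "filesystem" so the sketch is self-contained; the two
 * records form exactly the CL loop the commit message warns about. */
static const dir_rec g_records[] = {
    { .block = 10, .has_relocation = 1, .relocation_block = 20 },
    { .block = 20, .has_relocation = 1, .relocation_block = 10 },
};

static const dir_rec *lookup_record(uint32_t block)
{
    for (size_t i = 0; i < sizeof(g_records) / sizeof(g_records[0]); i++)
        if (g_records[i].block == block)
            return &g_records[i];
    return NULL;
}

/* Resolve a record, following at most one level of relocation. The
 * `relocated` flag plays the role the fix threads through
 * isofs_read_inode()/parse_rock_ridge_inode(): once we are already inside
 * a relocated record, a further CL entry is treated as corruption. */
static const dir_rec *resolve_record(const dir_rec *rec, int relocated)
{
    if (rec == NULL || !rec->has_relocation)
        return rec;

    if (relocated) {
        fprintf(stderr, "corrupt image: relocation inside a relocated entry\n");
        return NULL;
    }
    if (rec->relocation_block == rec->block) {
        fprintf(stderr, "corrupt image: relocation points at itself\n");
        return NULL;
    }
    /* The callee runs with relocated == 1, so any nested CL entry bottoms
     * out here instead of recursing without bound. */
    return resolve_record(lookup_record(rec->relocation_block), 1);
}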
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void HTML_put_string(HTStructured * me, const char *s) { #ifdef USE_PRETTYSRC char *translated_string = NULL; #endif if (s == NULL || (LYMapsOnly && me->sp[0].tag_number != HTML_OBJECT)) return; #ifdef USE_PRETTYSRC if (psrc_convert_string) { StrAllocCopy(translated_string, s); TRANSLATE_AND_UNESCAPE_ENTITIES(&translated_string, TRUE, FALSE); s = (const char *) translated_string; } #endif switch (me->sp[0].tag_number) { case HTML_COMMENT: break; /* Do Nothing */ case HTML_TITLE: HTChunkPuts(&me->title, s); break; case HTML_STYLE: HTChunkPuts(&me->style_block, s); break; case HTML_SCRIPT: HTChunkPuts(&me->script, s); break; case HTML_PRE: /* Formatted text */ case HTML_LISTING: /* Literal text */ case HTML_XMP: case HTML_PLAINTEXT: /* * We guarantee that the style is up-to-date in begin_litteral */ HText_appendText(me->text, s); break; case HTML_OBJECT: HTChunkPuts(&me->object, s); break; case HTML_TEXTAREA: HTChunkPuts(&me->textarea, s); break; case HTML_SELECT: case HTML_OPTION: HTChunkPuts(&me->option, s); break; case HTML_MATH: HTChunkPuts(&me->math, s); break; default: /* Free format text? */ if (!me->sp->style->freeFormat) { /* * If we are within a preformatted text style not caught by the * cases above (HTML_PRE or similar may not be the last element * pushed on the style stack). - kw */ #ifdef USE_PRETTYSRC if (psrc_view) { /* * We do this so that a raw '\r' in the string will not be * interpreted as an internal request to break a line - passing * '\r' to HText_appendText is treated by it as a request to * insert a blank line - VH */ for (; *s; ++s) HTML_put_character(me, *s); } else #endif HText_appendText(me->text, s); break; } else { const char *p = s; char c; if (me->style_change) { for (; *p && ((*p == '\n') || (*p == '\r') || (*p == ' ') || (*p == '\t')); p++) ; /* Ignore leaders */ if (!*p) break; UPDATE_STYLE; } for (; *p; p++) { if (*p == 13 && p[1] != 10) { /* * Treat any '\r' which is not followed by '\n' as '\n', to * account for macintosh lineend in ALT attributes etc. - * kw */ c = '\n'; } else { c = *p; } if (me->style_change) { if ((c == '\n') || (c == ' ') || (c == '\t')) continue; /* Ignore it */ UPDATE_STYLE; } if (c == '\n') { if (!FIX_JAPANESE_SPACES) { if (me->in_word) { if (HText_getLastChar(me->text) != ' ') HText_appendCharacter(me->text, ' '); me->in_word = NO; } } } else if (c == ' ' || c == '\t') { if (HText_getLastChar(me->text) != ' ') HText_appendCharacter(me->text, ' '); } else if (c == '\r') { /* ignore */ } else { HText_appendCharacter(me->text, c); me->in_word = YES; } /* set the Last Character */ if (c == '\n' || c == '\t') { /* set it to a generic separator */ HText_setLastChar(me->text, ' '); } else if (c == '\r' && HText_getLastChar(me->text) == ' ') { /* * \r's are ignored. In order to keep collapsing spaces * correctly, we must default back to the previous * separator, if there was one. So we set LastChar to a * generic separator. */ HText_setLastChar(me->text, ' '); } else { HText_setLastChar(me->text, c); } } /* for */ } } /* end switch */ #ifdef USE_PRETTYSRC if (psrc_convert_string) { psrc_convert_string = FALSE; FREE(translated_string); } #endif } Commit Message: snapshot of project "lynx", label v2-8-9dev_15b CWE ID: CWE-416
void HTML_put_string(HTStructured * me, const char *s) { HTChunk *target = NULL; #ifdef USE_PRETTYSRC char *translated_string = NULL; #endif if (s == NULL || (LYMapsOnly && me->sp[0].tag_number != HTML_OBJECT)) return; #ifdef USE_PRETTYSRC if (psrc_convert_string) { StrAllocCopy(translated_string, s); TRANSLATE_AND_UNESCAPE_ENTITIES(&translated_string, TRUE, FALSE); s = (const char *) translated_string; } #endif switch (me->sp[0].tag_number) { case HTML_COMMENT: break; /* Do Nothing */ case HTML_TITLE: target = &me->title; break; case HTML_STYLE: target = &me->style_block; break; case HTML_SCRIPT: target = &me->script; break; case HTML_PRE: /* Formatted text */ case HTML_LISTING: /* Literal text */ case HTML_XMP: case HTML_PLAINTEXT: /* * We guarantee that the style is up-to-date in begin_litteral */ HText_appendText(me->text, s); break; case HTML_OBJECT: target = &me->object; break; case HTML_TEXTAREA: target = &me->textarea; break; case HTML_SELECT: case HTML_OPTION: target = &me->option; break; case HTML_MATH: target = &me->math; break; default: /* Free format text? */ if (!me->sp->style->freeFormat) { /* * If we are within a preformatted text style not caught by the * cases above (HTML_PRE or similar may not be the last element * pushed on the style stack). - kw */ #ifdef USE_PRETTYSRC if (psrc_view) { /* * We do this so that a raw '\r' in the string will not be * interpreted as an internal request to break a line - passing * '\r' to HText_appendText is treated by it as a request to * insert a blank line - VH */ for (; *s; ++s) HTML_put_character(me, *s); } else #endif HText_appendText(me->text, s); break; } else { const char *p = s; char c; if (me->style_change) { for (; *p && ((*p == '\n') || (*p == '\r') || (*p == ' ') || (*p == '\t')); p++) ; /* Ignore leaders */ if (!*p) break; UPDATE_STYLE; } for (; *p; p++) { if (*p == 13 && p[1] != 10) { /* * Treat any '\r' which is not followed by '\n' as '\n', to * account for macintosh lineend in ALT attributes etc. - * kw */ c = '\n'; } else { c = *p; } if (me->style_change) { if ((c == '\n') || (c == ' ') || (c == '\t')) continue; /* Ignore it */ UPDATE_STYLE; } if (c == '\n') { if (!FIX_JAPANESE_SPACES) { if (me->in_word) { if (HText_getLastChar(me->text) != ' ') HText_appendCharacter(me->text, ' '); me->in_word = NO; } } } else if (c == ' ' || c == '\t') { if (HText_getLastChar(me->text) != ' ') HText_appendCharacter(me->text, ' '); } else if (c == '\r') { /* ignore */ } else { HText_appendCharacter(me->text, c); me->in_word = YES; } /* set the Last Character */ if (c == '\n' || c == '\t') { /* set it to a generic separator */ HText_setLastChar(me->text, ' '); } else if (c == '\r' && HText_getLastChar(me->text) == ' ') { /* * \r's are ignored. In order to keep collapsing spaces * correctly, we must default back to the previous * separator, if there was one. So we set LastChar to a * generic separator. */ HText_setLastChar(me->text, ' '); } else { HText_setLastChar(me->text, c); } } /* for */ } } /* end switch */ if (target != NULL) { if (target->data == s) { CTRACE((tfp, "BUG: appending chunk to itself: `%.*s'\n", target->size, target->data)); } else { HTChunkPuts(target, s); } } #ifdef USE_PRETTYSRC if (psrc_convert_string) { psrc_convert_string = FALSE; FREE(translated_string); } #endif }
167,629
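The fix routes every append through a single target chunk and refuses target->data == s, because appending a chunk to its own storage can realloc, and thereby free, the very bytes still being copied. Below is a minimal growable-buffer sketch of that guard, with a hypothetical chunk_t in place of lynx's HTChunk.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal growable chunk. */
typedef struct {
    char  *data;
    size_t size;        /* bytes used, excluding the terminating NUL */
    size_t allocated;
} chunk_t;

/* Append `s`, refusing the self-append case: if `s` is the chunk's own
 * buffer, the realloc below may free the memory `s` still points at and
 * the memcpy would then read freed storage (the CWE-416 in this record). */
static int chunk_puts(chunk_t *c, const char *s)
{
    size_t add;

    if (s == NULL)
        return 0;
    if (c->data != NULL && s == c->data) {
        fprintf(stderr, "BUG: appending chunk to itself\n");
        return -1;
    }

    add = strlen(s);
    if (c->size + add + 1 > c->allocated) {
        size_t want = (c->size + add + 1) * 2;
        char  *p = realloc(c->data, want);
        if (p == NULL)
            return -1;
        c->data = p;
        c->allocated = want;
    }
    memcpy(c->data + c->size, s, add + 1);
    c->size += add;
    return 0;
}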
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void *hashtable_get(hashtable_t *hashtable, const char *key) { pair_t *pair; size_t hash; bucket_t *bucket; hash = hash_str(key); bucket = &hashtable->buckets[hash % num_buckets(hashtable)]; pair = hashtable_find_pair(hashtable, bucket, key, hash); if(!pair) return NULL; return pair->value; } Commit Message: CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing. CWE ID: CWE-310
void *hashtable_get(hashtable_t *hashtable, const char *key) { pair_t *pair; size_t hash; bucket_t *bucket; hash = hash_str(key); bucket = &hashtable->buckets[hash & hashmask(hashtable->order)]; pair = hashtable_find_pair(hashtable, bucket, key, hash); if(!pair) return NULL; return pair->value; }
166,530
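The fix selects the bucket with hash & hashmask(order) and, per the commit message, both changes and randomizes the hash so an attacker cannot precompute keys that all collide. The sketch below uses a seeded FNV-1a purely as an illustration; it is not jansson's actual hash, and the time()-based seeding is a placeholder for a real entropy source.

#include <stddef.h>
#include <stdint.h>
#include <time.h>

/* Per-process random seed; folding it into every hash means collision
 * sets cannot be computed offline, which is the degradation ("hash DoS")
 * that randomization defends against. */
static uint32_t hash_seed;

static void hash_seed_init(void)
{
    hash_seed = (uint32_t)time(NULL) ^ (uint32_t)(uintptr_t)&hash_seed;
    if (hash_seed == 0)
        hash_seed = 1;
}

/* Seeded FNV-1a over a NUL-terminated key. */
static uint32_t hash_str(const char *key)
{
    uint32_t h = 2166136261u ^ hash_seed;

    for (const unsigned char *p = (const unsigned char *)key; *p; p++) {
        h ^= *p;
        h *= 16777619u;
    }
    return h;
}

/* Power-of-two table: bucket = hash & mask, the same shape as the fixed
 * lookup's `hash & hashmask(hashtable->order)`. */
static size_t bucket_index(uint32_t hash, unsigned order)
{
    return hash & ((1u << order) - 1u);
}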
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xmlParseMisc(xmlParserCtxtPtr ctxt) { while (((RAW == '<') && (NXT(1) == '?')) || (CMP4(CUR_PTR, '<', '!', '-', '-')) || IS_BLANK_CH(CUR)) { if ((RAW == '<') && (NXT(1) == '?')) { xmlParsePI(ctxt); } else if (IS_BLANK_CH(CUR)) { NEXT; } else xmlParseComment(ctxt); } } Commit Message: libxml: XML_PARSER_EOF checks from upstream BUG=229019 TBR=cpu Review URL: https://chromiumcodereview.appspot.com/14053009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
xmlParseMisc(xmlParserCtxtPtr ctxt) { while ((ctxt->instate != XML_PARSER_EOF) && (((RAW == '<') && (NXT(1) == '?')) || (CMP4(CUR_PTR, '<', '!', '-', '-')) || IS_BLANK_CH(CUR))) { if ((RAW == '<') && (NXT(1) == '?')) { xmlParsePI(ctxt); } else if (IS_BLANK_CH(CUR)) { NEXT; } else xmlParseComment(ctxt); } }
171,294
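The fixed loop also tests ctxt->instate != XML_PARSER_EOF on every iteration, because the callees (xmlParsePI, xmlParseComment) can put the parser into the EOF state mid-loop and the caller must stop reading when they do. Below is a toy parser sketch of that loop-condition discipline; parser_t and parser_state are hypothetical, not libxml2 types.

#include <stddef.h>

typedef enum { PARSER_OK, PARSER_EOF } parser_state;

typedef struct {
    parser_state state;
    const char  *cur;        /* current position in a NUL-terminated input */
} parser_t;

/* A callee that may exhaust the input: it flips the state to PARSER_EOF
 * and relies on the caller noticing that on its next loop test. */
static void parse_comment(parser_t *p)
{
    while (*p->cur && !(p->cur[0] == '-' && p->cur[1] == '-' && p->cur[2] == '>'))
        p->cur++;
    if (*p->cur)
        p->cur += 3;         /* skip the closing "-->" */
    else
        p->state = PARSER_EOF;
}

/* Mirrors the fixed xmlParseMisc(): the state check is part of the loop
 * condition, so the loop ends as soon as any callee marks EOF. */
static void parse_misc(parser_t *p)
{
    while (p->state != PARSER_EOF &&
           (p->cur[0] == '<' || p->cur[0] == ' ' || p->cur[0] == '\n')) {
        if (p->cur[0] == '<')
            parse_comment(p);
        else
            p->cur++;
    }
}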
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void handle_pxe_menu(cmd_tbl_t *cmdtp, struct pxe_menu *cfg) { void *choice; struct menu *m; int err; #ifdef CONFIG_CMD_BMP /* display BMP if available */ if (cfg->bmp) { if (get_relfile(cmdtp, cfg->bmp, image_load_addr)) { run_command("cls", 0); bmp_display(image_load_addr, BMP_ALIGN_CENTER, BMP_ALIGN_CENTER); } else { printf("Skipping background bmp %s for failure\n", cfg->bmp); } } #endif m = pxe_menu_to_menu(cfg); if (!m) return; err = menu_get_choice(m, &choice); menu_destroy(m); /* * err == 1 means we got a choice back from menu_get_choice. * * err == -ENOENT if the menu was setup to select the default but no * default was set. in that case, we should continue trying to boot * labels that haven't been attempted yet. * * otherwise, the user interrupted or there was some other error and * we give up. */ if (err == 1) { err = label_boot(cmdtp, choice); if (!err) return; } else if (err != -ENOENT) { return; } boot_unattempted_labels(cmdtp, cfg); } Commit Message: Merge branch '2020-01-22-master-imports' - Re-add U8500 platform support - Add bcm968360bg support - Assorted Keymile fixes - Other assorted bugfixes CWE ID: CWE-787
void handle_pxe_menu(cmd_tbl_t *cmdtp, struct pxe_menu *cfg) { void *choice; struct menu *m; int err; #ifdef CONFIG_CMD_BMP /* display BMP if available */ if (cfg->bmp) { if (get_relfile(cmdtp, cfg->bmp, image_load_addr)) { if (CONFIG_IS_ENABLED(CMD_CLS)) run_command("cls", 0); bmp_display(image_load_addr, BMP_ALIGN_CENTER, BMP_ALIGN_CENTER); } else { printf("Skipping background bmp %s for failure\n", cfg->bmp); } } #endif m = pxe_menu_to_menu(cfg); if (!m) return; err = menu_get_choice(m, &choice); menu_destroy(m); /* * err == 1 means we got a choice back from menu_get_choice. * * err == -ENOENT if the menu was setup to select the default but no * default was set. in that case, we should continue trying to boot * labels that haven't been attempted yet. * * otherwise, the user interrupted or there was some other error and * we give up. */ if (err == 1) { err = label_boot(cmdtp, choice); if (!err) return; } else if (err != -ENOENT) { return; } boot_unattempted_labels(cmdtp, cfg); }
169,637
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int32 CommandBufferProxyImpl::RegisterTransferBuffer( base::SharedMemory* shared_memory, size_t size, int32 id_request) { if (last_state_.error != gpu::error::kNoError) return -1; int32 id; if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer( route_id_, shared_memory->handle(), // Returns FileDescriptor with auto_close off. size, id_request, &id))) { return -1; } return id; } Commit Message: Convert plugin and GPU process to brokered handle duplication. BUG=119250 Review URL: https://chromiumcodereview.appspot.com/9958034 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
int32 CommandBufferProxyImpl::RegisterTransferBuffer( base::SharedMemory* shared_memory, size_t size, int32 id_request) { if (last_state_.error != gpu::error::kNoError) return -1; // Returns FileDescriptor with auto_close off. base::SharedMemoryHandle handle = shared_memory->handle(); #if defined(OS_WIN) // Windows needs to explicitly duplicate the handle out to another process. if (!sandbox::BrokerDuplicateHandle(handle, channel_->gpu_pid(), &handle, FILE_MAP_WRITE, 0)) { return -1; } #endif int32 id; if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer( route_id_, handle, size, id_request, &id))) { return -1; } return id; }
170,927
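The patch makes the sending side duplicate the shared-memory handle into the GPU process, with only FILE_MAP_WRITE access, before the value goes over IPC, rather than handing the receiver a handle value to duplicate itself. The sketch below shows that idea with the plain Win32 DuplicateHandle call; the real patch uses sandbox::BrokerDuplicateHandle rather than calling DuplicateHandle directly, and error handling here is reduced to NULL returns.

#ifdef _WIN32
#include <windows.h>

/* Duplicate `local_handle` directly into the process identified by
 * `target_pid`, granting only the access the receiver needs. The returned
 * value is only meaningful inside the target process; the sender ships it
 * over IPC and never lets the receiver choose which handle to duplicate. */
static HANDLE duplicate_into_process(HANDLE local_handle, DWORD target_pid)
{
    HANDLE target_process;
    HANDLE remote_handle = NULL;

    target_process = OpenProcess(PROCESS_DUP_HANDLE, FALSE, target_pid);
    if (target_process == NULL)
        return NULL;

    if (!DuplicateHandle(GetCurrentProcess(), local_handle,
                         target_process, &remote_handle,
                         FILE_MAP_WRITE,   /* minimal rights, not DUPLICATE_SAME_ACCESS */
                         FALSE, 0)) {
        remote_handle = NULL;
    }

    CloseHandle(target_process);
    return remote_handle;
}
#endif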
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static v8::Handle<v8::Value> postMessageCallback(const v8::Arguments& args) { INC_STATS("DOM.TestActiveDOMObject.postMessage"); if (args.Length() < 1) return V8Proxy::throwNotEnoughArgumentsError(); TestActiveDOMObject* imp = V8TestActiveDOMObject::toNative(args.Holder()); STRING_TO_V8PARAMETER_EXCEPTION_BLOCK(V8Parameter<>, message, MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)); imp->postMessage(message); return v8::Handle<v8::Value>(); } Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=86983 Reviewed by Adam Barth. The objective is to pass Isolate around in V8 bindings. This patch passes Isolate to throwNotEnoughArgumentsError(). No tests. No change in behavior. * bindings/scripts/CodeGeneratorV8.pm: (GenerateArgumentsCountCheck): (GenerateEventConstructorCallback): * bindings/scripts/test/V8/V8Float64Array.cpp: (WebCore::Float64ArrayV8Internal::fooCallback): * bindings/scripts/test/V8/V8TestActiveDOMObject.cpp: (WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback): (WebCore::TestActiveDOMObjectV8Internal::postMessageCallback): * bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp: (WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback): * bindings/scripts/test/V8/V8TestEventConstructor.cpp: (WebCore::V8TestEventConstructor::constructorCallback): * bindings/scripts/test/V8/V8TestEventTarget.cpp: (WebCore::TestEventTargetV8Internal::itemCallback): (WebCore::TestEventTargetV8Internal::dispatchEventCallback): * bindings/scripts/test/V8/V8TestInterface.cpp: (WebCore::TestInterfaceV8Internal::supplementalMethod2Callback): (WebCore::V8TestInterface::constructorCallback): * bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp: (WebCore::TestMediaQueryListListenerV8Internal::methodCallback): * bindings/scripts/test/V8/V8TestNamedConstructor.cpp: (WebCore::V8TestNamedConstructorConstructorCallback): * bindings/scripts/test/V8/V8TestObj.cpp: (WebCore::TestObjV8Internal::voidMethodWithArgsCallback): (WebCore::TestObjV8Internal::intMethodWithArgsCallback): (WebCore::TestObjV8Internal::objMethodWithArgsCallback): (WebCore::TestObjV8Internal::methodWithSequenceArgCallback): (WebCore::TestObjV8Internal::methodReturningSequenceCallback): (WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback): (WebCore::TestObjV8Internal::serializedValueCallback): (WebCore::TestObjV8Internal::idbKeyCallback): (WebCore::TestObjV8Internal::optionsObjectCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback): (WebCore::TestObjV8Internal::methodWithCallbackArgCallback): (WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback): (WebCore::TestObjV8Internal::overloadedMethod1Callback): (WebCore::TestObjV8Internal::overloadedMethod2Callback): (WebCore::TestObjV8Internal::overloadedMethod3Callback): (WebCore::TestObjV8Internal::overloadedMethod4Callback): (WebCore::TestObjV8Internal::overloadedMethod5Callback): (WebCore::TestObjV8Internal::overloadedMethod6Callback): (WebCore::TestObjV8Internal::overloadedMethod7Callback): (WebCore::TestObjV8Internal::overloadedMethod11Callback): (WebCore::TestObjV8Internal::overloadedMethod12Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback): (WebCore::TestObjV8Internal::convert1Callback): (WebCore::TestObjV8Internal::convert2Callback): 
(WebCore::TestObjV8Internal::convert3Callback): (WebCore::TestObjV8Internal::convert4Callback): (WebCore::TestObjV8Internal::convert5Callback): (WebCore::TestObjV8Internal::strictFunctionCallback): (WebCore::V8TestObj::constructorCallback): * bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp: (WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback): (WebCore::V8TestSerializedScriptValueInterface::constructorCallback): * bindings/v8/ScriptController.cpp: (WebCore::setValueAndClosePopupCallback): * bindings/v8/V8Proxy.cpp: (WebCore::V8Proxy::throwNotEnoughArgumentsError): * bindings/v8/V8Proxy.h: (V8Proxy): * bindings/v8/custom/V8AudioContextCustom.cpp: (WebCore::V8AudioContext::constructorCallback): * bindings/v8/custom/V8DataViewCustom.cpp: (WebCore::V8DataView::getInt8Callback): (WebCore::V8DataView::getUint8Callback): (WebCore::V8DataView::setInt8Callback): (WebCore::V8DataView::setUint8Callback): * bindings/v8/custom/V8DirectoryEntryCustom.cpp: (WebCore::V8DirectoryEntry::getDirectoryCallback): (WebCore::V8DirectoryEntry::getFileCallback): * bindings/v8/custom/V8IntentConstructor.cpp: (WebCore::V8Intent::constructorCallback): * bindings/v8/custom/V8SVGLengthCustom.cpp: (WebCore::V8SVGLength::convertToSpecifiedUnitsCallback): * bindings/v8/custom/V8WebGLRenderingContextCustom.cpp: (WebCore::getObjectParameter): (WebCore::V8WebGLRenderingContext::getAttachedShadersCallback): (WebCore::V8WebGLRenderingContext::getExtensionCallback): (WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback): (WebCore::V8WebGLRenderingContext::getParameterCallback): (WebCore::V8WebGLRenderingContext::getProgramParameterCallback): (WebCore::V8WebGLRenderingContext::getShaderParameterCallback): (WebCore::V8WebGLRenderingContext::getUniformCallback): (WebCore::vertexAttribAndUniformHelperf): (WebCore::uniformHelperi): (WebCore::uniformMatrixHelper): * bindings/v8/custom/V8WebKitMutationObserverCustom.cpp: (WebCore::V8WebKitMutationObserver::constructorCallback): (WebCore::V8WebKitMutationObserver::observeCallback): * bindings/v8/custom/V8WebSocketCustom.cpp: (WebCore::V8WebSocket::constructorCallback): (WebCore::V8WebSocket::sendCallback): * bindings/v8/custom/V8XMLHttpRequestCustom.cpp: (WebCore::V8XMLHttpRequest::openCallback): git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
static v8::Handle<v8::Value> postMessageCallback(const v8::Arguments& args) { INC_STATS("DOM.TestActiveDOMObject.postMessage"); if (args.Length() < 1) return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate()); TestActiveDOMObject* imp = V8TestActiveDOMObject::toNative(args.Holder()); STRING_TO_V8PARAMETER_EXCEPTION_BLOCK(V8Parameter<>, message, MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)); imp->postMessage(message); return v8::Handle<v8::Value>(); }
171,067
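The change in this record is plumbing: throwNotEnoughArgumentsError() now receives the v8::Isolate from its caller (fetched with args.GetIsolate()) instead of the helper locating the current isolate itself. Below is a small C sketch of that explicit-context-threading pattern; engine_ctx is a hypothetical stand-in for the isolate.

#include <stdio.h>

/* Hypothetical per-engine context. */
typedef struct {
    const char *name;
    int         error_count;
} engine_ctx;

/* The helper takes the context explicitly; callers already hold it, so
 * threading it through costs one argument and removes the hidden lookup
 * of global "current" state that the old signature implied. */
static void throw_not_enough_arguments(engine_ctx *ctx)
{
    ctx->error_count++;
    fprintf(stderr, "[%s] not enough arguments\n", ctx->name);
}

static int checked_call(engine_ctx *ctx, int argc)
{
    if (argc < 1) {
        throw_not_enough_arguments(ctx);   /* explicit context, no global lookup */
        return -1;
    }
    return 0;
}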
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */ /* {{{ proto string SplFileObject::current() Commit Message: Fix bug #72262 - do not overflow int CWE ID: CWE-190
SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */ /* {{{ proto string SplFileObject::current()
167,054
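The commit message ("do not overflow int", CWE-190) points at arithmetic on line lengths that are tracked in an int; the overflow sits in the line-reading internals rather than in the method shown, so nothing visible changes in this record. Below is a sketch of the kind of guard involved, with hypothetical names, checking the growth before it can wrap rather than after.

#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Append `add_len` bytes to a line buffer whose used length is tracked in
 * an int. The guard rejects any growth that would push the length past
 * INT_MAX, so a wrapped (negative) value never reaches the allocator or a
 * copy length. */
static int line_append(char **line, int *line_len, const char *src, size_t add_len)
{
    char *p;

    if (*line_len < 0 || add_len >= (size_t)(INT_MAX - *line_len))
        return -1;                       /* would overflow the int length */

    p = realloc(*line, (size_t)*line_len + add_len + 1);
    if (p == NULL)
        return -1;

    memcpy(p + *line_len, src, add_len);
    *line_len += (int)add_len;
    p[*line_len] = '\0';
    *line = p;
    return 0;
}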
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int64_t infilesize, total_samples; DFFFileHeader dff_file_header; DFFChunkHeader dff_chunk_header; uint32_t bcount; infilesize = DoGetFileSize (infile); memcpy (&dff_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) || bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } #if 1 // this might be a little too picky... WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat); if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) { error_line ("%s is not a valid .DFF file (by total size)!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("file header indicated length = %lld", dff_file_header.ckDataSize); #endif while (1) { if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) || bcount != sizeof (DFFChunkHeader)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (debug_logging_mode) error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize); if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) { uint32_t version; if (dff_chunk_header.ckDataSize != sizeof (version) || !DoReadFile (infile, &version, sizeof (version), &bcount) || bcount != sizeof (version)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &version, sizeof (version))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&version, "L"); if (debug_logging_mode) error_line ("dsdiff file version = 0x%08x", version); } else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) { char *prop_chunk; if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize); prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize); if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) || bcount != dff_chunk_header.ckDataSize) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (prop_chunk); return WAVPACK_SOFT_ERROR; } if (!strncmp (prop_chunk, "SND ", 4)) { char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize; uint16_t numChannels = 0, chansSpecified, chanMask = 0; uint32_t 
sampleRate; while (eptr - cptr >= sizeof (dff_chunk_header)) { memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header)); cptr += sizeof (dff_chunk_header); WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) { if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) { memcpy (&sampleRate, cptr, sizeof (sampleRate)); WavpackBigEndianToNative (&sampleRate, "L"); cptr += dff_chunk_header.ckDataSize; if (debug_logging_mode) error_line ("got sample rate of %u Hz", sampleRate); } else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) { memcpy (&numChannels, cptr, sizeof (numChannels)); WavpackBigEndianToNative (&numChannels, "S"); cptr += sizeof (numChannels); chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4; if (numChannels < chansSpecified || numChannels < 1 || numChannels > 256) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } while (chansSpecified--) { if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4)) chanMask |= 0x1; else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4)) chanMask |= 0x2; else if (!strncmp (cptr, "LS ", 4)) chanMask |= 0x10; else if (!strncmp (cptr, "RS ", 4)) chanMask |= 0x20; else if (!strncmp (cptr, "C ", 4)) chanMask |= 0x4; else if (!strncmp (cptr, "LFE ", 4)) chanMask |= 0x8; else if (debug_logging_mode) error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]); cptr += 4; } if (debug_logging_mode) error_line ("%d channels, mask = 0x%08x", numChannels, chanMask); } else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) { if (strncmp (cptr, "DSD ", 4)) { error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!", cptr [0], cptr [1], cptr [2], cptr [3]); free (prop_chunk); return WAVPACK_SOFT_ERROR; } cptr += dff_chunk_header.ckDataSize; } else { if (debug_logging_mode) error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); cptr += dff_chunk_header.ckDataSize; } } else { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } } if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this DSDIFF file already has channel order information!"); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (chanMask) config->channel_mask = chanMask; config->bits_per_sample = 8; config->bytes_per_sample = 1; config->num_channels = numChannels; config->sample_rate = sampleRate / 8; config->qmode |= QMODE_DSD_MSB_FIRST; } else if (debug_logging_mode) error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes", prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize); free (prop_chunk); } else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) { if (!config->num_channels) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } total_samples = dff_chunk_header.ckDataSize / config->num_channels; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1); char *buff; if (bytes_to_copy < 0 || bytes_to_copy > 4194304) { error_line ("%s is not a valid .DFF file!", 
infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (debug_logging_mode) error_line ("setting configuration with %lld samples", total_samples); if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; } Commit Message: issue #67: make sure sample rate is specified and non-zero in DFF files CWE ID: CWE-824
int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int64_t infilesize, total_samples; DFFFileHeader dff_file_header; DFFChunkHeader dff_chunk_header; uint32_t bcount; infilesize = DoGetFileSize (infile); memcpy (&dff_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) || bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } #if 1 // this might be a little too picky... WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat); if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) { error_line ("%s is not a valid .DFF file (by total size)!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("file header indicated length = %lld", dff_file_header.ckDataSize); #endif while (1) { if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) || bcount != sizeof (DFFChunkHeader)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (debug_logging_mode) error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize); if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) { uint32_t version; if (dff_chunk_header.ckDataSize != sizeof (version) || !DoReadFile (infile, &version, sizeof (version), &bcount) || bcount != sizeof (version)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &version, sizeof (version))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&version, "L"); if (debug_logging_mode) error_line ("dsdiff file version = 0x%08x", version); } else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) { char *prop_chunk; if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize); prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize); if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) || bcount != dff_chunk_header.ckDataSize) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (prop_chunk); return WAVPACK_SOFT_ERROR; } if (!strncmp (prop_chunk, "SND ", 4)) { char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize; uint16_t numChannels = 0, chansSpecified, chanMask = 0; uint32_t 
sampleRate = 0; while (eptr - cptr >= sizeof (dff_chunk_header)) { memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header)); cptr += sizeof (dff_chunk_header); WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) { if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) { memcpy (&sampleRate, cptr, sizeof (sampleRate)); WavpackBigEndianToNative (&sampleRate, "L"); cptr += dff_chunk_header.ckDataSize; if (debug_logging_mode) error_line ("got sample rate of %u Hz", sampleRate); } else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) { memcpy (&numChannels, cptr, sizeof (numChannels)); WavpackBigEndianToNative (&numChannels, "S"); cptr += sizeof (numChannels); chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4; if (numChannels < chansSpecified || numChannels < 1 || numChannels > 256) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } while (chansSpecified--) { if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4)) chanMask |= 0x1; else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4)) chanMask |= 0x2; else if (!strncmp (cptr, "LS ", 4)) chanMask |= 0x10; else if (!strncmp (cptr, "RS ", 4)) chanMask |= 0x20; else if (!strncmp (cptr, "C ", 4)) chanMask |= 0x4; else if (!strncmp (cptr, "LFE ", 4)) chanMask |= 0x8; else if (debug_logging_mode) error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]); cptr += 4; } if (debug_logging_mode) error_line ("%d channels, mask = 0x%08x", numChannels, chanMask); } else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) { if (strncmp (cptr, "DSD ", 4)) { error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!", cptr [0], cptr [1], cptr [2], cptr [3]); free (prop_chunk); return WAVPACK_SOFT_ERROR; } cptr += dff_chunk_header.ckDataSize; } else { if (debug_logging_mode) error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); cptr += dff_chunk_header.ckDataSize; } } else { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } } if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this DSDIFF file already has channel order information!"); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (chanMask) config->channel_mask = chanMask; config->bits_per_sample = 8; config->bytes_per_sample = 1; config->num_channels = numChannels; config->sample_rate = sampleRate / 8; config->qmode |= QMODE_DSD_MSB_FIRST; } else if (debug_logging_mode) error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes", prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize); free (prop_chunk); } else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) { if (!config->num_channels || !config->sample_rate) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } total_samples = dff_chunk_header.ckDataSize / config->num_channels; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1); char *buff; if (bytes_to_copy < 0 || bytes_to_copy > 4194304) { error_line ("%s is not 
a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (debug_logging_mode) error_line ("setting configuration with %lld samples", total_samples); if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; }
169,692
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static struct fileIdentDesc *udf_find_entry(struct inode *dir, const struct qstr *child, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { struct fileIdentDesc *fi = NULL; loff_t f_pos; int block, flen; unsigned char *fname = NULL; unsigned char *nameptr; uint8_t lfi; uint16_t liu; loff_t size; struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); int isdotdot = child->len == 2 && child->name[0] == '.' && child->name[1] == '.'; size = udf_ext0_offset(dir) + dir->i_size; f_pos = udf_ext0_offset(dir); fibh->sbh = fibh->ebh = NULL; fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) goto out_err; block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); if (!fibh->sbh) goto out_err; } fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!fname) goto out_err; while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset); if (!fi) goto out_err; liu = le16_to_cpu(cfi->lengthOfImpUse); lfi = cfi->lengthFileIdent; if (fibh->sbh == fibh->ebh) { nameptr = fi->fileIdent + liu; } else { int poffset; /* Unpaded ending offset */ poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; if (poffset >= lfi) nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi); else { nameptr = fname; memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); } } if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) && isdotdot) goto out_ok; if (!lfi) continue; flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); if (flen && udf_match(flen, fname, child->len, child->name)) goto out_ok; } out_err: fi = NULL; if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); out_ok: brelse(epos.bh); kfree(fname); return fi; } Commit Message: udf: Check path length when reading symlink Symlink reading code does not check whether the resulting path fits into the page provided by the generic code. This isn't as easy as just checking the symlink size because of various encoding conversions we perform on path. So we have to check whether there is still enough space in the buffer on the fly. CC: [email protected] Reported-by: Carl Henrik Lunde <[email protected]> Signed-off-by: Jan Kara <[email protected]> CWE ID: CWE-17
static struct fileIdentDesc *udf_find_entry(struct inode *dir, const struct qstr *child, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { struct fileIdentDesc *fi = NULL; loff_t f_pos; int block, flen; unsigned char *fname = NULL; unsigned char *nameptr; uint8_t lfi; uint16_t liu; loff_t size; struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); int isdotdot = child->len == 2 && child->name[0] == '.' && child->name[1] == '.'; size = udf_ext0_offset(dir) + dir->i_size; f_pos = udf_ext0_offset(dir); fibh->sbh = fibh->ebh = NULL; fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) goto out_err; block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); if (!fibh->sbh) goto out_err; } fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!fname) goto out_err; while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset); if (!fi) goto out_err; liu = le16_to_cpu(cfi->lengthOfImpUse); lfi = cfi->lengthFileIdent; if (fibh->sbh == fibh->ebh) { nameptr = fi->fileIdent + liu; } else { int poffset; /* Unpaded ending offset */ poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; if (poffset >= lfi) nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi); else { nameptr = fname; memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); } } if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) && isdotdot) goto out_ok; if (!lfi) continue; flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname, UDF_NAME_LEN); if (flen && udf_match(flen, fname, child->len, child->name)) goto out_ok; } out_err: fi = NULL; if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); out_ok: brelse(epos.bh); kfree(fname); return fi; }
166,756
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void ide_dma_cb(void *opaque, int ret) { IDEState *s = opaque; int n; int64_t sector_num; bool stay_active = false; if (ret == -ECANCELED) { return; } if (ret < 0) { int op = IDE_RETRY_DMA; if (s->dma_cmd == IDE_DMA_READ) op |= IDE_RETRY_READ; else if (s->dma_cmd == IDE_DMA_TRIM) op |= IDE_RETRY_TRIM; if (ide_handle_rw_error(s, -ret, op)) { return; } } n = s->io_buffer_size >> 9; if (n > s->nsector) { /* The PRDs were longer than needed for this request. Shorten them so * we don't get a negative remainder. The Active bit must remain set * after the request completes. */ n = s->nsector; stay_active = true; } sector_num = ide_get_sector(s); if (n > 0) { assert(s->io_buffer_size == s->sg.size); dma_buf_commit(s, s->io_buffer_size); sector_num += n; ide_set_sector(s, sector_num); s->nsector -= n; } /* end of transfer ? */ if (s->nsector == 0) { s->status = READY_STAT | SEEK_STAT; ide_set_irq(s->bus); goto eot; } /* launch next transfer */ n = s->nsector; s->io_buffer_index = 0; s->io_buffer_size = n * 512; if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) == 0) { /* The PRDs were too short. Reset the Active bit, but don't raise an * interrupt. */ s->status = READY_STAT | SEEK_STAT; goto eot; } printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n", sector_num, n, s->dma_cmd); #endif if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) && !ide_sect_range_ok(s, sector_num, n)) { ide_dma_error(s); return; } switch (s->dma_cmd) { case IDE_DMA_READ: s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num, ide_dma_cb, s); break; case IDE_DMA_WRITE: s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num, ide_dma_cb, s); break; case IDE_DMA_TRIM: s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num, ide_issue_trim, ide_dma_cb, s, DMA_DIRECTION_TO_DEVICE); break; } return; eot: if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { block_acct_done(blk_get_stats(s->blk), &s->acct); } ide_set_inactive(s, stay_active); } Commit Message: CWE ID: CWE-399
void ide_dma_cb(void *opaque, int ret) { IDEState *s = opaque; int n; int64_t sector_num; bool stay_active = false; if (ret == -ECANCELED) { return; } if (ret < 0) { int op = IDE_RETRY_DMA; if (s->dma_cmd == IDE_DMA_READ) op |= IDE_RETRY_READ; else if (s->dma_cmd == IDE_DMA_TRIM) op |= IDE_RETRY_TRIM; if (ide_handle_rw_error(s, -ret, op)) { return; } } n = s->io_buffer_size >> 9; if (n > s->nsector) { /* The PRDs were longer than needed for this request. Shorten them so * we don't get a negative remainder. The Active bit must remain set * after the request completes. */ n = s->nsector; stay_active = true; } sector_num = ide_get_sector(s); if (n > 0) { assert(s->io_buffer_size == s->sg.size); dma_buf_commit(s, s->io_buffer_size); sector_num += n; ide_set_sector(s, sector_num); s->nsector -= n; } /* end of transfer ? */ if (s->nsector == 0) { s->status = READY_STAT | SEEK_STAT; ide_set_irq(s->bus); goto eot; } /* launch next transfer */ n = s->nsector; s->io_buffer_index = 0; s->io_buffer_size = n * 512; if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) < 512) { /* The PRDs were too short. Reset the Active bit, but don't raise an * interrupt. */ s->status = READY_STAT | SEEK_STAT; dma_buf_commit(s, 0); goto eot; } printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n", sector_num, n, s->dma_cmd); #endif if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) && !ide_sect_range_ok(s, sector_num, n)) { ide_dma_error(s); return; } switch (s->dma_cmd) { case IDE_DMA_READ: s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num, ide_dma_cb, s); break; case IDE_DMA_WRITE: s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num, ide_dma_cb, s); break; case IDE_DMA_TRIM: s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num, ide_issue_trim, ide_dma_cb, s, DMA_DIRECTION_TO_DEVICE); break; } return; eot: if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { block_acct_done(blk_get_stats(s->blk), &s->acct); } ide_set_inactive(s, stay_active); }
164,839
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: OMX_ERRORTYPE SoftAVC::internalGetParameter(OMX_INDEXTYPE index, OMX_PTR params) { switch (index) { case OMX_IndexParamVideoBitrate: { OMX_VIDEO_PARAM_BITRATETYPE *bitRate = (OMX_VIDEO_PARAM_BITRATETYPE *)params; if (bitRate->nPortIndex != 1) { return OMX_ErrorUndefined; } bitRate->eControlRate = OMX_Video_ControlRateVariable; bitRate->nTargetBitrate = mBitrate; return OMX_ErrorNone; } case OMX_IndexParamVideoAvc: { OMX_VIDEO_PARAM_AVCTYPE *avcParams = (OMX_VIDEO_PARAM_AVCTYPE *)params; if (avcParams->nPortIndex != 1) { return OMX_ErrorUndefined; } OMX_VIDEO_AVCLEVELTYPE omxLevel = OMX_VIDEO_AVCLevel41; if (OMX_ErrorNone != ConvertAvcSpecLevelToOmxAvcLevel(mAVCEncLevel, &omxLevel)) { return OMX_ErrorUndefined; } avcParams->eProfile = OMX_VIDEO_AVCProfileBaseline; avcParams->eLevel = omxLevel; avcParams->nRefFrames = 1; avcParams->bUseHadamard = OMX_TRUE; avcParams->nAllowedPictureTypes = (OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB); avcParams->nRefIdx10ActiveMinus1 = 0; avcParams->nRefIdx11ActiveMinus1 = 0; avcParams->bWeightedPPrediction = OMX_FALSE; avcParams->bconstIpred = OMX_FALSE; avcParams->bDirect8x8Inference = OMX_FALSE; avcParams->bDirectSpatialTemporal = OMX_FALSE; avcParams->nCabacInitIdc = 0; return OMX_ErrorNone; } default: return SoftVideoEncoderOMXComponent::internalGetParameter(index, params); } } Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access Bug: 27207275 Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d CWE ID: CWE-119
OMX_ERRORTYPE SoftAVC::internalGetParameter(OMX_INDEXTYPE index, OMX_PTR params) { switch (index) { case OMX_IndexParamVideoBitrate: { OMX_VIDEO_PARAM_BITRATETYPE *bitRate = (OMX_VIDEO_PARAM_BITRATETYPE *)params; if (!isValidOMXParam(bitRate)) { return OMX_ErrorBadParameter; } if (bitRate->nPortIndex != 1) { return OMX_ErrorUndefined; } bitRate->eControlRate = OMX_Video_ControlRateVariable; bitRate->nTargetBitrate = mBitrate; return OMX_ErrorNone; } case OMX_IndexParamVideoAvc: { OMX_VIDEO_PARAM_AVCTYPE *avcParams = (OMX_VIDEO_PARAM_AVCTYPE *)params; if (!isValidOMXParam(avcParams)) { return OMX_ErrorBadParameter; } if (avcParams->nPortIndex != 1) { return OMX_ErrorUndefined; } OMX_VIDEO_AVCLEVELTYPE omxLevel = OMX_VIDEO_AVCLevel41; if (OMX_ErrorNone != ConvertAvcSpecLevelToOmxAvcLevel(mAVCEncLevel, &omxLevel)) { return OMX_ErrorUndefined; } avcParams->eProfile = OMX_VIDEO_AVCProfileBaseline; avcParams->eLevel = omxLevel; avcParams->nRefFrames = 1; avcParams->bUseHadamard = OMX_TRUE; avcParams->nAllowedPictureTypes = (OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB); avcParams->nRefIdx10ActiveMinus1 = 0; avcParams->nRefIdx11ActiveMinus1 = 0; avcParams->bWeightedPPrediction = OMX_FALSE; avcParams->bconstIpred = OMX_FALSE; avcParams->bDirect8x8Inference = OMX_FALSE; avcParams->bDirectSpatialTemporal = OMX_FALSE; avcParams->nCabacInitIdc = 0; return OMX_ErrorNone; } default: return SoftVideoEncoderOMXComponent::internalGetParameter(index, params); } }
174,200
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static gboolean irssi_ssl_verify(SSL *ssl, SSL_CTX *ctx, X509 *cert) { if (SSL_get_verify_result(ssl) != X509_V_OK) { unsigned char md[EVP_MAX_MD_SIZE]; unsigned int n; char *str; g_warning("Could not verify SSL servers certificate:"); if ((str = X509_NAME_oneline(X509_get_subject_name(cert), 0, 0)) == NULL) g_warning(" Could not get subject-name from peer certificate"); else { g_warning(" Subject : %s", str); free(str); } if ((str = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0)) == NULL) g_warning(" Could not get issuer-name from peer certificate"); else { g_warning(" Issuer : %s", str); free(str); } if (! X509_digest(cert, EVP_md5(), md, &n)) g_warning(" Could not get fingerprint from peer certificate"); else { char hex[] = "0123456789ABCDEF"; char fp[EVP_MAX_MD_SIZE*3]; if (n < sizeof(fp)) { unsigned int i; for (i = 0; i < n; i++) { fp[i*3+0] = hex[(md[i] >> 4) & 0xF]; fp[i*3+1] = hex[(md[i] >> 0) & 0xF]; fp[i*3+2] = i == n - 1 ? '\0' : ':'; } g_warning(" MD5 Fingerprint : %s", fp); } } return FALSE; } return TRUE; } Commit Message: Check if an SSL certificate matches the hostname of the server we are connecting to git-svn-id: http://svn.irssi.org/repos/irssi/trunk@5104 dbcabf3a-b0e7-0310-adc4-f8d773084564 CWE ID: CWE-20
static gboolean irssi_ssl_verify(SSL *ssl, SSL_CTX *ctx, X509 *cert) /* Checks if the given string has internal NUL characters. */ static gboolean has_internal_nul(const char* str, int len) { /* Remove trailing nul characters. They would give false alarms */ while (len > 0 && str[len-1] == 0) len--; return strlen(str) != len; } /* tls_dns_name - Extract valid DNS name from subjectAltName value */ static const char *tls_dns_name(const GENERAL_NAME * gn) { const char *dnsname; /* We expect the OpenSSL library to construct GEN_DNS extension objects as ASN1_IA5STRING values. Check we got the right union member. */ if (ASN1_STRING_type(gn->d.ia5) != V_ASN1_IA5STRING) { g_warning("Invalid ASN1 value type in subjectAltName"); return NULL; } /* Safe to treat as an ASCII string possibly holding a DNS name */ dnsname = (char *) ASN1_STRING_data(gn->d.ia5); if (has_internal_nul(dnsname, ASN1_STRING_length(gn->d.ia5))) { g_warning("Internal NUL in subjectAltName"); return NULL; } return dnsname; } /* tls_text_name - extract certificate property value by name */ static char *tls_text_name(X509_NAME *name, int nid) { int pos; X509_NAME_ENTRY *entry; ASN1_STRING *entry_str; int utf8_length; unsigned char *utf8_value; char *result; if (name == 0 || (pos = X509_NAME_get_index_by_NID(name, nid, -1)) < 0) { return NULL; } entry = X509_NAME_get_entry(name, pos); g_return_val_if_fail(entry != NULL, NULL); entry_str = X509_NAME_ENTRY_get_data(entry); g_return_val_if_fail(entry_str != NULL, NULL); /* Convert everything into UTF-8. It's up to OpenSSL to do something reasonable when converting ASCII formats that contain non-ASCII content. */ if ((utf8_length = ASN1_STRING_to_UTF8(&utf8_value, entry_str)) < 0) { g_warning("Error decoding ASN.1 type=%d", ASN1_STRING_type(entry_str)); return NULL; } if (has_internal_nul((char *)utf8_value, utf8_length)) { g_warning("NUL character in hostname in certificate"); OPENSSL_free(utf8_value); return NULL; } result = g_strdup((char *) utf8_value); OPENSSL_free(utf8_value); return result; } /** check if a hostname in the certificate matches the hostname we used for the connection */ static gboolean match_hostname(const char *cert_hostname, const char *hostname) { const char *hostname_left; if (!strcasecmp(cert_hostname, hostname)) { /* exact match */ return TRUE; } else if (cert_hostname[0] == '*' && cert_hostname[1] == '.' && cert_hostname[2] != 0) { /* wildcard match */ /* The initial '*' matches exactly one hostname component */ hostname_left = strchr(hostname, '.'); if (hostname_left != NULL && ! strcasecmp(hostname_left + 1, cert_hostname + 2)) { return TRUE; } } return FALSE; } /* based on verify_extract_name from tls_client.c in postfix */ static gboolean irssi_ssl_verify_hostname(X509 *cert, const char *hostname) { int gen_index, gen_count; gboolean matched = FALSE, has_dns_name = FALSE; const char *cert_dns_name; char *cert_subject_cn; const GENERAL_NAME *gn; STACK_OF(GENERAL_NAME) * gens; /* Verify the dNSName(s) in the peer certificate against the hostname. */ gens = X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0); if (gens) { gen_count = sk_GENERAL_NAME_num(gens); for (gen_index = 0; gen_index < gen_count && !matched; ++gen_index) { gn = sk_GENERAL_NAME_value(gens, gen_index); if (gn->type != GEN_DNS) continue; /* Even if we have an invalid DNS name, we still ultimately ignore the CommonName, because subjectAltName:DNS is present (though malformed). 
*/ has_dns_name = TRUE; cert_dns_name = tls_dns_name(gn); if (cert_dns_name && *cert_dns_name) { matched = match_hostname(cert_dns_name, hostname); } } /* Free stack *and* member GENERAL_NAME objects */ sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free); } if (has_dns_name) { if (! matched) { /* The CommonName in the issuer DN is obsolete when SubjectAltName is available. */ g_warning("None of the Subject Alt Names in the certificate match hostname '%s'", hostname); } return matched; } else { /* No subjectAltNames, look at CommonName */ cert_subject_cn = tls_text_name(X509_get_subject_name(cert), NID_commonName); if (cert_subject_cn && *cert_subject_cn) { matched = match_hostname(cert_subject_cn, hostname); if (! matched) { g_warning("SSL certificate common name '%s' doesn't match host name '%s'", cert_subject_cn, hostname); } } else { g_warning("No subjectAltNames and no valid common name in certificate"); } free(cert_subject_cn); } return matched; } static gboolean irssi_ssl_verify(SSL *ssl, SSL_CTX *ctx, const char* hostname, X509 *cert) { if (SSL_get_verify_result(ssl) != X509_V_OK) { unsigned char md[EVP_MAX_MD_SIZE]; unsigned int n; char *str; g_warning("Could not verify SSL servers certificate:"); if ((str = X509_NAME_oneline(X509_get_subject_name(cert), 0, 0)) == NULL) g_warning(" Could not get subject-name from peer certificate"); else { g_warning(" Subject : %s", str); free(str); } if ((str = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0)) == NULL) g_warning(" Could not get issuer-name from peer certificate"); else { g_warning(" Issuer : %s", str); free(str); } if (! X509_digest(cert, EVP_md5(), md, &n)) g_warning(" Could not get fingerprint from peer certificate"); else { char hex[] = "0123456789ABCDEF"; char fp[EVP_MAX_MD_SIZE*3]; if (n < sizeof(fp)) { unsigned int i; for (i = 0; i < n; i++) { fp[i*3+0] = hex[(md[i] >> 4) & 0xF]; fp[i*3+1] = hex[(md[i] >> 0) & 0xF]; fp[i*3+2] = i == n - 1 ? '\0' : ':'; } g_warning(" MD5 Fingerprint : %s", fp); } } return FALSE; } else if (! irssi_ssl_verify_hostname(cert, hostname)){ return FALSE; } return TRUE; }
165,518
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; ns = current->nsproxy->ipc_ns; if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (mtype < 1) return -EINVAL; msg = load_msg(mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_free; } for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IWUGO)) goto out_unlock_free; err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) goto out_unlock_free; if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { break; } /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; goto out_unlock_free; } ss_add(msq, &s); ipc_rcu_getref(msq); msg_unlock(msq); schedule(); ipc_lock_by_ptr(&msq->q_perm); ipc_rcu_putref(msq); if (msq->q_perm.deleted) { err = -EIDRM; goto out_unlock_free; } ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; goto out_unlock_free; } } msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); if (!pipelined_send(msq, msg)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; atomic_add(msgsz, &ns->msg_bytes); atomic_inc(&ns->msg_hdrs); } err = 0; msg = NULL; out_unlock_free: msg_unlock(msq); out_free: if (msg != NULL) free_msg(msg); return err; } Commit Message: ipc,sem: fine grained locking for semtimedop Introduce finer grained locking for semtimedop, to handle the common case of a program wanting to manipulate one semaphore from an array with multiple semaphores. If the call is a semop manipulating just one semaphore in an array with multiple semaphores, only take the lock for that semaphore itself. If the call needs to manipulate multiple semaphores, or another caller is in a transaction that manipulates multiple semaphores, the sem_array lock is taken, as well as all the locks for the individual semaphores. On a 24 CPU system, performance numbers with the semop-multi test with N threads and N semaphores, look like this: vanilla Davidlohr's Davidlohr's + Davidlohr's + threads patches rwlock patches v3 patches 10 610652 726325 1783589 2142206 20 341570 365699 1520453 1977878 30 288102 307037 1498167 2037995 40 290714 305955 1612665 2256484 50 288620 312890 1733453 2650292 60 289987 306043 1649360 2388008 70 291298 306347 1723167 2717486 80 290948 305662 1729545 2763582 90 290996 306680 1736021 2757524 100 292243 306700 1773700 3059159 [[email protected]: do not call sem_lock when bogus sma] [[email protected]: make refcounter atomic] Signed-off-by: Rik van Riel <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Acked-by: Davidlohr Bueso <[email protected]> Cc: Chegu Vinod <[email protected]> Cc: Jason Low <[email protected]> Reviewed-by: Michel Lespinasse <[email protected]> Cc: Peter Hurley <[email protected]> Cc: Stanislav Kinsbursky <[email protected]> Tested-by: Emmanuel Benisty <[email protected]> Tested-by: Sedat Dilek <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-189
long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; ns = current->nsproxy->ipc_ns; if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (mtype < 1) return -EINVAL; msg = load_msg(mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_free; } for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IWUGO)) goto out_unlock_free; err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) goto out_unlock_free; if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { break; } /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; goto out_unlock_free; } ss_add(msq, &s); if (!ipc_rcu_getref(msq)) { err = -EIDRM; goto out_unlock_free; } msg_unlock(msq); schedule(); ipc_lock_by_ptr(&msq->q_perm); ipc_rcu_putref(msq); if (msq->q_perm.deleted) { err = -EIDRM; goto out_unlock_free; } ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; goto out_unlock_free; } } msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); if (!pipelined_send(msq, msg)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; atomic_add(msgsz, &ns->msg_bytes); atomic_inc(&ns->msg_hdrs); } err = 0; msg = NULL; out_unlock_free: msg_unlock(msq); out_free: if (msg != NULL) free_msg(msg); return err; }
165,967
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) { struct mem_cgroup *memcg; struct page *page; spinlock_t *ptl; pte_t entry; pte_unmap(page_table); /* Check if we need to add a guard page to the stack */ if (check_stack_guard_page(vma, address) < 0) return VM_FAULT_SIGSEGV; /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), vma->vm_page_prot)); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto unlock; goto setpte; } /* Allocate our own private page. */ if (unlikely(anon_vma_prepare(vma))) goto oom; page = alloc_zeroed_user_highpage_movable(vma, address); if (!page) goto oom; if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) goto oom_free_page; /* * The memory barrier inside __SetPageUptodate makes sure that * preceeding stores to the page contents become visible before * the set_pte_at() write. */ __SetPageUptodate(page); entry = mk_pte(page, vma->vm_page_prot); if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto release; inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); mem_cgroup_commit_charge(page, memcg, false); lru_cache_add_active_or_unevictable(page, vma); setpte: set_pte_at(mm, address, page_table, entry); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); unlock: pte_unmap_unlock(page_table, ptl); return 0; release: mem_cgroup_cancel_charge(page, memcg); page_cache_release(page); goto unlock; oom_free_page: page_cache_release(page); oom: return VM_FAULT_OOM; } Commit Message: mm: avoid setting up anonymous pages into file mapping Reading page fault handler code I've noticed that under right circumstances kernel would map anonymous pages into file mappings: if the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated on ->mmap(), kernel would handle page fault to not populated pte with do_anonymous_page(). Let's change page fault handler to use do_anonymous_page() only on anonymous VMA (->vm_ops == NULL) and make sure that the VMA is not shared. For file mappings without vm_ops->fault() or shred VMA without vm_ops, page fault on pte_none() entry would lead to SIGBUS. Signed-off-by: Kirill A. Shutemov <[email protected]> Acked-by: Oleg Nesterov <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Willy Tarreau <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-20
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) { struct mem_cgroup *memcg; struct page *page; spinlock_t *ptl; pte_t entry; pte_unmap(page_table); /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; /* Check if we need to add a guard page to the stack */ if (check_stack_guard_page(vma, address) < 0) return VM_FAULT_SIGSEGV; /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), vma->vm_page_prot)); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto unlock; goto setpte; } /* Allocate our own private page. */ if (unlikely(anon_vma_prepare(vma))) goto oom; page = alloc_zeroed_user_highpage_movable(vma, address); if (!page) goto oom; if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) goto oom_free_page; /* * The memory barrier inside __SetPageUptodate makes sure that * preceeding stores to the page contents become visible before * the set_pte_at() write. */ __SetPageUptodate(page); entry = mk_pte(page, vma->vm_page_prot); if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto release; inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); mem_cgroup_commit_charge(page, memcg, false); lru_cache_add_active_or_unevictable(page, vma); setpte: set_pte_at(mm, address, page_table, entry); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); unlock: pte_unmap_unlock(page_table, ptl); return 0; release: mem_cgroup_cancel_charge(page, memcg); page_cache_release(page); goto unlock; oom_free_page: page_cache_release(page); oom: return VM_FAULT_OOM; }
167,567
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void usage_exit() { fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile> " "<keyframe-interval> [<error-resilient>]\nSee comments in " "simple_encoder.c for more information.\n", exec_name); exit(EXIT_FAILURE); } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void usage_exit(void) { fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile> " "<keyframe-interval> [<error-resilient>]\nSee comments in " "simple_encoder.c for more information.\n", exec_name); exit(EXIT_FAILURE); }
174,490
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void Reset() { error_nframes_ = 0; droppable_nframes_ = 0; } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void Reset() { error_nframes_ = 0; droppable_nframes_ = 0; pattern_switch_ = 0; }
174,543
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: int extract_status_code(char *buffer, size_t size) { char *buf_code; char *begin; char *end = buffer + size; size_t inc = 0; int code; /* Allocate the room */ buf_code = (char *)MALLOC(10); /* Status-Code extraction */ while (buffer < end && *buffer++ != ' ') ; begin = buffer; while (buffer < end && *buffer++ != ' ') inc++; strncat(buf_code, begin, inc); code = atoi(buf_code); FREE(buf_code); return code; } Commit Message: Fix buffer overflow in extract_status_code() Issue #960 identified that the buffer allocated for copying the HTTP status code could overflow if the http response was corrupted. This commit changes the way the status code is read, avoids copying data, and also ensures that the status code is three digits long, is non-negative and occurs on the first line of the response. Signed-off-by: Quentin Armitage <[email protected]> CWE ID: CWE-119
int extract_status_code(char *buffer, size_t size) { char *end = buffer + size; unsigned long code; /* Status-Code extraction */ while (buffer < end && *buffer != ' ' && *buffer != '\r') buffer++; buffer++; if (buffer + 3 >= end || *buffer == ' ' || buffer[3] != ' ') return 0; code = strtoul(buffer, &end, 10); if (buffer + 3 != end) return 0; return code; }
168,978
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag; int wid; int w, wstart; int thick = im->thick; if (color == gdAntiAliased) { /* gdAntiAliased passed as color: use the much faster, much cheaper and equally attractive gdImageAALine implementation. That clips too, so don't clip twice. */ gdImageAALine(im, x1, y1, x2, y2, im->AA_color); return; } /* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no points need to be drawn */ if (!clip_1d(&x1,&y1,&x2,&y2,gdImageSX(im)) || !clip_1d(&y1,&x1,&y2,&x2,gdImageSY(im))) { return; } dx = abs (x2 - x1); dy = abs (y2 - y1); if (dx == 0) { gdImageVLine(im, x1, y1, y2, color); return; } else if (dy == 0) { gdImageHLine(im, y1, x1, x2, color); return; } if (dy <= dx) { /* More-or-less horizontal. use wid for vertical stroke */ /* Doug Claar: watch out for NaN in atan2 (2.0.5) */ if ((dx == 0) && (dy == 0)) { wid = 1; } else { /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! */ double ac = cos (atan2 (dy, dx)); if (ac != 0) { wid = thick / ac; } else { wid = 1; } if (wid == 0) { wid = 1; } } d = 2 * dy - dx; incr1 = 2 * dy; incr2 = 2 * (dy - dx); if (x1 > x2) { x = x2; y = y2; ydirflag = (-1); xend = x1; } else { x = x1; y = y1; ydirflag = 1; xend = x2; } /* Set up line thickness */ wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel(im, x, w, color); } if (((y2 - y1) * ydirflag) > 0) { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y++; d += incr2; } wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, x, w, color); } } } else { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y--; d += incr2; } wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, x, w, color); } } } } else { /* More-or-less vertical. use wid for horizontal stroke */ /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! */ double as = sin (atan2 (dy, dx)); if (as != 0) { wid = thick / as; } else { wid = 1; } if (wid == 0) { wid = 1; } d = 2 * dx - dy; incr1 = 2 * dx; incr2 = 2 * (dx - dy); if (y1 > y2) { y = y2; x = x2; yend = y1; xdirflag = (-1); } else { y = y1; x = x1; yend = y2; xdirflag = 1; } /* Set up line thickness */ wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, w, y, color); } if (((x2 - x1) * xdirflag) > 0) { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x++; d += incr2; } wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, w, y, color); } } } else { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x--; d += incr2; } wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, w, y, color); } } } } } Commit Message: iFixed bug #72446 - Integer Overflow in gdImagePaletteToTrueColor() resulting in heap overflow CWE ID: CWE-190
void gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag; int wid; int w, wstart; int thick = im->thick; if (color == gdAntiAliased) { /* gdAntiAliased passed as color: use the much faster, much cheaper and equally attractive gdImageAALine implementation. That clips too, so don't clip twice. */ gdImageAALine(im, x1, y1, x2, y2, im->AA_color); return; } /* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no points need to be drawn */ if (!clip_1d(&x1,&y1,&x2,&y2,gdImageSX(im)) || !clip_1d(&y1,&x1,&y2,&x2,gdImageSY(im))) { return; } dx = abs (x2 - x1); dy = abs (y2 - y1); if (dx == 0) { gdImageVLine(im, x1, y1, y2, color); return; } else if (dy == 0) { gdImageHLine(im, y1, x1, x2, color); return; } if (dy <= dx) { /* More-or-less horizontal. use wid for vertical stroke */ /* Doug Claar: watch out for NaN in atan2 (2.0.5) */ if ((dx == 0) && (dy == 0)) { wid = 1; } else { /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! */ double ac = cos (atan2 (dy, dx)); if (ac != 0) { wid = thick / ac; } else { wid = 1; } if (wid == 0) { wid = 1; } } d = 2 * dy - dx; incr1 = 2 * dy; incr2 = 2 * (dy - dx); if (x1 > x2) { x = x2; y = y2; ydirflag = (-1); xend = x1; } else { x = x1; y = y1; ydirflag = 1; xend = x2; } /* Set up line thickness */ wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel(im, x, w, color); } if (((y2 - y1) * ydirflag) > 0) { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y++; d += incr2; } wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, x, w, color); } } } else { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y--; d += incr2; } wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, x, w, color); } } } } else { /* More-or-less vertical. use wid for horizontal stroke */ /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! */ double as = sin (atan2 (dy, dx)); if (as != 0) { wid = thick / as; } else { wid = 1; } if (wid == 0) { wid = 1; } d = 2 * dx - dy; incr1 = 2 * dx; incr2 = 2 * (dx - dy); if (y1 > y2) { y = y2; x = x2; yend = y1; xdirflag = (-1); } else { y = y1; x = x1; yend = y2; xdirflag = 1; } /* Set up line thickness */ wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, w, y, color); } if (((x2 - x1) * xdirflag) > 0) { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x++; d += incr2; } wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, w, y, color); } } } else { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x--; d += incr2; } wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetPixel (im, w, y, color); } } } } }
167,129
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void GpuVideoDecodeAccelerator::Initialize( const media::VideoCodecProfile profile, IPC::Message* init_done_msg) { DCHECK(!video_decode_accelerator_.get()); DCHECK(!init_done_msg_); DCHECK(init_done_msg); init_done_msg_ = init_done_msg; #if defined(OS_CHROMEOS) || defined(OS_WIN) DCHECK(stub_ && stub_->decoder()); #if defined(OS_WIN) if (base::win::GetVersion() < base::win::VERSION_WIN7) { NOTIMPLEMENTED() << "HW video decode acceleration not available."; NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); return; } DLOG(INFO) << "Initializing DXVA HW decoder for windows."; DXVAVideoDecodeAccelerator* video_decoder = new DXVAVideoDecodeAccelerator(this); #elif defined(OS_CHROMEOS) // OS_WIN #if defined(ARCH_CPU_ARMEL) OmxVideoDecodeAccelerator* video_decoder = new OmxVideoDecodeAccelerator(this); video_decoder->SetEglState( gfx::GLSurfaceEGL::GetHardwareDisplay(), stub_->decoder()->GetGLContext()->GetHandle()); #elif defined(ARCH_CPU_X86_FAMILY) VaapiVideoDecodeAccelerator* video_decoder = new VaapiVideoDecodeAccelerator(this); gfx::GLContextGLX* glx_context = static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext()); GLXContext glx_context_handle = static_cast<GLXContext>(glx_context->GetHandle()); video_decoder->SetGlxState(glx_context->display(), glx_context_handle); #endif // ARCH_CPU_ARMEL #endif // OS_WIN video_decode_accelerator_ = video_decoder; if (!video_decode_accelerator_->Initialize(profile)) NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); #else // Update RenderViewImpl::createMediaPlayer when adding clauses. NOTIMPLEMENTED() << "HW video decode acceleration not available."; NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); #endif // defined(OS_CHROMEOS) || defined(OS_WIN) } Commit Message: Revert 137988 - VAVDA is the hardware video decode accelerator for Chrome on Linux and ChromeOS for Intel CPUs (Sandy Bridge and newer). This CL enables VAVDA acceleration for ChromeOS, both for HTML5 video and Flash. The feature is currently hidden behind a command line flag and can be enabled by adding the --enable-vaapi parameter to command line. BUG=117062 TEST=Manual runs of test streams. Change-Id: I386e16739e2ef2230f52a0a434971b33d8654699 Review URL: https://chromiumcodereview.appspot.com/9814001 This is causing crbug.com/129103 [email protected] Review URL: https://chromiumcodereview.appspot.com/10411066 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@138208 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void GpuVideoDecodeAccelerator::Initialize( const media::VideoCodecProfile profile, IPC::Message* init_done_msg) { DCHECK(!video_decode_accelerator_.get()); DCHECK(!init_done_msg_); DCHECK(init_done_msg); init_done_msg_ = init_done_msg; #if (defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)) || defined(OS_WIN) DCHECK(stub_ && stub_->decoder()); #if defined(OS_WIN) if (base::win::GetVersion() < base::win::VERSION_WIN7) { NOTIMPLEMENTED() << "HW video decode acceleration not available."; NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); return; } DLOG(INFO) << "Initializing DXVA HW decoder for windows."; DXVAVideoDecodeAccelerator* video_decoder = new DXVAVideoDecodeAccelerator(this); #else // OS_WIN OmxVideoDecodeAccelerator* video_decoder = new OmxVideoDecodeAccelerator(this); video_decoder->SetEglState( gfx::GLSurfaceEGL::GetHardwareDisplay(), stub_->decoder()->GetGLContext()->GetHandle()); #endif // OS_WIN video_decode_accelerator_ = video_decoder; if (!video_decode_accelerator_->Initialize(profile)) NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); #else // Update RenderViewImpl::createMediaPlayer when adding clauses. NOTIMPLEMENTED() << "HW video decode acceleration not available."; NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); #endif // defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) }
170,702
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool EditorClientBlackBerry::shouldChangeSelectedRange(Range* fromRange, Range* toRange, EAffinity affinity, bool stillSelecting) { if (m_webPagePrivate->m_dumpRenderTree) return m_webPagePrivate->m_dumpRenderTree->shouldChangeSelectedDOMRangeToDOMRangeAffinityStillSelecting(fromRange, toRange, static_cast<int>(affinity), stillSelecting); Frame* frame = m_webPagePrivate->focusedOrMainFrame(); if (frame && frame->document()) { if (frame->document()->focusedNode() && frame->document()->focusedNode()->hasTagName(HTMLNames::selectTag)) return false; if (m_webPagePrivate->m_inputHandler->isInputMode() && fromRange && toRange && (fromRange->startContainer() == toRange->startContainer())) m_webPagePrivate->m_inputHandler->notifyClientOfKeyboardVisibilityChange(true); } return true; } Commit Message: [BlackBerry] Prevent text selection inside Colour and Date/Time input fields https://bugs.webkit.org/show_bug.cgi?id=111733 Reviewed by Rob Buis. PR 305194. Prevent selection for popup input fields as they are buttons. Informally Reviewed Gen Mak. * WebCoreSupport/EditorClientBlackBerry.cpp: (WebCore::EditorClientBlackBerry::shouldChangeSelectedRange): git-svn-id: svn://svn.chromium.org/blink/trunk@145121 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
bool EditorClientBlackBerry::shouldChangeSelectedRange(Range* fromRange, Range* toRange, EAffinity affinity, bool stillSelecting) { if (m_webPagePrivate->m_dumpRenderTree) return m_webPagePrivate->m_dumpRenderTree->shouldChangeSelectedDOMRangeToDOMRangeAffinityStillSelecting(fromRange, toRange, static_cast<int>(affinity), stillSelecting); Frame* frame = m_webPagePrivate->focusedOrMainFrame(); if (frame && frame->document()) { if (Node* focusedNode = frame->document()->focusedNode()) { if (focusedNode->hasTagName(HTMLNames::selectTag)) return false; if (focusedNode->isElementNode() && DOMSupport::isPopupInputField(static_cast<Element*>(focusedNode))) return false; } if (m_webPagePrivate->m_inputHandler->isInputMode() && fromRange && toRange && (fromRange->startContainer() == toRange->startContainer())) m_webPagePrivate->m_inputHandler->notifyClientOfKeyboardVisibilityChange(true); } return true; }
170,780
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static void DecrementUntilZero(int* count) { (*count)--; if (!(*count)) base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::RunLoop::QuitCurrentWhenIdleClosureDeprecated()); } Commit Message: Migrate ServiceProcessControl tests off of QuitCurrent*Deprecated(). Bug: 844016 Change-Id: I9403b850456c8ee06cd2539f7cec9599302e81a0 Reviewed-on: https://chromium-review.googlesource.com/1126576 Commit-Queue: Wez <[email protected]> Reviewed-by: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#573131} CWE ID: CWE-94
static void DecrementUntilZero(int* count) { { base::RunLoop run_loop; cloud_print_proxy->GetCloudPrintProxyInfo( base::BindOnce([](base::OnceClosure done, bool, const std::string&, const std::string&) { std::move(done).Run(); }, run_loop.QuitClosure())); run_loop.Run(); } }
172,049
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static size_t _php_mb_regex_get_option_string(char *str, size_t len, OnigOptionType option, OnigSyntaxType *syntax) { size_t len_left = len; size_t len_req = 0; char *p = str; char c; if ((option & ONIG_OPTION_IGNORECASE) != 0) { if (len_left > 0) { --len_left; *(p++) = 'i'; } ++len_req; } if ((option & ONIG_OPTION_EXTEND) != 0) { if (len_left > 0) { --len_left; *(p++) = 'x'; } ++len_req; } if ((option & (ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE)) == (ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE)) { if (len_left > 0) { --len_left; *(p++) = 'p'; } ++len_req; } else { if ((option & ONIG_OPTION_MULTILINE) != 0) { if (len_left > 0) { --len_left; *(p++) = 'm'; } ++len_req; } if ((option & ONIG_OPTION_SINGLELINE) != 0) { if (len_left > 0) { --len_left; *(p++) = 's'; } ++len_req; } } if ((option & ONIG_OPTION_FIND_LONGEST) != 0) { if (len_left > 0) { --len_left; *(p++) = 'l'; } ++len_req; } if ((option & ONIG_OPTION_FIND_NOT_EMPTY) != 0) { if (len_left > 0) { --len_left; *(p++) = 'n'; } ++len_req; } c = 0; if (syntax == ONIG_SYNTAX_JAVA) { c = 'j'; } else if (syntax == ONIG_SYNTAX_GNU_REGEX) { c = 'u'; } else if (syntax == ONIG_SYNTAX_GREP) { c = 'g'; } else if (syntax == ONIG_SYNTAX_EMACS) { c = 'c'; } else if (syntax == ONIG_SYNTAX_RUBY) { c = 'r'; } else if (syntax == ONIG_SYNTAX_PERL) { c = 'z'; } else if (syntax == ONIG_SYNTAX_POSIX_BASIC) { c = 'b'; } else if (syntax == ONIG_SYNTAX_POSIX_EXTENDED) { c = 'd'; } if (c != 0) { if (len_left > 0) { --len_left; *(p++) = c; } ++len_req; } if (len_left > 0) { --len_left; *(p++) = '\0'; } ++len_req; if (len < len_req) { return len_req; } return 0; } Commit Message: Fix bug #72402: _php_mb_regex_ereg_replace_exec - double free CWE ID: CWE-415
static size_t _php_mb_regex_get_option_string(char *str, size_t len, OnigOptionType option, OnigSyntaxType *syntax) { size_t len_left = len; size_t len_req = 0; char *p = str; char c; if ((option & ONIG_OPTION_IGNORECASE) != 0) { if (len_left > 0) { --len_left; *(p++) = 'i'; } ++len_req; } if ((option & ONIG_OPTION_EXTEND) != 0) { if (len_left > 0) { --len_left; *(p++) = 'x'; } ++len_req; } if ((option & (ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE)) == (ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE)) { if (len_left > 0) { --len_left; *(p++) = 'p'; } ++len_req; } else { if ((option & ONIG_OPTION_MULTILINE) != 0) { if (len_left > 0) { --len_left; *(p++) = 'm'; } ++len_req; } if ((option & ONIG_OPTION_SINGLELINE) != 0) { if (len_left > 0) { --len_left; *(p++) = 's'; } ++len_req; } } if ((option & ONIG_OPTION_FIND_LONGEST) != 0) { if (len_left > 0) { --len_left; *(p++) = 'l'; } ++len_req; } if ((option & ONIG_OPTION_FIND_NOT_EMPTY) != 0) { if (len_left > 0) { --len_left; *(p++) = 'n'; } ++len_req; } c = 0; if (syntax == ONIG_SYNTAX_JAVA) { c = 'j'; } else if (syntax == ONIG_SYNTAX_GNU_REGEX) { c = 'u'; } else if (syntax == ONIG_SYNTAX_GREP) { c = 'g'; } else if (syntax == ONIG_SYNTAX_EMACS) { c = 'c'; } else if (syntax == ONIG_SYNTAX_RUBY) { c = 'r'; } else if (syntax == ONIG_SYNTAX_PERL) { c = 'z'; } else if (syntax == ONIG_SYNTAX_POSIX_BASIC) { c = 'b'; } else if (syntax == ONIG_SYNTAX_POSIX_EXTENDED) { c = 'd'; } if (c != 0) { if (len_left > 0) { --len_left; *(p++) = c; } ++len_req; } if (len_left > 0) { --len_left; *(p++) = '\0'; } ++len_req; if (len < len_req) { return len_req; } return 0; }
167,118
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int usb_enumerate_device_otg(struct usb_device *udev) { int err = 0; #ifdef CONFIG_USB_OTG /* * OTG-aware devices on OTG-capable root hubs may be able to use SRP, * to wake us after we've powered off VBUS; and HNP, switching roles * "host" to "peripheral". The OTG descriptor helps figure this out. */ if (!udev->bus->is_b_host && udev->config && udev->parent == udev->bus->root_hub) { struct usb_otg_descriptor *desc = NULL; struct usb_bus *bus = udev->bus; unsigned port1 = udev->portnum; /* descriptor may appear anywhere in config */ err = __usb_get_extra_descriptor(udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), USB_DT_OTG, (void **) &desc); if (err || !(desc->bmAttributes & USB_OTG_HNP)) return 0; dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n", (port1 == bus->otg_port) ? "" : "non-"); /* enable HNP before suspend, it's simpler */ if (port1 == bus->otg_port) { bus->b_hnp_enable = 1; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_B_HNP_ENABLE, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) { /* * OTG MESSAGE: report errors here, * customize to match your product. */ dev_err(&udev->dev, "can't set HNP mode: %d\n", err); bus->b_hnp_enable = 0; } } else if (desc->bLength == sizeof (struct usb_otg_descriptor)) { /* Set a_alt_hnp_support for legacy otg device */ err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_A_ALT_HNP_SUPPORT, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) dev_err(&udev->dev, "set a_alt_hnp_support failed: %d\n", err); } } #endif return err; } Commit Message: USB: check usb_get_extra_descriptor for proper size When reading an extra descriptor, we need to properly check the minimum and maximum size allowed, to prevent from invalid data being sent by a device. Reported-by: Hui Peng <[email protected]> Reported-by: Mathias Payer <[email protected]> Co-developed-by: Linus Torvalds <[email protected]> Signed-off-by: Hui Peng <[email protected]> Signed-off-by: Mathias Payer <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID: CWE-400
static int usb_enumerate_device_otg(struct usb_device *udev) { int err = 0; #ifdef CONFIG_USB_OTG /* * OTG-aware devices on OTG-capable root hubs may be able to use SRP, * to wake us after we've powered off VBUS; and HNP, switching roles * "host" to "peripheral". The OTG descriptor helps figure this out. */ if (!udev->bus->is_b_host && udev->config && udev->parent == udev->bus->root_hub) { struct usb_otg_descriptor *desc = NULL; struct usb_bus *bus = udev->bus; unsigned port1 = udev->portnum; /* descriptor may appear anywhere in config */ err = __usb_get_extra_descriptor(udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), USB_DT_OTG, (void **) &desc, sizeof(*desc)); if (err || !(desc->bmAttributes & USB_OTG_HNP)) return 0; dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n", (port1 == bus->otg_port) ? "" : "non-"); /* enable HNP before suspend, it's simpler */ if (port1 == bus->otg_port) { bus->b_hnp_enable = 1; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_B_HNP_ENABLE, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) { /* * OTG MESSAGE: report errors here, * customize to match your product. */ dev_err(&udev->dev, "can't set HNP mode: %d\n", err); bus->b_hnp_enable = 0; } } else if (desc->bLength == sizeof (struct usb_otg_descriptor)) { /* Set a_alt_hnp_support for legacy otg device */ err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_A_ALT_HNP_SUPPORT, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) dev_err(&udev->dev, "set a_alt_hnp_support failed: %d\n", err); } } #endif return err; }
168,959
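A general pattern sits behind the fix in the record above (index 168,959): a helper that scans a variable-length descriptor blob should hand back a record only after checking that the record advertises at least as many bytes as the caller's struct needs, which is what passing sizeof(*desc) into the lookup accomplishes. The sketch below is illustrative only; the buffer layout, the names find_descriptor and desc_hdr, and the 0x09 type tag are invented for the example and are not taken from the USB stack.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical two-byte header carried by every record in the blob. */
struct desc_hdr {
    unsigned char bLength;          /* total record size, header included */
    unsigned char bDescriptorType;  /* record type tag */
};

/*
 * Return the first record of the requested type, but only if the record
 * claims at least min_size bytes and fits inside the buffer.  This mirrors
 * the idea of requiring a minimum size before typed fields are dereferenced.
 */
static const void *find_descriptor(const unsigned char *buf, size_t len,
                                   unsigned char type, size_t min_size)
{
    size_t off = 0;

    while (off + sizeof(struct desc_hdr) <= len) {
        const struct desc_hdr *h = (const struct desc_hdr *)(buf + off);

        if (h->bLength < sizeof(struct desc_hdr) || off + h->bLength > len)
            return NULL;            /* malformed length: stop scanning */
        if (h->bDescriptorType == type && h->bLength >= min_size)
            return h;               /* large enough to expose typed fields */
        off += h->bLength;
    }
    return NULL;
}

int main(void)
{
    /* One 3-byte record of type 0x09: too small if the caller needs 5 bytes. */
    const unsigned char blob[] = { 0x03, 0x09, 0xAA };

    printf("need 3: %s\n", find_descriptor(blob, sizeof(blob), 0x09, 3) ? "found" : "missing");
    printf("need 5: %s\n", find_descriptor(blob, sizeof(blob), 0x09, 5) ? "found" : "missing");
    return 0;
}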
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); } Commit Message: mlock: fix mlock count can not decrease in race condition Kefeng reported that when running the follow test, the mlock count in meminfo will increase permanently: [1] testcase linux:~ # cat test_mlockal grep Mlocked /proc/meminfo for j in `seq 0 10` do for i in `seq 4 15` do ./p_mlockall >> log & done sleep 0.2 done # wait some time to let mlock counter decrease and 5s may not enough sleep 5 grep Mlocked /proc/meminfo linux:~ # cat p_mlockall.c #include <sys/mman.h> #include <stdlib.h> #include <stdio.h> #define SPACE_LEN 4096 int main(int argc, char ** argv) { int ret; void *adr = malloc(SPACE_LEN); if (!adr) return -1; ret = mlockall(MCL_CURRENT | MCL_FUTURE); printf("mlcokall ret = %d\n", ret); ret = munlockall(); printf("munlcokall ret = %d\n", ret); free(adr); return 0; } In __munlock_pagevec() we should decrement NR_MLOCK for each page where we clear the PageMlocked flag. Commit 1ebb7cc6a583 ("mm: munlock: batch NR_MLOCK zone state updates") has introduced a bug where we don't decrement NR_MLOCK for pages where we clear the flag, but fail to isolate them from the lru list (e.g. when the pages are on some other cpu's percpu pagevec). Since PageMlocked stays cleared, the NR_MLOCK accounting gets permanently disrupted by this. Fix it by counting the number of page whose PageMlock flag is cleared. 
Fixes: 1ebb7cc6a583 (" mm: munlock: batch NR_MLOCK zone state updates") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Yisheng Xie <[email protected]> Reported-by: Kefeng Wang <[email protected]> Tested-by: Kefeng Wang <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Joern Engel <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Michel Lespinasse <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Xishi Qiu <[email protected]> Cc: zhongjiang <[email protected]> Cc: Hanjun Guo <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-20
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked = -nr; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } else { delta_munlocked++; } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); }
169,402
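The accounting change in the record above (index 169,402) boils down to a counting rule: start the delta at minus the batch size and add one back for every element that turned out not to need the transition, so the global counter moves only by the number of real state changes. The standalone sketch below restates that rule with invented names (page_flags, apply_munlock, FLAG_MLOCKED); it is not kernel code.

#include <stdio.h>

#define NPAGES 8
#define FLAG_MLOCKED 0x1

/*
 * Clear the mlocked bit on each page and return the delta to apply to a
 * global "mlocked pages" counter.  Pages that were not mlocked to begin
 * with must not decrement the counter, which is why the delta starts at
 * -n and gets one added back for every page left untouched.
 */
static int apply_munlock(int *page_flags, int n)
{
    int delta = -n;
    int i;

    for (i = 0; i < n; i++) {
        if (page_flags[i] & FLAG_MLOCKED)
            page_flags[i] &= ~FLAG_MLOCKED;   /* real transition: keep the -1 */
        else
            delta++;                          /* nothing cleared: undo the -1 */
    }
    return delta;
}

int main(void)
{
    int flags[NPAGES] = { 1, 0, 1, 1, 0, 0, 1, 0 };
    int counter = 4;                          /* four pages currently mlocked */

    counter += apply_munlock(flags, NPAGES);
    printf("counter after munlock: %d\n", counter);  /* expect 0, not -4 */
    return 0;
}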
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static __init int sctp_init(void) { int i; int status = -EINVAL; unsigned long goal; unsigned long limit; int max_share; int order; sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", sizeof(struct sctp_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_bucket_cachep) goto out; sctp_chunk_cachep = kmem_cache_create("sctp_chunk", sizeof(struct sctp_chunk), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_chunk_cachep) goto err_chunk_cachep; status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL); if (status) goto err_percpu_counter_init; /* Implementation specific variables. */ /* Initialize default stream count setup information. */ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_sctp_mem[0] = limit / 4 * 3; sysctl_sctp_mem[1] = limit; sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; /* Set per-socket limits to no more than 1/128 the pressure threshold*/ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); max_share = min(4UL*1024*1024, limit); sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1); sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; sysctl_sctp_wmem[1] = 16*1024; sysctl_sctp_wmem[2] = max(64*1024, max_share); /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. */ if (totalram_pages >= (128 * 1024)) goal = totalram_pages >> (22 - PAGE_SHIFT); else goal = totalram_pages >> (24 - PAGE_SHIFT); for (order = 0; (1UL << order) < goal; order++) ; do { sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_hashbucket); if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) continue; sctp_assoc_hashtable = (struct sctp_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_assoc_hashtable && --order > 0); if (!sctp_assoc_hashtable) { pr_err("Failed association hash alloc\n"); status = -ENOMEM; goto err_ahash_alloc; } for (i = 0; i < sctp_assoc_hashsize; i++) { rwlock_init(&sctp_assoc_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain); } /* Allocate and initialize the endpoint hash table. */ sctp_ep_hashsize = 64; sctp_ep_hashtable = kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); if (!sctp_ep_hashtable) { pr_err("Failed endpoint_hash alloc\n"); status = -ENOMEM; goto err_ehash_alloc; } for (i = 0; i < sctp_ep_hashsize; i++) { rwlock_init(&sctp_ep_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); } /* Allocate and initialize the SCTP port hash table. 
*/ do { sctp_port_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_bind_hashbucket); if ((sctp_port_hashsize > (64 * 1024)) && order > 0) continue; sctp_port_hashtable = (struct sctp_bind_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_port_hashtable && --order > 0); if (!sctp_port_hashtable) { pr_err("Failed bind hash alloc\n"); status = -ENOMEM; goto err_bhash_alloc; } for (i = 0; i < sctp_port_hashsize; i++) { spin_lock_init(&sctp_port_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); } pr_info("Hash tables configured (established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); sctp_v4_pf_init(); sctp_v6_pf_init(); status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); if (status) goto err_register_pernet_subsys; status = sctp_v4_add_protocol(); if (status) goto err_add_protocol; /* Register SCTP with inet6 layer. */ status = sctp_v6_add_protocol(); if (status) goto err_v6_add_protocol; out: return status; err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: unregister_pernet_subsys(&sctp_net_ops); err_register_pernet_subsys: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); err_bhash_alloc: kfree(sctp_ep_hashtable); err_ehash_alloc: free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * sizeof(struct sctp_hashbucket))); err_ahash_alloc: percpu_counter_destroy(&sctp_sockets_allocated); err_percpu_counter_init: kmem_cache_destroy(sctp_chunk_cachep); err_chunk_cachep: kmem_cache_destroy(sctp_bucket_cachep); goto out; } Commit Message: sctp: fix race on protocol/netns initialization Consider sctp module is unloaded and is being requested because an user is creating a sctp socket. During initialization, sctp will add the new protocol type and then initialize pernet subsys: status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); The problem is that after those calls to sctp_v{4,6}_protosw_init(), it is possible for userspace to create SCTP sockets like if the module is already fully loaded. If that happens, one of the possible effects is that we will have readers for net->sctp.local_addr_list list earlier than expected and sctp_net_init() does not take precautions while dealing with that list, leading to a potential panic but not limited to that, as sctp_sock_init() will copy a bunch of blank/partially initialized values from net->sctp. The race happens like this: CPU 0 | CPU 1 socket() | __sock_create | socket() inet_create | __sock_create list_for_each_entry_rcu( | answer, &inetsw[sock->type], | list) { | inet_create /* no hits */ | if (unlikely(err)) { | ... 
| request_module() | /* socket creation is blocked | * the module is fully loaded | */ | sctp_init | sctp_v4_protosw_init | inet_register_protosw | list_add_rcu(&p->list, | last_perm); | | list_for_each_entry_rcu( | answer, &inetsw[sock->type], sctp_v6_protosw_init | list) { | /* hit, so assumes protocol | * is already loaded | */ | /* socket creation continues | * before netns is initialized | */ register_pernet_subsys | Simply inverting the initialization order between register_pernet_subsys() and sctp_v4_protosw_init() is not possible because register_pernet_subsys() will create a control sctp socket, so the protocol must be already visible by then. Deferring the socket creation to a work-queue is not good specially because we loose the ability to handle its errors. So, as suggested by Vlad, the fix is to split netns initialization in two moments: defaults and control socket, so that the defaults are already loaded by when we register the protocol, while control socket initialization is kept at the same moment it is today. Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace") Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-119
static __init int sctp_init(void) { int i; int status = -EINVAL; unsigned long goal; unsigned long limit; int max_share; int order; sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", sizeof(struct sctp_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_bucket_cachep) goto out; sctp_chunk_cachep = kmem_cache_create("sctp_chunk", sizeof(struct sctp_chunk), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_chunk_cachep) goto err_chunk_cachep; status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL); if (status) goto err_percpu_counter_init; /* Implementation specific variables. */ /* Initialize default stream count setup information. */ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_sctp_mem[0] = limit / 4 * 3; sysctl_sctp_mem[1] = limit; sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; /* Set per-socket limits to no more than 1/128 the pressure threshold*/ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); max_share = min(4UL*1024*1024, limit); sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1); sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; sysctl_sctp_wmem[1] = 16*1024; sysctl_sctp_wmem[2] = max(64*1024, max_share); /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. */ if (totalram_pages >= (128 * 1024)) goal = totalram_pages >> (22 - PAGE_SHIFT); else goal = totalram_pages >> (24 - PAGE_SHIFT); for (order = 0; (1UL << order) < goal; order++) ; do { sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_hashbucket); if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) continue; sctp_assoc_hashtable = (struct sctp_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_assoc_hashtable && --order > 0); if (!sctp_assoc_hashtable) { pr_err("Failed association hash alloc\n"); status = -ENOMEM; goto err_ahash_alloc; } for (i = 0; i < sctp_assoc_hashsize; i++) { rwlock_init(&sctp_assoc_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain); } /* Allocate and initialize the endpoint hash table. */ sctp_ep_hashsize = 64; sctp_ep_hashtable = kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); if (!sctp_ep_hashtable) { pr_err("Failed endpoint_hash alloc\n"); status = -ENOMEM; goto err_ehash_alloc; } for (i = 0; i < sctp_ep_hashsize; i++) { rwlock_init(&sctp_ep_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); } /* Allocate and initialize the SCTP port hash table. 
*/ do { sctp_port_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_bind_hashbucket); if ((sctp_port_hashsize > (64 * 1024)) && order > 0) continue; sctp_port_hashtable = (struct sctp_bind_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_port_hashtable && --order > 0); if (!sctp_port_hashtable) { pr_err("Failed bind hash alloc\n"); status = -ENOMEM; goto err_bhash_alloc; } for (i = 0; i < sctp_port_hashsize; i++) { spin_lock_init(&sctp_port_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); } pr_info("Hash tables configured (established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); sctp_v4_pf_init(); sctp_v6_pf_init(); status = register_pernet_subsys(&sctp_defaults_ops); if (status) goto err_register_defaults; status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_ctrlsock_ops); if (status) goto err_register_ctrlsock; status = sctp_v4_add_protocol(); if (status) goto err_add_protocol; /* Register SCTP with inet6 layer. */ status = sctp_v6_add_protocol(); if (status) goto err_v6_add_protocol; out: return status; err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: unregister_pernet_subsys(&sctp_ctrlsock_ops); err_register_ctrlsock: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: unregister_pernet_subsys(&sctp_defaults_ops); err_register_defaults: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); err_bhash_alloc: kfree(sctp_ep_hashtable); err_ehash_alloc: free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * sizeof(struct sctp_hashbucket))); err_ahash_alloc: percpu_counter_destroy(&sctp_sockets_allocated); err_percpu_counter_init: kmem_cache_destroy(sctp_chunk_cachep); err_chunk_cachep: kmem_cache_destroy(sctp_bucket_cachep); goto out; }
166,606
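The ordering problem described in the record above (index 166,606) generalizes to any module whose protocol becomes visible to callers partway through initialization: everything a newly created socket may read must be set up before the protocol is published, while pieces that depend on the protocol being visible (such as a control socket) come after. The toy sketch below only illustrates that ordering; register_defaults, register_protosw, register_ctrl_sock and the defaults struct are placeholders, not the real SCTP entry points.

#include <stdio.h>

/* Hypothetical per-namespace defaults that a new socket copies from. */
struct proto_defaults {
    int sndbuf;
    int addr_list_ready;
};

static struct proto_defaults defaults;
static int protocol_visible;    /* once set, socket_create() can succeed */

static void register_defaults(void)
{
    /* Phase 1: fill in everything an early socket might read. */
    defaults.sndbuf = 4096;
    defaults.addr_list_ready = 1;
}

static void register_protosw(void)
{
    /* Phase 2: publish the protocol; from here on sockets can be created,
     * so phase 1 must already be complete. */
    protocol_visible = 1;
}

static int socket_create(int *sndbuf)
{
    if (!protocol_visible)
        return -1;
    *sndbuf = defaults.sndbuf;  /* would read uninitialized data if phase 1 ran late */
    return 0;
}

static void register_ctrl_sock(void)
{
    int sndbuf;

    /* Phase 3: the control socket itself needs the protocol to exist. */
    if (socket_create(&sndbuf) == 0)
        printf("control socket created, sndbuf=%d\n", sndbuf);
}

int main(void)
{
    register_defaults();    /* defaults first ... */
    register_protosw();     /* ... then make the protocol visible ... */
    register_ctrl_sock();   /* ... and only then the control socket */
    return 0;
}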
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool zeropage, bool *mmap_changing) { struct vm_area_struct *dst_vma; ssize_t err; pmd_t *dst_pmd; unsigned long src_addr, dst_addr; long copied; struct page *page; /* * Sanitize the command parameters: */ BUG_ON(dst_start & ~PAGE_MASK); BUG_ON(len & ~PAGE_MASK); /* Does the address range wrap, or is the span zero-sized? */ BUG_ON(src_start + len <= src_start); BUG_ON(dst_start + len <= dst_start); src_addr = src_start; dst_addr = dst_start; copied = 0; page = NULL; retry: down_read(&dst_mm->mmap_sem); /* * If memory mappings are changing because of non-cooperative * operation (e.g. mremap) running in parallel, bail out and * request the user to retry later */ err = -EAGAIN; if (mmap_changing && READ_ONCE(*mmap_changing)) goto out_unlock; /* * Make sure the vma is not shared, that the dst range is * both valid and fully within a single existing vma. */ err = -ENOENT; dst_vma = find_vma(dst_mm, dst_start); if (!dst_vma) goto out_unlock; /* * Be strict and only allow __mcopy_atomic on userfaultfd * registered ranges to prevent userland errors going * unnoticed. As far as the VM consistency is concerned, it * would be perfectly safe to remove this check, but there's * no useful usage for __mcopy_atomic ouside of userfaultfd * registered ranges. This is after all why these are ioctls * belonging to the userfaultfd and not syscalls. */ if (!dst_vma->vm_userfaultfd_ctx.ctx) goto out_unlock; if (dst_start < dst_vma->vm_start || dst_start + len > dst_vma->vm_end) goto out_unlock; err = -EINVAL; /* * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but * it will overwrite vm_ops, so vma_is_anonymous must return false. */ if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && dst_vma->vm_flags & VM_SHARED)) goto out_unlock; /* * If this is a HUGETLB vma, pass off to appropriate routine */ if (is_vm_hugetlb_page(dst_vma)) return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, src_start, len, zeropage); if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) goto out_unlock; /* * Ensure the dst_vma has a anon_vma or this page * would get a NULL anon_vma when moved in the * dst_vma. */ err = -ENOMEM; if (!(dst_vma->vm_flags & VM_SHARED) && unlikely(anon_vma_prepare(dst_vma))) goto out_unlock; while (src_addr < src_start + len) { pmd_t dst_pmdval; BUG_ON(dst_addr >= dst_start + len); dst_pmd = mm_alloc_pmd(dst_mm, dst_addr); if (unlikely(!dst_pmd)) { err = -ENOMEM; break; } dst_pmdval = pmd_read_atomic(dst_pmd); /* * If the dst_pmd is mapped as THP don't * override it and just be strict. 
*/ if (unlikely(pmd_trans_huge(dst_pmdval))) { err = -EEXIST; break; } if (unlikely(pmd_none(dst_pmdval)) && unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) { err = -ENOMEM; break; } /* If an huge pmd materialized from under us fail */ if (unlikely(pmd_trans_huge(*dst_pmd))) { err = -EFAULT; break; } BUG_ON(pmd_none(*dst_pmd)); BUG_ON(pmd_trans_huge(*dst_pmd)); err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, src_addr, &page, zeropage); cond_resched(); if (unlikely(err == -ENOENT)) { void *page_kaddr; up_read(&dst_mm->mmap_sem); BUG_ON(!page); page_kaddr = kmap(page); err = copy_from_user(page_kaddr, (const void __user *) src_addr, PAGE_SIZE); kunmap(page); if (unlikely(err)) { err = -EFAULT; goto out; } goto retry; } else BUG_ON(page); if (!err) { dst_addr += PAGE_SIZE; src_addr += PAGE_SIZE; copied += PAGE_SIZE; if (fatal_signal_pending(current)) err = -EINTR; } if (err) break; } out_unlock: up_read(&dst_mm->mmap_sem); out: if (page) put_page(page); BUG_ON(copied < 0); BUG_ON(err > 0); BUG_ON(!copied && !err); return copied ? copied : err; } Commit Message: userfaultfd: shmem/hugetlbfs: only allow to register VM_MAYWRITE vmas After the VMA to register the uffd onto is found, check that it has VM_MAYWRITE set before allowing registration. This way we inherit all common code checks before allowing to fill file holes in shmem and hugetlbfs with UFFDIO_COPY. The userfaultfd memory model is not applicable for readonly files unless it's a MAP_PRIVATE. Link: http://lkml.kernel.org/r/[email protected] Fixes: ff62a3421044 ("hugetlb: implement memfd sealing") Signed-off-by: Andrea Arcangeli <[email protected]> Reviewed-by: Mike Rapoport <[email protected]> Reviewed-by: Hugh Dickins <[email protected]> Reported-by: Jann Horn <[email protected]> Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support") Cc: <[email protected]> Cc: "Dr. David Alan Gilbert" <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Peter Xu <[email protected]> Cc: [email protected] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID:
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool zeropage, bool *mmap_changing) { struct vm_area_struct *dst_vma; ssize_t err; pmd_t *dst_pmd; unsigned long src_addr, dst_addr; long copied; struct page *page; /* * Sanitize the command parameters: */ BUG_ON(dst_start & ~PAGE_MASK); BUG_ON(len & ~PAGE_MASK); /* Does the address range wrap, or is the span zero-sized? */ BUG_ON(src_start + len <= src_start); BUG_ON(dst_start + len <= dst_start); src_addr = src_start; dst_addr = dst_start; copied = 0; page = NULL; retry: down_read(&dst_mm->mmap_sem); /* * If memory mappings are changing because of non-cooperative * operation (e.g. mremap) running in parallel, bail out and * request the user to retry later */ err = -EAGAIN; if (mmap_changing && READ_ONCE(*mmap_changing)) goto out_unlock; /* * Make sure the vma is not shared, that the dst range is * both valid and fully within a single existing vma. */ err = -ENOENT; dst_vma = find_vma(dst_mm, dst_start); if (!dst_vma) goto out_unlock; /* * Check the vma is registered in uffd, this is required to * enforce the VM_MAYWRITE check done at uffd registration * time. */ if (!dst_vma->vm_userfaultfd_ctx.ctx) goto out_unlock; if (dst_start < dst_vma->vm_start || dst_start + len > dst_vma->vm_end) goto out_unlock; err = -EINVAL; /* * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but * it will overwrite vm_ops, so vma_is_anonymous must return false. */ if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && dst_vma->vm_flags & VM_SHARED)) goto out_unlock; /* * If this is a HUGETLB vma, pass off to appropriate routine */ if (is_vm_hugetlb_page(dst_vma)) return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, src_start, len, zeropage); if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) goto out_unlock; /* * Ensure the dst_vma has a anon_vma or this page * would get a NULL anon_vma when moved in the * dst_vma. */ err = -ENOMEM; if (!(dst_vma->vm_flags & VM_SHARED) && unlikely(anon_vma_prepare(dst_vma))) goto out_unlock; while (src_addr < src_start + len) { pmd_t dst_pmdval; BUG_ON(dst_addr >= dst_start + len); dst_pmd = mm_alloc_pmd(dst_mm, dst_addr); if (unlikely(!dst_pmd)) { err = -ENOMEM; break; } dst_pmdval = pmd_read_atomic(dst_pmd); /* * If the dst_pmd is mapped as THP don't * override it and just be strict. */ if (unlikely(pmd_trans_huge(dst_pmdval))) { err = -EEXIST; break; } if (unlikely(pmd_none(dst_pmdval)) && unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) { err = -ENOMEM; break; } /* If an huge pmd materialized from under us fail */ if (unlikely(pmd_trans_huge(*dst_pmd))) { err = -EFAULT; break; } BUG_ON(pmd_none(*dst_pmd)); BUG_ON(pmd_trans_huge(*dst_pmd)); err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, src_addr, &page, zeropage); cond_resched(); if (unlikely(err == -ENOENT)) { void *page_kaddr; up_read(&dst_mm->mmap_sem); BUG_ON(!page); page_kaddr = kmap(page); err = copy_from_user(page_kaddr, (const void __user *) src_addr, PAGE_SIZE); kunmap(page); if (unlikely(err)) { err = -EFAULT; goto out; } goto retry; } else BUG_ON(page); if (!err) { dst_addr += PAGE_SIZE; src_addr += PAGE_SIZE; copied += PAGE_SIZE; if (fatal_signal_pending(current)) err = -EINTR; } if (err) break; } out_unlock: up_read(&dst_mm->mmap_sem); out: if (page) put_page(page); BUG_ON(copied < 0); BUG_ON(err > 0); BUG_ON(!copied && !err); return copied ? copied : err; }
169,007
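The fix in the record above (index 169,007) leans on a registration-time contract: a fill request is honored only if the target range lies inside a region that was explicitly registered, so permission checks performed at registration time (writability in this case) carry over automatically. Below is a small userspace sketch of that idea; struct region, copy_allowed and the address constants are made up for illustration.

#include <stdio.h>

/* Hypothetical bookkeeping for one mapped region. */
struct region {
    unsigned long start;
    unsigned long end;
    int registered;     /* set only by an explicit registration step */
    int may_write;      /* permission recorded at registration time */
};

/*
 * Accept a copy request only if it falls entirely inside a region that was
 * registered beforehand; because registration is the gate, checks done at
 * registration time (such as writability) are inherited by every request.
 */
static int copy_allowed(const struct region *r,
                        unsigned long dst, unsigned long len)
{
    if (!r->registered || !r->may_write)
        return 0;
    if (dst < r->start || dst + len > r->end || dst + len < dst)
        return 0;       /* out of range, or the length wrapped around */
    return 1;
}

int main(void)
{
    struct region reg = { .start = 0x1000, .end = 0x5000,
                          .registered = 1, .may_write = 1 };

    printf("inside:    %d\n", copy_allowed(&reg, 0x2000, 0x1000)); /* 1 */
    printf("past end:  %d\n", copy_allowed(&reg, 0x4800, 0x1000)); /* 0 */
    reg.may_write = 0;
    printf("read-only: %d\n", copy_allowed(&reg, 0x2000, 0x100));  /* 0 */
    return 0;
}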
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: UseCounterPageLoadMetricsObserver::GetAllowedUkmFeatures() { static base::NoDestructor<UseCounterPageLoadMetricsObserver::UkmFeatureList> opt_in_features(std::initializer_list<WebFeature>({ WebFeature::kNavigatorVibrate, WebFeature::kNavigatorVibrateSubFrame, WebFeature::kTouchEventPreventedNoTouchAction, WebFeature::kTouchEventPreventedForcedDocumentPassiveNoTouchAction, WebFeature::kDataUriHasOctothorpe, WebFeature::kApplicationCacheManifestSelectInsecureOrigin, WebFeature::kApplicationCacheManifestSelectSecureOrigin, WebFeature::kMixedContentAudio, WebFeature::kMixedContentImage, WebFeature::kMixedContentVideo, WebFeature::kMixedContentPlugin, WebFeature::kOpenerNavigationWithoutGesture, WebFeature::kUsbRequestDevice, WebFeature::kXMLHttpRequestSynchronous, WebFeature::kPaymentHandler, WebFeature::kPaymentRequestShowWithoutGesture, WebFeature::kHTMLImports, WebFeature::kHTMLImportsHasStyleSheets, WebFeature::kElementCreateShadowRoot, WebFeature::kDocumentRegisterElement, WebFeature::kCredentialManagerCreatePublicKeyCredential, WebFeature::kCredentialManagerGetPublicKeyCredential, WebFeature::kCredentialManagerMakePublicKeyCredentialSuccess, WebFeature::kCredentialManagerGetPublicKeyCredentialSuccess, WebFeature::kV8AudioContext_Constructor, WebFeature::kElementAttachShadow, WebFeature::kElementAttachShadowOpen, WebFeature::kElementAttachShadowClosed, WebFeature::kCustomElementRegistryDefine, WebFeature::kTextToSpeech_Speak, WebFeature::kTextToSpeech_SpeakDisallowedByAutoplay, WebFeature::kCSSEnvironmentVariable, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetTop, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetLeft, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetRight, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetBottom, WebFeature::kMediaControlsDisplayCutoutGesture, WebFeature::kPolymerV1Detected, WebFeature::kPolymerV2Detected, WebFeature::kFullscreenSecureOrigin, WebFeature::kFullscreenInsecureOrigin, WebFeature::kPrefixedVideoEnterFullscreen, WebFeature::kPrefixedVideoExitFullscreen, WebFeature::kPrefixedVideoEnterFullScreen, WebFeature::kPrefixedVideoExitFullScreen, WebFeature::kDocumentLevelPassiveDefaultEventListenerPreventedWheel, WebFeature::kDocumentDomainBlockedCrossOriginAccess, WebFeature::kDocumentDomainEnabledCrossOriginAccess, WebFeature::kSuppressHistoryEntryWithoutUserGesture, WebFeature::kCursorImageGT32x32, WebFeature::kCursorImageLE32x32, WebFeature::kHistoryPushState, WebFeature::kHistoryReplaceState, WebFeature::kCursorImageGT64x64, WebFeature::kAdClick, WebFeature::kUpdateWithoutShippingOptionOnShippingAddressChange, WebFeature::kUpdateWithoutShippingOptionOnShippingOptionChange, WebFeature::kSignedExchangeInnerResponseInMainFrame, WebFeature::kSignedExchangeInnerResponseInSubFrame, WebFeature::kWebShareShare, WebFeature::kHTMLAnchorElementDownloadInSandboxWithUserGesture, WebFeature::kHTMLAnchorElementDownloadInSandboxWithoutUserGesture, WebFeature::kNavigationDownloadInSandboxWithUserGesture, WebFeature::kNavigationDownloadInSandboxWithoutUserGesture, WebFeature::kDownloadInAdFrameWithUserGesture, WebFeature::kDownloadInAdFrameWithoutUserGesture, WebFeature::kOpenWebDatabase, WebFeature::kV8MediaCapabilities_DecodingInfo_Method, })); return *opt_in_features; } Commit Message: Add kOpenerNavigationDownloadCrossOriginNoGesture to UKM whitelist Bug: 632514 Change-Id: Ibd09c4d8635873e02f9b484ec720b71ae6e3588f Reviewed-on: https://chromium-review.googlesource.com/c/1399521 Reviewed-by: Bryan McQuade <[email protected]> Commit-Queue: Charlie Harrison 
<[email protected]> Cr-Commit-Position: refs/heads/master@{#620513} CWE ID: CWE-20
UseCounterPageLoadMetricsObserver::GetAllowedUkmFeatures() { static base::NoDestructor<UseCounterPageLoadMetricsObserver::UkmFeatureList> opt_in_features(std::initializer_list<WebFeature>({ WebFeature::kNavigatorVibrate, WebFeature::kNavigatorVibrateSubFrame, WebFeature::kTouchEventPreventedNoTouchAction, WebFeature::kTouchEventPreventedForcedDocumentPassiveNoTouchAction, WebFeature::kDataUriHasOctothorpe, WebFeature::kApplicationCacheManifestSelectInsecureOrigin, WebFeature::kApplicationCacheManifestSelectSecureOrigin, WebFeature::kMixedContentAudio, WebFeature::kMixedContentImage, WebFeature::kMixedContentVideo, WebFeature::kMixedContentPlugin, WebFeature::kOpenerNavigationWithoutGesture, WebFeature::kUsbRequestDevice, WebFeature::kXMLHttpRequestSynchronous, WebFeature::kPaymentHandler, WebFeature::kPaymentRequestShowWithoutGesture, WebFeature::kHTMLImports, WebFeature::kHTMLImportsHasStyleSheets, WebFeature::kElementCreateShadowRoot, WebFeature::kDocumentRegisterElement, WebFeature::kCredentialManagerCreatePublicKeyCredential, WebFeature::kCredentialManagerGetPublicKeyCredential, WebFeature::kCredentialManagerMakePublicKeyCredentialSuccess, WebFeature::kCredentialManagerGetPublicKeyCredentialSuccess, WebFeature::kV8AudioContext_Constructor, WebFeature::kElementAttachShadow, WebFeature::kElementAttachShadowOpen, WebFeature::kElementAttachShadowClosed, WebFeature::kCustomElementRegistryDefine, WebFeature::kTextToSpeech_Speak, WebFeature::kTextToSpeech_SpeakDisallowedByAutoplay, WebFeature::kCSSEnvironmentVariable, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetTop, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetLeft, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetRight, WebFeature::kCSSEnvironmentVariable_SafeAreaInsetBottom, WebFeature::kMediaControlsDisplayCutoutGesture, WebFeature::kPolymerV1Detected, WebFeature::kPolymerV2Detected, WebFeature::kFullscreenSecureOrigin, WebFeature::kFullscreenInsecureOrigin, WebFeature::kPrefixedVideoEnterFullscreen, WebFeature::kPrefixedVideoExitFullscreen, WebFeature::kPrefixedVideoEnterFullScreen, WebFeature::kPrefixedVideoExitFullScreen, WebFeature::kDocumentLevelPassiveDefaultEventListenerPreventedWheel, WebFeature::kDocumentDomainBlockedCrossOriginAccess, WebFeature::kDocumentDomainEnabledCrossOriginAccess, WebFeature::kSuppressHistoryEntryWithoutUserGesture, WebFeature::kCursorImageGT32x32, WebFeature::kCursorImageLE32x32, WebFeature::kHistoryPushState, WebFeature::kHistoryReplaceState, WebFeature::kCursorImageGT64x64, WebFeature::kAdClick, WebFeature::kUpdateWithoutShippingOptionOnShippingAddressChange, WebFeature::kUpdateWithoutShippingOptionOnShippingOptionChange, WebFeature::kSignedExchangeInnerResponseInMainFrame, WebFeature::kSignedExchangeInnerResponseInSubFrame, WebFeature::kWebShareShare, WebFeature::kHTMLAnchorElementDownloadInSandboxWithUserGesture, WebFeature::kHTMLAnchorElementDownloadInSandboxWithoutUserGesture, WebFeature::kNavigationDownloadInSandboxWithUserGesture, WebFeature::kNavigationDownloadInSandboxWithoutUserGesture, WebFeature::kDownloadInAdFrameWithUserGesture, WebFeature::kDownloadInAdFrameWithoutUserGesture, WebFeature::kOpenWebDatabase, WebFeature::kV8MediaCapabilities_DecodingInfo_Method, WebFeature::kOpenerNavigationDownloadCrossOriginNoGesture, })); return *opt_in_features; }
173,060
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: sysapi_translate_arch( const char *machine, const char *) { char tmp[64]; char *tmparch; #if defined(AIX) /* AIX machines have a ton of different models encoded into the uname structure, so go to some other function to decode and group the architecture together */ struct utsname buf; if( uname(&buf) < 0 ) { return NULL; } return( get_aix_arch( &buf ) ); #elif defined(HPUX) return( get_hpux_arch( ) ); #else if( !strcmp(machine, "alpha") ) { sprintf( tmp, "ALPHA" ); } else if( !strcmp(machine, "i86pc") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i686") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i586") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i486") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i386") ) { //LDAP entry #if defined(Darwin) /* Mac OS X often claims to be i386 in uname, even if the * hardware is x86_64 and the OS can run 64-bit binaries. * We'll base our architecture name on the default build * target for gcc. In 10.5 and earlier, that's i386. * On 10.6, it's x86_64. * The value we're querying is the kernel version. * 10.6 kernels have a version that starts with "10." * Older versions have a lower first number. */ int ret; char val[32]; size_t len = sizeof(val); /* assume x86 */ sprintf( tmp, "INTEL" ); ret = sysctlbyname("kern.osrelease", &val, &len, NULL, 0); if (ret == 0 && strncmp(val, "10.", 3) == 0) { /* but we could be proven wrong */ sprintf( tmp, "X86_64" ); } #else sprintf( tmp, "INTEL" ); #endif } else if( !strcmp(machine, "ia64") ) { sprintf( tmp, "IA64" ); } else if( !strcmp(machine, "x86_64") ) { sprintf( tmp, "X86_64" ); } else if( !strcmp(machine, "amd64") ) { sprintf( tmp, "X86_64" ); } else if( !strcmp(machine, "sun4u") ) { sprintf( tmp, "SUN4u" ); } else if( !strcmp(machine, "sun4m") ) { sprintf( tmp, "SUN4x" ); } else if( !strcmp(machine, "sun4c") ) { sprintf( tmp, "SUN4x" ); } else if( !strcmp(machine, "sparc") ) { //LDAP entry sprintf( tmp, "SUN4x" ); } else if( !strcmp(machine, "Power Macintosh") ) { //LDAP entry sprintf( tmp, "PPC" ); } else if( !strcmp(machine, "ppc") ) { sprintf( tmp, "PPC" ); } else if( !strcmp(machine, "ppc32") ) { sprintf( tmp, "PPC" ); } else if( !strcmp(machine, "ppc64") ) { sprintf( tmp, "PPC64" ); } else { sprintf( tmp, machine ); } tmparch = strdup( tmp ); if( !tmparch ) { EXCEPT( "Out of memory!" ); } return( tmparch ); #endif /* if HPUX else */ } Commit Message: CWE ID: CWE-134
sysapi_translate_arch( const char *machine, const char *) { char tmp[64]; char *tmparch; #if defined(AIX) /* AIX machines have a ton of different models encoded into the uname structure, so go to some other function to decode and group the architecture together */ struct utsname buf; if( uname(&buf) < 0 ) { return NULL; } return( get_aix_arch( &buf ) ); #elif defined(HPUX) return( get_hpux_arch( ) ); #else if( !strcmp(machine, "alpha") ) { sprintf( tmp, "ALPHA" ); } else if( !strcmp(machine, "i86pc") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i686") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i586") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i486") ) { sprintf( tmp, "INTEL" ); } else if( !strcmp(machine, "i386") ) { //LDAP entry #if defined(Darwin) /* Mac OS X often claims to be i386 in uname, even if the * hardware is x86_64 and the OS can run 64-bit binaries. * We'll base our architecture name on the default build * target for gcc. In 10.5 and earlier, that's i386. * On 10.6, it's x86_64. * The value we're querying is the kernel version. * 10.6 kernels have a version that starts with "10." * Older versions have a lower first number. */ int ret; char val[32]; size_t len = sizeof(val); /* assume x86 */ sprintf( tmp, "INTEL" ); ret = sysctlbyname("kern.osrelease", &val, &len, NULL, 0); if (ret == 0 && strncmp(val, "10.", 3) == 0) { /* but we could be proven wrong */ sprintf( tmp, "X86_64" ); } #else sprintf( tmp, "INTEL" ); #endif } else if( !strcmp(machine, "ia64") ) { sprintf( tmp, "IA64" ); } else if( !strcmp(machine, "x86_64") ) { sprintf( tmp, "X86_64" ); } else if( !strcmp(machine, "amd64") ) { sprintf( tmp, "X86_64" ); } else if( !strcmp(machine, "sun4u") ) { sprintf( tmp, "SUN4u" ); } else if( !strcmp(machine, "sun4m") ) { sprintf( tmp, "SUN4x" ); } else if( !strcmp(machine, "sun4c") ) { sprintf( tmp, "SUN4x" ); } else if( !strcmp(machine, "sparc") ) { //LDAP entry sprintf( tmp, "SUN4x" ); } else if( !strcmp(machine, "Power Macintosh") ) { //LDAP entry sprintf( tmp, "PPC" ); } else if( !strcmp(machine, "ppc") ) { sprintf( tmp, "PPC" ); } else if( !strcmp(machine, "ppc32") ) { sprintf( tmp, "PPC" ); } else if( !strcmp(machine, "ppc64") ) { sprintf( tmp, "PPC64" ); } else { sprintf( tmp, "%s", machine ); } tmparch = strdup( tmp ); if( !tmparch ) { EXCEPT( "Out of memory!" ); } return( tmparch ); #endif /* if HPUX else */ }
165,380
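The record above (index 165,380) is the classic CWE-134 shape: a value that originates outside the program (here a uname machine string) must never be passed as the format argument of the printf family. The safe form pins the format to "%s" and, ideally, bounds the write with snprintf. A minimal self-contained example follows; the helper name describe_machine is hypothetical.

#include <stdio.h>

/*
 * Copy an externally supplied name into a fixed buffer.  Passing the name
 * itself as the format string (sprintf(buf, name)) lets any '%' in the
 * input be interpreted as a conversion; always pin the format and pass the
 * input as an argument.  snprintf additionally bounds the write.
 */
static void describe_machine(char *buf, size_t buflen, const char *machine)
{
    snprintf(buf, buflen, "%s", machine);
}

int main(void)
{
    char tmp[64];

    /* A hostile uname value: harmless here because it is never a format. */
    describe_machine(tmp, sizeof(tmp), "evil-%s-%n-arch");
    printf("arch string: %s\n", tmp);
    return 0;
}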
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static Image *ReadNULLImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; MagickPixelPacket background; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; ssize_t y; /* Initialize Image structure. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); if (image->columns == 0) image->columns=1; if (image->rows == 0) image->rows=1; image->matte=MagickTrue; GetMagickPixelPacket(image,&background); background.opacity=(MagickRealType) TransparentOpacity; if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,&background,q,indexes); q++; indexes++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadNULLImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; MagickBooleanType status; MagickPixelPacket background; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; ssize_t y; /* Initialize Image structure. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); if (image->columns == 0) image->columns=1; if (image->rows == 0) image->rows=1; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } image->matte=MagickTrue; GetMagickPixelPacket(image,&background); background.opacity=(MagickRealType) TransparentOpacity; if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,&background,q,indexes); q++; indexes++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(GetFirstImageInList(image)); }
168,586
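The fix in the record above (index 168,586) adds a geometry check before any pixel is written, which is a reusable pattern: validate that the dimensions are non-zero, that their product cannot overflow, and that the result stays under an explicit cap before sizing the buffer that later loops will fill. The sketch below shows that pattern with an invented cap (MAX_PIXELS) and helper (alloc_canvas); it does not reproduce ImageMagick's SetImageExtent.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cap on a single image's pixel count. */
#define MAX_PIXELS (64UL * 1024UL * 1024UL)

/*
 * Allocate a canvas only after the requested geometry has been validated:
 * non-zero dimensions, no multiplication overflow, and a sane upper bound.
 * Returning NULL on bad geometry keeps later per-row loops from touching
 * memory that was never sized for them.
 */
static unsigned char *alloc_canvas(unsigned long columns, unsigned long rows)
{
    unsigned long pixels;

    if (columns == 0 || rows == 0)
        return NULL;
    if (columns > MAX_PIXELS / rows)   /* columns * rows would overflow or exceed the cap */
        return NULL;
    pixels = columns * rows;
    return calloc(pixels, 4);          /* 4 bytes per pixel (RGBA) */
}

int main(void)
{
    unsigned char *ok  = alloc_canvas(640, 480);
    unsigned char *bad = alloc_canvas(0xFFFFFFFFUL, 0xFFFFFFFFUL);

    printf("640x480: %s\n", ok  ? "allocated" : "rejected");
    printf("huge:    %s\n", bad ? "allocated" : "rejected");
    free(ok);
    free(bad);
    return 0;
}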
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: PHP_METHOD(Phar, addEmptyDir) { char *dirname; size_t dirname_len; PHAR_ARCHIVE_OBJECT(); if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &dirname, &dirname_len) == FAILURE) { return; } if (dirname_len >= sizeof(".phar")-1 && !memcmp(dirname, ".phar", sizeof(".phar")-1)) { zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Cannot create a directory in magic \".phar\" directory"); return; } phar_mkdir(&phar_obj->archive, dirname, dirname_len); } Commit Message: CWE ID: CWE-20
PHP_METHOD(Phar, addEmptyDir) { char *dirname; size_t dirname_len; PHAR_ARCHIVE_OBJECT(); if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &dirname, &dirname_len) == FAILURE) { return; } if (dirname_len >= sizeof(".phar")-1 && !memcmp(dirname, ".phar", sizeof(".phar")-1)) { zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Cannot create a directory in magic \".phar\" directory"); return; } phar_mkdir(&phar_obj->archive, dirname, dirname_len); }
165,069
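Switching the parameter spec from "s" to "p" in the record above (index 165,069) rejects values containing embedded NUL bytes before they are used as filesystem paths, since a length-delimited engine string and a NUL-terminated C path can otherwise disagree about where the value ends. A standalone C illustration of that check follows; the name is_valid_path_arg is made up.

#include <stdio.h>
#include <string.h>

/*
 * A length-delimited string coming from a scripting engine may legally
 * contain NUL bytes, but C path APIs stop at the first NUL, so the two
 * views of the value can diverge.  Reject the argument outright if any
 * NUL appears within its reported length.
 */
static int is_valid_path_arg(const char *s, size_t len)
{
    return memchr(s, '\0', len) == NULL;
}

int main(void)
{
    const char clean[]  = "dir/sub";
    const char sneaky[] = "dir\0../../etc";  /* embedded NUL truncates later use */

    printf("clean:  %d\n", is_valid_path_arg(clean,  sizeof(clean)  - 1));
    printf("sneaky: %d\n", is_valid_path_arg(sneaky, sizeof(sneaky) - 1));
    return 0;
}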
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: NORET_TYPE void do_exit(long code) { struct task_struct *tsk = current; int group_dead; profile_task_exit(tsk); WARN_ON(atomic_read(&tsk->fs_excl)); if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); tracehook_report_exit(&code); validate_creds_for_do_exit(tsk); /* * We're taking recursive faults here in do_exit. Safest is to just * leave this task alone and wait for reboot. */ if (unlikely(tsk->flags & PF_EXITING)) { printk(KERN_ALERT "Fixing recursive fault but reboot is needed!\n"); /* * We can do this unlocked here. The futex code uses * this flag just to verify whether the pi state * cleanup has been done or not. In the worst case it * loops once more. We pretend that the cleanup was * done as there is no way to return. Either the * OWNER_DIED bit is set by now or we push the blocked * task into the wait for ever nirwana as well. */ tsk->flags |= PF_EXITPIDONE; set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } exit_irq_thread(); exit_signals(tsk); /* sets PF_EXITING */ /* * tsk->flags are checked in the futex code to protect against * an exiting task cleaning up the robust pi futexes. */ smp_mb(); spin_unlock_wait(&tsk->pi_lock); if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), preempt_count()); acct_update_integrals(tsk); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { hrtimer_cancel(&tsk->signal->real_timer); exit_itimers(tsk->signal); if (tsk->mm) setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); } acct_collect(code, group_dead); if (group_dead) tty_audit_exit(); if (unlikely(tsk->audit_context)) audit_free(tsk); tsk->exit_code = code; taskstats_exit(tsk, group_dead); exit_mm(tsk); if (group_dead) acct_process(); trace_sched_process_exit(tsk); exit_sem(tsk); exit_files(tsk); exit_fs(tsk); check_stack_usage(); exit_thread(); cgroup_exit(tsk, 1); if (group_dead && tsk->signal->leader) disassociate_ctty(1); module_put(task_thread_info(tsk)->exec_domain->module); proc_exit_connector(tsk); /* * Flush inherited counters to the parent - before the parent * gets woken up by child-exit notifications. */ perf_event_exit_task(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA mpol_put(tsk->mempolicy); tsk->mempolicy = NULL; #endif #ifdef CONFIG_FUTEX if (unlikely(current->pi_state_cache)) kfree(current->pi_state_cache); #endif /* * Make sure we are holding no locks: */ debug_check_no_locks_held(tsk); /* * We can do this unlocked here. The futex code uses this flag * just to verify whether the pi state cleanup has been done * or not. In the worst case it loops once more. */ tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(); if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); validate_creds_for_do_exit(tsk); preempt_disable(); exit_rcu(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; schedule(); BUG(); /* Avoid "noreturn function does return". */ for (;;) cpu_relax(); /* For when BUG is null */ } Commit Message: block: Fix io_context leak after failure of clone with CLONE_IO With CLONE_IO, parent's io_context->nr_tasks is incremented, but never decremented whenever copy_process() fails afterwards, which prevents exit_io_context() from calling IO schedulers exit functions. Give a task_struct to exit_io_context(), and call exit_io_context() instead of put_io_context() in copy_process() cleanup path. 
Signed-off-by: Louis Rilling <[email protected]> Signed-off-by: Jens Axboe <[email protected]> CWE ID: CWE-20
NORET_TYPE void do_exit(long code) { struct task_struct *tsk = current; int group_dead; profile_task_exit(tsk); WARN_ON(atomic_read(&tsk->fs_excl)); if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); tracehook_report_exit(&code); validate_creds_for_do_exit(tsk); /* * We're taking recursive faults here in do_exit. Safest is to just * leave this task alone and wait for reboot. */ if (unlikely(tsk->flags & PF_EXITING)) { printk(KERN_ALERT "Fixing recursive fault but reboot is needed!\n"); /* * We can do this unlocked here. The futex code uses * this flag just to verify whether the pi state * cleanup has been done or not. In the worst case it * loops once more. We pretend that the cleanup was * done as there is no way to return. Either the * OWNER_DIED bit is set by now or we push the blocked * task into the wait for ever nirwana as well. */ tsk->flags |= PF_EXITPIDONE; set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } exit_irq_thread(); exit_signals(tsk); /* sets PF_EXITING */ /* * tsk->flags are checked in the futex code to protect against * an exiting task cleaning up the robust pi futexes. */ smp_mb(); spin_unlock_wait(&tsk->pi_lock); if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), preempt_count()); acct_update_integrals(tsk); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { hrtimer_cancel(&tsk->signal->real_timer); exit_itimers(tsk->signal); if (tsk->mm) setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); } acct_collect(code, group_dead); if (group_dead) tty_audit_exit(); if (unlikely(tsk->audit_context)) audit_free(tsk); tsk->exit_code = code; taskstats_exit(tsk, group_dead); exit_mm(tsk); if (group_dead) acct_process(); trace_sched_process_exit(tsk); exit_sem(tsk); exit_files(tsk); exit_fs(tsk); check_stack_usage(); exit_thread(); cgroup_exit(tsk, 1); if (group_dead && tsk->signal->leader) disassociate_ctty(1); module_put(task_thread_info(tsk)->exec_domain->module); proc_exit_connector(tsk); /* * Flush inherited counters to the parent - before the parent * gets woken up by child-exit notifications. */ perf_event_exit_task(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA mpol_put(tsk->mempolicy); tsk->mempolicy = NULL; #endif #ifdef CONFIG_FUTEX if (unlikely(current->pi_state_cache)) kfree(current->pi_state_cache); #endif /* * Make sure we are holding no locks: */ debug_check_no_locks_held(tsk); /* * We can do this unlocked here. The futex code uses this flag * just to verify whether the pi state cleanup has been done * or not. In the worst case it loops once more. */ tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(tsk); if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); validate_creds_for_do_exit(tsk); preempt_disable(); exit_rcu(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; schedule(); BUG(); /* Avoid "noreturn function does return". */ for (;;) cpu_relax(); /* For when BUG is null */ }
169,886
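The commit in the record above (index 169,886) is about symmetric teardown of a shared, reference-counted context: the error path of clone has to perform the same full detach (task count and reference) that normal exit performs, otherwise one of the counters stays inflated forever. The sketch below models that with a toy io_context; the field names and helpers (ioc_share, ioc_exit) are invented and deliberately ignore locking.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical shared I/O context with a task counter and a refcount. */
struct io_context {
    int refcount;
    int nr_tasks;
};

struct task {
    struct io_context *ioc;
};

static struct io_context *ioc_alloc(void)
{
    struct io_context *ioc = malloc(sizeof(*ioc));

    if (!ioc)
        exit(1);
    ioc->refcount = 1;
    ioc->nr_tasks = 1;
    return ioc;
}

/* Sharing bumps both counters: one more owner, one more attached task. */
static void ioc_share(struct task *child, struct task *parent)
{
    child->ioc = parent->ioc;
    child->ioc->refcount++;
    child->ioc->nr_tasks++;
}

/*
 * Full detach for one task: drop the task count as well as the reference.
 * Calling only a "put" (refcount decrement) on an error path would leave
 * nr_tasks inflated forever, which is the leak the commit describes.
 */
static void ioc_exit(struct task *t)
{
    struct io_context *ioc = t->ioc;

    if (!ioc)
        return;
    ioc->nr_tasks--;
    if (--ioc->refcount == 0)
        free(ioc);
    t->ioc = NULL;
}

int main(void)
{
    struct task parent = { ioc_alloc() }, child = { NULL };

    ioc_share(&child, &parent);
    ioc_exit(&child);                 /* failed clone or normal child exit */
    printf("nr_tasks=%d refcount=%d\n",
           parent.ioc->nr_tasks, parent.ioc->refcount);  /* expect 1 and 1 */
    ioc_exit(&parent);
    return 0;
}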
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len, int atomic) { unsigned long copy; while (len > 0) { while (!iov->iov_len) iov++; copy = min_t(unsigned long, len, iov->iov_len); if (atomic) { if (__copy_from_user_inatomic(to, iov->iov_base, copy)) return -EFAULT; } else { if (copy_from_user(to, iov->iov_base, copy)) return -EFAULT; } to += copy; len -= copy; iov->iov_base += copy; iov->iov_len -= copy; } return 0; } Commit Message: new helper: copy_page_from_iter() parallel to copy_page_to_iter(). pipe_write() switched to it (and became ->write_iter()). Signed-off-by: Al Viro <[email protected]> CWE ID: CWE-17
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
166,686
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: std::set<std::string> GetDistinctHosts(const URLPatternSet& host_patterns, bool include_rcd, bool exclude_file_scheme) { typedef base::StringPairs HostVector; HostVector hosts_best_rcd; for (const URLPattern& pattern : host_patterns) { if (exclude_file_scheme && pattern.scheme() == url::kFileScheme) continue; std::string host = pattern.host(); if (pattern.match_subdomains()) host = "*." + host; std::string rcd; size_t reg_len = net::registry_controlled_domains::PermissiveGetHostRegistryLength( host, net::registry_controlled_domains::EXCLUDE_UNKNOWN_REGISTRIES, net::registry_controlled_domains::EXCLUDE_PRIVATE_REGISTRIES); if (reg_len && reg_len != std::string::npos) { if (include_rcd) // else leave rcd empty rcd = host.substr(host.size() - reg_len); host = host.substr(0, host.size() - reg_len); } HostVector::iterator it = hosts_best_rcd.begin(); for (; it != hosts_best_rcd.end(); ++it) { if (it->first == host) break; } if (it != hosts_best_rcd.end()) { if (include_rcd && RcdBetterThan(rcd, it->second)) it->second = rcd; } else { // Previously unseen host, append it. hosts_best_rcd.push_back(std::make_pair(host, rcd)); } } std::set<std::string> distinct_hosts; for (const auto& host_rcd : hosts_best_rcd) distinct_hosts.insert(host_rcd.first + host_rcd.second); return distinct_hosts; } Commit Message: Ensure IDN domains are in punycode format in extension host permissions Today in extension dialogs and bubbles, IDN domains in host permissions are not displayed in punycode format. There is a low security risk that granting such permission would allow extensions to interact with pages using spoofy IDN domains. Note that this does not affect the omnibox, which would represent the origin properly. To address this issue, this CL converts IDN domains in host permissions to punycode format. Bug: 745580 Change-Id: Ifc04030fae645f8a78ac8fde170660f2d514acce Reviewed-on: https://chromium-review.googlesource.com/644140 Commit-Queue: catmullings <[email protected]> Reviewed-by: Istiaque Ahmed <[email protected]> Reviewed-by: Tommy Li <[email protected]> Cr-Commit-Position: refs/heads/master@{#499090} CWE ID: CWE-20
std::set<std::string> GetDistinctHosts(const URLPatternSet& host_patterns, bool include_rcd, bool exclude_file_scheme) { typedef base::StringPairs HostVector; HostVector hosts_best_rcd; for (const URLPattern& pattern : host_patterns) { if (exclude_file_scheme && pattern.scheme() == url::kFileScheme) continue; std::string host = pattern.host(); if (!host.empty()) { // Convert the host into a secure format. For example, an IDN domain is // converted to punycode. host = base::UTF16ToUTF8(url_formatter::FormatUrlForSecurityDisplay( GURL(base::StringPrintf("%s%s%s", url::kHttpScheme, url::kStandardSchemeSeparator, host.c_str())), url_formatter::SchemeDisplay::OMIT_HTTP_AND_HTTPS)); } if (pattern.match_subdomains()) host = "*." + host; std::string rcd; size_t reg_len = net::registry_controlled_domains::PermissiveGetHostRegistryLength( host, net::registry_controlled_domains::EXCLUDE_UNKNOWN_REGISTRIES, net::registry_controlled_domains::EXCLUDE_PRIVATE_REGISTRIES); if (reg_len && reg_len != std::string::npos) { if (include_rcd) // else leave rcd empty rcd = host.substr(host.size() - reg_len); host = host.substr(0, host.size() - reg_len); } HostVector::iterator it = hosts_best_rcd.begin(); for (; it != hosts_best_rcd.end(); ++it) { if (it->first == host) break; } if (it != hosts_best_rcd.end()) { if (include_rcd && RcdBetterThan(rcd, it->second)) it->second = rcd; } else { // Previously unseen host, append it. hosts_best_rcd.push_back(std::make_pair(host, rcd)); } } std::set<std::string> distinct_hosts; for (const auto& host_rcd : hosts_best_rcd) distinct_hosts.insert(host_rcd.first + host_rcd.second); return distinct_hosts; }
172,961
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) { struct jffs2_full_dnode *old_metadata, *new_metadata; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; union jffs2_device_node dev; unsigned char *mdata = NULL; int mdatalen = 0; unsigned int ivalid; uint32_t alloclen; int ret; D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); ret = inode_change_ok(inode, iattr); if (ret) return ret; /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr must read the original data associated with the node (i.e. the device numbers or the target name) and write it out again with the appropriate data attached */ if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { /* For these, we don't actually need to read the old node */ mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); mdata = (char *)&dev; D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); } else if (S_ISLNK(inode->i_mode)) { down(&f->sem); mdatalen = f->metadata->size; mdata = kmalloc(f->metadata->size, GFP_USER); if (!mdata) { up(&f->sem); return -ENOMEM; } ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); if (ret) { up(&f->sem); kfree(mdata); return ret; } up(&f->sem); D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); } ri = jffs2_alloc_raw_inode(); if (!ri) { if (S_ISLNK(inode->i_mode)) kfree(mdata); return -ENOMEM; } ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); if (S_ISLNK(inode->i_mode & S_IFMT)) kfree(mdata); return ret; } down(&f->sem); ivalid = iattr->ia_valid; ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->ino = cpu_to_je32(inode->i_ino); ri->version = cpu_to_je32(++f->highest_version); ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid); ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); if (ivalid & ATTR_MODE) if (iattr->ia_mode & S_ISGID && !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); else ri->mode = cpu_to_jemode(iattr->ia_mode); else ri->mode = cpu_to_jemode(inode->i_mode); ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime)); ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime)); ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime)); ri->offset = cpu_to_je32(0); ri->csize = ri->dsize = cpu_to_je32(mdatalen); ri->compr = JFFS2_COMPR_NONE; if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { /* It's an extension. 
Make it a hole node */ ri->compr = JFFS2_COMPR_ZERO; ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); ri->offset = cpu_to_je32(inode->i_size); } ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); if (mdatalen) ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); else ri->data_crc = cpu_to_je32(0); new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL); if (S_ISLNK(inode->i_mode)) kfree(mdata); if (IS_ERR(new_metadata)) { jffs2_complete_reservation(c); jffs2_free_raw_inode(ri); up(&f->sem); return PTR_ERR(new_metadata); } /* It worked. Update the inode */ inode->i_atime = ITIME(je32_to_cpu(ri->atime)); inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); inode->i_mode = jemode_to_cpu(ri->mode); inode->i_uid = je16_to_cpu(ri->uid); inode->i_gid = je16_to_cpu(ri->gid); old_metadata = f->metadata; if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_add_full_dnode_to_inode(c, f, new_metadata); inode->i_size = iattr->ia_size; f->metadata = NULL; } else { f->metadata = new_metadata; } if (old_metadata) { jffs2_mark_node_obsolete(c, old_metadata->raw); jffs2_free_full_dnode(old_metadata); } jffs2_free_raw_inode(ri); up(&f->sem); jffs2_complete_reservation(c); /* We have to do the vmtruncate() without f->sem held, since some pages may be locked and waiting for it in readpage(). We are protected from a simultaneous write() extending i_size back past iattr->ia_size, because do_truncate() holds the generic inode semaphore. */ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) vmtruncate(inode, iattr->ia_size); return 0; } Commit Message: CWE ID: CWE-264
static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) { struct jffs2_full_dnode *old_metadata, *new_metadata; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; union jffs2_device_node dev; unsigned char *mdata = NULL; int mdatalen = 0; unsigned int ivalid; uint32_t alloclen; int ret; D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr must read the original data associated with the node (i.e. the device numbers or the target name) and write it out again with the appropriate data attached */ if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { /* For these, we don't actually need to read the old node */ mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); mdata = (char *)&dev; D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); } else if (S_ISLNK(inode->i_mode)) { down(&f->sem); mdatalen = f->metadata->size; mdata = kmalloc(f->metadata->size, GFP_USER); if (!mdata) { up(&f->sem); return -ENOMEM; } ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); if (ret) { up(&f->sem); kfree(mdata); return ret; } up(&f->sem); D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); } ri = jffs2_alloc_raw_inode(); if (!ri) { if (S_ISLNK(inode->i_mode)) kfree(mdata); return -ENOMEM; } ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); if (S_ISLNK(inode->i_mode & S_IFMT)) kfree(mdata); return ret; } down(&f->sem); ivalid = iattr->ia_valid; ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->ino = cpu_to_je32(inode->i_ino); ri->version = cpu_to_je32(++f->highest_version); ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid); ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); if (ivalid & ATTR_MODE) if (iattr->ia_mode & S_ISGID && !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); else ri->mode = cpu_to_jemode(iattr->ia_mode); else ri->mode = cpu_to_jemode(inode->i_mode); ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime)); ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime)); ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime)); ri->offset = cpu_to_je32(0); ri->csize = ri->dsize = cpu_to_je32(mdatalen); ri->compr = JFFS2_COMPR_NONE; if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { /* It's an extension. 
Make it a hole node */ ri->compr = JFFS2_COMPR_ZERO; ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); ri->offset = cpu_to_je32(inode->i_size); } ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); if (mdatalen) ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); else ri->data_crc = cpu_to_je32(0); new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL); if (S_ISLNK(inode->i_mode)) kfree(mdata); if (IS_ERR(new_metadata)) { jffs2_complete_reservation(c); jffs2_free_raw_inode(ri); up(&f->sem); return PTR_ERR(new_metadata); } /* It worked. Update the inode */ inode->i_atime = ITIME(je32_to_cpu(ri->atime)); inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); inode->i_mode = jemode_to_cpu(ri->mode); inode->i_uid = je16_to_cpu(ri->uid); inode->i_gid = je16_to_cpu(ri->gid); old_metadata = f->metadata; if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_add_full_dnode_to_inode(c, f, new_metadata); inode->i_size = iattr->ia_size; f->metadata = NULL; } else { f->metadata = new_metadata; } if (old_metadata) { jffs2_mark_node_obsolete(c, old_metadata->raw); jffs2_free_full_dnode(old_metadata); } jffs2_free_raw_inode(ri); up(&f->sem); jffs2_complete_reservation(c); /* We have to do the vmtruncate() without f->sem held, since some pages may be locked and waiting for it in readpage(). We are protected from a simultaneous write() extending i_size back past iattr->ia_size, because do_truncate() holds the generic inode semaphore. */ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) vmtruncate(inode, iattr->ia_size); return 0; }
164,657
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static void Rp_test(js_State *J) { js_Regexp *re; const char *text; int opts; Resub m; re = js_toregexp(J, 0); text = js_tostring(J, 1); opts = 0; if (re->flags & JS_REGEXP_G) { if (re->last > strlen(text)) { re->last = 0; js_pushboolean(J, 0); return; } if (re->last > 0) { text += re->last; opts |= REG_NOTBOL; } } if (!js_regexec(re->prog, text, &m, opts)) { if (re->flags & JS_REGEXP_G) re->last = re->last + (m.sub[0].ep - text); js_pushboolean(J, 1); return; } if (re->flags & JS_REGEXP_G) re->last = 0; js_pushboolean(J, 0); } Commit Message: Bug 700937: Limit recursion in regexp matcher. Also handle negative return code as an error in the JS bindings. CWE ID: CWE-400
static void Rp_test(js_State *J) { js_Regexp *re; const char *text; int result; int opts; Resub m; re = js_toregexp(J, 0); text = js_tostring(J, 1); opts = 0; if (re->flags & JS_REGEXP_G) { if (re->last > strlen(text)) { re->last = 0; js_pushboolean(J, 0); return; } if (re->last > 0) { text += re->last; opts |= REG_NOTBOL; } } result = js_regexec(re->prog, text, &m, opts); if (result < 0) js_error(J, "regexec failed"); if (result == 0) { if (re->flags & JS_REGEXP_G) re->last = re->last + (m.sub[0].ep - text); js_pushboolean(J, 1); return; } if (re->flags & JS_REGEXP_G) re->last = 0; js_pushboolean(J, 0); }
169,696
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: xmlParseExtParsedEnt(xmlParserCtxtPtr ctxt) { xmlChar start[4]; xmlCharEncoding enc; if ((ctxt == NULL) || (ctxt->input == NULL)) return(-1); xmlDefaultSAXHandlerInit(); xmlDetectSAX2(ctxt); GROW; /* * SAX: beginning of the document processing. */ if ((ctxt->sax) && (ctxt->sax->setDocumentLocator)) ctxt->sax->setDocumentLocator(ctxt->userData, &xmlDefaultSAXLocator); /* * Get the 4 first bytes and decode the charset * if enc != XML_CHAR_ENCODING_NONE * plug some encoding conversion routines. */ if ((ctxt->input->end - ctxt->input->cur) >= 4) { start[0] = RAW; start[1] = NXT(1); start[2] = NXT(2); start[3] = NXT(3); enc = xmlDetectCharEncoding(start, 4); if (enc != XML_CHAR_ENCODING_NONE) { xmlSwitchEncoding(ctxt, enc); } } if (CUR == 0) { xmlFatalErr(ctxt, XML_ERR_DOCUMENT_EMPTY, NULL); } /* * Check for the XMLDecl in the Prolog. */ GROW; if ((CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) && (IS_BLANK_CH(NXT(5)))) { /* * Note that we will switch encoding on the fly. */ xmlParseXMLDecl(ctxt); if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) { /* * The XML REC instructs us to stop parsing right here */ return(-1); } SKIP_BLANKS; } else { ctxt->version = xmlCharStrdup(XML_DEFAULT_VERSION); } if ((ctxt->sax) && (ctxt->sax->startDocument) && (!ctxt->disableSAX)) ctxt->sax->startDocument(ctxt->userData); /* * Doing validity checking on chunk doesn't make sense */ ctxt->instate = XML_PARSER_CONTENT; ctxt->validate = 0; ctxt->loadsubset = 0; ctxt->depth = 0; xmlParseContent(ctxt); if ((RAW == '<') && (NXT(1) == '/')) { xmlFatalErr(ctxt, XML_ERR_NOT_WELL_BALANCED, NULL); } else if (RAW != 0) { xmlFatalErr(ctxt, XML_ERR_EXTRA_CONTENT, NULL); } /* * SAX: end of the document processing. */ if ((ctxt->sax) && (ctxt->sax->endDocument != NULL)) ctxt->sax->endDocument(ctxt->userData); if (! ctxt->wellFormed) return(-1); return(0); } Commit Message: libxml: XML_PARSER_EOF checks from upstream BUG=229019 TBR=cpu Review URL: https://chromiumcodereview.appspot.com/14053009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
xmlParseExtParsedEnt(xmlParserCtxtPtr ctxt) { xmlChar start[4]; xmlCharEncoding enc; if ((ctxt == NULL) || (ctxt->input == NULL)) return(-1); xmlDefaultSAXHandlerInit(); xmlDetectSAX2(ctxt); GROW; /* * SAX: beginning of the document processing. */ if ((ctxt->sax) && (ctxt->sax->setDocumentLocator)) ctxt->sax->setDocumentLocator(ctxt->userData, &xmlDefaultSAXLocator); /* * Get the 4 first bytes and decode the charset * if enc != XML_CHAR_ENCODING_NONE * plug some encoding conversion routines. */ if ((ctxt->input->end - ctxt->input->cur) >= 4) { start[0] = RAW; start[1] = NXT(1); start[2] = NXT(2); start[3] = NXT(3); enc = xmlDetectCharEncoding(start, 4); if (enc != XML_CHAR_ENCODING_NONE) { xmlSwitchEncoding(ctxt, enc); } } if (CUR == 0) { xmlFatalErr(ctxt, XML_ERR_DOCUMENT_EMPTY, NULL); } /* * Check for the XMLDecl in the Prolog. */ GROW; if ((CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) && (IS_BLANK_CH(NXT(5)))) { /* * Note that we will switch encoding on the fly. */ xmlParseXMLDecl(ctxt); if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) { /* * The XML REC instructs us to stop parsing right here */ return(-1); } SKIP_BLANKS; } else { ctxt->version = xmlCharStrdup(XML_DEFAULT_VERSION); } if ((ctxt->sax) && (ctxt->sax->startDocument) && (!ctxt->disableSAX)) ctxt->sax->startDocument(ctxt->userData); if (ctxt->instate == XML_PARSER_EOF) return(-1); /* * Doing validity checking on chunk doesn't make sense */ ctxt->instate = XML_PARSER_CONTENT; ctxt->validate = 0; ctxt->loadsubset = 0; ctxt->depth = 0; xmlParseContent(ctxt); if (ctxt->instate == XML_PARSER_EOF) return(-1); if ((RAW == '<') && (NXT(1) == '/')) { xmlFatalErr(ctxt, XML_ERR_NOT_WELL_BALANCED, NULL); } else if (RAW != 0) { xmlFatalErr(ctxt, XML_ERR_EXTRA_CONTENT, NULL); } /* * SAX: end of the document processing. */ if ((ctxt->sax) && (ctxt->sax->endDocument != NULL)) ctxt->sax->endDocument(ctxt->userData); if (! ctxt->wellFormed) return(-1); return(0); }
171,291
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { struct keyring_search_context ctx = { .match_data.cmp = lookup_user_key_possessed, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; struct request_key_auth *rka; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: ctx.cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: if (!ctx.cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_thread_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = ctx.cred->thread_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: if (!ctx.cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_process_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = ctx.cred->process_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: if (!ctx.cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); if (ret < 0) goto error; if (lflags & KEY_LOOKUP_CREATE) ret = join_session_keyring(NULL); else ret = install_session_keyring( ctx.cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; } else if (ctx.cred->session_keyring == ctx.cred->user->session_keyring && lflags & KEY_LOOKUP_CREATE) { ret = join_session_keyring(NULL); if (ret < 0) goto error; goto reget_creds; } rcu_read_lock(); key = rcu_dereference(ctx.cred->session_keyring); __key_get(key); rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: if (!ctx.cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = ctx.cred->user->uid_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: if (!ctx.cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = ctx.cred->user->session_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: /* group keyrings are not yet supported */ key_ref = ERR_PTR(-EINVAL); goto error; case KEY_SPEC_REQKEY_AUTH_KEY: key = ctx.cred->request_key_auth; if (!key) goto error; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: if (!ctx.cred->request_key_auth) goto error; down_read(&ctx.cred->request_key_auth->sem); if (test_bit(KEY_FLAG_REVOKED, &ctx.cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { rka = ctx.cred->request_key_auth->payload.data[0]; key = rka->dest_keyring; __key_get(key); } up_read(&ctx.cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); break; default: key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error; } key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ ctx.index_key.type = key->type; ctx.index_key.description = key->description; ctx.index_key.desc_len = strlen(key->description); ctx.match_data.raw_data = key; kdebug("check possessed"); skey_ref = search_process_keyrings(&ctx); kdebug("possessed=%p", skey_ref); if (!IS_ERR(skey_ref)) { key_put(key); key_ref = skey_ref; } break; } /* unlink does not use the nominated key in any way, so can skip all * the permission checks as it is only concerned 
with the keyring */ if (lflags & KEY_LOOKUP_FOR_UNLINK) { ret = 0; goto error; } if (!(lflags & KEY_LOOKUP_PARTIAL)) { ret = wait_for_key_construction(key, true); switch (ret) { case -ERESTARTSYS: goto invalid_key; default: if (perm) goto invalid_key; case 0: break; } } else if (perm) { ret = key_validate(key); if (ret < 0) goto invalid_key; } ret = -EIO; if (!(lflags & KEY_LOOKUP_PARTIAL) && !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) goto invalid_key; /* check the permissions */ ret = key_task_permission(key_ref, ctx.cred, perm); if (ret < 0) goto invalid_key; key->last_used_at = current_kernel_time().tv_sec; error: put_cred(ctx.cred); return key_ref; invalid_key: key_ref_put(key_ref); key_ref = ERR_PTR(ret); goto error; /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: put_cred(ctx.cred); goto try_again; } Commit Message: KEYS: Fix race between updating and finding a negative key Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection error into one field such that: (1) The instantiation state can be modified/read atomically. (2) The error can be accessed atomically with the state. (3) The error isn't stored unioned with the payload pointers. This deals with the problem that the state is spread over three different objects (two bits and a separate variable) and reading or updating them atomically isn't practical, given that not only can uninstantiated keys change into instantiated or rejected keys, but rejected keys can also turn into instantiated keys - and someone accessing the key might not be using any locking. The main side effect of this problem is that what was held in the payload may change, depending on the state. For instance, you might observe the key to be in the rejected state. You then read the cached error, but if the key semaphore wasn't locked, the key might've become instantiated between the two reads - and you might now have something in hand that isn't actually an error code. The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error code if the key is negatively instantiated. The key_is_instantiated() function is replaced with key_is_positive() to avoid confusion as negative keys are also 'instantiated'. Additionally, barriering is included: (1) Order payload-set before state-set during instantiation. (2) Order state-read before payload-read when using the key. Further separate barriering is necessary if RCU is being used to access the payload content after reading the payload pointers. Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data") Cc: [email protected] # v4.4+ Reported-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]> Reviewed-by: Eric Biggers <[email protected]> CWE ID: CWE-20
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { struct keyring_search_context ctx = { .match_data.cmp = lookup_user_key_possessed, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; struct request_key_auth *rka; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: ctx.cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: if (!ctx.cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_thread_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = ctx.cred->thread_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: if (!ctx.cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_process_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = ctx.cred->process_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: if (!ctx.cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); if (ret < 0) goto error; if (lflags & KEY_LOOKUP_CREATE) ret = join_session_keyring(NULL); else ret = install_session_keyring( ctx.cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; } else if (ctx.cred->session_keyring == ctx.cred->user->session_keyring && lflags & KEY_LOOKUP_CREATE) { ret = join_session_keyring(NULL); if (ret < 0) goto error; goto reget_creds; } rcu_read_lock(); key = rcu_dereference(ctx.cred->session_keyring); __key_get(key); rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: if (!ctx.cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = ctx.cred->user->uid_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: if (!ctx.cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = ctx.cred->user->session_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: /* group keyrings are not yet supported */ key_ref = ERR_PTR(-EINVAL); goto error; case KEY_SPEC_REQKEY_AUTH_KEY: key = ctx.cred->request_key_auth; if (!key) goto error; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: if (!ctx.cred->request_key_auth) goto error; down_read(&ctx.cred->request_key_auth->sem); if (test_bit(KEY_FLAG_REVOKED, &ctx.cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { rka = ctx.cred->request_key_auth->payload.data[0]; key = rka->dest_keyring; __key_get(key); } up_read(&ctx.cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); break; default: key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error; } key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ ctx.index_key.type = key->type; ctx.index_key.description = key->description; ctx.index_key.desc_len = strlen(key->description); ctx.match_data.raw_data = key; kdebug("check possessed"); skey_ref = search_process_keyrings(&ctx); kdebug("possessed=%p", skey_ref); if (!IS_ERR(skey_ref)) { key_put(key); key_ref = skey_ref; } break; } /* unlink does not use the nominated key in any way, so can skip all * the permission checks as it is only concerned with 
the keyring */ if (lflags & KEY_LOOKUP_FOR_UNLINK) { ret = 0; goto error; } if (!(lflags & KEY_LOOKUP_PARTIAL)) { ret = wait_for_key_construction(key, true); switch (ret) { case -ERESTARTSYS: goto invalid_key; default: if (perm) goto invalid_key; case 0: break; } } else if (perm) { ret = key_validate(key); if (ret < 0) goto invalid_key; } ret = -EIO; if (!(lflags & KEY_LOOKUP_PARTIAL) && key_read_state(key) == KEY_IS_UNINSTANTIATED) goto invalid_key; /* check the permissions */ ret = key_task_permission(key_ref, ctx.cred, perm); if (ret < 0) goto invalid_key; key->last_used_at = current_kernel_time().tv_sec; error: put_cred(ctx.cred); return key_ref; invalid_key: key_ref_put(key_ref); key_ref = ERR_PTR(ret); goto error; /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: put_cred(ctx.cred); goto try_again; }
167,705
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static PHP_RINIT_FUNCTION(libxml) { if (_php_libxml_per_request_initialization) { /* report errors via handler rather than stderr */ xmlSetGenericErrorFunc(NULL, php_libxml_error_handler); xmlParserInputBufferCreateFilenameDefault(php_libxml_input_buffer_create_filename); xmlOutputBufferCreateFilenameDefault(php_libxml_output_buffer_create_filename); } return SUCCESS; } Commit Message: CWE ID:
static PHP_RINIT_FUNCTION(libxml) { if (_php_libxml_per_request_initialization) { /* report errors via handler rather than stderr */ xmlSetGenericErrorFunc(NULL, php_libxml_error_handler); xmlParserInputBufferCreateFilenameDefault(php_libxml_input_buffer_create_filename); xmlOutputBufferCreateFilenameDefault(php_libxml_output_buffer_create_filename); /* Enable the entity loader by default. This ensure that * other threads/requests that might have disable the loader * do not affect the current request. */ LIBXML(entity_loader_disabled) = 0; } return SUCCESS; }
165,273
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static inline unsigned int get_next_char( enum entity_charset charset, const unsigned char *str, size_t str_len, size_t *cursor, int *status) { size_t pos = *cursor; unsigned int this_char = 0; *status = SUCCESS; assert(pos <= str_len); if (!CHECK_LEN(pos, 1)) MB_FAILURE(pos, 1); switch (charset) { case cs_utf_8: { /* We'll follow strategy 2. from section 3.6.1 of UTR #36: * "In a reported illegal byte sequence, do not include any * non-initial byte that encodes a valid character or is a leading * byte for a valid sequence." */ unsigned char c; c = str[pos]; if (c < 0x80) { this_char = c; pos++; } else if (c < 0xc2) { MB_FAILURE(pos, 1); } else if (c < 0xe0) { if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); if (!utf8_trail(str[pos + 1])) { MB_FAILURE(pos, utf8_lead(str[pos + 1]) ? 1 : 2); } this_char = ((c & 0x1f) << 6) | (str[pos + 1] & 0x3f); if (this_char < 0x80) { /* non-shortest form */ MB_FAILURE(pos, 2); } pos += 2; } else if (c < 0xf0) { size_t avail = str_len - pos; if (avail < 3 || !utf8_trail(str[pos + 1]) || !utf8_trail(str[pos + 2])) { if (avail < 2 || utf8_lead(str[pos + 1])) MB_FAILURE(pos, 1); else if (avail < 3 || utf8_lead(str[pos + 2])) MB_FAILURE(pos, 2); else MB_FAILURE(pos, 3); } this_char = ((c & 0x0f) << 12) | ((str[pos + 1] & 0x3f) << 6) | (str[pos + 2] & 0x3f); if (this_char < 0x800) { /* non-shortest form */ MB_FAILURE(pos, 3); } else if (this_char >= 0xd800 && this_char <= 0xdfff) { /* surrogate */ MB_FAILURE(pos, 3); } pos += 3; } else if (c < 0xf5) { size_t avail = str_len - pos; if (avail < 4 || !utf8_trail(str[pos + 1]) || !utf8_trail(str[pos + 2]) || !utf8_trail(str[pos + 3])) { if (avail < 2 || utf8_lead(str[pos + 1])) MB_FAILURE(pos, 1); else if (avail < 3 || utf8_lead(str[pos + 2])) MB_FAILURE(pos, 2); else if (avail < 4 || utf8_lead(str[pos + 3])) MB_FAILURE(pos, 3); else MB_FAILURE(pos, 4); } this_char = ((c & 0x07) << 18) | ((str[pos + 1] & 0x3f) << 12) | ((str[pos + 2] & 0x3f) << 6) | (str[pos + 3] & 0x3f); if (this_char < 0x10000 || this_char > 0x10FFFF) { /* non-shortest form or outside range */ MB_FAILURE(pos, 4); } pos += 4; } else { MB_FAILURE(pos, 1); } } break; case cs_big5: /* reference http://demo.icu-project.org/icu-bin/convexp?conv=big5 */ { unsigned char c = str[pos]; if (c >= 0x81 && c <= 0xFE) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if ((next >= 0x40 && next <= 0x7E) || (next >= 0xA1 && next <= 0xFE)) { this_char = (c << 8) | next; } else { MB_FAILURE(pos, 1); } pos += 2; } else { this_char = c; pos += 1; } } break; case cs_big5hkscs: { unsigned char c = str[pos]; if (c >= 0x81 && c <= 0xFE) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if ((next >= 0x40 && next <= 0x7E) || (next >= 0xA1 && next <= 0xFE)) { this_char = (c << 8) | next; } else if (next != 0x80 && next != 0xFF) { MB_FAILURE(pos, 1); } else { MB_FAILURE(pos, 2); } pos += 2; } else { this_char = c; pos += 1; } } break; case cs_gb2312: /* EUC-CN */ { unsigned char c = str[pos]; if (c >= 0xA1 && c <= 0xFE) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if (gb2312_trail(next)) { this_char = (c << 8) | next; } else if (gb2312_lead(next)) { MB_FAILURE(pos, 1); } else { MB_FAILURE(pos, 2); } pos += 2; } else if (gb2312_lead(c)) { this_char = c; pos += 1; } else { MB_FAILURE(pos, 1); } } break; case cs_sjis: { unsigned char c = str[pos]; if ((c >= 0x81 && c <= 0x9F) || (c >= 0xE0 && c <= 0xFC)) { unsigned char next; if (!CHECK_LEN(pos, 2)) 
MB_FAILURE(pos, 1); next = str[pos + 1]; if (sjis_trail(next)) { this_char = (c << 8) | next; } else if (sjis_lead(next)) { MB_FAILURE(pos, 1); } else { MB_FAILURE(pos, 2); } pos += 2; } else if (c < 0x80 || (c >= 0xA1 && c <= 0xDF)) { this_char = c; pos += 1; } else { MB_FAILURE(pos, 1); } } break; case cs_eucjp: { unsigned char c = str[pos]; if (c >= 0xA1 && c <= 0xFE) { unsigned next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if (next >= 0xA1 && next <= 0xFE) { /* this a jis kanji char */ this_char = (c << 8) | next; } else { MB_FAILURE(pos, (next != 0xA0 && next != 0xFF) ? 1 : 2); } pos += 2; } else if (c == 0x8E) { unsigned next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if (next >= 0xA1 && next <= 0xDF) { /* JIS X 0201 kana */ this_char = (c << 8) | next; } else { MB_FAILURE(pos, (next != 0xA0 && next != 0xFF) ? 1 : 2); } pos += 2; } else if (c == 0x8F) { size_t avail = str_len - pos; if (avail < 3 || !(str[pos + 1] >= 0xA1 && str[pos + 1] <= 0xFE) || !(str[pos + 2] >= 0xA1 && str[pos + 2] <= 0xFE)) { if (avail < 2 || (str[pos + 1] != 0xA0 && str[pos + 1] != 0xFF)) MB_FAILURE(pos, 1); else if (avail < 3 || (str[pos + 2] != 0xA0 && str[pos + 2] != 0xFF)) MB_FAILURE(pos, 2); else MB_FAILURE(pos, 3); } else { /* JIS X 0212 hojo-kanji */ this_char = (c << 16) | (str[pos + 1] << 8) | str[pos + 2]; } pos += 3; } else if (c != 0xA0 && c != 0xFF) { /* character encoded in 1 code unit */ this_char = c; pos += 1; } else { MB_FAILURE(pos, 1); } } break; default: /* single-byte charsets */ this_char = str[pos++]; break; } *cursor = pos; return this_char; } Commit Message: Fix bug #72135 - don't create strings with lengths outside int range CWE ID: CWE-190
static inline unsigned int get_next_char( enum entity_charset charset, const unsigned char *str, size_t str_len, size_t *cursor, int *status) { size_t pos = *cursor; unsigned int this_char = 0; *status = SUCCESS; assert(pos <= str_len); if (!CHECK_LEN(pos, 1)) MB_FAILURE(pos, 1); switch (charset) { case cs_utf_8: { /* We'll follow strategy 2. from section 3.6.1 of UTR #36: * "In a reported illegal byte sequence, do not include any * non-initial byte that encodes a valid character or is a leading * byte for a valid sequence." */ unsigned char c; c = str[pos]; if (c < 0x80) { this_char = c; pos++; } else if (c < 0xc2) { MB_FAILURE(pos, 1); } else if (c < 0xe0) { if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); if (!utf8_trail(str[pos + 1])) { MB_FAILURE(pos, utf8_lead(str[pos + 1]) ? 1 : 2); } this_char = ((c & 0x1f) << 6) | (str[pos + 1] & 0x3f); if (this_char < 0x80) { /* non-shortest form */ MB_FAILURE(pos, 2); } pos += 2; } else if (c < 0xf0) { size_t avail = str_len - pos; if (avail < 3 || !utf8_trail(str[pos + 1]) || !utf8_trail(str[pos + 2])) { if (avail < 2 || utf8_lead(str[pos + 1])) MB_FAILURE(pos, 1); else if (avail < 3 || utf8_lead(str[pos + 2])) MB_FAILURE(pos, 2); else MB_FAILURE(pos, 3); } this_char = ((c & 0x0f) << 12) | ((str[pos + 1] & 0x3f) << 6) | (str[pos + 2] & 0x3f); if (this_char < 0x800) { /* non-shortest form */ MB_FAILURE(pos, 3); } else if (this_char >= 0xd800 && this_char <= 0xdfff) { /* surrogate */ MB_FAILURE(pos, 3); } pos += 3; } else if (c < 0xf5) { size_t avail = str_len - pos; if (avail < 4 || !utf8_trail(str[pos + 1]) || !utf8_trail(str[pos + 2]) || !utf8_trail(str[pos + 3])) { if (avail < 2 || utf8_lead(str[pos + 1])) MB_FAILURE(pos, 1); else if (avail < 3 || utf8_lead(str[pos + 2])) MB_FAILURE(pos, 2); else if (avail < 4 || utf8_lead(str[pos + 3])) MB_FAILURE(pos, 3); else MB_FAILURE(pos, 4); } this_char = ((c & 0x07) << 18) | ((str[pos + 1] & 0x3f) << 12) | ((str[pos + 2] & 0x3f) << 6) | (str[pos + 3] & 0x3f); if (this_char < 0x10000 || this_char > 0x10FFFF) { /* non-shortest form or outside range */ MB_FAILURE(pos, 4); } pos += 4; } else { MB_FAILURE(pos, 1); } } break; case cs_big5: /* reference http://demo.icu-project.org/icu-bin/convexp?conv=big5 */ { unsigned char c = str[pos]; if (c >= 0x81 && c <= 0xFE) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if ((next >= 0x40 && next <= 0x7E) || (next >= 0xA1 && next <= 0xFE)) { this_char = (c << 8) | next; } else { MB_FAILURE(pos, 1); } pos += 2; } else { this_char = c; pos += 1; } } break; case cs_big5hkscs: { unsigned char c = str[pos]; if (c >= 0x81 && c <= 0xFE) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if ((next >= 0x40 && next <= 0x7E) || (next >= 0xA1 && next <= 0xFE)) { this_char = (c << 8) | next; } else if (next != 0x80 && next != 0xFF) { MB_FAILURE(pos, 1); } else { MB_FAILURE(pos, 2); } pos += 2; } else { this_char = c; pos += 1; } } break; case cs_gb2312: /* EUC-CN */ { unsigned char c = str[pos]; if (c >= 0xA1 && c <= 0xFE) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if (gb2312_trail(next)) { this_char = (c << 8) | next; } else if (gb2312_lead(next)) { MB_FAILURE(pos, 1); } else { MB_FAILURE(pos, 2); } pos += 2; } else if (gb2312_lead(c)) { this_char = c; pos += 1; } else { MB_FAILURE(pos, 1); } } break; case cs_sjis: { unsigned char c = str[pos]; if ((c >= 0x81 && c <= 0x9F) || (c >= 0xE0 && c <= 0xFC)) { unsigned char next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); 
next = str[pos + 1]; if (sjis_trail(next)) { this_char = (c << 8) | next; } else if (sjis_lead(next)) { MB_FAILURE(pos, 1); } else { MB_FAILURE(pos, 2); } pos += 2; } else if (c < 0x80 || (c >= 0xA1 && c <= 0xDF)) { this_char = c; pos += 1; } else { MB_FAILURE(pos, 1); } } break; case cs_eucjp: { unsigned char c = str[pos]; if (c >= 0xA1 && c <= 0xFE) { unsigned next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if (next >= 0xA1 && next <= 0xFE) { /* this a jis kanji char */ this_char = (c << 8) | next; } else { MB_FAILURE(pos, (next != 0xA0 && next != 0xFF) ? 1 : 2); } pos += 2; } else if (c == 0x8E) { unsigned next; if (!CHECK_LEN(pos, 2)) MB_FAILURE(pos, 1); next = str[pos + 1]; if (next >= 0xA1 && next <= 0xDF) { /* JIS X 0201 kana */ this_char = (c << 8) | next; } else { MB_FAILURE(pos, (next != 0xA0 && next != 0xFF) ? 1 : 2); } pos += 2; } else if (c == 0x8F) { size_t avail = str_len - pos; if (avail < 3 || !(str[pos + 1] >= 0xA1 && str[pos + 1] <= 0xFE) || !(str[pos + 2] >= 0xA1 && str[pos + 2] <= 0xFE)) { if (avail < 2 || (str[pos + 1] != 0xA0 && str[pos + 1] != 0xFF)) MB_FAILURE(pos, 1); else if (avail < 3 || (str[pos + 2] != 0xA0 && str[pos + 2] != 0xFF)) MB_FAILURE(pos, 2); else MB_FAILURE(pos, 3); } else { /* JIS X 0212 hojo-kanji */ this_char = (c << 16) | (str[pos + 1] << 8) | str[pos + 2]; } pos += 3; } else if (c != 0xA0 && c != 0xFF) { /* character encoded in 1 code unit */ this_char = c; pos += 1; } else { MB_FAILURE(pos, 1); } } break; default: /* single-byte charsets */ this_char = str[pos++]; break; } *cursor = pos; return this_char; }
167,172
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path lowerpath; struct path upperpath; struct path workpath; struct inode *root_inode; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; struct kstatfs statfs; int err; err = -ENOMEM; ufs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL); if (!ufs) goto out; err = ovl_parse_opt((char *) data, &ufs->config); if (err) goto out_free_config; /* FIXME: workdir is not needed for a R/O mount */ err = -EINVAL; if (!ufs->config.upperdir || !ufs->config.lowerdir || !ufs->config.workdir) { pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); goto out_free_config; } err = -ENOMEM; oe = ovl_alloc_entry(); if (oe == NULL) goto out_free_config; err = ovl_mount_dir(ufs->config.upperdir, &upperpath); if (err) goto out_free_oe; err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); if (err) goto out_put_upperpath; err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_lowerpath; err = -EINVAL; if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || !S_ISDIR(workpath.dentry->d_inode->i_mode)) { pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); goto out_put_workpath; } if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); goto out_put_workpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); goto out_put_workpath; } if (!ovl_is_allowed_fs_type(upperpath.dentry)) { pr_err("overlayfs: filesystem of upperdir is not supported\n"); goto out_put_workpath; } if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { pr_err("overlayfs: filesystem of lowerdir is not supported\n"); goto out_put_workpath; } err = vfs_statfs(&lowerpath, &statfs); if (err) { pr_err("overlayfs: statfs failed on lowerpath\n"); goto out_put_workpath; } ufs->lower_namelen = statfs.f_namelen; ufs->upper_mnt = clone_private_mount(&upperpath); err = PTR_ERR(ufs->upper_mnt); if (IS_ERR(ufs->upper_mnt)) { pr_err("overlayfs: failed to clone upperpath\n"); goto out_put_workpath; } ufs->lower_mnt = clone_private_mount(&lowerpath); err = PTR_ERR(ufs->lower_mnt); if (IS_ERR(ufs->lower_mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); goto out_put_upper_mnt; } ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); if (IS_ERR(ufs->workdir)) { pr_err("overlayfs: failed to create directory %s/%s\n", ufs->config.workdir, OVL_WORKDIR_NAME); goto out_put_lower_mnt; } /* * Make lower_mnt R/O. That way fchmod/fchown on lower file * will fail instead of modifying lower fs. 
*/ ufs->lower_mnt->mnt_flags |= MNT_READONLY; /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; root_inode = ovl_new_inode(sb, S_IFDIR, oe); if (!root_inode) goto out_put_workdir; root_dentry = d_make_root(root_inode); if (!root_dentry) goto out_put_workdir; mntput(upperpath.mnt); mntput(lowerpath.mnt); path_put(&workpath); oe->__upperdentry = upperpath.dentry; oe->lowerdentry = lowerpath.dentry; root_dentry->d_fsdata = oe; sb->s_magic = OVERLAYFS_SUPER_MAGIC; sb->s_op = &ovl_super_operations; sb->s_root = root_dentry; sb->s_fs_info = ufs; return 0; out_put_workdir: dput(ufs->workdir); out_put_lower_mnt: mntput(ufs->lower_mnt); out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_workpath: path_put(&workpath); out_put_lowerpath: path_put(&lowerpath); out_put_upperpath: path_put(&upperpath); out_free_oe: kfree(oe); out_free_config: kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); kfree(ufs->config.workdir); kfree(ufs); out: return err; } Commit Message: fs: limit filesystem stacking depth Add a simple read-only counter to super_block that indicates how deep this is in the stack of filesystems. Previously ecryptfs was the only stackable filesystem and it explicitly disallowed multiple layers of itself. Overlayfs, however, can be stacked recursively and also may be stacked on top of ecryptfs or vice versa. To limit the kernel stack usage we must limit the depth of the filesystem stack. Initially the limit is set to 2. Signed-off-by: Miklos Szeredi <[email protected]> CWE ID: CWE-264
static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path lowerpath; struct path upperpath; struct path workpath; struct inode *root_inode; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; struct kstatfs statfs; int err; err = -ENOMEM; ufs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL); if (!ufs) goto out; err = ovl_parse_opt((char *) data, &ufs->config); if (err) goto out_free_config; /* FIXME: workdir is not needed for a R/O mount */ err = -EINVAL; if (!ufs->config.upperdir || !ufs->config.lowerdir || !ufs->config.workdir) { pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); goto out_free_config; } err = -ENOMEM; oe = ovl_alloc_entry(); if (oe == NULL) goto out_free_config; err = ovl_mount_dir(ufs->config.upperdir, &upperpath); if (err) goto out_free_oe; err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); if (err) goto out_put_upperpath; err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_lowerpath; err = -EINVAL; if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || !S_ISDIR(workpath.dentry->d_inode->i_mode)) { pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); goto out_put_workpath; } if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); goto out_put_workpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); goto out_put_workpath; } if (!ovl_is_allowed_fs_type(upperpath.dentry)) { pr_err("overlayfs: filesystem of upperdir is not supported\n"); goto out_put_workpath; } if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { pr_err("overlayfs: filesystem of lowerdir is not supported\n"); goto out_put_workpath; } err = vfs_statfs(&lowerpath, &statfs); if (err) { pr_err("overlayfs: statfs failed on lowerpath\n"); goto out_put_workpath; } ufs->lower_namelen = statfs.f_namelen; sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, lowerpath.mnt->mnt_sb->s_stack_depth) + 1; err = -EINVAL; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); goto out_put_workpath; } ufs->upper_mnt = clone_private_mount(&upperpath); err = PTR_ERR(ufs->upper_mnt); if (IS_ERR(ufs->upper_mnt)) { pr_err("overlayfs: failed to clone upperpath\n"); goto out_put_workpath; } ufs->lower_mnt = clone_private_mount(&lowerpath); err = PTR_ERR(ufs->lower_mnt); if (IS_ERR(ufs->lower_mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); goto out_put_upper_mnt; } ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); if (IS_ERR(ufs->workdir)) { pr_err("overlayfs: failed to create directory %s/%s\n", ufs->config.workdir, OVL_WORKDIR_NAME); goto out_put_lower_mnt; } /* * Make lower_mnt R/O. That way fchmod/fchown on lower file * will fail instead of modifying lower fs. 
*/ ufs->lower_mnt->mnt_flags |= MNT_READONLY; /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; root_inode = ovl_new_inode(sb, S_IFDIR, oe); if (!root_inode) goto out_put_workdir; root_dentry = d_make_root(root_inode); if (!root_dentry) goto out_put_workdir; mntput(upperpath.mnt); mntput(lowerpath.mnt); path_put(&workpath); oe->__upperdentry = upperpath.dentry; oe->lowerdentry = lowerpath.dentry; root_dentry->d_fsdata = oe; sb->s_magic = OVERLAYFS_SUPER_MAGIC; sb->s_op = &ovl_super_operations; sb->s_root = root_dentry; sb->s_fs_info = ufs; return 0; out_put_workdir: dput(ufs->workdir); out_put_lower_mnt: mntput(ufs->lower_mnt); out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_workpath: path_put(&workpath); out_put_lowerpath: path_put(&lowerpath); out_put_upperpath: path_put(&upperpath); out_free_oe: kfree(oe); out_free_config: kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); kfree(ufs->config.workdir); kfree(ufs); out: return err; }
168,896
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: void ApplyBlockElementCommand::formatSelection(const VisiblePosition& startOfSelection, const VisiblePosition& endOfSelection) { Position start = startOfSelection.deepEquivalent().downstream(); if (isAtUnsplittableElement(start)) { RefPtr<Element> blockquote = createBlockElement(); insertNodeAt(blockquote, start); RefPtr<Element> placeholder = createBreakElement(document()); appendNode(placeholder, blockquote); setEndingSelection(VisibleSelection(positionBeforeNode(placeholder.get()), DOWNSTREAM, endingSelection().isDirectional())); return; } RefPtr<Element> blockquoteForNextIndent; VisiblePosition endOfCurrentParagraph = endOfParagraph(startOfSelection); VisiblePosition endAfterSelection = endOfParagraph(endOfParagraph(endOfSelection).next()); m_endOfLastParagraph = endOfParagraph(endOfSelection).deepEquivalent(); bool atEnd = false; Position end; while (endOfCurrentParagraph != endAfterSelection && !atEnd) { if (endOfCurrentParagraph.deepEquivalent() == m_endOfLastParagraph) atEnd = true; rangeForParagraphSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end); endOfCurrentParagraph = end; Position afterEnd = end.next(); Node* enclosingCell = enclosingNodeOfType(start, &isTableCell); VisiblePosition endOfNextParagraph = endOfNextParagrahSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end); formatRange(start, end, m_endOfLastParagraph, blockquoteForNextIndent); if (enclosingCell && enclosingCell != enclosingNodeOfType(endOfNextParagraph.deepEquivalent(), &isTableCell)) blockquoteForNextIndent = 0; if (endAfterSelection.isNotNull() && !endAfterSelection.deepEquivalent().inDocument()) break; if (endOfNextParagraph.isNotNull() && !endOfNextParagraph.deepEquivalent().inDocument()) { ASSERT_NOT_REACHED(); return; } endOfCurrentParagraph = endOfNextParagraph; } } Commit Message: Remove false assertion in ApplyBlockElementCommand::formatSelection() Note: This patch is preparation of fixing issue 294456. This patch removes false assertion in ApplyBlockElementCommand::formatSelection(), when contents of being indent is modified, e.g. mutation event, |endOfNextParagraph| can hold removed contents. BUG=294456 TEST=n/a [email protected] Review URL: https://codereview.chromium.org/25657004 git-svn-id: svn://svn.chromium.org/blink/trunk@158701 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-399
void ApplyBlockElementCommand::formatSelection(const VisiblePosition& startOfSelection, const VisiblePosition& endOfSelection) { Position start = startOfSelection.deepEquivalent().downstream(); if (isAtUnsplittableElement(start)) { RefPtr<Element> blockquote = createBlockElement(); insertNodeAt(blockquote, start); RefPtr<Element> placeholder = createBreakElement(document()); appendNode(placeholder, blockquote); setEndingSelection(VisibleSelection(positionBeforeNode(placeholder.get()), DOWNSTREAM, endingSelection().isDirectional())); return; } RefPtr<Element> blockquoteForNextIndent; VisiblePosition endOfCurrentParagraph = endOfParagraph(startOfSelection); VisiblePosition endAfterSelection = endOfParagraph(endOfParagraph(endOfSelection).next()); m_endOfLastParagraph = endOfParagraph(endOfSelection).deepEquivalent(); bool atEnd = false; Position end; while (endOfCurrentParagraph != endAfterSelection && !atEnd) { if (endOfCurrentParagraph.deepEquivalent() == m_endOfLastParagraph) atEnd = true; rangeForParagraphSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end); endOfCurrentParagraph = end; Position afterEnd = end.next(); Node* enclosingCell = enclosingNodeOfType(start, &isTableCell); VisiblePosition endOfNextParagraph = endOfNextParagrahSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end); formatRange(start, end, m_endOfLastParagraph, blockquoteForNextIndent); if (enclosingCell && enclosingCell != enclosingNodeOfType(endOfNextParagraph.deepEquivalent(), &isTableCell)) blockquoteForNextIndent = 0; if (endAfterSelection.isNotNull() && !endAfterSelection.deepEquivalent().inDocument()) break; // If somehow, e.g. mutation event handler, we did, return to prevent crashes. if (endOfNextParagraph.isNotNull() && !endOfNextParagraph.deepEquivalent().inDocument()) return; endOfCurrentParagraph = endOfNextParagraph; } }
171,170
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const { assert(m_pos >= m_element_start); pEntry = NULL; if (index < 0) return -1; //generic error if (m_entries_count < 0) return E_BUFFER_NOT_FULL; assert(m_entries); assert(m_entries_size > 0); assert(m_entries_count <= m_entries_size); if (index < m_entries_count) { pEntry = m_entries[index]; assert(pEntry); return 1; //found entry } if (m_element_size < 0) //we don't know cluster end yet return E_BUFFER_NOT_FULL; //underflow const long long element_stop = m_element_start + m_element_size; if (m_pos >= element_stop) return 0; //nothing left to parse return E_BUFFER_NOT_FULL; //underflow, since more remains to be parsed } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } unsigned char flags; status = pReader->Read(pos, 1, &flags);
174,314
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: tcpmss_mangle_packet(struct sk_buff *skb, const struct xt_action_param *par, unsigned int family, unsigned int tcphoff, unsigned int minlen) { const struct xt_tcpmss_info *info = par->targinfo; struct tcphdr *tcph; int len, tcp_hdrlen; unsigned int i; __be16 oldval; u16 newmss; u8 *opt; /* This is a fragment, no TCP header is available */ if (par->fragoff != 0) return 0; if (!skb_make_writable(skb, skb->len)) return -1; len = skb->len - tcphoff; if (len < (int)sizeof(struct tcphdr)) return -1; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; if (len < tcp_hdrlen) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { struct net *net = xt_net(par); unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family); unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu); if (min_mtu <= minlen) { net_err_ratelimited("unknown or invalid path-MTU (%u)\n", min_mtu); return -1; } newmss = min_mtu - minlen; } else newmss = info->mss; opt = (u_int8_t *)tcph; for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { u_int16_t oldmss; oldmss = (opt[i+2] << 8) | opt[i+3]; /* Never increase MSS, even when setting it, as * doing so results in problems for hosts that rely * on MSS being set correctly. */ if (oldmss <= newmss) return 0; opt[i+2] = (newmss & 0xff00) >> 8; opt[i+3] = newmss & 0x00ff; inet_proto_csum_replace2(&tcph->check, skb, htons(oldmss), htons(newmss), false); return 0; } } /* There is data after the header so the option can't be added * without moving it, and doing so may make the SYN packet * itself too large. Accept the packet unmodified instead. */ if (len > tcp_hdrlen) return 0; /* * MSS Option not found ?! add it.. */ if (skb_tailroom(skb) < TCPOLEN_MSS) { if (pskb_expand_head(skb, 0, TCPOLEN_MSS - skb_tailroom(skb), GFP_ATOMIC)) return -1; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); } skb_put(skb, TCPOLEN_MSS); /* * IPv4: RFC 1122 states "If an MSS option is not received at * connection setup, TCP MUST assume a default send MSS of 536". * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum * length IPv6 header of 60, ergo the default MSS value is 1220 * Since no MSS was provided, we must use the default values */ if (xt_family(par) == NFPROTO_IPV4) newmss = min(newmss, (u16)536); else newmss = min(newmss, (u16)1220); opt = (u_int8_t *)tcph + sizeof(struct tcphdr); memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); inet_proto_csum_replace2(&tcph->check, skb, htons(len), htons(len + TCPOLEN_MSS), true); opt[0] = TCPOPT_MSS; opt[1] = TCPOLEN_MSS; opt[2] = (newmss & 0xff00) >> 8; opt[3] = newmss & 0x00ff; inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false); oldval = ((__be16 *)tcph)[6]; tcph->doff += TCPOLEN_MSS/4; inet_proto_csum_replace2(&tcph->check, skb, oldval, ((__be16 *)tcph)[6], false); return TCPOLEN_MSS; } Commit Message: netfilter: xt_TCPMSS: add more sanity tests on tcph->doff Denys provided an awesome KASAN report pointing to an use after free in xt_TCPMSS I have provided three patches to fix this issue, either in xt_TCPMSS or in xt_tcpudp.c. It seems xt_TCPMSS patch has the smallest possible impact. Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Denys Fedoryshchenko <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]> CWE ID: CWE-416
tcpmss_mangle_packet(struct sk_buff *skb, const struct xt_action_param *par, unsigned int family, unsigned int tcphoff, unsigned int minlen) { const struct xt_tcpmss_info *info = par->targinfo; struct tcphdr *tcph; int len, tcp_hdrlen; unsigned int i; __be16 oldval; u16 newmss; u8 *opt; /* This is a fragment, no TCP header is available */ if (par->fragoff != 0) return 0; if (!skb_make_writable(skb, skb->len)) return -1; len = skb->len - tcphoff; if (len < (int)sizeof(struct tcphdr)) return -1; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { struct net *net = xt_net(par); unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family); unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu); if (min_mtu <= minlen) { net_err_ratelimited("unknown or invalid path-MTU (%u)\n", min_mtu); return -1; } newmss = min_mtu - minlen; } else newmss = info->mss; opt = (u_int8_t *)tcph; for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { u_int16_t oldmss; oldmss = (opt[i+2] << 8) | opt[i+3]; /* Never increase MSS, even when setting it, as * doing so results in problems for hosts that rely * on MSS being set correctly. */ if (oldmss <= newmss) return 0; opt[i+2] = (newmss & 0xff00) >> 8; opt[i+3] = newmss & 0x00ff; inet_proto_csum_replace2(&tcph->check, skb, htons(oldmss), htons(newmss), false); return 0; } } /* There is data after the header so the option can't be added * without moving it, and doing so may make the SYN packet * itself too large. Accept the packet unmodified instead. */ if (len > tcp_hdrlen) return 0; /* tcph->doff has 4 bits, do not wrap it to 0 */ if (tcp_hdrlen >= 15 * 4) return 0; /* * MSS Option not found ?! add it.. */ if (skb_tailroom(skb) < TCPOLEN_MSS) { if (pskb_expand_head(skb, 0, TCPOLEN_MSS - skb_tailroom(skb), GFP_ATOMIC)) return -1; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); } skb_put(skb, TCPOLEN_MSS); /* * IPv4: RFC 1122 states "If an MSS option is not received at * connection setup, TCP MUST assume a default send MSS of 536". * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum * length IPv6 header of 60, ergo the default MSS value is 1220 * Since no MSS was provided, we must use the default values */ if (xt_family(par) == NFPROTO_IPV4) newmss = min(newmss, (u16)536); else newmss = min(newmss, (u16)1220); opt = (u_int8_t *)tcph + sizeof(struct tcphdr); memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); inet_proto_csum_replace2(&tcph->check, skb, htons(len), htons(len + TCPOLEN_MSS), true); opt[0] = TCPOPT_MSS; opt[1] = TCPOLEN_MSS; opt[2] = (newmss & 0xff00) >> 8; opt[3] = newmss & 0x00ff; inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false); oldval = ((__be16 *)tcph)[6]; tcph->doff += TCPOLEN_MSS/4; inet_proto_csum_replace2(&tcph->check, skb, oldval, ((__be16 *)tcph)[6], false); return TCPOLEN_MSS; }
169,426
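The fix above hinges on two sanity conditions for the TCP data offset: the advertised header length must be at least the 20-byte fixed header and must fit inside the packet, and appending a 4-byte MSS option must not push doff past its 4-bit maximum of 15 words. A minimal standalone sketch of those checks, written with hypothetical names and plain buffers rather than kernel types, could look like this:

#include <cstddef>
#include <cstdint>

static const int kTcpMinHdrLen = 20;       // fixed TCP header size without options
static const int kTcpMaxHdrLen = 15 * 4;   // doff is a 4-bit word count, so at most 60 bytes

// Returns the TCP header length in bytes, or -1 if doff is implausible for this packet.
static int tcp_header_len(const uint8_t* pkt, size_t pkt_len) {
  if (pkt_len < static_cast<size_t>(kTcpMinHdrLen))
    return -1;
  int hdrlen = (pkt[12] >> 4) * 4;          // data offset lives in the high nibble of byte 12
  if (hdrlen < kTcpMinHdrLen || static_cast<size_t>(hdrlen) > pkt_len)
    return -1;                              // options would run past the end of the packet
  return hdrlen;
}

// Before appending a 4-byte MSS option, also make sure doff cannot wrap around.
static bool can_append_mss_option(int hdrlen) {
  return hdrlen >= 0 && hdrlen + 4 <= kTcpMaxHdrLen;
}

The second helper corresponds to the new "if (tcp_hdrlen >= 15 * 4) return 0;" guard in the patched function, which keeps doff from wrapping to zero when the option is added.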
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: struct lib_t* MACH0_(get_libs)(struct MACH0_(obj_t)* bin) { struct lib_t *libs; int i; if (!bin->nlibs) return NULL; if (!(libs = calloc ((bin->nlibs + 1), sizeof(struct lib_t)))) return NULL; for (i = 0; i < bin->nlibs; i++) { strncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH); libs[i].name[R_BIN_MACH0_STRING_LENGTH-1] = '\0'; libs[i].last = 0; } libs[i].last = 1; return libs; } Commit Message: Fix null deref and uaf in mach0 parser CWE ID: CWE-416
struct lib_t* MACH0_(get_libs)(struct MACH0_(obj_t)* bin) { struct lib_t *libs; int i; if (!bin->nlibs) { return NULL; } if (!(libs = calloc ((bin->nlibs + 1), sizeof(struct lib_t)))) { return NULL; } for (i = 0; i < bin->nlibs; i++) { strncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH); libs[i].name[R_BIN_MACH0_STRING_LENGTH-1] = '\0'; libs[i].last = 0; } libs[i].last = 1; return libs; }
168,234
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ImageBitmapFactories::ImageBitmapLoader::LoadBlobAsync( Blob* blob) { loader_->Start(blob->GetBlobDataHandle()); } Commit Message: Fix UAP in ImageBitmapLoader/FileReaderLoader FileReaderLoader stores its client as a raw pointer, so in cases like ImageBitmapLoader where the FileReaderLoaderClient really is garbage collected we have to make sure to destroy the FileReaderLoader when the ExecutionContext that owns it is destroyed. Bug: 913970 Change-Id: I40b02115367cf7bf5bbbbb8e9b57874d2510f861 Reviewed-on: https://chromium-review.googlesource.com/c/1374511 Reviewed-by: Jeremy Roman <[email protected]> Commit-Queue: Marijn Kruisselbrink <[email protected]> Cr-Commit-Position: refs/heads/master@{#616342} CWE ID: CWE-416
void ImageBitmapFactories::ImageBitmapLoader::LoadBlobAsync(Blob* blob) { loader_->Start(blob->GetBlobDataHandle()); }
173,068
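The reasoning in the commit message is a lifetime rule: a loader that stores its client as a raw pointer must be torn down no later than the ExecutionContext that owns it. The following sketch uses made-up class names rather than Blink APIs and shows one way to express that rule with a context-destruction notification:

#include <memory>
#include <vector>

class ContextObserver {
 public:
  virtual ~ContextObserver() = default;
  virtual void ContextDestroyed() = 0;  // last chance to drop raw back-pointers
};

class ExecutionContextLike {
 public:
  ~ExecutionContextLike() {
    for (auto& weak : observers_)
      if (auto obs = weak.lock()) obs->ContextDestroyed();
  }
  void AddObserver(const std::shared_ptr<ContextObserver>& obs) {
    observers_.push_back(obs);
  }
 private:
  std::vector<std::weak_ptr<ContextObserver>> observers_;
};

class FileReaderLoaderLike { /* keeps a raw pointer to its client internally */ };

class ImageBitmapLoaderLike : public ContextObserver {
 public:
  void Start() { loader_ = std::make_unique<FileReaderLoaderLike>(); }
  void ContextDestroyed() override { loader_.reset(); }  // loader gone before the context is
 private:
  std::unique_ptr<FileReaderLoaderLike> loader_;
};

Resetting the unique_ptr in ContextDestroyed means the loader is destroyed before the context finishes tearing down, so it can no longer call back into a client that has already been reclaimed.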
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void update_logging() { bool should_log = module_started && (logging_enabled_via_api || stack_config->get_btsnoop_turned_on()); if (should_log == is_logging) return; is_logging = should_log; if (should_log) { btsnoop_net_open(); const char *log_path = stack_config->get_btsnoop_log_path(); if (stack_config->get_btsnoop_should_save_last()) { char last_log_path[PATH_MAX]; snprintf(last_log_path, PATH_MAX, "%s.%llu", log_path, btsnoop_timestamp()); if (!rename(log_path, last_log_path) && errno != ENOENT) LOG_ERROR("%s unable to rename '%s' to '%s': %s", __func__, log_path, last_log_path, strerror(errno)); } logfile_fd = open(log_path, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH); if (logfile_fd == INVALID_FD) { LOG_ERROR("%s unable to open '%s': %s", __func__, log_path, strerror(errno)); is_logging = false; return; } write(logfile_fd, "btsnoop\0\0\0\0\1\0\0\x3\xea", 16); } else { if (logfile_fd != INVALID_FD) close(logfile_fd); logfile_fd = INVALID_FD; btsnoop_net_close(); } } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
static void update_logging() { bool should_log = module_started && (logging_enabled_via_api || stack_config->get_btsnoop_turned_on()); if (should_log == is_logging) return; is_logging = should_log; if (should_log) { btsnoop_net_open(); const char *log_path = stack_config->get_btsnoop_log_path(); if (stack_config->get_btsnoop_should_save_last()) { char last_log_path[PATH_MAX]; snprintf(last_log_path, PATH_MAX, "%s.%llu", log_path, btsnoop_timestamp()); if (!rename(log_path, last_log_path) && errno != ENOENT) LOG_ERROR("%s unable to rename '%s' to '%s': %s", __func__, log_path, last_log_path, strerror(errno)); } logfile_fd = TEMP_FAILURE_RETRY(open(log_path, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH)); if (logfile_fd == INVALID_FD) { LOG_ERROR("%s unable to open '%s': %s", __func__, log_path, strerror(errno)); is_logging = false; return; } TEMP_FAILURE_RETRY(write(logfile_fd, "btsnoop\0\0\0\0\1\0\0\x3\xea", 16)); } else { if (logfile_fd != INVALID_FD) close(logfile_fd); logfile_fd = INVALID_FD; btsnoop_net_close(); } }
173,473
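The relevant change is wrapping open() and write() in TEMP_FAILURE_RETRY, the glibc/bionic macro that repeats a call while it fails with EINTR, so a signal delivered to the process cannot make logging bail out with a spurious error. An equivalent retry loop, written out as a plain helper with a hypothetical name, looks roughly like this:

#include <cerrno>
#include <cstddef>
#include <unistd.h>

// Keeps writing until the whole buffer is out, retrying on EINTR and resuming
// after short writes; returns 0 on success and -1 on a real I/O error.
static int write_all_retry(int fd, const void* buf, size_t len) {
  const char* p = static_cast<const char*>(buf);
  while (len > 0) {
    ssize_t n = ::write(fd, p, len);
    if (n < 0) {
      if (errno == EINTR)
        continue;          // interrupted by a signal: just try again
      return -1;           // genuine error
    }
    p += n;
    len -= static_cast<size_t>(n);
  }
  return 0;
}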
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: double VideoTrack::GetFrameRate() const { return m_rate; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
double VideoTrack::GetFrameRate() const { return m_rate; }
174,326
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void TestBlinkPlatformSupport::cryptographicallyRandomValues( unsigned char* buffer, size_t length) { } Commit Message: Add assertions that the empty Platform::cryptographicallyRandomValues() overrides are not being used. These implementations are not safe and look scary if not accompanied by an assertion. Also one of the comments was incorrect. BUG=552749 Review URL: https://codereview.chromium.org/1419293005 Cr-Commit-Position: refs/heads/master@{#359229} CWE ID: CWE-310
void TestBlinkPlatformSupport::cryptographicallyRandomValues( unsigned char* buffer, size_t length) { base::RandBytes(buffer, length); }
172,238
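The danger the commit message points at is that an empty cryptographicallyRandomValues() override returns without touching the buffer, so callers silently receive non-random bytes. A fail-loud sketch of the idea, illustrative only and using /dev/urandom rather than any Chromium API, is:

#include <cstddef>
#include <cstdlib>
#include <fstream>

// Fills the buffer from the system entropy pool and aborts instead of silently
// returning stale buffer contents when that is not possible.
void FillWithRandomBytes(unsigned char* buffer, std::size_t length) {
  std::ifstream urandom("/dev/urandom", std::ios::binary);
  if (!urandom.is_open())
    std::abort();                          // fail loudly, never pretend to have entropy
  urandom.read(reinterpret_cast<char*>(buffer),
               static_cast<std::streamsize>(length));
  if (static_cast<std::size_t>(urandom.gcount()) != length)
    std::abort();                          // a short read is still not acceptable
}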
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: BlockEntry::~BlockEntry() { } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
BlockEntry::~BlockEntry() {}
174,456
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RenderBox::styleWillChange(StyleDifference diff, const RenderStyle& newStyle) { RenderStyle* oldStyle = style(); if (oldStyle) { if (diff >= StyleDifferenceRepaint && node() && (isHTMLHtmlElement(*node()) || isHTMLBodyElement(*node()))) { view()->repaint(); if (oldStyle->hasEntirelyFixedBackground() != newStyle.hasEntirelyFixedBackground()) view()->compositor()->setNeedsUpdateFixedBackground(); } if (diff == StyleDifferenceLayout && parent() && oldStyle->position() != newStyle.position()) { markContainingBlocksForLayout(); if (oldStyle->position() == StaticPosition) repaint(); else if (newStyle.hasOutOfFlowPosition()) parent()->setChildNeedsLayout(); if (isFloating() && !isOutOfFlowPositioned() && newStyle.hasOutOfFlowPosition()) removeFloatingOrPositionedChildFromBlockLists(); } } else if (isBody()) view()->repaint(); RenderBoxModelObject::styleWillChange(diff, newStyle); } Commit Message: Separate repaint and layout requirements of StyleDifference (Step 1) Previously StyleDifference was an enum that proximately bigger values imply smaller values (e.g. StyleDifferenceLayout implies StyleDifferenceRepaint). This causes unnecessary repaints in some cases on layout change. Convert StyleDifference to a structure containing relatively independent flags. This change doesn't directly improve the result, but can make further repaint optimizations possible. Step 1 doesn't change any functionality. RenderStyle still generate the legacy StyleDifference enum when comparing styles and convert the result to the new StyleDifference. Implicit requirements are not handled during the conversion. Converted call sites to use the new StyleDifference according to the following conversion rules: - diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange() - diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly() - diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout() - diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout() - diff > StyleDifferenceRepaintLayer => diff.needsLayout() - diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly() - diff == StyleDifferenceLayout => diff.needsFullLayout() BUG=358460 TEST=All existing layout tests. [email protected], [email protected], [email protected] Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983 Review URL: https://codereview.chromium.org/236203020 git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void RenderBox::styleWillChange(StyleDifference diff, const RenderStyle& newStyle) { RenderStyle* oldStyle = style(); if (oldStyle) { if ((diff.needsRepaint() || diff.needsLayout()) && node() && (isHTMLHtmlElement(*node()) || isHTMLBodyElement(*node()))) { view()->repaint(); if (oldStyle->hasEntirelyFixedBackground() != newStyle.hasEntirelyFixedBackground()) view()->compositor()->setNeedsUpdateFixedBackground(); } if (diff.needsFullLayout() && parent() && oldStyle->position() != newStyle.position()) { markContainingBlocksForLayout(); if (oldStyle->position() == StaticPosition) repaint(); else if (newStyle.hasOutOfFlowPosition()) parent()->setChildNeedsLayout(); if (isFloating() && !isOutOfFlowPositioned() && newStyle.hasOutOfFlowPosition()) removeFloatingOrPositionedChildFromBlockLists(); } } else if (isBody()) view()->repaint(); RenderBoxModelObject::styleWillChange(diff, newStyle); }
171,465
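The conversion rules listed in the commit message follow from replacing an ordered enum, where a larger value implied every smaller one, with a set of independent flags queried through predicates. A compact sketch of such a flags type, with illustrative names rather than Blink's actual StyleDifference, is:

#include <cstdint>

class StyleDiffSketch {
 public:
  void SetNeedsRepaint()    { bits_ |= kRepaint; }
  void SetNeedsFullLayout() { bits_ |= kFullLayout; }

  bool HasNoChange()     const { return bits_ == 0; }
  bool NeedsRepaint()    const { return (bits_ & kRepaint) != 0; }
  bool NeedsLayout()     const { return (bits_ & kFullLayout) != 0; }
  bool NeedsFullLayout() const { return (bits_ & kFullLayout) != 0; }

 private:
  enum : uint8_t { kRepaint = 1 << 0, kFullLayout = 1 << 1 };
  uint8_t bits_ = 0;
};

// Legacy call site:        if (diff >= StyleDifferenceRepaint) ...
// With independent flags:  if (diff.NeedsRepaint() || diff.NeedsLayout()) ...

Because the bits are independent, a pure layout change no longer forces a repaint, which is the optimization the enum ordering made impossible.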
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes) { struct a2dp_stream_in *in = (struct a2dp_stream_in *)stream; int read; DEBUG("read %zu bytes, state: %d", bytes, in->common.state); if (in->common.state == AUDIO_A2DP_STATE_SUSPENDED) { DEBUG("stream suspended"); return -1; } /* only allow autostarting if we are in stopped or standby */ if ((in->common.state == AUDIO_A2DP_STATE_STOPPED) || (in->common.state == AUDIO_A2DP_STATE_STANDBY)) { pthread_mutex_lock(&in->common.lock); if (start_audio_datapath(&in->common) < 0) { /* emulate time this write represents to avoid very fast write failures during transition periods or remote suspend */ int us_delay = calc_audiotime(in->common.cfg, bytes); DEBUG("emulate a2dp read delay (%d us)", us_delay); usleep(us_delay); pthread_mutex_unlock(&in->common.lock); return -1; } pthread_mutex_unlock(&in->common.lock); } else if (in->common.state != AUDIO_A2DP_STATE_STARTED) { ERROR("stream not in stopped or standby"); return -1; } read = skt_read(in->common.audio_fd, buffer, bytes); if (read == -1) { skt_disconnect(in->common.audio_fd); in->common.audio_fd = AUDIO_SKT_DISCONNECTED; in->common.state = AUDIO_A2DP_STATE_STOPPED; } else if (read == 0) { DEBUG("read time out - return zeros"); memset(buffer, 0, bytes); read = bytes; } DEBUG("read %d bytes out of %zu bytes", read, bytes); return read; } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes) { struct a2dp_stream_in *in = (struct a2dp_stream_in *)stream; int read; DEBUG("read %zu bytes, state: %d", bytes, in->common.state); if (in->common.state == AUDIO_A2DP_STATE_SUSPENDED) { DEBUG("stream suspended"); return -1; } /* only allow autostarting if we are in stopped or standby */ if ((in->common.state == AUDIO_A2DP_STATE_STOPPED) || (in->common.state == AUDIO_A2DP_STATE_STANDBY)) { pthread_mutex_lock(&in->common.lock); if (start_audio_datapath(&in->common) < 0) { /* emulate time this write represents to avoid very fast write failures during transition periods or remote suspend */ int us_delay = calc_audiotime(in->common.cfg, bytes); DEBUG("emulate a2dp read delay (%d us)", us_delay); TEMP_FAILURE_RETRY(usleep(us_delay)); pthread_mutex_unlock(&in->common.lock); return -1; } pthread_mutex_unlock(&in->common.lock); } else if (in->common.state != AUDIO_A2DP_STATE_STARTED) { ERROR("stream not in stopped or standby"); return -1; } read = skt_read(in->common.audio_fd, buffer, bytes); if (read == -1) { skt_disconnect(in->common.audio_fd); in->common.audio_fd = AUDIO_SKT_DISCONNECTED; in->common.state = AUDIO_A2DP_STATE_STOPPED; } else if (read == 0) { DEBUG("read time out - return zeros"); memset(buffer, 0, bytes); read = bytes; } DEBUG("read %d bytes out of %zu bytes", read, bytes); return read; }
173,426
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int jas_stream_pad(jas_stream_t *stream, int n, int c) { int m; m = n; for (m = n; m > 0; --m) { if (jas_stream_putc(stream, c) == EOF) return n - m; } return n; } Commit Message: Made some changes to the I/O stream library for memory streams. There were a number of potential problems due to the possibility of integer overflow. Changed some integral types to the larger types size_t or ssize_t. For example, the function mem_resize now takes the buffer size parameter as a size_t. Added a new function jas_stream_memopen2, which takes a buffer size specified as a size_t instead of an int. This can be used in jas_image_cmpt_create to avoid potential overflow problems. Added a new function jas_deprecated to warn about reliance on deprecated library behavior. CWE ID: CWE-190
int jas_stream_pad(jas_stream_t *stream, int n, int c) { int m; if (n < 0) { jas_deprecated("negative count for jas_stream_pad"); } m = n; for (m = n; m > 0; --m) { if (jas_stream_putc(stream, c) == EOF) return n - m; } return n; }
168,746
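The commit message describes two defenses against integer overflow: carry buffer sizes as size_t (or ssize_t where a sign is needed) and reject negative or wrap-prone values before any allocation arithmetic runs. A small sketch of both checks, using hypothetical helper names rather than JasPer APIs:

#include <cstddef>
#include <limits>

// Computes nelems * elemsize only when the product cannot wrap around size_t.
static bool checked_total_size(std::size_t nelems, std::size_t elemsize, std::size_t* out) {
  if (elemsize != 0 && nelems > std::numeric_limits<std::size_t>::max() / elemsize)
    return false;                    // the multiplication would overflow
  *out = nelems * elemsize;
  return true;
}

// Rejects a negative count before it can be reinterpreted as a huge unsigned size.
static int pad_count_or_error(int n) {
  if (n < 0)
    return -1;
  return n;
}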
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void InspectorTraceEvents::WillSendRequest( ExecutionContext*, unsigned long identifier, DocumentLoader* loader, ResourceRequest& request, const ResourceResponse& redirect_response, const FetchInitiatorInfo&) { LocalFrame* frame = loader ? loader->GetFrame() : nullptr; TRACE_EVENT_INSTANT1( "devtools.timeline", "ResourceSendRequest", TRACE_EVENT_SCOPE_THREAD, "data", InspectorSendRequestEvent::Data(identifier, frame, request)); probe::AsyncTaskScheduled(frame ? frame->GetDocument() : nullptr, "SendRequest", AsyncId(identifier)); } Commit Message: DevTools: send proper resource type in Network.RequestWillBeSent This patch plumbs resoure type into the DispatchWillSendRequest instrumenation. This allows us to report accurate type in Network.RequestWillBeSent event, instead of "Other", that we report today. BUG=765501 R=dgozman Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c Reviewed-on: https://chromium-review.googlesource.com/667504 Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Commit-Queue: Andrey Lushnikov <[email protected]> Cr-Commit-Position: refs/heads/master@{#507936} CWE ID: CWE-119
void InspectorTraceEvents::WillSendRequest( ExecutionContext*, unsigned long identifier, DocumentLoader* loader, ResourceRequest& request, const ResourceResponse& redirect_response, const FetchInitiatorInfo&, Resource::Type) { LocalFrame* frame = loader ? loader->GetFrame() : nullptr; TRACE_EVENT_INSTANT1( "devtools.timeline", "ResourceSendRequest", TRACE_EVENT_SCOPE_THREAD, "data", InspectorSendRequestEvent::Data(identifier, frame, request)); probe::AsyncTaskScheduled(frame ? frame->GetDocument() : nullptr, "SendRequest", AsyncId(identifier)); }
172,471
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg) { struct vivid_dev *dev = (struct vivid_dev *)info->par; switch (cmd) { case FBIOGET_VBLANK: { struct fb_vblank vblank; vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VSYNC; vblank.count = 0; vblank.vcount = 0; vblank.hcount = 0; if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank))) return -EFAULT; return 0; } default: dprintk(dev, 1, "Unknown ioctl %08x\n", cmd); return -EINVAL; } return 0; } Commit Message: [media] media/vivid-osd: fix info leak in ioctl The vivid_fb_ioctl() code fails to initialize the 16 _reserved bytes of struct fb_vblank after the ->hcount member. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Salva Peiró <[email protected]> Signed-off-by: Hans Verkuil <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]> CWE ID: CWE-200
static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg) { struct vivid_dev *dev = (struct vivid_dev *)info->par; switch (cmd) { case FBIOGET_VBLANK: { struct fb_vblank vblank; memset(&vblank, 0, sizeof(vblank)); vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VSYNC; vblank.count = 0; vblank.vcount = 0; vblank.hcount = 0; if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank))) return -EFAULT; return 0; } default: dprintk(dev, 1, "Unknown ioctl %08x\n", cmd); return -EINVAL; } return 0; }
166,575
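The one-line fix illustrates a standard pattern for CWE-200 in ioctl handlers: zero the entire on-stack struct before filling it and copying it out, so padding bytes and never-written fields cannot leak stale stack contents. A userspace-style sketch of the same pattern, with copy_out() standing in for copy_to_user() and an illustrative struct layout:

#include <cstddef>
#include <cstring>

struct vblank_report {
  unsigned int flags;
  unsigned int count;
  unsigned int vcount;
  unsigned int hcount;
  unsigned int reserved[4];   // never written explicitly: would leak without the memset
};

static int fill_and_copy(void* user_buf,
                         int (*copy_out)(void*, const void*, std::size_t)) {
  struct vblank_report rep;
  std::memset(&rep, 0, sizeof(rep));   // the added line: no uninitialized bytes remain
  rep.flags = 0x7;                     // only the meaningful fields are set afterwards
  return copy_out(user_buf, &rep, sizeof(rep));
}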
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PixelBufferRasterWorkerPool::PixelBufferRasterWorkerPool( ResourceProvider* resource_provider, ContextProvider* context_provider, size_t num_threads, size_t max_transfer_buffer_usage_bytes) : RasterWorkerPool(resource_provider, context_provider, num_threads), shutdown_(false), scheduled_raster_task_count_(0), bytes_pending_upload_(0), max_bytes_pending_upload_(max_transfer_buffer_usage_bytes), has_performed_uploads_since_last_flush_(false), check_for_completed_raster_tasks_pending_(false), should_notify_client_if_no_tasks_are_pending_(false), should_notify_client_if_no_tasks_required_for_activation_are_pending_( false) { } Commit Message: cc: Simplify raster task completion notification logic (Relanding after missing activation bug fixed in https://codereview.chromium.org/131763003/) Previously the pixel buffer raster worker pool used a combination of polling and explicit notifications from the raster worker pool to decide when to tell the client about the completion of 1) all tasks or 2) the subset of tasks required for activation. This patch simplifies the logic by only triggering the notification based on the OnRasterTasksFinished and OnRasterTasksRequiredForActivationFinished calls from the worker pool. BUG=307841,331534 Review URL: https://codereview.chromium.org/99873007 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@243991 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
PixelBufferRasterWorkerPool::PixelBufferRasterWorkerPool( ResourceProvider* resource_provider, ContextProvider* context_provider, size_t num_threads, size_t max_transfer_buffer_usage_bytes) : RasterWorkerPool(resource_provider, context_provider, num_threads), shutdown_(false), scheduled_raster_task_count_(0), bytes_pending_upload_(0), max_bytes_pending_upload_(max_transfer_buffer_usage_bytes), has_performed_uploads_since_last_flush_(false), check_for_completed_raster_tasks_pending_(false), should_notify_client_if_no_tasks_are_pending_(false), should_notify_client_if_no_tasks_required_for_activation_are_pending_( false), raster_finished_task_pending_(false), raster_required_for_activation_finished_task_pending_(false) { }
171,262