Dataset columns (each record below lists these fields in this order):

  func        string   lengths 0 – 484k
  target      int64    values 0 – 1
  cwe         list     lengths 0 – 4
  project     string   799 distinct values
  commit_id   string   fixed length 40
  hash        float64  range 1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000
  size        int64    values 1 – 24k
  message     string   lengths 0 – 13.3k
RecordAReply(CallbackListPtr *pcbl, void *nulldata, void *calldata) { RecordContextPtr pContext; RecordClientsAndProtocolPtr pRCAP; int eci; ReplyInfoRec *pri = (ReplyInfoRec *) calldata; ClientPtr client = pri->client; for (eci = 0; eci < numEnabledContexts; eci++) { pContext = ppAllContexts[eci]; pRCAP = RecordFindClientOnContext(pContext, client->clientAsMask, NULL); if (pRCAP) { int majorop = client->majorOp; if (pContext->continuedReply) { RecordAProtocolElement(pContext, client, XRecordFromServer, (void *) pri->replyData, pri->dataLenBytes, pri->padBytes, /* continuation */ -1); if (!pri->bytesRemaining) pContext->continuedReply = 0; } else if (pri->startOfReply && pRCAP->pReplyMajorOpSet && RecordIsMemberOfSet(pRCAP->pReplyMajorOpSet, majorop)) { if (majorop <= 127) { /* core reply */ RecordAProtocolElement(pContext, client, XRecordFromServer, (void *) pri->replyData, pri->dataLenBytes, 0, pri->bytesRemaining); if (pri->bytesRemaining) pContext->continuedReply = 1; } else { /* extension, check minor opcode */ int minorop = client->minorOp; int numMinOpInfo; RecordMinorOpPtr pMinorOpInfo = pRCAP->pReplyMinOpInfo; assert(pMinorOpInfo); numMinOpInfo = pMinorOpInfo->count; pMinorOpInfo++; assert(numMinOpInfo); for (; numMinOpInfo; numMinOpInfo--, pMinorOpInfo++) { if (majorop >= pMinorOpInfo->major.first && majorop <= pMinorOpInfo->major.last && RecordIsMemberOfSet(pMinorOpInfo->major.pMinOpSet, minorop)) { RecordAProtocolElement(pContext, client, XRecordFromServer, (void *) pri->replyData, pri->dataLenBytes, 0, pri->bytesRemaining); if (pri->bytesRemaining) pContext->continuedReply = 1; break; } } /* end for each minor op info */ } /* end extension reply */ } /* end continued reply vs. start of reply */ } /* end client is registered on this context */ } /* end for each context */ } /* RecordAReply */
target: 0
cwe: [ "CWE-191" ]
project: xserver
commit_id: 2902b78535ecc6821cc027351818b28a5c7fdbdc
hash: 335,074,118,280,486,800,000,000,000,000,000,000,000
size: 62
Fix XRecordRegisterClients() Integer underflow CVE-2020-14362 ZDI-CAN-11574 This vulnerability was discovered by: Jan-Niklas Sohn working with Trend Micro Zero Day Initiative Signed-off-by: Matthieu Herrb <[email protected]>
static void kvm_resume(void) { if (kvm_usage_count) { #ifdef CONFIG_LOCKDEP WARN_ON(lockdep_is_held(&kvm_count_lock)); #endif hardware_enable_nolock(NULL); } }
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: 0774a964ef561b7170d8d1b1bfe6f88002b6d219
hash: 90,841,378,524,437,740,000,000,000,000,000,000,000
size: 9
KVM: Fix out of range accesses to memslots Reset the LRU slot if it becomes invalid when deleting a memslot to fix an out-of-bounds/use-after-free access when searching through memslots. Explicitly check for there being no used slots in search_memslots(), and in the caller of s390's approximation variant. Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots") Reported-by: Qian Cai <[email protected]> Cc: Peter Xu <[email protected]> Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> Acked-by: Christian Borntraeger <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
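The guard described in this commit message is generic enough to sketch in isolation. The following is a hedged illustration only — the struct and function names are hypothetical, not the kernel's memslot code — showing the two checks the message calls out: bail out when no slots are used, and reset a cached LRU index that no longer fits the array.

```c
#include <stddef.h>

/* Hypothetical memslot model; not the kernel's structures. */
struct slot {
    unsigned long base;   /* first address covered by the slot */
    unsigned long len;    /* number of units covered */
};

static const struct slot *find_slot(const struct slot *slots, size_t used,
                                    size_t *lru_index, unsigned long addr)
{
    size_t i;

    if (used == 0)
        return NULL;              /* explicit "no used slots" check */
    if (*lru_index >= used)
        *lru_index = 0;           /* LRU hint became invalid after a delete */

    /* Fast path: try the cached slot first, now known to be in range. */
    if (addr >= slots[*lru_index].base &&
        addr < slots[*lru_index].base + slots[*lru_index].len)
        return &slots[*lru_index];

    for (i = 0; i < used; i++) {
        if (addr >= slots[i].base && addr < slots[i].base + slots[i].len) {
            *lru_index = i;
            return &slots[i];
        }
    }
    return NULL;
}
```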
SYSCALL_DEFINE3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs) { int err; struct load_info info = { }; err = may_init_module(); if (err) return err; pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); err = copy_module_from_user(umod, len, &info); if (err) return err; return load_module(&info, uargs, 0); }
target: 0
cwe: [ "CWE-362", "CWE-347" ]
project: linux
commit_id: 0c18f29aae7ce3dadd26d8ee3505d07cc982df75
hash: 244,102,221,118,060,030,000,000,000,000,000,000,000
size: 19
module: limit enabling module.sig_enforce Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying "module.sig_enforce=1" on the boot command line sets "sig_enforce". Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured. This patch makes the presence of /sys/module/module/parameters/sig_enforce dependent on CONFIG_MODULE_SIG=y. Fixes: fda784e50aac ("module: export module signature enforcement status") Reported-by: Nayna Jain <[email protected]> Tested-by: Mimi Zohar <[email protected]> Tested-by: Jessica Yu <[email protected]> Signed-off-by: Mimi Zohar <[email protected]> Signed-off-by: Jessica Yu <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
void Filter::resetOtherUpstreams(UpstreamRequest& upstream_request) { // Pop each upstream request on the list and reset it if it's not the one // provided. At the end we'll move it back into the list. UpstreamRequestPtr final_upstream_request; while (!upstream_requests_.empty()) { UpstreamRequestPtr upstream_request_tmp = upstream_requests_.back()->removeFromList(upstream_requests_); if (upstream_request_tmp.get() != &upstream_request) { upstream_request_tmp->resetStream(); // TODO: per-host stat for hedge abandoned. // TODO: cluster stat for hedge abandoned. } else { final_upstream_request = std::move(upstream_request_tmp); } } ASSERT(final_upstream_request); // Now put the final request back on this list. LinkedList::moveIntoList(std::move(final_upstream_request), upstream_requests_); }
target: 0
cwe: [ "CWE-703" ]
project: envoy
commit_id: 18871dbfb168d3512a10c78dd267ff7c03f564c6
hash: 141,393,495,301,707,510,000,000,000,000,000,000,000
size: 20
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
CtPtr ProtocolV2::handle_read_frame_segment(rx_buffer_t &&rx_buffer, int r) { ldout(cct, 20) << __func__ << " r=" << r << dendl; if (r < 0) { ldout(cct, 1) << __func__ << " read frame segment failed r=" << r << " (" << cpp_strerror(r) << ")" << dendl; return _fault(); } rx_segments_data.emplace_back(); rx_segments_data.back().push_back(std::move(rx_buffer)); // decrypt incoming data // FIXME: if (auth_meta->is_mode_secure()) { if (session_stream_handlers.rx) { ceph_assert(session_stream_handlers.rx); auto& new_seg = rx_segments_data.back(); if (new_seg.length()) { auto padded = session_stream_handlers.rx->authenticated_decrypt_update( std::move(new_seg), segment_t::DEFAULT_ALIGNMENT); const auto idx = rx_segments_data.size() - 1; new_seg.clear(); padded.splice(0, rx_segments_desc[idx].length, &new_seg); ldout(cct, 20) << __func__ << " unpadded new_seg.length()=" << new_seg.length() << dendl; } } if (rx_segments_desc.size() == rx_segments_data.size()) { // OK, all segments planned to read are read. Can go with epilogue. return READ(get_epilogue_size(), handle_read_frame_epilogue_main); } else { // TODO: for makeshift only. This will be more generic and throttled return read_frame_segment(); } }
target: 0
cwe: [ "CWE-323" ]
project: ceph
commit_id: 20b7bb685c5ea74c651ca1ea547ac66b0fee7035
hash: 258,588,576,453,602,570,000,000,000,000,000,000,000
size: 39
msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities The secure mode uses AES-128-GCM with 96-bit nonces consisting of a 32-bit counter followed by a 64-bit salt. The counter is incremented after processing each frame, the salt is fixed for the duration of the session. Both are initialized from the session key generated during session negotiation, so the counter starts with essentially a random value. It is allowed to wrap, and, after 2**32 frames, it repeats, resulting in nonce reuse (the actual sequence numbers that the messenger works with are 64-bit, so the session continues on). Because of how GCM works, this completely breaks both confidentiality and integrity aspects of the secure mode. A single nonce reuse reveals the XOR of two plaintexts and almost completely reveals the subkey used for producing authentication tags. After a few nonces get used twice, all confidentiality and integrity goes out the window and the attacker can potentially encrypt-authenticate plaintext of their choice. We can't easily change the nonce format to extend the counter to 64 bits (and possibly XOR it with a longer salt). Instead, just remember the initial nonce and cut the session before it repeats, forcing renegotiation. Signed-off-by: Ilya Dryomov <[email protected]> Reviewed-by: Radoslaw Zarzynski <[email protected]> Reviewed-by: Sage Weil <[email protected]> Conflicts: src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17 ("msg: Build target 'common' without using namespace in headers") not in octopus ]
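The nonce arithmetic in this message can be illustrated with a small standalone sketch. This is not ceph code; the struct and function names are hypothetical. It models the 96-bit nonce as a 32-bit counter plus a fixed 64-bit salt and flags the point at which continuing would reuse a (counter, salt) pair, i.e. where the fix cuts the session and forces renegotiation.

```c
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical model of the 96-bit AES-GCM nonce: 32-bit counter + 64-bit salt. */
struct nonce96 {
    uint32_t counter;   /* incremented once per frame, wraps modulo 2^32 */
    uint64_t salt;      /* fixed for the whole session */
};

/* Advance the per-frame counter and report when the next value would repeat
 * the initial one, i.e. when encrypting another frame would reuse a nonce. */
static bool nonce_advance(struct nonce96 *n, uint32_t initial_counter)
{
    n->counter++;                           /* unsigned wraparound is well defined */
    return n->counter == initial_counter;   /* caller must renegotiate the session */
}
```

A caller would stop encrypting and force session renegotiation as soon as nonce_advance() returns true, which is the essence of the remedy described above.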
mrb_exc_get(mrb_state *mrb, const char *name) { struct RClass *exc, *e; mrb_value c = mrb_const_get(mrb, mrb_obj_value(mrb->object_class), mrb_intern_cstr(mrb, name)); if (mrb_type(c) != MRB_TT_CLASS) { mrb_raise(mrb, mrb->eException_class, "exception corrupted"); } exc = e = mrb_class_ptr(c); while (e) { if (e == mrb->eException_class) return exc; e = e->super; } return mrb->eException_class; }
target: 0
cwe: [ "CWE-476", "CWE-415" ]
project: mruby
commit_id: faa4eaf6803bd11669bc324b4c34e7162286bfa3
hash: 325,485,568,398,417,580,000,000,000,000,000,000,000
size: 18
`mrb_class_real()` did not work for `BasicObject`; fix #4037
TEE_Result syscall_check_access_rights(unsigned long flags, const void *buf, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; return tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), flags, (uaddr_t)buf, len); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: optee_os
commit_id: d5c5b0b77b2b589666024d219a8007b3f5b6faeb
hash: 197,057,172,126,377,640,000,000,000,000,000,000,000
size: 13
core: svc: always check ta parameters Always check TA parameters from a user TA. This prevents a user TA from passing invalid pointers to a pseudo TA. Fixes: OP-TEE-2018-0007: "Buffer checks missing when calling pseudo TAs". Signed-off-by: Jens Wiklander <[email protected]> Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8) Reviewed-by: Joakim Bech <[email protected]> Reported-by: Riscure <[email protected]> Reported-by: Alyssa Milburn <[email protected]> Acked-by: Etienne Carriere <[email protected]>
set_interface_var(const char *iface, const char *var, const char *name, uint32_t val) { FILE *fp; char spath[64+IFNAMSIZ]; /* XXX: magic constant */ if (snprintf(spath, sizeof(spath), var, iface) >= sizeof(spath)) return -1; if (access(spath, F_OK) != 0) return -1; fp = fopen(spath, "w"); if (!fp) { if (name) flog(LOG_ERR, "failed to set %s (%u) for %s: %s", name, val, iface, strerror(errno)); return -1; } fprintf(fp, "%u", val); fclose(fp); return 0; }
target: 1
cwe: [ "CWE-22" ]
project: radvd
commit_id: 92e22ca23e52066da2258df8c76a2dca8a428bcc
hash: 75,598,449,882,234,790,000,000,000,000,000,000,000
size: 24
set_interface_var() doesn't check interface name and blindly does fopen(path "/" ifname, "w") on it. As "ifname" is an untrusted input, it should be checked for ".." and/or "/" in it. Otherwise, an infected unprivileged daemon may overwrite contents of file named "mtu", "hoplimit", etc. in arbitrary location with arbitrary 32-bit value in decimal representation ("%d"). If an attacker has a local account or may create arbitrary symlinks with these names in any location (e.g. /tmp), any file may be overwritten with a decimal value.
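A minimal sketch of the kind of check this message says is missing — the helper name is hypothetical and this is not the radvd patch — rejects interface names that could escape the intended /proc or /sys directory once concatenated into a path:

```c
#include <string.h>
#include <stdbool.h>

/* Hypothetical helper: accept only interface names that cannot traverse out
 * of the target directory when spliced into a sysctl/procfs path. */
static bool iface_name_is_safe(const char *iface)
{
    if (iface == NULL || *iface == '\0')
        return false;
    if (strchr(iface, '/') != NULL)      /* no path separators */
        return false;
    if (strstr(iface, "..") != NULL)     /* no parent-directory traversal */
        return false;
    return true;
}
```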
CreateApparentRootDirectory(void) { const char *root; /* * DnD_GetFileRoot() gives us a pointer to a static string, so there's no * need to free anything. * * XXX On XDG platforms this path ("/tmp/VMwareDnD") is created by an * init script, so we could remove some of the code below and just bail * if the user deletes it. */ root = DnD_GetFileRoot(); if (!root) { return NULL; } if (File_Exists(root)) { if (!DnDRootDirUsable(root)) { /* * The directory already exists and its permissions are wrong. */ Log_Trivia("dnd: The root dir is not usable.\n"); return NULL; } } else { if ( !File_CreateDirectory(root) || !DnDSetPermissionsOnRootDir(root)) { /* We couldn't create the directory or set the permissions. */ return NULL; } } return root; }
target: 0
cwe: []
project: open-vm-tools
commit_id: e88f91b00a715b79255de6576506d80ecfdb064c
hash: 337,139,538,478,273,500,000,000,000,000,000,000,000
size: 36
Fix possible security issue with the permissions of the intermediate staging directory and path /tmp/VMwareDnD is a staging directory used for DnD and CnP. It should be a regular directory, but malicious code or user may create the /tmp/VMwareDnD as a symbolic link which points elsewhere on the system. This may provide user access to user B's files. Do not set the permission of the root directory if the root directory already exists and has the wrong permission. The permission of the directory must be 1777 if it is created by the VMToolsi. If not, then the directory has been created or modified by malicious code or user, so just cancel the host to guest DnD or CnP operation.
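The "regular directory, not a symlink, mode 1777" requirement described above can be sketched with lstat(), which does not follow symlinks. This is an illustrative check under those assumptions, not the open-vm-tools implementation:

```c
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Illustrative check: the staging root must be a real directory (lstat does
 * not follow symlinks) with exactly the expected sticky, world-writable mode. */
static bool root_dir_is_usable(const char *path)
{
    struct stat st;

    if (lstat(path, &st) != 0)
        return false;
    if (!S_ISDIR(st.st_mode))            /* rejects symlinks and regular files */
        return false;
    return (st.st_mode & 07777) == 01777;
}
```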
Bool gf_filter_update_arg_apply(GF_Filter *filter, const char *arg_name, const char *arg_value, Bool is_sync_call) { u32 i=0; //find arg while (filter->freg->args) { GF_PropertyValue argv; const GF_FilterArgs *a = &filter->freg->args[i]; i++; Bool is_meta = GF_FALSE; if (!a || !a->arg_name) break; if ((a->flags & GF_FS_ARG_META) && !strcmp(a->arg_name, "*")) { if (!filter->freg->update_arg) continue; is_meta = GF_TRUE; } else if (strcmp(a->arg_name, arg_name)) { continue; } //we found the argument if (!is_meta && ! (a->flags & (GF_FS_ARG_UPDATE|GF_FS_ARG_UPDATE_SYNC) ) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_FILTER, ("Argument %s of filter %s is not updatable - ignoring\n", a->arg_name, filter->name)); return GF_TRUE; } if (a->flags & GF_FS_ARG_UPDATE_SYNC) { if (!is_sync_call) return GF_TRUE; } argv = gf_filter_parse_prop_solve_env_var(filter, a->arg_type, a->arg_name, arg_value, a->min_max_enum); if (argv.type != GF_PROP_FORBIDEN) { GF_Err e = GF_OK; if (!is_sync_call) { FSESS_CHECK_THREAD(filter) } //if no update function consider the arg OK if (filter->freg->update_arg) { e = filter->freg->update_arg(filter, arg_name, &argv); } if (e==GF_OK) { if (!is_meta) gf_filter_set_arg(filter, a, &argv); } else if (e!=GF_NOT_FOUND) { GF_LOG(GF_LOG_WARNING, GF_LOG_FILTER, ("Filter %s did not accept update of arg %s to value %s: %s\n", filter->name, arg_name, arg_value, gf_error_to_string(e) )); } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Failed to parse argument %s value %s\n", a->arg_name, a->arg_default_val)); } return GF_TRUE; } return GF_FALSE; }
target: 0
cwe: [ "CWE-787" ]
project: gpac
commit_id: da37ec8582266983d0ec4b7550ec907401ec441e
hash: 325,276,358,712,833,530,000,000,000,000,000,000,000
size: 53
fixed crashes for very long path - cf #1908
sc_pkcs15emu_esteid_init (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; unsigned char buff[128]; int r, i; size_t field_length = 0, modulus_length = 0; sc_path_t tmppath; set_string (&p15card->tokeninfo->label, "ID-kaart"); set_string (&p15card->tokeninfo->manufacturer_id, "AS Sertifitseerimiskeskus"); /* Select application directory */ sc_format_path ("3f00eeee5044", &tmppath); r = sc_select_file (card, &tmppath, NULL); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "select esteid PD failed"); /* read the serial (document number) */ r = sc_read_record (card, SC_ESTEID_PD_DOCUMENT_NR, buff, sizeof(buff), SC_RECORD_BY_REC_NR); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "read document number failed"); buff[MIN((size_t) r, (sizeof buff)-1)] = '\0'; set_string (&p15card->tokeninfo->serial_number, (const char *) buff); p15card->tokeninfo->flags = SC_PKCS15_TOKEN_PRN_GENERATION | SC_PKCS15_TOKEN_EID_COMPLIANT | SC_PKCS15_TOKEN_READONLY; /* add certificates */ for (i = 0; i < 2; i++) { static const char *esteid_cert_names[2] = { "Isikutuvastus", "Allkirjastamine"}; static char const *esteid_cert_paths[2] = { "3f00eeeeaace", "3f00eeeeddce"}; static int esteid_cert_ids[2] = {1, 2}; struct sc_pkcs15_cert_info cert_info; struct sc_pkcs15_object cert_obj; memset(&cert_info, 0, sizeof(cert_info)); memset(&cert_obj, 0, sizeof(cert_obj)); cert_info.id.value[0] = esteid_cert_ids[i]; cert_info.id.len = 1; sc_format_path(esteid_cert_paths[i], &cert_info.path); strlcpy(cert_obj.label, esteid_cert_names[i], sizeof(cert_obj.label)); r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info); if (r < 0) return SC_ERROR_INTERNAL; if (i == 0) { sc_pkcs15_cert_t *cert = NULL; r = sc_pkcs15_read_certificate(p15card, &cert_info, &cert); if (r < 0) return SC_ERROR_INTERNAL; if (cert->key->algorithm == SC_ALGORITHM_EC) field_length = cert->key->u.ec.params.field_length; else modulus_length = cert->key->u.rsa.modulus.len * 8; if (r == SC_SUCCESS) { static const struct sc_object_id cn_oid = {{ 2, 5, 4, 3, -1 }}; u8 *cn_name = NULL; size_t cn_len = 0; sc_pkcs15_get_name_from_dn(card->ctx, cert->subject, cert->subject_len, &cn_oid, &cn_name, &cn_len); if (cn_len > 0) { char *token_name = malloc(cn_len+1); if (token_name) { memcpy(token_name, cn_name, cn_len); token_name[cn_len] = '\0'; set_string(&p15card->tokeninfo->label, (const char*)token_name); free(token_name); } } free(cn_name); sc_pkcs15_free_certificate(cert); } } } /* the file with key pin info (tries left) */ sc_format_path ("3f000016", &tmppath); r = sc_select_file (card, &tmppath, NULL); if (r < 0) return SC_ERROR_INTERNAL; /* add pins */ for (i = 0; i < 3; i++) { unsigned char tries_left; static const char *esteid_pin_names[3] = { "PIN1", "PIN2", "PUK" }; static const int esteid_pin_min[3] = {4, 5, 8}; static const int esteid_pin_ref[3] = {1, 2, 0}; static const int esteid_pin_authid[3] = {1, 2, 3}; static const int esteid_pin_flags[3] = {0, 0, SC_PKCS15_PIN_FLAG_UNBLOCKING_PIN}; struct sc_pkcs15_auth_info pin_info; struct sc_pkcs15_object pin_obj; memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); /* read the number of tries left for the PIN */ r = sc_read_record (card, i + 1, buff, sizeof(buff), SC_RECORD_BY_REC_NR); if (r < 0) return SC_ERROR_INTERNAL; tries_left = buff[5]; pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = esteid_pin_authid[i]; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = esteid_pin_ref[i]; pin_info.attrs.pin.flags = esteid_pin_flags[i]; 
pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; pin_info.attrs.pin.min_length = esteid_pin_min[i]; pin_info.attrs.pin.stored_length = 12; pin_info.attrs.pin.max_length = 12; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = (int)tries_left; pin_info.max_tries = 3; strlcpy(pin_obj.label, esteid_pin_names[i], sizeof(pin_obj.label)); pin_obj.flags = esteid_pin_flags[i]; /* Link normal PINs with PUK */ if (i < 2) { pin_obj.auth_id.len = 1; pin_obj.auth_id.value[0] = 3; } r = sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) return SC_ERROR_INTERNAL; } /* add private keys */ for (i = 0; i < 2; i++) { static int prkey_pin[2] = {1, 2}; static const char *prkey_name[2] = { "Isikutuvastus", "Allkirjastamine"}; struct sc_pkcs15_prkey_info prkey_info; struct sc_pkcs15_object prkey_obj; memset(&prkey_info, 0, sizeof(prkey_info)); memset(&prkey_obj, 0, sizeof(prkey_obj)); prkey_info.id.len = 1; prkey_info.id.value[0] = prkey_pin[i]; prkey_info.native = 1; prkey_info.key_reference = i + 1; prkey_info.field_length = field_length; prkey_info.modulus_length = modulus_length; if (i == 1) prkey_info.usage = SC_PKCS15_PRKEY_USAGE_NONREPUDIATION; else if(field_length > 0) // ECC has sign and derive usage prkey_info.usage = SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_DERIVE; else prkey_info.usage = SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_DECRYPT; strlcpy(prkey_obj.label, prkey_name[i], sizeof(prkey_obj.label)); prkey_obj.auth_id.len = 1; prkey_obj.auth_id.value[0] = prkey_pin[i]; prkey_obj.user_consent = 0; prkey_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE; if(field_length > 0) r = sc_pkcs15emu_add_ec_prkey(p15card, &prkey_obj, &prkey_info); else r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkey_obj, &prkey_info); if (r < 0) return SC_ERROR_INTERNAL; } return SC_SUCCESS; }
target: 0
cwe: [ "CWE-415", "CWE-119" ]
project: OpenSC
commit_id: 360e95d45ac4123255a4c796db96337f332160ad
hash: 116,350,460,801,124,430,000,000,000,000,000,000,000
size: 180
fixed out of bounds writes Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting the problems.
string inferCodeRevisionFromCapistranoSymlink(const SpawnPreparationInfo &info) const { if (extractBaseName(info.appRoot) == "current") { char buf[PATH_MAX + 1]; ssize_t ret; do { ret = readlink(info.appRoot.c_str(), buf, PATH_MAX); } while (ret == -1 && errno == EINTR); if (ret == -1) { if (errno == EINVAL) { return string(); } else { int e = errno; P_WARN("Cannot read symlink " << info.appRoot << ": " << strerror(e)); } } buf[ret] = '\0'; return extractBaseName(buf); } else { return string(); } }
target: 0
cwe: [ "CWE-200", "CWE-61" ]
project: passenger
commit_id: 4043718264095cde6623c2cbe8c644541036d7bf
hash: 248,040,643,719,869,800,000,000,000,000,000,000,000
size: 23
Disable unused feature.
static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; u8 data; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, pcu->ofn_reg_addr, &data); mutex_unlock(&pcu->cmd_mutex); if (error) return error; return scnprintf(buf, PAGE_SIZE, "%x\n", data);
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: a0ad220c96692eda76b2e3fd7279f3dcd1d8a8ff
hash: 191,243,898,651,482,460,000,000,000,000,000,000,000
size: 18
Input: ims-pcu - sanity check against missing interfaces A malicious device missing interface can make the driver oops. Add sanity checking. Signed-off-by: Oliver Neukum <[email protected]> CC: [email protected] Signed-off-by: Dmitry Torokhov <[email protected]>
pci_msi_enabled(struct pci_vdev *dev) { return dev->msi.enabled; }
target: 0
cwe: [ "CWE-617", "CWE-703" ]
project: acrn-hypervisor
commit_id: 6199e653418eda58cd698d8769820904453e2535
hash: 227,821,442,412,297,160,000,000,000,000,000,000,000
size: 4
dm: validate the input in 'pci_emul_mem_handler()' checking the inputs explicitly instead of using Assert. Tracked-On: #4003 Signed-off-by: Yonghua Huang <[email protected]> Reviewed-by: Shuo Liu <[email protected]> Acked-by: Yu Wang <[email protected]>
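The "check explicitly instead of using Assert" idea from this message is a general pattern; the sketch below (hypothetical names, not the ACRN handler) contrasts an assert() that vanishes in release builds with an unconditional input check that fails gracefully:

```c
#include <assert.h>
#include <stddef.h>

/* Before: assert() disappears under NDEBUG, so a bad index from a guest
 * would fall through to an out-of-range access in release builds. */
static int bar_read_asserting(const unsigned int *bar, size_t nbar, size_t idx)
{
    assert(idx < nbar);
    return (int)bar[idx];
}

/* After: validate the input unconditionally and reject it instead of trapping. */
static int bar_read_checked(const unsigned int *bar, size_t nbar, size_t idx,
                            unsigned int *out)
{
    if (bar == NULL || out == NULL || idx >= nbar)
        return -1;
    *out = bar[idx];
    return 0;
}
```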
parse_options(const char *data, struct parsed_mount_info *parsed_info) { char *value = NULL; char *equals = NULL; char *next_keyword = NULL; char *out = parsed_info->options; unsigned long *filesys_flags = &parsed_info->flags; int out_len = 0; int word_len; int rc = 0; int got_bkupuid = 0; int got_bkupgid = 0; int got_uid = 0; int got_cruid = 0; int got_gid = 0; int got_snapshot = 0; uid_t uid, cruid = 0, bkupuid = 0; gid_t gid, bkupgid = 0; char *ep; struct passwd *pw; struct group *gr; /* * max 64-bit uint in decimal is 18446744073709551615 which is 20 chars * wide +1 for NULL, and +1 for good measure */ char txtbuf[22]; unsigned long long snapshot; struct tm tm; /* make sure we're starting from beginning */ out[0] = '\0'; /* BB fixme check for separator override BB */ uid = getuid(); if (uid != 0) got_uid = 1; gid = getgid(); if (gid != 0) got_gid = 1; if (!data) return EX_USAGE; /* * format is keyword,keyword2=value2,keyword3=value3... * data = next keyword * value = next value ie stuff after equal sign */ while (data && *data) { next_keyword = strchr(data, ','); /* BB handle sep= */ /* temporarily null terminate end of keyword=value pair */ if (next_keyword) *next_keyword++ = 0; /* temporarily null terminate keyword if there's a value */ value = NULL; if ((equals = strchr(data, '=')) != NULL) { *equals = '\0'; value = equals + 1; } switch(parse_opt_token(data)) { case OPT_USERS: if (!value || !*value) { *filesys_flags |= MS_USERS; goto nocopy; } break; case OPT_USER: if (!value || !*value) { if (data[4] == '\0') { *filesys_flags |= MS_USER; goto nocopy; } else { fprintf(stderr, "username specified with no parameter\n"); return EX_USAGE; } } else { strlcpy(parsed_info->username, value, sizeof(parsed_info->username)); parsed_info->got_user = 1; goto nocopy; } case OPT_PASS: if (parsed_info->got_password) { fprintf(stderr, "password specified twice, ignoring second\n"); goto nocopy; } if (!value || !*value) { parsed_info->got_password = 1; goto nocopy; } rc = set_password(parsed_info, value); if (rc) return rc; goto nocopy; case OPT_SEC: if (value) { if (!strncmp(value, "none", 4) || !strncmp(value, "krb5", 4)) parsed_info->got_password = 1; } break; case OPT_IP: if (!value || !*value) { fprintf(stderr, "target ip address argument missing\n"); } else if (strnlen(value, MAX_ADDRESS_LEN) <= MAX_ADDRESS_LEN) { strcpy(parsed_info->addrlist, value); if (parsed_info->verboseflag) fprintf(stderr, "ip address %s override specified\n", value); goto nocopy; } else { fprintf(stderr, "ip address too long\n"); return EX_USAGE; } break; /* unc || target || path */ case OPT_UNC: if (!value || !*value) { fprintf(stderr, "invalid path to network resource\n"); return EX_USAGE; } rc = parse_unc(value, parsed_info, thisprogram); if (rc) return rc; break; /* dom || workgroup */ case OPT_DOM: if (!value) { /* * An empty domain has been passed */ /* not necessary but better safe than.. 
*/ parsed_info->domain[0] = '\0'; parsed_info->got_domain = 1; goto nocopy; } if (strnlen(value, sizeof(parsed_info->domain)) >= sizeof(parsed_info->domain)) { fprintf(stderr, "domain name too long\n"); return EX_USAGE; } strlcpy(parsed_info->domain, value, sizeof(parsed_info->domain)); goto nocopy; case OPT_CRED: if (!value || !*value) { fprintf(stderr, "invalid credential file name specified\n"); return EX_USAGE; } rc = open_cred_file(value, parsed_info); if (rc) { fprintf(stderr, "error %d (%s) opening credential file %s\n", rc, strerror(rc), value); return rc; } goto nocopy; case OPT_UID: if (!value || !*value) goto nocopy; got_uid = 1; pw = getpwnam(value); if (pw) { uid = pw->pw_uid; goto nocopy; } errno = 0; uid = strtoul(value, &ep, 10); if (errno == 0 && *ep == '\0') goto nocopy; fprintf(stderr, "bad option uid=\"%s\"\n", value); return EX_USAGE; case OPT_CRUID: if (!value || !*value) goto nocopy; got_cruid = 1; pw = getpwnam(value); if (pw) { cruid = pw->pw_uid; goto nocopy; } errno = 0; cruid = strtoul(value, &ep, 10); if (errno == 0 && *ep == '\0') goto nocopy; fprintf(stderr, "bad option: cruid=\"%s\"\n", value); return EX_USAGE; case OPT_GID: if (!value || !*value) goto nocopy; got_gid = 1; gr = getgrnam(value); if (gr) { gid = gr->gr_gid; goto nocopy; } errno = 0; gid = strtoul(value, &ep, 10); if (errno == 0 && *ep == '\0') goto nocopy; fprintf(stderr, "bad option: gid=\"%s\"\n", value); return EX_USAGE; /* fmask falls through to file_mode */ case OPT_FMASK: fprintf(stderr, "WARNING: CIFS mount option 'fmask' is\ deprecated. Use 'file_mode' instead.\n"); data = "file_mode"; /* BB fix this */ /* Fallthrough */ case OPT_FILE_MODE: if (!value || !*value) { fprintf(stderr, "Option '%s' requires a numerical argument\n", data); return EX_USAGE; } if (value[0] != '0') fprintf(stderr, "WARNING: '%s' not expressed in octal.\n", data); break; /* dmask falls through to dir_mode */ case OPT_DMASK: fprintf(stderr, "WARNING: CIFS mount option 'dmask' is\ deprecated. 
Use 'dir_mode' instead.\n"); data = "dir_mode"; /* Fallthrough */ case OPT_DIR_MODE: if (!value || !*value) { fprintf(stderr, "Option '%s' requires a numerical argument\n", data); return EX_USAGE; } if (value[0] != '0') fprintf(stderr, "WARNING: '%s' not expressed in octal.\n", data); break; case OPT_NO_SUID: *filesys_flags |= MS_NOSUID; goto nocopy; case OPT_SUID: *filesys_flags &= ~MS_NOSUID; goto nocopy; case OPT_NO_DEV: *filesys_flags |= MS_NODEV; goto nocopy; case OPT_NO_LOCK: *filesys_flags &= ~MS_MANDLOCK; break; case OPT_MAND: *filesys_flags |= MS_MANDLOCK; goto nocopy; case OPT_NOMAND: *filesys_flags &= ~MS_MANDLOCK; goto nocopy; case OPT_DEV: *filesys_flags &= ~MS_NODEV; goto nocopy; case OPT_NO_EXEC: *filesys_flags |= MS_NOEXEC; goto nocopy; case OPT_EXEC: *filesys_flags &= ~MS_NOEXEC; goto nocopy; case OPT_GUEST: parsed_info->got_user = 1; parsed_info->got_password = 1; goto nocopy; case OPT_RO: *filesys_flags |= MS_RDONLY; goto nocopy; case OPT_RW: *filesys_flags &= ~MS_RDONLY; goto nocopy; case OPT_REMOUNT: *filesys_flags |= MS_REMOUNT; goto nocopy; case OPT_IGNORE: goto nocopy; case OPT_BKUPUID: if (!value || !*value) goto nocopy; got_bkupuid = 1; errno = 0; bkupuid = strtoul(value, &ep, 10); if (errno == 0 && *ep == '\0') goto nocopy; pw = getpwnam(value); if (pw == NULL) { fprintf(stderr, "bad user name \"%s\"\n", value); return EX_USAGE; } bkupuid = pw->pw_uid; goto nocopy; case OPT_BKUPGID: if (!value || !*value) goto nocopy; got_bkupgid = 1; errno = 0; bkupgid = strtoul(value, &ep, 10); if (errno == 0 && *ep == '\0') goto nocopy; gr = getgrnam(value); if (gr == NULL) { fprintf(stderr, "bad group name \"%s\"\n", value); return EX_USAGE; } bkupgid = gr->gr_gid; goto nocopy; case OPT_NOFAIL: parsed_info->nofail = 1; goto nocopy; case OPT_SNAPSHOT: if (!value || !*value) goto nocopy; if (strncmp(value, "@GMT-", 5)) break; if ((strlen(value) != GMT_NAME_LEN) || (strptime(value, GMT_FORMAT, &tm) == NULL)) { fprintf(stderr, "bad snapshot token\n"); return EX_USAGE; } snapshot = timegm(&tm) * 10000000 + NTFS_TIME_OFFSET; got_snapshot = 1; goto nocopy; } /* check size before copying option to buffer */ word_len = strlen(data); if (value) word_len += 1 + strlen(value); /* need 2 extra bytes for comma and null byte */ if (out_len + word_len + 2 > MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } /* put back equals sign, if any */ if (equals) *equals = '='; /* go ahead and copy */ if (out_len) strlcat(out, ",", MAX_OPTIONS_LEN); strlcat(out, data, MAX_OPTIONS_LEN); out_len = strlen(out); nocopy: data = next_keyword; } /* special-case the uid and gid */ if (got_uid) { word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", uid); /* comma + "uid=" + terminating NULL == 6 */ if (out_len + word_len + 6 > MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } if (out_len) { strlcat(out, ",", MAX_OPTIONS_LEN); out_len++; } snprintf(out + out_len, word_len + 5, "uid=%s", txtbuf); out_len = strlen(out); } if (got_cruid) { word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", cruid); /* comma + "cruid=" + terminating NULL == 8 */ if (out_len + word_len + 8 > MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } if (out_len) { strlcat(out, ",", MAX_OPTIONS_LEN); out_len++; } snprintf(out + out_len, word_len + 7, "cruid=%s", txtbuf); out_len = strlen(out); } if (got_gid) { word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", gid); /* comma + "gid=" + terminating NULL == 6 */ if (out_len + word_len + 6 > 
MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } if (out_len) { strlcat(out, ",", MAX_OPTIONS_LEN); out_len++; } snprintf(out + out_len, word_len + 5, "gid=%s", txtbuf); } if (got_bkupuid) { word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", bkupuid); /* comma + "backupuid=" + terminating NULL == 12 */ if (out_len + word_len + 12 > MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } if (out_len) { strlcat(out, ",", MAX_OPTIONS_LEN); out_len++; } snprintf(out + out_len, word_len + 11, "backupuid=%s", txtbuf); out_len = strlen(out); } if (got_bkupgid) { word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", bkupgid); /* comma + "backupgid=" + terminating NULL == 12 */ if (out_len + word_len + 12 > MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } if (out_len) { strlcat(out, ",", MAX_OPTIONS_LEN); out_len++; } snprintf(out + out_len, word_len + 11, "backupgid=%s", txtbuf); } if (got_snapshot) { word_len = snprintf(txtbuf, sizeof(txtbuf), "%llu", snapshot); /* comma + "snapshot=" + terminating NULL == 11 */ if (out_len + word_len + 11 > MAX_OPTIONS_LEN) { fprintf(stderr, "Options string too long\n"); return EX_USAGE; } if (out_len) { strlcat(out, ",", MAX_OPTIONS_LEN); out_len++; } snprintf(out + out_len, word_len + 11, "snapshot=%s", txtbuf); } return 0; }
target: 0
cwe: [ "CWE-78" ]
project: cifs-utils
commit_id: 48a654e2e763fce24c22e1b9c695b42804bbdd4a
hash: 132,829,271,673,690,600,000,000,000,000,000,000,000
size: 493
CVE-2020-14342: mount.cifs: fix shell command injection A bug has been reported recently for the mount.cifs utility which is part of the cifs-utils package. The tool has a shell injection issue where one can embed shell commands via the username mount option. Those commands will be run via popen() in the context of the user calling mount. The bug requires cifs-utils to be built with --with-systemd (enabled by default if supported). A quick test to check if the mount.cifs binary is vulnerable is to look for popen() calls like so: $ nm mount.cifs | grep popen U popen@@GLIBC_2.2.5 If the user is allowed to run mount.cifs via sudo, he can obtain a root shell. sudo mount.cifs -o username='`sh`' //1 /mnt If mount.cifs has the setuid bit, the command will still be run as the calling user (no privilege escalation). The bug was introduced in June 2012 with commit 4e264031d0da7d3f2 ("mount.cifs: Use systemd's mechanism for getting password, if present."). Affected versions: cifs-utils-5.6 cifs-utils-5.7 cifs-utils-5.8 cifs-utils-5.9 cifs-utils-6.0 cifs-utils-6.1 cifs-utils-6.2 cifs-utils-6.3 cifs-utils-6.4 cifs-utils-6.5 cifs-utils-6.6 cifs-utils-6.7 cifs-utils-6.8 cifs-utils-6.9 cifs-utils-6.10 Bug: https://bugzilla.samba.org/show_bug.cgi?id=14442 Reported-by: Vadim Lebedev <[email protected]> Signed-off-by: Paulo Alcantara (SUSE) <[email protected]> Signed-off-by: Aurelien Aptel <[email protected]>
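The popen() hazard described in this message comes from handing attacker-controlled text to /bin/sh. The sketch below is illustrative only, not the cifs-utils fix; it contrasts the vulnerable shape with passing the value as a discrete argument so no shell ever parses it (in a real program the exec would follow a fork(), with a pipe to read the reply):

```c
#include <stdio.h>
#include <unistd.h>

/* Vulnerable shape: the username ends up inside a shell command line, so
 * backticks or $() embedded in it are executed by /bin/sh. */
static FILE *ask_password_via_shell(const char *username)
{
    char cmd[256];
    snprintf(cmd, sizeof(cmd),
             "systemd-ask-password 'Password for %s:'", username);
    return popen(cmd, "r");     /* shell metacharacters in username are interpreted */
}

/* Safer shape: exec the helper directly; the prompt is a plain argv element
 * and is never parsed by a shell. */
static void ask_password_via_exec(const char *username)
{
    char prompt[256];
    snprintf(prompt, sizeof(prompt), "Password for %s:", username);
    execlp("systemd-ask-password", "systemd-ask-password", prompt, (char *)NULL);
}
```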
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_svm *svm = to_svm(vcpu); u32 dummy; u32 eax = 1; svm->spec_ctrl = 0; svm->virt_spec_ctrl = 0; if (!init_event) { vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_reset_bsp(vcpu)) vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP; } init_vmcb(vcpu); kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false); kvm_rdx_write(vcpu, eax); if (kvm_vcpu_apicv_active(vcpu) && !init_event) avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); }
target: 0
cwe: [ "CWE-862" ]
project: kvm
commit_id: 0f923e07124df069ba68d8bb12324398f4b6b709
hash: 177,174,519,186,877,000,000,000,000,000,000,000,000
size: 23
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653) * Invert the mask of bits that we pick from L2 in nested_vmcb02_prepare_control * Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr This fixes a security issue that allowed a malicious L1 to run L2 with AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled AVIC to read/write the host physical memory at some offsets. Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler") Signed-off-by: Maxim Levitsky <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
inline void LstmCell( const LstmCellParams& params, const RuntimeShape& unextended_input_shape, const float* input_data, const RuntimeShape& unextended_prev_activ_shape, const float* prev_activ_data, const RuntimeShape& weights_shape, const float* weights_data, const RuntimeShape& unextended_bias_shape, const float* bias_data, const RuntimeShape& unextended_prev_state_shape, const float* prev_state_data, const RuntimeShape& unextended_output_state_shape, float* output_state_data, const RuntimeShape& unextended_output_activ_shape, float* output_activ_data, const RuntimeShape& unextended_concat_temp_shape, float* concat_temp_data, const RuntimeShape& unextended_activ_temp_shape, float* activ_temp_data, CpuBackendContext* cpu_backend_context) { ruy::profiler::ScopeLabel label("LstmCell"); TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4); const RuntimeShape input_shape = RuntimeShape::ExtendedShape(4, unextended_input_shape); const RuntimeShape prev_activ_shape = RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape); const RuntimeShape bias_shape = RuntimeShape::ExtendedShape(4, unextended_bias_shape); const RuntimeShape prev_state_shape = RuntimeShape::ExtendedShape(4, unextended_prev_state_shape); const RuntimeShape output_state_shape = RuntimeShape::ExtendedShape(4, unextended_output_state_shape); const RuntimeShape output_activ_shape = RuntimeShape::ExtendedShape(4, unextended_output_activ_shape); const RuntimeShape concat_temp_shape = RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape); const RuntimeShape activ_temp_shape = RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape); TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); const int weights_dim_count = weights_shape.DimensionsCount(); MatchingDim( // batches input_shape, 0, prev_activ_shape, 0, prev_state_shape, 0, output_state_shape, 0, output_activ_shape, 0); MatchingDim( // height input_shape, 1, prev_activ_shape, 1, prev_state_shape, 1, output_state_shape, 1, output_activ_shape, 1); MatchingDim( // width input_shape, 2, prev_activ_shape, 2, prev_state_shape, 2, output_state_shape, 2, output_activ_shape, 2); const int input_depth = input_shape.Dims(3); const int prev_activ_depth = prev_activ_shape.Dims(3); const int total_input_depth = prev_activ_depth + input_depth; TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1), total_input_depth); TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1); const int intern_activ_depth = MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3); TFLITE_DCHECK_EQ(weights_shape.FlatSize(), intern_activ_depth * total_input_depth); TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0); const int output_depth = MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape, 3, output_activ_shape, 3); TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4); // Concatenate prev_activ and input data together std::vector<float const*> concat_input_arrays_data; std::vector<RuntimeShape const*> concat_input_arrays_shapes; concat_input_arrays_data.push_back(input_data); 
concat_input_arrays_data.push_back(prev_activ_data); concat_input_arrays_shapes.push_back(&input_shape); concat_input_arrays_shapes.push_back(&prev_activ_shape); tflite::ConcatenationParams concat_params; concat_params.axis = 3; concat_params.inputs_count = concat_input_arrays_data.size(); Concatenation(concat_params, &(concat_input_arrays_shapes[0]), &(concat_input_arrays_data[0]), concat_temp_shape, concat_temp_data); // Fully connected tflite::FullyConnectedParams fc_params; fc_params.float_activation_min = std::numeric_limits<float>::lowest(); fc_params.float_activation_max = std::numeric_limits<float>::max(); fc_params.lhs_cacheable = false; fc_params.rhs_cacheable = false; FullyConnected(fc_params, concat_temp_shape, concat_temp_data, weights_shape, weights_data, bias_shape, bias_data, activ_temp_shape, activ_temp_data, cpu_backend_context); // Map raw arrays to Eigen arrays so we can use Eigen's optimized array // operations. ArrayMap<float> activ_temp_map = MapAsArrayWithLastDimAsRows(activ_temp_data, activ_temp_shape); auto input_gate_sm = activ_temp_map.block(0 * output_depth, 0, output_depth, activ_temp_map.cols()); auto new_input_sm = activ_temp_map.block(1 * output_depth, 0, output_depth, activ_temp_map.cols()); auto forget_gate_sm = activ_temp_map.block(2 * output_depth, 0, output_depth, activ_temp_map.cols()); auto output_gate_sm = activ_temp_map.block(3 * output_depth, 0, output_depth, activ_temp_map.cols()); ArrayMap<const float> prev_state_map = MapAsArrayWithLastDimAsRows(prev_state_data, prev_state_shape); ArrayMap<float> output_state_map = MapAsArrayWithLastDimAsRows(output_state_data, output_state_shape); ArrayMap<float> output_activ_map = MapAsArrayWithLastDimAsRows(output_activ_data, output_activ_shape); // Combined memory state and final output calculation ruy::profiler::ScopeLabel label2("MemoryStateAndFinalOutput"); output_state_map = input_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) * new_input_sm.tanh() + forget_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) * prev_state_map; output_activ_map = output_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) * output_state_map.tanh(); }
target: 0
cwe: [ "CWE-476", "CWE-369" ]
project: tensorflow
commit_id: 15691e456c7dc9bd6be203b09765b063bf4a380c
hash: 213,237,408,716,574,200,000,000,000,000,000,000,000
size: 119
Prevent dereferencing of null pointers in TFLite's `add.cc`. PiperOrigin-RevId: 387244946 Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
xmlBufferSetAllocationScheme(xmlBufferPtr buf, xmlBufferAllocationScheme scheme) { if (buf == NULL) { #ifdef DEBUG_BUFFER xmlGenericError(xmlGenericErrorContext, "xmlBufferSetAllocationScheme: buf == NULL\n"); #endif return; } if ((buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) || (buf->alloc == XML_BUFFER_ALLOC_IO)) return; if ((scheme == XML_BUFFER_ALLOC_DOUBLEIT) || (scheme == XML_BUFFER_ALLOC_EXACT) || (scheme == XML_BUFFER_ALLOC_HYBRID) || (scheme == XML_BUFFER_ALLOC_IMMUTABLE)) buf->alloc = scheme; }
target: 0
cwe: [ "CWE-20" ]
project: libxml2
commit_id: bdd66182ef53fe1f7209ab6535fda56366bd7ac9
hash: 306,245,546,924,384,900,000,000,000,000,000,000,000
size: 17
Avoid building recursive entities For https://bugzilla.gnome.org/show_bug.cgi?id=762100 When we detect a recusive entity we should really not build the associated data, moreover if someone bypass libxml2 fatal errors and still tries to serialize a broken entity make sure we don't risk to get ito a recursion * parser.c: xmlParserEntityCheck() don't build if entity loop were found and remove the associated text content * tree.c: xmlStringGetNodeList() avoid a potential recursion
SendRectSimple(rfbClientPtr cl, int x, int y, int w, int h) { int maxBeforeSize, maxAfterSize; int maxRectSize, maxRectWidth; int subrectMaxWidth, subrectMaxHeight; int dx, dy; int rw, rh; maxRectSize = tightConf[compressLevel].maxRectSize; maxRectWidth = tightConf[compressLevel].maxRectWidth; maxBeforeSize = maxRectSize * (cl->format.bitsPerPixel / 8); maxAfterSize = maxBeforeSize + (maxBeforeSize + 99) / 100 + 12; if (tightBeforeBufSize < maxBeforeSize) { tightBeforeBufSize = maxBeforeSize; if (tightBeforeBuf == NULL) tightBeforeBuf = (char *)malloc(tightBeforeBufSize); else tightBeforeBuf = (char *)realloc(tightBeforeBuf, tightBeforeBufSize); } if (tightAfterBufSize < maxAfterSize) { tightAfterBufSize = maxAfterSize; if (tightAfterBuf == NULL) tightAfterBuf = (char *)malloc(tightAfterBufSize); else tightAfterBuf = (char *)realloc(tightAfterBuf, tightAfterBufSize); } if (w > maxRectWidth || w * h > maxRectSize) { subrectMaxWidth = (w > maxRectWidth) ? maxRectWidth : w; subrectMaxHeight = maxRectSize / subrectMaxWidth; for (dy = 0; dy < h; dy += subrectMaxHeight) { for (dx = 0; dx < w; dx += maxRectWidth) { rw = (dx + maxRectWidth < w) ? maxRectWidth : w - dx; rh = (dy + subrectMaxHeight < h) ? subrectMaxHeight : h - dy; if (!SendSubrect(cl, x+dx, y+dy, rw, rh)) return FALSE; } } } else { if (!SendSubrect(cl, x, y, w, h)) return FALSE; } return TRUE; }
target: 0
cwe: []
project: libvncserver
commit_id: 804335f9d296440bb708ca844f5d89b58b50b0c6
hash: 282,956,838,767,217,900,000,000,000,000,000,000,000
size: 51
Thread safety for zrle, zlib, tight. Proposed tight security type fix for debian bug 517422.
wStream* rdp_send_stream_pdu_init(rdpRdp* rdp) { wStream* s = rdp_send_stream_init(rdp); if (!s) return NULL; if (!Stream_SafeSeek(s, RDP_SHARE_CONTROL_HEADER_LENGTH)) goto fail; return s; fail: Stream_Release(s); return NULL; }
target: 0
cwe: [ "CWE-125" ]
project: FreeRDP
commit_id: 9301bfe730c66180263248b74353daa99f5a969b
hash: 72,622,654,716,197,970,000,000,000,000,000,000,000
size: 15
Fixed #6007: Boundary checks in rdp_read_flow_control_pdu
xmlTextReaderGetAttribute(xmlTextReaderPtr reader, const xmlChar *name) { xmlChar *prefix = NULL; xmlChar *localname; xmlNsPtr ns; xmlChar *ret = NULL; if ((reader == NULL) || (name == NULL)) return(NULL); if (reader->node == NULL) return(NULL); if (reader->curnode != NULL) return(NULL); /* TODO: handle the xmlDecl */ if (reader->node->type != XML_ELEMENT_NODE) return(NULL); localname = xmlSplitQName2(name, &prefix); if (localname == NULL) { /* * Namespace default decl */ if (xmlStrEqual(name, BAD_CAST "xmlns")) { ns = reader->node->nsDef; while (ns != NULL) { if (ns->prefix == NULL) { return(xmlStrdup(ns->href)); } ns = ns->next; } return NULL; } return(xmlGetNoNsProp(reader->node, name)); } /* * Namespace default decl */ if (xmlStrEqual(prefix, BAD_CAST "xmlns")) { ns = reader->node->nsDef; while (ns != NULL) { if ((ns->prefix != NULL) && (xmlStrEqual(ns->prefix, localname))) { ret = xmlStrdup(ns->href); break; } ns = ns->next; } } else { ns = xmlSearchNs(reader->node->doc, reader->node, prefix); if (ns != NULL) ret = xmlGetNsProp(reader->node, localname, ns->href); } xmlFree(localname); if (prefix != NULL) xmlFree(prefix); return(ret); }
target: 0
cwe: [ "CWE-399" ]
project: libxml2
commit_id: 213f1fe0d76d30eaed6e5853057defc43e6df2c9
hash: 133,641,587,924,605,370,000,000,000,000,000,000,000
size: 58
CVE-2015-1819 Enforce the reader to run in constant memory One of the operation on the reader could resolve entities leading to the classic expansion issue. Make sure the buffer used for xmlreader operation is bounded. Introduce a new allocation type for the buffers for this effect.
static void free_export_entry(RBinWasmExportEntry *entry) { if (entry) { free (entry->field_str); free (entry); } }
target: 0
cwe: [ "CWE-787" ]
project: radare2
commit_id: b4ca66f5d4363d68a6379e5706353b3bde5104a4
hash: 181,850,403,941,295,030,000,000,000,000,000,000,000
size: 6
Fix #20336 - wasm bin parser ##crash
static int cluster_pages_for_defrag(struct inode *inode, struct page **pages, unsigned long start_index, unsigned long num_pages) { unsigned long file_end; u64 isize = i_size_read(inode); u64 page_start; u64 page_end; u64 page_cnt; int ret; int i; int i_done; struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; struct extent_io_tree *tree; struct extent_changeset *data_reserved = NULL; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); file_end = (isize - 1) >> PAGE_SHIFT; if (!isize || start_index > file_end) return 0; page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT); if (ret) return ret; i_done = 0; tree = &BTRFS_I(inode)->io_tree; /* step one, lock all the pages */ for (i = 0; i < page_cnt; i++) { struct page *page; again: page = find_or_create_page(inode->i_mapping, start_index + i, mask); if (!page) break; page_start = page_offset(page); page_end = page_start + PAGE_SIZE - 1; while (1) { lock_extent_bits(tree, page_start, page_end, &cached_state); ordered = btrfs_lookup_ordered_extent(inode, page_start); unlock_extent_cached(tree, page_start, page_end, &cached_state); if (!ordered) break; unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); lock_page(page); /* * we unlocked the page above, so we need check if * it was released or not. */ if (page->mapping != inode->i_mapping) { unlock_page(page); put_page(page); goto again; } } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); put_page(page); ret = -EIO; break; } } if (page->mapping != inode->i_mapping) { unlock_page(page); put_page(page); goto again; } pages[i] = page; i_done++; } if (!i_done || ret) goto out; if (!(inode->i_sb->s_flags & SB_ACTIVE)) goto out; /* * so now we have a nice long stream of locked * and up to date pages, lets wait on them */ for (i = 0; i < i_done; i++) wait_on_page_writeback(pages[i]); page_start = page_offset(pages[0]); page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, &cached_state); if (i_done != page_cnt) { spin_lock(&BTRFS_I(inode)->lock); btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, data_reserved, start_index << PAGE_SHIFT, (page_cnt - i_done) << PAGE_SHIFT, true); } set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); for (i = 0; i < i_done; i++) { clear_page_dirty_for_io(pages[i]); ClearPageChecked(pages[i]); set_page_extent_mapped(pages[i]); set_page_dirty(pages[i]); unlock_page(pages[i]); put_page(pages[i]); } btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, false); extent_changeset_free(data_reserved); return i_done; out: for (i = 0; i < i_done; i++) { unlock_page(pages[i]); put_page(pages[i]); } btrfs_delalloc_release_space(inode, data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT, true); btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, true); extent_changeset_free(data_reserved); return ret; }
target: 0
cwe: [ "CWE-476", "CWE-284" ]
project: linux
commit_id: 09ba3bc9dd150457c506e4661380a6183af651c1
hash: 69,914,384,736,945,540,000,000,000,000,000,000,000
size: 154
btrfs: merge btrfs_find_device and find_device Both btrfs_find_device() and find_device() does the same thing except that the latter does not take the seed device onto account in the device scanning context. We can merge them. Signed-off-by: Anand Jain <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len, addr_len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), msg, copied); else { err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr)); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
target: 1
cwe: [ "CWE-399" ]
project: linux
commit_id: beb39db59d14990e401e235faf66a6b9b31240b0
hash: 270,289,096,240,526,400,000,000,000,000,000,000,000
size: 101
udp: fix behavior of wrong checksums We have two problems in UDP stack related to bogus checksums : 1) We return -EAGAIN to application even if receive queue is not empty. This breaks applications using edge trigger epoll() 2) Under UDP flood, we can loop forever without yielding to other processes, potentially hanging the host, especially on non SMP. This patch is an attempt to make things better. We might in the future add extra support for rt applications wanting to better control time spent doing a recv() in a hostile environment. For example we could validate checksums before queuing packets in socket receive queue. Signed-off-by: Eric Dumazet <[email protected]> Cc: Willem de Bruijn <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void appendToBuffer(const StaticString &dataToAdd) { TRACE_POINT(); lock_guard<boost::mutex> l(dataSyncher); data.append(dataToAdd.data(), dataToAdd.size()); }
target: 0
cwe: []
project: passenger
commit_id: 8c6693e0818772c345c979840d28312c2edd4ba4
hash: 339,641,331,008,071,960,000,000,000,000,000,000,000
size: 5
Security check socket filenames reported by spawned application processes.
void unlock(const unsigned int n) { ReleaseMutex(mutex[n]); }
target: 0
cwe: [ "CWE-125" ]
project: CImg
commit_id: 10af1e8c1ad2a58a0a3342a856bae63e8f257abb
hash: 111,254,044,414,880,980,000,000,000,000,000,000,000
size: 1
Fix other issues in 'CImg<T>::load_bmp()'.
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp, struct elfhdr *interp_ehdr, struct arch_elf_state *state) { /* Dummy implementation, always proceed */ return 0; }
target: 0
cwe: []
project: linux
commit_id: eab09532d40090698b05a07c1c87f39fdbc5fab5
hash: 58,929,140,326,703,990,000,000,000,000,000,000,000
size: 7
binfmt_elf: use ELF_ET_DYN_BASE only for PIE The ELF_ET_DYN_BASE position was originally intended to keep loaders away from ET_EXEC binaries. (For example, running "/lib/ld-linux.so.2 /bin/cat" might cause the subsequent load of /bin/cat into where the loader had been loaded.) With the advent of PIE (ET_DYN binaries with an INTERP Program Header), ELF_ET_DYN_BASE continued to be used since the kernel was only looking at ET_DYN. However, since ELF_ET_DYN_BASE is traditionally set at the top 1/3rd of the TASK_SIZE, a substantial portion of the address space is unused. For 32-bit tasks when RLIMIT_STACK is set to RLIM_INFINITY, programs are loaded above the mmap region. This means they can be made to collide (CVE-2017-1000370) or nearly collide (CVE-2017-1000371) with pathological stack regions. Lowering ELF_ET_DYN_BASE solves both by moving programs below the mmap region in all cases, and will now additionally avoid programs falling back to the mmap region by enforcing MAP_FIXED for program loads (i.e. if it would have collided with the stack, now it will fail to load instead of falling back to the mmap region). To allow for a lower ELF_ET_DYN_BASE, loaders (ET_DYN without INTERP) are loaded into the mmap region, leaving space available for either an ET_EXEC binary with a fixed location or PIE being loaded into mmap by the loader. Only PIE programs are loaded offset from ELF_ET_DYN_BASE, which means architectures can now safely lower their values without risk of loaders colliding with their subsequently loaded programs. For 64-bit, ELF_ET_DYN_BASE is best set to 4GB to allow runtimes to use the entire 32-bit address space for 32-bit pointers. Thanks to PaX Team, Daniel Micay, and Rik van Riel for inspiration and suggestions on how to implement this solution. Fixes: d1fd836dcf00 ("mm: split ET_DYN ASLR from mmap ASLR") Link: http://lkml.kernel.org/r/20170621173201.GA114489@beast Signed-off-by: Kees Cook <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: Daniel Micay <[email protected]> Cc: Qualys Security Advisory <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Alexander Viro <[email protected]> Cc: Dmitry Safonov <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Grzegorz Andrejczuk <[email protected]> Cc: Masahiro Yamada <[email protected]> Cc: Benjamin Herrenschmidt <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: James Hogan <[email protected]> Cc: Martin Schwidefsky <[email protected]> Cc: Michael Ellerman <[email protected]> Cc: Paul Mackerras <[email protected]> Cc: Pratyush Anand <[email protected]> Cc: Russell King <[email protected]> Cc: Will Deacon <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int fts3DoIncrmerge( Fts3Table *p, /* FTS3 table handle */ const char *zParam /* Nul-terminated string containing "A,B" */ ){ int rc; int nMin = (FTS3_MERGE_COUNT / 2); int nMerge = 0; const char *z = zParam; /* Read the first integer value */ nMerge = fts3Getint(&z); /* If the first integer value is followed by a ',', read the second ** integer value. */ if( z[0]==',' && z[1]!='\0' ){ z++; nMin = fts3Getint(&z); } if( z[0]!='\0' || nMin<2 ){ rc = SQLITE_ERROR; }else{ rc = SQLITE_OK; if( !p->bHasStat ){ assert( p->bFts4==0 ); sqlite3Fts3CreateStatTable(&rc, p); } if( rc==SQLITE_OK ){ rc = sqlite3Fts3Incrmerge(p, nMerge, nMin); } sqlite3Fts3SegmentsClose(p); } return rc; }
1
[ "CWE-787" ]
sqlite
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
227,397,288,920,688,400,000,000,000,000,000,000,000
34
More improvements to shadow table corruption detection in FTS3. FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
Status _checkV2RolesArray(const BSONElement& rolesElement) { if (rolesElement.eoo()) { return _badValue("User document needs 'roles' field to be provided"); } if (rolesElement.type() != Array) { return _badValue("'roles' field must be an array"); } for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) { if ((*iter).type() != Object) { return _badValue("Elements in 'roles' array must objects"); } Status status = V2UserDocumentParser::checkValidRoleObject((*iter).Obj()); if (!status.isOK()) return status; } return Status::OK(); }
0
[ "CWE-613" ]
mongo
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
167,276,558,883,052,840,000,000,000,000,000,000,000
17
SERVER-38984 Validate unique User ID on UserCache hit
void ovs_lock(void) { mutex_lock(&ovs_mutex); }
0
[ "CWE-416" ]
net
36d5fe6a000790f56039afe26834265db0a3ad4c
323,940,927,593,627,500,000,000,000,000,000,000,000
4
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors skb_zerocopy can copy elements of the frags array between skbs, but it doesn't orphan them. Also, it doesn't handle errors, so this patch takes care of that as well, and modify the callers accordingly. skb_tx_error() is also added to the callers so they will signal the failed delivery towards the creator of the skb. Signed-off-by: Zoltan Kiss <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void event_target_unavailable(IRC_SERVER_REC *server, const char *data) { char *params, *channel; g_return_if_fail(data != NULL); params = event_get_params(data, 2, NULL, &channel); if (!server_ischannel(SERVER(server), channel)) { /* nick is unavailable. */ event_nick_in_use(server, data); } g_free(params); }
0
[ "CWE-476" ]
irssi
6c6c42e3d1b49d90aacc0b67f8540471cae02a1d
59,133,945,605,519,940,000,000,000,000,000,000,000
14
Merge branch 'security' into 'master' See merge request !7
gin::ObjectTemplateBuilder GetObjectTemplateBuilder( v8::Isolate* isolate) override { return gin::Wrappable<IPCRenderer>::GetObjectTemplateBuilder(isolate) .SetMethod("send", &IPCRenderer::SendMessage) .SetMethod("sendSync", &IPCRenderer::SendSync) .SetMethod("sendTo", &IPCRenderer::SendTo) .SetMethod("sendToHost", &IPCRenderer::SendToHost) .SetMethod("invoke", &IPCRenderer::Invoke) .SetMethod("postMessage", &IPCRenderer::PostMessage); }
0
[]
electron
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
98,865,481,553,504,000,000,000,000,000,000,000,000
10
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344) * fix: ensure ElectronBrowser mojo service is only bound to authorized render frames Notes: no-notes * refactor: extract electron API IPC to its own mojo interface * fix: just check main frame not primary main frame Co-authored-by: Samuel Attard <[email protected]> Co-authored-by: Samuel Attard <[email protected]>
int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options) { if (bs->drv->bdrv_amend_options == NULL) { return -ENOTSUP; } return bs->drv->bdrv_amend_options(bs, options); }
0
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
17,184,879,606,035,321,000,000,000,000,000,000,000
7
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
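Illustrative note: the bound described in this fix can be sketched as a single guard that rejects any request larger than INT_MAX bytes or extending past the image. This is a hedged sketch with assumed names (check_request_bounds, image_size_bytes, the 512-byte sector size), not the actual bdrv_check_request() from QEMU.

#include <limits.h>
#include <stdint.h>
#include <errno.h>

/* Sketch: cap a single block request at INT_MAX bytes and keep it inside
 * the image.  512 is the sector size assumed here. */
static int check_request_bounds(int64_t offset, int64_t image_size_bytes,
                                unsigned int nb_sectors)
{
    if (nb_sectors > INT_MAX / 512) {
        return -EINVAL;                 /* request itself is too large */
    }
    int64_t len = (int64_t)nb_sectors * 512;
    if (offset < 0 || offset > image_size_bytes - len) {
        return -EIO;                    /* runs past the end of the image */
    }
    return 0;
}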
static void iscsi_ioctl_handle_emulated(IscsiAIOCB *acb, int req, void *buf) { BlockDriverState *bs = acb->common.bs; IscsiLun *iscsilun = bs->opaque; int ret = 0; switch (req) { case SG_GET_VERSION_NUM: *(int *)buf = 30000; break; case SG_GET_SCSI_ID: ((struct sg_scsi_id *)buf)->scsi_type = iscsilun->type; break; default: ret = -EINVAL; } assert(!acb->bh); acb->bh = aio_bh_new(bdrv_get_aio_context(bs), iscsi_ioctl_bh_completion, acb); acb->ret = ret; qemu_bh_schedule(acb->bh); }
0
[ "CWE-125" ]
qemu
ff0507c239a246fd7215b31c5658fc6a3ee1e4c5
335,757,062,989,010,180,000,000,000,000,000,000,000
22
block/iscsi:fix heap-buffer-overflow in iscsi_aio_ioctl_cb There is an overflow, the source 'datain.data[2]' is 100 bytes, but the 'ss' is 252 bytes.This may cause a security issue because we can access a lot of unrelated memory data. The len for sbp copy data should take the minimum of mx_sb_len and sb_len_wr, not the maximum. If we use iscsi device for VM backend storage, ASAN show stack: READ of size 252 at 0xfffd149dcfc4 thread T0 #0 0xaaad433d0d34 in __asan_memcpy (aarch64-softmmu/qemu-system-aarch64+0x2cb0d34) #1 0xaaad45f9d6d0 in iscsi_aio_ioctl_cb /qemu/block/iscsi.c:996:9 #2 0xfffd1af0e2dc (/usr/lib64/iscsi/libiscsi.so.8+0xe2dc) #3 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174) #4 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac) #5 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5 #6 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9 #7 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20 #8 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520 #9 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5 #10 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4) #11 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9 #12 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242 #13 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518 #14 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9 #15 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5 #16 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c) #17 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740) 0xfffd149dcfc4 is located 0 bytes to the right of 100-byte region [0xfffd149dcf60,0xfffd149dcfc4) allocated by thread T0 here: #0 0xaaad433d1e70 in __interceptor_malloc (aarch64-softmmu/qemu-system-aarch64+0x2cb1e70) #1 0xfffd1af0e254 (/usr/lib64/iscsi/libiscsi.so.8+0xe254) #2 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174) #3 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac) #4 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5 #5 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9 #6 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20 #7 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520 #8 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5 #9 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4) #10 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9 #11 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242 #12 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518 #13 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9 #14 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5 #15 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c) #16 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740) Reported-by: Euler Robot <[email protected]> Signed-off-by: Chen Qun <[email protected]> Reviewed-by: Stefan Hajnoczi <[email protected]> Message-id: [email protected] Reviewed-by: Daniel P. Berrangé <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
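Illustrative note: the fix amounts to copying min(mx_sb_len, sb_len_wr) bytes of sense data rather than the maximum. A hedged sketch of that pattern follows; function and parameter names are taken from the SG_IO fields mentioned in the message, not from the actual QEMU patch.

#include <string.h>
#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Copy sense bytes without overrunning either the caller's buffer (mx_sb_len)
 * or the data actually returned by the target (sb_len_wr). */
static void copy_sense_data(unsigned char *sbp, size_t mx_sb_len,
                            const unsigned char *sense, size_t sb_len_wr)
{
    size_t len = MIN(mx_sb_len, sb_len_wr);
    memcpy(sbp, sense, len);
}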
word32 Signature_Encoder::SetDigest(const byte* d, word32 dSz, byte* output) { output[0] = OCTET_STRING; output[1] = dSz; memcpy(&output[2], d, dSz); return dSz + 2; }
0
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
23,132,795,980,733,900,000,000,000,000,000,000,000
8
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
static void coroutine_fn v9fs_read(void *opaque) { int32_t fid; uint64_t off; ssize_t err = 0; int32_t count = 0; size_t offset = 7; uint32_t max_count; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count); if (err < 0) { goto out_nofid; } trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count); fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -EINVAL; goto out_nofid; } if (fidp->fid_type == P9_FID_DIR) { if (s->proto_version != V9FS_PROTO_2000U) { warn_report_once( "9p: bad client: T_read request on directory only expected " "with 9P2000.u protocol version" ); err = -EOPNOTSUPP; goto out; } if (off == 0) { v9fs_co_rewinddir(pdu, fidp); } count = v9fs_do_readdir_with_stat(pdu, fidp, max_count); if (count < 0) { err = count; goto out; } err = pdu_marshal(pdu, offset, "d", count); if (err < 0) { goto out; } err += offset + count; } else if (fidp->fid_type == P9_FID_FILE) { QEMUIOVector qiov_full; QEMUIOVector qiov; int32_t len; v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false); qemu_iovec_init(&qiov, qiov_full.niov); do { qemu_iovec_reset(&qiov); qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count); if (0) { print_sg(qiov.iov, qiov.niov); } /* Loop in case of EINTR */ do { len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off); if (len >= 0) { off += len; count += len; } } while (len == -EINTR && !pdu->cancelled); if (len < 0) { /* IO error return the error */ err = len; goto out_free_iovec; } } while (count < max_count && len > 0); err = pdu_marshal(pdu, offset, "d", count); if (err < 0) { goto out_free_iovec; } err += offset + count; out_free_iovec: qemu_iovec_destroy(&qiov); qemu_iovec_destroy(&qiov_full); } else if (fidp->fid_type == P9_FID_XATTR) { err = v9fs_xattr_read(s, pdu, fidp, off, max_count); } else { err = -EINVAL; } trace_v9fs_read_return(pdu->tag, pdu->id, count, err); out: put_fid(pdu, fidp); out_nofid: pdu_complete(pdu, err); }
0
[ "CWE-362" ]
qemu
89fbea8737e8f7b954745a1ffc4238d377055305
283,025,395,127,964,770,000,000,000,000,000,000,000
91
9pfs: Fully restart unreclaim loop (CVE-2021-20181) Depending on the client activity, the server can be asked to open a huge number of file descriptors and eventually hit RLIMIT_NOFILE. This is currently mitigated using a reclaim logic : the server closes the file descriptors of idle fids, based on the assumption that it will be able to re-open them later. This assumption doesn't hold of course if the client requests the file to be unlinked. In this case, we loop on the entire fid list and mark all related fids as unreclaimable (the reclaim logic will just ignore them) and, of course, we open or re-open their file descriptors if needed since we're about to unlink the file. This is the purpose of v9fs_mark_fids_unreclaim(). Since the actual opening of a file can cause the coroutine to yield, another client request could possibly add a new fid that we may want to mark as non-reclaimable as well. The loop is thus restarted if the re-open request was actually transmitted to the backend. This is achieved by keeping a reference on the first fid (head) before traversing the list. This is wrong in several ways: - a potential clunk request from the client could tear the first fid down and cause the reference to be stale. This leads to a use-after-free error that can be detected with ASAN, using a custom 9p client - fids are added at the head of the list : restarting from the previous head will always miss fids added by a some other potential request All these problems could be avoided if fids were being added at the end of the list. This can be achieved with a QSIMPLEQ, but this is probably too much change for a bug fix. For now let's keep it simple and just restart the loop from the current head. Fixes: CVE-2021-20181 Buglink: https://bugs.launchpad.net/qemu/+bug/1911666 Reported-by: Zero Day Initiative <[email protected]> Reviewed-by: Christian Schoenebeck <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Message-Id: <[email protected]> Signed-off-by: Greg Kurz <[email protected]>
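Illustrative note: the safe pattern described here, restarting a list traversal from the current head whenever the loop body may have yielded, can be shown with a generic sketch. The list type and the do_work() callback below are invented for illustration, and do_work() is assumed to clear the per-node flag before it can block, so the restart terminates.

struct node {
    struct node *next;
    int needs_work;
};

/* Walk *head, but never trust iterators across a blocking call: if do_work()
 * reports that it blocked (nonzero return), other code may have added or
 * removed nodes, so start over from the current head. */
static void process_all(struct node **head, int (*do_work)(struct node *))
{
again:
    for (struct node *n = *head; n != NULL; n = n->next) {
        if (n->needs_work && do_work(n)) {
            goto again;
        }
    }
}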
R_API RList* r_core_anal_cycles(RCore *core, int ccl) { ut64 addr = core->offset; int depth = 0; RAnalOp *op = NULL; RAnalCycleFrame *prev = NULL, *cf = NULL; RAnalCycleHook *ch; RList *hooks = r_list_new (); if (!hooks) { return NULL; } cf = r_anal_cycle_frame_new (); r_cons_break_push (NULL, NULL); while (cf && !r_cons_is_breaked ()) { if ((op = r_core_anal_op (core, addr, R_ANAL_OP_MASK_BASIC)) && (op->cycles) && (ccl > 0)) { r_cons_clear_line (1); eprintf ("%i -- ", ccl); addr += op->size; switch (op->type) { case R_ANAL_OP_TYPE_JMP: addr = op->jump; ccl -= op->cycles; loganal (op->addr, addr, depth); break; case R_ANAL_OP_TYPE_UJMP: case R_ANAL_OP_TYPE_MJMP: case R_ANAL_OP_TYPE_UCALL: case R_ANAL_OP_TYPE_ICALL: case R_ANAL_OP_TYPE_RCALL: case R_ANAL_OP_TYPE_IRCALL: ch = R_NEW0 (RAnalCycleHook); ch->addr = op->addr; eprintf ("0x%08"PFMT64x" > ?\r", op->addr); ch->cycles = ccl; r_list_append (hooks, ch); ch = NULL; while (!ch && cf) { ch = r_list_pop (cf->hooks); if (ch) { addr = ch->addr; ccl = ch->cycles; free (ch); } else { r_anal_cycle_frame_free (cf); cf = prev; if (cf) { prev = cf->prev; } } } break; case R_ANAL_OP_TYPE_CJMP: ch = R_NEW0 (RAnalCycleHook); ch->addr = addr; ch->cycles = ccl - op->failcycles; r_list_push (cf->hooks, ch); ch = NULL; addr = op->jump; loganal (op->addr, addr, depth); break; case R_ANAL_OP_TYPE_UCJMP: case R_ANAL_OP_TYPE_UCCALL: ch = R_NEW0 (RAnalCycleHook); ch->addr = op->addr; ch->cycles = ccl; r_list_append (hooks, ch); ch = NULL; ccl -= op->failcycles; eprintf ("0x%08"PFMT64x" > ?\r", op->addr); break; case R_ANAL_OP_TYPE_CCALL: ch = R_NEW0 (RAnalCycleHook); ch->addr = addr; ch->cycles = ccl - op->failcycles; r_list_push (cf->hooks, ch); ch = NULL; case R_ANAL_OP_TYPE_CALL: if (op->addr != op->jump) { //no selfies cf->naddr = addr; prev = cf; cf = r_anal_cycle_frame_new (); cf->prev = prev; } ccl -= op->cycles; addr = op->jump; loganal (op->addr, addr, depth - 1); break; case R_ANAL_OP_TYPE_RET: ch = R_NEW0 (RAnalCycleHook); if (prev) { ch->addr = prev->naddr; ccl -= op->cycles; ch->cycles = ccl; r_list_push (prev->hooks, ch); eprintf ("0x%08"PFMT64x" < 0x%08"PFMT64x"\r", prev->naddr, op->addr); } else { ch->addr = op->addr; ch->cycles = ccl; r_list_append (hooks, ch); eprintf ("? < 0x%08"PFMT64x"\r", op->addr); } ch = NULL; while (!ch && cf) { ch = r_list_pop (cf->hooks); if (ch) { addr = ch->addr; ccl = ch->cycles; free (ch); } else { r_anal_cycle_frame_free (cf); cf = prev; if (cf) { prev = cf->prev; } } } break; case R_ANAL_OP_TYPE_CRET: ch = R_NEW0 (RAnalCycleHook); if (prev) { ch->addr = prev->naddr; ch->cycles = ccl - op->cycles; r_list_push (prev->hooks, ch); eprintf ("0x%08"PFMT64x" < 0x%08"PFMT64x"\r", prev->naddr, op->addr); } else { ch->addr = op->addr; ch->cycles = ccl - op->cycles; r_list_append (hooks, ch); eprintf ("? 
< 0x%08"PFMT64x"\r", op->addr); } ccl -= op->failcycles; break; default: ccl -= op->cycles; eprintf ("0x%08"PFMT64x"\r", op->addr); break; } } else { ch = R_NEW0 (RAnalCycleHook); if (!ch) { r_anal_cycle_frame_free (cf); r_list_free (hooks); return NULL; } ch->addr = addr; ch->cycles = ccl; r_list_append (hooks, ch); ch = NULL; while (!ch && cf) { ch = r_list_pop (cf->hooks); if (ch) { addr = ch->addr; ccl = ch->cycles; free (ch); } else { r_anal_cycle_frame_free (cf); cf = prev; if (cf) { prev = cf->prev; } } } } r_anal_op_free (op); } if (r_cons_is_breaked ()) { while (cf) { ch = r_list_pop (cf->hooks); while (ch) { free (ch); ch = r_list_pop (cf->hooks); } prev = cf->prev; r_anal_cycle_frame_free (cf); cf = prev; } } r_cons_break_pop (); return hooks; }
0
[ "CWE-416" ]
radare2
10517e3ff0e609697eb8cde60ec8dc999ee5ea24
248,057,184,089,810,450,000,000,000,000,000,000,000
179
aaef on arm/thumb switches causes uaf ##crash * Reported by peacock-doris via huntr.dev * Reproducer: poc_uaf_r_reg_get
DSA_Signer::DSA_Signer(const DSA_PrivateKey& key) : key_(key) {}
0
[]
mysql-server
5c6169fb309981b564a17bee31b367a18866d674
70,295,532,747,550,350,000,000,000,000,000,000,000
3
Bug #24740291: YASSL UPDATE TO 2.4.2
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, struct qstr *name) { struct dentry *found, *res; /* * First check if a dentry matching the name already exists, * if not go ahead and create it now. */ found = d_hash_and_lookup(dentry->d_parent, name); if (found) { iput(inode); return found; } if (d_in_lookup(dentry)) { found = d_alloc_parallel(dentry->d_parent, name, dentry->d_wait); if (IS_ERR(found) || !d_in_lookup(found)) { iput(inode); return found; } } else { found = d_alloc(dentry->d_parent, name); if (!found) { iput(inode); return ERR_PTR(-ENOMEM); } } res = d_splice_alias(inode, found); if (res) { dput(found); return res; } return found; }
0
[ "CWE-362", "CWE-399" ]
linux
49d31c2f389acfe83417083e1208422b4091cd9e
4,782,680,713,006,220,000,000,000,000,000,000,000
35
dentry name snapshots take_dentry_name_snapshot() takes a safe snapshot of dentry name; if the name is a short one, it gets copied into caller-supplied structure, otherwise an extra reference to external name is grabbed (those are never modified). In either case the pointer to stable string is stored into the same structure. dentry must be held by the caller of take_dentry_name_snapshot(), but may be freely dropped afterwards - the snapshot will stay until destroyed by release_dentry_name_snapshot(). Intended use: struct name_snapshot s; take_dentry_name_snapshot(&s, dentry); ... access s.name ... release_dentry_name_snapshot(&s); Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name to pass down with event. Signed-off-by: Al Viro <[email protected]>
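Illustrative note: the commit message already states the intended calling sequence; written out as a caller-side sketch it looks like the following. Kernel context is assumed, and the string-valued s.name field follows this commit's description, which may differ in later kernels.

#include <linux/dcache.h>
#include <linux/printk.h>

/* Take a stable copy of the dentry's name, use it after the dentry may have
 * been renamed or dropped, then release the snapshot. */
static void log_dentry_name(struct dentry *dentry)
{
    struct name_snapshot s;

    take_dentry_name_snapshot(&s, dentry);
    pr_info("event on %s\n", s.name);
    release_dentry_name_snapshot(&s);
}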
static void rbd_notify_op_lock(struct rbd_device *rbd_dev, enum rbd_notify_op notify_op) { __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL); }
0
[ "CWE-863" ]
linux
f44d04e696feaf13d192d942c4f14ad2e117065a
268,522,954,791,189,900,000,000,000,000,000,000,000
5
rbd: require global CAP_SYS_ADMIN for mapping and unmapping It turns out that currently we rely only on sysfs attribute permissions: $ ll /sys/bus/rbd/{add*,remove*} --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove --w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major This means that images can be mapped and unmapped (i.e. block devices can be created and deleted) by a UID 0 process even after it drops all privileges or by any process with CAP_DAC_OVERRIDE in its user namespace as long as UID 0 is mapped into that user namespace. Be consistent with other virtual block devices (loop, nbd, dm, md, etc) and require CAP_SYS_ADMIN in the initial user namespace for mapping and unmapping, and also for dumping the configuration string and refreshing the image header. Cc: [email protected] Signed-off-by: Ilya Dryomov <[email protected]> Reviewed-by: Jeff Layton <[email protected]>
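Illustrative note: the change is a capability gate at the top of the map/unmap sysfs handlers; a hedged sketch of that gate follows, with the handler signature simplified and kernel context assumed.

#include <linux/capability.h>
#include <linux/errno.h>

/* Creating or deleting block devices requires CAP_SYS_ADMIN in the initial
 * user namespace, matching loop/nbd/dm/md; capable() checks exactly that. */
static ssize_t rbd_add_guarded(const char *buf, size_t count)
{
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    /* ... parse buf and map the image as before ... */
    return count;
}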
video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, v4l2_kioctl func) { char sbuf[128]; void *mbuf = NULL; void *parg = (void *)arg; long err = -EINVAL; bool has_array_args; bool always_copy = false; size_t array_size = 0; void __user *user_ptr = NULL; void **kernel_ptr = NULL; unsigned int cmd = video_translate_cmd(orig_cmd); const size_t ioc_size = _IOC_SIZE(cmd); /* Copy arguments into temp kernel buffer */ if (_IOC_DIR(cmd) != _IOC_NONE) { if (ioc_size <= sizeof(sbuf)) { parg = sbuf; } else { /* too big to allocate from stack */ mbuf = kvmalloc(ioc_size, GFP_KERNEL); if (NULL == mbuf) return -ENOMEM; parg = mbuf; } err = video_get_user((void __user *)arg, parg, cmd, orig_cmd, &always_copy); if (err) goto out; } err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr); if (err < 0) goto out; has_array_args = err; if (has_array_args) { /* * When adding new types of array args, make sure that the * parent argument to ioctl (which contains the pointer to the * array) fits into sbuf (so that mbuf will still remain * unused up to here). */ mbuf = kvmalloc(array_size, GFP_KERNEL); err = -ENOMEM; if (NULL == mbuf) goto out_array_args; err = -EFAULT; if (in_compat_syscall()) err = v4l2_compat_get_array_args(file, mbuf, user_ptr, array_size, orig_cmd, parg); else err = copy_from_user(mbuf, user_ptr, array_size) ? -EFAULT : 0; if (err) goto out_array_args; *kernel_ptr = mbuf; } /* Handles IOCTL */ err = func(file, cmd, parg); if (err == -ENOTTY || err == -ENOIOCTLCMD) { err = -ENOTTY; goto out; } if (err == 0) { if (cmd == VIDIOC_DQBUF) trace_v4l2_dqbuf(video_devdata(file)->minor, parg); else if (cmd == VIDIOC_QBUF) trace_v4l2_qbuf(video_devdata(file)->minor, parg); } if (has_array_args) { *kernel_ptr = (void __force *)user_ptr; if (in_compat_syscall()) { int put_err; put_err = v4l2_compat_put_array_args(file, user_ptr, mbuf, array_size, orig_cmd, parg); if (put_err) err = put_err; } else if (copy_to_user(user_ptr, mbuf, array_size)) { err = -EFAULT; } goto out_array_args; } /* * Some ioctls can return an error, but still have valid * results that must be returned. */ if (err < 0 && !always_copy) goto out; out_array_args: if (video_put_user((void __user *)arg, parg, cmd, orig_cmd)) err = -EFAULT; out: kvfree(mbuf); return err; }
1
[ "CWE-401" ]
linux
fb18802a338b36f675a388fc03d2aa504a0d0899
335,168,946,814,517,320,000,000,000,000,000,000,000
105
media: v4l: ioctl: Fix memory leak in video_usercopy When an IOCTL with argument size larger than 128 that also used array arguments were handled, two memory allocations were made but alas, only the latter one of them was released. This happened because there was only a single local variable to hold such a temporary allocation. Fix this by adding separate variables to hold the pointers to the temporary allocations. Reported-by: Arnd Bergmann <[email protected]> Reported-by: [email protected] Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code") Cc: [email protected] Signed-off-by: Sakari Ailus <[email protected]> Acked-by: Arnd Bergmann <[email protected]> Acked-by: Hans Verkuil <[email protected]> Reviewed-by: Laurent Pinchart <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
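Illustrative note: the leak came from holding two temporary allocations in one pointer; the shape of the fix is to give each allocation its own variable and free both on every exit path. A generic userspace sketch of that pattern, not the v4l2 code itself:

#include <stdlib.h>

static int handle_ioctl_buffers(size_t arg_size, size_t array_size)
{
    void *arg_buf = malloc(arg_size);
    void *array_buf = NULL;     /* separate variable, so arg_buf is never lost */
    int err = 0;

    if (!arg_buf)
        return -1;

    if (array_size) {
        array_buf = malloc(array_size);
        if (!array_buf) {
            err = -1;
            goto out;
        }
    }

    /* ... copy in, run the handler, copy out ... */

out:
    free(array_buf);            /* both buffers are released on every path */
    free(arg_buf);
    return err;
}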
void SSL_SESSION_free(SSL_SESSION *ss) { int i; if (ss == NULL) return; i = CRYPTO_add(&ss->references, -1, CRYPTO_LOCK_SSL_SESSION); #ifdef REF_PRINT REF_PRINT("SSL_SESSION", ss); #endif if (i > 0) return; #ifdef REF_CHECK if (i < 0) { fprintf(stderr, "SSL_SESSION_free, bad reference count\n"); abort(); /* ok */ } #endif CRYPTO_free_ex_data(CRYPTO_EX_INDEX_SSL_SESSION, ss, &ss->ex_data); OPENSSL_cleanse(ss->key_arg, sizeof ss->key_arg); OPENSSL_cleanse(ss->master_key, sizeof ss->master_key); OPENSSL_cleanse(ss->session_id, sizeof ss->session_id); if (ss->sess_cert != NULL) ssl_sess_cert_free(ss->sess_cert); if (ss->peer != NULL) X509_free(ss->peer); if (ss->ciphers != NULL) sk_SSL_CIPHER_free(ss->ciphers); #ifndef OPENSSL_NO_TLSEXT if (ss->tlsext_hostname != NULL) OPENSSL_free(ss->tlsext_hostname); if (ss->tlsext_tick != NULL) OPENSSL_free(ss->tlsext_tick); # ifndef OPENSSL_NO_EC ss->tlsext_ecpointformatlist_length = 0; if (ss->tlsext_ecpointformatlist != NULL) OPENSSL_free(ss->tlsext_ecpointformatlist); ss->tlsext_ellipticcurvelist_length = 0; if (ss->tlsext_ellipticcurvelist != NULL) OPENSSL_free(ss->tlsext_ellipticcurvelist); # endif /* OPENSSL_NO_EC */ #endif #ifndef OPENSSL_NO_PSK if (ss->psk_identity_hint != NULL) OPENSSL_free(ss->psk_identity_hint); if (ss->psk_identity != NULL) OPENSSL_free(ss->psk_identity); #endif #ifndef OPENSSL_NO_SRP if (ss->srp_username != NULL) OPENSSL_free(ss->srp_username); #endif OPENSSL_cleanse(ss, sizeof(*ss)); OPENSSL_free(ss); }
0
[ "CWE-362" ]
openssl
939b4960276b040fc0ed52232238fcc9e2e9ec21
81,397,742,319,612,070,000,000,000,000,000,000,000
58
Fix race condition in NewSessionTicket If a NewSessionTicket is received by a multi-threaded client when attempting to reuse a previous ticket then a race condition can occur potentially leading to a double free of the ticket data. CVE-2015-1791 This also fixes RT#3808 where a session ID is changed for a session already in the client session cache. Since the session ID is the key to the cache this breaks the cache access. Parts of this patch were inspired by this Akamai change: https://github.com/akamai/openssl/commit/c0bf69a791239ceec64509f9f19fcafb2461b0d3 Reviewed-by: Rich Salz <[email protected]> (cherry picked from commit 27c76b9b8010b536687318739c6f631ce4194688) Conflicts: ssl/ssl.h ssl/ssl_err.c
QPDFObjectHandle::dereference() { if (this->obj.getPointer() == 0) { PointerHolder<QPDFObject> obj = QPDF::Resolver::resolve( this->qpdf, this->objid, this->generation); if (obj.getPointer() == 0) { QTC::TC("qpdf", "QPDFObjectHandle indirect to unknown"); this->obj = new QPDF_Null(); } else if (dynamic_cast<QPDF_Reserved*>(obj.getPointer())) { // Do not resolve } else { this->reserved = false; this->obj = obj; } } }
0
[ "CWE-835" ]
qpdf
afe0242b263a9e1a8d51dd81e42ab6de2e5127eb
171,314,799,305,822,570,000,000,000,000,000,000,000
22
Handle object ID 0 (fixes #99) This is CVE-2017-9208. The QPDF library uses object ID 0 internally as a sentinel to represent a direct object, but prior to this fix, was not blocking handling of 0 0 obj or 0 0 R as a special case. Creating an object in the file with 0 0 obj could cause various infinite loops. The PDF spec doesn't allow for object 0. Having qpdf handle object 0 might be a better fix, but changing all the places in the code that assumes objid == 0 means direct would be risky.
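Illustrative note: the fix boils down to refusing object number 0 ("0 0 obj" / "0 0 R") before it can be confused with the library's internal direct-object sentinel. A simplified C-style sketch of the guard; QPDF itself is C++, and the function name here is invented.

/* The PDF spec does not allow object number 0, and objid == 0 is reserved
 * internally for direct objects, so any such reference is rejected early. */
static int object_ref_is_valid(int objid, int generation)
{
    (void)generation;
    return objid != 0;
}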
test_keys ( ELG_secret_key *sk, unsigned int nbits, int nodie ) { ELG_public_key pk; gcry_mpi_t test = gcry_mpi_new ( 0 ); gcry_mpi_t out1_a = gcry_mpi_new ( nbits ); gcry_mpi_t out1_b = gcry_mpi_new ( nbits ); gcry_mpi_t out2 = gcry_mpi_new ( nbits ); int failed = 0; pk.p = sk->p; pk.g = sk->g; pk.y = sk->y; gcry_mpi_randomize ( test, nbits, GCRY_WEAK_RANDOM ); do_encrypt ( out1_a, out1_b, test, &pk ); decrypt ( out2, out1_a, out1_b, sk ); if ( mpi_cmp( test, out2 ) ) failed |= 1; sign ( out1_a, out1_b, test, sk ); if ( !verify( out1_a, out1_b, test, &pk ) ) failed |= 2; gcry_mpi_release ( test ); gcry_mpi_release ( out1_a ); gcry_mpi_release ( out1_b ); gcry_mpi_release ( out2 ); if (failed && !nodie) log_fatal ("Elgamal test key for %s %s failed\n", (failed & 1)? "encrypt+decrypt":"", (failed & 2)? "sign+verify":""); if (failed && DBG_CIPHER) log_debug ("Elgamal test key for %s %s failed\n", (failed & 1)? "encrypt+decrypt":"", (failed & 2)? "sign+verify":""); return failed; }
0
[ "CWE-200" ]
libgcrypt
35cd81f134c0da4e7e6fcfe40d270ee1251f52c2
257,114,281,611,063,450,000,000,000,000,000,000,000
40
cipher: Use ciphertext blinding for Elgamal decryption. * cipher/elgamal.c (USE_BLINDING): New. (decrypt): Rewrite to use ciphertext blinding. -- CVE-id: CVE-2014-3591 As a countermeasure to a new side-channel attacks on sliding windows exponentiation we blind the ciphertext for Elgamal decryption. This is similar to what we are doing with RSA. This patch is a backport of the GnuPG 1.4 commit ff53cf06e966dce0daba5f2c84e03ab9db2c3c8b. Unfortunately, the performance impact of Elgamal blinding is quite noticeable (i5-2410M CPU @ 2.30GHz TP 220): Algorithm generate 100*priv 100*public ------------------------------------------------ ELG 1024 bit - 100ms 90ms ELG 2048 bit - 330ms 350ms ELG 3072 bit - 660ms 790ms Algorithm generate 100*priv 100*public ------------------------------------------------ ELG 1024 bit - 150ms 90ms ELG 2048 bit - 520ms 360ms ELG 3072 bit - 1100ms 800ms Signed-off-by: Werner Koch <[email protected]> (cherry picked from commit 410d70bad9a650e3837055e36f157894ae49a57d) Resolved conflicts: cipher/elgamal.c.
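Illustrative note: one textbook way to blind an Elgamal decryption is to randomise the first ciphertext component before the secret exponentiation and divide the mask back out: with ciphertext (a, b) = (g^k, m*y^k), pick random r, compute t = a*g^r, s = t^x = a^x*y^r, and recover m = b*y^r*s^(-1) mod p. The sketch below uses libgcrypt's public MPI helpers to show the idea; it mirrors the concept, not the exact patched decrypt(), and error handling is omitted.

#include <gcrypt.h>

static void elg_decrypt_blinded(gcry_mpi_t m, gcry_mpi_t a, gcry_mpi_t b,
                                gcry_mpi_t p, gcry_mpi_t g,
                                gcry_mpi_t y, gcry_mpi_t x)
{
    unsigned int nbits = gcry_mpi_get_nbits(p);
    gcry_mpi_t r  = gcry_mpi_new(nbits);
    gcry_mpi_t t  = gcry_mpi_new(nbits);
    gcry_mpi_t s  = gcry_mpi_new(nbits);
    gcry_mpi_t yr = gcry_mpi_new(nbits);

    gcry_mpi_randomize(r, nbits, GCRY_WEAK_RANDOM);  /* unpredictable mask */

    gcry_mpi_powm(t, g, r, p);     /* t  = g^r                */
    gcry_mpi_mulm(t, t, a, p);     /* t  = a * g^r            */
    gcry_mpi_powm(s, t, x, p);     /* s  = a^x * y^r          */
    gcry_mpi_powm(yr, y, r, p);    /* yr = y^r                */
    gcry_mpi_invm(s, s, p);        /* s  = (a^x * y^r)^(-1)   */
    gcry_mpi_mulm(m, b, yr, p);    /* m  = b * y^r            */
    gcry_mpi_mulm(m, m, s, p);     /* m  = b / a^x  (blinded) */

    gcry_mpi_release(r);
    gcry_mpi_release(t);
    gcry_mpi_release(s);
    gcry_mpi_release(yr);
}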
nautilus_application_close_all_navigation_windows (void) { GList *list_copy; GList *l; list_copy = g_list_copy (nautilus_application_window_list); for (l = list_copy; l != NULL; l = l->next) { NautilusWindow *window; window = NAUTILUS_WINDOW (l->data); if (NAUTILUS_IS_NAVIGATION_WINDOW (window)) { nautilus_window_close (window); } } g_list_free (list_copy); }
0
[]
nautilus
1e1c916f5537eb5e4144950f291f4a3962fc2395
3,196,674,671,878,440,600,000,000,000,000,000,000
17
Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-file-operations.c: * libnautilus-private/nautilus-file-operations.h: * libnautilus-private/nautilus-mime-actions.c: Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. * src/nautilus-application.c: Mark all desktopfiles on the desktop trusted on first run. svn path=/trunk/; revision=15009
ESTree::NodeKind getLazyFunctionKind(ESTree::FunctionLikeNode *node) { if (node->isMethodDefinition) { // This is not a regular function expression but getter/setter. // If we want to reparse it later, we have to start from an // identifier and not from a 'function' keyword. return ESTree::NodeKind::Property; } return node->getKind(); }
0
[ "CWE-125", "CWE-787" ]
hermes
091835377369c8fd5917d9b87acffa721ad2a168
90,943,214,117,058,700,000,000,000,000,000,000,000
9
Correctly restore whether or not a function is an inner generator Summary: If a generator was large enough to be lazily compiled, we would lose that information when reconstituting the function's context. This meant the function was generated as a regular function instead of a generator. #utd-hermes-ignore-android Reviewed By: tmikov Differential Revision: D23580247 fbshipit-source-id: af5628bf322cbdc7c7cdfbb5f8d0756328518ea1
static void resume_callback(void) { gboolean suspend = FALSE; DBG("Resuming ..."); queue_foreach(devices, set_suspend, GINT_TO_POINTER(suspend)); }
0
[]
bluez
8cdbd3b09f29da29374e2f83369df24228da0ad1
290,141,632,237,407,970,000,000,000,000,000,000,000
8
HOGP must only accept data from bonded devices. HOGP 1.0 Section 6.1 establishes that the HOGP must require bonding. Reference: https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.htm
static CImg<T> get_load_bmp(std::FILE *const file) { return CImg<T>().load_bmp(file); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
314,485,715,867,327,950,000,000,000,000,000,000,000
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
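Illustrative note: the check described rejects headers whose declared dimensions imply more pixel data than the file contains. A hedged generic sketch with overflow-safe arithmetic, not CImg's code:

#include <stdint.h>

/* Return nonzero if width*height*bytes_per_pixel overflows or exceeds the
 * file size, i.e. the header cannot be honest and the load should fail. */
static int dims_exceed_file(uint64_t width, uint64_t height,
                            uint64_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return 0;
    if (width > UINT64_MAX / height)
        return 1;
    uint64_t pixels = width * height;
    if (pixels > UINT64_MAX / bytes_per_pixel)
        return 1;
    return pixels * bytes_per_pixel > file_size;
}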
trim_subnode_hash(apr_hash_t **hash, int latest_any_var, apr_pool_t *scratch_pool) { if (*hash) { apr_array_header_t *to_remove = apr_array_make(scratch_pool, 0, sizeof(node_t *)); apr_hash_index_t *hi; for (hi = apr_hash_first(scratch_pool, *hash); hi; hi = apr_hash_next(hi)) { node_t *node = apr_hash_this_val(hi); if (trim_tree(node, latest_any_var, scratch_pool)) APR_ARRAY_PUSH(to_remove, node_t *) = node; } /* Are some nodes left? */ if (to_remove->nelts < apr_hash_count(*hash)) { /* Remove empty nodes (if any). */ int i; for (i = 0; i < to_remove->nelts; ++i) { node_t *node = APR_ARRAY_IDX(to_remove, i, node_t *); apr_hash_set(*hash, node->segment.data, node->segment.len, NULL); } return FALSE; } /* No nodes left. A NULL hash is more efficient than an empty one. */ *hash = NULL; } return TRUE; }
0
[ "CWE-703" ]
subversion
e1b615840932fb46aefe1cd90d2115720af4600e
169,482,277,974,431,550,000,000,000,000,000,000,000
40
Fix issue #4880 "Use-after-free of object-pools when used as httpd module" Ensure that we initialize authz again if the pool which our authz caches depend on is cleared. Apache HTTPD may run pre/post config hooks multiple times and clear its global configuration pool which our authz caching pools depend on. Reported-by: Thomas Weißschuh (thomas {at} t-8ch dot de) Thomas has also confirmed that this patch fixes the problem. * subversion/libsvn_repos/authz.c (deinit_authz): New pool cleanup handler which resets authz initialization in case the parent pool of our authz caches is cleared. (synchronized_authz_initialize): Register new pool cleanup handler. git-svn-id: https://svn.apache.org/repos/asf/subversion/trunk@1894734 13f79535-47bb-0310-9956-ffa450edef68
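Illustrative note: the fix hangs a cleanup handler on the pool that owns the authz caches, so clearing that pool also resets the "already initialized" state. A hedged sketch of the APR idiom; the flag and function names are placeholders, not Subversion's actual symbols.

#include <apr_pools.h>

static volatile int authz_initialized;      /* placeholder for cached state */

/* Runs when the parent pool is cleared or destroyed: forget the caches so
 * the next initialization starts from scratch. */
static apr_status_t deinit_state(void *data)
{
    (void)data;
    authz_initialized = 0;
    return APR_SUCCESS;
}

static void init_state_once(apr_pool_t *pool)
{
    if (!authz_initialized) {
        /* ... build caches allocated from 'pool' ... */
        apr_pool_cleanup_register(pool, NULL, deinit_state,
                                  apr_pool_cleanup_null);
        authz_initialized = 1;
    }
}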
TEST_F(Http1ClientConnectionImplTest, SimpleGet) { initialize(); MockResponseDecoder response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}}; request_encoder.encodeHeaders(headers, true); EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); }
0
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
12,100,615,649,391,940,000,000,000,000,000,000,000
13
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <[email protected]>
static MagickBooleanType IsIPL(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"data",4) == 0) return(MagickTrue); return(MagickFalse); }
0
[ "CWE-401" ]
ImageMagick6
210474b2fac6a661bfa7ed563213920e93e76395
76,973,816,167,029,700,000,000,000,000,000,000,000
8
Fix ultra rare but potential memory-leak
void jpc_qmfb_split_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = splitbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int m; int hstartrow; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc3(bufsize, JPC_QMFB_COLGRPSIZE, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartrow = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartrow : (numrows - hstartrow); m = numrows - hstartrow; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += JPC_QMFB_COLGRPSIZE; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartrow * stride]; srcptr = buf; n = m; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += JPC_QMFB_COLGRPSIZE; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } }
0
[ "CWE-119", "CWE-787" ]
jasper
4a59cfaf9ab3d48fca4a15c0d2674bf7138e3d1a
54,036,109,988,506,270,000,000,000,000,000,000,000
83
Fixed a buffer overrun problem in the QMFB code in the JPC codec that was caused by a buffer being allocated with a size that was too small in some cases. Added a new regression test case.
static int phar_tar_writeheaders_int(phar_entry_info *entry, void *argument) /* {{{ */ { tar_header header; size_t pos; struct _phar_pass_tar_info *fp = (struct _phar_pass_tar_info *)argument; char padding[512]; if (entry->is_mounted) { return ZEND_HASH_APPLY_KEEP; } if (entry->is_deleted) { if (entry->fp_refcount <= 0) { return ZEND_HASH_APPLY_REMOVE; } else { /* we can't delete this in-memory until it is closed */ return ZEND_HASH_APPLY_KEEP; } } phar_add_virtual_dirs(entry->phar, entry->filename, entry->filename_len); memset((char *) &header, 0, sizeof(header)); if (entry->filename_len > 100) { char *boundary; if (entry->filename_len > 256) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, filename \"%s\" is too long for tar file format", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } boundary = entry->filename + entry->filename_len - 101; while (*boundary && *boundary != '/') { ++boundary; } if (!*boundary || ((boundary - entry->filename) > 155)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, filename \"%s\" is too long for tar file format", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } memcpy(header.prefix, entry->filename, boundary - entry->filename); memcpy(header.name, boundary + 1, entry->filename_len - (boundary + 1 - entry->filename)); } else { memcpy(header.name, entry->filename, entry->filename_len); } phar_tar_octal(header.mode, entry->flags & PHAR_ENT_PERM_MASK, sizeof(header.mode)-1); if (FAILURE == phar_tar_octal(header.size, entry->uncompressed_filesize, sizeof(header.size)-1)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, filename \"%s\" is too large for tar file format", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } if (FAILURE == phar_tar_octal(header.mtime, entry->timestamp, sizeof(header.mtime)-1)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, file modification time of file \"%s\" is too large for tar file format", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } /* calc checksum */ header.typeflag = entry->tar_type; if (entry->link) { if (strlcpy(header.linkname, entry->link, sizeof(header.linkname)) >= sizeof(header.linkname)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, link \"%s\" is too long for format", entry->phar->fname, entry->link); } return ZEND_HASH_APPLY_STOP; } } strncpy(header.magic, "ustar", sizeof("ustar")-1); strncpy(header.version, "00", sizeof("00")-1); strncpy(header.checksum, " ", sizeof(" ")-1); entry->crc32 = phar_tar_checksum((char *)&header, sizeof(header)); if (FAILURE == phar_tar_octal(header.checksum, entry->crc32, sizeof(header.checksum)-1)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, checksum of file \"%s\" is too large for tar file format", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } /* write header */ entry->header_offset = php_stream_tell(fp->new); if (sizeof(header) != php_stream_write(fp->new, (char *) &header, sizeof(header))) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, header for file \"%s\" could not be written", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } pos = php_stream_tell(fp->new); /* save start of file within tar */ /* write contents */ if (entry->uncompressed_filesize) { if (FAILURE == 
phar_open_entry_fp(entry, fp->error, 0)) { return ZEND_HASH_APPLY_STOP; } if (-1 == phar_seek_efp(entry, 0, SEEK_SET, 0, 0)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, contents of file \"%s\" could not be written, seek failed", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } if (SUCCESS != php_stream_copy_to_stream_ex(phar_get_efp(entry, 0), fp->new, entry->uncompressed_filesize, NULL)) { if (fp->error) { spprintf(fp->error, 4096, "tar-based phar \"%s\" cannot be created, contents of file \"%s\" could not be written", entry->phar->fname, entry->filename); } return ZEND_HASH_APPLY_STOP; } memset(padding, 0, 512); php_stream_write(fp->new, padding, ((entry->uncompressed_filesize +511)&~511) - entry->uncompressed_filesize); } if (!entry->is_modified && entry->fp_refcount) { /* open file pointers refer to this fp, do not free the stream */ switch (entry->fp_type) { case PHAR_FP: fp->free_fp = 0; break; case PHAR_UFP: fp->free_ufp = 0; default: break; } } entry->is_modified = 0; if (entry->fp_type == PHAR_MOD && entry->fp != entry->phar->fp && entry->fp != entry->phar->ufp) { if (!entry->fp_refcount) { php_stream_close(entry->fp); } entry->fp = NULL; } entry->fp_type = PHAR_FP; /* note new location within tar */ entry->offset = entry->offset_abs = pos; return ZEND_HASH_APPLY_KEEP; }
0
[ "CWE-119" ]
php-src
e0f5d62bd6690169998474b62f92a8c5ddf0e699
301,866,701,083,310,860,000,000,000,000,000,000,000
151
Fix bug #77586 - phar_tar_writeheaders_int() buffer overflow
int arc4_test() { byte cipher[16]; byte plain[16]; const char* keys[] = { "\x01\x23\x45\x67\x89\xab\xcd\xef", "\x01\x23\x45\x67\x89\xab\xcd\xef", "\x00\x00\x00\x00\x00\x00\x00\x00", "\xef\x01\x23\x45" }; testVector test_arc4[] = { testVector("\x01\x23\x45\x67\x89\xab\xcd\xef", "\x75\xb7\x87\x80\x99\xe0\xc5\x96"), testVector("\x00\x00\x00\x00\x00\x00\x00\x00", "\x74\x94\xc2\xe7\x10\x4b\x08\x79"), testVector("\x00\x00\x00\x00\x00\x00\x00\x00", "\xde\x18\x89\x41\xa3\x37\x5d\x3a"), testVector("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf\xbd\x61") }; int times( sizeof(test_arc4) / sizeof(testVector) ); for (int i = 0; i < times; ++i) { ARC4::Encryption enc; ARC4::Decryption dec; enc.SetKey((byte*)keys[i], (word32)strlen(keys[i])); dec.SetKey((byte*)keys[i], (word32)strlen(keys[i])); enc.Process(cipher, test_arc4[i].input_, test_arc4[i].outLen_); dec.Process(plain, cipher, test_arc4[i].outLen_); if (memcmp(plain, test_arc4[i].input_, test_arc4[i].outLen_)) return -30 - i; if (memcmp(cipher, test_arc4[i].output_, test_arc4[i].outLen_)) return -40 - i; } return 0; }
0
[]
mysql-server
5c6169fb309981b564a17bee31b367a18866d674
294,967,827,942,538,200,000,000,000,000,000,000,000
46
Bug #24740291: YASSL UPDATE TO 2.4.2
d_lite_strftime(int argc, VALUE *argv, VALUE self) { return date_strftime_internal(argc, argv, self, "%Y-%m-%d", set_tmx); }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
34,483,183,775,052,220,000,000,000,000,000,000,000
5
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
static int64_t wav_get_length(pcm_reader_t *reader) { return ((wav_reader_t *)reader)->length; }
0
[ "CWE-703" ]
fdkaac
4ec1422bd951a137225ffa4052da120e2ab0a0f4
73,121,943,117,450,860,000,000,000,000,000,000,000
4
wav/caf parser: ensure fmt/desc chunk fixes https://github.com/nu774/fdkaac/issues/52
DeepTiledInputFile::tileXSize () const { return _data->tileDesc.xSize; }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
275,886,701,734,618,100,000,000,000,000,000,000,000
4
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "socket:", &sockfs_ops, &sockfs_dentry_operations, SOCKFS_MAGIC); }
0
[ "CWE-264" ]
net
4de930efc23b92ddf88ce91c405ee645fe6e27ea
218,491,091,403,350,920,000,000,000,000,000,000,000
6
net: validate the range we feed to iov_iter_init() in sys_sendto/sys_recvfrom Cc: [email protected] # v3.19 Signed-off-by: Al Viro <[email protected]> Signed-off-by: David S. Miller <[email protected]>
rfbSendFramebufferUpdate(rfbClientPtr cl, sraRegionPtr givenUpdateRegion) { sraRectangleIterator* i=NULL; sraRect rect; int nUpdateRegionRects; rfbFramebufferUpdateMsg *fu = (rfbFramebufferUpdateMsg *)cl->updateBuf; sraRegionPtr updateRegion, updateCopyRegion, tmpRegion, cursorRegion; sraRect cursorBounds = { 0, 0, 0, 0 }; int dx, dy; rfbBool sendCursorShape = FALSE; rfbBool sendCursorPos = FALSE; rfbBool cursorIsDrawn = FALSE; /* * If framebuffer size was changed and the client supports NewFBSize * encoding, just send NewFBSize marker and return. */ if (cl->useNewFBSize && cl->newFBSizePending) { LOCK(cl->updateMutex); cl->newFBSizePending = FALSE; UNLOCK(cl->updateMutex); cl->rfbFramebufferUpdateMessagesSent++; fu->type = rfbFramebufferUpdate; fu->nRects = Swap16IfLE(1); cl->ublen = sz_rfbFramebufferUpdateMsg; if (!rfbSendNewFBSize(cl, cl->screen->width, cl->screen->height)) { return FALSE; } return rfbSendUpdateBuf(cl); } LOCK(cl->updateMutex); /* * The modifiedRegion may overlap the destination copyRegion. We remove * any overlapping bits from the copyRegion (since they'd only be * overwritten anyway). */ sraRgnSubtract(cl->copyRegion,cl->modifiedRegion); updateRegion = sraRgnCreateRgn(givenUpdateRegion); sraRgnOr(updateRegion,cl->copyRegion); /* * If the client doesn't support cursor updates, we want to draw * the cursor locally, send the update and then undraw it. * We only want to do this if the cursor has moved/changed or * if it is contained in updateRegion * * We also want to send an update for the region where the cursor * was last drawn, again only if the cursor has moved/changed. */ LOCK(cl->screen->cursorMutex); cursorRegion = NULL; if (cl->enableCursorUpdates) { sendCursorShape = cl->cursorWasChanged; sendCursorPos = cl->cursorWasMoved; } else { sraRegionPtr lastDrawnCursorRegion; if (rfbGetCursorBounds(cl->screen, &cursorBounds)) { cursorRegion = sraRgnCreateRect(cursorBounds.x1, cursorBounds.y1, cursorBounds.x2, cursorBounds.y2); } lastDrawnCursorRegion = sraRgnCreateRect(cl->lastDrawnCursorBounds.x1, cl->lastDrawnCursorBounds.y1, cl->lastDrawnCursorBounds.x2, cl->lastDrawnCursorBounds.y2); if (cursorRegion) { sraRgnOr(cursorRegion, lastDrawnCursorRegion); sraRgnDestroy(lastDrawnCursorRegion); } else { cursorRegion = lastDrawnCursorRegion; } if (cl->cursorWasChanged || cl->cursorWasMoved) sraRgnOr(updateRegion, cursorRegion); } /* * The client is interested in the region requestedRegion. The region * which should be updated now is the intersection of requestedRegion * and the union of modifiedRegion and copyRegion. If it's empty then * no update is needed. */ if (!sraRgnAnd(updateRegion, cl->requestedRegion) && !sendCursorShape && !sendCursorPos) { sraRgnDestroy(updateRegion); if (cursorRegion) sraRgnDestroy(cursorRegion); UNLOCK(cl->updateMutex); UNLOCK(cl->cursorMutex); return TRUE; } /* * Put up the cursor if any part of it is in updateRegion. */ if (cursorRegion) { tmpRegion = sraRgnCreateRgn(cursorRegion); if (sraRgnAnd(tmpRegion, updateRegion)) { rfbDrawCursor(cl->screen, &cursorBounds); cursorIsDrawn = TRUE; cl->cursorWasMoved = FALSE; cl->cursorWasChanged = FALSE; cl->lastDrawnCursorBounds = cursorBounds; sraRgnOr(cl->modifiedRegion, cursorRegion); sraRgnOr(updateRegion, cursorRegion); sraRgnAnd(updateRegion, cl->requestedRegion); } sraRgnDestroy(tmpRegion); sraRgnDestroy(cursorRegion); } /* * We assume that the client doesn't have any pixel data outside the * requestedRegion. 
In other words, both the source and destination of a * copy must lie within requestedRegion. So the region we can send as a * copy is the intersection of the copyRegion with both the requestedRegion * and the requestedRegion translated by the amount of the copy. We set * updateCopyRegion to this. */ updateCopyRegion = sraRgnCreateRgn(cl->copyRegion); sraRgnAnd(updateCopyRegion,cl->requestedRegion); tmpRegion = sraRgnCreateRgn(cl->requestedRegion); sraRgnOffset(tmpRegion,cl->copyDX,cl->copyDY); sraRgnAnd(updateCopyRegion,tmpRegion); sraRgnDestroy(tmpRegion); dx = cl->copyDX; dy = cl->copyDY; /* * Next we remove updateCopyRegion from updateRegion so that updateRegion * is the part of this update which is sent as ordinary pixel data (i.e not * a copy). */ sraRgnSubtract(updateRegion,updateCopyRegion); /* * Finally we leave modifiedRegion to be the remainder (if any) of parts of * the screen which are modified but outside the requestedRegion. We also * empty both the requestedRegion and the copyRegion - note that we never * carry over a copyRegion for a future update. */ sraRgnOr(cl->modifiedRegion,cl->copyRegion); sraRgnSubtract(cl->modifiedRegion,updateRegion); sraRgnSubtract(cl->modifiedRegion,updateCopyRegion); sraRgnMakeEmpty(cl->requestedRegion); sraRgnMakeEmpty(cl->copyRegion); cl->copyDX = 0; cl->copyDY = 0; UNLOCK(cl->updateMutex); /* * Now send the update. */ cl->rfbFramebufferUpdateMessagesSent++; if (cl->preferredEncoding == rfbEncodingCoRRE) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; nUpdateRegionRects += (((w-1) / cl->correMaxWidth + 1) * ((h-1) / cl->correMaxHeight + 1)); } sraRgnReleaseIterator(i); #ifdef HAVE_LIBZ } else if (cl->preferredEncoding == rfbEncodingZlib) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; nUpdateRegionRects += (((h-1) / (ZLIB_MAX_SIZE( w ) / w)) + 1); } #ifdef HAVE_LIBJPEG } else if (cl->preferredEncoding == rfbEncodingTight) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int n = rfbNumCodedRectsTight(cl, x, y, w, h); if (n == 0) { nUpdateRegionRects = 0xFFFF; break; } nUpdateRegionRects += n; } sraRgnReleaseIterator(i); #endif #endif } else { nUpdateRegionRects = sraRgnCountRects(updateRegion); } fu->type = rfbFramebufferUpdate; if (nUpdateRegionRects != 0xFFFF) { if(cl->screen->maxRectsPerUpdate>0 /* CoRRE splits the screen into smaller squares */ && cl->preferredEncoding != rfbEncodingCoRRE #ifdef HAVE_LIBZ /* Zlib encoding splits rectangles up into smaller chunks */ && cl->preferredEncoding != rfbEncodingZlib #ifdef HAVE_LIBJPEG /* Tight encoding counts the rectangles differently */ && cl->preferredEncoding != rfbEncodingTight #endif #endif /* HAVE_LIBZ */ && nUpdateRegionRects>cl->screen->maxRectsPerUpdate) { sraRegion* newUpdateRegion = sraRgnBBox(updateRegion); sraRgnDestroy(updateRegion); updateRegion = newUpdateRegion; nUpdateRegionRects = sraRgnCountRects(updateRegion); } fu->nRects = Swap16IfLE((uint16_t)(sraRgnCountRects(updateCopyRegion) + nUpdateRegionRects + !!sendCursorShape + !!sendCursorPos)); } else { fu->nRects = 0xFFFF; } cl->ublen = sz_rfbFramebufferUpdateMsg; UNLOCK(cl->cursorMutex); if (sendCursorShape) { cl->cursorWasChanged = FALSE; if 
(!rfbSendCursorShape(cl)) goto tx_error; } if (sendCursorPos) { cl->cursorWasMoved = FALSE; if (!rfbSendCursorPos(cl)) goto tx_error; } if (!sraRgnEmpty(updateCopyRegion)) { if (!rfbSendCopyRegion(cl,updateCopyRegion,dx,dy)) goto tx_error; } for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; cl->rfbRawBytesEquivalent += (sz_rfbFramebufferUpdateRectHeader + w * (cl->format.bitsPerPixel / 8) * h); switch (cl->preferredEncoding) { case rfbEncodingRaw: if (!rfbSendRectEncodingRaw(cl, x, y, w, h)) goto tx_error; break; case rfbEncodingRRE: if (!rfbSendRectEncodingRRE(cl, x, y, w, h)) goto tx_error; break; case rfbEncodingCoRRE: if (!rfbSendRectEncodingCoRRE(cl, x, y, w, h)) goto tx_error; break; case rfbEncodingHextile: if (!rfbSendRectEncodingHextile(cl, x, y, w, h)) goto tx_error; break; #ifdef HAVE_LIBZ case rfbEncodingZlib: if (!rfbSendRectEncodingZlib(cl, x, y, w, h)) goto tx_error; break; #ifdef HAVE_LIBJPEG case rfbEncodingTight: if (!rfbSendRectEncodingTight(cl, x, y, w, h)) goto tx_error; break; #endif #endif #ifdef HAVE_LIBZ case rfbEncodingZRLE: if (!rfbSendRectEncodingZRLE(cl, x, y, w, h)) goto tx_error; break; #endif } } if (cursorIsDrawn) rfbUndrawCursor(cl->screen, &cursorBounds); if ( nUpdateRegionRects == 0xFFFF && !rfbSendLastRectMarker(cl) ) goto tx_error; if (!rfbSendUpdateBuf(cl)) goto tx_error; sraRgnReleaseIterator(i); sraRgnDestroy(updateCopyRegion); sraRgnDestroy(updateRegion); return TRUE; tx_error: if (cursorIsDrawn) rfbUndrawCursor(cl->screen, &cursorBounds); if (i) sraRgnReleaseIterator(i); sraRgnDestroy(updateCopyRegion); sraRgnDestroy(updateRegion); return FALSE; }
1
[ "CWE-119" ]
vino
dff52694a384fe95195f2211254026b752d63ec4
18,477,487,212,014,695,000,000,000,000,000,000,000
344
Avoid out-of-bounds memory accesses This fixes two critical security vulnerabilities that lead to an out-of-bounds memory access with a crafted client framebuffer update request packet. The dimensions of the update from the packet are checked to ensure that they are within the screen dimensions. Thanks to Kevin Chen from the Bitblaze group for the reports in bugs 641802 and 641803. The CVE identifiers for these vulnerabilities are CVE-2011-0904 and CVE-2011-0905.
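Illustrative note: the fix validates the client-supplied update rectangle against the real framebuffer before any pixel access; a hedged sketch of such a check with generic names, not the vino code.

/* Accept a client framebuffer-update request only if the rectangle lies
 * entirely inside the server's screen. */
static int update_rect_is_valid(int x, int y, int w, int h,
                                int screen_width, int screen_height)
{
    if (x < 0 || y < 0 || w <= 0 || h <= 0)
        return 0;
    if (x > screen_width - w || y > screen_height - h)
        return 0;               /* phrased to avoid overflowing x + w */
    return 1;
}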
sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) { SDHCIState *s = (SDHCIState *)opaque; unsigned shift = 8 * (offset & 0x3); uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); uint32_t value = val; value <<= shift; switch (offset & ~0x3) { case SDHC_SYSAD: s->sdmasysad = (s->sdmasysad & mask) | value; MASKED_WRITE(s->sdmasysad, mask, value); /* Writing to last byte of sdmasysad might trigger transfer */ if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt && s->blksize && SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) { if (s->trnmod & SDHC_TRNS_MULTI) { sdhci_sdma_transfer_multi_blocks(s); } else { sdhci_sdma_transfer_single_block(s); } } break; case SDHC_BLKSIZE: if (!TRANSFERRING_DATA(s->prnsts)) { MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12)); MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); } /* Limit block size to the maximum buffer size */ if (extract32(s->blksize, 0, 12) > s->buf_maxsz) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " "the maximum buffer 0x%x\n", __func__, s->blksize, s->buf_maxsz); s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz); } break; case SDHC_ARGUMENT: MASKED_WRITE(s->argument, mask, value); break; case SDHC_TRNMOD: /* DMA can be enabled only if it is supported as indicated by * capabilities register */ if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { value &= ~SDHC_TRNS_DMA; } MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); /* Writing to the upper byte of CMDREG triggers SD command generation */ if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { break; } sdhci_send_command(s); break; case SDHC_BDATA: if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { sdhci_write_dataport(s, value >> shift, size); } break; case SDHC_HOSTCTL: if (!(mask & 0xFF0000)) { sdhci_blkgap_write(s, value >> 16); } MASKED_WRITE(s->hostctl1, mask, value); MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { s->pwrcon &= ~SDHC_POWER_ON; } break; case SDHC_CLKCON: if (!(mask & 0xFF000000)) { sdhci_reset_write(s, value >> 24); } MASKED_WRITE(s->clkcon, mask, value); MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); if (s->clkcon & SDHC_CLOCK_INT_EN) { s->clkcon |= SDHC_CLOCK_INT_STABLE; } else { s->clkcon &= ~SDHC_CLOCK_INT_STABLE; } break; case SDHC_NORINTSTS: if (s->norintstsen & SDHC_NISEN_CARDINT) { value &= ~SDHC_NIS_CARDINT; } s->norintsts &= mask | ~value; s->errintsts &= (mask >> 16) | ~(value >> 16); if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; } sdhci_update_irq(s); break; case SDHC_NORINTSTSEN: MASKED_WRITE(s->norintstsen, mask, value); MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); s->norintsts &= s->norintstsen; s->errintsts &= s->errintstsen; if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; } /* Quirk for Raspberry Pi: pending card insert interrupt * appears when first enabled after power on */ if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { assert(s->pending_insert_quirk); s->norintsts |= SDHC_NIS_INSERT; s->pending_insert_state = false; } sdhci_update_irq(s); break; case SDHC_NORINTSIGEN: MASKED_WRITE(s->norintsigen, mask, value); MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); sdhci_update_irq(s); break; case SDHC_ADMAERR: 
MASKED_WRITE(s->admaerr, mask, value); break; case SDHC_ADMASYSADDR: s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | (uint64_t)mask)) | (uint64_t)value; break; case SDHC_ADMASYSADDR + 4: s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | ((uint64_t)mask << 32))) | ((uint64_t)value << 32); break; case SDHC_FEAER: s->acmd12errsts |= value; s->errintsts |= (value >> 16) & s->errintstsen; if (s->acmd12errsts) { s->errintsts |= SDHC_EIS_CMD12ERR; } if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } sdhci_update_irq(s); break; case SDHC_ACMD12ERRSTS: MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX); if (s->uhs_mode >= UHS_I) { MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16); if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) { sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V); } else { sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V); } } break; case SDHC_CAPAB: case SDHC_CAPAB + 4: case SDHC_MAXCURR: case SDHC_MAXCURR + 4: qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x read-only\n", size, offset, value >> shift); break; default: qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x " "not implemented\n", size, offset, value >> shift); break; } trace_sdhci_access("wr", size << 3, offset, "<-", value >> shift, value >> shift); }
0
[ "CWE-119" ]
qemu
dfba99f17feb6d4a129da19d38df1bcd8579d1c3
5,296,713,502,602,658,500,000,000,000,000,000,000
174
hw/sd/sdhci: Fix DMA Transfer Block Size field The 'Transfer Block Size' field is 12-bit wide. See section '2.2.2. Block Size Register (Offset 004h)' in datasheet. Two different bug reproducer available: - https://bugs.launchpad.net/qemu/+bug/1892960 - https://ruhr-uni-bochum.sciebo.de/s/NNWP2GfwzYKeKwE?path=%2Fsdhci_oob_write1 Cc: [email protected] Buglink: https://bugs.launchpad.net/qemu/+bug/1892960 Fixes: d7dfca0807a ("hw/sdhci: introduce standard SD host controller") Reported-by: Alexander Bulekov <[email protected]> Signed-off-by: Philippe Mathieu-Daudé <[email protected]> Reviewed-by: Prasad J Pandit <[email protected]> Tested-by: Alexander Bulekov <[email protected]> Message-Id: <[email protected]>
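The fix described in this commit message hinges on treating the Block Size register as a 12-bit field and clamping guest writes that exceed the controller's bounce buffer. A minimal stand-alone sketch of that idea, assuming hypothetical field_extract/field_deposit helpers (they mirror the intent of QEMU's extract32/deposit32, not their actual implementations):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for QEMU's extract32()/deposit32() helpers. */
static uint32_t field_extract(uint32_t value, unsigned start, unsigned length)
{
    return (value >> start) & ((1u << length) - 1u);
}

static uint32_t field_deposit(uint32_t value, unsigned start, unsigned length,
                              uint32_t field)
{
    uint32_t mask = ((1u << length) - 1u) << start;
    return (value & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint32_t blksize = 0x7fff;      /* guest-written register value */
    uint32_t buf_maxsz = 0x200;     /* assumed maximum bounce-buffer size */

    /* Only bits [11:0] carry the transfer block size. */
    if (field_extract(blksize, 0, 12) > buf_maxsz) {
        blksize = field_deposit(blksize, 0, 12, buf_maxsz);
    }
    printf("clamped block size: 0x%x\n", (unsigned)field_extract(blksize, 0, 12));
    return 0;
}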
add_card(struct pci_dev *dev, const struct pci_device_id *unused) { struct pcilynx *lynx; u32 p, end; int ret, i; if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { dev_err(&dev->dev, "DMA address limits not supported for PCILynx hardware\n"); return -ENXIO; } if (pci_enable_device(dev)) { dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); return -ENXIO; } pci_set_master(dev); lynx = kzalloc(sizeof *lynx, GFP_KERNEL); if (lynx == NULL) { dev_err(&dev->dev, "Failed to allocate control structure\n"); ret = -ENOMEM; goto fail_disable; } lynx->pci_device = dev; pci_set_drvdata(dev, lynx); spin_lock_init(&lynx->client_list_lock); INIT_LIST_HEAD(&lynx->client_list); kref_init(&lynx->kref); lynx->registers = ioremap(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); if (lynx->registers == NULL) { dev_err(&dev->dev, "Failed to map registers\n"); ret = -ENOMEM; goto fail_deallocate_lynx; } lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_start_pcl_bus); lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_pcl_bus); lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); if (lynx->rcv_start_pcl == NULL || lynx->rcv_pcl == NULL || lynx->rcv_buffer == NULL) { dev_err(&dev->dev, "Failed to allocate receive buffer\n"); ret = -ENOMEM; goto fail_deallocate_buffers; } lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); lynx->rcv_pcl->buffer[0].control = cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); lynx->rcv_pcl->buffer[0].pointer = cpu_to_le32(lynx->rcv_buffer_bus + 4); p = lynx->rcv_buffer_bus + 2048; end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; for (i = 1; p < end; i++, p += 2048) { lynx->rcv_pcl->buffer[i].control = cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p); } lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); /* Fix buggy cards with autoboot pin not tied low: */ reg_write(lynx, DMA0_CHAN_CTRL, 0); reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); #if 0 /* now, looking for PHY register set */ if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { lynx->phyic.reg_1394a = 1; PRINT(KERN_INFO, lynx->id, "found 1394a conform PHY (using extended register set)"); lynx->phyic.vendor = get_phy_vendorid(lynx); lynx->phyic.product = get_phy_productid(lynx); } else { lynx->phyic.reg_1394a = 0; PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); } #endif /* Setup the general receive FIFO max size. */ reg_write(lynx, FIFO_SIZES, 255); reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | LINK_INT_AT_STUCK | LINK_INT_SNTRJ | LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); /* Disable the L flag in self ID packets. 
*/ set_phy_reg(lynx, 4, 0); /* Put this baby into snoop mode */ reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); if (request_irq(dev->irq, irq_handler, IRQF_SHARED, driver_name, lynx)) { dev_err(&dev->dev, "Failed to allocate shared interrupt %d\n", dev->irq); ret = -EIO; goto fail_deallocate_buffers; } lynx->misc.parent = &dev->dev; lynx->misc.minor = MISC_DYNAMIC_MINOR; lynx->misc.name = "nosy"; lynx->misc.fops = &nosy_ops; mutex_lock(&card_mutex); ret = misc_register(&lynx->misc); if (ret) { dev_err(&dev->dev, "Failed to register misc char device\n"); mutex_unlock(&card_mutex); goto fail_free_irq; } list_add_tail(&lynx->link, &card_list); mutex_unlock(&card_mutex); dev_info(&dev->dev, "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); return 0; fail_free_irq: reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); fail_deallocate_buffers: if (lynx->rcv_start_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); if (lynx->rcv_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); if (lynx->rcv_buffer) pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); fail_deallocate_lynx: kfree(lynx); fail_disable: pci_disable_device(dev); return ret; }
0
[ "CWE-416" ]
linux
829933ef05a951c8ff140e814656d73e74915faf
299,260,390,002,095,100,000,000,000,000,000,000,000
159
firewire: nosy: Fix a use-after-free bug in nosy_ioctl() For each device, the nosy driver allocates a pcilynx structure. A use-after-free might happen in the following scenario: 1. Open nosy device for the first time and call ioctl with command NOSY_IOC_START, then a new client A will be malloced and added to doubly linked list. 2. Open nosy device for the second time and call ioctl with command NOSY_IOC_START, then a new client B will be malloced and added to doubly linked list. 3. Call ioctl with command NOSY_IOC_START for client A, then client A will be readded to the doubly linked list. Now the doubly linked list is messed up. 4. Close the first nosy device and nosy_release will be called. In nosy_release, client A will be unlinked and freed. 5. Close the second nosy device, and client A will be referenced, resulting in UAF. The root cause of this bug is that the element in the doubly linked list is reentered into the list. Fix this bug by adding a check before inserting a client. If a client is already in the linked list, don't insert it. The following KASAN report reveals it: BUG: KASAN: use-after-free in nosy_release+0x1ea/0x210 Write of size 8 at addr ffff888102ad7360 by task poc CPU: 3 PID: 337 Comm: poc Not tainted 5.12.0-rc5+ #6 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014 Call Trace: nosy_release+0x1ea/0x210 __fput+0x1e2/0x840 task_work_run+0xe8/0x180 exit_to_user_mode_prepare+0x114/0x120 syscall_exit_to_user_mode+0x1d/0x40 entry_SYSCALL_64_after_hwframe+0x44/0xae Allocated by task 337: nosy_open+0x154/0x4d0 misc_open+0x2ec/0x410 chrdev_open+0x20d/0x5a0 do_dentry_open+0x40f/0xe80 path_openat+0x1cf9/0x37b0 do_filp_open+0x16d/0x390 do_sys_openat2+0x11d/0x360 __x64_sys_open+0xfd/0x1a0 do_syscall_64+0x33/0x40 entry_SYSCALL_64_after_hwframe+0x44/0xae Freed by task 337: kfree+0x8f/0x210 nosy_release+0x158/0x210 __fput+0x1e2/0x840 task_work_run+0xe8/0x180 exit_to_user_mode_prepare+0x114/0x120 syscall_exit_to_user_mode+0x1d/0x40 entry_SYSCALL_64_after_hwframe+0x44/0xae The buggy address belongs to the object at ffff888102ad7300 which belongs to the cache kmalloc-128 of size 128 The buggy address is located 96 bytes inside of 128-byte region [ffff888102ad7300, ffff888102ad7380) [ Modified to use 'list_empty()' inside proper lock - Linus ] Link: https://lore.kernel.org/lkml/[email protected]/ Reported-and-tested-by: 马哲宇 (Zheyu Ma) <[email protected]> Signed-off-by: Zheyu Ma <[email protected]> Cc: Greg Kroah-Hartman <[email protected]> Cc: Stefan Richter <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
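The commit message boils down to refusing to re-insert a node that is already linked, so the list can never be corrupted into a state that later frees a live element. A small doubly-linked-list sketch showing that guard; the struct and function names are made up for illustration and are not the kernel's list.h API:

#include <stdio.h>

struct node {
    struct node *prev, *next;
};

/* A node is considered unlinked when it points at itself. */
static void node_init(struct node *n) { n->prev = n->next = n; }
static int node_is_unlinked(const struct node *n) { return n->next == n; }

static void list_add_tail(struct node *n, struct node *head)
{
    /* Guard against double insertion, which would corrupt the list
     * and later lead to a use-after-free on removal. */
    if (!node_is_unlinked(n))
        return;
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

int main(void)
{
    struct node head, a;
    node_init(&head);
    node_init(&a);
    list_add_tail(&a, &head);
    list_add_tail(&a, &head);   /* second insert is ignored */
    printf("list holds exactly one element: %d\n",
           head.next == &a && a.next == &head);
    return 0;
}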
uint32_t FUN_080bb148(uint32_t *dst, uint32_t *src, uint16_t src_len) { void *retaddr = __builtin_extract_return_addr(__builtin_return_address(0)); // determine the right buffer size based on the function that called us uint32_t dst_len = 0; if (memcmp(retaddr,"\x66\x89\x85\xec\xfb\xff\xff",7) == 0) { // MOV word ptr [EBP + local_418],AX (process_fmt()) dst_len = 1024; } else if (memcmp(retaddr,"\x83\xc4\x0c",3) == 0) { // ADD ESP, 0xc (fmt_cell_combine()) dst_len = 256; } uint32_t *dptr, *dst_end = dst + dst_len/4; for (dptr = dst; src_len >= 4 && dptr < dst_end; dptr++) { *dptr = *src++; src_len -= 4; if (*dptr > 0x80000000) { *dptr &= 0x7fffffff; if (src_len != 0) { uint8_t *sptr = (uint8_t *)src; uint8_t reps = *sptr; if (dptr + reps + 1 >= dst_end) { // prevent overflow break; } memdup(dptr, 4, reps*4 + 4); src_len -= 1; dptr += reps; src = (uint32_t *)(sptr+1); } } } return dptr - dst; }
0
[ "CWE-787" ]
123elf
92738c435690ae467ecc1f99d2bcea56f198205a
328,020,374,560,907,750,000,000,000,000,000,000,000
35
Reimplementation of function at 0x80bb148 that prevents overflowing the destination buffer. - Adds symbol FUN_80bb148 using objcopy --add-symbol - Adds it to undefine.lst so it can be replaced - Replaces it with a function that stops copying if the destination buffer is full. The size is determined based on the calling function.
gsd_xrandr_manager_finalize (GObject *object) { GsdXrandrManager *xrandr_manager; g_return_if_fail (object != NULL); g_return_if_fail (GSD_IS_XRANDR_MANAGER (object)); xrandr_manager = GSD_XRANDR_MANAGER (object); g_return_if_fail (xrandr_manager->priv != NULL); G_OBJECT_CLASS (gsd_xrandr_manager_parent_class)->finalize (object); }
0
[]
gnome-settings-daemon
be513b3c7d80d0b7013d79ce46d7eeca929705cc
42,300,789,536,489,006,000,000,000,000,000,000,000
13
Implement autoconfiguration of the outputs. This is similar in spirit to 'xrandr --auto', but we disfavor selecting clone modes. Instead, we lay out the outputs left-to-right. Signed-off-by: Federico Mena Quintero <[email protected]>

add_bwrap_wrapper (FlatpakBwrap *bwrap, const char *app_info_path, GError **error) { glnx_autofd int app_info_fd = -1; g_auto(GLnxDirFdIterator) dir_iter = { 0 }; struct dirent *dent; g_autofree char *user_runtime_dir = flatpak_get_real_xdg_runtime_dir (); g_autofree char *proxy_socket_dir = g_build_filename (user_runtime_dir, ".dbus-proxy/", NULL); app_info_fd = open (app_info_path, O_RDONLY | O_CLOEXEC); if (app_info_fd == -1) return glnx_throw_errno_prefix (error, _("Failed to open app info file")); if (!glnx_dirfd_iterator_init_at (AT_FDCWD, "/", FALSE, &dir_iter, error)) return FALSE; flatpak_bwrap_add_arg (bwrap, flatpak_get_bwrap ()); while (TRUE) { glnx_autofd int o_path_fd = -1; struct statfs stfs; if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dir_iter, &dent, NULL, error)) return FALSE; if (dent == NULL) break; if (strcmp (dent->d_name, ".flatpak-info") == 0) continue; /* O_PATH + fstatfs is the magic that we need to statfs without automounting the target */ o_path_fd = openat (dir_iter.fd, dent->d_name, O_PATH | O_NOFOLLOW | O_CLOEXEC); if (o_path_fd == -1 || fstatfs (o_path_fd, &stfs) != 0 || stfs.f_type == AUTOFS_SUPER_MAGIC) continue; /* AUTOFS mounts are risky and can cause us to block (see issue #1633), so ignore it. Its unlikely the proxy needs such a directory. */ if (dent->d_type == DT_DIR) { if (strcmp (dent->d_name, "tmp") == 0 || strcmp (dent->d_name, "var") == 0 || strcmp (dent->d_name, "run") == 0) flatpak_bwrap_add_arg (bwrap, "--bind"); else flatpak_bwrap_add_arg (bwrap, "--ro-bind"); flatpak_bwrap_add_arg_printf (bwrap, "/%s", dent->d_name); flatpak_bwrap_add_arg_printf (bwrap, "/%s", dent->d_name); } else if (dent->d_type == DT_LNK) { g_autofree gchar *target = NULL; target = glnx_readlinkat_malloc (dir_iter.fd, dent->d_name, NULL, error); if (target == NULL) return FALSE; flatpak_bwrap_add_args (bwrap, "--symlink", target, NULL); flatpak_bwrap_add_arg_printf (bwrap, "/%s", dent->d_name); } } flatpak_bwrap_add_args (bwrap, "--bind", proxy_socket_dir, proxy_socket_dir, NULL); /* This is a file rather than a bind mount, because it will then not be unmounted from the namespace when the namespace dies. */ flatpak_bwrap_add_args_data_fd (bwrap, "--file", glnx_steal_fd (&app_info_fd), "/.flatpak-info"); if (!flatpak_bwrap_bundle_args (bwrap, 1, -1, FALSE, error)) return FALSE; return TRUE; }
0
[ "CWE-94", "CWE-74" ]
flatpak
6d1773d2a54dde9b099043f07a2094a4f1c2f486
106,936,580,798,963,560,000,000,000,000,000,000,000
74
run: Convert all environment variables into bwrap arguments This avoids some of them being filtered out by a setuid bwrap. It also means that if they came from an untrusted source, they cannot be used to inject arbitrary code into a non-setuid bwrap via mechanisms like LD_PRELOAD. Because they get bundled into a memfd or temporary file, they do not actually appear in argv, ensuring that they remain inaccessible to processes running under a different uid (which is important if their values are tokens or other secrets). Signed-off-by: Simon McVittie <[email protected]> Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
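The idea in this message — turn each inherited environment variable into an explicit argument pair instead of letting a setuid bwrap filter or leak the environment — can be sketched in plain C. The builder below is only illustrative (it is not flatpak's FlatpakBwrap API); it just shows the env-to-argv conversion that would then be bundled via --args/memfd:

#include <stdio.h>
#include <string.h>

extern char **environ;

int main(void)
{
    /* Emit a "--setenv NAME VALUE" triplet for every inherited variable. */
    for (char **e = environ; *e != NULL; e++) {
        char *eq = strchr(*e, '=');
        if (eq == NULL)
            continue;
        size_t name_len = (size_t)(eq - *e);
        /* In a real launcher these strings would be appended to the bwrap
         * argument list; here we just print them. */
        printf("--setenv %.*s %s\n", (int)name_len, *e, eq + 1);
    }
    return 0;
}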
static void flush_dpb(AVCodecContext *avctx) { H264Context *h = avctx->priv_data; int i; for (i = 0; i <= MAX_DELAYED_PIC_COUNT; i++) { if (h->delayed_pic[i]) h->delayed_pic[i]->reference = 0; h->delayed_pic[i] = NULL; } flush_change(h); if (h->DPB) for (i = 0; i < MAX_PICTURE_COUNT; i++) unref_picture(h, &h->DPB[i]); h->cur_pic_ptr = NULL; unref_picture(h, &h->cur_pic); h->mb_x = h->mb_y = 0; h->parse_context.state = -1; h->parse_context.frame_start_found = 0; h->parse_context.overread = 0; h->parse_context.overread_index = 0; h->parse_context.index = 0; h->parse_context.last_index = 0; }
0
[ "CWE-703" ]
FFmpeg
29ffeef5e73b8f41ff3a3f2242d356759c66f91f
173,017,244,576,667,500,000,000,000,000,000,000,000
28
avcodec/h264: do not trust last_pic_droppable when marking pictures as done This simplifies the code and fixes a deadlock Fixes Ticket2927 Signed-off-by: Michael Niedermayer <[email protected]>
void ssl_set_session_ticket_lifetime( ssl_context *ssl, int lifetime ) { ssl->ticket_lifetime = lifetime; }
0
[ "CWE-119" ]
mbedtls
c988f32adde62a169ba340fee0da15aecd40e76e
335,308,838,680,255,100,000,000,000,000,000,000,000
4
Added max length checking of hostname
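The one-line message ("Added max length checking of hostname") corresponds to validating the caller-supplied hostname length before storing it. A hedged sketch of such a setter; MAX_HOST_NAME_LEN and the context layout are assumptions for illustration, not the real mbed TLS definitions:

#include <stdlib.h>
#include <string.h>

#define MAX_HOST_NAME_LEN 255   /* assumed limit, cf. RFC 1035 */

struct tls_ctx {
    char *hostname;
};

/* Returns 0 on success, -1 if the name is too long or allocation fails. */
static int tls_set_hostname(struct tls_ctx *ctx, const char *hostname)
{
    size_t len = strlen(hostname);

    if (len > MAX_HOST_NAME_LEN)
        return -1;              /* reject instead of overflowing a buffer */

    char *copy = malloc(len + 1);
    if (copy == NULL)
        return -1;
    memcpy(copy, hostname, len + 1);

    free(ctx->hostname);
    ctx->hostname = copy;
    return 0;
}

int main(void)
{
    struct tls_ctx ctx = { 0 };
    int rc = tls_set_hostname(&ctx, "example.org");
    free(ctx.hostname);
    return rc == 0 ? 0 : 1;
}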
_extend_find_entry( netsnmp_request_info *request, netsnmp_table_request_info *table_info, int mode ) { netsnmp_extend *eptr; extend_registration_block *ereg; unsigned int line_idx; oid oid_buf[MAX_OID_LEN]; int oid_len; int i; char *token; size_t token_len; if (!request || !table_info || !table_info->indexes || !table_info->indexes->next_variable) { DEBUGMSGTL(( "nsExtendTable:output2", "invalid invocation\n")); return NULL; } ereg = _find_extension_block( request->requestvb->name, request->requestvb->name_length ); /*** * GET handling - find the exact entry being requested ***/ if ( mode == MODE_GET ) { DEBUGMSGTL(( "nsExtendTable:output2", "GET: %s / %ld\n ", table_info->indexes->val.string, *table_info->indexes->next_variable->val.integer)); for ( eptr = ereg->ehead; eptr; eptr = eptr->next ) { if ( !strcmp( eptr->token, (char *) table_info->indexes->val.string )) break; } if ( eptr ) { /* * Ensure the output is available... */ if (!(eptr->flags & NS_EXTEND_FLAGS_ACTIVE) || (netsnmp_cache_check_and_reload( eptr->cache ) < 0 )) return NULL; /* * ...and check the line requested is valid */ line_idx = *table_info->indexes->next_variable->val.integer; if (line_idx < 1 || line_idx > eptr->numlines) return NULL; } } /*** * GETNEXT handling - find the first suitable entry ***/ else { if (!table_info->indexes->val_len ) { DEBUGMSGTL(( "nsExtendTable:output2", "GETNEXT: first entry\n")); /* * Beginning of the table - find the first active * (and successful) entry, and use the first line of it */ for (eptr = ereg->ehead; eptr; eptr = eptr->next ) { if ((eptr->flags & NS_EXTEND_FLAGS_ACTIVE) && (netsnmp_cache_check_and_reload( eptr->cache ) >= 0 )) { line_idx = 1; break; } } } else { token = (char *) table_info->indexes->val.string; token_len = table_info->indexes->val_len; line_idx = *table_info->indexes->next_variable->val.integer; DEBUGMSGTL(( "nsExtendTable:output2", "GETNEXT: %s / %d\n ", token, line_idx )); /* * Otherwise, find the first entry not earlier * than the requested token... */ for (eptr = ereg->ehead; eptr; eptr = eptr->next ) { if ( strlen(eptr->token) > token_len ) break; if ( strlen(eptr->token) == token_len && strcmp(eptr->token, token) >= 0 ) break; } if (!eptr) return NULL; /* (assuming there is one) */ /* * ... and make sure it's active & the output is available * (or use the first following entry that is) */ for ( ; eptr; eptr = eptr->next ) { if ((eptr->flags & NS_EXTEND_FLAGS_ACTIVE) && (netsnmp_cache_check_and_reload( eptr->cache ) >= 0 )) { break; } line_idx = 1; } if (!eptr) return NULL; /* (assuming there is one) */ /* * If we're working with the same entry that was requested, * see whether we've reached the end of the output... */ if (!strcmp( eptr->token, token )) { if ( eptr->numlines <= line_idx ) { /* * ... and if so, move on to the first line * of the next (active and successful) entry. */ line_idx = 1; for (eptr = eptr->next ; eptr; eptr = eptr->next ) { if ((eptr->flags & NS_EXTEND_FLAGS_ACTIVE) && (netsnmp_cache_check_and_reload( eptr->cache ) >= 0 )) { break; } } } else { /* * Otherwise just use the next line of this entry. */ line_idx++; } } else { /* * If this is not the same entry that was requested, * then we should return the first line. */ line_idx = 1; } } if (eptr) { DEBUGMSGTL(( "nsExtendTable:output2", "GETNEXT -> %s / %d\n ", eptr->token, line_idx)); /* * Since we're processing a GETNEXT request, * now we've found the appropriate entry (and line), * we need to update the varbind OID ... 
*/ memset(oid_buf, 0, sizeof(oid_buf)); oid_len = ereg->oid_len; memcpy( oid_buf, ereg->root_oid, oid_len*sizeof(oid)); oid_buf[ oid_len++ ] = 4; /* nsExtendOutput2Table */ oid_buf[ oid_len++ ] = 1; /* nsExtendOutput2Entry */ oid_buf[ oid_len++ ] = COLUMN_EXTOUT2_OUTLINE; /* string token index */ oid_buf[ oid_len++ ] = strlen(eptr->token); for ( i=0; i < (int)strlen(eptr->token); i++ ) oid_buf[ oid_len+i ] = eptr->token[i]; oid_len += strlen( eptr->token ); /* plus line number */ oid_buf[ oid_len++ ] = line_idx; snmp_set_var_objid( request->requestvb, oid_buf, oid_len ); /* * ... and index values to match. */ snmp_set_var_value( table_info->indexes, eptr->token, strlen(eptr->token)); snmp_set_var_value( table_info->indexes->next_variable, (const u_char*)&line_idx, sizeof(line_idx)); } } return eptr; /* Finally, signal success */ }
0
[ "CWE-269" ]
net-snmp
77f6c60f57dba0aaea5d8ef1dd94bcd0c8e6d205
89,813,453,365,998,480,000,000,000,000,000,000,000
168
make the extend mib read-only by default
bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value) { DBUG_ASSERT(value->fixed); DBUG_ASSERT(value->cols() == s->fields); for (uint i= 0; i < value->cols(); i++) { if (field[i]->sp_prepare_and_store_item(thd, value->addr(i))) return true; } return false; }
0
[]
server
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
232,963,102,306,809,240,000,000,000,000,000,000,000
11
MDEV-22464 Server crash on UPDATE with nested subquery Uninitialized ref_pointer_array[] because setup_fields() got empty fields list. mysql_multi_update() for some reason does that by substituting the fields list with empty total_list for the mysql_select() call (looks like wrong merge since total_list is not used anywhere else and is always empty). The fix would be to return back the original fields list. But this fails update_use_source.test case: --error ER_BAD_FIELD_ERROR update v1 set t1c1=2 order by 1; Actually not failing the above seems to be ok. The other fix would be to keep resolve_in_select_list false (and that keeps outer context from being resolved in Item_ref::fix_fields()). This fix is more consistent with how SELECT behaves: --error ER_SUBQUERY_NO_1_ROW select a from t1 where a= (select 2 from t1 having (a = 3)); So this patch implements this fix.
void PsdImage::readResourceBlock(uint16_t resourceId, uint32_t resourceSize) { switch(resourceId) { case kPhotoshopResourceID_IPTC_NAA: { DataBuf rawIPTC(resourceSize); io_->read(rawIPTC.pData_, rawIPTC.size_); if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); if (IptcParser::decode(iptcData_, rawIPTC.pData_, rawIPTC.size_)) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode IPTC metadata.\n"; #endif iptcData_.clear(); } break; } case kPhotoshopResourceID_ExifInfo: { DataBuf rawExif(resourceSize); io_->read(rawExif.pData_, rawExif.size_); if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); ByteOrder bo = ExifParser::decode(exifData_, rawExif.pData_, rawExif.size_); setByteOrder(bo); if (rawExif.size_ > 0 && byteOrder() == invalidByteOrder) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode Exif metadata.\n"; #endif exifData_.clear(); } break; } case kPhotoshopResourceID_XMPPacket: { DataBuf xmpPacket(resourceSize); io_->read(xmpPacket.pData_, xmpPacket.size_); if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); xmpPacket_.assign(reinterpret_cast<char *>(xmpPacket.pData_), xmpPacket.size_); if (xmpPacket_.size() > 0 && XmpParser::decode(xmpData_, xmpPacket_)) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode XMP metadata.\n"; #endif } break; } // - PS 4.0 preview data is fetched from ThumbnailResource // - PS >= 5.0 preview data is fetched from ThumbnailResource2 case kPhotoshopResourceID_ThumbnailResource: case kPhotoshopResourceID_ThumbnailResource2: { /* Photoshop thumbnail resource header offset length name description ====== ======== ==== =========== 0 4 bytes format = 1 (kJpegRGB). Also supports kRawRGB (0). 4 4 bytes width Width of thumbnail in pixels. 8 4 bytes height Height of thumbnail in pixels. 12 4 bytes widthbytes Padded row bytes as (width * bitspixel + 31) / 32 * 4. 16 4 bytes size Total size as widthbytes * height * planes 20 4 bytes compressedsize Size after compression. Used for consistentcy check. 24 2 bytes bitspixel = 24. Bits per pixel. 26 2 bytes planes = 1. Number of planes. 28 variable data JFIF data in RGB format. Note: For resource ID 1033 the data is in BGR format. */ byte buf[28]; if (io_->read(buf, 28) != 28) { throw Error(kerNotAnImage, "Photoshop"); } NativePreview nativePreview; nativePreview.position_ = io_->tell(); nativePreview.size_ = getLong(buf + 20, bigEndian); // compressedsize nativePreview.width_ = getLong(buf + 4, bigEndian); nativePreview.height_ = getLong(buf + 8, bigEndian); const uint32_t format = getLong(buf + 0, bigEndian); if (nativePreview.size_ > 0 && nativePreview.position_ >= 0) { io_->seek(static_cast<long>(nativePreview.size_), BasicIo::cur); if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); if (format == 1) { nativePreview.filter_ = ""; nativePreview.mimeType_ = "image/jpeg"; nativePreviews_.push_back(nativePreview); } else { // unsupported format of native preview } } break; } default: { break; } } } // PsdImage::readResourceBlock
0
[ "CWE-125" ]
exiv2
68966932510213b5656fcf433ab6d7e26f48e23b
231,449,338,118,500,600,000,000,000,000,000,000,000
102
PSD: Use Safe::add for preventing overflows in PSD files
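Safe::add in this message refers to overflow-checked addition before a sum is used as a size or file offset. A generic C version of the same guard (the original is a C++ template in Exiv2's safe-integer helpers):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 and stores a+b in *out when no overflow occurs, else 0. */
static int safe_add_u32(uint32_t a, uint32_t b, uint32_t *out)
{
    if (a > UINT32_MAX - b)
        return 0;
    *out = a + b;
    return 1;
}

int main(void)
{
    uint32_t pos = 0xFFFFFFF0u, resource_size = 0x20u, end;

    if (!safe_add_u32(pos, resource_size, &end)) {
        fprintf(stderr, "resource block overflows the file bounds\n");
        return 1;
    }
    printf("end offset: %u\n", (unsigned)end);
    return 0;
}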
preFormUpdateBuffer(Buffer *buf) { struct pre_form *pf; struct pre_form_item *pi; int i; Anchor *a; FormList *fl; FormItemList *fi; #ifdef MENU_SELECT FormSelectOptionItem *opt; int j; #endif if (!buf || !buf->formitem || !PreForm) return; for (pf = PreForm; pf; pf = pf->next) { if (pf->re_url) { Str url = parsedURL2Str(&buf->currentURL); if (!RegexMatch(pf->re_url, url->ptr, url->length, 1)) continue; } else if (pf->url) { if (Strcmp_charp(parsedURL2Str(&buf->currentURL), pf->url)) continue; } else continue; for (i = 0; i < buf->formitem->nanchor; i++) { a = &buf->formitem->anchors[i]; fi = (FormItemList *)a->url; fl = fi->parent; if (pf->name && (!fl->name || strcmp(fl->name, pf->name))) continue; if (pf->action && (!fl->action || Strcmp_charp(fl->action, pf->action))) continue; for (pi = pf->item; pi; pi = pi->next) { if (pi->type != fi->type) continue; if (pi->type == FORM_INPUT_SUBMIT || pi->type == FORM_INPUT_IMAGE) { if ((!pi->name || !*pi->name || (fi->name && !Strcmp_charp(fi->name, pi->name))) && (!pi->value || !*pi->value || (fi->value && !Strcmp_charp(fi->value, pi->value)))) buf->submit = a; continue; } if (!pi->name || !fi->name || Strcmp_charp(fi->name, pi->name)) continue; switch (pi->type) { case FORM_INPUT_TEXT: case FORM_INPUT_FILE: case FORM_INPUT_PASSWORD: case FORM_TEXTAREA: fi->value = Strnew_charp(pi->value); formUpdateBuffer(a, buf, fi); break; case FORM_INPUT_CHECKBOX: if (pi->value && fi->value && !Strcmp_charp(fi->value, pi->value)) { fi->checked = pi->checked; formUpdateBuffer(a, buf, fi); } break; case FORM_INPUT_RADIO: if (pi->value && fi->value && !Strcmp_charp(fi->value, pi->value)) formRecheckRadio(a, buf, fi); break; #ifdef MENU_SELECT case FORM_SELECT: for (j = 0, opt = fi->select_option; opt != NULL; j++, opt = opt->next) { if (pi->value && opt->value && !Strcmp_charp(opt->value, pi->value)) { fi->selected = j; fi->value = opt->value; fi->label = opt->label; updateSelectOption(fi, fi->select_option); formUpdateBuffer(a, buf, fi); break; } } break; #endif } } } } }
0
[ "CWE-119" ]
w3m
9f0bdcfdf061db3520bd1f112bdc5e83acdec4be
235,885,545,328,567,040,000,000,000,000,000,000,000
92
Prevent segfault for formUpdateBuffer Bug-Debian: https://github.com/tats/w3m/issues/9 Bug-Debian: https://github.com/tats/w3m/issues/10
static void mailbox_release_resources(struct mailbox *mailbox) { int i; if (mailbox->i.dirty) abort(); /* just close the header */ xclose(mailbox->header_fd); /* release and unmap index */ xclose(mailbox->index_fd); mailbox->index_locktype = 0; /* lock was released by closing fd */ if (mailbox->index_base) map_free(&mailbox->index_base, &mailbox->index_len); /* release caches */ for (i = 0; i < mailbox->caches.count; i++) { struct mappedfile *cachefile = ptrarray_nth(&mailbox->caches, i); mappedfile_close(&cachefile); } ptrarray_fini(&mailbox->caches); }
0
[]
cyrus-imapd
1d6d15ee74e11a9bd745e80be69869e5fb8d64d6
224,559,205,988,991,750,000,000,000,000,000,000,000
23
mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path()
Spec &spec() { return spec_; }
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
203,903,769,534,426,700,000,000,000,000,000,000,000
1
Fix segfault on complex pointer formatting (#642)
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, struct sctp_transport *transport, __u16 sport, __u16 dport) { struct sctp_association *asoc = transport->asoc; size_t overhead; SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__, packet, transport); packet->transport = transport; packet->source_port = sport; packet->destination_port = dport; INIT_LIST_HEAD(&packet->chunk_list); if (asoc) { struct sctp_sock *sp = sctp_sk(asoc->base.sk); overhead = sp->pf->af->net_header_len; } else { overhead = sizeof(struct ipv6hdr); } overhead += sizeof(struct sctphdr); packet->overhead = overhead; sctp_packet_reset(packet); packet->vtag = 0; packet->malloced = 0; return packet; }
0
[]
linux
196d67593439b03088913227093e374235596e33
269,815,415,518,562,200,000,000,000,000,000,000,000
27
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
has_cursormovedI(void) { return (first_autopat[(int)EVENT_CURSORMOVEDI] != NULL); }
0
[ "CWE-200", "CWE-668" ]
vim
5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8
56,782,205,224,408,800,000,000,000,000,000,000,000
4
patch 8.0.1263: others can read the swap file if a user is careless Problem: Others can read the swap file if a user is careless with his primary group. Solution: If the group permission allows for reading but the world permissions doesn't, make sure the group is right.
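The patch description — give the swap file the original file's group, and strip group permissions if that cannot be done — can be illustrated with stat()/chown()/chmod(). This is a schematic under those assumptions, not Vim's actual memline.c logic, and the file names are placeholders:

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Make swapname's group match filename's; if that fails, drop group/other
 * bits so a stranger's group cannot read the swap file. */
static void protect_swap_file(const char *filename, const char *swapname)
{
    struct stat orig, swp;

    if (stat(filename, &orig) != 0 || stat(swapname, &swp) != 0)
        return;

    if (orig.st_gid != swp.st_gid &&
        chown(swapname, (uid_t)-1, orig.st_gid) != 0) {
        chmod(swapname, swp.st_mode & (mode_t)0600);
    }
}

int main(void)
{
    protect_swap_file("file.txt", ".file.txt.swp");  /* placeholder paths */
    return 0;
}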
static int vgacon_switch(struct vc_data *c) { int x = c->vc_cols * VGA_FONTWIDTH; int y = c->vc_rows * c->vc_font.height; int rows = screen_info.orig_video_lines * vga_default_font_height/ c->vc_font.height; /* * We need to save screen size here as it's the only way * we can spot the screen has been resized and we need to * set size of freshly allocated screens ourselves. */ vga_video_num_columns = c->vc_cols; vga_video_num_lines = c->vc_rows; /* We can only copy out the size of the video buffer here, * otherwise we get into VGA BIOS */ if (!vga_is_gfx) { scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size); if ((vgacon_xres != x || vgacon_yres != y) && (!(vga_video_num_columns % 2) && vga_video_num_columns <= screen_info.orig_video_cols && vga_video_num_lines <= rows)) vgacon_doresize(c, c->vc_cols, c->vc_rows); } return 0; /* Redrawing not needed */ }
0
[ "CWE-125" ]
linux
973c096f6a85e5b5f2a295126ba6928d9a6afd45
249,447,653,876,898,950,000,000,000,000,000,000,000
31
vgacon: remove software scrollback support Yunhai Zhang recently fixed a VGA software scrollback bug in commit ebfdfeeae8c0 ("vgacon: Fix for missing check in scrollback handling"), but that then made people look more closely at some of this code, and there were more problems on the vgacon side, but also the fbcon software scrollback. We don't really have anybody who maintains this code - probably because nobody actually _uses_ it any more. Sure, people still use both VGA and the framebuffer consoles, but they are no longer the main user interfaces to the kernel, and haven't been for decades, so these kinds of extra features end up bitrotting and not really being used. So rather than try to maintain a likely unused set of code, I'll just aggressively remove it, and see if anybody even notices. Maybe there are people who haven't jumped on the whole GUI badnwagon yet, and think it's just a fad. And maybe those people use the scrollback code. If that turns out to be the case, we can resurrect this again, once we've found the sucker^Wmaintainer for it who actually uses it. Reported-by: NopNop Nop <[email protected]> Tested-by: Willy Tarreau <[email protected]> Cc: 张云海 <[email protected]> Acked-by: Andy Lutomirski <[email protected]> Acked-by: Willy Tarreau <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
Nef_polyhedron_2<T,Items,Mark>& operator+=(const Nef_polyhedron_2<T,Items,Mark>& N1) { *this = join(N1); return *this; }
0
[ "CWE-269" ]
cgal
618b409b0fbcef7cb536a4134ae3a424ef5aae45
241,297,284,418,842,350,000,000,000,000,000,000,000
2
Fix Nef_2 and Nef_S2 IO
size_t olm_pk_sign( OlmPkSigning *signing, uint8_t const * message, size_t message_length, uint8_t * signature, size_t signature_length ) { if (signature_length < olm_pk_signature_length()) { signing->last_error = OlmErrorCode::OLM_OUTPUT_BUFFER_TOO_SMALL; return std::size_t(-1); } uint8_t *raw_sig = signature + olm_pk_signature_length() - ED25519_SIGNATURE_LENGTH; _olm_crypto_ed25519_sign( &signing->key_pair, message, message_length, raw_sig ); olm::encode_base64(raw_sig, ED25519_SIGNATURE_LENGTH, signature); return olm_pk_signature_length(); }
0
[ "CWE-787" ]
olm
ccc0d122ee1b4d5e5ca4ec1432086be17d5f901b
256,583,099,409,645,700,000,000,000,000,000,000,000
17
olm_pk_decrypt: Ensure inputs are of correct length.
gst_asf_demux_process_language_list (GstASFDemux * demux, guint8 * data, guint64 size) { guint i; if (size < 2) goto not_enough_data; if (demux->languages) { GST_WARNING ("More than one LANGUAGE_LIST object in stream"); g_strfreev (demux->languages); demux->languages = NULL; demux->num_languages = 0; } demux->num_languages = gst_asf_demux_get_uint16 (&data, &size); GST_LOG ("%u languages:", demux->num_languages); demux->languages = g_new0 (gchar *, demux->num_languages + 1); for (i = 0; i < demux->num_languages; ++i) { guint8 len, *lang_data = NULL; if (size < 1) goto not_enough_data; len = gst_asf_demux_get_uint8 (&data, &size); if (gst_asf_demux_get_bytes (&lang_data, len, &data, &size)) { gchar *utf8; utf8 = g_convert ((gchar *) lang_data, len, "UTF-8", "UTF-16LE", NULL, NULL, NULL); /* truncate "en-us" etc. to just "en" */ if (utf8 && strlen (utf8) >= 5 && (utf8[2] == '-' || utf8[2] == '_')) { utf8[2] = '\0'; } GST_DEBUG ("[%u] %s", i, GST_STR_NULL (utf8)); demux->languages[i] = utf8; g_free (lang_data); } else { goto not_enough_data; } } return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing language list object!"); g_free (demux->languages); demux->languages = NULL; demux->num_languages = 0; return GST_FLOW_OK; /* not fatal */ } }
0
[ "CWE-125", "CWE-787" ]
gst-plugins-ugly
d21017b52a585f145e8d62781bcc1c5fefc7ee37
34,469,243,478,105,854,000,000,000,000,000,000,000
54
asfdemux: Check that we have enough data available before parsing bool/uint extended content descriptors https://bugzilla.gnome.org/show_bug.cgi?id=777955
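The pattern behind this fix is simply refusing to read a field when fewer bytes remain than the field needs. A stripped-down reader sketch with hypothetical helper names, not GStreamer's actual gst_asf_demux_get_* API:

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian u16, but only if enough payload remains. */
static int get_uint16_checked(const uint8_t **data, uint64_t *size, uint16_t *out)
{
    if (*size < 2)
        return 0;                      /* not enough data: bail out early */
    *out = (uint16_t)((*data)[0] | ((*data)[1] << 8));
    *data += 2;
    *size -= 2;
    return 1;
}

int main(void)
{
    uint8_t payload[1] = { 0x01 };     /* truncated descriptor */
    const uint8_t *p = payload;
    uint64_t remaining = sizeof(payload);
    uint16_t value;

    if (!get_uint16_checked(&p, &remaining, &value))
        fprintf(stderr, "short read parsing descriptor, skipping\n");
    return 0;
}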
static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,double *red, double *green,double *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); }
1
[ "CWE-835" ]
ImageMagick
a80ee0ee1a083b4991d12ed4c07b7c7c5890f329
124,461,776,148,275,620,000,000,000,000,000,000,000
22
https://www.imagemagick.org/discourse-server/viewtopic.php?f=3&t=31506
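The record above is labeled as the vulnerable version (CWE-835): with a non-finite hue the two while loops never terminate. One hedged way to normalize the hue without an unbounded loop is fmod() plus an explicit finiteness check; this is a generic alternative for illustration, not necessarily the fix ImageMagick shipped:

#include <math.h>
#include <stdio.h>

/* Wrap a hue value into [0,1) without looping. */
static double normalize_hue(double hue)
{
    if (!isfinite(hue))
        return 0.0;              /* refuse to spin on NaN/Inf input */
    hue = fmod(hue, 1.0);
    if (hue < 0.0)
        hue += 1.0;
    return hue;
}

int main(void)
{
    printf("%f\n", normalize_hue(-2.3));     /* prints 0.7 */
    printf("%f\n", normalize_hue(nan("")));  /* NaN input -> 0.0 */
    return 0;                                /* link with -lm */
}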
HttpHdrRangeSpec::outputInfo( char const *note) const { debugs(64, 5, "HttpHdrRangeSpec::canonize: " << note << ": [" << offset << ", " << offset + length << ") len: " << length); }
0
[ "CWE-116" ]
squid
7024fb734a59409889e53df2257b3fc817809fb4
299,817,763,197,413,420,000,000,000,000,000,000,000
6
Handle more Range requests (#790). Also removed some effectively unused code.

start_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { ppm_source_ptr source = (ppm_source_ptr)sinfo; int c; unsigned int w, h, maxval; boolean need_iobuffer, use_raw_buffer, need_rescale; if (getc(source->pub.input_file) != 'P') ERREXIT(cinfo, JERR_PPM_NOT); c = getc(source->pub.input_file); /* subformat discriminator character */ /* detect unsupported variants (ie, PBM) before trying to read header */ switch (c) { case '2': /* it's a text-format PGM file */ case '3': /* it's a text-format PPM file */ case '5': /* it's a raw-format PGM file */ case '6': /* it's a raw-format PPM file */ break; default: ERREXIT(cinfo, JERR_PPM_NOT); break; } /* fetch the remaining header info */ w = read_pbm_integer(cinfo, source->pub.input_file, 65535); h = read_pbm_integer(cinfo, source->pub.input_file, 65535); maxval = read_pbm_integer(cinfo, source->pub.input_file, 65535); if (w <= 0 || h <= 0 || maxval <= 0) /* error check */ ERREXIT(cinfo, JERR_PPM_NOT); cinfo->data_precision = BITS_IN_JSAMPLE; /* we always rescale data to this */ cinfo->image_width = (JDIMENSION)w; cinfo->image_height = (JDIMENSION)h; source->maxval = maxval; /* initialize flags to most common settings */ need_iobuffer = TRUE; /* do we need an I/O buffer? */ use_raw_buffer = FALSE; /* do we map input buffer onto I/O buffer? */ need_rescale = TRUE; /* do we need a rescale array? */ switch (c) { case '2': /* it's a text-format PGM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_GRAYSCALE; TRACEMS2(cinfo, 1, JTRC_PGM_TEXT, w, h); if (cinfo->in_color_space == JCS_GRAYSCALE) source->pub.get_pixel_rows = get_text_gray_row; else if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_text_gray_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_text_gray_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); need_iobuffer = FALSE; break; case '3': /* it's a text-format PPM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_EXT_RGB; TRACEMS2(cinfo, 1, JTRC_PPM_TEXT, w, h); if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_text_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_text_rgb_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); need_iobuffer = FALSE; break; case '5': /* it's a raw-format PGM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_GRAYSCALE; TRACEMS2(cinfo, 1, JTRC_PGM, w, h); if (maxval > 255) { source->pub.get_pixel_rows = get_word_gray_row; } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) && cinfo->in_color_space == JCS_GRAYSCALE) { source->pub.get_pixel_rows = get_raw_row; use_raw_buffer = TRUE; need_rescale = FALSE; } else { if (cinfo->in_color_space == JCS_GRAYSCALE) source->pub.get_pixel_rows = get_scaled_gray_row; else if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_gray_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_gray_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); } break; case '6': /* it's a raw-format PPM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_EXT_RGB; TRACEMS2(cinfo, 1, JTRC_PPM, w, h); if (maxval > 255) { source->pub.get_pixel_rows = get_word_rgb_row; } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) && (cinfo->in_color_space == JCS_EXT_RGB #if RGB_RED == 0 && RGB_GREEN == 1 && RGB_BLUE == 2 && RGB_PIXELSIZE == 3 || cinfo->in_color_space == JCS_RGB #endif )) { 
source->pub.get_pixel_rows = get_raw_row; use_raw_buffer = TRUE; need_rescale = FALSE; } else { if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_rgb_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); } break; } if (IsExtRGB(cinfo->in_color_space)) cinfo->input_components = rgb_pixelsize[cinfo->in_color_space]; else if (cinfo->in_color_space == JCS_GRAYSCALE) cinfo->input_components = 1; else if (cinfo->in_color_space == JCS_CMYK) cinfo->input_components = 4; /* Allocate space for I/O buffer: 1 or 3 bytes or words/pixel. */ if (need_iobuffer) { if (c == '6') source->buffer_width = (size_t)w * 3 * ((maxval <= 255) ? sizeof(U_CHAR) : (2 * sizeof(U_CHAR))); else source->buffer_width = (size_t)w * ((maxval <= 255) ? sizeof(U_CHAR) : (2 * sizeof(U_CHAR))); source->iobuffer = (U_CHAR *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, source->buffer_width); } /* Create compressor input buffer. */ if (use_raw_buffer) { /* For unscaled raw-input case, we can just map it onto the I/O buffer. */ /* Synthesize a JSAMPARRAY pointer structure */ source->pixrow = (JSAMPROW)source->iobuffer; source->pub.buffer = &source->pixrow; source->pub.buffer_height = 1; } else { /* Need to translate anyway, so make a separate sample buffer. */ source->pub.buffer = (*cinfo->mem->alloc_sarray) ((j_common_ptr)cinfo, JPOOL_IMAGE, (JDIMENSION)w * cinfo->input_components, (JDIMENSION)1); source->pub.buffer_height = 1; } /* Compute the rescaling array if required. */ if (need_rescale) { long val, half_maxval; /* On 16-bit-int machines we have to be careful of maxval = 65535 */ source->rescale = (JSAMPLE *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, (size_t)(((long)maxval + 1L) * sizeof(JSAMPLE))); half_maxval = maxval / 2; for (val = 0; val <= (long)maxval; val++) { /* The multiplication here must be done in 32 bits to avoid overflow */ source->rescale[val] = (JSAMPLE)((val * MAXJSAMPLE + half_maxval) / maxval); } } }
1
[ "CWE-125" ]
libjpeg-turbo
3de15e0c344d11d4b90f4a47136467053eb2d09a
204,505,600,099,522,900,000,000,000,000,000,000,000
172
rdppm.c: Fix buf overrun caused by bad binary PPM This extends the fix in 1e81b0c3ea26f4ea8f56de05367469333de64a9f to include binary PPM files with maximum values < 255, thus preventing a malformed binary PPM input file with those specifications from triggering an overrun of the rescale array and potentially crashing cjpeg, TJBench, or any program that uses the tjLoadImage() function. Fixes #433
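The overrun happens when a sample value read from the file exceeds the declared maxval and is then used to index the rescale[] table. The defensive pattern is to clamp (or reject) each sample before the lookup — a simplified sketch of that idea, not libjpeg-turbo's exact patch:

#include <stdint.h>
#include <stdio.h>

#define MAXJSAMPLE 255

int main(void)
{
    unsigned int maxval = 15;                    /* from the PPM header */
    uint8_t rescale[16];                         /* maxval + 1 entries */
    for (unsigned int v = 0; v <= maxval; v++)
        rescale[v] = (uint8_t)((v * MAXJSAMPLE + maxval / 2) / maxval);

    uint8_t raw = 200;                           /* malformed: > maxval */
    unsigned int idx = raw;
    if (idx > maxval)
        idx = maxval;                            /* clamp before indexing */
    printf("rescaled sample: %u\n", rescale[idx]);
    return 0;
}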
struct snd_seq_client_port *snd_seq_port_query_nearest(struct snd_seq_client *client, struct snd_seq_port_info *pinfo) { int num; struct snd_seq_client_port *port, *found; num = pinfo->addr.port; found = NULL; read_lock(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if (port->addr.port < num) continue; if (port->addr.port == num) { found = port; break; } if (found == NULL || port->addr.port < found->addr.port) found = port; } if (found) { if (found->closing) found = NULL; else snd_use_lock_use(&found->use_lock); } read_unlock(&client->ports_lock); return found; }
0
[ "CWE-416", "CWE-362" ]
linux
71105998845fb012937332fe2e806d443c09e026
286,933,111,770,903,420,000,000,000,000,000,000,000
28
ALSA: seq: Fix use-after-free at creating a port There is a potential race window opened at creating and deleting a port via ioctl, as spotted by fuzzing. snd_seq_create_port() creates a port object and returns its pointer, but it doesn't take the refcount, thus it can be deleted immediately by another thread. Meanwhile, snd_seq_ioctl_create_port() still calls the function snd_seq_system_client_ev_port_start() with the created port object that is being deleted, and this triggers use-after-free like: BUG: KASAN: use-after-free in snd_seq_ioctl_create_port+0x504/0x630 [snd_seq] at addr ffff8801f2241cb1 ============================================================================= BUG kmalloc-512 (Tainted: G B ): kasan: bad access detected ----------------------------------------------------------------------------- INFO: Allocated in snd_seq_create_port+0x94/0x9b0 [snd_seq] age=1 cpu=3 pid=4511 ___slab_alloc+0x425/0x460 __slab_alloc+0x20/0x40 kmem_cache_alloc_trace+0x150/0x190 snd_seq_create_port+0x94/0x9b0 [snd_seq] snd_seq_ioctl_create_port+0xd1/0x630 [snd_seq] snd_seq_do_ioctl+0x11c/0x190 [snd_seq] snd_seq_ioctl+0x40/0x80 [snd_seq] do_vfs_ioctl+0x54b/0xda0 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x16/0x75 INFO: Freed in port_delete+0x136/0x1a0 [snd_seq] age=1 cpu=2 pid=4717 __slab_free+0x204/0x310 kfree+0x15f/0x180 port_delete+0x136/0x1a0 [snd_seq] snd_seq_delete_port+0x235/0x350 [snd_seq] snd_seq_ioctl_delete_port+0xc8/0x180 [snd_seq] snd_seq_do_ioctl+0x11c/0x190 [snd_seq] snd_seq_ioctl+0x40/0x80 [snd_seq] do_vfs_ioctl+0x54b/0xda0 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x16/0x75 Call Trace: [<ffffffff81b03781>] dump_stack+0x63/0x82 [<ffffffff81531b3b>] print_trailer+0xfb/0x160 [<ffffffff81536db4>] object_err+0x34/0x40 [<ffffffff815392d3>] kasan_report.part.2+0x223/0x520 [<ffffffffa07aadf4>] ? snd_seq_ioctl_create_port+0x504/0x630 [snd_seq] [<ffffffff815395fe>] __asan_report_load1_noabort+0x2e/0x30 [<ffffffffa07aadf4>] snd_seq_ioctl_create_port+0x504/0x630 [snd_seq] [<ffffffffa07aa8f0>] ? snd_seq_ioctl_delete_port+0x180/0x180 [snd_seq] [<ffffffff8136be50>] ? taskstats_exit+0xbc0/0xbc0 [<ffffffffa07abc5c>] snd_seq_do_ioctl+0x11c/0x190 [snd_seq] [<ffffffffa07abd10>] snd_seq_ioctl+0x40/0x80 [snd_seq] [<ffffffff8136d433>] ? acct_account_cputime+0x63/0x80 [<ffffffff815b515b>] do_vfs_ioctl+0x54b/0xda0 ..... We may fix this in a few different ways, and in this patch, it's fixed simply by taking the refcount properly at snd_seq_create_port() and letting the caller unref the object after use. Also, there is another potential use-after-free by sprintf() call in snd_seq_create_port(), and this is moved inside the lock. This fix covers CVE-2017-15265. Reported-and-tested-by: Michael23 Yu <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
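The fix described above is the classic "creator holds a reference, caller releases it" discipline, so the object cannot be freed by a concurrent delete while the creator is still using it. A minimal single-threaded refcount sketch with made-up names (not the kernel's snd_seq code or kref API):

#include <stdio.h>
#include <stdlib.h>

struct port {
    int refcount;
    int addr;
};

static struct port *port_create(int addr)
{
    struct port *p = calloc(1, sizeof(*p));
    if (p == NULL)
        return NULL;
    p->addr = addr;
    p->refcount = 1;    /* returned pointer carries its own reference */
    return p;
}

static void port_put(struct port *p)
{
    if (p && --p->refcount == 0)
        free(p);
}

int main(void)
{
    struct port *p = port_create(7);
    if (p == NULL)
        return 1;
    /* ... notify other subsystems while still holding our reference ... */
    printf("created port %d\n", p->addr);
    port_put(p);        /* caller drops the creation reference when done */
    return 0;
}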
header_afiol(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry, size_t *namelength, size_t *name_pad) { const void *h; const char *header; a->archive.archive_format = ARCHIVE_FORMAT_CPIO_AFIO_LARGE; a->archive.archive_format_name = "afio large ASCII"; /* Read fixed-size portion of header. */ h = __archive_read_ahead(a, afiol_header_size, NULL); if (h == NULL) return (ARCHIVE_FATAL); /* Parse out octal fields. */ header = (const char *)h; archive_entry_set_dev(entry, (dev_t)atol16(header + afiol_dev_offset, afiol_dev_size)); archive_entry_set_ino(entry, atol16(header + afiol_ino_offset, afiol_ino_size)); archive_entry_set_mode(entry, (mode_t)atol8(header + afiol_mode_offset, afiol_mode_size)); archive_entry_set_uid(entry, atol16(header + afiol_uid_offset, afiol_uid_size)); archive_entry_set_gid(entry, atol16(header + afiol_gid_offset, afiol_gid_size)); archive_entry_set_nlink(entry, (unsigned int)atol16(header + afiol_nlink_offset, afiol_nlink_size)); archive_entry_set_rdev(entry, (dev_t)atol16(header + afiol_rdev_offset, afiol_rdev_size)); archive_entry_set_mtime(entry, atol16(header + afiol_mtime_offset, afiol_mtime_size), 0); *namelength = (size_t)atol16(header + afiol_namesize_offset, afiol_namesize_size); *name_pad = 0; /* No padding of filename. */ cpio->entry_bytes_remaining = atol16(header + afiol_filesize_offset, afiol_filesize_size); archive_entry_set_size(entry, cpio->entry_bytes_remaining); cpio->entry_padding = 0; __archive_read_consume(a, afiol_header_size); return (ARCHIVE_OK); }
0
[]
libarchive
fd7e0c02e272913a0a8b6d492c7260dfca0b1408
174,164,715,098,140,700,000,000,000,000,000,000,000
39
Reject cpio symlinks that exceed 1MB
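This one-line summary corresponds to an upper bound on the declared symlink target size before any allocation or read. A hedged sketch; the 1 MB constant mirrors the message, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAX_SYMLINK_TARGET (1024 * 1024)   /* 1 MB cap from the commit message */

/* Returns 0 when the declared size is acceptable, -1 otherwise. */
static int check_symlink_size(uint64_t declared_size)
{
    if (declared_size > MAX_SYMLINK_TARGET) {
        fprintf(stderr, "symlink target too large (%llu bytes), rejecting entry\n",
                (unsigned long long)declared_size);
        return -1;
    }
    return 0;
}

int main(void)
{
    check_symlink_size(8ull * 1024 * 1024);   /* rejected */
    return check_symlink_size(64) == 0 ? 0 : 1;
}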
endElementNsSplit(void *ctx, const xmlChar * localname, const xmlChar * prefix, const xmlChar * URI) { xmlSchemaSAXPlugPtr ctxt = (xmlSchemaSAXPlugPtr) ctx; if (ctxt == NULL) return; if ((ctxt->user_sax != NULL) && (ctxt->user_sax->endElementNs != NULL)) ctxt->user_sax->endElementNs(ctxt->user_data, localname, prefix, URI); if (ctxt->ctxt != NULL) xmlSchemaSAXHandleEndElementNs(ctxt->ctxt, localname, prefix, URI); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
315,116,525,412,056,630,000,000,000,000,000,000,000
11
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
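LIBXML_ATTR_FORMAT in this message wraps GCC/Clang's format attribute so the compiler can check printf-style callers and flag format-string misuse. A generic example of the same decoration on a hypothetical error helper:

#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__)
# define ATTR_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
#else
# define ATTR_FORMAT(fmt, args)
#endif

static void report_error(const char *fmt, ...) ATTR_FORMAT(1, 2);

static void report_error(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    report_error("bad element '%s' on line %d\n", "foo", 42);
    /* report_error(user_supplied);  -- would now trigger -Wformat-security */
    return 0;
}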
mlxreg_host_shaper_config(struct rte_eth_dev *dev, bool lwm_triggered, uint8_t rate) { #ifdef HAVE_MLX5_MSTFLINT struct mlx5_priv *priv = dev->data->dev_private; uint32_t data[MLX5_ST_SZ_DW(register_qshr)] = {0}; int rc, retry_count = 3; mfile *mf = NULL; int status; void *ptr; mf = mopen(priv->sh->ibdev_name); if (!mf) { DRV_LOG(WARNING, "mopen failed\n"); rte_errno = ENOENT; return -rte_errno; } MLX5_SET(register_qshr, data, connected_host, 1); MLX5_SET(register_qshr, data, fast_response, lwm_triggered ? 1 : 0); MLX5_SET(register_qshr, data, local_port, 1); ptr = MLX5_ADDR_OF(register_qshr, data, global_config); MLX5_SET(ets_global_config_register, ptr, rate_limit_update, 1); MLX5_SET(ets_global_config_register, ptr, max_bw_units, rate ? ETS_GLOBAL_CONFIG_BW_UNIT_HUNDREDS_MBPS : ETS_GLOBAL_CONFIG_BW_UNIT_DISABLED); MLX5_SET(ets_global_config_register, ptr, max_bw_value, rate); do { rc = maccess_reg(mf, MLX5_QSHR_REGISTER_ID, MACCESS_REG_METHOD_SET, (u_int32_t *)&data[0], sizeof(data), sizeof(data), sizeof(data), &status); if ((rc != ME_ICMD_STATUS_IFC_BUSY && status != ME_REG_ACCESS_BAD_PARAM) || !(mf->flags & MDEVS_REM)) { break; } DRV_LOG(WARNING, "%s retry.", __func__); usleep(10000); } while (retry_count-- > 0); mclose(mf); rte_errno = (rc == ME_REG_ACCESS_DEV_BUSY) ? EBUSY : EIO; return rc ? -rte_errno : 0; #else (void)dev; (void)lwm_triggered; (void)rate; return -1; #endif }
0
[]
dpdk
60b254e3923d007bcadbb8d410f95ad89a2f13fa
145,963,607,253,809,300,000,000,000,000,000,000,000
53
net/mlx5: fix Rx queue recovery mechanism The local variables are getting inconsistent in data receiving routines after queue error recovery. Receive queue consumer index is getting wrong, need to reset one to the size of the queue (as RQ was fully replenished in recovery procedure). In MPRQ case, also the local consumed strd variable should be reset. CVE-2022-28199 Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling") Cc: [email protected] Signed-off-by: Alexander Kozyrev <[email protected]> Signed-off-by: Matan Azrad <[email protected]>
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) { int ret; if (tg3_flag(tp, EEPROM_WRITE_PROT)) { tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & ~GRC_LCLCTRL_GPIO_OUTPUT1); udelay(40); } if (!tg3_flag(tp, NVRAM)) { ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); } else { u32 grc_mode; ret = tg3_nvram_lock(tp); if (ret) return ret; tg3_enable_nvram_access(tp); if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) tw32(NVRAM_WRITE1, 0x406); grc_mode = tr32(GRC_MODE); tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { ret = tg3_nvram_write_block_buffered(tp, offset, len, buf); } else { ret = tg3_nvram_write_block_unbuffered(tp, offset, len, buf); } grc_mode = tr32(GRC_MODE); tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); tg3_disable_nvram_access(tp); tg3_nvram_unlock(tp); } if (tg3_flag(tp, EEPROM_WRITE_PROT)) { tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(40); } return ret; }
0
[ "CWE-476", "CWE-119" ]
linux
715230a44310a8cf66fbfb5a46f9a62a9b2de424
228,825,528,440,723,400,000,000,000,000,000,000,000
48
tg3: fix length overflow in VPD firmware parsing Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version when present") introduced VPD parsing that contained a potential length overflow. Limit the hardware's reported firmware string length (max 255 bytes) to stay inside the driver's firmware string length (32 bytes). On overflow, truncate the formatted firmware string instead of potentially overwriting portions of the tg3 struct. http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf Signed-off-by: Kees Cook <[email protected]> Reported-by: Oded Horovitz <[email protected]> Reported-by: Brad Spengler <[email protected]> Cc: [email protected] Cc: Matt Carlson <[email protected]> Signed-off-by: David S. Miller <[email protected]>
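The fix limits a hardware-reported length to the size of the driver-side buffer and truncates on overflow rather than writing past it. A generic bounded-copy sketch (names and sizes are illustrative, not the tg3 driver's own code):

#include <stdio.h>
#include <string.h>

#define FW_VER_LEN 32   /* driver-side buffer, as in the commit message */

static void copy_fw_version(char dst[FW_VER_LEN], const char *src, size_t src_len)
{
    /* src_len comes from untrusted hardware/VPD data: clamp it first. */
    size_t n = src_len < FW_VER_LEN - 1 ? src_len : FW_VER_LEN - 1;
    memcpy(dst, src, n);
    dst[n] = '\0';
}

int main(void)
{
    char fw_ver[FW_VER_LEN];
    const char reported[] = "5719-v1.24 bc 5719-v1.24 plus trailing junk that gets cut";
    copy_fw_version(fw_ver, reported, sizeof(reported) - 1);
    printf("%s\n", fw_ver);
    return 0;
}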
static int __init ecryptfs_init(void) { int rc; if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " "eCryptfs cannot run on this system. The " "default eCryptfs extent size is [%u] bytes; " "the page size is [%lu] bytes.\n", ECRYPTFS_DEFAULT_EXTENT_SIZE, (unsigned long)PAGE_CACHE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); if (rc) { printk(KERN_ERR "Failed to allocate one or more kmem_cache objects\n"); goto out; } rc = do_sysfs_registration(); if (rc) { printk(KERN_ERR "sysfs registration failed\n"); goto out_free_kmem_caches; } rc = ecryptfs_init_kthread(); if (rc) { printk(KERN_ERR "%s: kthread initialization failed; " "rc = [%d]\n", __func__, rc); goto out_do_sysfs_unregistration; } rc = ecryptfs_init_messaging(); if (rc) { printk(KERN_ERR "Failure occurred while attempting to " "initialize the communications channel to " "ecryptfsd\n"); goto out_destroy_kthread; } rc = ecryptfs_init_crypto(); if (rc) { printk(KERN_ERR "Failure whilst attempting to init crypto; " "rc = [%d]\n", rc); goto out_release_messaging; } rc = register_filesystem(&ecryptfs_fs_type); if (rc) { printk(KERN_ERR "Failed to register filesystem\n"); goto out_destroy_crypto; } if (ecryptfs_verbosity > 0) printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values " "will be written to the syslog!\n", ecryptfs_verbosity); goto out; out_destroy_crypto: ecryptfs_destroy_crypto(); out_release_messaging: ecryptfs_release_messaging(); out_destroy_kthread: ecryptfs_destroy_kthread(); out_do_sysfs_unregistration: do_sysfs_unregistration(); out_free_kmem_caches: ecryptfs_free_kmem_caches(); out: return rc; }
0
[ "CWE-284", "CWE-264" ]
linux
69c433ed2ecd2d3264efd7afec4439524b319121
128,149,436,099,985,800,000,000,000,000,000,000,000
68
fs: limit filesystem stacking depth Add a simple read-only counter to super_block that indicates how deep this is in the stack of filesystems. Previously ecryptfs was the only stackable filesystem and it explicitly disallowed multiple layers of itself. Overlayfs, however, can be stacked recursively and also may be stacked on top of ecryptfs or vice versa. To limit the kernel stack usage we must limit the depth of the filesystem stack. Initially the limit is set to 2. Signed-off-by: Miklos Szeredi <[email protected]>
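The mechanism described — a per-superblock stacking counter checked against a small limit at mount time — can be sketched independently of the VFS. FILESYSTEM_MAX_STACK_DEPTH and the struct below are stand-ins for the kernel's fields, not the real definitions:

#include <stdio.h>

#define FILESYSTEM_MAX_STACK_DEPTH 2   /* initial limit from the commit message */

struct superblock {
    int s_stack_depth;
};

/* Returns 0 on success, -1 when stacking would get too deep. */
static int stack_on(struct superblock *upper, const struct superblock *lower)
{
    upper->s_stack_depth = lower->s_stack_depth + 1;
    if (upper->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
        fprintf(stderr, "maximum fs stacking depth exceeded\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    struct superblock disk = { 0 }, crypt = { 0 }, overlay = { 0 }, extra = { 0 };
    if (stack_on(&crypt, &disk))    return 1;   /* depth 1: ok */
    if (stack_on(&overlay, &crypt)) return 1;   /* depth 2: ok */
    return stack_on(&extra, &overlay) ? 0 : 1;  /* depth 3: rejected, as expected */
}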
json_bool lh_table_lookup_ex(struct lh_table* t, const void* k, void **v) { struct lh_entry *e = lh_table_lookup_entry(t, k); if (e != NULL) { if (v != NULL) *v = (void *)e->v; return TRUE; /* key found */ } if (v != NULL) *v = NULL; return FALSE; /* key not found */ }
0
[ "CWE-119", "CWE-310" ]
json-c
64e36901a0614bf64a19bc3396469c66dcd0b015
76,189,328,089,940,970,000,000,000,000,000,000,000
10
Patch to address the following issues: * CVE-2013-6371: hash collision denial of service * CVE-2013-6370: buffer overflow if size_t is larger than int
void sdsclear(sds s) { sdssetlen(s, 0); s[0] = '\0'; }
0
[ "CWE-190" ]
redis
d32f2e9999ce003bad0bd2c3bca29f64dcce4433
289,576,666,586,583,120,000,000,000,000,000,000,000
4
Fix integer overflow (CVE-2021-21309). (#8522) On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result in integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309). This fix has two parts: Set a reasonable limit to the config parameter. Add additional checks to prevent the problem in other potential but unknown code paths.
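The advisory is about a 32-bit size_t wrapping around when a configured limit plus protocol overhead is turned into an allocation size. The generic guard is to check the addition before allocating — a hedged sketch of that pattern, not Redis's actual patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate len+extra bytes only if the sum fits in size_t. */
static void *alloc_bulk(size_t len, size_t extra)
{
    if (len > SIZE_MAX - extra)
        return NULL;                 /* would wrap around on 32-bit builds */
    return malloc(len + extra);
}

int main(void)
{
    size_t bulklen = (size_t)512 * 1024 * 1024;   /* within a sane limit */
    void *buf = alloc_bulk(bulklen, 2);           /* +2 for trailing CRLF */
    if (buf == NULL) {
        fprintf(stderr, "bulk length too large or out of memory\n");
        return 1;
    }
    free(buf);
    return 0;
}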
static int __ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { ext4_fsblk_t index = from >> PAGE_SHIFT; unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize, pos; ext4_lblk_t iblock; struct inode *inode = mapping->host; struct buffer_head *bh; struct page *page; int err = 0; page = find_or_create_page(mapping, from >> PAGE_SHIFT, mapping_gfp_constraint(mapping, ~__GFP_FS)); if (!page) return -ENOMEM; blocksize = inode->i_sb->s_blocksize; iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); /* Find the buffer that contains "offset" */ bh = page_buffers(page); pos = blocksize; while (offset >= pos) { bh = bh->b_this_page; iblock++; pos += blocksize; } if (buffer_freed(bh)) { BUFFER_TRACE(bh, "freed: skip"); goto unlock; } if (!buffer_mapped(bh)) { BUFFER_TRACE(bh, "unmapped"); ext4_get_block(inode, iblock, bh, 0); /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) { BUFFER_TRACE(bh, "still unmapped"); goto unlock; } } /* Ok, it's mapped. Make sure it's up-to-date */ if (PageUptodate(page)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode)) { /* We expect the key to be set. */ BUG_ON(!ext4_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); WARN_ON_ONCE(ext4_decrypt(page)); } } if (ext4_should_journal_data(inode)) { BUFFER_TRACE(bh, "get write access"); err = ext4_journal_get_write_access(handle, bh); if (err) goto unlock; } zero_user(page, offset, length); BUFFER_TRACE(bh, "zeroed end of block"); if (ext4_should_journal_data(inode)) { err = ext4_handle_dirty_metadata(handle, inode, bh); } else { err = 0; mark_buffer_dirty(bh); if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) err = ext4_jbd2_file_inode(handle, inode); } unlock: unlock_page(page); put_page(page); return err; }
0
[ "CWE-200" ]
linux
06bd3c36a733ac27962fea7d6f47168841376824
282,556,652,033,289,940,000,000,000,000,000,000,000
88
ext4: fix data exposure after a crash Huang has reported that in his powerfail testing he is seeing stale block contents in some of recently allocated blocks although he mounts ext4 in data=ordered mode. After some investigation I have found out that indeed when delayed allocation is used, we don't add inode to transaction's list of inodes needing flushing before commit. Originally we were doing that but commit f3b59291a69d removed the logic with a flawed argument that it is not needed. The problem is that although for delayed allocated blocks we write their contents immediately after allocating them, there is no guarantee that the IO scheduler or device doesn't reorder things and thus transaction allocating blocks and attaching them to inode can reach stable storage before actual block contents. Actually whenever we attach freshly allocated blocks to inode using a written extent, we should add inode to transaction's ordered inode list to make sure we properly wait for block contents to be written before committing the transaction. So that is what we do in this patch. This also handles other cases where stale data exposure was possible - like filling hole via mmap in data=ordered,nodelalloc mode. The only exception to the above rule are extending direct IO writes where blkdev_direct_IO() waits for IO to complete before increasing i_size and thus stale data exposure is not possible. For now we don't complicate the code with optimizing this special case since the overhead is pretty low. In case this is observed to be a performance problem we can always handle it using a special flag to ext4_map_blocks(). CC: [email protected] Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d Reported-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]> Tested-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]>
static uint32_t scsi_init_iovec(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = SCSI_DMA_BUF_SIZE;
        r->iov.iov_base = qemu_blockalign(s->bs, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
    return r->qiov.size / 512;
}
0
[ "CWE-119", "CWE-787" ]
qemu
7285477ab11831b1cf56e45878a89170dd06d9b9
155,787,867,687,287,540,000,000,000,000,000,000,000
12
scsi-disk: lazily allocate bounce buffer It will not be needed for reads and writes if the HBA provides a sglist. In addition, this lets scsi-disk refuse commands with an excessive allocation length, as well as limit memory on usual well-behaved guests. Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Kevin Wolf <[email protected]>
static int on_headers_complete(http_parser* self_)
{
    HTTPParser* self = static_cast<HTTPParser*>(self_);
    if (!self->header_field.empty()) {
        self->req.headers.emplace(std::move(self->header_field), std::move(self->header_value));
    }
    self->set_connection_parameters();
    self->process_header();
    return 0;
}
0
[ "CWE-416" ]
Crow
fba01dc76d6ea940ad7c8392e8f39f9647241d8e
16,645,121,580,963,648,000,000,000,000,000,000,000
13
Prevent HTTP pipelining which Crow doesn't support.
ews_get_period (ESoapParameter *node)
{
    EEwsCalendarPeriod *period = NULL;
    gchar *bias = NULL;
    gchar *name = NULL;
    gchar *id = NULL;

    bias = e_soap_parameter_get_property (node, "Bias");
    name = e_soap_parameter_get_property (node, "Name");
    id = e_soap_parameter_get_property (node, "Id");

    if (bias == NULL || name == NULL || id == NULL) {
        g_free (bias);
        g_free (name);
        g_free (id);
        return NULL;
    }

    period = e_ews_calendar_period_new ();
    period->bias = bias;
    period->name = name;
    period->id = id;

    return period;
}
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
70,853,326,339,039,210,000,000,000,000,000,000,000
26
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
    struct address_space *mapping = mpd->inode->i_mapping;
    struct pagevec pvec;
    unsigned int nr_pages;
    long left = mpd->wbc->nr_to_write;
    pgoff_t index = mpd->first_page;
    pgoff_t end = mpd->last_page;
    int tag;
    int i, err = 0;
    int blkbits = mpd->inode->i_blkbits;
    ext4_lblk_t lblk;
    struct buffer_head *head;

    if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
        tag = PAGECACHE_TAG_TOWRITE;
    else
        tag = PAGECACHE_TAG_DIRTY;

    pagevec_init(&pvec);
    mpd->map.m_len = 0;
    mpd->next_page = index;
    while (index <= end) {
        nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, tag);
        if (nr_pages == 0)
            goto out;

        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];

            /*
             * Accumulated enough dirty pages? This doesn't apply
             * to WB_SYNC_ALL mode. For integrity sync we have to
             * keep going because someone may be concurrently
             * dirtying pages, and we might have synced a lot of
             * newly appeared dirty pages, but have not synced all
             * of the old dirty pages.
             */
            if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
                goto out;

            /* If we can't merge this page, we are done. */
            if (mpd->map.m_len > 0 && mpd->next_page != page->index)
                goto out;

            lock_page(page);
            /*
             * If the page is no longer dirty, or its mapping no
             * longer corresponds to inode we are writing (which
             * means it has been truncated or invalidated), or the
             * page is already under writeback and we are not doing
             * a data integrity writeback, skip the page
             */
            if (!PageDirty(page) ||
                (PageWriteback(page) &&
                 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
                unlikely(page->mapping != mapping)) {
                unlock_page(page);
                continue;
            }

            wait_on_page_writeback(page);
            BUG_ON(PageWriteback(page));

            if (mpd->map.m_len == 0)
                mpd->first_page = page->index;
            mpd->next_page = page->index + 1;
            /* Add all dirty buffers to mpd */
            lblk = ((ext4_lblk_t)page->index) << (PAGE_SHIFT - blkbits);
            head = page_buffers(page);
            err = mpage_process_page_bufs(mpd, head, head, lblk);
            if (err <= 0)
                goto out;
            err = 0;
            left--;
        }
        pagevec_release(&pvec);
        cond_resched();
    }
    return 0;
out:
    pagevec_release(&pvec);
    return err;
}
0
[]
linux
8e4b5eae5decd9dfe5a4ee369c22028f90ab4c44
209,483,805,411,650,420,000,000,000,000,000,000,000
86
ext4: fail ext4_iget for root directory if unallocated If the root directory has an i_links_count of zero, then when the file system is mounted, then when ext4_fill_super() notices the problem and tries to call iput() the root directory in the error return path, ext4_evict_inode() will try to free the inode on disk, before all of the file system structures are set up, and this will result in an OOPS caused by a NULL pointer dereference. This issue has been assigned CVE-2018-1092. https://bugzilla.kernel.org/show_bug.cgi?id=199179 https://bugzilla.redhat.com/show_bug.cgi?id=1560777 Reported-by: Wen Xu <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> Cc: [email protected]
static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
                                        struct nft_set *set,
                                        const struct nft_set_iter *iter,
                                        struct nft_set_elem *elem)
{
    const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);

    if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
        *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
        return 0;

    return nft_check_loops(ctx, ext);
}
0
[ "CWE-665" ]
linux
ad9f151e560b016b6ad3280b48e42fa11e1a5440
49,846,928,762,703,040,000,000,000,000,000,000,000
13
netfilter: nf_tables: initialize set before expression setup nft_set_elem_expr_alloc() needs an initialized set if expression sets on the NFT_EXPR_GC flag. Move set fields initialization before expression setup. [4512935.019450] ================================================================== [4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532 [4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48 [...] [4512935.019502] Call Trace: [4512935.019505] dump_stack+0x89/0xb4 [4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019560] kasan_report.cold.12+0x5f/0xd8 [4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables] Reported-by: [email protected] Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition") Signed-off-by: Pablo Neira Ayuso <[email protected]>
static void TIFFErrors(const char *module,const char *format,va_list error)
{
  char message[MaxTextExtent];
  ExceptionInfo *exception;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  (void) vsnprintf(message,MaxTextExtent-2,format,error);
#else
  (void) vsprintf(message,format,error);
#endif
  message[MaxTextExtent-2]='\0';
  (void) ConcatenateMagickString(message,".",MaxTextExtent);
  exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception);
  if (exception != (ExceptionInfo *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),CoderError,message,
      "`%s'",module);
}
0
[ "CWE-125" ]
ImageMagick6
d8d844c6f23f4d90d8fe893fe9225dd78fc1e6ef
317,779,246,392,134,400,000,000,000,000,000,000,000
20
https://github.com/ImageMagick/ImageMagick/issues/1532
int vt_do_kdskbmeta(int console, unsigned int arg)
{
    struct kbd_struct *kb = kbd_table + console;
    int ret = 0;
    unsigned long flags;

    spin_lock_irqsave(&kbd_event_lock, flags);
    switch(arg) {
    case K_METABIT:
        clr_vc_kbd_mode(kb, VC_META);
        break;
    case K_ESCPREFIX:
        set_vc_kbd_mode(kb, VC_META);
        break;
    default:
        ret = -EINVAL;
    }
    spin_unlock_irqrestore(&kbd_event_lock, flags);
    return ret;
}
0
[ "CWE-416" ]
linux
6ca03f90527e499dd5e32d6522909e2ad390896b
97,769,438,635,676,230,000,000,000,000,000,000,000
20
vt: keyboard, simplify vt_kdgkbsent Use 'strlen' of the string, add one for NUL terminator and simply do 'copy_to_user' instead of the explicit 'for' loop. This makes the KDGKBSENT case more compact. The only thing we need to take care about is NULL 'func_table[i]'. Use an empty string in that case. The original check for overflow could never trigger as the func_buf strings are always shorter or equal to 'struct kbsentry's. Cc: <[email protected]> Signed-off-by: Jiri Slaby <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
PHP_FUNCTION(getimagesize)
{
    php_getimagesize_from_any(INTERNAL_FUNCTION_PARAM_PASSTHRU, FROM_PATH);
}
0
[]
php-src
87829c09a1d9e39bee994460d7ccf19dd20eda14
250,892,124,863,195,000,000,000,000,000,000,000,000
4
Fix #70052: getimagesize() fails for very large and very small WBMP Very large WBMP (width or height greater than 2**31-1) cause an overflow and circumvent the size limitation of 2048x2048 px. Very small WBMP (less than 12 bytes) cause a read error and are not recognized. This patch fixes both bugs.
void SSL::set_session(SSL_SESSION* s)
{
    if (getSecurity().GetContext()->GetSessionCacheOff())
        return;

    if (s && GetSessions().lookup(s->GetID(), &secure_.use_resume())) {
        secure_.set_resuming(true);
        crypto_.use_certManager().setPeerX509(s->GetPeerX509());
    }
}
0
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
289,092,591,302,010,460,000,000,000,000,000,000,000
10
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
onig_print_statistics(FILE* f)
{
  int i;
  fprintf(f, " count prev time\n");
  for (i = 0; OnigOpInfo[i].opcode >= 0; i++) {
    fprintf(f, "%8d: %8d: %10ld: %s\n",
            OpCounter[i], OpPrevCounter[i], OpTime[i], OnigOpInfo[i].name);
  }
  fprintf(f, "\nmax stack depth: %d\n", MaxStackDepth);
}
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
177,110,906,900,618,360,000,000,000,000,000,000,000
10
onig-5.9.2