Dataset columns: code (string, lengths 23 to 2.05k) · label_name (string, lengths 6 to 7) · label (int64, values 0 to 37)
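Each row below carries three fields matching this schema: a flattened C source snippet (code), the associated CWE identifier (label_name), and an integer class index (label). As a minimal sketch of how such rows might be consumed with the Hugging Face datasets library, assuming the data is published as a standard dataset — the path "user/cwe-labeled-c-functions" is a hypothetical placeholder, not the actual dataset name:

# Minimal sketch: iterate (code, label_name, label) rows of a CWE-labeled
# code dataset. The dataset path is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/cwe-labeled-c-functions", split="train")  # hypothetical path

for row in ds.select(range(3)):
    snippet = row["code"]        # flattened C source of one function
    cwe = row["label_name"]      # e.g. "CWE-119"
    label = row["label"]         # integer class index in [0, 37]
    print(f"{cwe} (class {label}): {snippet[:60]}...")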
header_gets (SF_PRIVATE *psf, char *ptr, int bufsize) { int k ; for (k = 0 ; k < bufsize - 1 ; k++) { if (psf->headindex < psf->headend) { ptr [k] = psf->header [psf->headindex] ; psf->headindex ++ ; } else { psf->headend += psf_fread (psf->header + psf->headend, 1, 1, psf) ; ptr [k] = psf->header [psf->headindex] ; psf->headindex = psf->headend ; } ; if (ptr [k] == '\n') break ; } ; ptr [k] = 0 ; return k ; } /* header_gets */
CWE-119
26
int ieee80211_radiotap_iterator_init( struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) { /* Linux only supports version 0 radiotap format */ if (radiotap_header->it_version) return -EINVAL; /* sanity check for allowed length and radiotap length field */ if (max_length < get_unaligned_le16(&radiotap_header->it_len)) return -EINVAL; iterator->_rtheader = radiotap_header; iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len); iterator->_arg_index = 0; iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header); iterator->_reset_on_ext = 0; iterator->_next_bitmap = &radiotap_header->it_present; iterator->_next_bitmap++; iterator->_vns = vns; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; /* find payload start allowing for extended bitmap(s) */ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { while (get_unaligned_le32(iterator->_arg) & (1 << IEEE80211_RADIOTAP_EXT)) { iterator->_arg += sizeof(uint32_t); /* * check for insanity where the present bitmaps * keep claiming to extend up to or even beyond the * stated radiotap header length */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; } iterator->_arg += sizeof(uint32_t); /* * no need to check again for blowing past stated radiotap * header length, because ieee80211_radiotap_iterator_next * checks it before it is dereferenced */ } iterator->this_arg = iterator->_arg; /* we are all initialized happily */ return 0; }
CWE-119
26
set_umask(const char *optarg) { long umask_long; mode_t umask_val; char *endptr; umask_long = strtoll(optarg, &endptr, 0); if (*endptr || umask_long < 0 || umask_long & ~0777L) { fprintf(stderr, "Invalid --umask option %s", optarg); return; } umask_val = umask_long & 0777; umask(umask_val); umask_cmdline = true; return umask_val; }
CWE-200
10
find_link_ref(struct link_ref **references, uint8_t *name, size_t length) { unsigned int hash = hash_link_ref(name, length); struct link_ref *ref = NULL; ref = references[hash % REF_TABLE_SIZE]; while (ref != NULL) { if (ref->id == hash) return ref; ref = ref->next; } return NULL; }
CWE-327
3
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; msg->msg_namelen = 0; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(iocb, sock, msg, len, flags); }
CWE-20
0
static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof) { char *ksep, *vsep, *val; size_t klen, vlen; size_t new_vlen; if (var->ptr >= var->end) { return 0; } vsep = memchr(var->ptr, '&', var->end - var->ptr); if (!vsep) { if (!eof) { return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen)) { php_register_variable_safe(var->ptr, val, new_vlen, arr); } efree(val); var->ptr = vsep + (vsep != var->end); return 1; }
CWE-400
2
mm_answer_pam_init_ctx(int sock, Buffer *m) { debug3("%s", __func__); authctxt->user = buffer_get_string(m, NULL); sshpam_ctxt = (sshpam_device.init_ctx)(authctxt); sshpam_authok = NULL; buffer_clear(m); if (sshpam_ctxt != NULL) { monitor_permit(mon_dispatch, MONITOR_REQ_PAM_FREE_CTX, 1); buffer_put_int(m, 1); } else { buffer_put_int(m, 0); } mm_request_send(sock, MONITOR_ANS_PAM_INIT_CTX, m); return (0); }
CWE-20
0
void headerMergeLegacySigs(Header h, Header sigh) { HeaderIterator hi; struct rpmtd_s td; hi = headerInitIterator(sigh); for (; headerNext(hi, &td); rpmtdFreeData(&td)) { switch (td.tag) { /* XXX Translate legacy signature tag values. */ case RPMSIGTAG_SIZE: td.tag = RPMTAG_SIGSIZE; break; case RPMSIGTAG_PGP: td.tag = RPMTAG_SIGPGP; break; case RPMSIGTAG_MD5: td.tag = RPMTAG_SIGMD5; break; case RPMSIGTAG_GPG: td.tag = RPMTAG_SIGGPG; break; case RPMSIGTAG_PGP5: td.tag = RPMTAG_SIGPGP5; break; case RPMSIGTAG_PAYLOADSIZE: td.tag = RPMTAG_ARCHIVESIZE; break; case RPMSIGTAG_FILESIGNATURES: td.tag = RPMTAG_FILESIGNATURES; break; case RPMSIGTAG_FILESIGNATURELENGTH: td.tag = RPMTAG_FILESIGNATURELENGTH; break; case RPMSIGTAG_VERITYSIGNATURES: case RPMSIGTAG_VERITYSIGNATUREALGO: case RPMSIGTAG_SHA1: case RPMSIGTAG_SHA256: case RPMSIGTAG_DSA: case RPMSIGTAG_RSA: default: if (!(td.tag >= HEADER_SIGBASE && td.tag < HEADER_TAGBASE)) continue; break; } if (!headerIsEntry(h, td.tag)) { switch (td.type) { case RPM_NULL_TYPE: continue; break; case RPM_CHAR_TYPE: case RPM_INT8_TYPE: case RPM_INT16_TYPE: case RPM_INT32_TYPE: case RPM_INT64_TYPE: if (td.count != 1) continue; break; case RPM_STRING_TYPE: case RPM_STRING_ARRAY_TYPE: case RPM_BIN_TYPE: if (td.count >= 16*1024) continue; break; case RPM_I18NSTRING_TYPE: continue; break; } (void) headerPut(h, &td, HEADERPUT_DEFAULT); } } headerFreeIterator(hi); }
CWE-345
22
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); int size = decode_access_size(regs, insn); int orig_asi, asi; current_thread_info()->kern_una_regs = regs; current_thread_info()->kern_una_insn = insn; orig_asi = asi = decode_asi(insn, regs); /* If this is a {get,put}_user() on an unaligned userspace pointer, * just signal a fault and do not log the event. */ if (asi == ASI_AIUS) { kernel_mna_trap_fault(0); return; } log_unaligned(regs); if (!ok_for_kernel(insn) || dir == both) { printk("Unsupported unaligned load/store trap for kernel " "at <%016lx>.\n", regs->tpc); unaligned_panic("Kernel does fpu/atomic " "unaligned load/store.", regs); kernel_mna_trap_fault(0); } else { unsigned long addr, *reg_addr; int err; addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: case ASI_AIUSL: case ASI_PL: case ASI_SL: case ASI_PNFL: case ASI_SNFL: asi &= ~0x08; break; } switch (dir) { case load: reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); err = do_int_load(reg_addr, size, (unsigned long *) addr, decode_signedness(insn), asi); if (likely(!err) && unlikely(asi != orig_asi)) { unsigned long val_in = *reg_addr; switch (size) { case 2: val_in = swab16(val_in); break; case 4: val_in = swab32(val_in); break; case 8: val_in = swab64(val_in); break; case 16: default: BUG(); break; } *reg_addr = val_in; } break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs, asi, orig_asi); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... */ } if (unlikely(err)) kernel_mna_trap_fault(1); else advance(regs); } }
CWE-400
2
void lpc546xxEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } }
CWE-20
0
nautilus_file_mark_desktop_file_trusted (GFile *file, GtkWindow *parent_window, gboolean interactive, NautilusOpCallback done_callback, gpointer done_callback_data) { GTask *task; MarkTrustedJob *job; job = op_job_new (MarkTrustedJob, parent_window); job->file = g_object_ref (file); job->interactive = interactive; job->done_callback = done_callback; job->done_callback_data = done_callback_data; task = g_task_new (NULL, NULL, mark_trusted_task_done, job); g_task_set_task_data (task, job, NULL); g_task_run_in_thread (task, mark_trusted_task_thread_func); g_object_unref (task); }
CWE-20
0
check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
CWE-119
26
sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */
CWE-119
26
static int misaligned_fpu_store(struct pt_regs *regs, __u32 opcode, int displacement_not_indexed, int width_shift, int do_paired_load) { /* Return -1 for a fault, 0 for OK */ int error; int srcreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) { return error; } perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; /* Initialise these to NaNs. */ __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { return -1; } /* 'current' may be the current owner of the FPU state, so context switch the registers into memory so they can be indexed by register number. */ if (last_task_used_math == current) { enable_fpu(); save_fpu(current); disable_fpu(); last_task_used_math = NULL; regs->sr |= SR_FD; } switch (width_shift) { case 2: buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; break; case 3: if (do_paired_load) { buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; } else { #if defined(CONFIG_CPU_LITTLE_ENDIAN) bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg]; buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; #else buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; #endif } break; default: printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } *(__u32*) &buffer = buflo; *(1 + (__u32*) &buffer) = bufhi; if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { return -1; /* fault */ } return 0; } else { die ("Misaligned FPU load inside kernel", regs, 0); return -1; } }
CWE-400
2
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int ret; int copylen; ret = -EOPNOTSUPP; if (m->msg_flags&MSG_OOB) goto read_error; m->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; copylen = skb->len; if (len < copylen) { m->msg_flags |= MSG_TRUNC; copylen = len; } ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); if (ret) goto out_free; ret = (flags & MSG_TRUNC) ? skb->len : copylen; out_free: skb_free_datagram(sk, skb); caif_check_flow_release(sk); return ret; read_error: return ret; }
CWE-20
0
static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct sk_buff *skb = NULL; struct sockaddr_pn sa; int rval = -EOPNOTSUPP; int copylen; if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL| MSG_CMSG_COMPAT)) goto out_nofree; if (addr_len) *addr_len = sizeof(sa); skb = skb_recv_datagram(sk, flags, noblock, &rval); if (skb == NULL) goto out_nofree; pn_skb_get_src_sockaddr(skb, &sa); copylen = skb->len; if (len < copylen) { msg->msg_flags |= MSG_TRUNC; copylen = len; } rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen); if (rval) { rval = -EFAULT; goto out; } rval = (flags & MSG_TRUNC) ? skb->len : copylen; if (msg->msg_name != NULL) memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn)); out: skb_free_datagram(sk, skb); out_nofree: return rval; }
CWE-20
0
static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); strlcpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); strlcpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) goto nla_put_failure; if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; strlcpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto nla_put_failure; goto out; } if (alg->cra_type && alg->cra_type->report) { if (alg->cra_type->report(skb, alg)) goto nla_put_failure; goto out; } switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { case CRYPTO_ALG_TYPE_CIPHER: if (crypto_report_cipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_COMPRESS: if (crypto_report_comp(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_ACOMPRESS: if (crypto_report_acomp(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_AKCIPHER: if (crypto_report_akcipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_KPP: if (crypto_report_kpp(skb, alg)) goto nla_put_failure; break; } out: return 0; nla_put_failure: return -EMSGSIZE; }
CWE-200
10
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial) { cac_private_data_t * priv = CAC_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL); if (card->serialnr.len) { *serial = card->serialnr; SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } if (priv->cac_id_len) { serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR); memcpy(serial->value, priv->cac_id, priv->cac_id_len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND); }
CWE-119
26
static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) { return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; }
CWE-119
26
static int pfkey_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *skb; int copied, err; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; msg->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; }
CWE-20
0
swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; horDiff16(tif, cp0, cc); TIFFSwabArrayOfShort(wp, wc); }
CWE-119
26
int user_update(struct key *key, struct key_preparsed_payload *prep) { struct user_key_payload *upayload, *zap; size_t datalen = prep->datalen; int ret; ret = -EINVAL; if (datalen <= 0 || datalen > 32767 || !prep->data) goto error; /* construct a replacement payload */ ret = -ENOMEM; upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL); if (!upayload) goto error; upayload->datalen = datalen; memcpy(upayload->data, prep->data, datalen); /* check the quota and attach the new data */ zap = upayload; ret = key_payload_reserve(key, datalen); if (ret == 0) { /* attach the new data, displacing the old */ zap = key->payload.data[0]; rcu_assign_keypointer(key, upayload); key->expiry = 0; } if (zap) kfree_rcu(zap, rcu); error: return ret; }
CWE-269
6
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); msg->msg_namelen = 0; return 0; } len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; }
CWE-20
0
static RList* sections(RBinFile* bf) { RList* ret = NULL; RBinSection* sect = NULL; psxexe_header psxheader = {0}; ut64 sz = 0; if (!(ret = r_list_new ())) { return NULL; } if (!(sect = R_NEW0 (RBinSection))) { r_list_free (ret); return NULL; } if (r_buf_fread_at (bf->buf, 0, (ut8*)&psxheader, "8c17i", 1) < sizeof (psxexe_header)) { eprintf ("Truncated Header\n"); free (sect); r_list_free (ret); return NULL; } sz = r_buf_size (bf->buf); sect->name = strdup ("TEXT"); sect->paddr = PSXEXE_TEXTSECTION_OFFSET; sect->size = sz - PSXEXE_TEXTSECTION_OFFSET; sect->vaddr = psxheader.t_addr; sect->vsize = psxheader.t_size; sect->perm = R_PERM_RX; sect->add = true; sect->has_strings = true; r_list_append (ret, sect); return ret; }
CWE-400
2
static void settings_changed(struct btd_adapter *adapter, uint32_t settings) { uint32_t changed_mask; changed_mask = adapter->current_settings ^ settings; adapter->current_settings = settings; adapter->pending_settings &= ~changed_mask; DBG("Changed settings: 0x%08x", changed_mask); DBG("Pending settings: 0x%08x", adapter->pending_settings); if (changed_mask & MGMT_SETTING_POWERED) { g_dbus_emit_property_changed(dbus_conn, adapter->path, ADAPTER_INTERFACE, "Powered"); if (adapter->current_settings & MGMT_SETTING_POWERED) { adapter_start(adapter); } else { adapter_stop(adapter); if (powering_down) { adapter_remaining--; if (!adapter_remaining) btd_exit(); } } } if ((changed_mask & MGMT_SETTING_LE) && btd_adapter_get_powered(adapter) && (adapter->current_settings & MGMT_SETTING_LE)) trigger_passive_scanning(adapter); if (changed_mask & MGMT_SETTING_DISCOVERABLE) { g_dbus_emit_property_changed(dbus_conn, adapter->path, ADAPTER_INTERFACE, "Discoverable"); store_adapter_info(adapter); btd_adv_manager_refresh(adapter->adv_manager); } if (changed_mask & MGMT_SETTING_BONDABLE) { g_dbus_emit_property_changed(dbus_conn, adapter->path, ADAPTER_INTERFACE, "Pairable"); trigger_pairable_timeout(adapter); } }
CWE-863
11
set_cs_start(char *line) { char *p, *q, *r; if ((p = strstr(line, "string currentfile"))) { /* enforce presence of `readstring' -- 5/29/99 */ if (!strstr(line, "readstring")) return; /* locate the name of the charstring start command */ *p = '\0'; /* damage line[] */ q = strrchr(line, '/'); if (q) { r = cs_start; ++q; while (!isspace(*q) && *q != '{') *r++ = *q++; *r = '\0'; } *p = 's'; /* repair line[] */ } }
CWE-119
26
swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; horDiff32(tif, cp0, cc); TIFFSwabArrayOfLong(wp, wc); }
CWE-119
26
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); u64 amount = 0, min_amount, expires; int expires_seq; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { start_cfs_bandwidth(cfs_b); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); cfs_b->runtime -= amount; cfs_b->idle = 0; } } expires_seq = cfs_b->expires_seq; expires = cfs_b->runtime_expires; raw_spin_unlock(&cfs_b->lock); cfs_rq->runtime_remaining += amount; /* * we may have advanced our local expiration to account for allowed * spread between our sched_clock and the one on which runtime was * issued. */ if (cfs_rq->expires_seq != expires_seq) { cfs_rq->expires_seq = expires_seq; cfs_rq->runtime_expires = expires; } return cfs_rq->runtime_remaining > 0; }
CWE-400
2
void jpc_qmfb_join_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = joinbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int hstartcol; /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_alloc3(bufsize, JPC_QMFB_COLGRPSIZE, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } hstartcol = (numrows + 1 - parity) >> 1; /* Save the samples from the lowpass channel. */ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } srcptr += stride; dstptr += JPC_QMFB_COLGRPSIZE; } /* Copy the samples from the highpass channel into place. */ srcptr = &a[hstartcol * stride]; dstptr = &a[(1 - parity) * stride]; n = numrows - hstartcol; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += 2 * stride; srcptr += stride; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity * stride]; n = hstartcol; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += 2 * stride; srcptr += JPC_QMFB_COLGRPSIZE; } /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } }
CWE-119
26
decode_sequence(const uint8_t *asn1, size_t len, const struct seq_info *seq, void *val) { krb5_error_code ret; const uint8_t *contents; size_t i, j, clen; taginfo t; assert(seq->n_fields > 0); for (i = 0; i < seq->n_fields; i++) { if (len == 0) break; ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len); if (ret) goto error; /* * Find the applicable sequence field. This logic is a little * oversimplified; we could match an element to an optional extensible * choice or optional stored-DER type when we ought to match a * subsequent non-optional field. But it's unwise and (hopefully) very * rare for ASN.1 modules to require such precision. */ for (; i < seq->n_fields; i++) { if (check_atype_tag(seq->fields[i], &t)) break; ret = omit_atype(seq->fields[i], val); if (ret) goto error; } /* We currently model all sequences as extensible. We should consider * changing this before making the encoder visible to plugins. */ if (i == seq->n_fields) break; ret = decode_atype(&t, contents, clen, seq->fields[i], val); if (ret) goto error; } /* Initialize any fields in the C object which were not accounted for in * the sequence. Error out if any of them aren't optional. */ for (; i < seq->n_fields; i++) { ret = omit_atype(seq->fields[i], val); if (ret) goto error; } return 0; error: /* Free what we've decoded so far. Free pointers in a second pass in * case multiple fields refer to the same pointer. */ for (j = 0; j < i; j++) free_atype(seq->fields[j], val); for (j = 0; j < i; j++) free_atype_ptr(seq->fields[j], val); return ret; }
CWE-674
28
error_t enc624j600SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t length; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > 1536) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the link is up before transmitting the frame if(!interface->linkState) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Drop current packet return NO_ERROR; } //Ensure that the transmitter is ready to send if(enc624j600ReadReg(interface, ENC624J600_REG_ECON1) & ECON1_TXRTS) { return ERROR_FAILURE; } //Point to the SRAM buffer enc624j600WriteReg(interface, ENC624J600_REG_EGPWRPT, ENC624J600_TX_BUFFER_START); //Copy the packet to the SRAM buffer enc624j600WriteBuffer(interface, ENC624J600_CMD_WGPDATA, buffer, offset); //Program ETXST to the start address of the packet enc624j600WriteReg(interface, ENC624J600_REG_ETXST, ENC624J600_TX_BUFFER_START); //Program ETXLEN with the length of data copied to the memory enc624j600WriteReg(interface, ENC624J600_REG_ETXLEN, length); //Clear TXIF and TXABTIF interrupt flags enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_TXIF | EIR_TXABTIF); //Set the TXRTS bit to initiate transmission enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_TXRTS); //Successful processing return NO_ERROR; }
CWE-20
0
parse_netscreen_rec_hdr(struct wtap_pkthdr *phdr, const char *line, char *cap_int, gboolean *cap_dir, char *cap_dst, int *err, gchar **err_info) { int sec; int dsec, pkt_len; char direction[2]; char cap_src[13]; phdr->rec_type = REC_TYPE_PACKET; phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN; if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9d:%12s->%12s/", &sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: Can't parse packet-header"); return -1; } *cap_dir = (direction[0] == 'o' ? NETSCREEN_EGRESS : NETSCREEN_INGRESS); phdr->ts.secs = sec; phdr->ts.nsecs = dsec * 100000000; phdr->len = pkt_len; return pkt_len; }
CWE-20
0
static void scsi_read_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); int n; if (r->req.aiocb != NULL) { r->req.aiocb = NULL; bdrv_acct_done(s->bs, &r->acct); } if (ret) { if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_READ)) { return; } } DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->iov.iov_len); n = r->iov.iov_len / 512; r->sector += n; r->sector_count -= n; scsi_req_data(&r->req, r->iov.iov_len); }
CWE-119
26
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, struct sockaddr_storage *kern_address, int mode) { int tot_len; if (kern_msg->msg_namelen) { if (mode == VERIFY_READ) { int err = move_addr_to_kernel(kern_msg->msg_name, kern_msg->msg_namelen, kern_address); if (err < 0) return err; } kern_msg->msg_name = kern_address; } else kern_msg->msg_name = NULL; tot_len = iov_from_user_compat_to_kern(kern_iov, (struct compat_iovec __user *)kern_msg->msg_iov, kern_msg->msg_iovlen); if (tot_len >= 0) kern_msg->msg_iov = kern_iov; return tot_len; }
CWE-20
0
static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; struct tipc_link_info link_info; int err; if (!attrs[TIPC_NLA_LINK]) return -EINVAL; err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); if (err) return err; link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME])); return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); }
CWE-200
10
static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); }
CWE-119
26
void ip4_datagram_release_cb(struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ip_options_rcu *inet_opt; __be32 daddr = inet->inet_daddr; struct flowi4 fl4; struct rtable *rt; if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0)) return; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (!IS_ERR(rt)) __sk_dst_set(sk, &rt->dst); rcu_read_unlock(); }
CWE-362
18
__releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit && !edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); assoc_array_cancel_edit(edit); } up_write(&keyring->sem); }
CWE-119
26
int prepareForShutdown() { redisLog(REDIS_WARNING,"User requested shutdown, saving DB..."); /* Kill the saving child if there is a background saving in progress. We want to avoid race conditions, for instance our saving child may overwrite the synchronous saving did by SHUTDOWN. */ if (server.bgsavechildpid != -1) { redisLog(REDIS_WARNING,"There is a live saving child. Killing it!"); kill(server.bgsavechildpid,SIGKILL); rdbRemoveTempFile(server.bgsavechildpid); } if (server.appendonly) { /* Append only file: fsync() the AOF and exit */ aof_fsync(server.appendfd); if (server.vm_enabled) unlink(server.vm_swap_file); } else if (server.saveparamslen > 0) { /* Snapshotting. Perform a SYNC SAVE and exit */ if (rdbSave(server.dbfilename) != REDIS_OK) { /* Ooops.. error saving! The best we can do is to continue * operating. Note that if there was a background saving process, * in the next cron() Redis will be notified that the background * saving aborted, handling special stuff like slaves pending for * synchronization... */ redisLog(REDIS_WARNING,"Error trying to save the DB, can't exit"); return REDIS_ERR; } } else { redisLog(REDIS_WARNING,"Not saving DB."); } if (server.daemonize) unlink(server.pidfile); redisLog(REDIS_WARNING,"Server exit now, bye bye..."); return REDIS_OK; }
CWE-20
0
wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && !ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && !ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps <= ep? 0 : -1); }
CWE-20
0
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
CWE-119
26
static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int idx; if (!apic || !apic->vapic_addr) return; idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); srcu_read_unlock(&vcpu->kvm->srcu, idx); }
CWE-20
0
static VALUE cState_array_nl_set(VALUE self, VALUE array_nl) { unsigned long len; GET_STATE(self); Check_Type(array_nl, T_STRING); len = RSTRING_LEN(array_nl); if (len == 0) { if (state->array_nl) { ruby_xfree(state->array_nl); state->array_nl = NULL; } } else { if (state->array_nl) ruby_xfree(state->array_nl); state->array_nl = strdup(RSTRING_PTR(array_nl)); state->array_nl_len = len; } return Qnil; }
CWE-119
26
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size) { WebPContext *s = avctx->priv_data; AVPacket pkt; int ret; if (!s->initialized) { ff_vp8_decode_init(avctx); s->initialized = 1; if (s->has_alpha) avctx->pix_fmt = AV_PIX_FMT_YUVA420P; } s->lossless = 0; if (data_size > INT_MAX) { av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n"); return AVERROR_PATCHWELCOME; } av_init_packet(&pkt); pkt.data = data_start; pkt.size = data_size; ret = ff_vp8_decode_frame(avctx, p, got_frame, &pkt); if (ret < 0) return ret; update_canvas_size(avctx, avctx->width, avctx->height); if (s->has_alpha) { ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data, s->alpha_data_size); if (ret < 0) return ret; } return ret; }
CWE-119
26
irc_ctcp_dcc_filename_without_quotes (const char *filename) { int length; length = strlen (filename); if (length > 0) { if ((filename[0] == '\"') && (filename[length - 1] == '\"')) return weechat_strndup (filename + 1, length - 2); } return strdup (filename); }
CWE-119
26
static inline key_ref_t __key_update(key_ref_t key_ref, struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; }
CWE-20
0
static int setup_config(int type) { int rv; rv = read_config(cl.configfile, type); if (rv < 0) goto out; if (is_auth_req()) { rv = read_authkey(); if (rv < 0) goto out; #if HAVE_LIBGCRYPT if (!gcry_check_version(NULL)) { log_error("gcry_check_version"); rv = -ENOENT; goto out; } gcry_control(GCRYCTL_DISABLE_SECMEM, 0); gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0); #endif } /* Set "local" pointer, ignoring errors. */ if (cl.type == DAEMON && cl.site[0]) { if (!find_site_by_name(cl.site, &local, 1)) { log_error("Cannot find \"%s\" in the configuration.", cl.site); return -EINVAL; } local->local = 1; } else find_myself(NULL, type == CLIENT || type == GEOSTORE); rv = check_config(type); if (rv < 0) goto out; /* Per default the PID file name is derived from the * configuration name. */ if (!cl.lockfile[0]) { snprintf(cl.lockfile, sizeof(cl.lockfile)-1, "%s/%s.pid", BOOTH_RUN_DIR, booth_conf->name); } out: return rv; }
CWE-287
4
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { int err; int noblock; struct vmci_datagram *dg; size_t payload_len; struct sk_buff *skb; noblock = flags & MSG_DONTWAIT; if (flags & MSG_OOB || flags & MSG_ERRQUEUE) return -EOPNOTSUPP; msg->msg_namelen = 0; /* Retrieve the head sk_buff from the socket's receive queue. */ err = 0; skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); if (err) return err; if (!skb) return -EAGAIN; dg = (struct vmci_datagram *)skb->data; if (!dg) /* err is 0, meaning we read zero bytes. */ goto out; payload_len = dg->payload_size; /* Ensure the sk_buff matches the payload size claimed in the packet. */ if (payload_len != skb->len - sizeof(*dg)) { err = -EINVAL; goto out; } if (payload_len > len) { payload_len = len; msg->msg_flags |= MSG_TRUNC; } /* Place the datagram payload in the user's iovec. */ err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov, payload_len); if (err) goto out; if (msg->msg_name) { struct sockaddr_vm *vm_addr; /* Provide the address of the sender. */ vm_addr = (struct sockaddr_vm *)msg->msg_name; vsock_addr_init(vm_addr, dg->src.context, dg->src.resource); msg->msg_namelen = sizeof(*vm_addr); } err = payload_len; out: skb_free_datagram(&vsk->sk, skb); return err; }
CWE-20
0
int CLASS parse_jpeg(int offset) { int len, save, hlen, mark; fseek(ifp, offset, SEEK_SET); if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0; while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) { order = 0x4d4d; len = get2() - 2; save = ftell(ifp); if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9) { fgetc(ifp); raw_height = get2(); raw_width = get2(); } order = get2(); hlen = get4(); if (get4() == 0x48454150) /* "HEAP" */ { #ifdef LIBRAW_LIBRARY_BUILD imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; #endif parse_ciff(save + hlen, len - hlen, 0); } if (parse_tiff(save + 6)) apply_tiff(); fseek(ifp, save + len, SEEK_SET); } return 1; }
CWE-119
26
static void perf_event_comm_output(struct perf_event *event, struct perf_comm_event *comm_event) { struct perf_output_handle handle; struct perf_sample_data sample; int size = comm_event->event_id.header.size; int ret; perf_event_header__init_id(&comm_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, comm_event->event_id.header.size, 0, 0); if (ret) goto out; comm_event->event_id.pid = perf_event_pid(event, comm_event->task); comm_event->event_id.tid = perf_event_tid(event, comm_event->task); perf_output_put(&handle, comm_event->event_id); __output_copy(&handle, comm_event->comm, comm_event->comm_size); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); out: comm_event->event_id.header.size = size; }
CWE-400
2
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, unsigned long addr, unsigned long length) { unsigned long result = 0; switch (ioapic->ioregsel) { case IOAPIC_REG_VERSION: result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) | (IOAPIC_VERSION_ID & 0xff)); break; case IOAPIC_REG_APIC_ID: case IOAPIC_REG_ARB_ID: result = ((ioapic->id & 0xf) << 24); break; default: { u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; ASSERT(redir_index < IOAPIC_NUM_PINS); redir_content = ioapic->redirtbl[redir_index].bits; result = (ioapic->ioregsel & 0x1) ? (redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff; break; } } return result; }
CWE-20
0
static int dccp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { struct dccp_hdr _dh, *dh; unsigned int dccp_len = skb->len - dataoff; unsigned int cscov; const char *msg; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh); if (dh == NULL) { msg = "nf_ct_dccp: short packet "; goto out_invalid; } if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || dh->dccph_doff * 4 > dccp_len) { msg = "nf_ct_dccp: truncated/malformed packet "; goto out_invalid; } cscov = dccp_len; if (dh->dccph_cscov) { cscov = (dh->dccph_cscov - 1) * 4; if (cscov > dccp_len) { msg = "nf_ct_dccp: bad checksum coverage "; goto out_invalid; } } if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP, pf)) { msg = "nf_ct_dccp: bad checksum "; goto out_invalid; } if (dh->dccph_type >= DCCP_PKT_INVALID) { msg = "nf_ct_dccp: reserved packet type "; goto out_invalid; } return NF_ACCEPT; out_invalid: if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg); return -NF_ACCEPT; }
CWE-20
0
fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count = cc; uint8 *cp = (uint8 *) cp0; uint8 *tmp = (uint8 *)_TIFFmalloc(cc); assert((cc%(bps*stride))==0); if (!tmp) return; while (count > stride) { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + cp[0]) & 0xff); cp++) count -= stride; } _TIFFmemcpy(tmp, cp0, cc); cp = (uint8 *) cp0; for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[bps * count + byte] = tmp[byte * wc + count]; #else cp[bps * count + byte] = tmp[(bps - byte - 1) * wc + count]; #endif } } _TIFFfree(tmp); }
CWE-119
26
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr, size_t sec_attr_len) { u8 *tmp; if (!sc_file_valid(file)) { return SC_ERROR_INVALID_ARGUMENTS; } if (sec_attr == NULL) { if (file->sec_attr != NULL) free(file->sec_attr); file->sec_attr = NULL; file->sec_attr_len = 0; return 0; } tmp = (u8 *) realloc(file->sec_attr, sec_attr_len); if (!tmp) { if (file->sec_attr) free(file->sec_attr); file->sec_attr = NULL; file->sec_attr_len = 0; return SC_ERROR_OUT_OF_MEMORY; } file->sec_attr = tmp; memcpy(file->sec_attr, sec_attr, sec_attr_len); file->sec_attr_len = sec_attr_len; return 0; }
CWE-119
26
static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = { 0x51 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, &d->dev->i2c_adap)) { info("Attached RS2000/TS2020!"); return 0; } info("Failed to attach RS2000/TS2020!"); return -EIO; }
CWE-119
26
static int __init sit_init(void) { int err; printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) { printk(KERN_INFO "sit init: Can't add protocol\n"); return -EAGAIN; } err = register_pernet_device(&sit_net_ops); if (err < 0) xfrm4_tunnel_deregister(&sit_handler, AF_INET6); return err; }
CWE-362
18
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, __be32 saddr, __be32 daddr, struct ip_options *opt) { struct inet_sock *inet = inet_sk(sk); struct rtable *rt = skb_rtable(skb); struct iphdr *iph; /* Build the IP header. */ skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; iph->ihl = 5; iph->tos = inet->tos; if (ip_dont_fragment(sk, &rt->dst)) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; iph->protocol = sk->sk_protocol; ip_select_ident(iph, &rt->dst, sk); if (opt && opt->optlen) { iph->ihl += opt->optlen>>2; ip_options_build(skb, opt, daddr, rt, 0); } skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; /* Send it out. */ return ip_local_out(skb); }
CWE-362
18
uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; }
CWE-119
26
void svhandler_flash_pgm_blk(void) { uint32_t beginAddr = _param_1; uint32_t data = _param_2; uint32_t length = _param_3; // Protect from overflow. if (beginAddr + length < beginAddr) return; // Do not allow firmware to erase bootstrap or bootloader sectors. if (((beginAddr >= BSTRP_FLASH_SECT_START) && (beginAddr <= (BSTRP_FLASH_SECT_START + BSTRP_FLASH_SECT_LEN - 1))) || (((beginAddr + length) >= BSTRP_FLASH_SECT_START) && ((beginAddr + length) <= (BSTRP_FLASH_SECT_START + BSTRP_FLASH_SECT_LEN - 1)))) { return; } if (((beginAddr >= BLDR_FLASH_SECT_START) && (beginAddr <= (BLDR_FLASH_SECT_START + 2 * BLDR_FLASH_SECT_LEN - 1))) || (((beginAddr + length) >= BLDR_FLASH_SECT_START) && ((beginAddr + length) <= (BLDR_FLASH_SECT_START + 2 * BLDR_FLASH_SECT_LEN - 1)))) { return; } // Unlock flash. flash_clear_status_flags(); flash_unlock(); // Flash write. flash_program(beginAddr, (uint8_t *)data, length); // Return flash status. _param_1 = !!flash_chk_status(); _param_2 = 0; _param_3 = 0; // Wait for any write operation to complete. flash_wait_for_last_operation(); // Disable writes to flash. FLASH_CR &= ~FLASH_CR_PG; // Lock flash register FLASH_CR |= FLASH_CR_LOCK; }
CWE-668
7
set_lenIV(char *line) { char *p = strstr(line, "/lenIV "); /* Allow lenIV to be negative. Thanks to Tom Kacvinsky <[email protected]> */ if (p && (isdigit(p[7]) || p[7] == '+' || p[7] == '-')) { lenIV = atoi(p + 7); } }
CWE-119
26
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == LL) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); return simulate_ll(regs, opcode); } if ((opcode & OPCODE) == SC) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); return simulate_sc(regs, opcode); } return -1; /* Must be something else ... */ }
CWE-400
2
static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) { ext3_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /*todo: use simple_strtoll with >32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { ext3_msg(sb, "error: invalid sb specification: %s", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; }
CWE-20
0
static noinline void key_gc_unused_keys(struct list_head *keys) { while (!list_empty(keys)) { struct key *key = list_entry(keys->next, struct key, graveyard_link); list_del(&key->graveyard_link); kdebug("- %u", key->serial); key_check(key); /* Throw away the key data */ if (key->type->destroy) key->type->destroy(key); security_key_free(key); /* deal with the user's key tracking and quota */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); key_user_put(key->user); kfree(key->description); #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC_X; #endif kmem_cache_free(key_jar, key); } }
CWE-20
0
static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { struct unix_sock *u = unix_sk(sk); msg->msg_namelen = 0; if (u->addr) { msg->msg_namelen = u->addr->len; memcpy(msg->msg_name, u->addr->name, u->addr->len); } }
CWE-20
0
static int ieee80211_fragment(struct ieee80211_tx_data *tx, struct sk_buff *skb, int hdrlen, int frag_threshold) { struct ieee80211_local *local = tx->local; struct ieee80211_tx_info *info; struct sk_buff *tmp; int per_fragm = frag_threshold - hdrlen - FCS_LEN; int pos = hdrlen + per_fragm; int rem = skb->len - hdrlen - per_fragm; if (WARN_ON(rem < 0)) return -EINVAL; /* first fragment was already added to queue by caller */ while (rem) { int fraglen = per_fragm; if (fraglen > rem) fraglen = rem; rem -= fraglen; tmp = dev_alloc_skb(local->tx_headroom + frag_threshold + tx->sdata->encrypt_headroom + IEEE80211_ENCRYPT_TAILROOM); if (!tmp) return -ENOMEM; __skb_queue_tail(&tx->skbs, tmp); skb_reserve(tmp, local->tx_headroom + tx->sdata->encrypt_headroom); /* copy control information */ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); info = IEEE80211_SKB_CB(tmp); info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); if (rem) info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; skb_copy_queue_mapping(tmp, skb); tmp->priority = skb->priority; tmp->dev = skb->dev; /* copy header and data */ memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen); pos += fraglen; } /* adjust first fragment's length */ skb->len = hdrlen + per_fragm; return 0; }
CWE-200
10
ossl_cipher_initialize(VALUE self, VALUE str) { EVP_CIPHER_CTX *ctx; const EVP_CIPHER *cipher; char *name; unsigned char dummy_key[EVP_MAX_KEY_LENGTH] = { 0 }; name = StringValueCStr(str); GetCipherInit(self, ctx); if (ctx) { ossl_raise(rb_eRuntimeError, "Cipher already inititalized!"); } AllocCipher(self, ctx); if (!(cipher = EVP_get_cipherbyname(name))) { ossl_raise(rb_eRuntimeError, "unsupported cipher algorithm (%"PRIsVALUE")", str); } /* * EVP_CipherInit_ex() allows to specify NULL to key and IV, however some * ciphers don't handle well (OpenSSL's bug). [Bug #2768] * * The EVP which has EVP_CIPH_RAND_KEY flag (such as DES3) allows * uninitialized key, but other EVPs (such as AES) does not allow it. * Calling EVP_CipherUpdate() without initializing key causes SEGV so we * set the data filled with "\0" as the key by default. */ if (EVP_CipherInit_ex(ctx, cipher, NULL, dummy_key, NULL, -1) != 1) ossl_raise(eCipherError, NULL); return self; }
CWE-326
9
static void xfrm6_tunnel_spi_fini(void) { kmem_cache_destroy(xfrm6_tunnel_spi_kmem); }
CWE-362
18
static void test_show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); }
CWE-119
26
static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); }
CWE-119
26
void xmlrpc_char_encode(char *outbuffer, const char *s1) { long unsigned int i; unsigned char c; char buf2[15]; mowgli_string_t *s = mowgli_string_create(); *buf2 = '\0'; *outbuffer = '\0'; if ((!(s1) || (*(s1) == '\0'))) { return; } for (i = 0; s1[i] != '\0'; i++) { c = s1[i]; if (c > 127) { snprintf(buf2, sizeof buf2, "&#%d;", c); s->append(s, buf2, strlen(buf2)); } else if (c == '&') { s->append(s, "&amp;", 5); } else if (c == '<') { s->append(s, "&lt;", 4); } else if (c == '>') { s->append(s, "&gt;", 4); } else if (c == '"') { s->append(s, "&quot;", 6); } else { s->append_char(s, c); } } memcpy(outbuffer, s->str, XMLRPC_BUFSIZE); }
CWE-119
26
static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, struct ib_udata *udata) { int ret = 0; struct hns_roce_ucontext *context; struct hns_roce_ib_alloc_ucontext_resp resp; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); resp.qp_tab_size = hr_dev->caps.num_qps; context = kmalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); ret = hns_roce_uar_alloc(hr_dev, &context->uar); if (ret) goto error_fail_uar_alloc; if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { INIT_LIST_HEAD(&context->page_list); mutex_init(&context->page_mutex); } ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (ret) goto error_fail_copy_to_udata; return &context->ibucontext; error_fail_copy_to_udata: hns_roce_uar_free(hr_dev, &context->uar); error_fail_uar_alloc: kfree(context); return ERR_PTR(ret); }
CWE-665
32
static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
CWE-119
26
static int checkout_verify_paths( git_repository *repo, int action, git_diff_delta *delta) { unsigned int flags = GIT_PATH_REJECT_WORKDIR_DEFAULTS; if (action & CHECKOUT_ACTION__REMOVE) { if (!git_path_isvalid(repo, delta->old_file.path, delta->old_file.mode, flags)) { git_error_set(GIT_ERROR_CHECKOUT, "cannot remove invalid path '%s'", delta->old_file.path); return -1; } } if (action & ~CHECKOUT_ACTION__REMOVE) { if (!git_path_isvalid(repo, delta->new_file.path, delta->new_file.mode, flags)) { git_error_set(GIT_ERROR_CHECKOUT, "cannot checkout to invalid path '%s'", delta->new_file.path); return -1; } } return 0; }
CWE-706
29
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
    static const struct ip6t_ip6 uncond;

    return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
CWE-119
26
parse_device(dev_t *pdev, struct archive *a, char *val)
{
#define MAX_PACK_ARGS 3
    unsigned long numbers[MAX_PACK_ARGS];
    char *p, *dev;
    int argc;
    pack_t *pack;
    dev_t result;
    const char *error = NULL;

    memset(pdev, 0, sizeof(*pdev));
    if ((dev = strchr(val, ',')) != NULL) {
        /*
         * Device's major/minor are given in a specified format.
         * Decode and pack it accordingly.
         */
        *dev++ = '\0';
        if ((pack = pack_find(val)) == NULL) {
            archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                "Unknown format `%s'", val);
            return ARCHIVE_WARN;
        }
        argc = 0;
        while ((p = la_strsep(&dev, ",")) != NULL) {
            if (*p == '\0') {
                archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                    "Missing number");
                return ARCHIVE_WARN;
            }
            numbers[argc++] = (unsigned long)mtree_atol(&p);
            if (argc > MAX_PACK_ARGS) {
                archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                    "Too many arguments");
                return ARCHIVE_WARN;
            }
        }
        if (argc < 2) {
            archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                "Not enough arguments");
            return ARCHIVE_WARN;
        }
        result = (*pack)(argc, numbers, &error);
        if (error != NULL) {
            archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                "%s", error);
            return ARCHIVE_WARN;
        }
    } else {
        /* file system raw value. */
        result = (dev_t)mtree_atol(&val);
    }
    *pdev = result;
    return ARCHIVE_OK;
#undef MAX_PACK_ARGS
}
CWE-119
26
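The CWE-119 label on the entry above lines up with the order of operations in the argument loop: numbers[argc++] stores the value first and only afterwards checks argc > MAX_PACK_ARGS, so a fourth field is written one slot past the array. A minimal sketch of checking the index before storing follows; the helper is hypothetical, only the array and limit mirror the entry.

/* Sketch of the bound check done before the write, not after. */
#define MAX_PACK_ARGS 3

static int push_number(unsigned long *numbers, int *argc, unsigned long v)
{
    if (*argc >= MAX_PACK_ARGS)    /* would overflow numbers[] */
        return -1;
    numbers[(*argc)++] = v;
    return 0;
}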
static int install_process_keyring(void)
{
    struct cred *new;
    int ret;

    new = prepare_creds();
    if (!new)
        return -ENOMEM;

    ret = install_process_keyring_to_cred(new);
    if (ret < 0) {
        abort_creds(new);
        return ret != -EEXIST ? ret : 0;
    }

    return commit_creds(new);
}
CWE-404
30
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                              pmd_t *pmd, unsigned int flags)
{
    int result;
    handle_t *handle = NULL;
    struct inode *inode = file_inode(vma->vm_file);
    struct super_block *sb = inode->i_sb;
    bool write = flags & FAULT_FLAG_WRITE;

    if (write) {
        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                    ext4_chunk_trans_blocks(inode, PMD_SIZE / PAGE_SIZE));
    }

    if (IS_ERR(handle))
        result = VM_FAULT_SIGBUS;
    else
        result = __dax_pmd_fault(vma, addr, pmd, flags,
                    ext4_get_block_dax, ext4_end_io_unwritten);

    if (write) {
        if (!IS_ERR(handle))
            ext4_journal_stop(handle);
        sb_end_pagefault(sb);
    }

    return result;
}
CWE-362
18
static pj_status_t STATUS_FROM_SSL_ERR(char *action, pj_ssl_sock_t *ssock,
                                       unsigned long err)
{
    int level = 0;
    int len = 0; //dummy

    ERROR_LOG("STATUS_FROM_SSL_ERR", err, ssock);
    level++;

    /* General SSL error, dig more from OpenSSL error queue */
    if (err == SSL_ERROR_SSL) {
        err = ERR_get_error();
        ERROR_LOG("STATUS_FROM_SSL_ERR", err, ssock);
    }

    ssock->last_err = err;
    return GET_STATUS_FROM_SSL_ERR(err);
}
CWE-362
18
rtadv_read (struct thread *thread)
{
    int sock;
    int len;
    u_char buf[RTADV_MSG_SIZE];
    struct sockaddr_in6 from;
    ifindex_t ifindex = 0;
    int hoplimit = -1;
    struct zebra_vrf *zvrf = THREAD_ARG (thread);

    sock = THREAD_FD (thread);
    zvrf->rtadv.ra_read = NULL;

    /* Register myself. */
    rtadv_event (zvrf, RTADV_READ, sock);

    len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit);

    if (len < 0)
    {
        zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno));
        return len;
    }

    rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id);

    return 0;
}
CWE-119
26
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                    struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    size_t copied;
    int err;

    BT_DBG("sock %p sk %p len %zu", sock, sk, len);

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb) {
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
            msg->msg_namelen = 0;
            return 0;
        }
        return err;
    }

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err == 0) {
        sock_recv_ts_and_drops(msg, sk, skb);

        if (bt_sk(sk)->skb_msg_name)
            bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
                                    &msg->msg_namelen);
        else
            msg->msg_namelen = 0;
    }

    skb_free_datagram(sk, skb);

    return err ? : copied;
}
CWE-20
0
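Several entries in this section (this one and the other recvmsg-style functions below) carry the CWE-20 label around address and length handling on the receive path. A recurring pitfall in that family is reporting an address length without guaranteeing that both the address buffer and the length are initialized on every return path. A standalone sketch of the defensive pattern follows; the types and names are simplified illustrations, not the kernel API.

#include <string.h>
#include <stddef.h>

/* Sketch: always initialize the reported address length, and only raise
 * it after the address buffer has actually been written. */
struct addr_out {
    char   data[16];
    size_t len;
};

static int receive_with_addr(struct addr_out *out, const char *peer, size_t peer_len)
{
    out->len = 0;                      /* safe default on every return path */
    if (peer == NULL)
        return 0;                      /* no address available */
    if (peer_len > sizeof(out->data))
        peer_len = sizeof(out->data);
    memcpy(out->data, peer, peer_len);
    out->len = peer_len;               /* set only after data is valid */
    return 0;
}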
static void Sp_search(js_State *J)
{
    js_Regexp *re;
    const char *text;
    Resub m;

    text = checkstring(J, 0);

    if (js_isregexp(J, 1))
        js_copy(J, 1);
    else if (js_isundefined(J, 1))
        js_newregexp(J, "", 0);
    else
        js_newregexp(J, js_tostring(J, 1), 0);

    re = js_toregexp(J, -1);

    if (!js_regexec(re->prog, text, &m, 0))
        js_pushnumber(J, js_utfptrtoidx(text, m.sub[0].sp));
    else
        js_pushnumber(J, -1);
}
CWE-674
28
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
    return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                           RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
                           indx, 0, data, size, 500);
}
CWE-119
26
error_t dm9000UpdateMacAddrFilter(NetInterface *interface)
{
   uint_t i;
   uint_t k;
   uint32_t crc;
   uint8_t hashTable[8];
   MacFilterEntry *entry;

   //Debug message
   TRACE_DEBUG("Updating MAC filter...\r\n");

   //Clear hash table
   osMemset(hashTable, 0, sizeof(hashTable));
   //Always accept broadcast packets regardless of the MAC filter table
   hashTable[7] = 0x80;

   //The MAC address filter contains the list of MAC addresses to accept
   //when receiving an Ethernet frame
   for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++)
   {
      //Point to the current entry
      entry = &interface->macAddrFilter[i];

      //Valid entry?
      if(entry->refCount > 0)
      {
         //Compute CRC over the current MAC address
         crc = dm9000CalcCrc(&entry->addr, sizeof(MacAddr));
         //Calculate the corresponding index in the table
         k = crc & 0x3F;
         //Update hash table contents
         hashTable[k / 8] |= (1 << (k % 8));
      }
   }

   //Write the hash table to the DM9000 controller
   for(i = 0; i < 8; i++)
   {
      dm9000WriteReg(DM9000_REG_MAR0 + i, hashTable[i]);
   }

   //Debug message
   TRACE_DEBUG(" MAR = %02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8 " "
      "%02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8 "\r\n",
      dm9000ReadReg(DM9000_REG_MAR0), dm9000ReadReg(DM9000_REG_MAR1),
      dm9000ReadReg(DM9000_REG_MAR2), dm9000ReadReg(DM9000_REG_MAR3),
      dm9000ReadReg(DM9000_REG_MAR4), dm9000ReadReg(DM9000_REG_MAR5),
      dm9000ReadReg(DM9000_REG_MAR6), dm9000ReadReg(DM9000_REG_MAR7));

   //Successful processing
   return NO_ERROR;
}
CWE-20
0
static noinline void key_gc_unused_keys(struct list_head *keys)
{
    while (!list_empty(keys)) {
        struct key *key =
            list_entry(keys->next, struct key, graveyard_link);
        list_del(&key->graveyard_link);

        kdebug("- %u", key->serial);
        key_check(key);

        /* Throw away the key data if the key is instantiated */
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
            !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
            key->type->destroy)
            key->type->destroy(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
            spin_lock(&key->user->lock);
            key->user->qnkeys--;
            key->user->qnbytes -= key->quotalen;
            spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
            atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        kfree(key->description);

        memzero_explicit(key, sizeof(*key));
        kmem_cache_free(key_jar, key);
    }
}
CWE-20
0
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
    muscle_private_t* priv = MUSCLE_DATA(card);
    mscfs_t *fs = priv->fs;
    int x;
    int count = 0;

    mscfs_check_cache(priv->fs);

    for(x = 0; x < fs->cache.size; x++) {
        u8* oid = fs->cache.array[x].objectId.id;
        sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
            "FILE: %02X%02X%02X%02X\n",
            oid[0], oid[1], oid[2], oid[3]);
        if(0 == memcmp(fs->currentPath, oid, 2)) {
            buf[0] = oid[2];
            buf[1] = oid[3];
            if(buf[0] == 0x00 && buf[1] == 0x00)
                continue; /* No directories/null names outside of root */
            buf += 2;
            count += 2;
        }
    }
    return count;
}
CWE-119
26
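The CWE-119 label on the entry above reflects that the loop writes two bytes per matching object into buf without ever consulting bufLen, so a long listing can run past the caller's buffer. A minimal sketch of the missing capacity check follows; the helper and its parameters are invented for illustration.

#include <stddef.h>
#include <stdint.h>

/* Sketch: emit 2-byte entries only while the caller's buffer still has
 * room for a whole entry, and report how much was actually used. */
static size_t emit_entries(uint8_t *buf, size_t bufLen,
                           const uint8_t (*ids)[2], size_t count)
{
    size_t i, used = 0;

    for (i = 0; i < count; i++) {
        if (used + 2 > bufLen)      /* respect the caller-supplied capacity */
            break;
        buf[used]     = ids[i][0];
        buf[used + 1] = ids[i][1];
        used += 2;
    }
    return used;
}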
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
#ifdef HAVE_KSID
    ksid_t *ksid = crgetsid(cr, KSID_GROUP);
    ksidlist_t *ksidlist = crgetsidlist(cr);
    uid_t gid;

    if (ksid && ksidlist) {
        int i;
        ksid_t *ksid_groups;
        uint32_t idx = FUID_INDEX(id);
        uint32_t rid = FUID_RID(id);

        ksid_groups = ksidlist->ksl_sids;

        for (i = 0; i != ksidlist->ksl_nsid; i++) {
            if (idx == 0) {
                if (id != IDMAP_WK_CREATOR_GROUP_GID &&
                    id == ksid_groups[i].ks_id) {
                    return (B_TRUE);
                }
            } else {
                const char *domain;

                domain = zfs_fuid_find_by_idx(zfsvfs, idx);
                ASSERT(domain != NULL);

                if (strcmp(domain,
                    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
                    return (B_FALSE);

                if ((strcmp(domain,
                    ksid_groups[i].ks_domain->kd_name) == 0) &&
                    rid == ksid_groups[i].ks_rid)
                    return (B_TRUE);
            }
        }
    }

    /*
     * Not found in ksidlist, check posix groups
     */
    gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
    return (groupmember(gid, cr));
#else
    return (B_TRUE);
#endif
}
CWE-863
11
static int do_devinfo_ioctl(struct comedi_device *dev,
                            struct comedi_devinfo __user *arg,
                            struct file *file)
{
    struct comedi_devinfo devinfo;
    const unsigned minor = iminor(file->f_dentry->d_inode);
    struct comedi_device_file_info *dev_file_info =
        comedi_get_device_file_info(minor);
    struct comedi_subdevice *read_subdev =
        comedi_get_read_subdevice(dev_file_info);
    struct comedi_subdevice *write_subdev =
        comedi_get_write_subdevice(dev_file_info);

    memset(&devinfo, 0, sizeof(devinfo));

    /* fill devinfo structure */
    devinfo.version_code = COMEDI_VERSION_CODE;
    devinfo.n_subdevs = dev->n_subdevices;
    memcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
    memcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);

    if (read_subdev)
        devinfo.read_subdevice = read_subdev - dev->subdevices;
    else
        devinfo.read_subdevice = -1;

    if (write_subdev)
        devinfo.write_subdevice = write_subdev - dev->subdevices;
    else
        devinfo.write_subdevice = -1;

    if (copy_to_user(arg, &devinfo, sizeof(struct comedi_devinfo)))
        return -EFAULT;

    return 0;
}
CWE-200
10
horizontalDifference16(unsigned short *ip, int n, int stride,
                       unsigned short *wp, uint16 *From14)
{
    register int r1, g1, b1, a1, r2, g2, b2, a2, mask;

/* assumption is unsigned pixel values */
#undef CLAMP
#define CLAMP(v) From14[(v) >> 2]

    mask = CODE_MASK;
    if (n >= stride) {
        if (stride == 3) {
            r2 = wp[0] = CLAMP(ip[0]);
            g2 = wp[1] = CLAMP(ip[1]);
            b2 = wp[2] = CLAMP(ip[2]);
            n -= 3;
            while (n > 0) {
                n -= 3;
                wp += 3;
                ip += 3;
                r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
                g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
                b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
            }
        } else if (stride == 4) {
            r2 = wp[0] = CLAMP(ip[0]);
            g2 = wp[1] = CLAMP(ip[1]);
            b2 = wp[2] = CLAMP(ip[2]);
            a2 = wp[3] = CLAMP(ip[3]);
            n -= 4;
            while (n > 0) {
                n -= 4;
                wp += 4;
                ip += 4;
                r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
                g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
                b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
                a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
            }
        } else {
            ip += n - 1;   /* point to last one */
            wp += n - 1;   /* point to last one */
            n -= stride;
            while (n > 0) {
                REPEAT(stride,
                    wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--)
                n -= stride;
            }
            REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--)
        }
    }
}
CWE-119
26
static int logi_dj_ll_raw_request(struct hid_device *hid,
                                  unsigned char reportnum, __u8 *buf,
                                  size_t count, unsigned char report_type,
                                  int reqtype)
{
    struct dj_device *djdev = hid->driver_data;
    struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev;
    u8 *out_buf;
    int ret;

    if (buf[0] != REPORT_TYPE_LEDS)
        return -EINVAL;

    out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
    if (!out_buf)
        return -ENOMEM;

    if (count < DJREPORT_SHORT_LENGTH - 2)
        count = DJREPORT_SHORT_LENGTH - 2;

    out_buf[0] = REPORT_ID_DJ_SHORT;
    out_buf[1] = djdev->device_index;
    memcpy(out_buf + 2, buf, count);

    ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
                             DJREPORT_SHORT_LENGTH, report_type, reqtype);

    kfree(out_buf);
    return ret;
}
CWE-119
26
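The CWE-119 label on the entry above fits the direction of the length comparison: count is raised to at least DJREPORT_SHORT_LENGTH - 2 rather than clamped to at most that, so the memcpy can read past buf and the report built from it overruns the fixed-size out_buf. A minimal sketch of clamping instead follows; the buffer size and helper name are invented for illustration.

#include <string.h>
#include <stddef.h>

/* Sketch: clamp the caller's length to the space left after the two
 * header bytes, never raise it. OUT_LEN is a made-up size. */
#define OUT_LEN 15u

static void build_report(unsigned char *out, const unsigned char *in, size_t count)
{
    if (count > OUT_LEN - 2)        /* clamp down, never up */
        count = OUT_LEN - 2;
    memcpy(out + 2, in, count);
}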
int ocfs2_set_acl(handle_t *handle,
                  struct inode *inode,
                  struct buffer_head *di_bh,
                  int type,
                  struct posix_acl *acl,
                  struct ocfs2_alloc_context *meta_ac,
                  struct ocfs2_alloc_context *data_ac)
{
    int name_index;
    void *value = NULL;
    size_t size = 0;
    int ret;

    if (S_ISLNK(inode->i_mode))
        return -EOPNOTSUPP;

    switch (type) {
    case ACL_TYPE_ACCESS:
        name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
        if (acl) {
            umode_t mode = inode->i_mode;

            ret = posix_acl_equiv_mode(acl, &mode);
            if (ret < 0)
                return ret;

            if (ret == 0)
                acl = NULL;

            ret = ocfs2_acl_set_mode(inode, di_bh,
                                     handle, mode);
            if (ret)
                return ret;
        }
        break;
    case ACL_TYPE_DEFAULT:
        name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
        if (!S_ISDIR(inode->i_mode))
            return acl ? -EACCES : 0;
        break;
    default:
        return -EINVAL;
    }

    if (acl) {
        value = ocfs2_acl_to_xattr(acl, &size);
        if (IS_ERR(value))
            return (int)PTR_ERR(value);
    }

    if (handle)
        ret = ocfs2_xattr_set_handle(handle, inode, di_bh, name_index,
                                     "", value, size, 0,
                                     meta_ac, data_ac);
    else
        ret = ocfs2_xattr_set(inode, name_index, "", value, size, 0);

    kfree(value);

    return ret;
}
CWE-285
23
static bool parse_reconnect(struct pool *pool, json_t *val)
{
    char *sockaddr_url, *stratum_port, *tmp;
    char *url, *port, address[256];

    memset(address, 0, 255);
    url = (char *)json_string_value(json_array_get(val, 0));
    if (!url)
        url = pool->sockaddr_url;
    else {
        char *dot_pool, *dot_reconnect;

        dot_pool = strchr(pool->sockaddr_url, '.');
        if (!dot_pool) {
            applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'",
                   pool->sockaddr_url);
            return false;
        }
        dot_reconnect = strchr(url, '.');
        if (!dot_reconnect) {
            applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'",
                   url);
            return false;
        }
        if (strcmp(dot_pool, dot_reconnect)) {
            applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'",
                   pool->sockaddr_url);
            return false;
        }
    }

    port = (char *)json_string_value(json_array_get(val, 1));
    if (!port)
        port = pool->stratum_port;

    sprintf(address, "%s:%s", url, port);

    if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
        return false;

    applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address);

    clear_pool_work(pool);

    mutex_lock(&pool->stratum_lock);
    __suspend_stratum(pool);
    tmp = pool->sockaddr_url;
    pool->sockaddr_url = sockaddr_url;
    pool->stratum_url = pool->sockaddr_url;
    free(tmp);
    tmp = pool->stratum_port;
    pool->stratum_port = stratum_port;
    free(tmp);
    mutex_unlock(&pool->stratum_lock);

    if (!restart_stratum(pool)) {
        pool_failed(pool);
        return false;
    }

    return true;
}
CWE-119
26
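The CWE-119 label on the entry above corresponds to the unbounded sprintf of "%s:%s" into the fixed 256-byte address[] from externally supplied url and port strings. A minimal sketch of the bounded alternative follows; the helper name is hypothetical and truncation is treated as an error rather than silently accepted.

#include <stdio.h>
#include <stddef.h>

/* Sketch: build "host:port" with an explicit bound and reject the
 * request if it would not fit, instead of writing past the array. */
static int build_address(char *dst, size_t dst_size, const char *url, const char *port)
{
    int n = snprintf(dst, dst_size, "%s:%s", url, port);

    if (n < 0 || (size_t)n >= dst_size)
        return -1;                  /* would not fit in dst */
    return 0;
}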
static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
{
    struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
    struct inode *dir = upperdir->d_inode;
    struct dentry *upper = ovl_dentry_upper(dentry);
    int err;

    inode_lock_nested(dir, I_MUTEX_PARENT);
    err = -ESTALE;
    if (upper->d_parent == upperdir) {
        /* Don't let d_delete() think it can reset d_inode */
        dget(upper);
        if (is_dir)
            err = vfs_rmdir(dir, upper);
        else
            err = vfs_unlink(dir, upper, NULL);
        dput(upper);

        ovl_dentry_version_inc(dentry->d_parent);
    }

    /*
     * Keeping this dentry hashed would mean having to release
     * upperpath/lowerpath, which could only be done if we are the
     * sole user of this dentry. Too tricky... Just unhash for
     * now.
     */
    if (!err)
        d_drop(dentry);
    inode_unlock(dir);

    return err;
}
CWE-20
0
handle_associated_event(struct cpu_hw_events *cpuc,
                        int idx, struct perf_sample_data *data,
                        struct pt_regs *regs)
{
    struct perf_event *event = cpuc->events[idx];
    struct hw_perf_event *hwc = &event->hw;

    mipspmu_event_update(event, hwc, idx);
    data->period = event->hw.last_period;
    if (!mipspmu_event_set_period(event, hwc, idx))
        return;

    if (perf_event_overflow(event, 0, data, regs))
        mipspmu->disable_event(idx);
}
CWE-400
2
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
                      struct msghdr *msg, size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
    size_t copied;
    struct sk_buff *skb;
    int er;

    /*
     * This works for seqpacket too. The receiver has ordered the queue for
     * us! We do one quick check first though
     */
    lock_sock(sk);
    if (sk->sk_state != TCP_ESTABLISHED) {
        release_sock(sk);
        return -ENOTCONN;
    }

    /* Now we can treat all alike */
    if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
        release_sock(sk);
        return er;
    }

    skb_reset_transport_header(skb);
    copied = skb->len;

    if (copied > size) {
        copied = size;
        msg->msg_flags |= MSG_TRUNC;
    }

    er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (er < 0) {
        skb_free_datagram(sk, skb);
        release_sock(sk);
        return er;
    }

    if (sax != NULL) {
        memset(sax, 0, sizeof(*sax));
        sax->sax25_family = AF_NETROM;
        skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
                                         AX25_ADDR_LEN);
    }

    msg->msg_namelen = sizeof(*sax);

    skb_free_datagram(sk, skb);

    release_sock(sk);
    return copied;
}
CWE-20
0
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    int copied, err;

    BT_DBG("sock %p, sk %p", sock, sk);

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    if (sk->sk_state == BT_CLOSED)
        return 0;

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb)
        return err;

    msg->msg_namelen = 0;

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

    switch (hci_pi(sk)->channel) {
    case HCI_CHANNEL_RAW:
        hci_sock_cmsg(sk, msg, skb);
        break;
    case HCI_CHANNEL_USER:
    case HCI_CHANNEL_CONTROL:
    case HCI_CHANNEL_MONITOR:
        sock_recv_timestamp(msg, sk, skb);
        break;
    }

    skb_free_datagram(sk, skb);

    return err ? : copied;
}
CWE-20
0
int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
    int error;
    int len;
    char *data;
    const char *name = gfs2_acl_name(type);

    if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
        return -E2BIG;

    if (type == ACL_TYPE_ACCESS) {
        umode_t mode = inode->i_mode;

        error = posix_acl_equiv_mode(acl, &mode);
        if (error < 0)
            return error;

        if (error == 0)
            acl = NULL;

        if (mode != inode->i_mode) {
            inode->i_mode = mode;
            mark_inode_dirty(inode);
        }
    }

    if (acl) {
        len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
        if (len == 0)
            return 0;
        data = kmalloc(len, GFP_NOFS);
        if (data == NULL)
            return -ENOMEM;
        error = posix_acl_to_xattr(&init_user_ns, acl, data, len);
        if (error < 0)
            goto out;
    } else {
        data = NULL;
        len = 0;
    }

    error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
    if (error)
        goto out;
    set_cached_acl(inode, type, acl);
out:
    kfree(data);
    return error;
}
CWE-285
23
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                         size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
    struct ddpehdr *ddp;
    int copied = 0;
    int offset = 0;
    int err = 0;
    struct sk_buff *skb;

    skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                            flags & MSG_DONTWAIT, &err);
    lock_sock(sk);

    if (!skb)
        goto out;

    /* FIXME: use skb->cb to be able to use shared skbs */
    ddp = ddp_hdr(skb);
    copied = ntohs(ddp->deh_len_hops) & 1023;

    if (sk->sk_type != SOCK_RAW) {
        offset = sizeof(*ddp);
        copied -= offset;
    }

    if (copied > size) {
        copied = size;
        msg->msg_flags |= MSG_TRUNC;
    }
    err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);

    if (!err) {
        if (sat) {
            sat->sat_family      = AF_APPLETALK;
            sat->sat_port        = ddp->deh_sport;
            sat->sat_addr.s_node = ddp->deh_snode;
            sat->sat_addr.s_net  = ddp->deh_snet;
        }
        msg->msg_namelen = sizeof(*sat);
    }

    skb_free_datagram(sk, skb);   /* Free the datagram. */

out:
    release_sock(sk);
    return err ? : copied;
}
CWE-20
0
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
                        struct msghdr *msg, size_t len, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
    int err;

    if (len > ds)
        len = ds;
    else if (len < ds)
        msg->msg_flags |= MSG_TRUNC;

    msg->msg_namelen = 0;

    lock_sock(sk);
    if (ctx->more) {
        ctx->more = 0;
        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
                                         &ctx->completion);
        if (err)
            goto unlock;
    }

    err = memcpy_toiovec(msg->msg_iov, ctx->result, len);

unlock:
    release_sock(sk);

    return err ?: len;
}
CWE-20
0
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct extent_tree *et;
    struct extent_node *en;
    struct extent_info ei;

    if (!f2fs_may_extent_tree(inode)) {
        /* drop largest extent */
        if (i_ext && i_ext->len) {
            i_ext->len = 0;
            return true;
        }
        return false;
    }

    et = __grab_extent_tree(inode);

    if (!i_ext || !i_ext->len)
        return false;

    get_extent_info(&ei, i_ext);

    write_lock(&et->lock);
    if (atomic_read(&et->node_cnt))
        goto out;

    en = __init_extent_tree(sbi, et, &ei);
    if (en) {
        spin_lock(&sbi->extent_lock);
        list_add_tail(&en->list, &sbi->extent_list);
        spin_unlock(&sbi->extent_lock);
    }
out:
    write_unlock(&et->lock);
    return false;
}
CWE-119
26