Columns:
  code        string, lengths 23 to 2.05k (flattened source text of one function)
  label_name  string, lengths 6 to 7 (CWE identifier, e.g. "CWE-119")
  label       int64, values 0 to 37 (integer class id for the CWE)
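The column summary above implies a simple per-row record: one flattened function body (code), a CWE name (label_name), and an integer class id (label). Purely as an illustrative sketch, and not as part of the dataset or its tooling, the C snippet below models one such row together with the label_name/label pairs that actually appear in this preview; struct vuln_sample, observed_labels, and check_row are hypothetical names, and the mapping covers only the eleven CWE ids visible here, not the full 0-37 label space.

/* Illustrative only: one dataset row plus the CWE-name -> label pairs
 * observed in this preview (a partial mapping, not the full 0-37 space). */
#include <stdio.h>
#include <string.h>

struct vuln_sample {
    const char *code;        /* flattened source text of one C function */
    const char *label_name;  /* CWE identifier, e.g. "CWE-119" */
    int label;               /* integer class id in the range 0-37 */
};

static const struct { const char *name; int id; } observed_labels[] = {
    { "CWE-20", 0 },  { "CWE-400", 2 },  { "CWE-436", 5 },  { "CWE-668", 7 },
    { "CWE-200", 10 }, { "CWE-732", 13 }, { "CWE-362", 18 }, { "CWE-755", 21 },
    { "CWE-119", 26 }, { "CWE-674", 28 }, { "CWE-706", 29 },
};

/* Return 1 when a row's label_name/label pair matches the observed mapping,
 * 0 on a mismatch, and -1 when the CWE name does not occur in this preview. */
static int check_row(const struct vuln_sample *s)
{
    size_t i;
    for (i = 0; i < sizeof(observed_labels) / sizeof(observed_labels[0]); i++) {
        if (strcmp(observed_labels[i].name, s->label_name) == 0)
            return observed_labels[i].id == s->label;
    }
    return -1;
}

int main(void)
{
    /* Made-up example row, only to exercise check_row(). */
    struct vuln_sample row = { "int f(void) { return 0; }", "CWE-119", 26 };
    printf("%d\n", check_row(&row));  /* prints 1: pair matches the preview */
    return 0;
}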
initpyfribidi (void) { PyObject *module; /* XXX What should be done if we fail here? */ module = Py_InitModule3 ("pyfribidi", PyfribidiMethods, _pyfribidi__doc__); PyModule_AddIntConstant (module, "RTL", (long) FRIBIDI_TYPE_RTL); PyModule_AddIntConstant (module, "LTR", (long) FRIBIDI_TYPE_LTR); PyModule_AddIntConstant (module, "ON", (long) FRIBIDI_TYPE_ON); PyModule_AddStringConstant (module, "__author__", "Yaacov Zamir and Nir Soffer"); }
CWE-119
26
int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) return ret; /* attempt to update it if supported */ if (!key->type->update) return -EOPNOTSUPP; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; }
CWE-20
0
int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; }
CWE-362
18
static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){ assert( p->nOp>0 || p->aOp==0 ); assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); if( p->nOp ){ assert( p->aOp ); sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment); p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap); } }
CWE-755
21
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) { struct au1100fb_device *fbdev; unsigned int len; unsigned long start=0, off; fbdev = to_au1100fb_device(fbi); if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { return -EINVAL; } start = fbdev->fb_phys & PAGE_MASK; len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); off = vma->vm_pgoff << PAGE_SHIFT; if ((vma->vm_end - vma->vm_start + off) > len) { return -EINVAL; } off += start; vma->vm_pgoff = off >> PAGE_SHIFT; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { return -EAGAIN; } return 0; }
CWE-119
26
static int t220_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[3] = { 0xe, 0x87, 0 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x86; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x80; obuf[2] = 0; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(50); obuf[0] = 0xe; obuf[1] = 0x80; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0x51; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, &d->dev->i2c_adap, NULL); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60, &d->dev->i2c_adap, &tda18271_config)) { info("Attached TDA18271HD/CXD2820R!"); return 0; } } info("Failed to attach TDA18271HD/CXD2820R!"); return -EIO; }
CWE-119
26
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name; struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; lock_sock(sk); /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -ENOTCONN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) goto out; ipx = ipx_hdr(skb); copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr); if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, copied); if (rc) goto out_free; if (skb->tstamp.tv64) sk->sk_stamp = skb->tstamp; msg->msg_namelen = sizeof(*sipx); if (sipx) { sipx->sipx_family = AF_IPX; sipx->sipx_port = ipx->ipx_source.sock; memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN); sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; sipx->sipx_type = ipx->ipx_type; sipx->sipx_zero = 0; } rc = copied; out_free: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; }
CWE-20
0
void enc28j60WritePhyReg(NetInterface *interface, uint16_t address, uint16_t data) { //Write register address enc28j60WriteReg(interface, ENC28J60_REG_MIREGADR, address & REG_ADDR_MASK); //Write the lower 8 bits enc28j60WriteReg(interface, ENC28J60_REG_MIWRL, LSB(data)); //Write the upper 8 bits enc28j60WriteReg(interface, ENC28J60_REG_MIWRH, MSB(data)); //Wait until the PHY register has been written while((enc28j60ReadReg(interface, ENC28J60_REG_MISTAT) & MISTAT_BUSY) != 0) { } }
CWE-20
0
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; int err; BT_DBG("sock %p sk %p len %zu", sock, sk, len); if (flags & (MSG_OOB)) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) { msg->msg_namelen = 0; return 0; } return err; } copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err == 0) { sock_recv_ts_and_drops(msg, sk, skb); if (bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); else msg->msg_namelen = 0; } skb_free_datagram(sk, skb); return err ? : copied; }
CWE-20
0
static int pfkey_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *skb; int copied, err; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; msg->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; }
CWE-20
0
static int vt_kdsetmode(struct vc_data *vc, unsigned long mode) { switch (mode) { case KD_GRAPHICS: break; case KD_TEXT0: case KD_TEXT1: mode = KD_TEXT; fallthrough; case KD_TEXT: break; default: return -EINVAL; } /* FIXME: this needs the console lock extending */ if (vc->vc_mode == mode) return 0; vc->vc_mode = mode; if (vc->vc_num != fg_console) return 0; /* explicitly blank/unblank the screen if switching modes */ console_lock(); if (mode == KD_TEXT) do_unblank_screen(1); else do_blank_screen(1); console_unlock(); return 0; }
CWE-362
18
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, noblock, &rc); if (!skb) return rc; msg->msg_namelen = 0; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); skb_free_datagram(sk, skb); return rc ? : copied; }
CWE-20
0
void js_RegExp_prototype_exec(js_State *J, js_Regexp *re, const char *text) { int i; int opts; Resub m; opts = 0; if (re->flags & JS_REGEXP_G) { if (re->last > strlen(text)) { re->last = 0; js_pushnull(J); return; } if (re->last > 0) { text += re->last; opts |= REG_NOTBOL; } } if (!js_regexec(re->prog, text, &m, opts)) { js_newarray(J); js_pushstring(J, text); js_setproperty(J, -2, "input"); js_pushnumber(J, js_utfptrtoidx(text, m.sub[0].sp)); js_setproperty(J, -2, "index"); for (i = 0; i < m.nsub; ++i) { js_pushlstring(J, m.sub[i].sp, m.sub[i].ep - m.sub[i].sp); js_setindex(J, -2, i); } if (re->flags & JS_REGEXP_G) re->last = re->last + (m.sub[0].ep - text); return; } if (re->flags & JS_REGEXP_G) re->last = 0; js_pushnull(J); }
CWE-674
28
rad_get_vendor_attr(u_int32_t *vendor, const void **data, size_t *len) { struct vendor_attribute *attr; attr = (struct vendor_attribute *)*data; *vendor = ntohl(attr->vendor_value); *data = attr->attrib_data; *len = attr->attrib_len - 2; return (attr->attrib_type); }
CWE-119
26
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; }
CWE-20
0
static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) { /* unprotected vars, we dont care of overwrites */ static u32 challenge_timestamp; static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); u32 now; /* First check our per-socket dupack rate limit. */ if (tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDCHALLENGE, &tp->last_oow_ack_time)) return; /* Then check the check host-wide RFC 5961 rate limit. */ now = jiffies / HZ; if (now != challenge_timestamp) { challenge_timestamp = now; challenge_count = 0; } if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } }
CWE-200
10
chunk_grow(chunk_t *chunk, size_t sz) { off_t offset; size_t memlen_orig = chunk->memlen; tor_assert(sz > chunk->memlen); offset = chunk->data - chunk->mem; chunk = tor_realloc(chunk, CHUNK_ALLOC_SIZE(sz)); chunk->memlen = sz; chunk->data = chunk->mem + offset; #ifdef DEBUG_CHUNK_ALLOC tor_assert(chunk->DBG_alloc == CHUNK_ALLOC_SIZE(memlen_orig)); chunk->DBG_alloc = CHUNK_ALLOC_SIZE(sz); #endif total_bytes_allocated_in_chunks += CHUNK_ALLOC_SIZE(sz) - CHUNK_ALLOC_SIZE(memlen_orig); return chunk; }
CWE-119
26
static void perf_event_interrupt(struct pt_regs *regs) { int i; struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); struct perf_event *event; unsigned long val; int found = 0; int nmi; nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); else irq_enter(); for (i = 0; i < ppmu->n_counter; ++i) { event = cpuhw->event[i]; val = read_pmc(i); if ((int)val < 0) { if (event) { /* event has overflowed */ found = 1; record_and_restart(event, val, regs, nmi); } else { /* * Disabled counter is negative, * reset it just in case. */ write_pmc(i, 0); } } } /* PMM will keep counters frozen until we return from the interrupt. */ mtmsr(mfmsr() | MSR_PMM); mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); if (nmi) nmi_exit(); else irq_exit(); }
CWE-400
2
void big_key_describe(const struct key *key, struct seq_file *m) { size_t datalen = (size_t)key->payload.data[big_key_len]; seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, ": %zu [%s]", datalen, datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); }
CWE-20
0
GIT_INLINE(bool) only_spaces_and_dots(const char *path) { const char *c = path; for (;; c++) { if (*c == '\0') return true; if (*c != ' ' && *c != '.') return false; } return true; }
CWE-706
29
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. */ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); BUG_ON(sk != asoc->base.sk); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; }
CWE-362
18
process_open(u_int32_t id) { u_int32_t pflags; Attrib a; char *name; int r, handle, fd, flags, mode, status = SSH2_FX_FAILURE; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || (r = sshbuf_get_u32(iqueue, &pflags)) != 0 || /* portable flags */ (r = decode_attrib(iqueue, &a)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); debug3("request %u: open flags %d", id, pflags); flags = flags_from_portable(pflags); mode = (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) ? a.perm : 0666; logit("open \"%s\" flags %s mode 0%o", name, string_from_portable(pflags), mode); if (readonly && ((flags & O_ACCMODE) == O_WRONLY || (flags & O_ACCMODE) == O_RDWR)) { verbose("Refusing open request in read-only mode"); status = SSH2_FX_PERMISSION_DENIED; } else { fd = open(name, flags, mode); if (fd < 0) { status = errno_to_portable(errno); } else { handle = handle_new(HANDLE_FILE, name, fd, flags, NULL); if (handle < 0) { close(fd); } else { send_handle(id, handle); status = SSH2_FX_OK; } } } if (status != SSH2_FX_OK) send_status(id, status); free(name); }
CWE-732
13
static bool check_underflow(const struct ip6t_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(&e->ipv6)) return false; t = ip6t_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; }
CWE-119
26
static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; }
CWE-20
0
do_encrypt (const RIJNDAEL_context *ctx, unsigned char *bx, const unsigned char *ax) { #ifdef USE_AMD64_ASM return _gcry_aes_amd64_encrypt_block(ctx->keyschenc, bx, ax, ctx->rounds, encT); #elif defined(USE_ARM_ASM) return _gcry_aes_arm_encrypt_block(ctx->keyschenc, bx, ax, ctx->rounds, encT); #else return do_encrypt_fn (ctx, bx, ax); #endif /* !USE_ARM_ASM && !USE_AMD64_ASM*/ }
CWE-668
7
static int su3000_power_ctrl(struct dvb_usb_device *d, int i) { struct dw2102_state *state = (struct dw2102_state *)d->priv; u8 obuf[] = {0xde, 0}; info("%s: %d, initialized %d", __func__, i, state->initialized); if (i && !state->initialized) { state->initialized = 1; /* reset board */ return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); } return 0; }
CWE-119
26
static void f2fs_put_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); int i; f2fs_quota_off_umount(sb); /* prevent remaining shrinker jobs */ mutex_lock(&sbi->umount_mutex); /* * We don't need to do checkpoint when superblock is clean. * But, the previous checkpoint was not done by umount, it needs to do * clean checkpoint again. */ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { struct cp_control cpc = { .reason = CP_UMOUNT, }; write_checkpoint(sbi, &cpc); } /* be sure to wait for any on-going discard commands */ f2fs_wait_discard_bios(sbi); if (f2fs_discard_en(sbi) && !sbi->discard_blks) { struct cp_control cpc = { .reason = CP_UMOUNT | CP_TRIMMED, }; write_checkpoint(sbi, &cpc); } /* write_checkpoint can update stat informaion */ f2fs_destroy_stats(sbi); /* * normally superblock is clean, so we need to release this. * In addition, EIO will skip do checkpoint, we need this as well. */ release_ino_entry(sbi, true); f2fs_leave_shrinker(sbi); mutex_unlock(&sbi->umount_mutex); /* our cp_error case, we can wait for any writeback page */ f2fs_flush_merged_writes(sbi); iput(sbi->node_inode); iput(sbi->meta_inode); /* destroy f2fs internal modules */ destroy_node_manager(sbi); destroy_segment_manager(sbi); kfree(sbi->ckpt); f2fs_unregister_sysfs(sbi); sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->raw_super); destroy_device_list(sbi); mempool_destroy(sbi->write_io_dummy); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif destroy_percpu_info(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) kfree(sbi->write_io[i]); kfree(sbi); }
CWE-20
0
static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_connectinfo ci = { .devnum = ps->dev->devnum, .slow = ps->dev->speed == USB_SPEED_LOW }; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; }
CWE-200
10
static int parse_video_info(AVIOContext *pb, AVStream *st) { uint16_t size_asf; // ASF-specific Format Data size uint32_t size_bmp; // BMP_HEADER-specific Format Data size unsigned int tag; st->codecpar->width = avio_rl32(pb); st->codecpar->height = avio_rl32(pb); avio_skip(pb, 1); // skip reserved flags size_asf = avio_rl16(pb); tag = ff_get_bmp_header(pb, st, &size_bmp); st->codecpar->codec_tag = tag; st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag); size_bmp = FFMAX(size_asf, size_bmp); if (size_bmp > BMP_HEADER_SIZE) { int ret; st->codecpar->extradata_size = size_bmp - BMP_HEADER_SIZE; if (!(st->codecpar->extradata = av_malloc(st->codecpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE))) { st->codecpar->extradata_size = 0; return AVERROR(ENOMEM); } memset(st->codecpar->extradata + st->codecpar->extradata_size , 0, AV_INPUT_BUFFER_PADDING_SIZE); if ((ret = avio_read(pb, st->codecpar->extradata, st->codecpar->extradata_size)) < 0) return ret; } return 0; }
CWE-119
26
static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) { return kstrdup(name, GFP_KERNEL); }
CWE-362
18
PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encoderow != NULL); /* XXX horizontal differencing alters user's data XXX */ (*sp->encodepfunc)(tif, bp, cc); return (*sp->encoderow)(tif, bp, cc, s); }
CWE-119
26
void lpc546xxEthEnableIrq(NetInterface *interface) { //Enable Ethernet MAC interrupts NVIC_EnableIRQ(ETHERNET_IRQn); //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } }
CWE-20
0
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name; struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; lock_sock(sk); /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -ENOTCONN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) goto out; ipx = ipx_hdr(skb); copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr); if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, copied); if (rc) goto out_free; if (skb->tstamp.tv64) sk->sk_stamp = skb->tstamp; msg->msg_namelen = sizeof(*sipx); if (sipx) { sipx->sipx_family = AF_IPX; sipx->sipx_port = ipx->ipx_source.sock; memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN); sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; sipx->sipx_type = ipx->ipx_type; sipx->sipx_zero = 0; } rc = copied; out_free: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; }
CWE-20
0
raptor_libxml_resolveEntity(void* user_data, const xmlChar *publicId, const xmlChar *systemId) { raptor_sax2* sax2 = (raptor_sax2*)user_data; return libxml2_resolveEntity(sax2->xc, publicId, systemId); }
CWE-200
10
static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; struct nat_entry *ne; int err; /* 0 nid should not be used */ if (unlikely(nid == 0)) return false; if (build) { /* do not add allocated nids */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || nat_get_blkaddr(ne) != NULL_ADDR)) return false; } i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; i->state = NID_NEW; if (radix_tree_preload(GFP_NOFS)) { kmem_cache_free(free_nid_slab, i); return true; } spin_lock(&nm_i->nid_list_lock); err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true); spin_unlock(&nm_i->nid_list_lock); radix_tree_preload_end(); if (err) { kmem_cache_free(free_nid_slab, i); return true; } return true; }
CWE-362
18
static int __kprobes perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; int i; if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { case DIE_NMI: break; default: return NOTIFY_DONE; } regs = args->regs; perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); /* If the PMU has the TOE IRQ enable bits, we need to do a * dummy write to the %pcr to clear the overflow bits and thus * the interrupt. * * Do this before we peek at the counters to determine * overflow so we don't lose any events. */ if (sparc_pmu->irq_bit) pcr_ops->write(cpuc->pcr); for (i = 0; i < cpuc->n_events; i++) { struct perf_event *event = cpuc->event[i]; int idx = cpuc->current_idx[i]; struct hw_perf_event *hwc; u64 val; hwc = &event->hw; val = sparc_perf_event_update(event, hwc, idx); if (val & (1ULL << 31)) continue; data.period = event->hw.last_period; if (!sparc_perf_event_set_period(event, hwc, idx)) continue; if (perf_event_overflow(event, 1, &data, regs)) sparc_pmu_stop(event, 0); } return NOTIFY_STOP; }
CWE-400
2
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask) { u32 lmask = mask; u32 hmask = mask >> 32; int err = 0; WARN_ON(system_state != SYSTEM_BOOTING); if (boot_cpu_has(X86_FEATURE_XSAVES)) asm volatile("1:"XRSTORS"\n\t" "2:\n\t" : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); else asm volatile("1:"XRSTOR"\n\t" "2:\n\t" : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); asm volatile(xstate_fault : "0" (0) : "memory"); return err; }
CWE-20
0
static inline bool unconditional(const struct arpt_arp *arp) { static const struct arpt_arp uncond; return memcmp(arp, &uncond, sizeof(uncond)) == 0; }
CWE-119
26
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name; struct ddpehdr *ddp; int copied = 0; int offset = 0; int err = 0; struct sk_buff *skb; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); lock_sock(sk); if (!skb) goto out; /* FIXME: use skb->cb to be able to use shared skbs */ ddp = ddp_hdr(skb); copied = ntohs(ddp->deh_len_hops) & 1023; if (sk->sk_type != SOCK_RAW) { offset = sizeof(*ddp); copied -= offset; } if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied); if (!err) { if (sat) { sat->sat_family = AF_APPLETALK; sat->sat_port = ddp->deh_sport; sat->sat_addr.s_node = ddp->deh_snode; sat->sat_addr.s_net = ddp->deh_snet; } msg->msg_namelen = sizeof(*sat); } skb_free_datagram(sk, skb); /* Free the datagram. */ out: release_sock(sk); return err ? : copied; }
CWE-20
0
static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { int64_t target_end = s->end_off ? s->end_off : s->filesize; if ((!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRId64", should be %"PRId64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; }
CWE-119
26
horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { wp += n + stride - 1; /* point to last one */ ip += n + stride - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } }
CWE-119
26
static int jas_iccputuint(jas_stream_t *out, int n, ulonglong val) { int i; int c; for (i = n; i > 0; --i) { c = (val >> (8 * (i - 1))) & 0xff; if (jas_stream_putc(out, c) == EOF) return -1; } return 0; }
CWE-20
0
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
CWE-20
0
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; int err; BT_DBG("sock %p sk %p len %zu", sock, sk, len); if (flags & (MSG_OOB)) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } msg->msg_namelen = 0; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err == 0) sock_recv_ts_and_drops(msg, sk, skb); skb_free_datagram(sk, skb); return err ? : copied; }
CWE-200
10
int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end, const uint8_t *name, uint8_t *dst, int dst_size) { int namelen = strlen(name); int len; while (*data != AMF_DATA_TYPE_OBJECT && data < data_end) { len = ff_amf_tag_size(data, data_end); if (len < 0) len = data_end - data; data += len; } if (data_end - data < 3) return -1; data++; for (;;) { int size = bytestream_get_be16(&data); if (!size) break; if (size < 0 || size >= data_end - data) return -1; data += size; if (size == namelen && !memcmp(data-size, name, namelen)) { switch (*data++) { case AMF_DATA_TYPE_NUMBER: snprintf(dst, dst_size, "%g", av_int2double(AV_RB64(data))); break; case AMF_DATA_TYPE_BOOL: snprintf(dst, dst_size, "%s", *data ? "true" : "false"); break; case AMF_DATA_TYPE_STRING: len = bytestream_get_be16(&data); av_strlcpy(dst, data, FFMIN(len+1, dst_size)); break; default: return -1; } return 0; } len = ff_amf_tag_size(data, data_end); if (len < 0 || len >= data_end - data) return -1; data += len; } return -1; }
CWE-20
0
int main(int argc, char *argv[]) { p_fm_config_conx_hdlt hdl; int instance = 0; fm_mgr_config_errno_t res; char *rem_addr = NULL; char *community = "public"; char Opts[256]; int arg; char *command; int i; /* Get options at the command line (overide default values) */ strcpy(Opts, "i:d:h-"); while ((arg = getopt(argc, argv, Opts)) != EOF) { switch (arg) { case 'h': case '-': usage(argv[0]); return(0); case 'i': instance = atol(optarg); break; case 'd': rem_addr = optarg; break; default: usage(argv[0]); return(-1); } } if(optind >= argc){ fprintf(stderr, "Command required\n"); usage(argv[0]); return -1; } command = argv[optind++]; printf("Connecting to %s FM instance %d\n", (rem_addr==NULL) ? "LOCAL":rem_addr, instance); if((res = fm_mgr_config_init(&hdl,instance, rem_addr, community)) != FM_CONF_OK) { fprintf(stderr, "Failed to initialize the client handle: %d\n", res); goto die_clean; } if((res = fm_mgr_config_connect(hdl)) != FM_CONF_OK) { fprintf(stderr, "Failed to connect: (%d) %s\n",res,fm_mgr_get_error_str(res)); goto die_clean; } for(i=0;i<commandListLen;i++){ if(strcmp(command,commandList[i].name) == 0){ return commandList[i].cmdPtr(hdl, commandList[i].mgr, (argc - optind), &argv[optind]); } } fprintf(stderr, "Command (%s) is not valid\n",command); usage(argv[0]); res = -1; die_clean: if (hdl) free(hdl); return res; }
CWE-362
18
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) { int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { void __user *namep; namep = (void __user __force *) m->msg_name; err = move_addr_to_kernel(namep, m->msg_namelen, address); if (err < 0) return err; } m->msg_name = address; } else { m->msg_name = NULL; } size = m->msg_iovlen * sizeof(struct iovec); if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) return -EFAULT; m->msg_iov = iov; err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { size_t len = iov[ct].iov_len; if (len > INT_MAX - err) { len = INT_MAX - err; iov[ct].iov_len = len; } err += len; } return err; }
CWE-20
0
static void __net_random_once_deferred(struct work_struct *w) { struct __net_random_once_work *work = container_of(w, struct __net_random_once_work, work); if (!static_key_enabled(work->key)) static_key_slow_inc(work->key); kfree(work); }
CWE-200
10
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr, size_t sec_attr_len) { u8 *tmp; if (!sc_file_valid(file)) { return SC_ERROR_INVALID_ARGUMENTS; } if (sec_attr == NULL) { if (file->sec_attr != NULL) free(file->sec_attr); file->sec_attr = NULL; file->sec_attr_len = 0; return 0; } tmp = (u8 *) realloc(file->sec_attr, sec_attr_len); if (!tmp) { if (file->sec_attr) free(file->sec_attr); file->sec_attr = NULL; file->sec_attr_len = 0; return SC_ERROR_OUT_OF_MEMORY; } file->sec_attr = tmp; memcpy(file->sec_attr, sec_attr, sec_attr_len); file->sec_attr_len = sec_attr_len; return 0; }
CWE-119
26
static void lo_release(struct gendisk *disk, fmode_t mode) { struct loop_device *lo = disk->private_data; int err; if (atomic_dec_return(&lo->lo_refcnt)) return; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { /* * In autoclear mode, stop the loop thread * and remove configuration after last close. */ err = loop_clr_fd(lo); if (!err) return; } else if (lo->lo_state == Lo_bound) { /* * Otherwise keep thread (if running) and config, * but flush possible ongoing bios in thread. */ blk_mq_freeze_queue(lo->lo_queue); blk_mq_unfreeze_queue(lo->lo_queue); } mutex_unlock(&lo->lo_ctl_mutex); }
CWE-362
18
static void wdm_in_callback(struct urb *urb) { struct wdm_device *desc = urb->context; int status = urb->status; spin_lock(&desc->iuspin); clear_bit(WDM_RESPONDING, &desc->flags); if (status) { switch (status) { case -ENOENT: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ENOENT"); goto skip_error; case -ECONNRESET: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ECONNRESET"); goto skip_error; case -ESHUTDOWN: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ESHUTDOWN"); goto skip_error; case -EPIPE: dev_err(&desc->intf->dev, "nonzero urb status received: -EPIPE\n"); break; default: dev_err(&desc->intf->dev, "Unexpected error %d\n", status); break; } } desc->rerr = status; desc->reslength = urb->actual_length; memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); desc->length += desc->reslength; skip_error: wake_up(&desc->wait); set_bit(WDM_READ, &desc->flags); spin_unlock(&desc->iuspin); }
CWE-119
26
void exit_io_context(void) { struct io_context *ioc; task_lock(current); ioc = current->io_context; current->io_context = NULL; task_unlock(current); if (atomic_dec_and_test(&ioc->nr_tasks)) { if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); cfq_exit(ioc); } put_io_context(ioc); }
CWE-400
2
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_stats sas; struct sctp_association *asoc = NULL; /* User must provide at least the assoc id */ if (len < sizeof(sctp_assoc_t)) return -EINVAL; if (copy_from_user(&sas, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, sas.sas_assoc_id); if (!asoc) return -EINVAL; sas.sas_rtxchunks = asoc->stats.rtxchunks; sas.sas_gapcnt = asoc->stats.gapcnt; sas.sas_outofseqtsns = asoc->stats.outofseqtsns; sas.sas_osacks = asoc->stats.osacks; sas.sas_isacks = asoc->stats.isacks; sas.sas_octrlchunks = asoc->stats.octrlchunks; sas.sas_ictrlchunks = asoc->stats.ictrlchunks; sas.sas_oodchunks = asoc->stats.oodchunks; sas.sas_iodchunks = asoc->stats.iodchunks; sas.sas_ouodchunks = asoc->stats.ouodchunks; sas.sas_iuodchunks = asoc->stats.iuodchunks; sas.sas_idupchunks = asoc->stats.idupchunks; sas.sas_opackets = asoc->stats.opackets; sas.sas_ipackets = asoc->stats.ipackets; /* New high max rto observed, will return 0 if not a single * RTO update took place. obs_rto_ipaddr will be bogus * in such a case */ sas.sas_maxrto = asoc->stats.max_obs_rto; memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, sizeof(struct sockaddr_storage)); /* Mark beginning of a new observation period */ asoc->stats.max_obs_rto = asoc->rto_min; /* Allow the struct to grow and fill in as much as possible */ len = min_t(size_t, len, sizeof(sas)); if (put_user(len, optlen)) return -EFAULT; SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n", len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; return 0; }
CWE-20
0
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) { u32 data, tpr; int max_irr, max_isr; struct kvm_lapic *apic = vcpu->arch.apic; void *vapic; apic_sync_pv_eoi_to_guest(vcpu, apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; max_irr = apic_find_highest_irr(apic); if (max_irr < 0) max_irr = 0; max_isr = apic_find_highest_isr(apic); if (max_isr < 0) max_isr = 0; data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); vapic = kmap_atomic(vcpu->arch.apic->vapic_page); *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; kunmap_atomic(vapic); }
CWE-20
0
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name; struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; lock_sock(sk); /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -ENOTCONN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) goto out; ipx = ipx_hdr(skb); copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr); if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, copied); if (rc) goto out_free; if (skb->tstamp.tv64) sk->sk_stamp = skb->tstamp; msg->msg_namelen = sizeof(*sipx); if (sipx) { sipx->sipx_family = AF_IPX; sipx->sipx_port = ipx->ipx_source.sock; memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN); sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; sipx->sipx_type = ipx->ipx_type; sipx->sipx_zero = 0; } rc = copied; out_free: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; }
CWE-20
0
check_entry_size_and_hooks(struct ipt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
CWE-119
26
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; }
CWE-20
0
static int hash_recvmsg(struct kiocb *unused, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); int err; if (len > ds) len = ds; else if (len < ds) msg->msg_flags |= MSG_TRUNC; msg->msg_namelen = 0; lock_sock(sk); if (ctx->more) { ctx->more = 0; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), &ctx->completion); if (err) goto unlock; } err = memcpy_toiovec(msg->msg_iov, ctx->result, len); unlock: release_sock(sk); return err ?: len; }
CWE-20
0
horizontalDifference16(unsigned short *ip, int n, int stride, unsigned short *wp, uint16 *From14) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; /* assumption is unsigned pixel values */ #undef CLAMP #define CLAMP(v) From14[(v) >> 2] mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } }
CWE-119
26
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int ret; int copylen; ret = -EOPNOTSUPP; if (m->msg_flags&MSG_OOB) goto read_error; m->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; copylen = skb->len; if (len < copylen) { m->msg_flags |= MSG_TRUNC; copylen = len; } ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); if (ret) goto out_free; ret = (flags & MSG_TRUNC) ? skb->len : copylen; out_free: skb_free_datagram(sk, skb); caif_check_flow_release(sk); return ret; read_error: return ret; }
CWE-20
0
char *suhosin_decrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key, char **where TSRMLS_DC) { char buffer[4096]; char buffer2[4096]; int o_name_len = name_len; char *buf = buffer, *buf2 = buffer2, *d, *d_url; int l; if (name_len > sizeof(buffer)-2) { buf = estrndup(name, name_len); } else { memcpy(buf, name, name_len); buf[name_len] = 0; } name_len = php_url_decode(buf, name_len); normalize_varname(buf); name_len = strlen(buf); if (SUHOSIN_G(cookie_plainlist)) { if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) { decrypt_return_plain: if (buf != buffer) { efree(buf); } memcpy(*where, name, o_name_len); *where += o_name_len; **where = '='; *where +=1; memcpy(*where, value, value_len); *where += value_len; return *where; } } else if (SUHOSIN_G(cookie_cryptlist)) { if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) { goto decrypt_return_plain; } } if (strlen(value) <= sizeof(buffer2)-2) { memcpy(buf2, value, value_len); buf2[value_len] = 0; } else { buf2 = estrndup(value, value_len); } value_len = php_url_decode(buf2, value_len); d = suhosin_decrypt_string(buf2, value_len, buf, name_len, key, &l, SUHOSIN_G(cookie_checkraddr) TSRMLS_CC); if (d == NULL) { goto skip_cookie; } d_url = php_url_encode(d, l, &l); efree(d); memcpy(*where, name, o_name_len); *where += o_name_len; **where = '=';*where += 1; memcpy(*where, d_url, l); *where += l; efree(d_url); skip_cookie: if (buf != buffer) { efree(buf); } if (buf2 != buffer2) { efree(buf2); } return *where; }
CWE-119
26
header_put_be_8byte (SF_PRIVATE *psf, sf_count_t x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 8) { psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = (x >> 24) ; psf->header [psf->headindex++] = (x >> 16) ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = x ; } ; } /* header_put_be_8byte */
CWE-119
26
static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); char *driver_override, *old = pdev->driver_override, *cp; if (count > PATH_MAX) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); if (!driver_override) return -ENOMEM; cp = strchr(driver_override, '\n'); if (cp) *cp = '\0'; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } kfree(old); return count; }
CWE-362
18
vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, struct se_lun *lun, u32 event, u32 reason) { struct vhost_scsi_evt *evt; evt = vhost_scsi_allocate_evt(vs, event, reason); if (!evt) return; if (tpg && lun) { /* TODO: share lun setup code with virtio-scsi.ko */ /* * Note: evt->event is zeroed when we allocate it and * lun[4-7] need to be zero according to virtio-scsi spec. */ evt->event.lun[0] = 0x01; evt->event.lun[1] = tpg->tport_tpgt & 0xFF; if (lun->unpacked_lun >= 256) evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; evt->event.lun[3] = lun->unpacked_lun & 0xFF; } llist_add(&evt->list, &vs->vs_event_list); vhost_work_queue(&vs->dev, &vs->vs_event_work); }
CWE-119
26
static void br_multicast_del_pg(struct net_bridge *br, struct net_bridge_port_group *pg) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; mdb = mlock_dereference(br->mdb, br); mp = br_mdb_ip_get(mdb, &pg->addr); if (WARN_ON(!mp)) return; for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (p != pg) continue; rcu_assign_pointer(*pp, p->next); hlist_del_init(&p->mglist); del_timer(&p->timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); return; } WARN_ON(1); }
CWE-20
0
raptor_libxml_getEntity(void* user_data, const xmlChar *name) { raptor_sax2* sax2 = (raptor_sax2*)user_data; return libxml2_getEntity(sax2->xc, name); }
CWE-200
10
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial) { cac_private_data_t * priv = CAC_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL); if (card->serialnr.len) { *serial = card->serialnr; SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } if (priv->cac_id_len) { serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR); memcpy(serial->value, priv->cac_id, priv->cac_id_len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND); }
CWE-119
26
int rose_parse_facilities(unsigned char *p, struct rose_facilities_struct *facilities) { int facilities_len, len; facilities_len = *p++; if (facilities_len == 0) return 0; while (facilities_len > 0) { if (*p == 0x00) { facilities_len--; p++; switch (*p) { case FAC_NATIONAL: /* National */ len = rose_parse_national(p + 1, facilities, facilities_len - 1); if (len < 0) return 0; facilities_len -= len + 1; p += len + 1; break; case FAC_CCITT: /* CCITT */ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); if (len < 0) return 0; facilities_len -= len + 1; p += len + 1; break; default: printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); facilities_len--; p++; break; } } else break; /* Error in facilities format */ } return 1; }
CWE-20
0
sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. */ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */
CWE-119
26
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; unsigned char max_level = 0; int unix_sock_count = 0; for (i = scm->fp->count - 1; i >= 0; i--) { struct sock *sk = unix_get_socket(scm->fp->fp[i]); if (sk) { unix_sock_count++; max_level = max(max_level, unix_sk(sk)->recursion_level); } } if (unlikely(max_level > MAX_RECURSION_LEVEL)) return -ETOOMANYREFS; /* * Need to duplicate file references for the sake of garbage * collection. Otherwise a socket in the fps might become a * candidate for GC while the skb is not yet queued. */ UNIXCB(skb).fp = scm_fp_dup(scm->fp); if (!UNIXCB(skb).fp) return -ENOMEM; if (unix_sock_count) { for (i = scm->fp->count - 1; i >= 0; i--) unix_inflight(scm->fp->fp[i]); } return max_level; }
CWE-119
26
uint16_t enc28j60ReadPhyReg(NetInterface *interface, uint16_t address) { uint16_t data; //Write register address enc28j60WriteReg(interface, ENC28J60_REG_MIREGADR, address & REG_ADDR_MASK); //Start read operation enc28j60WriteReg(interface, ENC28J60_REG_MICMD, MICMD_MIIRD); //Wait for the read operation to complete while((enc28j60ReadReg(interface, ENC28J60_REG_MISTAT) & MISTAT_BUSY) != 0) { } //Clear command register enc28j60WriteReg(interface, ENC28J60_REG_MICMD, 0); //Read the lower 8 bits data = enc28j60ReadReg(interface, ENC28J60_REG_MIRDL); //Read the upper 8 bits data |= enc28j60ReadReg(interface, ENC28J60_REG_MIRDH) << 8; //Return register contents return data; }
CWE-20
0
int devmem_is_allowed(unsigned long pagenr) { if (pagenr < 256) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) return 1; return 0; }
CWE-732
13
mcs_parse_domain_params(STREAM s) { int length; ber_parse_header(s, MCS_TAG_DOMAIN_PARAMS, &length); in_uint8s(s, length); return s_check(s); }
CWE-119
26
static int open_cred_file(char *file_name, struct parsed_mount_info *parsed_info) { char *line_buf = NULL; char *temp_val = NULL; FILE *fs = NULL; int i; const int line_buf_size = 4096; const int min_non_white = 10; i = toggle_dac_capability(0, 1); if (i) goto return_i; i = access(file_name, R_OK); if (i) { toggle_dac_capability(0, 0); i = errno; goto return_i; } fs = fopen(file_name, "r"); if (fs == NULL) { toggle_dac_capability(0, 0); i = errno; goto return_i; } i = toggle_dac_capability(0, 0); if (i) goto return_i; line_buf = (char *)malloc(line_buf_size); if (line_buf == NULL) { i = EX_SYSERR; goto return_i; } /* parse line from credentials file */ while (fgets(line_buf, line_buf_size, fs)) { /* eat leading white space */ for (i = 0; i < line_buf_size - min_non_white + 1; i++) { if ((line_buf[i] != ' ') && (line_buf[i] != '\t')) break; } null_terminate_endl(line_buf); /* parse next token */ switch (parse_cred_line(line_buf + i, &temp_val)) { case CRED_USER: strlcpy(parsed_info->username, temp_val, sizeof(parsed_info->username)); parsed_info->got_user = 1; break; case CRED_PASS: i = set_password(parsed_info, temp_val); if (i) goto return_i; break; case CRED_DOM: if (parsed_info->verboseflag) fprintf(stderr, "domain=%s\n", temp_val); strlcpy(parsed_info->domain, temp_val, sizeof(parsed_info->domain)); break; case CRED_UNPARSEABLE: if (parsed_info->verboseflag) fprintf(stderr, "Credential formatted " "incorrectly: %s\n", temp_val ? temp_val : "(null)"); break; } } i = 0; return_i: if (fs != NULL) fclose(fs); /* make sure passwords are scrubbed from memory */ if (line_buf != NULL) memset(line_buf, 0, line_buf_size); free(line_buf); return i; }
CWE-668
7
int dsOpen(void) {
    struct stat sb;
    int retval;
    char *path = server.diskstore_path;

    if ((retval = stat(path,&sb) == -1) && errno != ENOENT) {
        redisLog(REDIS_WARNING, "Error opening disk store at %s: %s",
            path, strerror(errno));
        return REDIS_ERR;
    }

    /* Directory already in place. Assume everything is ok. */
    if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK;

    /* File exists but it's not a directory */
    if (retval == 0 && !S_ISDIR(sb.st_mode)) {
        redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path);
        return REDIS_ERR;
    }

    /* New disk store, create the directory structure now, as creating
     * them in a lazy way is not a good idea, after very few insertions
     * we'll need most of the 65536 directories anyway. */
    if (mkdir(path, 0755) == -1) {
        redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s",
            path, strerror(errno));
        return REDIS_ERR;
    }
    return REDIS_OK;
}
CWE-20
0
static int filter_frame(AVFilterLink *inlink, AVFrame *in) { DelogoContext *s = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); AVFrame *out; int hsub0 = desc->log2_chroma_w; int vsub0 = desc->log2_chroma_h; int direct = 0; int plane; AVRational sar; if (av_frame_is_writable(in)) { direct = 1; out = in; } else { out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); } sar = in->sample_aspect_ratio; /* Assume square pixels if SAR is unknown */ if (!sar.num) sar.num = sar.den = 1; for (plane = 0; plane < 4 && in->data[plane]; plane++) { int hsub = plane == 1 || plane == 2 ? hsub0 : 0; int vsub = plane == 1 || plane == 2 ? vsub0 : 0; apply_delogo(out->data[plane], out->linesize[plane], in ->data[plane], in ->linesize[plane], FF_CEIL_RSHIFT(inlink->w, hsub), FF_CEIL_RSHIFT(inlink->h, vsub), sar, s->x>>hsub, s->y>>vsub, /* Up and left borders were rounded down, inject lost bits * into width and height to avoid error accumulation */ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub), FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub), s->band>>FFMIN(hsub, vsub), s->show, direct); } if (!direct) av_frame_free(&in); return ff_filter_frame(outlink, out); }
CWE-119
26
flatpak_proxy_client_init (FlatpakProxyClient *client) { init_side (client, &client->client_side); init_side (client, &client->bus_side); client->auth_end_offset = AUTH_END_INIT_OFFSET; client->rewrite_reply = g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL, g_object_unref); client->get_owner_reply = g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL, g_free); client->unique_id_policy = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); }
CWE-436
5
static void perf_event_init_cpu(int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); swhash->online = true; if (swhash->hlist_refcount > 0) { struct swevent_hlist *hlist; hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); WARN_ON(!hlist); rcu_assign_pointer(swhash->swevent_hlist, hlist); } mutex_unlock(&swhash->hlist_mutex); }
CWE-362
18
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int err = 0; lock_sock(sk); /* * This works for seqpacket too. The receiver has ordered the * queue for us! We do one quick check first though */ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } /* Now we can treat all alike */ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; if (!ax25_sk(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_namelen != 0) { struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; ax25_digi digi; ax25_address src; const unsigned char *mac = skb_mac_header(skb); memset(sax, 0, sizeof(struct full_sockaddr_ax25)); ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, &digi, NULL, NULL); sax->sax25_family = AF_AX25; /* We set this correctly, even though we may not let the application know the digi calls further down (because it did NOT ask to know them). This could get political... **/ sax->sax25_ndigis = digi.ndigi; sax->sax25_call = src; if (sax->sax25_ndigis != 0) { int ct; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax; for (ct = 0; ct < digi.ndigi; ct++) fsa->fsa_digipeater[ct] = digi.calls[ct]; } msg->msg_namelen = sizeof(struct full_sockaddr_ax25); } skb_free_datagram(sk, skb); err = copied; out: release_sock(sk); return err; }
CWE-20
0
static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct irda_sock *self = irda_sk(sk);
	struct sk_buff *skb;
	size_t copied;
	int err;

	IRDA_DEBUG(4, "%s()\n", __func__);

	msg->msg_namelen = 0;

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
			   __func__, copied, size);
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}
	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	skb_free_datagram(sk, skb);

	/*
	 * Check if we have previously stopped IrTTP and we now
	 * have more free space in our rx_queue. If so tell IrTTP
	 * to start delivering frames again before our rx_queue gets
	 * empty
	 */
	if (self->rx_flow == FLOW_STOP) {
		if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
			IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
			self->rx_flow = FLOW_START;
			irttp_flow_request(self->tsap, FLOW_START);
		}
	}

	return copied;
}
CWE-20
0
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; }
CWE-20
0
static void mark_object(struct object *obj, struct strbuf *path, const char *name, void *data) { update_progress(data); }
CWE-119
26
static int rndis_set_response(struct rndis_params *params,
			      rndis_set_msg_type *buf)
{
	u32 BufLength, BufOffset;
	rndis_set_cmplt_type *resp;
	rndis_resp_t *r;

	r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_set_cmplt_type *)r->buf;

	BufLength = le32_to_cpu(buf->InformationBufferLength);
	BufOffset = le32_to_cpu(buf->InformationBufferOffset);

#ifdef	VERBOSE_DEBUG
	pr_debug("%s: Length: %d\n", __func__, BufLength);
	pr_debug("%s: Offset: %d\n", __func__, BufOffset);
	pr_debug("%s: InfoBuffer: ", __func__);

	for (i = 0; i < BufLength; i++) {
		pr_debug("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
	}

	pr_debug("\n");
#endif

	resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C);
	resp->MessageLength = cpu_to_le32(16);
	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */

	if (gen_ndis_set_resp(params, le32_to_cpu(buf->OID),
			((u8 *)buf) + 8 + BufOffset, BufLength, r))
		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
	else
		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);

	params->resp_avail(params->v);
	return 0;
}
CWE-668
7
netscreen_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info) { int pkt_len; char line[NETSCREEN_LINE_LENGTH]; char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH]; gboolean cap_dir; char cap_dst[13]; if (file_seek(wth->random_fh, seek_off, SEEK_SET, err) == -1) { return FALSE; } if (file_gets(line, NETSCREEN_LINE_LENGTH, wth->random_fh) == NULL) { *err = file_error(wth->random_fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } pkt_len = parse_netscreen_rec_hdr(phdr, line, cap_int, &cap_dir, cap_dst, err, err_info); if (pkt_len == -1) return FALSE; if (!parse_netscreen_hex_dump(wth->random_fh, pkt_len, cap_int, cap_dst, phdr, buf, err, err_info)) return FALSE; return TRUE; }
CWE-20
0
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) { u64 runtime, runtime_expires; int throttled; /* no need to continue the timer with no bandwidth constraint */ if (cfs_b->quota == RUNTIME_INF) goto out_deactivate; throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred */ if (cfs_b->idle && !throttled) goto out_deactivate; __refill_cfs_bandwidth_runtime(cfs_b); if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; return 0; } /* account preceding periods in which throttling occurred */ cfs_b->nr_throttled += overrun; runtime_expires = cfs_b->runtime_expires; /* * This check is repeated as we are holding onto the new bandwidth while * we unthrottle. This can potentially race with an unthrottled group * trying to acquire new bandwidth from the global pool. This can result * in us over-using our runtime if it is all used during this loop, but * only by limited amounts in that extreme case. */ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; raw_spin_unlock_irqrestore(&cfs_b->lock, flags); /* we can't nest cfs_b->lock while distributing bandwidth */ runtime = distribute_cfs_runtime(cfs_b, runtime, runtime_expires); raw_spin_lock_irqsave(&cfs_b->lock, flags); cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); lsub_positive(&cfs_b->runtime, runtime); } /* * While we are ensured activity in the period following an * unthrottle, this also covers the case in which the new bandwidth is * insufficient to cover the existing bandwidth deficit. (Forcing the * timer to remain active while there are any throttled entities.) */ cfs_b->idle = 0; return 0; out_deactivate: return 1; }
CWE-400
2
static void record_recent_object(struct object *obj, struct strbuf *path, const char *last, void *data) { sha1_array_append(&recent_objects, obj->oid.hash); }
CWE-119
26
int yr_object_array_set_item( YR_OBJECT* object, YR_OBJECT* item, int index) { YR_OBJECT_ARRAY* array; int i; int count; assert(index >= 0); assert(object->type == OBJECT_TYPE_ARRAY); array = object_as_array(object); if (array->items == NULL) { count = yr_max(64, (index + 1) * 2); array->items = (YR_ARRAY_ITEMS*) yr_malloc( sizeof(YR_ARRAY_ITEMS) + count * sizeof(YR_OBJECT*)); if (array->items == NULL) return ERROR_INSUFFICIENT_MEMORY; memset(array->items->objects, 0, count * sizeof(YR_OBJECT*)); array->items->count = count; } else if (index >= array->items->count) { count = array->items->count * 2; array->items = (YR_ARRAY_ITEMS*) yr_realloc( array->items, sizeof(YR_ARRAY_ITEMS) + count * sizeof(YR_OBJECT*)); if (array->items == NULL) return ERROR_INSUFFICIENT_MEMORY; for (i = array->items->count; i < count; i++) array->items->objects[i] = NULL; array->items->count = count; } item->parent = object; array->items->objects[index] = item; return ERROR_SUCCESS; }
CWE-119
26
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen) { muscle_private_t* priv = MUSCLE_DATA(card); mscfs_t *fs = priv->fs; int x; int count = 0; mscfs_check_cache(priv->fs); for(x = 0; x < fs->cache.size; x++) { u8* oid= fs->cache.array[x].objectId.id; sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "FILE: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); if(0 == memcmp(fs->currentPath, oid, 2)) { buf[0] = oid[2]; buf[1] = oid[3]; if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */ buf += 2; count+=2; } } return count; }
CWE-119
26
mcs_recv_connect_response(STREAM mcs_data) { UNUSED(mcs_data); uint8 result; int length; STREAM s; RD_BOOL is_fastpath; uint8 fastpath_hdr; logger(Protocol, Debug, "%s()", __func__); s = iso_recv(&is_fastpath, &fastpath_hdr); if (s == NULL) return False; ber_parse_header(s, MCS_CONNECT_RESPONSE, &length); ber_parse_header(s, BER_TAG_RESULT, &length); in_uint8(s, result); if (result != 0) { logger(Protocol, Error, "mcs_recv_connect_response(), result=%d", result); return False; } ber_parse_header(s, BER_TAG_INTEGER, &length); in_uint8s(s, length); /* connect id */ mcs_parse_domain_params(s); ber_parse_header(s, BER_TAG_OCTET_STRING, &length); sec_process_mcs_data(s); /* if (length > mcs_data->size) { logger(Protocol, Error, "mcs_recv_connect_response(), expected length=%d, got %d",length, mcs_data->size); length = mcs_data->size; } in_uint8a(s, mcs_data->data, length); mcs_data->p = mcs_data->data; mcs_data->end = mcs_data->data + length; */ return s_check_end(s); }
CWE-119
26
file_check_mem(struct magic_set *ms, unsigned int level)
{
	size_t len;

	if (level >= ms->c.len) {
		len = (ms->c.len += 20) * sizeof(*ms->c.li);
		ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ?
		    malloc(len) : realloc(ms->c.li, len));
		if (ms->c.li == NULL) {
			file_oomem(ms, len);
			return -1;
		}
	}
	ms->c.li[level].got_match = 0;
#ifdef ENABLE_CONDITIONALS
	ms->c.li[level].last_match = 0;
	ms->c.li[level].last_cond = COND_NONE;
#endif /* ENABLE_CONDITIONALS */
	return 0;
}
CWE-119
26
process_bitmap_updates(STREAM s) { uint16 num_updates; uint16 left, top, right, bottom, width, height; uint16 cx, cy, bpp, Bpp, compress, bufsize, size; uint8 *data, *bmpdata; int i; logger(Protocol, Debug, "%s()", __func__); in_uint16_le(s, num_updates); for (i = 0; i < num_updates; i++) { in_uint16_le(s, left); in_uint16_le(s, top); in_uint16_le(s, right); in_uint16_le(s, bottom); in_uint16_le(s, width); in_uint16_le(s, height); in_uint16_le(s, bpp); Bpp = (bpp + 7) / 8; in_uint16_le(s, compress); in_uint16_le(s, bufsize); cx = right - left + 1; cy = bottom - top + 1; logger(Graphics, Debug, "process_bitmap_updates(), [%d,%d,%d,%d], [%d,%d], bpp=%d, compression=%d", left, top, right, bottom, width, height, Bpp, compress); if (!compress) { int y; bmpdata = (uint8 *) xmalloc(width * height * Bpp); for (y = 0; y < height; y++) { in_uint8a(s, &bmpdata[(height - y - 1) * (width * Bpp)], width * Bpp); } ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); xfree(bmpdata); continue; } if (compress & 0x400) { size = bufsize; } else { in_uint8s(s, 2); /* pad */ in_uint16_le(s, size); in_uint8s(s, 4); /* line_size, final_size */ } in_uint8p(s, data, size); bmpdata = (uint8 *) xmalloc(width * height * Bpp); if (bitmap_decompress(bmpdata, width, height, data, size, Bpp)) { ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); } else { logger(Graphics, Warning, "process_bitmap_updates(), failed to decompress bitmap"); } xfree(bmpdata); } }
CWE-119
26
static void process_tree(struct rev_info *revs, struct tree *tree, show_object_fn show, struct strbuf *base, const char *name, void *cb_data) { struct object *obj = &tree->object; struct tree_desc desc; struct name_entry entry; enum interesting match = revs->diffopt.pathspec.nr == 0 ? all_entries_interesting: entry_not_interesting; int baselen = base->len; if (!revs->tree_objects) return; if (!obj) die("bad tree object"); if (obj->flags & (UNINTERESTING | SEEN)) return; if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) { if (revs->ignore_missing_links) return; die("bad tree object %s", oid_to_hex(&obj->oid)); } obj->flags |= SEEN; show(obj, base, name, cb_data); strbuf_addstr(base, name); if (base->len) strbuf_addch(base, '/'); init_tree_desc(&desc, tree->buffer, tree->size); while (tree_entry(&desc, &entry)) { if (match != all_entries_interesting) { match = tree_entry_interesting(&entry, base, 0, &revs->diffopt.pathspec); if (match == all_entries_not_interesting) break; if (match == entry_not_interesting) continue; } if (S_ISDIR(entry.mode)) process_tree(revs, lookup_tree(entry.sha1), show, base, entry.path, cb_data); else if (S_ISGITLINK(entry.mode)) process_gitlink(revs, entry.sha1, show, base, entry.path, cb_data); else process_blob(revs, lookup_blob(entry.sha1), show, base, entry.path, cb_data); } strbuf_setlen(base, baselen); free_tree_buffer(tree); }
CWE-119
26
translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace(*s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; }
CWE-119
26
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_ASSASSINATED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); goto retry_rebind; } if (status == NLM_LCK_DENIED_GRACE_PERIOD) { rpc_delay(task, NLMCLNT_GRACE_WAIT); goto retry_unlock; } if (status != NLM_LCK_GRANTED) printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); die: return; retry_rebind: nlm_rebind_host(req->a_host); retry_unlock: rpc_restart_call(task); }
CWE-400
2
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) { int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { void __user *namep; namep = (void __user __force *) m->msg_name; err = move_addr_to_kernel(namep, m->msg_namelen, address); if (err < 0) return err; } m->msg_name = address; } else { m->msg_name = NULL; } size = m->msg_iovlen * sizeof(struct iovec); if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) return -EFAULT; m->msg_iov = iov; err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { size_t len = iov[ct].iov_len; if (len > INT_MAX - err) { len = INT_MAX - err; iov[ct].iov_len = len; } err += len; } return err; }
CWE-20
0
void lpc546xxEthWritePhyReg(uint8_t opcode, uint8_t phyAddr,
   uint8_t regAddr, uint16_t data)
{
   uint32_t temp;

   //Valid opcode?
   if(opcode == SMI_OPCODE_WRITE)
   {
      //Take care not to alter MDC clock configuration
      temp = ENET->MAC_MDIO_ADDR & ENET_MAC_MDIO_ADDR_CR_MASK;
      //Set up a write operation
      temp |= ENET_MAC_MDIO_ADDR_MOC(1) | ENET_MAC_MDIO_ADDR_MB_MASK;
      //PHY address
      temp |= ENET_MAC_MDIO_ADDR_PA(phyAddr);
      //Register address
      temp |= ENET_MAC_MDIO_ADDR_RDA(regAddr);

      //Data to be written in the PHY register
      ENET->MAC_MDIO_DATA = data & ENET_MAC_MDIO_DATA_MD_MASK;

      //Start a write operation
      ENET->MAC_MDIO_ADDR = temp;
      //Wait for the write to complete
      while((ENET->MAC_MDIO_ADDR & ENET_MAC_MDIO_ADDR_MB_MASK) != 0)
      {
      }
   }
   else
   {
      //The MAC peripheral only supports standard Clause 22 opcodes
   }
}
CWE-20
0
static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; struct sockaddr_ieee802154 *saddr; saddr = (struct sockaddr_ieee802154 *)msg->msg_name; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* FIXME: skip headers if necessary ?! */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); if (saddr) { saddr->family = AF_IEEE802154; saddr->addr = mac_cb(skb)->sa; } if (addr_len) *addr_len = sizeof(*saddr); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; }
CWE-200
10
static int misaligned_store(struct pt_regs *regs, __u32 opcode, int displacement_not_indexed, int width_shift) { /* Return -1 for a fault, 0 for OK */ int error; int srcreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) { return error; } perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { return -1; } switch (width_shift) { case 1: *(__u16 *) &buffer = (__u16) regs->regs[srcreg]; break; case 2: *(__u32 *) &buffer = (__u32) regs->regs[srcreg]; break; case 3: buffer = regs->regs[srcreg]; break; default: printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { return -1; /* fault */ } } else { /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ __u64 val = regs->regs[srcreg]; switch (width_shift) { case 1: misaligned_kernel_word_store(address, val); break; case 2: asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address)); asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address)); break; case 3: asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address)); asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address)); break; default: printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } } return 0; }
CWE-400
2
int net_get(int s, void *arg, int *len) { struct net_hdr nh; int plen; if (net_read_exact(s, &nh, sizeof(nh)) == -1) { return -1; } plen = ntohl(nh.nh_len); if (!(plen <= *len)) printf("PLEN %d type %d len %d\n", plen, nh.nh_type, *len); assert(plen <= *len); /* XXX */ *len = plen; if ((*len) && (net_read_exact(s, arg, *len) == -1)) { return -1; } return nh.nh_type; }
CWE-20
0
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); msg->msg_namelen = 0; return 0; } len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; }
CWE-20
0