Column summary:

  code         string, length 23 to 2.05k  - source code of a single function
  label_name   string, length 6 to 7       - CWE identifier (e.g. CWE-362)
  label        int64, range 0 to 37        - integer class encoding of label_name

Each record below gives the code sample, followed by its label_name and label values.
static struct ip_options *tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); struct ip_options *dopt = NULL; if (opt && opt->optlen) { int opt_size = optlength(opt); dopt = kmalloc(opt_size, GFP_ATOMIC); if (dopt) { if (ip_options_echo(dopt, skb)) { kfree(dopt); dopt = NULL; } } } return dopt; }
label_name: CWE-362
label: 18
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct ipx_sock *ipxs = ipx_sk(sk);
	struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name;
	struct ipxhdr *ipx = NULL;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);
	/* put the autobinding in */
	if (!ipxs->port) {
		struct sockaddr_ipx uaddr;

		uaddr.sipx_port = 0;
		uaddr.sipx_network = 0;

#ifdef CONFIG_IPX_INTERN
		rc = -ENETDOWN;
		if (!ipxs->intrfc)
			goto out; /* Someone zonked the iface */
		memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */

		rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
				sizeof(struct sockaddr_ipx));
		if (rc)
			goto out;
	}

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out;

	ipx = ipx_hdr(skb);
	copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
				     copied);
	if (rc)
		goto out_free;
	if (skb->tstamp.tv64)
		sk->sk_stamp = skb->tstamp;

	msg->msg_namelen = sizeof(*sipx);

	if (sipx) {
		sipx->sipx_family = AF_IPX;
		sipx->sipx_port = ipx->ipx_source.sock;
		memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN);
		sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
		sipx->sipx_type = ipx->ipx_type;
		sipx->sipx_zero = 0;
	}
	rc = copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	release_sock(sk);
	return rc;
}
label_name: CWE-20
label: 0
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, struct sockaddr_storage *kern_address, int mode) { int tot_len; if (kern_msg->msg_namelen) { if (mode == VERIFY_READ) { int err = move_addr_to_kernel(kern_msg->msg_name, kern_msg->msg_namelen, kern_address); if (err < 0) return err; } kern_msg->msg_name = kern_address; } else kern_msg->msg_name = NULL; tot_len = iov_from_user_compat_to_kern(kern_iov, (struct compat_iovec __user *)kern_msg->msg_iov, kern_msg->msg_iovlen); if (tot_len >= 0) kern_msg->msg_iov = kern_iov; return tot_len; }
label_name: CWE-20
label: 0
static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_acomp racomp; strlcpy(racomp.type, "acomp", sizeof(racomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(struct crypto_report_acomp), &racomp)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; }
label_name: CWE-200
label: 10
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( &ctx->req)); struct skcipher_sg_list *sgl; struct scatterlist *sg; unsigned long iovlen; struct iovec *iov; int err = -EAGAIN; int used; long copied = 0; lock_sock(sk); msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; while (seglen) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; used = ctx->used; if (!used) { err = skcipher_wait_for_data(sk, flags); if (err) goto unlock; } used = min_t(unsigned long, used, seglen); used = af_alg_make_sg(&ctx->rsgl, from, used, 1); err = used; if (err < 0) goto unlock; if (ctx->more || used < ctx->used) used -= used % bs; err = -EINVAL; if (!used) goto free; ablkcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, ctx->iv); err = af_alg_wait_for_completion( ctx->enc ? crypto_ablkcipher_encrypt(&ctx->req) : crypto_ablkcipher_decrypt(&ctx->req), &ctx->completion); free: af_alg_free_sg(&ctx->rsgl); if (err) goto unlock; copied += used; from += used; seglen -= used; skcipher_pull_sgl(sk, used); } } err = 0; unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return copied ?: err; }
label_name: CWE-20
label: 0
static void Sp_split_regexp(js_State *J) { js_Regexp *re; const char *text; int limit, len, k; const char *p, *a, *b, *c, *e; Resub m; text = checkstring(J, 0); re = js_toregexp(J, 1); limit = js_isdefined(J, 2) ? js_tointeger(J, 2) : 1 << 30; js_newarray(J); len = 0; e = text + strlen(text); /* splitting the empty string */ if (e == text) { if (js_regexec(re->prog, text, &m, 0)) { if (len == limit) return; js_pushliteral(J, ""); js_setindex(J, -2, 0); } return; } p = a = text; while (a < e) { if (js_regexec(re->prog, a, &m, a > text ? REG_NOTBOL : 0)) break; /* no match */ b = m.sub[0].sp; c = m.sub[0].ep; /* empty string at end of last match */ if (b == p) { ++a; continue; } if (len == limit) return; js_pushlstring(J, p, b - p); js_setindex(J, -2, len++); for (k = 1; k < m.nsub; ++k) { if (len == limit) return; js_pushlstring(J, m.sub[k].sp, m.sub[k].ep - m.sub[k].sp); js_setindex(J, -2, len++); } a = p = c; } if (len == limit) return; js_pushstring(J, p); js_setindex(J, -2, len); }
label_name: CWE-674
label: 28
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	if (timer_pending(&br->multicast_querier_timer))
		return -EBUSY;

	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
label_name: CWE-20
label: 0
int generate_password(int length, unsigned char *password) { const char pwchars[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '%', '$' }; FILE *randfp; unsigned char pwtemp[MAX_PASSWD_BUF]; unsigned char *p; int i, n; int passlen; if ((length <= 0) || (length > MAX_PASSWD_LEN)) { fprintf(stderr, "Invalid password length specified.\n"); return -1; } /* Open the device to read random octets */ if ((randfp = fopen("/dev/urandom", "r")) == NULL) { perror("Error open /dev/urandom:"); return -1; } /* Read random octets */ if ((n = fread((char*)pwtemp, 1, length, randfp)) != length) { fprintf(stderr, "Error: Couldn't read from /dev/urandom\n"); fclose(randfp); return -1; } fclose(randfp); /* Now ensure each octet uses the defined character set */ for(i = 0, p = pwtemp; i < length; i++, p++) { *p = pwchars[((int)(*p)) % 64]; } /* Convert the password to UTF-16LE */ passlen = passwd_to_utf16( pwtemp, length, MAX_PASSWD_LEN, password); return passlen; }
label_name: CWE-287
label: 4
static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; struct sk_buff *skb; size_t copied; int err; if (flags & MSG_OOB) return -EOPNOTSUPP; if (addr_len) *addr_len=sizeof(*sin6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (err) goto out_free; /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) err = skb->len; out_free: skb_free_datagram(sk, skb); out: return err; csum_copy_err: skb_kill_datagram(sk, skb, flags); /* Error for blocking case is chosen to masquerade as some normal condition. */ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; goto out; }
label_name: CWE-20
label: 0
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; }
label_name: CWE-20
label: 0
void big_key_revoke(struct key *key) { struct path *path = (struct path *)&key->payload.data[big_key_path]; /* clear the quota */ key_payload_reserve(key, 0); if (key_is_instantiated(key) && (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) vfs_truncate(path, 0); }
label_name: CWE-20
label: 0
static ssize_t k90_show_macro_mode(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); const char *macro_mode; char data[8]; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_GET_MODE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", ret); return -EIO; } switch (data[0]) { case K90_MACRO_MODE_HW: macro_mode = "HW"; break; case K90_MACRO_MODE_SW: macro_mode = "SW"; break; default: dev_warn(dev, "K90 in unknown mode: %02hhx.\n", data[0]); return -EIO; } return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); }
label_name: CWE-119
label: 26
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
label_name: CWE-20
label: 0
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int ret; int copylen; ret = -EOPNOTSUPP; if (m->msg_flags&MSG_OOB) goto read_error; m->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; copylen = skb->len; if (len < copylen) { m->msg_flags |= MSG_TRUNC; copylen = len; } ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); if (ret) goto out_free; ret = (flags & MSG_TRUNC) ? skb->len : copylen; out_free: skb_free_datagram(sk, skb); caif_check_flow_release(sk); return ret; read_error: return ret; }
label_name: CWE-20
label: 0
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); }
label_name: CWE-20
label: 0
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { int err; int noblock; struct vmci_datagram *dg; size_t payload_len; struct sk_buff *skb; noblock = flags & MSG_DONTWAIT; if (flags & MSG_OOB || flags & MSG_ERRQUEUE) return -EOPNOTSUPP; msg->msg_namelen = 0; /* Retrieve the head sk_buff from the socket's receive queue. */ err = 0; skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); if (err) return err; if (!skb) return -EAGAIN; dg = (struct vmci_datagram *)skb->data; if (!dg) /* err is 0, meaning we read zero bytes. */ goto out; payload_len = dg->payload_size; /* Ensure the sk_buff matches the payload size claimed in the packet. */ if (payload_len != skb->len - sizeof(*dg)) { err = -EINVAL; goto out; } if (payload_len > len) { payload_len = len; msg->msg_flags |= MSG_TRUNC; } /* Place the datagram payload in the user's iovec. */ err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov, payload_len); if (err) goto out; if (msg->msg_name) { struct sockaddr_vm *vm_addr; /* Provide the address of the sender. */ vm_addr = (struct sockaddr_vm *)msg->msg_name; vsock_addr_init(vm_addr, dg->src.context, dg->src.resource); msg->msg_namelen = sizeof(*vm_addr); } err = payload_len; out: skb_free_datagram(&vsk->sk, skb); return err; }
label_name: CWE-20
label: 0
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; }
label_name: CWE-200
label: 10
static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; BoxBlurContext *s = ctx->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; AVFrame *out; int plane; int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub), ch = FF_CEIL_RSHIFT(in->height, s->vsub); int w[4] = { inlink->w, cw, cw, inlink->w }; int h[4] = { in->height, ch, ch, in->height }; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); for (plane = 0; in->data[plane] && plane < 4; plane++) hblur(out->data[plane], out->linesize[plane], in ->data[plane], in ->linesize[plane], w[plane], h[plane], s->radius[plane], s->power[plane], s->temp); for (plane = 0; in->data[plane] && plane < 4; plane++) vblur(out->data[plane], out->linesize[plane], out->data[plane], out->linesize[plane], w[plane], h[plane], s->radius[plane], s->power[plane], s->temp); av_frame_free(&in); return ff_filter_frame(outlink, out); }
label_name: CWE-119
label: 26
static int needs_empty_write(sector_t block, struct inode *inode) { int error; struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 }; bh_map.b_size = 1 << inode->i_blkbits; error = gfs2_block_map(inode, block, &bh_map, 0); if (unlikely(error)) return error; return !buffer_mapped(&bh_map); }
label_name: CWE-119
label: 26
CURLcode Curl_urldecode(struct SessionHandle *data, const char *string, size_t length, char **ostring, size_t *olen, bool reject_ctrl) { size_t alloc = (length?length:strlen(string))+1; char *ns = malloc(alloc); unsigned char in; size_t strindex=0; unsigned long hex; CURLcode res; if(!ns) return CURLE_OUT_OF_MEMORY; while(--alloc > 0) { in = *string; if(('%' == in) && ISXDIGIT(string[1]) && ISXDIGIT(string[2])) { /* this is two hexadecimal digits following a '%' */ char hexstr[3]; char *ptr; hexstr[0] = string[1]; hexstr[1] = string[2]; hexstr[2] = 0; hex = strtoul(hexstr, &ptr, 16); in = curlx_ultouc(hex); /* this long is never bigger than 255 anyway */ res = Curl_convert_from_network(data, &in, 1); if(res) { /* Curl_convert_from_network calls failf if unsuccessful */ free(ns); return res; } string+=2; alloc-=2; } if(reject_ctrl && (in < 0x20)) { free(ns); return CURLE_URL_MALFORMAT; } ns[strindex++] = in; string++; } ns[strindex]=0; /* terminate it */ if(olen) /* store output size */ *olen = strindex; if(ostring) /* store output string */ *ostring = ns; return CURLE_OK; }
label_name: CWE-119
label: 26
static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; struct sockaddr_ieee802154 *saddr; saddr = (struct sockaddr_ieee802154 *)msg->msg_name; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* FIXME: skip headers if necessary ?! */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); if (saddr) { saddr->family = AF_IEEE802154; saddr->addr = mac_cb(skb)->sa; } if (addr_len) *addr_len = sizeof(*saddr); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; }
label_name: CWE-20
label: 0
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { memset(srose, 0, msg->msg_namelen); srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; }
label_name: CWE-20
label: 0
static int handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) { pte_t entry; spinlock_t *ptl; /* * some architectures can have larger ptes than wordsize, * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y, * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses. * The code below just needs a consistent view for the ifs and * we later double check anyway with the ptl lock held. So here * a barrier will do. */ entry = *pte; barrier(); if (!pte_present(entry)) { if (pte_none(entry)) { if (vma->vm_ops) { if (likely(vma->vm_ops->fault)) return do_fault(mm, vma, address, pte, pmd, flags, entry); } return do_anonymous_page(mm, vma, address, pte, pmd, flags); } return do_swap_page(mm, vma, address, pte, pmd, flags, entry); } if (pte_protnone(entry)) return do_numa_page(mm, vma, address, entry, pte, pmd); ptl = pte_lockptr(mm, pmd); spin_lock(ptl); if (unlikely(!pte_same(*pte, entry))) goto unlock; if (flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) return do_wp_page(mm, vma, address, pte, pmd, ptl, entry); entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { update_mmu_cache(vma, address, pte); } else { /* * This is needed only for protection faults but the arch code * is not yet telling us if this is a protection fault or not. * This still avoids useless tlb flushes for .text page faults * with threads. */ if (flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vma, address); } unlock: pte_unmap_unlock(pte, ptl); return 0; }
label_name: CWE-20
label: 0
static int p4_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; struct cpu_hw_events *cpuc; struct perf_event *event; struct hw_perf_event *hwc; int idx, handled = 0; u64 val; perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < x86_pmu.num_counters; idx++) { int overflow; if (!test_bit(idx, cpuc->active_mask)) { /* catch in-flight IRQs */ if (__test_and_clear_bit(idx, cpuc->running)) handled++; continue; } event = cpuc->events[idx]; hwc = &event->hw; WARN_ON_ONCE(hwc->idx != idx); /* it might be unflagged overflow */ overflow = p4_pmu_clear_cccr_ovf(hwc); val = x86_perf_event_update(event); if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) continue; handled += overflow; /* event overflow for sure */ data.period = event->hw.last_period; if (!x86_perf_event_set_period(event)) continue; if (perf_event_overflow(event, 1, &data, regs)) x86_pmu_stop(event, 0); } if (handled) inc_irq_stat(apic_perf_irqs); /* * When dealing with the unmasking of the LVTPC on P4 perf hw, it has * been observed that the OVF bit flag has to be cleared first _before_ * the LVTPC can be unmasked. * * The reason is the NMI line will continue to be asserted while the OVF * bit is set. This causes a second NMI to generate if the LVTPC is * unmasked before the OVF bit is cleared, leading to unknown NMI * messages. */ apic_write(APIC_LVTPC, APIC_DM_NMI); return handled; }
label_name: CWE-400
label: 2
error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr)
{
   error_t error;
   uint_t index;
   Am335xAleEntry entry;

   //Ensure that there are no duplicate address entries in the ALE table
   index = am335xEthFindVlanAddrEntry(vlanId, macAddr);

   //No matching entry found?
   if(index >= CPSW_ALE_MAX_ENTRIES)
   {
      //Find a free entry in the ALE table
      index = am335xEthFindFreeEntry();
   }

   //Sanity check
   if(index < CPSW_ALE_MAX_ENTRIES)
   {
      //Set up a VLAN/address table entry
      entry.word2 = 0;
      entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR;
      entry.word0 = 0;

      //Multicast address?
      if(macIsMulticastAddr(macAddr))
      {
         //Set port mask
         entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) |
            CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0);

         //Set multicast forward state
         entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0);
      }

      //Set VLAN identifier
      entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId);
      //Copy the upper 16 bits of the unicast address
      entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1];
      //Copy the lower 32 bits of the unicast address
      entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) |
         (macAddr->b[4] << 8) | macAddr->b[5];

      //Add a new entry to the ALE table
      am335xEthWriteEntry(index, &entry);

      //Successful processing
      error = NO_ERROR;
   }
   else
   {
      //The ALE table is full
      error = ERROR_FAILURE;
   }

   //Return status code
   return error;
}
label_name: CWE-20
label: 0
static int iscsi_add_notunderstood_response( char *key, char *value, struct iscsi_param_list *param_list) { struct iscsi_extra_response *extra_response; if (strlen(value) > VALUE_MAXLEN) { pr_err("Value for notunderstood key \"%s\" exceeds %d," " protocol error.\n", key, VALUE_MAXLEN); return -1; } extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL); if (!extra_response) { pr_err("Unable to allocate memory for" " struct iscsi_extra_response.\n"); return -1; } INIT_LIST_HEAD(&extra_response->er_list); strncpy(extra_response->key, key, strlen(key) + 1); strncpy(extra_response->value, NOTUNDERSTOOD, strlen(NOTUNDERSTOOD) + 1); list_add_tail(&extra_response->er_list, &param_list->extra_response_list); return 0; }
label_name: CWE-119
label: 26
id3_skip (SF_PRIVATE * psf) { unsigned char buf [10] ; memset (buf, 0, sizeof (buf)) ; psf_binheader_readf (psf, "pb", 0, buf, 10) ; if (buf [0] == 'I' && buf [1] == 'D' && buf [2] == '3') { int offset = buf [6] & 0x7f ; offset = (offset << 7) | (buf [7] & 0x7f) ; offset = (offset << 7) | (buf [8] & 0x7f) ; offset = (offset << 7) | (buf [9] & 0x7f) ; psf_log_printf (psf, "ID3 length : %d\n--------------------\n", offset) ; /* Never want to jump backwards in a file. */ if (offset < 0) return 0 ; /* Calculate new file offset and position ourselves there. */ psf->fileoffset += offset + 10 ; psf_binheader_readf (psf, "p", psf->fileoffset) ; return 1 ; } ; return 0 ; } /* id3_skip */
label_name: CWE-119
label: 26
static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct sk_buff *skb; size_t copied; int err; IRDA_DEBUG(4, "%s()\n", __func__); msg->msg_namelen = 0; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) return err; skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", __func__, copied, size); copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); skb_free_datagram(sk, skb); /* * Check if we have previously stopped IrTTP and we know * have more free space in our rx_queue. If so tell IrTTP * to start delivering frames again before our rx_queue gets * empty */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } } return copied; }
label_name: CWE-20
label: 0
static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); }
label_name: CWE-119
label: 26
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) { enum ctx_state prev_state; prev_state = exception_enter(); if (notify_die(DIE_TRAP, "stack segment", regs, error_code, X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { preempt_conditional_sti(regs); do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); preempt_conditional_cli(regs); } exception_exit(prev_state); }
label_name: CWE-269
label: 6
void grubfs_free (GrubFS *gf) {
	if (gf) {
		if (gf->file && gf->file->device)
			free (gf->file->device->disk);
		//free (gf->file->device);
		free (gf->file);
		free (gf);
	}
}
label_name: CWE-119
label: 26
static int readContigStripsIntoBuffer (TIFF* in, uint8* buf) { uint8* bufp = buf; int32 bytes_read = 0; uint32 strip, nstrips = TIFFNumberOfStrips(in); uint32 stripsize = TIFFStripSize(in); uint32 rows = 0; uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps); tsize_t scanline_size = TIFFScanlineSize(in); if (scanline_size == 0) { TIFFError("", "TIFF scanline size is zero!"); return 0; } for (strip = 0; strip < nstrips; strip++) { bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1); rows = bytes_read / scanline_size; if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize)) TIFFError("", "Strip %d: read %lu bytes, strip size %lu", (int)strip + 1, (unsigned long) bytes_read, (unsigned long)stripsize); if (bytes_read < 0 && !ignore) { TIFFError("", "Error reading strip %lu after %lu rows", (unsigned long) strip, (unsigned long)rows); return 0; } bufp += bytes_read; } return 1; } /* end readContigStripsIntoBuffer */
label_name: CWE-119
label: 26
static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_map_blocks *map, int split_flag, int flags) { ext4_lblk_t ee_block; struct ext4_extent *ex; unsigned int ee_len, depth; int err = 0; int uninitialized; int split_flag1, flags1; depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); uninitialized = ext4_ext_is_uninitialized(ex); if (map->m_lblk + map->m_len < ee_block + ee_len) { split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? EXT4_EXT_MAY_ZEROOUT : 0; flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; if (uninitialized) split_flag1 |= EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2; err = ext4_split_extent_at(handle, inode, path, map->m_lblk + map->m_len, split_flag1, flags1); if (err) goto out; } ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) return PTR_ERR(path); if (map->m_lblk >= ee_block) { split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? EXT4_EXT_MAY_ZEROOUT : 0; if (uninitialized) split_flag1 |= EXT4_EXT_MARK_UNINIT1; if (split_flag & EXT4_EXT_MARK_UNINIT2) split_flag1 |= EXT4_EXT_MARK_UNINIT2; err = ext4_split_extent_at(handle, inode, path, map->m_lblk, split_flag1, flags); if (err) goto out; } ext4_ext_show_leaf(inode, path); out: return err ? err : map->m_len; }
label_name: CWE-362
label: 18
struct se_portal_group *tcm_loop_make_naa_tpg( struct se_wwn *wwn, struct config_group *group, const char *name) { struct tcm_loop_hba *tl_hba = container_of(wwn, struct tcm_loop_hba, tl_hba_wwn); struct tcm_loop_tpg *tl_tpg; char *tpgt_str, *end_ptr; int ret; unsigned short int tpgt; tpgt_str = strstr(name, "tpgt_"); if (!tpgt_str) { printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" " group\n"); return ERR_PTR(-EINVAL); } tpgt_str += 5; /* Skip ahead of "tpgt_" */ tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); if (tpgt > TL_TPGS_PER_HBA) { printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" " %u\n", tpgt, TL_TPGS_PER_HBA); return ERR_PTR(-EINVAL); } tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; tl_tpg->tl_hba = tl_hba; tl_tpg->tl_tpgt = tpgt; /* * Register the tl_tpg as an emulated SAS TCM Target Endpoint */ ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) return ERR_PTR(-ENOMEM); printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); return &tl_tpg->tl_se_tpg; }
label_name: CWE-119
label: 26
static void perf_log_throttle(struct perf_event *event, int enable) { struct perf_output_handle handle; struct perf_sample_data sample; int ret; struct { struct perf_event_header header; u64 time; u64 id; u64 stream_id; } throttle_event = { .header = { .type = PERF_RECORD_THROTTLE, .misc = 0, .size = sizeof(throttle_event), }, .time = perf_clock(), .id = primary_event_id(event), .stream_id = event->id, }; if (enable) throttle_event.header.type = PERF_RECORD_UNTHROTTLE; perf_event_header__init_id(&throttle_event.header, &sample, event); ret = perf_output_begin(&handle, event, throttle_event.header.size, 1, 0); if (ret) return; perf_output_put(&handle, throttle_event); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); }
label_name: CWE-400
label: 2
static int netbk_count_requests(struct xenvif *vif, struct xen_netif_tx_request *first, struct xen_netif_tx_request *txp, int work_to_do) { RING_IDX cons = vif->tx.req_cons; int frags = 0; if (!(first->flags & XEN_NETTXF_more_data)) return 0; do { if (frags >= work_to_do) { netdev_dbg(vif->dev, "Need more frags\n"); return -frags; } if (unlikely(frags >= MAX_SKB_FRAGS)) { netdev_dbg(vif->dev, "Too many frags\n"); return -frags; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), sizeof(*txp)); if (txp->size > first->size) { netdev_dbg(vif->dev, "Frags galore\n"); return -frags; } first->size -= txp->size; frags++; if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, txp->size); return -frags; } } while ((txp++)->flags & XEN_NETTXF_more_data); return frags; }
label_name: CWE-20
label: 0
static int pfkey_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *skb; int copied, err; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; msg->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; }
label_name: CWE-20
label: 0
static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; struct sk_buff *skb; size_t copied; int err; if (flags & MSG_OOB) return -EOPNOTSUPP; if (addr_len) *addr_len=sizeof(*sin6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (err) goto out_free; /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) err = skb->len; out_free: skb_free_datagram(sk, skb); out: return err; csum_copy_err: skb_kill_datagram(sk, skb, flags); /* Error for blocking case is chosen to masquerade as some normal condition. */ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; goto out; }
label_name: CWE-200
label: 10
CURLcode Curl_auth_create_plain_message(struct Curl_easy *data, const char *userp, const char *passwdp, char **outptr, size_t *outlen) { CURLcode result; char *plainauth; size_t ulen; size_t plen; size_t plainlen; *outlen = 0; *outptr = NULL; ulen = strlen(userp); plen = strlen(passwdp); /* Compute binary message length. Check for overflows. */ if((ulen > SIZE_T_MAX/2) || (plen > (SIZE_T_MAX/2 - 2))) return CURLE_OUT_OF_MEMORY; plainlen = 2 * ulen + plen + 2; plainauth = malloc(plainlen); if(!plainauth) return CURLE_OUT_OF_MEMORY; /* Calculate the reply */ memcpy(plainauth, userp, ulen); plainauth[ulen] = '\0'; memcpy(plainauth + ulen + 1, userp, ulen); plainauth[2 * ulen + 1] = '\0'; memcpy(plainauth + 2 * ulen + 2, passwdp, plen); /* Base64 encode the reply */ result = Curl_base64_encode(data, plainauth, plainlen, outptr, outlen); free(plainauth); return result; }
label_name: CWE-119
label: 26
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int err = 0; lock_sock(sk); /* * This works for seqpacket too. The receiver has ordered the * queue for us! We do one quick check first though */ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } /* Now we can treat all alike */ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; if (!ax25_sk(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_namelen != 0) { struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; ax25_digi digi; ax25_address src; const unsigned char *mac = skb_mac_header(skb); memset(sax, 0, sizeof(struct full_sockaddr_ax25)); ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, &digi, NULL, NULL); sax->sax25_family = AF_AX25; /* We set this correctly, even though we may not let the application know the digi calls further down (because it did NOT ask to know them). This could get political... **/ sax->sax25_ndigis = digi.ndigi; sax->sax25_call = src; if (sax->sax25_ndigis != 0) { int ct; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax; for (ct = 0; ct < digi.ndigi; ct++) fsa->fsa_digipeater[ct] = digi.calls[ct]; } msg->msg_namelen = sizeof(struct full_sockaddr_ax25); } skb_free_datagram(sk, skb); err = copied; out: release_sock(sk); return err; }
label_name: CWE-20
label: 0
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { memset(srose, 0, msg->msg_namelen); srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; }
label_name: CWE-20
label: 0
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
label_name: CWE-20
label: 0
static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { return dax_mkwrite(vma, vmf, ext4_get_block_dax, ext4_end_io_unwritten); }
label_name: CWE-362
label: 18
static void __exit xfrm6_tunnel_fini(void) { unregister_pernet_subsys(&xfrm6_tunnel_net_ops); xfrm6_tunnel_spi_fini(); xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); }
label_name: CWE-362
label: 18
int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, int type) { int err; char *xattr_name; size_t size = 0; char *value = NULL; hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino); switch (type) { case ACL_TYPE_ACCESS: xattr_name = XATTR_NAME_POSIX_ACL_ACCESS; if (acl) { err = posix_acl_equiv_mode(acl, &inode->i_mode); if (err < 0) return err; } err = 0; break; case ACL_TYPE_DEFAULT: xattr_name = XATTR_NAME_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { size = posix_acl_xattr_size(acl->a_count); if (unlikely(size > HFSPLUS_MAX_INLINE_DATA_SIZE)) return -ENOMEM; value = (char *)hfsplus_alloc_attr_entry(); if (unlikely(!value)) return -ENOMEM; err = posix_acl_to_xattr(&init_user_ns, acl, value, size); if (unlikely(err < 0)) goto end_set_acl; } err = __hfsplus_setxattr(inode, xattr_name, value, size, 0); end_set_acl: hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value); if (!err) set_cached_acl(inode, type, acl); return err; }
label_name: CWE-285
label: 23
static VALUE cState_space_before_set(VALUE self, VALUE space_before) { unsigned long len; GET_STATE(self); Check_Type(space_before, T_STRING); len = RSTRING_LEN(space_before); if (len == 0) { if (state->space_before) { ruby_xfree(state->space_before); state->space_before = NULL; state->space_before_len = 0; } } else { if (state->space_before) ruby_xfree(state->space_before); state->space_before = strdup(RSTRING_PTR(space_before)); state->space_before_len = len; } return Qnil; }
label_name: CWE-119
label: 26
rndr_quote(struct buf *ob, const struct buf *text, void *opaque) { if (!text || !text->size) return 0; BUFPUTSL(ob, "<q>"); bufput(ob, text->data, text->size); BUFPUTSL(ob, "</q>"); return 1; }
label_name: CWE-74
label: 1
static int http_open(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; s->filesize = -1; s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); if (options) av_dict_copy(&s->chained_options, *options, 0); if (s->headers) { int len = strlen(s->headers); if (len < 2 || strcmp("\r\n", s->headers + len - 2)) { av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n"); ret = av_reallocp(&s->headers, len + 3); if (ret < 0) return ret; s->headers[len] = '\r'; s->headers[len + 1] = '\n'; s->headers[len + 2] = '\0'; } } if (s->listen) { return http_listen(h, uri, flags, options); } ret = http_open_cnx(h, options); if (ret < 0) av_dict_free(&s->chained_options); return ret; }
label_name: CWE-119
label: 26
error_t enc624j600SoftReset(NetInterface *interface)
{
   //Wait for the SPI interface to be ready
   do
   {
      //Write 0x1234 to EUDAST
      enc624j600WriteReg(interface, ENC624J600_REG_EUDAST, 0x1234);
      //Read back register and check contents
   } while(enc624j600ReadReg(interface, ENC624J600_REG_EUDAST) != 0x1234);

   //Poll CLKRDY and wait for it to become set
   while((enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) & ESTAT_CLKRDY) == 0)
   {
   }

   //Issue a system reset command by setting ETHRST
   enc624j600SetBit(interface, ENC624J600_REG_ECON2, ECON2_ETHRST);
   //Wait at least 25us for the reset to take place
   sleep(1);

   //Read EUDAST to confirm that the system reset took place.
   //EUDAST should have reverted back to its reset default
   if(enc624j600ReadReg(interface, ENC624J600_REG_EUDAST) != 0x0000)
   {
      return ERROR_FAILURE;
   }

   //Wait at least 256us for the PHY registers and PHY
   //status bits to become available
   sleep(1);

   //The controller is now ready to accept further commands
   return NO_ERROR;
}
label_name: CWE-20
label: 0
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t * p_code_block) { OPJ_UINT32 l_data_size; /* The +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */ l_data_size = 1 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) * (p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32)); if (l_data_size > p_code_block->data_size) { if (p_code_block->data) { /* We refer to data - 1 since below we incremented it */ opj_free(p_code_block->data - 1); } p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1); if (! p_code_block->data) { p_code_block->data_size = 0U; return OPJ_FALSE; } p_code_block->data_size = l_data_size; /* We reserve the initial byte as a fake byte to a non-FF value */ /* and increment the data pointer, so that opj_mqc_init_enc() */ /* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */ /* it. */ p_code_block->data[0] = 0; p_code_block->data += 1; /*why +1 ?*/ } return OPJ_TRUE; }
label_name: CWE-119
label: 26
error_t lpc546xxEthUpdateMacConfig(NetInterface *interface)
{
   uint32_t config;

   //Read current MAC configuration
   config = ENET->MAC_CONFIG;

   //10BASE-T or 100BASE-TX operation mode?
   if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS)
   {
      config |= ENET_MAC_CONFIG_FES_MASK;
   }
   else
   {
      config &= ~ENET_MAC_CONFIG_FES_MASK;
   }

   //Half-duplex or full-duplex mode?
   if(interface->duplexMode == NIC_FULL_DUPLEX_MODE)
   {
      config |= ENET_MAC_CONFIG_DM_MASK;
   }
   else
   {
      config &= ~ENET_MAC_CONFIG_DM_MASK;
   }

   //Update MAC configuration register
   ENET->MAC_CONFIG = config;

   //Successful processing
   return NO_ERROR;
}
label_name: CWE-20
label: 0
static void vector64_dst_append(RStrBuf *sb, csh *handle, cs_insn *insn, int n, int i) {
	cs_arm64_op op = INSOP64 (n);

	if (op.vector_index != -1) {
		i = op.vector_index;
	}
#if CS_API_MAJOR == 4
	const bool isvessas = (op.vess || op.vas);
#else
	const bool isvessas = op.vas;
#endif
	if (isvessas && i != -1) {
		int size = vector_size (&op);
		int shift = i * size;
		char *regc = "l";
		size_t s = sizeof (bitmask_by_width) / sizeof (*bitmask_by_width);
		size_t index = size > 0? (size - 1) % s: 0;
		if (index >= BITMASK_BY_WIDTH_COUNT) {
			index = 0;
		}
		ut64 mask = bitmask_by_width[index];
		if (shift >= 64) {
			shift -= 64;
			regc = "h";
		}
		if (shift > 0 && shift < 64) {
			r_strbuf_appendf (sb, "%d,SWAP,0x%"PFMT64x",&,<<,%s%s,0x%"PFMT64x",&,|,%s%s",
				shift, mask, REG64 (n), regc, VEC64_MASK (shift, size), REG64 (n), regc);
		} else {
			int dimsize = size % 64;
			r_strbuf_appendf (sb, "0x%"PFMT64x",&,%s%s,0x%"PFMT64x",&,|,%s%s",
				mask, REG64 (n), regc, VEC64_MASK (shift, dimsize), REG64 (n), regc);
		}
	} else {
		r_strbuf_appendf (sb, "%s", REG64 (n));
	}
}
label_name: CWE-119
label: 26
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, struct xfrm_replay_state_esn **preplay_esn, struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; if (!rta) return 0; up = nla_data(rta); p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); if (!p) return -ENOMEM; pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; } *replay_esn = p; *preplay_esn = pp; return 0; }
label_name: CWE-200
label: 10
static int iwl_process_add_sta_resp(struct iwl_priv *priv, struct iwl_addsta_cmd *addsta, struct iwl_rx_packet *pkt) { u8 sta_id = addsta->sta.sta_id; unsigned long flags; int ret = -EIO; if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", pkt->hdr.flags); return ret; } IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", sta_id); spin_lock_irqsave(&priv->shrd->sta_lock, flags); switch (pkt->u.add_sta.status) { case ADD_STA_SUCCESS_MSK: IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); iwl_sta_ucode_activate(priv, sta_id); ret = 0; break; case ADD_STA_NO_ROOM_IN_TABLE: IWL_ERR(priv, "Adding station %d failed, no room in table.\n", sta_id); break; case ADD_STA_NO_BLOCK_ACK_RESOURCE: IWL_ERR(priv, "Adding station %d failed, no block ack " "resource.\n", sta_id); break; case ADD_STA_MODIFY_NON_EXIST_STA: IWL_ERR(priv, "Attempting to modify non-existing station %d\n", sta_id); break; default: IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); break; } IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, priv->stations[sta_id].sta.sta.addr); /* * XXX: The MAC address in the command buffer is often changed from * the original sent to the device. That is, the MAC address * written to the command buffer often is not the same MAC address * read from the command buffer when the command returns. This * issue has not yet been resolved and this debugging is left to * observe the problem. */ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return ret; }
label_name: CWE-119
label: 26
hash_link_ref(const uint8_t *link_ref, size_t length) { size_t i; unsigned int hash = 0; for (i = 0; i < length; ++i) hash = tolower(link_ref[i]) + (hash << 6) + (hash << 16) - hash; return hash; }
label_name: CWE-327
label: 3
void * pvPortMalloc( size_t xWantedSize )
{
    void * pvReturn = NULL;
    static uint8_t * pucAlignedHeap = NULL;

    /* Ensure that blocks are always aligned to the required number of bytes. */
    #if ( portBYTE_ALIGNMENT != 1 )
    {
        if( xWantedSize & portBYTE_ALIGNMENT_MASK )
        {
            /* Byte alignment required. */
            xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
        }
    }
    #endif

    vTaskSuspendAll();
    {
        if( pucAlignedHeap == NULL )
        {
            /* Ensure the heap starts on a correctly aligned boundary. */
            pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
        }

        /* Check there is enough room left for the allocation. */
        if( ( ( xNextFreeByte + xWantedSize ) < configADJUSTED_HEAP_SIZE ) &&
            ( ( xNextFreeByte + xWantedSize ) > xNextFreeByte ) ) /* Check for overflow. */
        {
            /* Return the next free byte then increment the index past this
             * block. */
            pvReturn = pucAlignedHeap + xNextFreeByte;
            xNextFreeByte += xWantedSize;
        }

        traceMALLOC( pvReturn, xWantedSize );
    }
    ( void ) xTaskResumeAll();

    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
    {
        if( pvReturn == NULL )
        {
            extern void vApplicationMallocFailedHook( void );
            vApplicationMallocFailedHook();
        }
    }
    #endif

    return pvReturn;
}
label_name: CWE-119
label: 26
static int udf_symlink_filler(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct buffer_head *bh = NULL; unsigned char *symlink; int err = -EIO; unsigned char *p = kmap(page); struct udf_inode_info *iinfo; uint32_t pos; iinfo = UDF_I(inode); pos = udf_block_map(inode, 0); down_read(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr; } else { bh = sb_bread(inode->i_sb, pos); if (!bh) goto out; symlink = bh->b_data; } udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p); brelse(bh); up_read(&iinfo->i_data_sem); SetPageUptodate(page); kunmap(page); unlock_page(page); return 0; out: up_read(&iinfo->i_data_sem); SetPageError(page); kunmap(page); unlock_page(page); return err; }
label_name: CWE-119
label: 26
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; int rc; size_t tmpl; if (_idn2_ascii_p (src, srclen)) { if (flags & IDN2_ALABEL_ROUNDTRIP) /* FIXME implement this MAY: If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ return IDN2_INVALID_FLAGS; if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free(p); return rc; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) return rc; *dstlen = 4 + tmpl; return IDN2_OK; }
label_name: CWE-20
label: 0
void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) pr_err("%s: fasync list not empty!\n", __func__); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; }
label_name: CWE-362
label: 18
CAMLprim value caml_alloc_dummy(value size) { mlsize_t wosize = Int_val(size); if (wosize == 0) return Atom(0); return caml_alloc (wosize, 0); }
label_name: CWE-119
label: 26
void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); listNode *ln; redisClient *c; /* Awake clients that got all the swapped keys they requested */ if (server.vm_enabled && listLength(server.io_ready_clients)) { listIter li; listRewind(server.io_ready_clients,&li); while((ln = listNext(&li))) { c = ln->value; struct redisCommand *cmd; /* Resume the client. */ listDelNode(server.io_ready_clients,ln); c->flags &= (~REDIS_IO_WAIT); server.vm_blocked_clients--; aeCreateFileEvent(server.el, c->fd, AE_READABLE, readQueryFromClient, c); cmd = lookupCommand(c->argv[0]->ptr); redisAssert(cmd != NULL); call(c,cmd); resetClient(c); /* There may be more data to process in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } } /* Try to process pending commands for clients that were just unblocked. */ while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); redisAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } /* Write the AOF buffer on disk */ flushAppendOnlyFile(); }
CWE-20
0
de_dotdot( char* file )
    {
    char* cp;
    char* cp2;
    int l;

    /* Collapse any multiple / sequences. */
    while ( ( cp = strstr( file, "//") ) != (char*) 0 )
	{
	for ( cp2 = cp + 2; *cp2 == '/'; ++cp2 )
	    continue;
	(void) strcpy( cp + 1, cp2 );
	}

    /* Remove leading ./ and any /./ sequences. */
    while ( strncmp( file, "./", 2 ) == 0 )
	(void) memmove( file, file + 2, strlen( file ) - 1 );
    while ( ( cp = strstr( file, "/./") ) != (char*) 0 )
	(void) memmove( cp, cp + 2, strlen( file ) - 1 );

    /* Alternate between removing leading ../ and removing xxx/../ */
    for (;;)
	{
	while ( strncmp( file, "../", 3 ) == 0 )
	    (void) memmove( file, file + 3, strlen( file ) - 2 );
	cp = strstr( file, "/../" );
	if ( cp == (char*) 0 )
	    break;
	for ( cp2 = cp - 1; cp2 >= file && *cp2 != '/'; --cp2 )
	    continue;
	(void) strcpy( cp2 + 1, cp + 4 );
	}

    /* Also elide any xxx/.. at the end. */
    while ( ( l = strlen( file ) ) > 3 &&
	    strcmp( ( cp = file + l - 3 ), "/.." ) == 0 )
	{
	for ( cp2 = cp - 1; cp2 >= file && *cp2 != '/'; --cp2 )
	    continue;
	if ( cp2 < file )
	    break;
	*cp2 = '\0';
	}
    }
CWE-119
26
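The memmove lengths in de_dotdot above are derived from strlen(file), the whole string, rather than from the tail actually being shifted, which is the assumed source of its CWE-119 label. A self-contained sketch of the safer arithmetic (illustrative, not the thttpd patch itself):

#include <string.h>

/* Sketch: remove "/./" sequences by moving exactly the tail that follows,
** including its terminating NUL (assumed fix, names illustrative). */
static void
strip_dot_slash( char* file )
    {
    char* cp;

    while ( ( cp = strstr( file, "/./" ) ) != (char*) 0 )
	(void) memmove( cp, cp + 2, strlen( cp + 2 ) + 1 );
    }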
cdf_file_summary_info(struct magic_set *ms, const cdf_header_t *h,
    const cdf_stream_t *sst, const uint64_t clsid[2])
{
	cdf_summary_info_header_t si;
	cdf_property_info_t *info;
	size_t count;
	int m;

	if (cdf_unpack_summary_info(sst, h, &si, &info, &count) == -1)
		return -1;

	if (NOTMIME(ms)) {
		const char *str;

		if (file_printf(ms, "Composite Document File V2 Document")
		    == -1)
			return -1;

		if (file_printf(ms, ", %s Endian",
		    si.si_byte_order == 0xfffe ?  "Little" : "Big") == -1)
			return -2;
		switch (si.si_os) {
		case 2:
			if (file_printf(ms, ", Os: Windows, Version %d.%d",
			    si.si_os_version & 0xff,
			    (uint32_t)si.si_os_version >> 8) == -1)
				return -2;
			break;
		case 1:
			if (file_printf(ms, ", Os: MacOS, Version %d.%d",
			    (uint32_t)si.si_os_version >> 8,
			    si.si_os_version & 0xff) == -1)
				return -2;
			break;
		default:
			if (file_printf(ms, ", Os %d, Version: %d.%d",
			    si.si_os, si.si_os_version & 0xff,
			    (uint32_t)si.si_os_version >> 8) == -1)
				return -2;
			break;
		}

		str = cdf_clsid_to_mime(clsid, clsid2desc);
		if (str)
			if (file_printf(ms, ", %s", str) == -1)
				return -2;
	}

	m = cdf_file_property_info(ms, info, count, clsid);
	free(info);

	return m == -1 ? -2 : m;
}
CWE-119
26
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options *opt = NULL;
	struct inet_request_sock *req_inet;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* We can't use ip_options_get() directly because it makes a call to
	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
	 * we won't always have CAP_NET_RAW even though we _always_ want to
	 * set the IPOPT_CIPSO option. */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->__data, buf, buf_len);
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	req_inet = inet_rsk(req);
	opt = xchg(&req_inet->opt, opt);
	kfree(opt);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
CWE-362
18
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned int, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	msg.msg_name = (struct sockaddr *)&address;
	msg.msg_namelen = sizeof(address);
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	if (err >= 0 && addr != NULL) {
		err2 = move_addr_to_user(&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}
CWE-20
0
static int read_public_key(RSA *rsa)
{
	int r;
	sc_path_t path;
	sc_file_t *file;
	u8 buf[2048], *p = buf;
	size_t bufsize, keysize;

	r = select_app_df();
	if (r)
		return 1;
	sc_format_path("I1012", &path);
	r = sc_select_file(card, &path, &file);
	if (r) {
		fprintf(stderr, "Unable to select public key file: %s\n",
			sc_strerror(r));
		return 2;
	}
	bufsize = file->size;
	sc_file_free(file);
	r = sc_read_binary(card, 0, buf, bufsize, 0);
	if (r < 0) {
		fprintf(stderr, "Unable to read public key file: %s\n",
			sc_strerror(r));
		return 2;
	}
	bufsize = r;

	do {
		if (bufsize < 4)
			return 3;
		keysize = (p[0] << 8) | p[1];
		if (keysize == 0)
			break;
		if (keysize < 3)
			return 3;
		if (p[2] == opt_key_num)
			break;
		p += keysize;
		bufsize -= keysize;
	} while (1);

	if (keysize == 0) {
		printf("Key number %d not found.\n", opt_key_num);
		return 2;
	}

	return parse_public_key(p, keysize, rsa);
}
CWE-119
26
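read_public_key above trusts two attacker-influenced lengths: file->size is used to read into a fixed 2048-byte stack buffer, and each record's keysize is followed without checking it against the bytes actually read. A self-contained sketch of the bounds-checked record walk its CWE-119 label suggests (assumed fix, phrased with illustrative names, not necessarily the upstream OpenSC patch):

#include <stddef.h>

/* Sketch: walk length-prefixed key records without stepping past the
 * buffer; records claiming more bytes than remain are rejected. */
static int find_key_record(const unsigned char *buf, size_t bufsize,
			   unsigned char key_num,
			   const unsigned char **rec, size_t *reclen)
{
	const unsigned char *p = buf;

	while (bufsize >= 4) {
		size_t keysize = (size_t)(p[0] << 8) | p[1];

		if (keysize < 3 || keysize > bufsize)
			return -1;	/* malformed record: refuse to follow it */
		if (p[2] == key_num) {
			*rec = p;
			*reclen = keysize;
			return 0;
		}
		p += keysize;
		bufsize -= keysize;
	}
	return -1;			/* not found */
}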
static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
{
	u32 low32, high32;
	unsigned long tmpl;
	struct desc_ptr dt;

	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	/*
	 * Load null selectors, so we can avoid reloading them in
	 * __vmx_load_host_state(), in case userspace uses the null selectors
	 * too (the expected case).
	 */
	vmcs_write16(HOST_DS_SELECTOR, 0);
	vmcs_write16(HOST_ES_SELECTOR, 0);
#else
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#endif
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	native_store_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
	vmx->host_idt_base = dt.address;

	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */

	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */

	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
		rdmsr(MSR_IA32_CR_PAT, low32, high32);
		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
	}
}
CWE-400
2
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied;
	int rc;

	pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);

	skb = skb_recv_datagram(sk, flags, noblock, &rc);
	if (!skb)
		return rc;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	skb_free_datagram(sk, skb);

	return rc ? : copied;
}
CWE-20
0
sf_flac_write_callback (const FLAC__StreamDecoder * UNUSED (decoder), const FLAC__Frame *frame, const int32_t * const buffer [], void *client_data)
{	SF_PRIVATE *psf = (SF_PRIVATE*) client_data ;
	FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;

	pflac->frame = frame ;
	pflac->bufferpos = 0 ;

	pflac->bufferbackup = SF_FALSE ;
	pflac->wbuffer = buffer ;

	flac_buffer_copy (psf) ;

	return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE ;
} /* sf_flac_write_callback */
CWE-119
26
zend_op_array *compile_string(zval *source_string, char *filename TSRMLS_DC)
{
	zend_lex_state original_lex_state;
	zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array));
	zend_op_array *original_active_op_array = CG(active_op_array);
	zend_op_array *retval;
	zval tmp;
	int compiler_result;
	zend_bool original_in_compilation = CG(in_compilation);

	if (source_string->value.str.len==0) {
		efree(op_array);
		return NULL;
	}

	CG(in_compilation) = 1;

	tmp = *source_string;
	zval_copy_ctor(&tmp);
	convert_to_string(&tmp);
	source_string = &tmp;

	zend_save_lexical_state(&original_lex_state TSRMLS_CC);
	if (zend_prepare_string_for_scanning(source_string, filename TSRMLS_CC)==FAILURE) {
		efree(op_array);
		retval = NULL;
	} else {
		zend_bool orig_interactive = CG(interactive);

		CG(interactive) = 0;
		init_op_array(op_array, ZEND_EVAL_CODE, INITIAL_OP_ARRAY_SIZE TSRMLS_CC);
		CG(interactive) = orig_interactive;
		CG(active_op_array) = op_array;
		zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context)));
		zend_init_compiler_context(TSRMLS_C);
		BEGIN(ST_IN_SCRIPTING);
		compiler_result = zendparse(TSRMLS_C);

		if (SCNG(script_filtered)) {
			efree(SCNG(script_filtered));
			SCNG(script_filtered) = NULL;
		}

		if (compiler_result==1) {
			CG(active_op_array) = original_active_op_array;
			CG(unclean_shutdown)=1;
			destroy_op_array(op_array TSRMLS_CC);
			efree(op_array);
			retval = NULL;
		} else {
			zend_do_return(NULL, 0 TSRMLS_CC);
			CG(active_op_array) = original_active_op_array;
			pass_two(op_array TSRMLS_CC);
			zend_release_labels(0 TSRMLS_CC);
			retval = op_array;
		}
	}

	zend_restore_lexical_state(&original_lex_state TSRMLS_CC);
	zval_dtor(&tmp);
	CG(in_compilation) = original_in_compilation;

	return retval;
}
CWE-20
0
void lpc546xxEthEventHandler(NetInterface *interface)
{
   error_t error;

   //Packet received?
   if((ENET->DMA_CH[0].DMA_CHX_STAT & ENET_DMA_CH_DMA_CHX_STAT_RI_MASK) != 0)
   {
      //Clear interrupt flag
      ENET->DMA_CH[0].DMA_CHX_STAT = ENET_DMA_CH_DMA_CHX_STAT_RI_MASK;

      //Process all pending packets
      do
      {
         //Read incoming packet
         error = lpc546xxEthReceivePacket(interface);

         //No more data in the receive buffer?
      } while(error != ERROR_BUFFER_EMPTY);
   }

   //Re-enable DMA interrupts
   ENET->DMA_CH[0].DMA_CHX_INT_EN = ENET_DMA_CH_DMA_CHX_INT_EN_NIE_MASK |
      ENET_DMA_CH_DMA_CHX_INT_EN_RIE_MASK | ENET_DMA_CH_DMA_CHX_INT_EN_TIE_MASK;
}
CWE-20
0
char *url_decode_r(char *to, char *url, size_t size) {
    char *s = url,           // source
         *d = to,            // destination
         *e = &to[size - 1]; // destination end

    while(*s && d < e) {
        if(unlikely(*s == '%')) {
            if(likely(s[1] && s[2])) {
                *d++ = from_hex(s[1]) << 4 | from_hex(s[2]);
                s += 2;
            }
        }
        else if(unlikely(*s == '+'))
            *d++ = ' ';

        else
            *d++ = *s;

        s++;
    }

    *d = '\0';

    return to;
}
CWE-116
15
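url_decode_r above decodes any two bytes that follow '%', even when they are not hex digits, which fits its CWE-116 (improper encoding/escaping) label. A self-contained sketch of a stricter loop; from_hex() is reconstructed here as an assumed helper and the function name is illustrative, not the netdata patch:

#include <ctype.h>
#include <stddef.h>

/* Assumed helper mirroring the original's from_hex(); only called on
 * bytes already validated by isxdigit(). */
static char from_hex(char c) {
    return isdigit((unsigned char)c) ? c - '0'
                                     : (char)(tolower((unsigned char)c) - 'a' + 10);
}

/* Sketch: only decode %XX when both X are real hex digits; otherwise
 * pass the '%' through literally (assumed fix). */
char *url_decode_strict(char *to, const char *s, size_t size) {
    char *d = to, *e = &to[size - 1];

    while(*s && d < e) {
        if(*s == '%' && isxdigit((unsigned char)s[1]) && isxdigit((unsigned char)s[2])) {
            *d++ = (char)(from_hex(s[1]) << 4 | from_hex(s[2]));
            s += 3;
        }
        else if(*s == '+') {
            *d++ = ' ';
            s++;
        }
        else
            *d++ = *s++;
    }

    *d = '\0';
    return to;
}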
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial)
{
	cac_private_data_t * priv = CAC_DATA(card);

	SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL);
	if (card->serialnr.len) {
		*serial = card->serialnr;
		SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
	}
	if (priv->cac_id_len) {
		serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR);
		memcpy(serial->value, priv->cac_id, priv->cac_id_len);
		SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
	}
	SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND);
}
CWE-119
26
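The likely flaw behind the CWE-119 label above is that serial->len is clamped with MIN() but the memcpy still copies the unclamped priv->cac_id_len bytes. A self-contained sketch of the consistent version (assumed fix; SC_MAX_SERIALNR replaced by a stand-in constant):

#include <stddef.h>
#include <string.h>

#define SERIALNR_MAX 32		/* stand-in for SC_MAX_SERIALNR */

/* Sketch: clamp once, then copy exactly the clamped length, so the copy
 * can never run past the destination (assumed fix for the entry above). */
static size_t copy_serial(unsigned char *dst, const unsigned char *src, size_t srclen)
{
	size_t len = srclen < SERIALNR_MAX ? srclen : SERIALNR_MAX;

	memcpy(dst, src, len);
	return len;
}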
static void tokenadd(struct jv_parser* p, char c) {
  assert(p->tokenpos <= p->tokenlen);
  if (p->tokenpos == p->tokenlen) {
    p->tokenlen = p->tokenlen*2 + 256;
    p->tokenbuf = jv_mem_realloc(p->tokenbuf, p->tokenlen);
  }
  assert(p->tokenpos < p->tokenlen);
  p->tokenbuf[p->tokenpos++] = c;
}
CWE-119
26
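One way the tokenadd entry above can still earn its CWE-119 label is the growth step tokenlen*2 + 256, which can wrap on a 32-bit int and leave the buffer smaller than tokenpos believes. A self-contained sketch of a wrap-checked grower (illustrative; jv_mem_realloc replaced by plain realloc):

#include <limits.h>
#include <stdlib.h>

/* Sketch: refuse to grow when len*2 + 256 would overflow, instead of
 * wrapping to a tiny allocation (assumed hardening, not the jq patch). */
static int token_grow(char **buf, int *len) {
  if (*len > (INT_MAX - 256) / 2)
    return -1;                  /* would overflow: caller must bail out */
  int newlen = *len * 2 + 256;
  char *p = realloc(*buf, (size_t)newlen);
  if (!p)
    return -1;
  *buf = p;
  *len = newlen;
  return 0;
}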
unix_client_connect(hsm_com_client_hdl_t *hdl)
{
	int fd, len;
	struct sockaddr_un unix_addr;

	if ((fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
		return HSM_COM_ERROR;
	}

	memset(&unix_addr,0,sizeof(unix_addr));

	unix_addr.sun_family = AF_UNIX;

	if(strlen(hdl->c_path) >= sizeof(unix_addr.sun_path)) {
		close(fd);
		return HSM_COM_PATH_ERR;
	}

	snprintf(unix_addr.sun_path, sizeof(unix_addr.sun_path), "%s", hdl->c_path);

	len = SUN_LEN(&unix_addr);

	unlink(unix_addr.sun_path);

	if(bind(fd, (struct sockaddr *)&unix_addr, len) < 0) {
		unlink(hdl->c_path);
		close(fd);
		return HSM_COM_BIND_ERR;
	}

	if(chmod(unix_addr.sun_path, S_IRWXU) < 0) {
		unlink(hdl->c_path);
		close(fd);
		return HSM_COM_CHMOD_ERR;
	}

	memset(&unix_addr,0,sizeof(unix_addr));
	unix_addr.sun_family = AF_UNIX;
	strncpy(unix_addr.sun_path, hdl->s_path, sizeof(unix_addr.sun_path));
	unix_addr.sun_path[sizeof(unix_addr.sun_path)-1] = 0;

	len = SUN_LEN(&unix_addr);

	if (connect(fd, (struct sockaddr *) &unix_addr, len) < 0) {
		unlink(hdl->c_path);
		close(fd);
		return HSM_COM_CONX_ERR;
	}

	hdl->client_fd = fd;
	hdl->client_state = HSM_COM_C_STATE_CT;

	// Send connection data packet
	if(unix_sck_send_conn(hdl, 2) != HSM_COM_OK) {
		hdl->client_state = HSM_COM_C_STATE_IN;
		return HSM_COM_SEND_ERR;
	}

	return HSM_COM_OK;
}
CWE-362
18
fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc)
{
	tmsize_t stride = PredictorState(tif)->stride;
	uint32 bps = tif->tif_dir.td_bitspersample / 8;
	tmsize_t wc = cc / bps;
	tmsize_t count;
	uint8 *cp = (uint8 *) cp0;
	uint8 *tmp = (uint8 *)_TIFFmalloc(cc);

	assert((cc%(bps*stride))==0);

	if (!tmp)
		return;

	_TIFFmemcpy(tmp, cp0, cc);
	for (count = 0; count < wc; count++) {
		uint32 byte;
		for (byte = 0; byte < bps; byte++) {
#if WORDS_BIGENDIAN
			cp[byte * wc + count] = tmp[bps * count + byte];
#else
			cp[(bps - byte - 1) * wc + count] =
				tmp[bps * count + byte];
#endif
		}
	}
	_TIFFfree(tmp);

	cp = (uint8 *) cp0;
	cp += cc - stride - 1;
	for (count = cc; count > stride; count -= stride)
		REPEAT4(stride,
			cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff);
			cp--)
}
CWE-119
26
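fpDiff above only guards its geometry with assert(), which compiles away in release builds, so a corrupt strip whose byte count does not divide evenly can drive the transposition loops out of bounds. A self-contained sketch of a runtime check, modeled loosely on the shape of the libtiff hardening (the exact upstream patch may differ):

#include <stddef.h>

/* Sketch: validate floating-point predictor geometry at runtime before
 * dividing by bps or walking the buffer (assumed fix, names illustrative). */
static int fp_geometry_ok(size_t cc, size_t bps, size_t stride)
{
	if (bps != 4 && bps != 8)	/* only float/double samples make sense */
		return 0;
	if (stride == 0 || cc % (bps * stride) != 0)
		return 0;		/* corrupt strip geometry: bail out */
	return 1;
}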
static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof TSRMLS_DC)
{
	char *ksep, *vsep, *val;
	size_t klen, vlen;
	/* FIXME: string-size_t */
	unsigned int new_vlen;

	if (var->ptr >= var->end) {
		return 0;
	}

	vsep = memchr(var->ptr, '&', var->end - var->ptr);
	if (!vsep) {
		if (!eof) {
			return 0;
		} else {
			vsep = var->end;
		}
	}

	ksep = memchr(var->ptr, '=', vsep - var->ptr);
	if (ksep) {
		*ksep = '\0';
		/* "foo=bar&" or "foo=&" */
		klen = ksep - var->ptr;
		vlen = vsep - ++ksep;
	} else {
		ksep = "";
		/* "foo&" */
		klen = vsep - var->ptr;
		vlen = 0;
	}

	php_url_decode(var->ptr, klen);

	val = estrndup(ksep, vlen);
	if (vlen) {
		vlen = php_url_decode(val, vlen);
	}

	if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen TSRMLS_CC)) {
		php_register_variable_safe(var->ptr, val, new_vlen, arr TSRMLS_CC);
	}
	efree(val);

	var->ptr = vsep + (vsep != var->end);
	return 1;
}
CWE-400
2
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
	struct desc_struct *desc;
	short sel;

	sel = get_segment_selector(regs, seg_reg_idx);
	if (sel < 0)
		return -1L;

	if (v8086_mode(regs))
		/*
		 * Base is simply the segment selector shifted 4
		 * bits to the right.
		 */
		return (unsigned long)(sel << 4);

	if (user_64bit_mode(regs)) {
		/*
		 * Only FS or GS will have a base address, the rest of
		 * the segments' bases are forced to 0.
		 */
		unsigned long base;

		if (seg_reg_idx == INAT_SEG_REG_FS)
			rdmsrl(MSR_FS_BASE, base);
		else if (seg_reg_idx == INAT_SEG_REG_GS)
			/*
			 * swapgs was called at the kernel entry point. Thus,
			 * MSR_KERNEL_GS_BASE will have the user-space GS base.
			 */
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = 0;
		return base;
	}

	/* In protected mode the segment selector cannot be null. */
	if (!sel)
		return -1L;

	desc = get_desc(sel);
	if (!desc)
		return -1L;

	return get_desc_base(desc);
}
CWE-362
18
PGTYPEStimestamp_from_asc(char *str, char **endptr)
{
	timestamp	result;

#ifdef HAVE_INT64_TIMESTAMP
	int64		noresult = 0;
#else
	double		noresult = 0.0;
#endif
	fsec_t		fsec;
	struct tm	tt,
			   *tm = &tt;
	int			dtype;
	int			nf;
	char	   *field[MAXDATEFIELDS];
	int			ftype[MAXDATEFIELDS];
	char		lowstr[MAXDATELEN + MAXDATEFIELDS];
	char	   *realptr;
	char	  **ptr = (endptr != NULL) ? endptr : &realptr;

	if (strlen(str) >= sizeof(lowstr))
	{
		errno = PGTYPES_TS_BAD_TIMESTAMP;
		return (noresult);
	}

	if (ParseDateTime(str, lowstr, field, ftype, &nf, ptr) != 0 ||
		DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, 0) != 0)
	{
		errno = PGTYPES_TS_BAD_TIMESTAMP;
		return (noresult);
	}

	switch (dtype)
	{
		case DTK_DATE:
			if (tm2timestamp(tm, fsec, NULL, &result) != 0)
			{
				errno = PGTYPES_TS_BAD_TIMESTAMP;
				return (noresult);
			}
			break;

		case DTK_EPOCH:
			result = SetEpochTimestamp();
			break;

		case DTK_LATE:
			TIMESTAMP_NOEND(result);
			break;

		case DTK_EARLY:
			TIMESTAMP_NOBEGIN(result);
			break;

		case DTK_INVALID:
			errno = PGTYPES_TS_BAD_TIMESTAMP;
			return (noresult);

		default:
			errno = PGTYPES_TS_BAD_TIMESTAMP;
			return (noresult);
	}

	/* AdjustTimestampForTypmod(&result, typmod); */

	/*
	 * Since it's difficult to test for noresult, make sure errno is 0 if no
	 * error occurred.
	 */
	errno = 0;
	return result;
}
CWE-119
26
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}
CWE-119
26
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
	muscle_private_t* priv = MUSCLE_DATA(card);
	mscfs_t *fs = priv->fs;
	int x;
	int count = 0;

	mscfs_check_cache(priv->fs);

	for(x = 0; x < fs->cache.size; x++) {
		u8* oid = fs->cache.array[x].objectId.id;
		sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
			"FILE: %02X%02X%02X%02X\n",
			oid[0],oid[1],oid[2],oid[3]);
		if(0 == memcmp(fs->currentPath, oid, 2)) {
			buf[0] = oid[2];
			buf[1] = oid[3];
			if(buf[0] == 0x00 && buf[1] == 0x00)
				continue; /* No directories/null names outside of root */
			buf += 2;
			count += 2;
		}
	}
	return count;
}
CWE-119
26
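muscle_list_files above never consults bufLen, so a card reporting enough matching objects can push the writes past the caller's buffer, which matches its CWE-119 label. A self-contained sketch of the truncating variant (assumed fix; names illustrative, not the OpenSC patch):

#include <stddef.h>
#include <string.h>

/* Sketch: stop emitting 2-byte IDs once the caller's buffer is full,
 * truncating instead of overflowing. */
static int list_ids(unsigned char *buf, size_t buflen,
		    const unsigned char (*ids)[2], size_t nids)
{
	size_t count = 0, i;

	for (i = 0; i < nids; i++) {
		if (count + 2 > buflen)
			break;		/* out of room: truncate the listing */
		memcpy(buf + count, ids[i], 2);
		count += 2;
	}
	return (int)count;
}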
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
	int ret = -ENOMEM, i;

	/* As long as we're doing a round trip to the server anyway,
	 * let's be prepared for a page of acl data. */
	if (npages == 0)
		npages = 1;
	if (npages > ARRAY_SIZE(pages))
		return -ERANGE;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	/* for decoding across pages */
	res.acl_scratch = alloc_page(GFP_KERNEL);
	if (!res.acl_scratch)
		goto out_free;

	args.acl_len = npages * PAGE_SIZE;
	args.acl_pgbase = 0;

	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			     &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	/* Handle the case where the passed-in buffer is too short */
	if (res.acl_flags & NFS4_ACL_TRUNC) {
		/* Did the user only issue a request for the acl length? */
		if (buf == NULL)
			goto out_ok;
		ret = -ERANGE;
		goto out_free;
	}
	nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
	if (buf)
		_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
out_ok:
	ret = res.acl_len;
out_free:
	for (i = 0; i < npages; i++)
		if (pages[i])
			__free_page(pages[i]);
	if (res.acl_scratch)
		__free_page(res.acl_scratch);
	return ret;
}
CWE-119
26
static char *oidc_cache_get_hashed_key(request_rec *r, const char *passphrase,
		const char *key) {
	char *input = apr_psprintf(r->pool, "%s:%s", passphrase, key);
	char *output = NULL;
	if (oidc_util_hash_string_and_base64url_encode(r, OIDC_JOSE_ALG_SHA256,
			input, &output) == FALSE) {
		oidc_error(r,
				"oidc_util_hash_string_and_base64url_encode returned an error");
		return NULL;
	}
	return output;
}
CWE-330
12
static int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}
CWE-400
2
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
			rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
			rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
		hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
		rdesc[60] = 0xfa;
		rdesc[40] = 0xfa;
	}
	return rdesc;
}
CWE-119
26
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
                    AVDictionary *opts, AVDictionary *opts2, int *is_http)
{
    HLSContext *c = s->priv_data;
    AVDictionary *tmp = NULL;
    const char *proto_name = NULL;
    int ret;

    av_dict_copy(&tmp, opts, 0);
    av_dict_copy(&tmp, opts2, 0);

    if (av_strstart(url, "crypto", NULL)) {
        if (url[6] == '+' || url[6] == ':')
            proto_name = avio_find_protocol_name(url + 7);
    }

    if (!proto_name)
        proto_name = avio_find_protocol_name(url);

    if (!proto_name)
        return AVERROR_INVALIDDATA;

    // only http(s) & file are allowed
    if (!av_strstart(proto_name, "http", NULL) && !av_strstart(proto_name, "file", NULL))
        return AVERROR_INVALIDDATA;
    if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
        ;
    else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
        ;
    else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
        return AVERROR_INVALIDDATA;

    ret = s->io_open(s, pb, url, AVIO_FLAG_READ, &tmp);
    if (ret >= 0) {
        // update cookies on http response with setcookies.
        char *new_cookies = NULL;

        if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
            av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);

        if (new_cookies) {
            av_free(c->cookies);
            c->cookies = new_cookies;
        }

        av_dict_set(&opts, "cookies", c->cookies, 0);
    }

    av_dict_free(&tmp);

    if (is_http)
        *is_http = av_strstart(proto_name, "http", NULL);

    return ret;
}
CWE-200
10
uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address)
{
   //Write the address of the PHY register to read from
   enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR, MIREGADR_R8 | address);
   //Start read operation
   enc624j600WriteReg(interface, ENC624J600_REG_MICMD, MICMD_MIIRD);

   //Wait at least 25.6us before polling the BUSY bit
   usleep(100);
   //Wait for the read operation to complete
   while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) & MISTAT_BUSY) != 0)
   {
   }

   //Clear command register
   enc624j600WriteReg(interface, ENC624J600_REG_MICMD, 0x00);

   //Return register contents
   return enc624j600ReadReg(interface, ENC624J600_REG_MIRD);
}
CWE-20
0
static BOOL rdp_read_font_capability_set(wStream* s, UINT16 length, rdpSettings* settings)
{
	WINPR_UNUSED(settings);
	if (length > 4)
		Stream_Seek_UINT16(s); /* fontSupportFlags (2 bytes) */

	if (length > 6)
		Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */

	return TRUE;
}
CWE-119
26
int rtp_packetize_xiph_config( sout_stream_id_sys_t *id, const char *fmtp,
                               int64_t i_pts )
{
    if (fmtp == NULL)
        return VLC_EGENERIC;

    /* extract base64 configuration from fmtp */
    char *start = strstr(fmtp, "configuration=");
    assert(start != NULL);
    start += sizeof("configuration=") - 1;
    char *end = strchr(start, ';');
    assert(end != NULL);
    size_t len = end - start;

    char b64[len + 1];
    memcpy(b64, start, len);
    b64[len] = '\0';

    int     i_max   = rtp_mtu (id) - 6; /* payload max in one packet */

    uint8_t *p_orig, *p_data;
    int i_data;

    i_data = vlc_b64_decode_binary(&p_orig, b64);
    if (i_data <= 9)
    {
        free(p_orig);
        return VLC_EGENERIC;
    }
    p_data = p_orig + 9;
    i_data -= 9;

    int i_count = ( i_data + i_max - 1 ) / i_max;

    for( int i = 0; i < i_count; i++ )
    {
        int i_payload = __MIN( i_max, i_data );
        block_t *out = block_Alloc( 18 + i_payload );

        unsigned fragtype, numpkts;
        if (i_count == 1)
        {
            fragtype = 0;
            numpkts = 1;
        }
        else
        {
            numpkts = 0;
            if (i == 0)
                fragtype = 1;
            else if (i == i_count - 1)
                fragtype = 3;
            else
                fragtype = 2;
        }
        /* Ident:24, Fragment type:2, Vorbis/Theora Data Type:2, # of pkts:4 */
        uint32_t header = ((XIPH_IDENT & 0xffffff) << 8) |
                          (fragtype << 6) | (1 << 4) | numpkts;

        /* rtp common header */
        rtp_packetize_common( id, out, 0, i_pts );

        SetDWBE( out->p_buffer + 12, header);
        SetWBE( out->p_buffer + 16, i_payload);
        memcpy( &out->p_buffer[18], p_data, i_payload );

        out->i_dts = i_pts;

        rtp_packetize_send( id, out );

        p_data += i_payload;
        i_data -= i_payload;
    }

    free(p_orig);
    return VLC_SUCCESS;
}
CWE-119
26
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
					struct vsock_sock *vsk,
					struct msghdr *msg, size_t len,
					int flags)
{
	int err;
	int noblock;
	struct vmci_datagram *dg;
	size_t payload_len;
	struct sk_buff *skb;

	noblock = flags & MSG_DONTWAIT;

	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
		return -EOPNOTSUPP;

	msg->msg_namelen = 0;

	/* Retrieve the head sk_buff from the socket's receive queue. */
	err = 0;
	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
	if (err)
		return err;

	if (!skb)
		return -EAGAIN;

	dg = (struct vmci_datagram *)skb->data;
	if (!dg)
		/* err is 0, meaning we read zero bytes. */
		goto out;

	payload_len = dg->payload_size;
	/* Ensure the sk_buff matches the payload size claimed in the packet. */
	if (payload_len != skb->len - sizeof(*dg)) {
		err = -EINVAL;
		goto out;
	}

	if (payload_len > len) {
		payload_len = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Place the datagram payload in the user's iovec. */
	err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
				      payload_len);
	if (err)
		goto out;

	if (msg->msg_name) {
		struct sockaddr_vm *vm_addr;

		/* Provide the address of the sender. */
		vm_addr = (struct sockaddr_vm *)msg->msg_name;
		vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
		msg->msg_namelen = sizeof(*vm_addr);
	}
	err = payload_len;

out:
	skb_free_datagram(&vsk->sk, skb);
	return err;
}
CWE-20
0
horizontalDifference8(unsigned char *ip, int n, int stride,
	unsigned short *wp, uint16 *From8)
{
    register int  r1, g1, b1, a1, r2, g2, b2, a2, mask;

#undef CLAMP
#define CLAMP(v) (From8[(v)])

    mask = CODE_MASK;
    if (n >= stride) {
	if (stride == 3) {
	    r2 = wp[0] = CLAMP(ip[0]);
	    g2 = wp[1] = CLAMP(ip[1]);
	    b2 = wp[2] = CLAMP(ip[2]);
	    n -= 3;
	    while (n > 0) {
		n -= 3;
		r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1;
		g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1;
		b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1;
		wp += 3;
		ip += 3;
	    }
	} else if (stride == 4) {
	    r2 = wp[0] = CLAMP(ip[0]);
	    g2 = wp[1] = CLAMP(ip[1]);
	    b2 = wp[2] = CLAMP(ip[2]);
	    a2 = wp[3] = CLAMP(ip[3]);
	    n -= 4;
	    while (n > 0) {
		n -= 4;
		r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1;
		g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1;
		b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1;
		a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1;
		wp += 4;
		ip += 4;
	    }
	} else {
	    wp += n + stride - 1;	/* point to last one */
	    ip += n + stride - 1;	/* point to last one */
	    n -= stride;
	    while (n > 0) {
		REPEAT(stride,
		       wp[0] = CLAMP(ip[0]);
		       wp[stride] -= wp[0];
		       wp[stride] &= mask;
		       wp--; ip--)
		n -= stride;
	    }
	    REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--)
	}
    }
}
CWE-119
26
static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddrlen, int peer)
{
	struct sockaddr_llc sllc;
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	int rc = 0;

	memset(&sllc, 0, sizeof(sllc));
	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;
	*uaddrlen = sizeof(sllc);
	memset(uaddr, 0, *uaddrlen);
	if (peer) {
		rc = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;
		if(llc->dev)
			sllc.sllc_arphrd = llc->dev->type;
		sllc.sllc_sap = llc->daddr.lsap;
		memcpy(&sllc.sllc_mac, &llc->daddr.mac, IFHWADDRLEN);
	} else {
		rc = -EINVAL;
		if (!llc->sap)
			goto out;
		sllc.sllc_sap = llc->sap->laddr.lsap;

		if (llc->dev) {
			sllc.sllc_arphrd = llc->dev->type;
			memcpy(&sllc.sllc_mac, llc->dev->dev_addr,
			       IFHWADDRLEN);
		}
	}
	rc = 0;
	sllc.sllc_family = AF_LLC;
	memcpy(uaddr, &sllc, sizeof(sllc));
out:
	release_sock(sk);
	return rc;
}
CWE-200
10
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	lock_mount_hash();
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			struct mount *p, *tmp;
			list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
				hlist_add_head(&p->mnt_umount.s_list, &unmounted);
				umount_mnt(p);
			}
		}
		else umount_tree(mnt, 0);
	}
	unlock_mount_hash();
	put_mountpoint(mp);
out_unlock:
	namespace_unlock();
}
CWE-200
10
static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");

	asm volatile(xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}
CWE-20
0
flac_read_loop (SF_PRIVATE *psf, unsigned len)
{	FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;

	pflac->pos = 0 ;
	pflac->len = len ;
	pflac->remain = len ;

	/* First copy data that has already been decoded and buffered. */
	if (pflac->frame != NULL && pflac->bufferpos < pflac->frame->header.blocksize)
		flac_buffer_copy (psf) ;

	/* Decode some more. */
	while (pflac->pos < pflac->len)
	{	if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
			break ;
		if (FLAC__stream_decoder_get_state (pflac->fsd) >= FLAC__STREAM_DECODER_END_OF_STREAM)
			break ;
		} ;

	pflac->ptr = NULL ;

	return pflac->pos ;
} /* flac_read_loop */
CWE-119
26
static void process_blob(struct rev_info *revs,
			 struct blob *blob,
			 show_object_fn show,
			 struct strbuf *path,
			 const char *name,
			 void *cb_data)
{
	struct object *obj = &blob->object;

	if (!revs->blob_objects)
		return;
	if (!obj)
		die("bad blob object");
	if (obj->flags & (UNINTERESTING | SEEN))
		return;
	obj->flags |= SEEN;
	show(obj, path, name, cb_data);
}
CWE-119
26
void cipso_v4_req_delattr(struct request_sock *req)
{
	struct ip_options *opt;
	struct inet_request_sock *req_inet;

	req_inet = inet_rsk(req);
	opt = req_inet->opt;
	if (opt == NULL || opt->cipso == 0)
		return;

	cipso_v4_delopt(&req_inet->opt);
}
CWE-362
18
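The CWE-362 label on cipso_v4_req_delattr above points at reading and then freeing a shared options pointer that another CPU may be using concurrently; the cipso_v4_req_setattr entry earlier in this section shows the kernel's xchg() idiom for the same slot. A self-contained C11 sketch of that publish/retire pattern (illustrative stand-in, not the kernel fix):

#include <stdatomic.h>
#include <stdlib.h>

struct opts { int cipso; };

/* Sketch: atomically swap the shared pointer out before freeing it, so
 * only one thread can ever win the exchange and free the old copy. */
static void retire_opts(_Atomic(struct opts *) *slot)
{
	struct opts *old = atomic_exchange(slot, NULL);

	free(old);	/* safe: no other thread still sees 'old' via *slot */
}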
static inline int xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because
	 * it supports compact format and supervisor states in addition to
	 * modified optimization in xsaveopt.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
	 * because xsaveopt supports modified optimization which is not
	 * supported by xsave.
	 *
	 * If none of xsaves and xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		"1:"XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		"1:"XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");

	asm volatile("2:\n\t"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}
CWE-20
0
static void mark_commit(struct commit *c, void *data)
{
	mark_object(&c->object, NULL, NULL, data);
}
CWE-119
26