code (string, lengths 23 to 2.05k) | label_name (string, lengths 6 to 7) | label (int64, 0 to 37)
---|---|---|
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
size_t sec_attr_len)
{
u8 *tmp;
if (!sc_file_valid(file)) {
return SC_ERROR_INVALID_ARGUMENTS;
}
if (sec_attr == NULL) {
if (file->sec_attr != NULL)
free(file->sec_attr);
file->sec_attr = NULL;
file->sec_attr_len = 0;
return 0;
}
tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
if (!tmp) {
if (file->sec_attr)
free(file->sec_attr);
file->sec_attr = NULL;
file->sec_attr_len = 0;
return SC_ERROR_OUT_OF_MEMORY;
}
file->sec_attr = tmp;
memcpy(file->sec_attr, sec_attr, sec_attr_len);
file->sec_attr_len = sec_attr_len;
return 0;
} | CWE-119 | 26 |
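The setter above copies whatever length the caller supplies, with no upper bound on sec_attr_len. Below is a minimal user-space sketch of the same routine with an explicit cap; MAX_SEC_ATTR and the struct are illustrative stand-ins, not OpenSC's actual definitions. It also keeps the old buffer on realloc failure instead of freeing it, a design choice that leaves the object usable after an allocation error.

```c
#include <stdlib.h>
#include <string.h>

#define MAX_SEC_ATTR 256   /* illustrative cap, not OpenSC's actual limit */

struct file_obj {
    unsigned char *sec_attr;
    size_t sec_attr_len;
};

/* Returns 0 on success, -1 on invalid arguments or allocation failure. */
int file_set_sec_attr(struct file_obj *file, const unsigned char *attr, size_t len)
{
    unsigned char *tmp;

    if (file == NULL || len > MAX_SEC_ATTR)   /* reject oversized input up front */
        return -1;

    if (attr == NULL || len == 0) {           /* treat NULL/empty as "clear" */
        free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return 0;
    }

    tmp = realloc(file->sec_attr, len);
    if (tmp == NULL)
        return -1;                            /* keep the old buffer intact on failure */

    memcpy(tmp, attr, len);
    file->sec_attr = tmp;
    file->sec_attr_len = len;
    return 0;
}
```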
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
spin_unlock(&hb->lock);
drop_futex_key_refs(&q->key);
} | CWE-119 | 26 |
get_matching_model_microcode(int cpu, unsigned long start,
void *data, size_t size,
struct mc_saved_data *mc_saved_data,
unsigned long *mc_saved_in_initrd,
struct ucode_cpu_info *uci)
{
u8 *ucode_ptr = data;
unsigned int leftover = size;
enum ucode_state state = UCODE_OK;
unsigned int mc_size;
struct microcode_header_intel *mc_header;
struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
int i;
while (leftover) {
mc_header = (struct microcode_header_intel *)ucode_ptr;
mc_size = get_totalsize(mc_header);
if (!mc_size || mc_size > leftover ||
microcode_sanity_check(ucode_ptr, 0) < 0)
break;
leftover -= mc_size;
/*
* Since APs with same family and model as the BSP may boot in
* the platform, we need to find and save microcode patches
* with the same family and model as the BSP.
*/
if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
UCODE_OK) {
ucode_ptr += mc_size;
continue;
}
_save_mc(mc_saved_tmp, ucode_ptr, &mc_saved_count);
ucode_ptr += mc_size;
}
if (leftover) {
state = UCODE_ERROR;
goto out;
}
if (mc_saved_count == 0) {
state = UCODE_NFOUND;
goto out;
}
for (i = 0; i < mc_saved_count; i++)
mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
mc_saved_data->mc_saved_count = mc_saved_count;
out:
return state;
} | CWE-119 | 26 |
void usage(const char *progname)
{
const char* progname_real; /* contains the real name of the program */
/* (without path) */
progname_real = strrchr(progname, '/');
if (progname_real == NULL) /* no path in progname: use progname */
{
progname_real = progname;
}
else
{
progname_real++;
}
fprintf(stderr, "\nusage: %s {-e|-d} [ { -p <password> | -k <keyfile> } ] { [-o <output filename>] <file> | <file> [<file> ...] }\n\n",
progname_real);
} | CWE-287 | 4 |
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int err = 0;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* Now we can treat all alike */
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
if (!ax25_sk(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (msg->msg_namelen != 0) {
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
application know the digi calls further down (because it
did NOT ask to know them). This could get political... **/
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
return err;
} | CWE-20 | 0 |
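The address copy-out above is gated on msg->msg_namelen, a field the receive path is supposed to produce rather than consume; later kernels gate on msg->msg_name instead and always set the length themselves. A stand-alone sketch of that pattern, with simplified stand-in types:

```c
#include <stddef.h>
#include <string.h>

struct addr { char data[32]; };

struct msg {
    void   *name;     /* caller-supplied address buffer, may be NULL */
    size_t  namelen;  /* set by the receive path, never trusted as input */
};

/* Fill in the peer address only if the caller actually asked for one. */
static void copy_peer_addr(struct msg *m, const struct addr *peer)
{
    if (m->name != NULL) {                 /* decide on the pointer... */
        memcpy(m->name, peer, sizeof(*peer));
        m->namelen = sizeof(*peer);        /* ...and set the length ourselves */
    } else {
        m->namelen = 0;                    /* never leave it uninitialized */
    }
}
```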
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len,
int flags)
{
int err;
struct sk_buff *skb;
struct sock *sk = sock->sk;
err = -EIO;
if (sk->sk_state & PPPOX_BOUND)
goto end;
msg->msg_namelen = 0;
err = 0;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
goto end;
if (len > skb->len)
len = skb->len;
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
if (likely(err == 0))
err = len;
kfree_skb(skb);
end:
return err;
} | CWE-20 | 0 |
char *path_name(struct strbuf *path, const char *name)
{
struct strbuf ret = STRBUF_INIT;
if (path)
strbuf_addbuf(&ret, path);
strbuf_addstr(&ret, name);
return strbuf_detach(&ret, NULL);
} | CWE-119 | 26 |
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
struct sock *sk;
struct inet_sock *inet;
__be32 daddr;
if (ip_options_echo(&icmp_param->replyopts, skb))
return;
sk = icmp_xmit_lock(net);
if (sk == NULL)
return;
inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (icmp_param->replyopts.optlen) {
ipc.opt = &icmp_param->replyopts;
if (ipc.opt->srr)
daddr = icmp_param->replyopts.faddr;
}
{
struct flowi4 fl4 = {
.daddr = daddr,
.saddr = rt->rt_spec_dst,
.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
.flowi4_proto = IPPROTO_ICMP,
};
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
}
if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
icmp_param->data.icmph.code))
icmp_push_reply(icmp_param, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
} | CWE-362 | 18 |
int read_filesystem_tables_4()
{
long long directory_table_end, table_start;
if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0)
return FALSE;
if(read_uids_guids(&table_start) == FALSE)
return FALSE;
if(parse_exports_table(&table_start) == FALSE)
return FALSE;
if(read_fragment_table(&directory_table_end) == FALSE)
return FALSE;
if(read_inode_table(sBlk.s.inode_table_start,
sBlk.s.directory_table_start) == FALSE)
return FALSE;
if(read_directory_table(sBlk.s.directory_table_start,
directory_table_end) == FALSE)
return FALSE;
if(no_xattrs)
sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
return TRUE;
} | CWE-20 | 0 |
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
msg->msg_namelen = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (seglen) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = ctx->used;
if (!used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
used = min_t(unsigned long, used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
if (err < 0)
goto unlock;
if (ctx->more || used < ctx->used)
used -= used % bs;
err = -EINVAL;
if (!used)
goto free;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
af_alg_free_sg(&ctx->rsgl);
if (err)
goto unlock;
copied += used;
from += used;
seglen -= used;
skcipher_pull_sgl(sk, used);
}
}
err = 0;
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return copied ?: err;
} | CWE-20 | 0 |
static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
__be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
struct net_device *dev_curr;
__be16 proto_curr;
bool need_rehook;
struct net_device *dev = NULL;
int ret = 0;
bool unlisted = false;
if (po->fanout)
return -EINVAL;
lock_sock(sk);
spin_lock(&po->bind_lock);
rcu_read_lock();
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
} else if (ifindex) {
dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
}
if (dev)
dev_hold(dev);
proto_curr = po->prot_hook.type;
dev_curr = po->prot_hook.dev;
need_rehook = proto_curr != proto || dev_curr != dev;
if (need_rehook) {
if (po->running) {
rcu_read_unlock();
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
if (dev)
unlisted = !dev_get_by_index_rcu(sock_net(sk),
dev->ifindex);
}
po->num = proto;
po->prot_hook.type = proto;
if (unlikely(unlisted)) {
dev_put(dev);
po->prot_hook.dev = NULL;
po->ifindex = -1;
packet_cached_dev_reset(po);
} else {
po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
packet_cached_dev_assign(po, dev);
}
}
if (dev_curr)
dev_put(dev_curr);
if (proto == 0 || !need_rehook)
goto out_unlock;
if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
register_prot_hook(sk);
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
out_unlock:
rcu_read_unlock();
spin_unlock(&po->bind_lock);
release_sock(sk);
return ret;
} | CWE-362 | 18 |
static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
{
uint8* bufp = buf;
int32 bytes_read = 0;
uint32 strip, nstrips = TIFFNumberOfStrips(in);
uint32 stripsize = TIFFStripSize(in);
uint32 rows = 0;
uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
tsize_t scanline_size = TIFFScanlineSize(in);
if (scanline_size == 0) {
TIFFError("", "TIFF scanline size is zero!");
return 0;
}
for (strip = 0; strip < nstrips; strip++) {
bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
rows = bytes_read / scanline_size;
if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
(int)strip + 1, (unsigned long) bytes_read,
(unsigned long)stripsize);
if (bytes_read < 0 && !ignore) {
TIFFError("", "Error reading strip %lu after %lu rows",
(unsigned long) strip, (unsigned long)rows);
return 0;
}
bufp += bytes_read;
}
return 1;
} /* end readContigStripsIntoBuffer */ | CWE-119 | 26 |
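bufp advances by whatever TIFFReadEncodedStrip returns, but nothing checks that the running total stays within buf, and a negative bytes_read is only rejected when !ignore, after rows has already been computed from it. A hedged sketch of the missing bookkeeping, with a caller-supplied reader standing in for the libtiff call:

```c
#include <stddef.h>
#include <stdint.h>

/* Stand-in for TIFFReadEncodedStrip(): returns bytes read or -1 on error. */
typedef int32_t (*strip_reader)(uint32_t strip, uint8_t *dst, size_t dstmax);

/* Copy nstrips strips into buf of size bufsize; returns 0 on success, -1 on error. */
int read_strips_bounded(uint8_t *buf, size_t bufsize, uint32_t nstrips,
                        strip_reader read_strip)
{
    uint8_t *bufp = buf;
    size_t remaining = bufsize;

    for (uint32_t strip = 0; strip < nstrips; strip++) {
        int32_t n = read_strip(strip, bufp, remaining);
        if (n < 0)
            return -1;                /* propagate read errors, don't advance */
        if ((size_t)n > remaining)
            return -1;                /* decoder overran the space we offered */
        bufp += n;
        remaining -= (size_t)n;       /* bufp can never run past buf+bufsize */
    }
    return 0;
}
```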
void color_cmyk_to_rgb(opj_image_t *image)
{
float C, M, Y, K;
float sC, sM, sY, sK;
unsigned int w, h, max, i;
w = image->comps[0].w;
h = image->comps[0].h;
if(image->numcomps < 4) return;
max = w * h;
sC = 1.0F / (float)((1 << image->comps[0].prec) - 1);
sM = 1.0F / (float)((1 << image->comps[1].prec) - 1);
sY = 1.0F / (float)((1 << image->comps[2].prec) - 1);
sK = 1.0F / (float)((1 << image->comps[3].prec) - 1);
for(i = 0; i < max; ++i)
{
/* CMYK values from 0 to 1 */
C = (float)(image->comps[0].data[i]) * sC;
M = (float)(image->comps[1].data[i]) * sM;
Y = (float)(image->comps[2].data[i]) * sY;
K = (float)(image->comps[3].data[i]) * sK;
/* Invert all CMYK values */
C = 1.0F - C;
M = 1.0F - M;
Y = 1.0F - Y;
K = 1.0F - K;
/* CMYK -> RGB : RGB results from 0 to 255 */
image->comps[0].data[i] = (int)(255.0F * C * K); /* R */
image->comps[1].data[i] = (int)(255.0F * M * K); /* G */
image->comps[2].data[i] = (int)(255.0F * Y * K); /* B */
}
free(image->comps[3].data); image->comps[3].data = NULL;
image->comps[0].prec = 8;
image->comps[1].prec = 8;
image->comps[2].prec = 8;
image->numcomps -= 1;
image->color_space = OPJ_CLRSPC_SRGB;
for (i = 3; i < image->numcomps; ++i) {
memcpy(&(image->comps[i]), &(image->comps[i+1]), sizeof(image->comps[i]));
}
}/* color_cmyk_to_rgb() */ | CWE-119 | 26 |
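max is derived from comps[0] alone, yet the loop also indexes data[i] in comps[1..3]; if any of those components is subsampled, its data array is smaller than w*h and the reads run past it. A small compatibility check of the kind the conversion needs before its per-pixel loop; the struct and names are illustrative, not OpenJPEG's:

```c
#include <stdbool.h>

struct comp { unsigned w, h, prec; };

/* All components must share geometry before a joint per-pixel loop is safe. */
static bool comps_compatible(const struct comp *c, unsigned ncomps)
{
    for (unsigned i = 1; i < ncomps; i++) {
        if (c[i].w != c[0].w || c[i].h != c[0].h || c[i].prec != c[0].prec)
            return false;   /* mismatched planes: bail out before indexing */
    }
    return true;
}
```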
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, nmi, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
} | CWE-400 | 2 |
void addReply(redisClient *c, robj *obj) {
if (_installWriteEvent(c) != REDIS_OK) return;
redisAssert(!server.vm_enabled || obj->storage == REDIS_VM_MEMORY);
/* This is an important place where we can avoid copy-on-write
* when there is a saving child running, avoiding touching the
* refcount field of the object if it's not needed.
*
* If the encoding is RAW and there is room in the static buffer
* we'll be able to send the object to the client without
* messing with its page. */
if (obj->encoding == REDIS_ENCODING_RAW) {
if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != REDIS_OK)
_addReplyObjectToList(c,obj);
} else {
/* FIXME: convert the long into string and use _addReplyToBuffer()
* instead of calling getDecodedObject. As this place in the
* code is too performance critical. */
obj = getDecodedObject(obj);
if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != REDIS_OK)
_addReplyObjectToList(c,obj);
decrRefCount(obj);
}
} | CWE-20 | 0 |
static void show_object(struct object *object, struct strbuf *path,
const char *last, void *data)
{
struct bitmap *base = data;
bitmap_set(base, find_object_pos(object->oid.hash));
mark_as_seen(object);
} | CWE-119 | 26 |
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
return sprintf(buf, "%s\n", pdev->driver_override);
} | CWE-362 | 18 |
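The sprintf above reads pdev->driver_override with no lock, so a concurrent store can free the string mid-read. A user-space sketch of the read side taking the same lock writers hold while swapping the string; types and names are illustrative, not the driver core's:

```c
#include <pthread.h>
#include <stdio.h>

struct dev {
    pthread_mutex_t lock;   /* writers replace override under this lock */
    char *override;         /* may be swapped and freed concurrently */
};

/* Snapshot the override string under the lock; returns bytes written. */
static int override_show(struct dev *d, char *buf, size_t buflen)
{
    int n;
    pthread_mutex_lock(&d->lock);
    n = snprintf(buf, buflen, "%s\n", d->override ? d->override : "(null)");
    pthread_mutex_unlock(&d->lock);
    return n;
}
```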
static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc,
struct pluto_crypto_req *r,
err_t ugh)
{
struct dh_continuation *dh = (struct dh_continuation *)pcrc;
struct msg_digest *md = dh->md;
struct state *const st = md->st;
stf_status e;
DBG(DBG_CONTROLMORE,
DBG_log("ikev2 parent inI2outR2: calculating g^{xy}, sending R2"));
if (st == NULL) {
loglog(RC_LOG_SERIOUS,
"%s: Request was disconnected from state",
__FUNCTION__);
if (dh->md)
release_md(dh->md);
return;
}
/* XXX should check out ugh */
passert(ugh == NULL);
passert(cur_state == NULL);
passert(st != NULL);
passert(st->st_suspended_md == dh->md);
set_suspended(st, NULL); /* no longer connected or suspended */
set_cur_state(st);
st->st_calculating = FALSE;
e = ikev2_parent_inI2outR2_tail(pcrc, r);
if ( e > STF_FAIL) {
/* we do not send a notify because we are the initiator that could be responding to an error notification */
int v2_notify_num = e - STF_FAIL;
DBG_log(
"ikev2_parent_inI2outR2_tail returned STF_FAIL with %s",
enum_name(&ikev2_notify_names, v2_notify_num));
} else if ( e != STF_OK) {
DBG_log("ikev2_parent_inI2outR2_tail returned %s",
enum_name(&stfstatus_name, e));
}
if (dh->md != NULL) {
complete_v2_state_transition(&dh->md, e);
if (dh->md)
release_md(dh->md);
}
reset_globals();
passert(GLOBALS_ARE_RESET());
} | CWE-20 | 0 |
swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc)
{
uint32* wp = (uint32*) cp0;
tmsize_t wc = cc / 4;
TIFFSwabArrayOfLong(wp, wc);
horAcc32(tif, cp0, cc);
} | CWE-119 | 26 |
static __exit void sctp_exit(void)
{
/* BUG. This should probably do something useful like clean
* up all the remaining associations and all that memory.
*/
/* Unregister with inet6/inet layers. */
sctp_v6_del_protocol();
sctp_v4_del_protocol();
unregister_pernet_subsys(&sctp_net_ops);
/* Free protosw registrations */
sctp_v6_protosw_exit();
sctp_v4_protosw_exit();
/* Unregister with socket layer. */
sctp_v6_pf_exit();
sctp_v4_pf_exit();
sctp_sysctl_unregister();
free_pages((unsigned long)sctp_assoc_hashtable,
get_order(sctp_assoc_hashsize *
sizeof(struct sctp_hashbucket)));
kfree(sctp_ep_hashtable);
free_pages((unsigned long)sctp_port_hashtable,
get_order(sctp_port_hashsize *
sizeof(struct sctp_bind_hashbucket)));
percpu_counter_destroy(&sctp_sockets_allocated);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
kmem_cache_destroy(sctp_chunk_cachep);
kmem_cache_destroy(sctp_bucket_cachep);
} | CWE-119 | 26 |
int jpg_validate(jas_stream_t *in)
{
uchar buf[JPG_MAGICLEN];
int i;
int n;
assert(JAS_STREAM_MAXPUTBACK >= JPG_MAGICLEN);
/* Read the validation data (i.e., the data used for detecting
the format). */
if ((n = jas_stream_read(in, buf, JPG_MAGICLEN)) < 0) {
return -1;
}
/* Put the validation data back onto the stream, so that the
stream position will not be changed. */
for (i = n - 1; i >= 0; --i) {
if (jas_stream_ungetc(in, buf[i]) == EOF) {
return -1;
}
}
/* Did we read enough data? */
if (n < JPG_MAGICLEN) {
return -1;
}
/* Does this look like JPEG? */
if (buf[0] != (JPG_MAGIC >> 8) || buf[1] != (JPG_MAGIC & 0xff)) {
return -1;
}
return 0;
} | CWE-20 | 0 |
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name;
struct ipxhdr *ipx = NULL;
struct sk_buff *skb;
int copied, rc;
lock_sock(sk);
/* put the autobinding in */
if (!ipxs->port) {
struct sockaddr_ipx uaddr;
uaddr.sipx_port = 0;
uaddr.sipx_network = 0;
#ifdef CONFIG_IPX_INTERN
rc = -ENETDOWN;
if (!ipxs->intrfc)
goto out; /* Someone zonked the iface */
memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
}
rc = -ENOTCONN;
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
if (!skb)
goto out;
ipx = ipx_hdr(skb);
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
copied);
if (rc)
goto out_free;
if (skb->tstamp.tv64)
sk->sk_stamp = skb->tstamp;
msg->msg_namelen = sizeof(*sipx);
if (sipx) {
sipx->sipx_family = AF_IPX;
sipx->sipx_port = ipx->ipx_source.sock;
memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN);
sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
sipx->sipx_type = ipx->ipx_type;
sipx->sipx_zero = 0;
}
rc = copied;
out_free:
skb_free_datagram(sk, skb);
out:
release_sock(sk);
return rc;
} | CWE-20 | 0 |
static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
unsigned row;
unsigned col;
if (evtchn >= xen_evtchn_max_channels())
return -EINVAL;
row = EVTCHN_ROW(evtchn);
col = EVTCHN_COL(evtchn);
if (evtchn_to_irq[row] == NULL) {
/* Unallocated irq entries return -1 anyway */
if (irq == -1)
return 0;
evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
if (evtchn_to_irq[row] == NULL)
return -ENOMEM;
clear_evtchn_to_irq_row(row);
}
evtchn_to_irq[row][col] = irq;
return 0;
} | CWE-362 | 18 |
int MSG_ReadBits( msg_t *msg, int bits ) {
int value;
int get;
qboolean sgn;
int i, nbits;
// FILE* fp;
value = 0;
if ( bits < 0 ) {
bits = -bits;
sgn = qtrue;
} else {
sgn = qfalse;
}
if (msg->oob) {
if(bits==8)
{
value = msg->data[msg->readcount];
msg->readcount += 1;
msg->bit += 8;
}
else if(bits==16)
{
short temp;
CopyLittleShort(&temp, &msg->data[msg->readcount]);
value = temp;
msg->readcount += 2;
msg->bit += 16;
}
else if(bits==32)
{
CopyLittleLong(&value, &msg->data[msg->readcount]);
msg->readcount += 4;
msg->bit += 32;
}
else
Com_Error(ERR_DROP, "can't read %d bits", bits);
} else {
nbits = 0;
if (bits&7) {
nbits = bits&7;
for(i=0;i<nbits;i++) {
value |= (Huff_getBit(msg->data, &msg->bit)<<i);
}
bits = bits - nbits;
}
if (bits) {
// fp = fopen("c:\\netchan.bin", "a");
for(i=0;i<bits;i+=8) {
Huff_offsetReceive (msgHuff.decompressor.tree, &get, msg->data, &msg->bit);
// fwrite(&get, 1, 1, fp);
value |= (get<<(i+nbits));
}
// fclose(fp);
}
msg->readcount = (msg->bit>>3)+1;
}
if ( sgn && bits > 0 && bits < 32 ) {
if ( value & ( 1 << ( bits - 1 ) ) ) {
value |= -1 ^ ( ( 1 << bits ) - 1 );
}
}
return value;
} | CWE-119 | 26 |
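The oob branches index msg->data[readcount] without comparing readcount to the message's actual size, so a short packet lets reads run past the buffer. A minimal bounded-reader sketch; the struct mirrors the snippet's fields but is simplified:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct {
    const uint8_t *data;
    size_t         cursize;   /* total valid bytes in data */
    size_t         readcount; /* bytes consumed so far */
} msg_t;

/* Read nbytes into out; returns 0, or -1 if it would run past the end. */
static int msg_read_bytes(msg_t *msg, void *out, size_t nbytes)
{
    if (msg->readcount > msg->cursize ||
        nbytes > msg->cursize - msg->readcount)
        return -1;                     /* refuse instead of reading past data[] */
    memcpy(out, msg->data + msg->readcount, nbytes);
    msg->readcount += nbytes;
    return 0;
}
```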
MOBI_RET mobi_parse_huff(MOBIHuffCdic *huffcdic, const MOBIPdbRecord *record) {
MOBIBuffer *buf = mobi_buffer_init_null(record->data, record->size);
if (buf == NULL) {
debug_print("%s\n", "Memory allocation failed");
return MOBI_MALLOC_FAILED;
}
char huff_magic[5];
mobi_buffer_getstring(huff_magic, buf, 4);
const size_t header_length = mobi_buffer_get32(buf);
if (strncmp(huff_magic, HUFF_MAGIC, 4) != 0 || header_length < HUFF_HEADER_LEN) {
debug_print("HUFF wrong magic: %s\n", huff_magic);
mobi_buffer_free_null(buf);
return MOBI_DATA_CORRUPT;
}
const size_t data1_offset = mobi_buffer_get32(buf);
const size_t data2_offset = mobi_buffer_get32(buf);
/* skip little-endian table offsets */
mobi_buffer_setpos(buf, data1_offset);
if (buf->offset + (256 * 4) > buf->maxlen) {
debug_print("%s", "HUFF data1 too short\n");
mobi_buffer_free_null(buf);
return MOBI_DATA_CORRUPT;
}
/* read 256 indices from data1 big-endian */
for (int i = 0; i < 256; i++) {
huffcdic->table1[i] = mobi_buffer_get32(buf);
}
mobi_buffer_setpos(buf, data2_offset);
if (buf->offset + (64 * 4) > buf->maxlen) {
debug_print("%s", "HUFF data2 too short\n");
mobi_buffer_free_null(buf);
return MOBI_DATA_CORRUPT;
}
/* read 32 mincode-maxcode pairs from data2 big-endian */
huffcdic->mincode_table[0] = 0;
huffcdic->maxcode_table[0] = 0xFFFFFFFF;
for (int i = 1; i < 33; i++) {
const uint32_t mincode = mobi_buffer_get32(buf);
const uint32_t maxcode = mobi_buffer_get32(buf);
huffcdic->mincode_table[i] = mincode << (32 - i);
huffcdic->maxcode_table[i] = ((maxcode + 1) << (32 - i)) - 1;
}
mobi_buffer_free_null(buf);
return MOBI_SUCCESS;
} | CWE-119 | 26 |
flac_read_loop (SF_PRIVATE *psf, unsigned len)
{ FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;
pflac->pos = 0 ;
pflac->len = len ;
pflac->remain = len ;
/* First copy data that has already been decoded and buffered. */
if (pflac->frame != NULL && pflac->bufferpos < pflac->frame->header.blocksize)
flac_buffer_copy (psf) ;
/* Decode some more. */
while (pflac->pos < pflac->len)
{ if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
break ;
if (FLAC__stream_decoder_get_state (pflac->fsd) >= FLAC__STREAM_DECODER_END_OF_STREAM)
break ;
} ;
pflac->ptr = NULL ;
return pflac->pos ;
} /* flac_read_loop */ | CWE-119 | 26 |
static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page;
return 0;
} | CWE-20 | 0 |
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len,
int flags)
{
int err;
struct sk_buff *skb;
struct sock *sk = sock->sk;
err = -EIO;
if (sk->sk_state & PPPOX_BOUND)
goto end;
msg->msg_namelen = 0;
err = 0;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
goto end;
if (len > skb->len)
len = skb->len;
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
if (likely(err == 0))
err = len;
kfree_skb(skb);
end:
return err;
} | CWE-20 | 0 |
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
static u32 ip6_proxy_idents_hashrnd __read_mostly;
struct in6_addr buf[2];
struct in6_addr *addrs;
u32 id;
addrs = skb_header_pointer(skb,
skb_network_offset(skb) +
offsetof(struct ipv6hdr, saddr),
sizeof(buf), buf);
if (!addrs)
return 0;
net_get_random_once(&ip6_proxy_idents_hashrnd,
sizeof(ip6_proxy_idents_hashrnd));
id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
&addrs[1], &addrs[0]);
return htonl(id);
} | CWE-326 | 9 |
static int asf_read_marker(AVFormatContext *s, int64_t size)
{
AVIOContext *pb = s->pb;
ASFContext *asf = s->priv_data;
int i, count, name_len, ret;
char name[1024];
avio_rl64(pb); // reserved 16 bytes
avio_rl64(pb); // ...
count = avio_rl32(pb); // markers count
avio_rl16(pb); // reserved 2 bytes
name_len = avio_rl16(pb); // name length
for (i = 0; i < name_len; i++)
avio_r8(pb); // skip the name
for (i = 0; i < count; i++) {
int64_t pres_time;
int name_len;
avio_rl64(pb); // offset, 8 bytes
pres_time = avio_rl64(pb); // presentation time
pres_time -= asf->hdr.preroll * 10000;
avio_rl16(pb); // entry length
avio_rl32(pb); // send time
avio_rl32(pb); // flags
name_len = avio_rl32(pb); // name length
if ((ret = avio_get_str16le(pb, name_len * 2, name,
sizeof(name))) < name_len)
avio_skip(pb, name_len - ret);
avpriv_new_chapter(s, i, (AVRational) { 1, 10000000 }, pres_time,
AV_NOPTS_VALUE, name);
}
return 0;
} | CWE-834 | 33 |
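count is read straight from the file and the loop has no end-of-stream check, so a truncated or hostile header can spin the parser through garbage; the CWE-834 label points at exactly this excessive iteration. A sketch of the guarded pattern using stdio; the cap and record layout are invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_MARKERS 1024   /* illustrative sanity cap on a file-supplied count */

/* Read up to `count` fixed-size marker records; stop cleanly on a short read. */
static int read_markers(FILE *fp, uint32_t count)
{
    if (count > MAX_MARKERS)
        return -1;                      /* reject absurd counts up front */

    for (uint32_t i = 0; i < count; i++) {
        uint8_t rec[24];                /* invented record size */
        if (fread(rec, sizeof(rec), 1, fp) != 1)
            return -1;                  /* EOF or short read: stop, don't spin */
        /* ... parse rec ... */
    }
    return 0;
}
```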
static inline int ip6_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
int transhdrlen, int mtu,unsigned int flags,
struct rt6_info *rt)
{
struct sk_buff *skb;
int err;
/* There is support for UDP large send offload by network
* device, so create one single skb packet containing complete
* udp datagram
*/
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
if (skb == NULL)
return err;
/* reserve space for Hardware header */
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
skb_put(skb,fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
}
err = skb_append_datato_frags(sk,skb, getfrag, from,
(length - transhdrlen));
if (!err) {
struct frag_hdr fhdr;
/* Specify the length of each IPv6 datagram fragment.
* It has to be a multiple of 8.
*/
skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
sizeof(struct frag_hdr)) & ~7;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
ipv6_select_ident(&fhdr, rt);
skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
__skb_queue_tail(&sk->sk_write_queue, skb);
return 0;
}
/* There is not enough support do UPD LSO,
* so follow normal path
*/
kfree_skb(skb);
return err;
} | CWE-119 | 26 |
int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
{
int result = parse_rock_ridge_inode_internal(de, inode, 0);
/*
* if rockridge flag was reset and we didn't look for attributes
* behind eventual XA attributes, have a look there
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
result = parse_rock_ridge_inode_internal(de, inode, 14);
}
return result;
} | CWE-20 | 0 |
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
int rc;
unsigned char key2[8];
struct crypto_skcipher *tfm_des;
struct scatterlist sgin, sgout;
struct skcipher_request *req;
str_to_key(key, key2);
tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_des)) {
rc = PTR_ERR(tfm_des);
cifs_dbg(VFS, "could not allocate des crypto API\n");
goto smbhash_err;
}
req = skcipher_request_alloc(tfm_des, GFP_KERNEL);
if (!req) {
rc = -ENOMEM;
cifs_dbg(VFS, "could not allocate des crypto API\n");
goto smbhash_free_skcipher;
}
crypto_skcipher_setkey(tfm_des, key2, 8);
sg_init_one(&sgin, in, 8);
sg_init_one(&sgout, out, 8);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);
rc = crypto_skcipher_encrypt(req);
if (rc)
cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc);
skcipher_request_free(req);
smbhash_free_skcipher:
crypto_free_skcipher(tfm_des);
smbhash_err:
return rc;
} | CWE-119 | 26 |
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
struct ddpehdr *ddp;
int copied = 0;
int offset = 0;
int err = 0;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
lock_sock(sk);
if (!skb)
goto out;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
copied = ntohs(ddp->deh_len_hops) & 1023;
if (sk->sk_type != SOCK_RAW) {
offset = sizeof(*ddp);
copied -= offset;
}
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
if (!err) {
if (sat) {
sat->sat_family = AF_APPLETALK;
sat->sat_port = ddp->deh_sport;
sat->sat_addr.s_node = ddp->deh_snode;
sat->sat_addr.s_net = ddp->deh_snet;
}
msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
out:
release_sock(sk);
return err ? : copied;
} | CWE-20 | 0 |
int rm_rf_children(
int fd,
RemoveFlags flags,
const struct stat *root_dev) {
_cleanup_closedir_ DIR *d = NULL;
int ret = 0, r;
assert(fd >= 0);
/* This returns the first error we run into, but nevertheless tries to go on. This closes the passed
* fd, in all cases, including on failure. */
d = fdopendir(fd);
if (!d) {
safe_close(fd);
return -errno;
}
if (!(flags & REMOVE_PHYSICAL)) {
struct statfs sfs;
if (fstatfs(dirfd(d), &sfs) < 0)
return -errno;
if (is_physical_fs(&sfs)) {
/* We refuse to clean physical file systems with this call, unless explicitly
* requested. This is extra paranoia just to be sure we never ever remove non-state
* data. */
_cleanup_free_ char *path = NULL;
(void) fd_get_path(fd, &path);
return log_error_errno(SYNTHETIC_ERRNO(EPERM),
"Attempted to remove disk file system under \"%s\", and we can't allow that.",
strna(path));
}
}
FOREACH_DIRENT_ALL(de, d, return -errno) {
int is_dir;
if (dot_or_dot_dot(de->d_name))
continue;
is_dir =
de->d_type == DT_UNKNOWN ? -1 :
de->d_type == DT_DIR;
r = rm_rf_children_inner(dirfd(d), de->d_name, is_dir, flags, root_dev);
if (r < 0 && r != -ENOENT && ret == 0)
ret = r;
}
if (FLAGS_SET(flags, REMOVE_SYNCFS) && syncfs(dirfd(d)) < 0 && ret >= 0)
ret = -errno;
return ret;
} | CWE-674 | 28 |
dtls1_process_buffered_records(SSL *s)
{
pitem *item;
item = pqueue_peek(s->d1->unprocessed_rcds.q);
if (item)
{
/* Check if epoch is current. */
if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch)
return(1); /* Nothing to do. */
/* Process all the records. */
while (pqueue_peek(s->d1->unprocessed_rcds.q))
{
dtls1_get_unprocessed_record(s);
if ( ! dtls1_process_record(s))
return(0);
dtls1_buffer_record(s, &(s->d1->processed_rcds),
s->s3->rrec.seq_num);
}
}
/* sync epoch numbers once all the unprocessed records
* have been processed */
s->d1->processed_rcds.epoch = s->d1->r_epoch;
s->d1->unprocessed_rcds.epoch = s->d1->r_epoch + 1;
return(1);
} | CWE-119 | 26 |
CAMLprim value caml_alloc_dummy_float (value size)
{
mlsize_t wosize = Int_val(size) * Double_wosize;
if (wosize == 0) return Atom(0);
return caml_alloc (wosize, 0);
} | CWE-119 | 26 |
void __init proc_root_init(void)
{
struct vfsmount *mnt;
int err;
proc_init_inodecache();
err = register_filesystem(&proc_fs_type);
if (err)
return;
mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
if (IS_ERR(mnt)) {
unregister_filesystem(&proc_fs_type);
return;
}
init_pid_ns.proc_mnt = mnt;
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
#ifdef CONFIG_SYSVIPC
proc_mkdir("sysvipc", NULL);
#endif
proc_mkdir("fs", NULL);
proc_mkdir("driver", NULL);
proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
#if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
/* just give it a mountpoint */
proc_mkdir("openprom", NULL);
#endif
proc_tty_init();
#ifdef CONFIG_PROC_DEVICETREE
proc_device_tree_init();
#endif
proc_mkdir("bus", NULL);
proc_sys_init();
} | CWE-119 | 26 |
static ssize_t read_mem(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
phys_addr_t p = *ppos;
ssize_t read, sz;
void *ptr;
if (p != *ppos)
return 0;
if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
sz = size_inside_page(p, count);
if (sz > 0) {
if (clear_user(buf, sz))
return -EFAULT;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
}
#endif
while (count > 0) {
unsigned long remaining;
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
return -EPERM;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
*ppos += read;
return read;
} | CWE-732 | 13 |
static void print_value(int output, int num, const char *devname,
const char *value, const char *name, size_t valsz)
{
if (output & OUTPUT_VALUE_ONLY) {
fputs(value, stdout);
fputc('\n', stdout);
} else if (output & OUTPUT_UDEV_LIST) {
print_udev_format(name, value);
} else if (output & OUTPUT_EXPORT_LIST) {
if (num == 1 && devname)
printf("DEVNAME=%s\n", devname);
fputs(name, stdout);
fputs("=", stdout);
safe_print(value, valsz, NULL);
fputs("\n", stdout);
} else {
if (num == 1 && devname)
printf("%s:", devname);
fputs(" ", stdout);
fputs(name, stdout);
fputs("=\"", stdout);
safe_print(value, valsz, "\"");
fputs("\"", stdout);
}
} | CWE-77 | 14 |
void sctp_generate_t3_rtx_event(unsigned long peer)
{
int error;
struct sctp_transport *transport = (struct sctp_transport *) peer;
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
/* Check whether a task is in the sock. */
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
sctp_transport_hold(transport);
goto out_unlock;
}
/* Is this transport really dead and just waiting around for
* the timer to let go of the reference?
*/
if (transport->dead)
goto out_unlock;
/* Run through the state machine. */
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
asoc->state,
asoc->ep, asoc,
transport, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
sctp_transport_put(transport);
} | CWE-362 | 18 |
sasl_handle_login(struct sasl_session *const restrict p, struct user *const u, struct myuser *mu)
{
bool was_killed = false;
// Find the account if necessary
if (! mu)
{
if (! *p->authzeid)
{
(void) slog(LG_INFO, "%s: session for '%s' without an authzeid (BUG)",
MOWGLI_FUNC_NAME, u->nick);
(void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR);
return false;
}
if (! (mu = myuser_find_uid(p->authzeid)))
{
if (*p->authzid)
(void) notice(saslsvs->nick, u->nick, "Account %s dropped; login cancelled",
p->authzid);
else
(void) notice(saslsvs->nick, u->nick, "Account dropped; login cancelled");
return false;
}
}
// If the user is already logged in, and not to the same account, log them out first
if (u->myuser && u->myuser != mu)
{
if (is_soper(u->myuser))
(void) logcommand_user(saslsvs, u, CMDLOG_ADMIN, "DESOPER: \2%s\2 as \2%s\2",
u->nick, entity(u->myuser)->name);
(void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGOUT");
if (! (was_killed = ircd_on_logout(u, entity(u->myuser)->name)))
{
mowgli_node_t *n;
MOWGLI_ITER_FOREACH(n, u->myuser->logins.head)
{
if (n->data == u)
{
(void) mowgli_node_delete(n, &u->myuser->logins);
(void) mowgli_node_free(n);
break;
}
}
u->myuser = NULL;
}
}
// If they were not killed above, log them in now
if (! was_killed)
{
if (u->myuser != mu)
{
// If they're not logged in, or logging in to a different account, do a full login
(void) myuser_login(saslsvs, u, mu, false);
(void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGIN (%s)", p->mechptr->name);
}
else
{
// Otherwise, just update login time ...
mu->lastlogin = CURRTIME;
(void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "REAUTHENTICATE (%s)", p->mechptr->name);
}
}
return true;
} | CWE-287 | 4 |
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
msg->msg_namelen = 0;
if (u->addr) {
msg->msg_namelen = u->addr->len;
memcpy(msg->msg_name, u->addr->name, u->addr->len);
}
} | CWE-20 | 0 |
usage (int status)
{
if (status != EXIT_SUCCESS)
fprintf (stderr, _("Try `%s --help' for more information.\n"),
program_name);
else
{
printf (_("\
Usage: %s [OPTION]... [STRINGS]...\n\
"), program_name);
fputs (_("\
Internationalized Domain Name (IDNA2008) convert STRINGS, or standard input.\n\
\n\
"), stdout);
fputs (_("\
Command line interface to the Libidn2 implementation of IDNA2008.\n\
\n\
All strings are expected to be encoded in the locale charset.\n\
\n\
To process a string that starts with `-', for example `-foo', use `--'\n\
to signal the end of parameters, as in `idn2 --quiet -- -foo'.\n\
\n\
Mandatory arguments to long options are mandatory for short options too.\n\
"), stdout);
fputs (_("\
-h, --help Print help and exit\n\
-V, --version Print version and exit\n\
"), stdout);
fputs (_("\
-d, --decode Decode (punycode) domain name\n\
-l, --lookup Lookup domain name (default)\n\
-r, --register Register label\n\
"), stdout);
fputs (_("\
-T, --tr46t Enable TR46 transitional processing\n\
-N, --tr46nt Enable TR46 non-transitional processing\n\
--no-tr46 Disable TR46 processing\n\
"), stdout);
fputs (_("\
--usestd3asciirules Enable STD3 ASCII rules\n\
--debug Print debugging information\n\
--quiet Silent operation\n\
"), stdout);
emit_bug_reporting_address ();
}
exit (status);
} | CWE-20 | 0 |
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len)
{
struct inet_sock *inet = inet_sk(sk);
struct {
struct ip_options opt;
char data[40];
} replyopts;
struct ipcm_cookie ipc;
__be32 daddr;
struct rtable *rt = skb_rtable(skb);
if (ip_options_echo(&replyopts.opt, skb))
return;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (replyopts.opt.optlen) {
ipc.opt = &replyopts.opt;
if (ipc.opt->srr)
daddr = replyopts.opt.faddr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
RT_TOS(ip_hdr(skb)->tos),
RT_SCOPE_UNIVERSE, sk->sk_protocol,
ip_reply_arg_flowi_flags(arg),
daddr, rt->rt_spec_dst,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt))
return;
}
/* And let IP do all the hard work.
This chunk is not reenterable, hence spinlock.
Note that it uses the fact, that this function is called
with locally disabled BH and that sk cannot be already spinlocked.
*/
bh_lock_sock(sk);
inet->tos = ip_hdr(skb)->tos;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, &rt, MSG_DONTWAIT);
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((__sum16 *)skb_transport_header(skb) +
arg->csumoffset) = csum_fold(csum_add(skb->csum,
arg->csum));
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
bh_unlock_sock(sk);
ip_rt_put(rt);
} | CWE-362 | 18 |
PS_SERIALIZER_DECODE_FUNC(php_binary) /* {{{ */
{
const char *p;
char *name;
const char *endptr = val + vallen;
zval *current;
int namelen;
int has_value;
php_unserialize_data_t var_hash;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
for (p = val; p < endptr; ) {
zval **tmp;
namelen = ((unsigned char)(*p)) & (~PS_BIN_UNDEF);
if (namelen < 0 || namelen > PS_BIN_MAX || (p + namelen) >= endptr) {
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return FAILURE;
}
has_value = *p & PS_BIN_UNDEF ? 0 : 1;
name = estrndup(p + 1, namelen);
p += namelen + 1;
if (zend_hash_find(&EG(symbol_table), name, namelen + 1, (void **) &tmp) == SUCCESS) {
if ((Z_TYPE_PP(tmp) == IS_ARRAY && Z_ARRVAL_PP(tmp) == &EG(symbol_table)) || *tmp == PS(http_session_vars)) {
efree(name);
continue;
}
}
if (has_value) {
ALLOC_INIT_ZVAL(current);
if (php_var_unserialize(¤t, (const unsigned char **) &p, (const unsigned char *) endptr, &var_hash TSRMLS_CC)) {
php_set_session_var(name, namelen, current, &var_hash TSRMLS_CC);
} else {
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return FAILURE;
}
var_push_dtor_no_addref(&var_hash, ¤t);
}
PS_ADD_VARL(name, namelen);
efree(name);
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return SUCCESS;
} | CWE-74 | 1 |
static int jas_iccgetuint16(jas_stream_t *in, jas_iccuint16_t *val)
{
ulonglong tmp;
if (jas_iccgetuint(in, 2, &tmp))
return -1;
*val = tmp;
return 0;
} | CWE-20 | 0 |
static int encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 64);
salsa20_ivsetup(ctx, walk.iv);
if (likely(walk.nbytes == nbytes))
{
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr, nbytes);
return blkcipher_walk_done(desc, &walk, 0);
}
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr,
walk.nbytes - (walk.nbytes % 64));
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
}
if (walk.nbytes) {
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr, walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
} | CWE-20 | 0 |
static int read_private_key(RSA *rsa)
{
int r;
sc_path_t path;
sc_file_t *file;
const sc_acl_entry_t *e;
u8 buf[2048], *p = buf;
size_t bufsize, keysize;
r = select_app_df();
if (r)
return 1;
sc_format_path("I0012", &path);
r = sc_select_file(card, &path, &file);
if (r) {
fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r));
return 2;
}
e = sc_file_get_acl_entry(file, SC_AC_OP_READ);
if (e == NULL || e->method == SC_AC_NEVER)
return 10;
bufsize = file->size;
sc_file_free(file);
r = sc_read_binary(card, 0, buf, bufsize, 0);
if (r < 0) {
fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = r;
do {
if (bufsize < 4)
return 3;
keysize = (p[0] << 8) | p[1];
if (keysize == 0)
break;
if (keysize < 3)
return 3;
if (p[2] == opt_key_num)
break;
p += keysize;
bufsize -= keysize;
} while (1);
if (keysize == 0) {
printf("Key number %d not found.\n", opt_key_num);
return 2;
}
return parse_private_key(p, keysize, rsa);
} | CWE-119 | 26 |
static int rd_build_device_space(struct rd_dev *rd_dev)
{
u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
} | CWE-200 | 10 |
static pyc_object *get_tuple_object(RBuffer *buffer) {
pyc_object *ret = NULL;
bool error = false;
ut32 n = 0;
n = get_ut32 (buffer, &error);
if (n > ST32_MAX) {
eprintf ("bad marshal data (tuple size out of range)\n");
return NULL;
}
if (error) {
return NULL;
}
ret = get_array_object_generic (buffer, n);
if (ret) {
ret->type = TYPE_TUPLE;
return ret;
}
return NULL;
} | CWE-119 | 26 |
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
void __user *namep;
namep = (void __user __force *) m->msg_name;
err = move_addr_to_kernel(namep, m->msg_namelen,
address);
if (err < 0)
return err;
}
m->msg_name = address;
} else {
m->msg_name = NULL;
}
size = m->msg_iovlen * sizeof(struct iovec);
if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
return -EFAULT;
m->msg_iov = iov;
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
size_t len = iov[ct].iov_len;
if (len > INT_MAX - err) {
len = INT_MAX - err;
iov[ct].iov_len = len;
}
err += len;
}
return err;
} | CWE-20 | 0 |
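m->msg_iovlen * sizeof(struct iovec) is computed with no cap, so a large iovlen can wrap size before the copy_from_user. A portable sketch of the guard; IOV_CAP stands in for a UIO_MAXIOV-style limit:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define IOV_CAP 1024   /* stand-in for a UIO_MAXIOV-style limit */

struct iovec_like { void *base; size_t len; };

/* Duplicate an iovec array, refusing counts that could overflow the size math. */
struct iovec_like *dup_iov(const struct iovec_like *src, size_t n)
{
    if (n == 0 || n > IOV_CAP)             /* cap the count first... */
        return NULL;
    if (n > SIZE_MAX / sizeof(*src))       /* ...then prove n * size fits */
        return NULL;
    struct iovec_like *dst = malloc(n * sizeof(*src));
    if (dst)
        memcpy(dst, src, n * sizeof(*src));
    return dst;
}
```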
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char build[ATUSB_BUILD_SIZE + 1];
int ret;
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
if (ret >= 0) {
build[ret] = 0;
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
return ret;
} | CWE-119 | 26 |
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
int offset, size_t count)
{
int len = iov_length(from, count) - offset;
int copy = skb_headlen(skb);
int size, offset1 = 0;
int i = 0;
/* Skip over from offset */
while (count && (offset >= from->iov_len)) {
offset -= from->iov_len;
++from;
--count;
}
/* copy up to skb headlen */
while (count && (copy > 0)) {
size = min_t(unsigned int, copy, from->iov_len - offset);
if (copy_from_user(skb->data + offset1, from->iov_base + offset,
size))
return -EFAULT;
if (copy > size) {
++from;
--count;
offset = 0;
} else
offset += size;
copy -= size;
offset1 += size;
}
if (len == offset1)
return 0;
while (count--) {
struct page *page[MAX_SKB_FRAGS];
int num_pages;
unsigned long base;
unsigned long truesize;
len = from->iov_len - offset;
if (!len) {
offset = 0;
++from;
continue;
}
base = (unsigned long)from->iov_base + offset;
size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
num_pages = get_user_pages_fast(base, size, 0, &page[i]);
if ((num_pages != size) ||
(num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags)) {
for (i = 0; i < num_pages; i++)
put_page(page[i]);
return -EFAULT;
}
truesize = size * PAGE_SIZE;
skb->data_len += len;
skb->len += len;
skb->truesize += truesize;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
while (len) {
int off = base & ~PAGE_MASK;
int size = min_t(int, len, PAGE_SIZE - off);
__skb_fill_page_desc(skb, i, page[i], off, size);
skb_shinfo(skb)->nr_frags++;
/* increase sk_wmem_alloc */
base += size;
len -= size;
i++;
}
offset = 0;
++from;
}
return 0;
} | CWE-119 | 26 |
static void record_recent_object(struct object *obj,
struct strbuf *path,
const char *last,
void *data)
{
sha1_array_append(&recent_objects, obj->oid.hash);
} | CWE-119 | 26 |
static int parse_exports_table(long long *table_start)
{
int res;
int indexes = SQUASHFS_LOOKUP_BLOCKS(sBlk.s.inodes);
long long export_index_table[indexes];
res = read_fs_bytes(fd, sBlk.s.lookup_table_start,
SQUASHFS_LOOKUP_BLOCK_BYTES(sBlk.s.inodes), export_index_table);
if(res == FALSE) {
ERROR("parse_exports_table: failed to read export index table\n");
return FALSE;
}
SQUASHFS_INSWAP_LOOKUP_BLOCKS(export_index_table, indexes);
/*
* export_index_table[0] stores the start of the compressed export blocks.
* This by definition is also the end of the previous filesystem
* table - the fragment table.
*/
*table_start = export_index_table[0];
return TRUE;
} | CWE-20 | 0 |
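indexes comes from the on-disk inode count and then sizes a stack VLA, so a crafted superblock dictates stack usage. A sketch that bounds the count and moves the table to the heap; the cap is illustrative, not squashfs-tools' actual limit:

```c
#include <stdlib.h>

#define MAX_LOOKUP_BLOCKS 65536   /* illustrative upper bound */

/* Allocate the export index table only after the count passes a sanity check. */
long long *alloc_export_index(int indexes)
{
    if (indexes <= 0 || indexes > MAX_LOOKUP_BLOCKS)
        return NULL;              /* untrusted count: refuse extremes */
    /* heap allocation instead of `long long table[indexes]` on the stack */
    return calloc((size_t)indexes, sizeof(long long));
}
```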
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, 1, &sample, regs);
} | CWE-400 | 2 |
PHP_FUNCTION(curl_unescape)
{
char *str = NULL, *out = NULL;
size_t str_len = 0;
int out_len;
zval *zid;
php_curl *ch;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) {
return;
}
if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) {
RETURN_FALSE;
}
if (str_len > INT_MAX) {
RETURN_FALSE;
}
if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) {
RETVAL_STRINGL(out, out_len);
curl_free(out);
} else {
RETURN_FALSE;
}
} | CWE-119 | 26 |
static int msg_cache_check (const char *id, body_cache_t *bcache, void *data)
{
CONTEXT *ctx;
POP_DATA *pop_data;
int i;
if (!(ctx = (CONTEXT *)data))
return -1;
if (!(pop_data = (POP_DATA *)ctx->data))
return -1;
#ifdef USE_HCACHE
/* keep hcache file if hcache == bcache */
if (strcmp (HC_FNAME "." HC_FEXT, id) == 0)
return 0;
#endif
for (i = 0; i < ctx->msgcount; i++)
/* if the id we get is known for a header: done (i.e. keep in cache) */
if (ctx->hdrs[i]->data && mutt_strcmp (ctx->hdrs[i]->data, id) == 0)
return 0;
/* message not found in context -> remove it from cache
* return the result of bcache, so we stop upon its first error
*/
return mutt_bcache_del (bcache, id);
} | CWE-119 | 26 |
error_t ipv6ComputeSolicitedNodeAddr(const Ipv6Addr *ipAddr,
Ipv6Addr *solicitedNodeAddr)
{
error_t error;
//Ensure the specified address is a valid unicast or anycast address
if(!ipv6IsMulticastAddr(ipAddr))
{
//Copy the 104-bit prefix
ipv6CopyAddr(solicitedNodeAddr, &IPV6_SOLICITED_NODE_ADDR_PREFIX);
//Take the low-order 24 bits of the address (unicast or anycast) and
//append those bits to the prefix
solicitedNodeAddr->b[13] = ipAddr->b[13];
solicitedNodeAddr->b[14] = ipAddr->b[14];
solicitedNodeAddr->b[15] = ipAddr->b[15];
//Successful processing
error = NO_ERROR;
}
else
{
//Report an error
error = ERROR_INVALID_ADDRESS;
}
//Return status code
return error;
} | CWE-20 | 0 |
fm_mgr_config_mgr_connect
(
fm_config_conx_hdl *hdl,
fm_mgr_type_t mgr
)
{
char s_path[256];
char c_path[256];
char *mgr_prefix;
p_hsm_com_client_hdl_t *mgr_hdl;
pid_t pid;
memset(s_path,0,sizeof(s_path));
memset(c_path,0,sizeof(c_path));
pid = getpid();
switch ( mgr )
{
case FM_MGR_SM:
mgr_prefix = HSM_FM_SCK_SM;
mgr_hdl = &hdl->sm_hdl;
break;
case FM_MGR_PM:
mgr_prefix = HSM_FM_SCK_PM;
mgr_hdl = &hdl->pm_hdl;
break;
case FM_MGR_FE:
mgr_prefix = HSM_FM_SCK_FE;
mgr_hdl = &hdl->fe_hdl;
break;
default:
return FM_CONF_INIT_ERR;
}
// Fill in the paths for the server and client sockets.
sprintf(s_path,"%s%s%d",HSM_FM_SCK_PREFIX,mgr_prefix,hdl->instance);
sprintf(c_path,"%s%s%d_C_%lu",HSM_FM_SCK_PREFIX,mgr_prefix,
hdl->instance, (long unsigned)pid);
if ( *mgr_hdl == NULL )
{
if ( hcom_client_init(mgr_hdl,s_path,c_path,32768) != HSM_COM_OK )
{
return FM_CONF_INIT_ERR;
}
}
if ( hcom_client_connect(*mgr_hdl) == HSM_COM_OK )
{
hdl->conx_mask |= mgr;
return FM_CONF_OK;
}
return FM_CONF_CONX_ERR;
} | CWE-362 | 18 |
char *path_name(const struct name_path *path, const char *name)
{
const struct name_path *p;
char *n, *m;
int nlen = strlen(name);
int len = nlen + 1;
for (p = path; p; p = p->up) {
if (p->elem_len)
len += p->elem_len + 1;
}
n = xmalloc(len);
m = n + len - (nlen + 1);
strcpy(m, name);
for (p = path; p; p = p->up) {
if (p->elem_len) {
m -= p->elem_len + 1;
memcpy(m, p->elem, p->elem_len);
m[p->elem_len] = '/';
}
}
return n;
} | CWE-119 | 26 |
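len and nlen here are plain ints, so a deep enough name_path chain wraps the sum before xmalloc and the strcpy/memcpy loop writes past the undersized block; the strbuf-based path_name earlier in this table is the reworked form. A sketch of the same walk with size_t and checked addition:

```c
#include <stddef.h>
#include <stdint.h>

struct name_path {
    const struct name_path *up;
    size_t elem_len;
};

/* Sum the full path length, failing on overflow instead of wrapping. */
static int total_path_len(const struct name_path *path, size_t nlen, size_t *out)
{
    size_t len = nlen + 1;    /* nlen comes from strlen, so +1 cannot wrap */
    for (const struct name_path *p = path; p; p = p->up) {
        if (p->elem_len == 0)
            continue;
        if (p->elem_len + 1 > SIZE_MAX - len)
            return -1;        /* would overflow: report instead of wrapping */
        len += p->elem_len + 1;
    }
    *out = len;
    return 0;
}
```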
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
int rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (test_thread_flag(TIF_32BIT)) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
} else {
put_user(0, (unsigned long __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, (unsigned long __user *) reg + 1);
}
advance(regs);
} | CWE-400 | 2 |
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int rc;
pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
skb = skb_recv_datagram(sk, flags, noblock, &rc);
if (!skb)
return rc;
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
return rc ? : copied;
} | CWE-20 | 0 |
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm)
{
memset(scm, 0, sizeof(*scm));
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
return 0;
return __scm_send(sock, msg, scm);
} | CWE-287 | 4 |
static int ceph_x_decrypt(struct ceph_crypto_key *secret,
void **p, void *end, void *obuf, size_t olen)
{
struct ceph_x_encrypt_header head;
size_t head_len = sizeof(head);
int len, ret;
len = ceph_decode_32(p);
if (*p + len > end)
return -EINVAL;
dout("ceph_x_decrypt len %d\n", len);
ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
*p, len);
if (ret)
return ret;
if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
return -EPERM;
*p += len;
return olen;
} | CWE-119 | 26 |
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
unsigned long flags;
if (PCM_RUNTIME_CHECK(substream))
return;
runtime = substream->runtime;
snd_pcm_stream_lock_irqsave(substream, flags);
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
#ifdef CONFIG_SND_PCM_TIMER
if (substream->timer_running)
snd_timer_interrupt(substream->timer, 1);
#endif
_end:
snd_pcm_stream_unlock_irqrestore(substream, flags);
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
} | CWE-362 | 18 |
static void parse_content_range(URLContext *h, const char *p)
{
HTTPContext *s = h->priv_data;
const char *slash;
if (!strncmp(p, "bytes ", 6)) {
p += 6;
s->off = strtoll(p, NULL, 10);
if ((slash = strchr(p, '/')) && strlen(slash) > 0)
s->filesize = strtoll(slash + 1, NULL, 10);
}
if (s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647))
h->is_streamed = 0; /* we _can_ in fact seek */
} | CWE-119 | 26 |
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path)
{
struct ext4_extent *ex;
int depth;
int err = 0;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
/* first mark the extent as initialized */
ext4_ext_mark_initialized(ex);
/* note: ext4_ext_correct_indexes() isn't needed here because
* borders are not changed
*/
ext4_ext_try_to_merge(handle, inode, path, ex);
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
ext4_ext_show_leaf(inode, path);
return err;
} | CWE-362 | 18 |
int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct qeth_snmp_ureq *ureq;
int req_len;
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
QETH_CARD_TEXT(card, 3, "snmpcmd");
if (card->info.guestlan)
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
(!card->options.layer2)) {
return -EOPNOTSUPP;
}
/* skip 4 bytes (data_len struct member) to get req_len */
if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
return -EFAULT;
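/* Note: req_len is copied straight from user space and never bounded, so
 * both the memdup_user() size computation below and the later memcpy()
 * into the command buffer can overflow (the flaw behind the CWE-119
 * label; compare CVE-2013-6381). */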
ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
if (IS_ERR(ureq)) {
QETH_CARD_TEXT(card, 2, "snmpnome");
return PTR_ERR(ureq);
}
qinfo.udata_len = ureq->hdr.data_len;
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
if (!qinfo.udata) {
kfree(ureq);
return -ENOMEM;
}
qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
QETH_SNMP_SETADP_CMDLENGTH + req_len);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
qeth_snmp_command_cb, (void *)&qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
QETH_CARD_IFNAME(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
kfree(ureq);
kfree(qinfo.udata);
return rc;
} | CWE-119 | 26 |
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
struct nlattr *rp)
{
struct xfrm_replay_state_esn *up;
if (!replay_esn || !rp)
return 0;
up = nla_data(rp);
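/* Only the two computed lengths are compared; nla_len(rp) itself is never
 * checked against xfrm_replay_state_esn_len(up), so a short attribute
 * claiming a large bmp_len makes the kernel read beyond the attribute
 * (the CWE-200 information exposure). */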
if (xfrm_replay_state_esn_len(replay_esn) !=
xfrm_replay_state_esn_len(up))
return -EINVAL;
return 0;
} | CWE-200 | 10 |
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, void *yyscanner, HEX_LEX_ENVIRONMENT *lex_env)
{
YYUSE (yyvaluep);
YYUSE (yyscanner);
YYUSE (lex_env);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
switch (yytype)
{
case 16: /* tokens */
#line 94 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1023 "hex_grammar.c" /* yacc.c:1257 */
break;
case 17: /* token_sequence */
#line 95 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1029 "hex_grammar.c" /* yacc.c:1257 */
break;
case 18: /* token_or_range */
#line 96 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1035 "hex_grammar.c" /* yacc.c:1257 */
break;
case 19: /* token */
#line 97 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1041 "hex_grammar.c" /* yacc.c:1257 */
break;
case 21: /* range */
#line 100 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1047 "hex_grammar.c" /* yacc.c:1257 */
break;
case 22: /* alternatives */
#line 99 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1053 "hex_grammar.c" /* yacc.c:1257 */
break;
case 23: /* byte */
#line 98 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1059 "hex_grammar.c" /* yacc.c:1257 */
break;
default:
break;
} | CWE-674 | 28 |
static bool tailmatch(const char *little, const char *bigone)
{
size_t littlelen = strlen(little);
size_t biglen = strlen(bigone);
if(littlelen > biglen)
return FALSE;
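/* No check that the match falls on a '.' boundary, so "example.com" also
 * tail-matches "notexample.com" and cookies can be sent to an unrelated
 * host (cf. CVE-2013-1944; hence the CWE-200 label). */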
return Curl_raw_equal(little, bigone+biglen-littlelen) ? TRUE : FALSE;
} | CWE-200 | 10 |
xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/* Disable the PMU. */
pmnc = xscale2pmu_read_pmnc();
xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
/* Check the overflow flag register. */
of_flags = xscale2pmu_read_overflow_flags();
if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
return IRQ_NONE;
/* Clear the overflow bits. */
xscale2pmu_write_overflow_flags(of_flags);
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
} | CWE-400 | 2 |
static int search_old_relocation(struct reloc_struct_t *reloc_table, ut32 addr_to_patch, int n_reloc) {
int i;
for (i = 0; i < n_reloc; i++) {
if (addr_to_patch == reloc_table[i].data_offset) {
return i;
}
}
return -1;
} | CWE-119 | 26 |
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len,
int flags)
{
int err;
struct sk_buff *skb;
struct sock *sk = sock->sk;
err = -EIO;
if (sk->sk_state & PPPOX_BOUND)
goto end;
msg->msg_namelen = 0;
err = 0;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
goto end;
if (len > skb->len)
len = skb->len;
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
if (likely(err == 0))
err = len;
kfree_skb(skb);
end:
return err;
} | CWE-20 | 0 |
log2vis_encoded_string (PyObject * string, const char *encoding,
FriBidiParType base_direction, int clean, int reordernsm)
{
PyObject *logical = NULL; /* logical unicode object */
PyObject *result = NULL; /* output string object */
/* Always needed for the string length */
logical = PyUnicode_Decode (PyString_AS_STRING (string),
PyString_GET_SIZE (string),
encoding, "strict");
if (logical == NULL)
return NULL;
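/* Note the length mismatch in the shortcut below: the raw encoded byte
 * string is paired with PyUnicode_GET_SIZE(logical), a character count;
 * inside log2vis_utf8 this mismatch can drive writes past the allocated
 * buffer (a plausible reading of the CWE-119 label). */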
if (strcmp (encoding, "utf-8") == 0)
/* Shortcut for utf8 strings (little faster) */
result = log2vis_utf8 (string,
PyUnicode_GET_SIZE (logical),
base_direction, clean, reordernsm);
else
{
/* Invoke log2vis_unicode and encode back to encoding */
PyObject *visual = log2vis_unicode (logical, base_direction, clean, reordernsm);
if (visual)
{
result = PyUnicode_Encode (PyUnicode_AS_UNICODE
(visual),
PyUnicode_GET_SIZE (visual),
encoding, "strict");
Py_DECREF (visual);
}
}
Py_DECREF (logical);
return result;
} | CWE-119 | 26 |
static ssize_t write_mem(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
phys_addr_t p = *ppos;
ssize_t written, sz;
unsigned long copied;
void *ptr;
if (p != *ppos)
return -EFBIG;
if (!valid_phys_addr_range(p, count))
return -EFAULT;
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
count -= sz;
written += sz;
}
#endif
while (count > 0) {
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
return -EPERM;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr) {
if (written)
break;
return -EFAULT;
}
copied = copy_from_user(ptr, buf, sz);
unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
if (written)
break;
return -EFAULT;
}
buf += sz;
p += sz;
count -= sz;
written += sz;
}
*ppos += written;
return written;
} | CWE-732 | 13 |
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
/*
* If task is not current, it will have been flushed already to
* it's thread_struct during __switch_to().
*
* A reclaim flushes ALL the state or if not in TM save TM SPRs
* in the appropriate thread structures from live.
*/
if (tsk != current)
return;
if (MSR_TM_SUSPENDED(mfmsr())) {
tm_reclaim_current(TM_CAUSE_SIGNAL);
} else {
tm_enable();
tm_save_sprs(&(tsk->thread));
}
} | CWE-119 | 26 |
GF_Err diST_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
char str[1024];
GF_DIMSScriptTypesBox *p = (GF_DIMSScriptTypesBox *)s;
i=0;
str[0]=0;
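/* The loop below trusts the box payload to contain a NUL terminator:
 * i is never checked against sizeof(str), so a payload longer than 1023
 * bytes overruns the stack buffer (the CWE-119 label). */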
while (1) {
str[i] = gf_bs_read_u8(bs);
if (!str[i]) break;
i++;
}
ISOM_DECREASE_SIZE(p, i);
p->content_script_types = gf_strdup(str);
return GF_OK;
} | CWE-119 | 26 |
static void show_object(struct object *obj,
struct strbuf *path, const char *last,
void *data)
{
char *name = path_name(path, last);
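/* path_name() builds the full object path in a buffer whose size
 * arithmetic could be overflowed by very long paths in a crafted
 * repository (cf. the git 2.7.4 security fixes; plausibly the CWE-119
 * label on this sample). */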
add_preferred_base_object(name);
add_object_entry(obj->oid.hash, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
/*
* We will have generated the hash from the name,
* but not saved a pointer to it - we can free it
*/
free((char *)name);
} | CWE-119 | 26 |
static void Rp_test(js_State *J)
{
js_Regexp *re;
const char *text;
int opts;
Resub m;
re = js_toregexp(J, 0);
text = js_tostring(J, 1);
opts = 0;
if (re->flags & JS_REGEXP_G) {
if (re->last > strlen(text)) {
re->last = 0;
js_pushboolean(J, 0);
return;
}
if (re->last > 0) {
text += re->last;
opts |= REG_NOTBOL;
}
}
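/* js_regexec() drives mujs's recursive backtracking matcher; a deeply
 * nested pattern can recurse without bound and exhaust the C stack,
 * which is what the CWE-674 label points at. */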
if (!js_regexec(re->prog, text, &m, opts)) {
if (re->flags & JS_REGEXP_G)
re->last = re->last + (m.sub[0].ep - text);
js_pushboolean(J, 1);
return;
}
if (re->flags & JS_REGEXP_G)
re->last = 0;
js_pushboolean(J, 0);
} | CWE-674 | 28 |
static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel;
int val, len;
int err;
struct pppol2tp_session *ps;
if (level != SOL_PPPOL2TP)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
err = -ENOTCONN;
if (sk->sk_user_data == NULL)
goto end;
/* Get the session context */
err = -EBADF;
session = pppol2tp_sock_to_session(sk);
if (session == NULL)
goto end;
/* Special case: if session_id == 0x0000, treat as operation on tunnel */
ps = l2tp_session_priv(session);
if ((session->session_id == 0) &&
(session->peer_session_id == 0)) {
err = -EBADF;
tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
if (tunnel == NULL)
goto end_put_sess;
err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
sock_put(ps->tunnel_sock);
} else
err = pppol2tp_session_getsockopt(sk, session, optname, &val);
err = -EFAULT;
if (put_user(len, optlen))
goto end_put_sess;
if (copy_to_user((void __user *) optval, &val, len))
goto end_put_sess;
err = 0;
end_put_sess:
sock_put(sk);
end:
return err;
} | CWE-269 | 6 |
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
struct fwnet_device *dev;
struct fw_iso_packet packet;
__be16 *hdr_ptr;
__be32 *buf_ptr;
int retval;
u32 length;
u16 source_node_id;
u32 specifier_id;
u32 ver;
unsigned long offset;
unsigned long flags;
dev = data;
hdr_ptr = header;
length = be16_to_cpup(hdr_ptr);
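/* length is taken from the GASP header as-is and never validated against
 * the receive buffer, so a malicious node on the bus can force
 * out-of-bounds accesses downstream (cf. CVE-2016-8633; the CWE-119
 * label). */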
spin_lock_irqsave(&dev->lock, flags);
offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
dev->broadcast_rcv_next_ptr = 0;
spin_unlock_irqrestore(&dev->lock, flags);
specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
if (specifier_id == IANA_SPECIFIER_ID &&
(ver == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
|| ver == RFC3146_SW_VERSION
#endif
)) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
context->card->generation, true);
}
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
spin_lock_irqsave(&dev->lock, flags);
retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
&dev->broadcast_rcv_buffer, offset);
spin_unlock_irqrestore(&dev->lock, flags);
if (retval >= 0)
fw_iso_context_queue_flush(dev->broadcast_rcv_context);
else
dev_err(&dev->netdev->dev, "requeue failed\n");
} | CWE-119 | 26 |
static pj_status_t STATUS_FROM_SSL_ERR2(char *action, pj_ssl_sock_t *ssock,
int ret, int err, int len)
{
unsigned long ssl_err = err;
if (err == SSL_ERROR_SSL) {
ssl_err = ERR_peek_error();
}
/* Dig for more from OpenSSL error queue */
SSLLogErrors(action, ret, err, len, ssock);
ssock->last_err = ssl_err;
return GET_STATUS_FROM_SSL_ERR(ssl_err);
} | CWE-362 | 18 |
receive_carbon(void **state)
{
prof_input("/carbons on");
prof_connect();
assert_true(stbbr_received(
"<iq id='*' type='set'><enable xmlns='urn:xmpp:carbons:2'/></iq>"
));
stbbr_send(
"<presence to='stabber@localhost' from='buddy1@localhost/mobile'>"
"<priority>10</priority>"
"<status>On my mobile</status>"
"</presence>"
);
assert_true(prof_output_exact("Buddy1 (mobile) is online, \"On my mobile\""));
prof_input("/msg Buddy1");
assert_true(prof_output_exact("unencrypted"));
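/* The forwarded carbon below arrives from buddy1@localhost rather than the
 * user's own bare JID; a client must only honour carbons from its own
 * account, or any contact can spoof messages (cf. CVE-2017-5592, the
 * scenario behind this sample's CWE-20 label). */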
stbbr_send(
"<message type='chat' to='stabber@localhost/profanity' from='buddy1@localhost'>"
"<received xmlns='urn:xmpp:carbons:2'>"
"<forwarded xmlns='urn:xmpp:forward:0'>"
"<message id='prof_msg_7' xmlns='jabber:client' type='chat' lang='en' to='stabber@localhost/profanity' from='buddy1@localhost/mobile'>"
"<body>test carbon from recipient</body>"
"</message>"
"</forwarded>"
"</received>"
"</message>"
);
assert_true(prof_output_regex("Buddy1/mobile: .+test carbon from recipient"));
} | CWE-20 | 0 |
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
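/* rcipher is never zeroed and strlcpy() does not pad, so the bytes after
 * the NUL in .type are uninitialized kernel stack that nla_put() copies
 * out to user space (the CWE-200 info leak; compare the
 * CVE-2013-2546..2548 series). */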
strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
sizeof(struct crypto_report_cipher), &rcipher))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
} | CWE-200 | 10 |
AcpiNsTerminate (
void)
{
ACPI_STATUS Status;
ACPI_FUNCTION_TRACE (NsTerminate);
#ifdef ACPI_EXEC_APP
{
ACPI_OPERAND_OBJECT *Prev;
ACPI_OPERAND_OBJECT *Next;
/* Delete any module-level code blocks */
Next = AcpiGbl_ModuleCodeList;
while (Next)
{
Prev = Next;
Next = Next->Method.Mutex;
Prev->Method.Mutex = NULL; /* Clear the Mutex (cheated) field */
AcpiUtRemoveReference (Prev);
}
}
#endif
/*
* Free the entire namespace -- all nodes and all objects
* attached to the nodes
*/
AcpiNsDeleteNamespaceSubtree (AcpiGbl_RootNode);
/* Delete any objects attached to the root node */
Status = AcpiUtAcquireMutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (Status))
{
return_VOID;
}
AcpiNsDeleteNode (AcpiGbl_RootNode);
(void) AcpiUtReleaseMutex (ACPI_MTX_NAMESPACE);
ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Namespace freed\n"));
return_VOID;
} | CWE-755 | 21 |
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__get_cpu_var(irq_pmi_count)++;
cpuc = &__get_cpu_var(cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
perf_sample_data_init(&data, 0);
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, 1, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
} | CWE-400 | 2 |
static void __net_exit sctp_net_exit(struct net *net)
{
/* Free the local address list */
sctp_free_addr_wq(net);
sctp_free_local_addr_list(net);
/* Free the control endpoint. */
inet_ctl_sock_destroy(net->sctp.ctl_sock);
sctp_dbg_objcnt_exit(net);
sctp_proc_exit(net);
cleanup_sctp_mibs(net);
sctp_sysctl_net_unregister(net);
} | CWE-119 | 26 |
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
unsigned int cryptlen)
{
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 odata[16];
u8 idata[16];
int ilen, err;
/* format control data for input */
err = format_input(odata, req, cryptlen);
if (err)
goto out;
sg_init_table(sg, 3);
sg_set_buf(&sg[0], odata, 16);
/* format associated data and compute into mac */
if (assoclen) {
ilen = format_adata(idata, assoclen);
sg_set_buf(&sg[1], idata, ilen);
sg_chain(sg, 3, req->src);
} else {
ilen = 0;
sg_chain(sg, 2, req->src);
}
ahash_request_set_tfm(ahreq, ctx->mac);
ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL);
ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16);
err = crypto_ahash_init(ahreq);
if (err)
goto out;
err = crypto_ahash_update(ahreq);
if (err)
goto out;
/* we need to pad the MAC input to a round multiple of the block size */
ilen = 16 - (assoclen + ilen) % 16;
if (ilen < 16) {
memset(idata, 0, ilen);
sg_init_table(sg, 2);
sg_set_buf(&sg[0], idata, ilen);
if (plain)
sg_chain(sg, 2, plain);
plain = sg;
cryptlen += ilen;
}
ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen);
err = crypto_ahash_finup(ahreq);
out:
return err;
} | CWE-119 | 26 |
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
int idx, handled = 0;
u64 val;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This generic handler doesn't seem to have any issues where the
* unmasking occurs so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter some cpus
* might still deliver spurious interrupts still
* in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;
/*
* event overflow
*/
handled++;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
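/* perf_event_overflow() still takes the old nmi flag here (the literal 1);
 * mishandled overflows through this interface allowed local users to cause
 * a denial of service, and the parameter was later removed entirely
 * (cf. CVE-2011-2918; plausibly why this sample carries CWE-400). */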
if (perf_event_overflow(event, 1, &data, regs))
x86_pmu_stop(event, 0);
}
if (handled)
inc_irq_stat(apic_perf_irqs);
return handled;
} | CWE-400 | 2 |
error_t httpClientSetMethod(HttpClientContext *context, const char_t *method)
{
size_t m;
size_t n;
char_t *p;
//Check parameters
if(context == NULL || method == NULL)
return ERROR_INVALID_PARAMETER;
//Compute the length of the HTTP method
n = osStrlen(method);
//Make sure the length of the user name is acceptable
if(n == 0 || n > HTTP_CLIENT_MAX_METHOD_LEN)
return ERROR_INVALID_LENGTH;
//Make sure the buffer contains a valid HTTP request
if(context->bufferLen > HTTP_CLIENT_BUFFER_SIZE)
return ERROR_INVALID_SYNTAX;
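//Off-by-one: assuming the buffer is exactly HTTP_CLIENT_BUFFER_SIZE bytes,
//a bufferLen equal to that size passes the check above, yet the NUL write
//below lands one byte past the end (likely the flaw behind the CWE-20
//label)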
//Properly terminate the string with a NULL character
context->buffer[context->bufferLen] = '\0';
//The Request-Line begins with a method token
p = strchr(context->buffer, ' ');
//Any parsing error?
if(p == NULL)
return ERROR_INVALID_SYNTAX;
//Compute the length of the current method token
m = p - context->buffer;
//Make sure the buffer is large enough to hold the new HTTP request method
if((context->bufferLen + n - m) > HTTP_CLIENT_BUFFER_SIZE)
return ERROR_BUFFER_OVERFLOW;
//Make room for the new method token
osMemmove(context->buffer + n, p, context->bufferLen + 1 - m);
//Copy the new method token
osStrncpy(context->buffer, method, n);
//Adjust the length of the request header
context->bufferLen = context->bufferLen + n - m;
//Save HTTP request method
osStrcpy(context->method, method);
//Successful processing
return NO_ERROR;
} | CWE-20 | 0 |
const char * util_acl_to_str(const sc_acl_entry_t *e)
{
static char line[80], buf[20];
unsigned int acl;
if (e == NULL)
return "N/A";
line[0] = 0;
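/* line is a fixed 80-byte buffer, but the loop below strcat()s one token
 * per ACL entry without any length check, so a long ACL chain overflows
 * it (the CWE-119 label). */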
while (e != NULL) {
acl = e->method;
switch (acl) {
case SC_AC_UNKNOWN:
return "N/A";
case SC_AC_NEVER:
return "NEVR";
case SC_AC_NONE:
return "NONE";
case SC_AC_CHV:
strcpy(buf, "CHV");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "%d", e->key_ref);
break;
case SC_AC_TERM:
strcpy(buf, "TERM");
break;
case SC_AC_PRO:
strcpy(buf, "PROT");
break;
case SC_AC_AUT:
strcpy(buf, "AUTH");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 4, "%d", e->key_ref);
break;
case SC_AC_SEN:
strcpy(buf, "Sec.Env. ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
case SC_AC_SCB:
strcpy(buf, "Sec.ControlByte ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "Ox%X", e->key_ref);
break;
case SC_AC_IDA:
strcpy(buf, "PKCS#15 AuthID ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
default:
strcpy(buf, "????");
break;
}
strcat(line, buf);
strcat(line, " ");
e = e->next;
}
line[strlen(line)-1] = 0; /* get rid of trailing space */
return line;
} | CWE-119 | 26 |
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
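/* The copy loop below is bounded only by the facility length l, never by
 * ROSE_MAX_DIGIS, so crafted facilities overflow the source_digis/
 * dest_digis arrays (cf. CVE-2011-1493; the CWE-20 label reflects the
 * missing validation). */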
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT)
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
else
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
} | CWE-20 | 0 |
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
int idx = i->idx;
size_t off = i->iov_offset, orig_sz;
if (unlikely(i->count < size))
size = i->count;
orig_sz = size;
if (size) {
if (off) /* make it relative to the beginning of buffer */
size += off - pipe->bufs[idx].offset;
while (1) {
buf = &pipe->bufs[idx];
if (size <= buf->len)
break;
size -= buf->len;
idx = next_idx(idx, pipe);
}
buf->len = size;
i->idx = idx;
off = i->iov_offset = buf->offset + size;
}
if (off)
idx = next_idx(idx, pipe);
if (pipe->nrbufs) {
int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
/* [curbuf,unused) is in use. Free [idx,unused) */
while (idx != unused) {
pipe_buf_release(pipe, &pipe->bufs[idx]);
idx = next_idx(idx, pipe);
pipe->nrbufs--;
}
}
i->count -= orig_sz;
} | CWE-200 | 10 |
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx)
{
struct perf_sample_data data;
struct perf_event *event;
struct hlist_node *node;
struct perf_raw_record raw = {
.size = entry_size,
.data = record,
};
perf_sample_data_init(&data, addr);
data.raw = &raw;
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, 1, &data, regs);
}
perf_swevent_put_recursion_context(rctx);
} | CWE-400 | 2 |
static pyc_object *get_array_object_generic(RBuffer *buffer, ut32 size) {
pyc_object *tmp = NULL;
pyc_object *ret = NULL;
ut32 i = 0;
ret = R_NEW0 (pyc_object);
if (!ret) {
return NULL;
}
ret->data = r_list_newf ((RListFree)free_object);
if (!ret->data) {
free (ret);
return NULL;
}
for (i = 0; i < size; i++) {
tmp = get_object (buffer);
if (!tmp) {
r_list_free (ret->data);
R_FREE (ret);
return NULL;
}
if (!r_list_append (ret->data, tmp)) {
free_object (tmp);
r_list_free (ret->data);
free (ret);
return NULL;
}
}
return ret;
} | CWE-119 | 26 |
error_t ksz8851ReceivePacket(NetInterface *interface)
{
size_t n;
uint16_t status;
Ksz8851Context *context;
NetRxAncillary ancillary;
//Point to the driver context
context = (Ksz8851Context *) interface->nicContext;
//Read received frame status from RXFHSR
status = ksz8851ReadReg(interface, KSZ8851_REG_RXFHSR);
//Make sure the frame is valid
if((status & RXFHSR_RXFV) != 0)
{
//Check error flags
if((status & (RXFHSR_RXMR | RXFHSR_RXFTL | RXFHSR_RXRF | RXFHSR_RXCE)) == 0)
{
//Read received frame byte size from RXFHBCR
n = ksz8851ReadReg(interface, KSZ8851_REG_RXFHBCR) & RXFHBCR_RXBC_MASK;
//Ensure the frame size is acceptable
if(n > 0 && n <= ETH_MAX_FRAME_SIZE)
{
//Reset QMU RXQ frame pointer to zero
ksz8851WriteReg(interface, KSZ8851_REG_RXFDPR, RXFDPR_RXFPAI);
//Enable RXQ read access
ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);
//Read data
ksz8851ReadFifo(interface, context->rxBuffer, n);
//End RXQ read access
ksz8851ClearBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);
//Additional options can be passed to the stack along with the packet
ancillary = NET_DEFAULT_RX_ANCILLARY;
//Pass the packet to the upper layer
nicProcessPacket(interface, context->rxBuffer, n, &ancillary);
//Valid packet received
return NO_ERROR;
}
}
}
//Release the current error frame from RXQ
ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_RRXEF);
//Report an error
return ERROR_INVALID_PACKET;
} | CWE-20 | 0 |
static int read_public_key(RSA *rsa)
{
int r;
sc_path_t path;
sc_file_t *file;
u8 buf[2048], *p = buf;
size_t bufsize, keysize;
r = select_app_df();
if (r)
return 1;
sc_format_path("I1012", &path);
r = sc_select_file(card, &path, &file);
if (r) {
fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = file->size;
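/* bufsize comes from the card's reported file size and is never clamped
 * to sizeof(buf), so a malicious card can make sc_read_binary() overflow
 * the 2048-byte stack buffer (the CWE-119 label). */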
sc_file_free(file);
r = sc_read_binary(card, 0, buf, bufsize, 0);
if (r < 0) {
fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = r;
do {
if (bufsize < 4)
return 3;
keysize = (p[0] << 8) | p[1];
if (keysize == 0)
break;
if (keysize < 3)
return 3;
if (p[2] == opt_key_num)
break;
p += keysize;
bufsize -= keysize;
} while (1);
if (keysize == 0) {
printf("Key number %d not found.\n", opt_key_num);
return 2;
}
return parse_public_key(p, keysize, rsa);
} | CWE-119 | 26 |