code | label_name | label
---|---|---|
static int perf_swevent_add(struct perf_event *event, int flags)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct hw_perf_event *hwc = &event->hw;
struct hlist_head *head;
if (is_sampling_event(event)) {
hwc->last_period = hwc->sample_period;
perf_swevent_set_period(event);
}
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
if (!head) {
/*
* We can race with cpu hotplug code. Do not
* WARN if the cpu just got unplugged.
*/
WARN_ON_ONCE(swhash->online);
return -EINVAL;
}
hlist_add_head_rcu(&event->hlist_entry, head);
perf_event_update_userpage(event);
return 0;
} | CWE-362 | 18 |
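The CWE-362 label above marks a check-then-act window around CPU hotplug: the hash-table lookup and the insertion are not atomic with respect to the CPU going offline. A minimal user-space sketch of the same pattern (purely illustrative; the names and the pthread model are assumptions, not the kernel's locking API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cpu_online;                 /* flipped by a hotplug thread */

static int add_entry(void)
{
	if (!cpu_online)                /* check... */
		return -1;
	/* ...window: cpu_online may flip before we act below */
	pthread_mutex_lock(&table_lock);
	if (!cpu_online) {              /* re-check under the lock so the
					 * check and the insertion are
					 * atomic w.r.t. hotplug */
		pthread_mutex_unlock(&table_lock);
		return -1;
	}
	/* insert into the table here */
	pthread_mutex_unlock(&table_lock);
	return 0;
}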
int perf_event_overflow(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
return __perf_event_overflow(event, nmi, 1, data, regs);
} | CWE-400 | 2 |
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
int n, er, qbit;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb_pull(skb, ROSE_MIN_LEN);
if (rose->qbitincl) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (srose != NULL) {
memset(srose, 0, msg->msg_namelen);
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
for (n = 0 ; n < rose->dest_ndigis ; n++)
full_srose->srose_digis[n] = rose->dest_digis[n];
msg->msg_namelen = sizeof(struct full_sockaddr_rose);
} else {
if (rose->dest_ndigis >= 1) {
srose->srose_ndigis = 1;
srose->srose_digi = rose->dest_digis[0];
}
msg->msg_namelen = sizeof(struct sockaddr_rose);
}
}
skb_free_datagram(sk, skb);
return copied;
} | CWE-20 | 0 |
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &error);
if (error < 0)
goto end;
m->msg_namelen = 0;
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
}
}
kfree_skb(skb);
end:
return error;
} | CWE-20 | 0 |
int __usb_get_extra_descriptor(char *buffer, unsigned size,
unsigned char type, void **ptr)
{
struct usb_descriptor_header *header;
while (size >= sizeof(struct usb_descriptor_header)) {
header = (struct usb_descriptor_header *)buffer;
if (header->bLength < 2) {
printk(KERN_ERR
"%s: bogus descriptor, type %d length %d\n",
usbcore_name,
header->bDescriptorType,
header->bLength);
return -1;
}
if (header->bDescriptorType == type) {
*ptr = header;
return 0;
}
buffer += header->bLength;
size -= header->bLength;
}
return -1;
} | CWE-400 | 2 |
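The `bLength < 2` guard above is what keeps this walk terminating: a descriptor claiming length 0 or 1 would stop `size` from shrinking and spin the loop forever, which is the resource-exhaustion angle the CWE-400 label points at. A standalone sketch of the same loop shape; the struct and names are assumptions for illustration, and it additionally rejects lengths that run past the buffer:

#include <stddef.h>
#include <stdint.h>

struct desc_header {                    /* assumed 2-byte header mirror */
	uint8_t bLength;
	uint8_t bDescriptorType;
};

static const struct desc_header *find_desc(const uint8_t *buf, size_t size,
					   uint8_t type)
{
	while (size >= sizeof(struct desc_header)) {
		const struct desc_header *h = (const void *)buf;

		/* a length below 2 would never shrink size; a length past
		 * the end would walk out of the buffer */
		if (h->bLength < sizeof(struct desc_header) ||
		    h->bLength > size)
			return NULL;
		if (h->bDescriptorType == type)
			return h;
		buf += h->bLength;
		size -= h->bLength;     /* strictly decreases each pass */
	}
	return NULL;
}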
horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc)
{
tmsize_t stride = PredictorState(tif)->stride;
unsigned char* cp = (unsigned char*) cp0;
assert((cc%stride)==0);
if (cc > stride) {
/*
* Pipeline the most common cases.
*/
if (stride == 3) {
unsigned int cr = cp[0];
unsigned int cg = cp[1];
unsigned int cb = cp[2];
cc -= 3;
cp += 3;
while (cc>0) {
cp[0] = (unsigned char) ((cr += cp[0]) & 0xff);
cp[1] = (unsigned char) ((cg += cp[1]) & 0xff);
cp[2] = (unsigned char) ((cb += cp[2]) & 0xff);
cc -= 3;
cp += 3;
}
} else if (stride == 4) {
unsigned int cr = cp[0];
unsigned int cg = cp[1];
unsigned int cb = cp[2];
unsigned int ca = cp[3];
cc -= 4;
cp += 4;
while (cc>0) {
cp[0] = (unsigned char) ((cr += cp[0]) & 0xff);
cp[1] = (unsigned char) ((cg += cp[1]) & 0xff);
cp[2] = (unsigned char) ((cb += cp[2]) & 0xff);
cp[3] = (unsigned char) ((ca += cp[3]) & 0xff);
cc -= 4;
cp += 4;
}
} else {
cc -= stride;
do {
REPEAT4(stride, cp[stride] =
(unsigned char) ((cp[stride] + *cp) & 0xff); cp++)
cc -= stride;
} while (cc>0);
}
}
} | CWE-119 | 26 |
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
} | CWE-20 | 0 |
M_bool M_fs_path_ishidden(const char *path, M_fs_info_t *info)
{
M_list_str_t *path_parts;
size_t len;
M_bool ret = M_FALSE;
(void)info;
if (path == NULL || *path == '\0') {
return M_FALSE;
}
/* Hidden: check whether the last component of the path (the file or
 * directory name itself) starts with a '.'. */
path_parts = M_fs_path_componentize_path(path, M_FS_SYSTEM_UNIX);
len = M_list_str_len(path_parts);
if (len > 0) {
if (*M_list_str_at(path_parts, len-1) == '.') {
ret = M_TRUE;
}
}
M_list_str_destroy(path_parts);
return ret;
} | CWE-732 | 13 |
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
struct floppy_raw_cmd *ptr;
int ret;
int i;
*rcmd = NULL;
loop:
ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
if (!ptr)
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
if (ret)
return -EFAULT;
ptr->next = NULL;
ptr->buffer_length = 0;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
* initially intended for the reply & the
* reply count. Needed for long 82078 commands
* such as RESTORE, which takes ... 17 command
* bytes. Murphy's law #137: When you reserve
* 16 bytes for a structure, you'll one day
* discover that you really need 17...
*/
return -EINVAL;
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
ptr->kernel_data = NULL;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
if (!ptr->kernel_data)
return -ENOMEM;
ptr->buffer_length = ptr->length;
}
if (ptr->flags & FD_RAW_WRITE) {
ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
if (ret)
return ret;
}
if (ptr->flags & FD_RAW_MORE) {
rcmd = &(ptr->next);
ptr->rate &= 0x43;
goto loop;
}
return 0;
} | CWE-754 | 31 |
static int netlbl_cipsov4_add_common(struct genl_info *info,
struct cipso_v4_doi *doi_def)
{
struct nlattr *nla;
int nla_rem;
u32 iter = 0;
doi_def->doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_TAGLST],
NLBL_CIPSOV4_A_MAX,
netlbl_cipsov4_genl_policy) != 0)
return -EINVAL;
nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem)
if (nla->nla_type == NLBL_CIPSOV4_A_TAG) {
if (iter > CIPSO_V4_TAG_MAXCNT)
return -EINVAL;
doi_def->tags[iter++] = nla_get_u8(nla);
}
if (iter < CIPSO_V4_TAG_MAXCNT)
doi_def->tags[iter] = CIPSO_V4_TAG_INVALID;
return 0;
} | CWE-119 | 26 |
static void utee_param_to_param(struct tee_ta_param *p, struct utee_params *up)
{
size_t n;
uint32_t types = up->types;
p->types = types;
for (n = 0; n < TEE_NUM_PARAMS; n++) {
uintptr_t a = up->vals[n * 2];
size_t b = up->vals[n * 2 + 1];
switch (TEE_PARAM_TYPE_GET(types, n)) {
case TEE_PARAM_TYPE_MEMREF_INPUT:
case TEE_PARAM_TYPE_MEMREF_OUTPUT:
case TEE_PARAM_TYPE_MEMREF_INOUT:
p->u[n].mem.mobj = &mobj_virt;
p->u[n].mem.offs = a;
p->u[n].mem.size = b;
break;
case TEE_PARAM_TYPE_VALUE_INPUT:
case TEE_PARAM_TYPE_VALUE_INOUT:
p->u[n].val.a = a;
p->u[n].val.b = b;
break;
default:
memset(&p->u[n], 0, sizeof(p->u[n]));
break;
}
}
} | CWE-20 | 0 |
static inline int mk_vhost_fdt_close(struct session_request *sr)
{
int id;
unsigned int hash;
struct vhost_fdt_hash_table *ht = NULL;
struct vhost_fdt_hash_chain *hc;
if (config->fdt == MK_FALSE) {
return close(sr->fd_file);
}
id = sr->vhost_fdt_id;
hash = sr->vhost_fdt_hash;
ht = mk_vhost_fdt_table_lookup(id, sr->host_conf);
if (mk_unlikely(!ht)) {
return close(sr->fd_file);
}
/* We got the hash table, now look around the chains array */
hc = mk_vhost_fdt_chain_lookup(hash, ht);
if (hc) {
/* Decrement the readers and check if we should close */
hc->readers--;
if (hc->readers == 0) {
hc->fd = -1;
hc->hash = 0;
ht->av_slots++;
return close(sr->fd_file);
}
else {
return 0;
}
}
return close(sr->fd_file);
} | CWE-20 | 0 |
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
static u32 ip6_idents_hashrnd __read_mostly;
u32 id;
net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
return htonl(id);
} | CWE-326 | 9 |
static inline int pmd_large(pmd_t pte)
{
return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
} | CWE-119 | 26 |
static st64 buf_format(RBuffer *dst, RBuffer *src, const char *fmt, int n) {
st64 res = 0;
int i;
for (i = 0; i < n; i++) {
int j;
int m = 1;
int tsize = 2;
bool bigendian = true;
for (j = 0; fmt[j]; j++) {
switch (fmt[j]) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
if (m == 1) {
m = r_num_get (NULL, &fmt[j]);
}
continue;
case 's': tsize = 2; bigendian = false; break;
case 'S': tsize = 2; bigendian = true; break;
case 'i': tsize = 4; bigendian = false; break;
case 'I': tsize = 4; bigendian = true; break;
case 'l': tsize = 8; bigendian = false; break;
case 'L': tsize = 8; bigendian = true; break;
case 'c': tsize = 1; bigendian = false; break;
default: return -1;
}
int k;
for (k = 0; k < m; k++) {
ut8 tmp[sizeof (ut64)];
ut8 d1;
ut16 d2;
ut32 d3;
ut64 d4;
st64 r = r_buf_read (src, tmp, tsize);
if (r < tsize) {
return -1;
}
switch (tsize) {
case 1:
d1 = r_read_ble8 (tmp);
r = r_buf_write (dst, (ut8 *)&d1, 1);
break;
case 2:
d2 = r_read_ble16 (tmp, bigendian);
r = r_buf_write (dst, (ut8 *)&d2, 2);
break;
case 4:
d3 = r_read_ble32 (tmp, bigendian);
r = r_buf_write (dst, (ut8 *)&d3, 4);
break;
case 8:
d4 = r_read_ble64 (tmp, bigendian);
r = r_buf_write (dst, (ut8 *)&d4, 8);
break;
}
if (r < 0) {
return -1;
}
res += r;
}
m = 1;
}
}
return res;
} | CWE-400 | 2 |
static void show_object(struct object *obj,
struct strbuf *path, const char *last,
void *data)
{
char *name = path_name(path, last);
add_preferred_base_object(name);
add_object_entry(obj->oid.hash, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
/*
* We will have generated the hash from the name,
* but not saved a pointer to it - we can free it
*/
free((char *)name);
} | CWE-119 | 26 |
static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
struct sk_buff *skb;
size_t copied;
int err;
IRDA_DEBUG(4, "%s()\n", __func__);
msg->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
__func__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
/*
* Check if we have previously stopped IrTTP and we now
* have more free space in our rx_queue. If so tell IrTTP
* to start delivering frames again before our rx_queue gets
* empty
*/
if (self->rx_flow == FLOW_STOP) {
if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
}
}
return copied;
} | CWE-20 | 0 |
static int jpc_ppm_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
ppm->data = 0;
if (ms->len < 1) {
goto error;
}
if (jpc_getuint8(in, &ppm->ind)) {
goto error;
}
ppm->len = ms->len - 1;
if (ppm->len > 0) {
if (!(ppm->data = jas_malloc(ppm->len))) {
goto error;
}
if (JAS_CAST(uint, jas_stream_read(in, ppm->data, ppm->len)) != ppm->len) {
goto error;
}
} else {
ppm->data = 0;
}
return 0;
error:
jpc_ppm_destroyparms(ms);
return -1;
} | CWE-20 | 0 |
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos > ss * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos, ss * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
} | CWE-119 | 26 |
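The bound above only constrains the start of the copy; `memcpy` still moves `len` bytes from `pos`, so a start just inside the table can read past its end. A tightened check would account for the copy length too — a sketch under that assumption, not necessarily the upstream patch:

/* Hypothetical tightened bound: the copy touches [pos, pos + len). */
if (pos + len > ss * sst->sst_len || pos + len < pos) {
	DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
	    SIZE_T_FORMAT "u\n", pos + len, ss * sst->sst_len));
	return -1;
}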
static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
} | CWE-119 | 26 |
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &error);
if (error < 0)
goto end;
m->msg_namelen = 0;
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
}
}
kfree_skb(skb);
end:
return error;
} | CWE-20 | 0 |
void enc624j600UpdateMacConfig(NetInterface *interface)
{
uint16_t duplexMode;
//Determine the new duplex mode by reading the PHYDPX bit
duplexMode = enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) & ESTAT_PHYDPX;
//Full-duplex mode?
if(duplexMode)
{
//Configure the FULDPX bit to match the current duplex mode
enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER |
MACON2_PADCFG2 | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1 | MACON2_FULDPX);
//Configure the Back-to-Back Inter-Packet Gap register
enc624j600WriteReg(interface, ENC624J600_REG_MABBIPG, 0x15);
}
//Half-duplex mode?
else
{
//Configure the FULDPX bit to match the current duplex mode
enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER |
MACON2_PADCFG2 | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1);
//Configure the Back-to-Back Inter-Packet Gap register
enc624j600WriteReg(interface, ENC624J600_REG_MABBIPG, 0x12);
}
} | CWE-20 | 0 |
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len)
{
size_t cipher_len;
size_t i;
unsigned char iv[16] = { 0 };
unsigned char plaintext[4096] = { 0 };
epass2003_exdata *exdata = NULL;
if (!card->drv_data)
return SC_ERROR_INVALID_ARGUMENTS;
exdata = (epass2003_exdata *)card->drv_data;
/* no cipher */
if (in[0] == 0x99)
return 0;
/* parse cipher length */
if (0x01 == in[2] && 0x82 != in[1]) {
cipher_len = in[1];
i = 3;
}
else if (0x01 == in[3] && 0x81 == in[1]) {
cipher_len = in[2];
i = 4;
}
else if (0x01 == in[4] && 0x82 == in[1]) {
cipher_len = in[2] * 0x100;
cipher_len += in[3];
i = 5;
}
else {
return -1;
}
if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext)
return -1;
/* decrypt */
if (KEY_TYPE_AES == exdata->smtype)
aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
else
des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
/* unpadding */
while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
cipher_len--;
if (2 == cipher_len)
return -1;
memcpy(out, plaintext, cipher_len - 2);
*out_len = cipher_len - 2;
return 0;
} | CWE-119 | 26 |
static int snd_timer_start_slave(struct snd_timer_instance *timeri)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master)
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
} | CWE-20 | 0 |
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
struct key *key;
key_ref_t key_ref;
long ret;
/* find the key first */
key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
goto error;
}
key = key_ref_to_ptr(key_ref);
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
goto error;
/* we can't; see if it's searchable from this process's keyrings
* - we automatically take account of the fact that it may be
* dangling off an instantiation key
*/
if (!is_key_possessed(key_ref)) {
ret = -EACCES;
goto error2;
}
/* the key is probably readable - now try to read it */
can_read_key:
ret = key_validate(key);
if (ret == 0) {
ret = -EOPNOTSUPP;
if (key->type->read) {
/* read the data with the semaphore held (since we
* might sleep) */
down_read(&key->sem);
ret = key->type->read(key, buffer, buflen);
up_read(&key->sem);
}
}
error2:
key_put(key);
error:
return ret;
} | CWE-362 | 18 |
static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
#else
return 0;
#endif
} | CWE-326 | 9 |
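Deriving the mix value from the `struct net` pointer gives it very little entropy and can expose kernel address bits through hash-bucket behavior, which is what the CWE-326 label points at. A sketch of the randomized alternative; the `hash_mix` field and its seeding are assumptions here:

/* Sketch: seed once per namespace instead of reusing the pointer. */
static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
	return net->hash_mix;   /* assumed field, set to a random u32
				 * when the namespace is created */
#else
	return 0;
#endif
}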
sf_flac_write_callback (const FLAC__StreamDecoder * UNUSED (decoder), const FLAC__Frame *frame, const int32_t * const buffer [], void *client_data)
{ SF_PRIVATE *psf = (SF_PRIVATE*) client_data ;
FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;
pflac->frame = frame ;
pflac->bufferpos = 0 ;
pflac->bufferbackup = SF_FALSE ;
pflac->wbuffer = buffer ;
flac_buffer_copy (psf) ;
return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE ;
} /* sf_flac_write_callback */ | CWE-119 | 26 |
ssh_packet_set_postauth(struct ssh *ssh)
{
struct sshcomp *comp;
int r, mode;
debug("%s: called", __func__);
/* This was set in net child, but is not visible in user child */
ssh->state->after_authentication = 1;
ssh->state->rekeying = 0;
for (mode = 0; mode < MODE_MAX; mode++) {
if (ssh->state->newkeys[mode] == NULL)
continue;
comp = &ssh->state->newkeys[mode]->comp;
if (comp && comp->enabled &&
(r = ssh_packet_init_compression(ssh)) != 0)
return r;
}
return 0;
} | CWE-119 | 26 |
__reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
int type, struct posix_acl *acl)
{
char *name;
void *value = NULL;
size_t size = 0;
int error;
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
if (error == 0)
acl = NULL;
}
}
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
break;
default:
return -EINVAL;
}
if (acl) {
value = reiserfs_posix_acl_to_disk(acl, &size);
if (IS_ERR(value))
return (int)PTR_ERR(value);
}
error = reiserfs_xattr_set_handle(th, inode, name, value, size, 0);
/*
* Ensure that the inode gets dirtied if we're only using
* the mode bits and an old ACL didn't exist. We don't need
* to check if the inode is hashed here since we won't get
* called by reiserfs_inherit_default_acl().
*/
if (error == -ENODATA) {
error = 0;
if (type == ACL_TYPE_ACCESS) {
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
}
}
kfree(value);
if (!error)
set_cached_acl(inode, type, acl);
return error;
} | CWE-285 | 23 |
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
unsigned long flags;
struct snd_seq_client_port *new_port, *p;
int num = -1;
/* sanity check */
if (snd_BUG_ON(!client))
return NULL;
if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) {
pr_warn("ALSA: seq: too many ports for client %d\n", client->number);
return NULL;
}
/* create a new port */
new_port = kzalloc(sizeof(*new_port), GFP_KERNEL);
if (!new_port)
return NULL; /* failure, out of memory */
/* init port data */
new_port->addr.client = client->number;
new_port->addr.port = -1;
new_port->owner = THIS_MODULE;
sprintf(new_port->name, "port-%d", num);
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
write_lock_irqsave(&client->ports_lock, flags);
list_for_each_entry(p, &client->ports_list_head, list) {
if (p->addr.port > num)
break;
if (port < 0) /* auto-probe mode */
num = p->addr.port + 1;
}
/* insert the new port */
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
sprintf(new_port->name, "port-%d", num);
return new_port;
} | CWE-362 | 18 |
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
unsigned long val;
void *ptr = NULL;
if (!atomic_pool) {
WARN(1, "coherent pool not initialised!\n");
return NULL;
}
val = gen_pool_alloc(atomic_pool, size);
if (val) {
phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
*ret_page = phys_to_page(phys);
ptr = (void *)val;
if (flags & __GFP_ZERO)
memset(ptr, 0, size);
}
return ptr;
} | CWE-200 | 10 |
void esp32EthDisableIrq(NetInterface *interface)
{
//Valid Ethernet PHY or switch driver?
if(interface->phyDriver != NULL)
{
//Disable Ethernet PHY interrupts
interface->phyDriver->disableIrq(interface);
}
else if(interface->switchDriver != NULL)
{
//Disable Ethernet switch interrupts
interface->switchDriver->disableIrq(interface);
}
else
{
//Just for sanity
}
} | CWE-20 | 0 |
void MSG_WriteBits( msg_t *msg, int value, int bits ) {
int i;
oldsize += bits;
// this isn't an exact overflow check, but close enough
if ( msg->maxsize - msg->cursize < 4 ) {
msg->overflowed = qtrue;
return;
}
if ( bits == 0 || bits < -31 || bits > 32 ) {
Com_Error( ERR_DROP, "MSG_WriteBits: bad bits %i", bits );
}
if ( bits < 0 ) {
bits = -bits;
}
if ( msg->oob ) {
if ( bits == 8 ) {
msg->data[msg->cursize] = value;
msg->cursize += 1;
msg->bit += 8;
} else if ( bits == 16 ) {
short temp = value;
CopyLittleShort( &msg->data[msg->cursize], &temp );
msg->cursize += 2;
msg->bit += 16;
} else if ( bits==32 ) {
CopyLittleLong( &msg->data[msg->cursize], &value );
msg->cursize += 4;
msg->bit += 32;
} else {
Com_Error( ERR_DROP, "can't write %d bits", bits );
}
} else {
value &= (0xffffffff >> (32 - bits));
if ( bits&7 ) {
int nbits;
nbits = bits&7;
for( i = 0; i < nbits; i++ ) {
Huff_putBit( (value & 1), msg->data, &msg->bit );
value = (value >> 1);
}
bits = bits - nbits;
}
if ( bits ) {
for( i = 0; i < bits; i += 8 ) {
Huff_offsetTransmit( &msgHuff.compressor, (value & 0xff), msg->data, &msg->bit );
value = (value >> 8);
}
}
msg->cursize = (msg->bit >> 3) + 1;
}
} | CWE-119 | 26 |
static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
struct net *net = sock_net(asoc->base.sk);
int error = 0;
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
/* Try again later. */
if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
sctp_association_hold(asoc);
goto out_unlock;
}
/* Is this association really dead and just waiting around for
* the timer to let go of the reference?
*/
if (asoc->base.dead)
goto out_unlock;
/* Run through the state machine. */
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(timeout_type),
asoc->state, asoc->ep, asoc,
(void *)timeout_type, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
sctp_association_put(asoc);
} | CWE-362 | 18 |
static int encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 64);
salsa20_ivsetup(ctx, walk.iv);
if (likely(walk.nbytes == nbytes))
{
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr, nbytes);
return blkcipher_walk_done(desc, &walk, 0);
}
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes - (walk.nbytes % 64));
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
}
if (walk.nbytes) {
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
} | CWE-20 | 0 |
horizontalDifference16(unsigned short *ip, int n, int stride,
unsigned short *wp, uint16 *From14)
{
register int r1, g1, b1, a1, r2, g2, b2, a2, mask;
/* assumption is unsigned pixel values */
#undef CLAMP
#define CLAMP(v) From14[(v) >> 2]
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]);
b2 = wp[2] = CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]);
b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
ip += n - 1; /* point to last one */
wp += n - 1; /* point to last one */
n -= stride;
while (n > 0) {
REPEAT(stride, wp[0] = CLAMP(ip[0]);
wp[stride] -= wp[0];
wp[stride] &= mask;
wp--; ip--)
n -= stride;
}
REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--)
}
}
} | CWE-119 | 26 |
static int __init ipip_init(void)
{
int err;
printk(banner);
if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) {
printk(KERN_INFO "ipip init: can't register tunnel\n");
return -EAGAIN;
}
err = register_pernet_device(&ipip_net_ops);
if (err)
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
return err;
} | CWE-362 | 18 |
static int __init xfrm6_tunnel_spi_init(void)
{
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
return 0;
} | CWE-362 | 18 |
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
int err;
if (len > ds)
len = ds;
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
msg->msg_namelen = 0;
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
}
err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
unlock:
release_sock(sk);
return err ?: len;
} | CWE-20 | 0 |
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
int ret;
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
indx, &data, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
return ret;
} | CWE-119 | 26 |
static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
struct ceph_crypto_key *secret,
void *buf, void *end)
{
void *p = buf;
char *dbuf;
char *ticket_buf;
u8 reply_struct_v;
u32 num;
int ret;
dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
if (!dbuf)
return -ENOMEM;
ret = -ENOMEM;
ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
if (!ticket_buf)
goto out_dbuf;
ceph_decode_8_safe(&p, end, reply_struct_v, bad);
if (reply_struct_v != 1)
return -EINVAL;
ceph_decode_32_safe(&p, end, num, bad);
dout("%d tickets\n", num);
while (num--) {
ret = process_one_ticket(ac, secret, &p, end,
dbuf, ticket_buf);
if (ret)
goto out;
}
ret = 0;
out:
kfree(ticket_buf);
out_dbuf:
kfree(dbuf);
return ret;
bad:
ret = -EINVAL;
goto out;
} | CWE-119 | 26 |
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
muscle_private_t* priv = MUSCLE_DATA(card);
mscfs_t *fs = priv->fs;
int x;
int count = 0;
mscfs_check_cache(priv->fs);
for(x = 0; x < fs->cache.size; x++) {
u8* oid= fs->cache.array[x].objectId.id;
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"FILE: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
if(0 == memcmp(fs->currentPath, oid, 2)) {
buf[0] = oid[2];
buf[1] = oid[3];
if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */
buf += 2;
count+=2;
}
}
return count;
} | CWE-119 | 26 |
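`buf` above is filled without ever consulting `bufLen`, so a card that reports more cached objects than the caller expected writes past the output buffer. A bounded sketch of the inner match (the error-handling choice is an assumption):

/* Sketch: honor the caller's buffer size before each 2-byte write. */
if (0 == memcmp(fs->currentPath, oid, 2)) {
	if ((size_t)count + 2 > bufLen)
		break;                  /* or return an error code */
	buf[0] = oid[2];
	buf[1] = oid[3];
	if (buf[0] == 0x00 && buf[1] == 0x00)
		continue;               /* no dir/null names outside root */
	buf += 2;
	count += 2;
}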
static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
struct posix_acl *acl)
{
char *ea_name;
int rc;
int size = 0;
char *value = NULL;
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
rc = posix_acl_equiv_mode(acl, &inode->i_mode);
if (rc < 0)
return rc;
inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
if (rc == 0)
acl = NULL;
}
break;
case ACL_TYPE_DEFAULT:
ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
return -EINVAL;
}
if (acl) {
size = posix_acl_xattr_size(acl->a_count);
value = kmalloc(size, GFP_KERNEL);
if (!value)
return -ENOMEM;
rc = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (rc < 0)
goto out;
}
rc = __jfs_setxattr(tid, inode, ea_name, value, size, 0);
out:
kfree(value);
if (!rc)
set_cached_acl(inode, type, acl);
return rc;
} | CWE-285 | 23 |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
} | CWE-20 | 0 |
check_entry_size_and_hooks(struct ip6t_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
if (e->next_offset
< sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
err = check_entry(e);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
pr_err("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
} | CWE-119 | 26 |
error_t ksz8851SendPacket(NetInterface *interface,
const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary)
{
size_t n;
size_t length;
Ksz8851TxHeader header;
Ksz8851Context *context;
//Point to the driver context
context = (Ksz8851Context *) interface->nicContext;
//Retrieve the length of the packet
length = netBufferGetLength(buffer) - offset;
//Check the frame length
if(length > ETH_MAX_FRAME_SIZE)
{
//The transmitter can accept another packet
osSetEvent(&interface->nicTxEvent);
//Report an error
return ERROR_INVALID_LENGTH;
}
//Get the amount of free memory available in the TX FIFO
n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;
//Make sure the TX FIFO is available for writing
if(n < (length + 8))
{
return ERROR_FAILURE;
}
//Copy user data
netBufferRead(context->txBuffer, buffer, offset, length);
//Format control word
header.controlWord = htole16(TX_CTRL_TXIC | (context->frameId++ & TX_CTRL_TXFID));
//Total number of bytes to be transmitted
header.byteCount = htole16(length);
//Enable TXQ write access
ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);
//Write TX packet header
ksz8851WriteFifo(interface, (uint8_t *) &header, sizeof(Ksz8851TxHeader));
//Write data
ksz8851WriteFifo(interface, context->txBuffer, length);
//End TXQ write access
ksz8851ClearBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);
//Start transmission
ksz8851SetBit(interface, KSZ8851_REG_TXQCR, TXQCR_METFE);
//Get the amount of free memory available in the TX FIFO
n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;
//Check whether the TX FIFO is available for writing
if(n >= (ETH_MAX_FRAME_SIZE + 8))
{
//The transmitter can accept another packet
osSetEvent(&interface->nicTxEvent);
}
//Successful processing
return NO_ERROR;
} | CWE-20 | 0 |
mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
struct sockaddr_mISDN *maddr;
int copied, err;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
__func__, (int)len, flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == MISDN_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
msg->msg_namelen = sizeof(struct sockaddr_mISDN);
maddr = (struct sockaddr_mISDN *)msg->msg_name;
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT)) {
maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
} else {
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xFF;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
}
} else {
if (msg->msg_namelen)
printk(KERN_WARNING "%s: too small namelen %d\n",
__func__, msg->msg_namelen);
msg->msg_namelen = 0;
}
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
atomic_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
}
memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
MISDN_HEADER_LEN);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
mISDN_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
} | CWE-20 | 0 |
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t *
p_code_block)
{
OPJ_UINT32 l_data_size;
l_data_size = (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *
(p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32));
if (l_data_size > p_code_block->data_size) {
if (p_code_block->data) {
/* We refer to data - 1 since below we incremented it */
opj_free(p_code_block->data - 1);
}
p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1);
if (! p_code_block->data) {
p_code_block->data_size = 0U;
return OPJ_FALSE;
}
p_code_block->data_size = l_data_size;
/* We reserve the initial byte as a fake byte to a non-FF value */
/* and increment the data pointer, so that opj_mqc_init_enc() */
/* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */
/* it. */
p_code_block->data[0] = 0;
p_code_block->data += 1; /*why +1 ?*/
}
return OPJ_TRUE;
} | CWE-119 | 26 |
int vis_emul(struct pt_regs *regs, unsigned int insn)
{
unsigned long pc = regs->tpc;
unsigned int opf;
BUG_ON(regs->tstate & TSTATE_PRIV);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc))
return -EFAULT;
save_and_clear_fpu();
opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
switch (opf) {
default:
return -EINVAL;
/* Pixel Formatting Instructions. */
case FPACK16_OPF:
case FPACK32_OPF:
case FPACKFIX_OPF:
case FEXPAND_OPF:
case FPMERGE_OPF:
pformat(regs, insn, opf);
break;
/* Partitioned Multiply Instructions */
case FMUL8x16_OPF:
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF:
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF:
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF:
pmul(regs, insn, opf);
break;
/* Pixel Compare Instructions */
case FCMPGT16_OPF:
case FCMPGT32_OPF:
case FCMPLE16_OPF:
case FCMPLE32_OPF:
case FCMPNE16_OPF:
case FCMPNE32_OPF:
case FCMPEQ16_OPF:
case FCMPEQ32_OPF:
pcmp(regs, insn, opf);
break;
/* Edge Handling Instructions */
case EDGE8_OPF:
case EDGE8N_OPF:
case EDGE8L_OPF:
case EDGE8LN_OPF:
case EDGE16_OPF:
case EDGE16N_OPF:
case EDGE16L_OPF:
case EDGE16LN_OPF:
case EDGE32_OPF:
case EDGE32N_OPF:
case EDGE32L_OPF:
case EDGE32LN_OPF:
edge(regs, insn, opf);
break;
/* Pixel Component Distance */
case PDIST_OPF:
pdist(regs, insn);
break;
/* Three-Dimensional Array Addressing Instructions */
case ARRAY8_OPF:
case ARRAY16_OPF:
case ARRAY32_OPF:
array(regs, insn, opf);
break;
/* Byte Mask and Shuffle Instructions */
case BMASK_OPF:
bmask(regs, insn);
break;
case BSHUFFLE_OPF:
bshuffle(regs, insn);
break;
}
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 0;
} | CWE-400 | 2 |
int read_file(struct sc_card *card, char *str_path, unsigned char **data, size_t *data_len)
{
struct sc_path path;
struct sc_file *file;
unsigned char *p;
int ok = 0;
int r;
size_t len;
sc_format_path(str_path, &path);
if (SC_SUCCESS != sc_select_file(card, &path, &file)) {
goto err;
}
len = file ? file->size : 4096;
p = realloc(*data, len);
if (!p) {
goto err;
}
*data = p;
*data_len = len;
r = sc_read_binary(card, 0, p, len, 0);
if (r < 0)
goto err;
*data_len = r;
ok = 1;
err:
sc_file_free(file);
return ok;
} | CWE-119 | 26 |
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
struct dst_entry *dst,
const struct flowi6 *fl6)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt = (struct rt6_info *)dst;
if (!dst)
goto out;
/* Yes, checking route validity in not connected
* case is not very simple. Take into account,
* that we do not support routing by source, TOS,
* and MSG_DONTROUTE --ANK (980726)
*
* 1. ip6_rt_check(): If route was host route,
* check that cached destination is current.
* If it is network route, we still may
* check its validity using saved pointer
* to the last used address: daddr_cache.
* We do not want to save whole address now,
* (because main consumer of this service
* is tcp, which has not this problem),
* so that the last trick works only on connected
* sockets.
* 2. oif also should be the same.
*/
if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
(fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
dst_release(dst);
dst = NULL;
}
out:
return dst;
} | CWE-20 | 0 |
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
unsigned long sz = huge_page_size(h);
pte_t *pte;
int err = 0;
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
if (pte && walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
if (err)
break;
} while (addr = next, addr != end);
return err;
} | CWE-200 | 10 |
horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2)
{
int32 r1, g1, b1, a1, r2, g2, b2, a2, mask;
float fltsize = Fltsize;
#define CLAMP(v) ( (v<(float)0.) ? 0 \
: (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \
: (v>(float)24.2) ? 2047 \
: LogK1*log(v*LogK2) + 0.5 )
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
a2 = wp[3] = (uint16) CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
ip += n - 1; /* point to last one */
wp += n - 1; /* point to last one */
n -= stride;
while (n > 0) {
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]);
wp[stride] -= wp[0];
wp[stride] &= mask;
wp--; ip--)
n -= stride;
}
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--)
}
}
} | CWE-119 | 26 |
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
/* depend on compiler for an atomic pmd read */
pmd_t pmdval = *pmd;
/*
* The barrier will stabilize the pmdval in a register or on
* the stack so that it will stop changing under the code.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
barrier();
#endif
if (pmd_none(pmdval))
return 1;
if (unlikely(pmd_bad(pmdval))) {
if (!pmd_trans_huge(pmdval))
pmd_clear_bad(pmd);
return 1;
}
return 0;
} | CWE-362 | 18 |
monitor_apply_keystate(struct monitor *pmonitor)
{
struct ssh *ssh = active_state; /* XXX */
struct kex *kex;
int r;
debug3("%s: packet_set_state", __func__);
if ((r = ssh_packet_set_state(ssh, child_state)) != 0)
fatal("%s: packet_set_state: %s", __func__, ssh_err(r));
sshbuf_free(child_state);
child_state = NULL;
if ((kex = ssh->kex) != NULL) {
/* XXX set callbacks */
#ifdef WITH_OPENSSL
kex->kex[KEX_DH_GRP1_SHA1] = kexdh_server;
kex->kex[KEX_DH_GRP14_SHA1] = kexdh_server;
kex->kex[KEX_DH_GRP14_SHA256] = kexdh_server;
kex->kex[KEX_DH_GRP16_SHA512] = kexdh_server;
kex->kex[KEX_DH_GRP18_SHA512] = kexdh_server;
kex->kex[KEX_DH_GEX_SHA1] = kexgex_server;
kex->kex[KEX_DH_GEX_SHA256] = kexgex_server;
kex->kex[KEX_ECDH_SHA2] = kexecdh_server;
#endif
kex->kex[KEX_C25519_SHA256] = kexc25519_server;
kex->load_host_public_key=&get_hostkey_public_by_type;
kex->load_host_private_key=&get_hostkey_private_by_type;
kex->host_key_index=&get_hostkey_index;
kex->sign = sshd_hostkey_sign;
}
/* Update with new address */
if (options.compression) {
ssh_packet_set_compress_hooks(ssh, pmonitor->m_zlib,
(ssh_packet_comp_alloc_func *)mm_zalloc,
(ssh_packet_comp_free_func *)mm_zfree);
}
} | CWE-119 | 26 |
static int lmf_header_load(lmf_header *lmfh, RBuffer *buf, Sdb *db) {
if (r_buf_size (buf) < sizeof (lmf_header)) {
return false;
}
if (r_buf_fread_at (buf, QNX_HEADER_ADDR, (ut8 *) lmfh, "iiiiiiiicccciiiicc", 1) < QNX_HDR_SIZE) {
return false;
}
r_strf_buffer (32);
sdb_set (db, "qnx.version", r_strf ("0x%xH", lmfh->version), 0);
sdb_set (db, "qnx.cflags", r_strf ("0x%xH", lmfh->cflags), 0);
sdb_set (db, "qnx.cpu", r_strf ("0x%xH", lmfh->cpu), 0);
sdb_set (db, "qnx.fpu", r_strf ("0x%xH", lmfh->fpu), 0);
sdb_set (db, "qnx.code_index", r_strf ("0x%x", lmfh->code_index), 0);
sdb_set (db, "qnx.stack_index", r_strf ("0x%x", lmfh->stack_index), 0);
sdb_set (db, "qnx.heap_index", r_strf ("0x%x", lmfh->heap_index), 0);
sdb_set (db, "qnx.argv_index", r_strf ("0x%x", lmfh->argv_index), 0);
sdb_set (db, "qnx.code_offset", r_strf ("0x%x", lmfh->code_offset), 0);
sdb_set (db, "qnx.stack_nbytes", r_strf ("0x%x", lmfh->stack_nbytes), 0);
sdb_set (db, "qnx.heap_nbytes", r_strf ("0x%x", lmfh->heap_nbytes), 0);
sdb_set (db, "qnx.image_base", r_strf ("0x%x", lmfh->image_base), 0);
return true;
} | CWE-400 | 2 |
static int xen_netbk_get_extras(struct xenvif *vif,
struct xen_netif_extra_info *extras,
int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = vif->tx.req_cons;
do {
if (unlikely(work_to_do-- <= 0)) {
netdev_dbg(vif->dev, "Missing extra info\n");
return -EBADR;
}
memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
sizeof(extra));
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
netdev_dbg(vif->dev,
"Invalid extra type: %d\n", extra.type);
return -EINVAL;
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
vif->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
} | CWE-20 | 0 |
DECLAREreadFunc(readContigTilesIntoBuffer)
{
int status = 1;
tsize_t tilesize = TIFFTileSize(in);
tdata_t tilebuf;
uint32 imagew = TIFFScanlineSize(in);
uint32 tilew = TIFFTileRowSize(in);
int iskew = imagew - tilew;
uint8* bufp = (uint8*) buf;
uint32 tw, tl;
uint32 row;
(void) spp;
tilebuf = _TIFFmalloc(tilesize);
if (tilebuf == 0)
return 0;
_TIFFmemset(tilebuf, 0, tilesize);
(void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
(void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);
for (row = 0; row < imagelength; row += tl) {
uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
uint32 colb = 0;
uint32 col;
for (col = 0; col < imagewidth && colb < imagew; col += tw) {
if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0
&& !ignore) {
TIFFError(TIFFFileName(in),
"Error, can't read tile at %lu %lu",
(unsigned long) col,
(unsigned long) row);
status = 0;
goto done;
}
if (colb + tilew > imagew) {
uint32 width = imagew - colb;
uint32 oskew = tilew - width;
cpStripToTile(bufp + colb,
tilebuf, nrow, width,
oskew + iskew, oskew );
} else
cpStripToTile(bufp + colb,
tilebuf, nrow, tilew,
iskew, 0);
colb += tilew;
}
bufp += imagew * nrow;
}
done:
_TIFFfree(tilebuf);
return status;
} | CWE-119 | 26 |
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
} | CWE-20 | 0 |
stf_status ikev2parent_inI2outR2(struct msg_digest *md)
{
struct state *st = md->st;
/* struct connection *c = st->st_connection; */
/*
* the initiator sent us an encrypted payload. We need to calculate
* our g^xy, and skeyseed values, and then decrypt the payload.
*/
DBG(DBG_CONTROLMORE,
DBG_log(
"ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2"));
/* verify that there is in fact an encrypted payload */
if (!md->chain[ISAKMP_NEXT_v2E]) {
libreswan_log("R2 state should receive an encrypted payload");
reset_globals();
return STF_FATAL;
}
/* now. we need to go calculate the g^xy */
{
struct dh_continuation *dh = alloc_thing(
struct dh_continuation,
"ikev2_inI2outR2 KE");
stf_status e;
dh->md = md;
set_suspended(st, dh->md);
pcrc_init(&dh->dh_pcrc);
dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue;
e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER,
st->st_oakley.groupnum);
if (e != STF_SUSPEND && e != STF_INLINE) {
loglog(RC_CRYPTOFAILED, "system too busy");
delete_state(st);
}
reset_globals();
return e;
}
} | CWE-20 | 0 |
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len)
{
int r;
unsigned long addr;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = copy_from_user(data, (void __user *)addr + offset, len);
if (r)
return -EFAULT;
return 0;
} | CWE-20 | 0 |
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
sprintf(url_address, "%.*s", url_len, url_begin);
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
} | CWE-119 | 26 |
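`url_address` is 256 bytes, but `url_len` is derived from the input with no upper bound, so the `sprintf("%.*s", url_len, ...)` above can write past the array. A bounded sketch of that step (rejecting oversized names is one option; truncating via snprintf is another):

/* Sketch: refuse names that cannot fit the fixed buffer. */
char url_address[256];

if (url_len < 1 || (size_t)url_len >= sizeof(url_address))
	return false;           /* would overflow url_address */
snprintf(url_address, sizeof(url_address), "%.*s", url_len, url_begin);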
char *path_name(struct strbuf *path, const char *name)
{
struct strbuf ret = STRBUF_INIT;
if (path)
strbuf_addbuf(&ret, path);
strbuf_addstr(&ret, name);
return strbuf_detach(&ret, NULL);
} | CWE-119 | 26 |
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
tu->qused--;
spin_unlock_irq(&tu->qlock);
mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
mutex_unlock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
} | CWE-200 | 10 |
static int pop_sync_mailbox(struct Context *ctx, int *index_hint)
{
int i, j, ret = 0;
char buf[LONG_STRING];
struct PopData *pop_data = (struct PopData *) ctx->data;
struct Progress progress;
#ifdef USE_HCACHE
header_cache_t *hc = NULL;
#endif
pop_data->check_time = 0;
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
mutt_progress_init(&progress, _("Marking messages deleted..."),
MUTT_PROGRESS_MSG, WriteInc, ctx->deleted);
#ifdef USE_HCACHE
hc = pop_hcache_open(pop_data, ctx->path);
#endif
for (i = 0, j = 0, ret = 0; ret == 0 && i < ctx->msgcount; i++)
{
if (ctx->hdrs[i]->deleted && ctx->hdrs[i]->refno != -1)
{
j++;
if (!ctx->quiet)
mutt_progress_update(&progress, j, -1);
snprintf(buf, sizeof(buf), "DELE %d\r\n", ctx->hdrs[i]->refno);
ret = pop_query(pop_data, buf, sizeof(buf));
if (ret == 0)
{
mutt_bcache_del(pop_data->bcache, ctx->hdrs[i]->data);
#ifdef USE_HCACHE
mutt_hcache_delete(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data));
#endif
}
}
#ifdef USE_HCACHE
if (ctx->hdrs[i]->changed)
{
mutt_hcache_store(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data),
ctx->hdrs[i], 0);
}
#endif
}
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (ret == 0)
{
mutt_str_strfcpy(buf, "QUIT\r\n", sizeof(buf));
ret = pop_query(pop_data, buf, sizeof(buf));
}
if (ret == 0)
{
pop_data->clear_cache = true;
pop_clear_cache(pop_data);
pop_data->status = POP_DISCONNECTED;
return 0;
}
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
return -1;
}
}
} | CWE-119 | 26 |
unsigned long lh_char_hash(const void *k)
{
unsigned int h = 0;
const char* data = (const char*)k;
while( *data!=0 ) h = h*129 + (unsigned int)(*data++) + LH_PRIME;
return h;
} | CWE-119 | 26 |
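`lh_char_hash` walks until a NUL byte, so hashing a key that is not NUL-terminated reads out of bounds. A length-bounded variant avoids relying on termination; the `_n` name is invented for this sketch:

/* Sketch: take an explicit length instead of scanning for NUL. */
static unsigned long lh_char_hash_n(const char *data, size_t len)
{
	unsigned long h = 0;
	size_t i;

	for (i = 0; i < len; i++)
		h = h * 129 + (unsigned char)data[i] + LH_PRIME;
	return h;
}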
static int futex_wait(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q;
int ret;
if (!bitset)
return -EINVAL;
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = NULL;
q.requeue_pi_key = NULL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
if (!unqueue_me(&q))
goto out_put_key;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out_put_key;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current)) {
put_futex_key(fshared, &q.key);
goto retry;
}
ret = -ERESTARTSYS;
if (!abs_time)
goto out_put_key;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = (u32 *)uaddr;
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
restart->futex.flags = FLAGS_HAS_TIMEOUT;
if (fshared)
restart->futex.flags |= FLAGS_SHARED;
if (clockrt)
restart->futex.flags |= FLAGS_CLOCKRT;
ret = -ERESTART_RESTARTBLOCK;
out_put_key:
put_futex_key(fshared, &q.key);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
} | CWE-119 | 26 |
static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
int tmp;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
if ((*end != '\0') && !isspace(*end))
return -EINVAL;
uml_exitcode = tmp;
return count;
} | CWE-119 | 26 |
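`count` comes straight from user space while `buf` holds only `sizeof("nnnnn\0")` bytes, so a large write overruns the stack, and the buffer is never NUL-terminated before `simple_strtol` parses it. A clamped sketch (kernel-style `min()` assumed):

/* Sketch: clamp the copy to the buffer and terminate before parsing. */
char buf[sizeof("nnnnn\0")];
size_t n = min(count, sizeof(buf) - 1);

if (copy_from_user(buf, buffer, n))
	return -EFAULT;
buf[n] = '\0';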
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
} | CWE-20 | 0 |
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
struct super_block *sb = old->mnt.mnt_sb;
struct mount *mnt;
int err;
mnt = alloc_vfsmnt(old->mnt_devname);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
err = mnt_alloc_group_id(mnt);
if (err)
goto out_free;
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
/* Don't allow unprivileged users to change mount flags */
if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
/* Don't allow unprivileged users to reveal what is under a mount */
if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
mnt->mnt.mnt_flags |= MNT_LOCKED;
atomic_inc(&sb->s_active);
mnt->mnt.mnt_sb = sb;
mnt->mnt.mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
lock_mount_hash();
list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
unlock_mount_hash();
if ((flag & CL_SLAVE) ||
((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
CLEAR_MNT_SHARED(mnt);
} else if (!(flag & CL_PRIVATE)) {
if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
list_add(&mnt->mnt_share, &old->mnt_share);
if (IS_MNT_SLAVE(old))
list_add(&mnt->mnt_slave, &old->mnt_slave);
mnt->mnt_master = old->mnt_master;
}
if (flag & CL_MAKE_SHARED)
set_mnt_shared(mnt);
/* stick the duplicate mount on the same expiry list
* as the original if that was on one */
if (flag & CL_EXPIRE) {
if (!list_empty(&old->mnt_expire))
list_add(&mnt->mnt_expire, &old->mnt_expire);
}
return mnt;
out_free:
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
} | CWE-269 | 6 |
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md))
return NULL;
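	/*
	 * NOTE: likely CWE-362 race: the DMF_FREEING/dm_deleting_md() test
	 * and dm_get() below are not performed atomically, so the device
	 * can be torn down between the check and the reference grab.
	 */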
dm_get(md);
return md;
} | CWE-362 | 18 |
int user_update(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *zap = NULL;
int ret;
/* check the quota and attach the new data */
ret = key_payload_reserve(key, prep->datalen);
if (ret < 0)
return ret;
/* attach the new data, displacing the old */
key->expiry = prep->expiry;
if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
zap = dereference_key_locked(key);
rcu_assign_keypointer(key, prep->payload.data[0]);
prep->payload.data[0] = NULL;
if (zap)
call_rcu(&zap->rcu, user_free_payload_rcu);
return ret;
} | CWE-20 | 0 |
static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
struct common_audit_data sa;
struct apparmor_audit_data aad = {0,};
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer, AppArmor requires that
* the buffer must be null terminated or have size <= PAGE_SIZE -1
* so that AppArmor can null terminate them
*/
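	/*
	 * NOTE: likely CWE-119 off-by-one: if the caller allocates the
	 * buffer at exactly `size` bytes rather than PAGE_SIZE (as
	 * proc_pid_attr_write() does), args[size] = '\0' below writes one
	 * byte past the end of the allocation.
	 */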
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else
goto fail;
} else if (strcmp(name, "exec") == 0) {
if (strcmp(command, "exec") == 0)
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
else
goto fail;
} else
/* only support the "current" and "exec" process attributes */
return -EINVAL;
if (!error)
error = size;
return error;
fail:
sa.type = LSM_AUDIT_DATA_NONE;
sa.aad = &aad;
aad.profile = aa_current_profile();
aad.op = OP_SETPROCATTR;
aad.info = name;
aad.error = -EINVAL;
aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
return -EINVAL;
} | CWE-119 | 26 |
static int pfkey_recvmsg(struct kiocb *kiocb,
struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct pfkey_sock *pfk = pfkey_sk(sk);
struct sk_buff *skb;
int copied, err;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
goto out;
msg->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto out_free;
sock_recv_ts_and_drops(msg, sk, skb);
err = (flags & MSG_TRUNC) ? skb->len : copied;
if (pfk->dump.dump != NULL &&
3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
pfkey_do_dump(pfk);
out_free:
skb_free_datagram(sk, skb);
out:
return err;
} | CWE-20 | 0 |
int CLASS parse_jpeg(int offset)
{
int len, save, hlen, mark;
fseek(ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8)
return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda)
{
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9)
{
fgetc(ifp);
raw_height = get2();
raw_width = get2();
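      /* NOTE: likely CWE-119 source: raw_height/raw_width come straight
         from the file with no sanity check before they size buffers later */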
}
order = get2();
hlen = get4();
if (get4() == 0x48454150) /* "HEAP" */
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff(save + hlen, len - hlen, 0);
}
if (parse_tiff(save + 6))
apply_tiff();
fseek(ifp, save + len, SEEK_SET);
}
return 1;
} | CWE-119 | 26 |
header_put_be_short (SF_PRIVATE *psf, int x)
{ if (psf->headindex < SIGNED_SIZEOF (psf->header) - 2)
{ psf->header [psf->headindex++] = (x >> 8) ;
psf->header [psf->headindex++] = x ;
} ;
} /* header_put_be_short */ | CWE-119 | 26 |
struct inode *isofs_iget(struct super_block *sb,
unsigned long block,
unsigned long offset)
{
unsigned long hashval;
struct inode *inode;
struct isofs_iget5_callback_data data;
long ret;
if (offset >= 1ul << sb->s_blocksize_bits)
return ERR_PTR(-EINVAL);
data.block = block;
data.offset = offset;
hashval = (block << sb->s_blocksize_bits) | offset;
inode = iget5_locked(sb, hashval, &isofs_iget5_test,
&isofs_iget5_set, &data);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
ret = isofs_read_inode(inode);
if (ret < 0) {
iget_failed(inode);
inode = ERR_PTR(ret);
} else {
unlock_new_inode(inode);
}
}
return inode;
} | CWE-20 | 0 |
void mt_init(mtrand *mt, uint32_t seed) {
int i;
mt->mt_buffer_[0] = seed;
mt->mt_index_ = MT_LEN;
for (i = 1; i < MT_LEN; i++) {
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
/* In the previous versions, MSBs of the seed affect */
/* only MSBs of the array mt[]. */
/* 2002/01/09 modified by Makoto Matsumoto */
mt->mt_buffer_[i] =
(1812433253UL * (mt->mt_buffer_[i-1] ^
(mt->mt_buffer_[i-1] >> 30)) + i);
}
} | CWE-119 | 26 |
mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
struct sockaddr_mISDN *maddr;
int copied, err;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
__func__, (int)len, flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == MISDN_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
msg->msg_namelen = sizeof(struct sockaddr_mISDN);
maddr = (struct sockaddr_mISDN *)msg->msg_name;
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT)) {
maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
} else {
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xFF;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
}
} else {
if (msg->msg_namelen)
printk(KERN_WARNING "%s: too small namelen %d\n",
__func__, msg->msg_namelen);
msg->msg_namelen = 0;
}
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
atomic_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
}
memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
MISDN_HEADER_LEN);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
mISDN_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
} | CWE-20 | 0 |
static void audit_log_execve_info(struct audit_context *context,
struct audit_buffer **ab)
{
int i, len;
size_t len_sent = 0;
const char __user *p;
char *buf;
p = (const char __user *)current->mm->arg_start;
audit_log_format(*ab, "argc=%d", context->execve.argc);
/*
* we need some kernel buffer to hold the userspace args. Just
* allocate one big one rather than allocating one of the right size
* for every single argument inside audit_log_single_execve_arg()
* should be <8k allocation so should be pretty safe.
*/
buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
if (!buf) {
audit_panic("out of memory for argv string");
return;
}
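	/*
	 * NOTE: likely CWE-362 double-fetch: the argv strings live in user
	 * memory and can be rewritten by another thread between the reads
	 * performed inside audit_log_single_execve_arg(), so sizes and the
	 * data actually logged may disagree.
	 */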
for (i = 0; i < context->execve.argc; i++) {
len = audit_log_single_execve_arg(context, ab, i,
&len_sent, p, buf);
if (len <= 0)
break;
p += len;
}
kfree(buf);
} | CWE-362 | 18 |
X509_NAME_oneline_ex(X509_NAME * a,
char *buf,
unsigned int *size,
unsigned long flag)
{
BIO *out = NULL;
out = BIO_new(BIO_s_mem ());
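	/*
	 * NOTE: BIO_new() can fail and return NULL, which is then passed
	 * unchecked to X509_NAME_print_ex(); when buf is too small it is
	 * also returned unmodified, with only *size updated.
	 */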
if (X509_NAME_print_ex(out, a, 0, flag) > 0) {
if (buf != NULL && (*size) > (unsigned int) BIO_number_written(out)) {
memset(buf, 0, *size);
BIO_read(out, buf, (int) BIO_number_written(out));
}
else {
*size = BIO_number_written(out);
}
}
BIO_free(out);
return (buf);
} | CWE-119 | 26 |
static void prefetch_table(const volatile byte *tab, size_t len)
{
size_t i;
for (i = 0; i < len; i += 8 * 32)
{
(void)tab[i + 0 * 32];
(void)tab[i + 1 * 32];
(void)tab[i + 2 * 32];
(void)tab[i + 3 * 32];
(void)tab[i + 4 * 32];
(void)tab[i + 5 * 32];
(void)tab[i + 6 * 32];
(void)tab[i + 7 * 32];
}
(void)tab[len - 1];
} | CWE-668 | 7 |
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
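	/*
	 * NOTE: likely CWE-119: sta_id is used to index priv->stations[]
	 * throughout with no bounds check against the station table size.
	 */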
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
"addr %pM\n",
sta_id, priv->stations[sta_id].sta.sta.addr);
if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
IWL_DEBUG_ASSOC(priv,
"STA id %u addr %pM already present in uCode "
"(according to driver)\n",
sta_id, priv->stations[sta_id].sta.sta.addr);
} else {
priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
sta_id, priv->stations[sta_id].sta.sta.addr);
}
} | CWE-119 | 26 |
main (int argc, char *argv[])
{
unsigned cmdn;
int flags = IDN2_NONTRANSITIONAL;
setlocale (LC_ALL, "");
set_program_name (argv[0]);
bindtextdomain (PACKAGE, LOCALEDIR);
textdomain (PACKAGE);
if (cmdline_parser (argc, argv, &args_info) != 0)
return EXIT_FAILURE;
if (args_info.version_given)
{
version_etc (stdout, "idn2", PACKAGE_NAME, VERSION,
"Simon Josefsson", (char *) NULL);
return EXIT_SUCCESS;
}
if (args_info.help_given)
usage (EXIT_SUCCESS);
if (!args_info.quiet_given
&& args_info.inputs_num == 0 && isatty (fileno (stdin)))
fprintf (stderr, "%s %s\n" GREETING, PACKAGE, VERSION);
if (args_info.debug_given)
fprintf (stderr, _("Charset: %s\n"), locale_charset ());
if (!args_info.quiet_given
&& args_info.inputs_num == 0 && isatty (fileno (stdin)))
fprintf (stderr, "%s", _("Type each input string on a line by itself, "
"terminated by a newline character.\n"));
if (args_info.tr46t_given)
flags = IDN2_TRANSITIONAL;
else if (args_info.tr46nt_given)
flags = IDN2_NONTRANSITIONAL;
else if (args_info.no_tr46_given)
flags = IDN2_NO_TR46;
if (flags && args_info.usestd3asciirules_given)
flags |= IDN2_USE_STD3_ASCII_RULES;
for (cmdn = 0; cmdn < args_info.inputs_num; cmdn++)
process_input (args_info.inputs[cmdn], flags | IDN2_NFC_INPUT);
if (!cmdn)
{
char *buf = NULL;
size_t bufsize = 0;
while (getline (&buf, &bufsize, stdin) > 0)
process_input (buf, flags);
free (buf);
}
if (ferror (stdin))
error (EXIT_FAILURE, errno, "%s", _("input error"));
cmdline_parser_free (&args_info);
return EXIT_SUCCESS;
} | CWE-20 | 0 |
static void show_object(struct object *obj,
struct strbuf *path, const char *component,
void *cb_data)
{
struct rev_list_info *info = cb_data;
finish_object(obj, path, component, cb_data);
if (info->flags & REV_LIST_QUIET)
return;
show_object_with_name(stdout, obj, path, component);
} | CWE-119 | 26 |
static int read_fragment_table(long long *directory_table_end)
{
int res, i;
int bytes = SQUASHFS_FRAGMENT_BYTES(sBlk.s.fragments);
int indexes = SQUASHFS_FRAGMENT_INDEXES(sBlk.s.fragments);
long long fragment_table_index[indexes];
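	/*
	 * NOTE: likely CWE-20: sBlk.s.fragments is read from the untrusted
	 * image and used unvalidated to size both the on-stack index array
	 * above and the fragment_table allocation below.
	 */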
TRACE("read_fragment_table: %d fragments, reading %d fragment indexes "
"from 0x%llx\n", sBlk.s.fragments, indexes,
sBlk.s.fragment_table_start);
if(sBlk.s.fragments == 0) {
*directory_table_end = sBlk.s.fragment_table_start;
return TRUE;
}
fragment_table = malloc(bytes);
if(fragment_table == NULL)
EXIT_UNSQUASH("read_fragment_table: failed to allocate "
"fragment table\n");
res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
SQUASHFS_FRAGMENT_INDEX_BYTES(sBlk.s.fragments),
fragment_table_index);
if(res == FALSE) {
ERROR("read_fragment_table: failed to read fragment table "
"index\n");
return FALSE;
}
SQUASHFS_INSWAP_FRAGMENT_INDEXES(fragment_table_index, indexes);
for(i = 0; i < indexes; i++) {
int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
bytes & (SQUASHFS_METADATA_SIZE - 1);
int length = read_block(fd, fragment_table_index[i], NULL,
expected, ((char *) fragment_table) + (i *
SQUASHFS_METADATA_SIZE));
TRACE("Read fragment table block %d, from 0x%llx, length %d\n",
i, fragment_table_index[i], length);
if(length == FALSE) {
ERROR("read_fragment_table: failed to read fragment "
"table index\n");
return FALSE;
}
}
for(i = 0; i < sBlk.s.fragments; i++)
SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]);
*directory_table_end = fragment_table_index[0];
return TRUE;
} | CWE-20 | 0 |
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));
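		/*
		 * NOTE: possibly the labeled CWE-362: no barrier orders this
		 * cpumask update against the CR3 load below, so a concurrent
		 * flush_tlb_*() on another CPU can miss this CPU and leave a
		 * stale TLB entry.
		 */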
/* Re-load page tables */
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
/* Load per-mm CR4 state */
load_mm_cr4(next);
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
* Load the LDT, if the LDT is different.
*
* It's possible that prev->context.ldt doesn't match
* the LDT register. This can happen if leave_mm(prev)
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
#endif
}
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* On established mms, the mm_cpumask is only changed
* from irq context, from ptep_clear_flush() while in
* lazy tlb mode, and here. Irqs are blocked during
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
load_mm_ldt(next);
}
}
#endif
} | CWE-362 | 18 |
error_t ndpCheckOptions(const uint8_t *options, size_t length)
{
size_t i;
NdpOption *option;
//Point to the very first option of the NDP message
i = 0;
//Parse options
while((i + sizeof(NdpOption)) <= length)
{
//Point to the current option
option = (NdpOption *) (options + i);
//Nodes must silently discard an NDP message that contains
//an option with length zero
if(option->length == 0)
return ERROR_INVALID_OPTION;
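      //NOTE: likely CWE-20 gap: only the fixed-size option header is
      //validated against the buffer; i + option->length * 8 below may
      //step past `length`, so a truncated final option is not rejected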
//Jump to next the next option
i += option->length * 8;
}
//The Options field is valid
return NO_ERROR;
} | CWE-20 | 0 |
GIOChannel *net_connect_ip_ssl(IPADDR *ip, int port, IPADDR *my_ip, const char *cert, const char *pkey, const char *cafile, const char *capath, gboolean verify)
{
GIOChannel *handle, *ssl_handle;
handle = net_connect_ip(ip, port, my_ip);
if (handle == NULL)
return NULL;
ssl_handle = irssi_ssl_get_iochannel(handle, cert, pkey, cafile, capath, verify);
if (ssl_handle == NULL)
g_io_channel_unref(handle);
return ssl_handle;
} | CWE-20 | 0 |
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
BT_DBG("sock %p, sk %p", sock, sk);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == BT_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
return err;
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_RAW:
hci_sock_cmsg(sk, msg, skb);
break;
case HCI_CHANNEL_USER:
case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
sock_recv_timestamp(msg, sk, skb);
break;
}
skb_free_datagram(sk, skb);
return err ? : copied;
} | CWE-20 | 0 |
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int len;
unsigned long start=0, off;
struct au1200fb_device *fbdev = info->par;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
} | CWE-119 | 26 |
read_attribute(cdk_stream_t inp, size_t pktlen, cdk_pkt_userid_t attr,
int name_size)
{
const byte *p;
byte *buf;
size_t len, nread;
cdk_error_t rc;
if (!inp || !attr || !pktlen)
return CDK_Inv_Value;
if (DEBUG_PKT)
_gnutls_write_log("read_attribute: %d octets\n",
(int) pktlen);
_gnutls_str_cpy(attr->name, name_size, ATTRIBUTE);
attr->len = MIN(name_size, sizeof(ATTRIBUTE) - 1);
buf = cdk_calloc(1, pktlen);
if (!buf)
return CDK_Out_Of_Core;
rc = stream_read(inp, buf, pktlen, &nread);
if (rc) {
cdk_free(buf);
return CDK_Inv_Packet;
}
p = buf;
len = *p++;
pktlen--;
if (len == 255) {
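		/*
		 * NOTE: likely CWE-119: unlike the len >= 192 branch below,
		 * this path consumes 4 more bytes without checking that
		 * pktlen >= 4, permitting an out-of-bounds read of buf.
		 */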
len = _cdk_buftou32(p);
p += 4;
pktlen -= 4;
} else if (len >= 192) {
if (pktlen < 2) {
cdk_free(buf);
return CDK_Inv_Packet;
}
len = ((len - 192) << 8) + *p + 192;
p++;
pktlen--;
}
if (*p != 1) { /* Currently only 1, meaning an image, is defined. */
cdk_free(buf);
return CDK_Inv_Packet;
}
p++;
len--;
if (len >= pktlen) {
cdk_free(buf);
return CDK_Inv_Packet;
}
attr->attrib_img = cdk_calloc(1, len);
if (!attr->attrib_img) {
cdk_free(buf);
return CDK_Out_Of_Core;
}
attr->attrib_len = len;
memcpy(attr->attrib_img, p, len);
cdk_free(buf);
return rc;
} | CWE-119 | 26 |
static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
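	/*
	 * NOTE: likely defect behind the CWE-404 label: hid_hw_raw_request()
	 * may sleep, yet it is called below while holding a spinlock with
	 * IRQs disabled (upstream later converted this lock to a mutex).
	 */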
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
goto exit;
}
buf[1] &= ~(1 << offset);
buf[2] = gpio_push_pull;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
goto exit;
}
ret = 0;
exit:
spin_unlock_irqrestore(&dev->lock, flags);
return ret <= 0 ? ret : -EIO;
} | CWE-404 | 30 |
static int decode_bit_string(const u8 * inbuf, size_t inlen, void *outbuf,
size_t outlen, int invert)
{
const u8 *in = inbuf;
u8 *out = (u8 *) outbuf;
int zero_bits = *in & 0x07;
size_t octets_left = inlen - 1;
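	/*
	 * NOTE: likely CWE-119: *inbuf has already been dereferenced
	 * (zero_bits) and inlen - 1 computed above, before the inlen < 1
	 * check below, so an empty input underflows octets_left.
	 */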
int i, count = 0;
memset(outbuf, 0, outlen);
in++;
if (outlen < octets_left)
return SC_ERROR_BUFFER_TOO_SMALL;
if (inlen < 1)
return SC_ERROR_INVALID_ASN1_OBJECT;
while (octets_left) {
/* 1st octet of input: ABCDEFGH, where A is the MSB */
/* 1st octet of output: HGFEDCBA, where A is the LSB */
/* first bit in bit string is the LSB in first resulting octet */
int bits_to_go;
*out = 0;
if (octets_left == 1)
bits_to_go = 8 - zero_bits;
else
bits_to_go = 8;
if (invert)
for (i = 0; i < bits_to_go; i++) {
*out |= ((*in >> (7 - i)) & 1) << i;
}
else {
*out = *in;
}
out++;
in++;
octets_left--;
count++;
}
return (count * 8) - zero_bits;
} | CWE-119 | 26 |
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
muscle_private_t* priv = MUSCLE_DATA(card);
mscfs_t *fs = priv->fs;
int x;
int count = 0;
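	/*
	 * NOTE: likely CWE-119: bufLen is never consulted below, so each
	 * matching object appends two bytes to buf unconditionally and can
	 * overrun the caller's buffer.
	 */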
mscfs_check_cache(priv->fs);
for(x = 0; x < fs->cache.size; x++) {
u8* oid= fs->cache.array[x].objectId.id;
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"FILE: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
if(0 == memcmp(fs->currentPath, oid, 2)) {
buf[0] = oid[2];
buf[1] = oid[3];
if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */
buf += 2;
count+=2;
}
}
return count;
} | CWE-119 | 26 |
horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc)
{
TIFFPredictorState* sp = PredictorState(tif);
tmsize_t stride = sp->stride;
uint16 *wp = (uint16*) cp0;
tmsize_t wc = cc/2;
assert((cc%(2*stride))==0);
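	/* NOTE: likely CWE-119: this precondition is only an assert(),
	 * which is compiled out in release builds, so a cc that is not a
	 * multiple of 2*stride leads to out-of-bounds accesses below. */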
if (wc > stride) {
wc -= stride;
wp += wc - 1;
do {
REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned int)wp[0]) & 0xffff); wp--)
wc -= stride;
} while (wc > 0);
}
} | CWE-119 | 26 |
void sctp_generate_heartbeat_event(unsigned long data)
{
int error = 0;
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
sctp_transport_hold(transport);
goto out_unlock;
}
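	/*
	 * NOTE: possibly the labeled CWE-362: transport->dead is a bare
	 * flag with no synchronization against transport teardown, so the
	 * test below can still race with destruction of the transport.
	 */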
/* Is this structure just waiting around for us to actually
* get destroyed?
*/
if (transport->dead)
goto out_unlock;
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
asoc->state, asoc->ep, asoc,
transport, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
sctp_transport_put(transport);
} | CWE-362 | 18 |
void Huff_offsetTransmit (huff_t *huff, int ch, byte *fout, int *offset) {
bloc = *offset;
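	/* NOTE: possibly the labeled CWE-119: send() emits bits into fout
	 * with no bound on the output buffer, and ch is not range-checked
	 * before indexing huff->loc[]. */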
send(huff->loc[ch], NULL, fout);
*offset = bloc;
} | CWE-119 | 26 |
static int http_read_header(URLContext *h, int *new_location)
{
HTTPContext *s = h->priv_data;
char line[MAX_URL_SIZE];
int err = 0;
s->chunksize = -1;
for (;;) {
if ((err = http_get_line(s, line, sizeof(line))) < 0)
return err;
av_log(h, AV_LOG_TRACE, "header='%s'\n", line);
err = process_line(h, line, s->line_count, new_location);
if (err < 0)
return err;
if (err == 0)
break;
s->line_count++;
}
if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000)
h->is_streamed = 1; /* we can in fact _not_ seek */
// add any new cookies into the existing cookie string
cookie_string(s->cookie_dict, &s->cookies);
av_dict_free(&s->cookie_dict);
return err;
} | CWE-119 | 26 |
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int len;
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
msg->msg_namelen = 0;
return 0;
}
len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
atomic_sub(len, &sk->sk_rmem_alloc);
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
return len;
} | CWE-20 | 0 |