Columns: code (string, lengths 23 to 2.05k) | label_name (string, lengths 6 to 7) | label (int64, 0 to 37)
mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; struct sockaddr_mISDN *maddr; int copied, err; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n", __func__, (int)len, flags, _pms(sk)->ch.nr, sk->sk_protocol); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == MISDN_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (!skb) return err; if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { msg->msg_namelen = sizeof(struct sockaddr_mISDN); maddr = (struct sockaddr_mISDN *)msg->msg_name; maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) { maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff; maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff; maddr->sapi = mISDN_HEAD_ID(skb) & 0xff; } else { maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } } else { if (msg->msg_namelen) printk(KERN_WARNING "%s: too small namelen %d\n", __func__, msg->msg_namelen); msg->msg_namelen = 0; } copied = skb->len + MISDN_HEADER_LEN; if (len < copied) { if (flags & MSG_PEEK) atomic_dec(&skb->users); else skb_queue_head(&sk->sk_receive_queue, skb); return -ENOSPC; } memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb), MISDN_HEADER_LEN); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); mISDN_sock_cmsg(sk, msg, skb); skb_free_datagram(sk, skb); return err ? : copied; }
CWE-20
0
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( &ctx->req)); struct skcipher_sg_list *sgl; struct scatterlist *sg; unsigned long iovlen; struct iovec *iov; int err = -EAGAIN; int used; long copied = 0; lock_sock(sk); msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; while (seglen) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; used = ctx->used; if (!used) { err = skcipher_wait_for_data(sk, flags); if (err) goto unlock; } used = min_t(unsigned long, used, seglen); used = af_alg_make_sg(&ctx->rsgl, from, used, 1); err = used; if (err < 0) goto unlock; if (ctx->more || used < ctx->used) used -= used % bs; err = -EINVAL; if (!used) goto free; ablkcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, ctx->iv); err = af_alg_wait_for_completion( ctx->enc ? crypto_ablkcipher_encrypt(&ctx->req) : crypto_ablkcipher_decrypt(&ctx->req), &ctx->completion); free: af_alg_free_sg(&ctx->rsgl); if (err) goto unlock; copied += used; from += used; seglen -= used; skcipher_pull_sgl(sk, used); } } err = 0; unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return copied ?: err; }
CWE-20
0
ssh_packet_get_compress_state(struct sshbuf *m, struct ssh *ssh) { struct session_state *state = ssh->state; struct sshbuf *b; int r; if ((b = sshbuf_new()) == NULL) return SSH_ERR_ALLOC_FAIL; if (state->compression_in_started) { if ((r = sshbuf_put_string(b, &state->compression_in_stream, sizeof(state->compression_in_stream))) != 0) goto out; } else if ((r = sshbuf_put_string(b, NULL, 0)) != 0) goto out; if (state->compression_out_started) { if ((r = sshbuf_put_string(b, &state->compression_out_stream, sizeof(state->compression_out_stream))) != 0) goto out; } else if ((r = sshbuf_put_string(b, NULL, 0)) != 0) goto out; r = sshbuf_put_stringb(m, b); out: sshbuf_free(b); return r; }
CWE-119
26
static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); }
CWE-119
26
static void perf_output_wakeup(struct perf_output_handle *handle) { atomic_set(&handle->rb->poll, POLL_IN); if (handle->nmi) { handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending); } else perf_event_wakeup(handle->event); }
CWE-400
2
void lzxd_free(struct lzxd_stream *lzx) { struct mspack_system *sys; if (lzx) { sys = lzx->sys; sys->free(lzx->inbuf); sys->free(lzx->window); sys->free(lzx); } }
CWE-119
26
static void finish_object(struct object *obj, struct strbuf *path, const char *name, void *cb_data) { struct rev_list_info *info = cb_data; if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) die("missing blob object '%s'", oid_to_hex(&obj->oid)); if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT) parse_object(obj->oid.hash); }
CWE-119
26
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int err; struct sk_buff *skb; struct sock *sk = sock->sk; err = -EIO; if (sk->sk_state & PPPOX_BOUND) goto end; msg->msg_namelen = 0; err = 0; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) goto end; if (len > skb->len) len = skb->len; else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); if (likely(err == 0)) err = len; kfree_skb(skb); end: return err; }
CWE-20
0
static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int change; struct user_element *ue = kcontrol->private_data; change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0; if (change) memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size); return change; }
CWE-362
18
rpmRC rpmReadPackageFile(rpmts ts, FD_t fd, const char * fn, Header * hdrp) { char *msg = NULL; Header h = NULL; Header sigh = NULL; hdrblob blob = NULL; hdrblob sigblob = NULL; rpmVSFlags vsflags = rpmtsVSFlags(ts) | RPMVSF_NEEDPAYLOAD; rpmKeyring keyring = rpmtsGetKeyring(ts, 1); struct rpmvs_s *vs = rpmvsCreate(0, vsflags, keyring); struct pkgdata_s pkgdata = { .msgfunc = loghdrmsg, .fn = fn ? fn : Fdescr(fd), .msg = NULL, .rc = RPMRC_OK, }; /* XXX: lots of 3rd party software relies on the behavior */ if (hdrp) *hdrp = NULL; rpmRC rc = rpmpkgRead(vs, fd, &sigblob, &blob, &msg); if (rc) goto exit; /* Actually all verify discovered signatures and digests */ rc = RPMRC_FAIL; if (!rpmvsVerify(vs, RPMSIG_VERIFIABLE_TYPE, handleHdrVS, &pkgdata)) { /* Finally import the headers and do whatever required retrofits etc */ if (hdrp) { if (hdrblobImport(sigblob, 0, &sigh, &msg)) goto exit; if (hdrblobImport(blob, 0, &h, &msg)) goto exit; /* Append (and remap) signature tags to the metadata. */ headerMergeLegacySigs(h, sigh); applyRetrofits(h); /* Bump reference count for return. */ *hdrp = headerLink(h); } rc = RPMRC_OK; } /* If there was a "substatus" (NOKEY in practise), return that instead */ if (rc == RPMRC_OK && pkgdata.rc) rc = pkgdata.rc; exit: if (rc && msg) rpmlog(RPMLOG_ERR, "%s: %s\n", Fdescr(fd), msg); hdrblobFree(sigblob); hdrblobFree(blob); headerFree(sigh); headerFree(h); rpmKeyringFree(keyring); rpmvsFree(vs); free(msg); return rc; }
CWE-345
22
static int __videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma) { struct videbuf_vmalloc_memory *mem; struct videobuf_mapping *map; unsigned int first; int retval; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; if (! (vma->vm_flags & VM_WRITE) || ! (vma->vm_flags & VM_SHARED)) return -EINVAL; /* look for first buffer to map */ for (first = 0; first < VIDEO_MAX_FRAME; first++) { if (NULL == q->bufs[first]) continue; if (V4L2_MEMORY_MMAP != q->bufs[first]->memory) continue; if (q->bufs[first]->boff == offset) break; } if (VIDEO_MAX_FRAME == first) { dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n", (vma->vm_pgoff << PAGE_SHIFT)); return -EINVAL; } /* create mapping + update buffer list */ map = q->bufs[first]->map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL); if (NULL == map) return -ENOMEM; map->start = vma->vm_start; map->end = vma->vm_end; map->q = q; q->bufs[first]->baddr = vma->vm_start; vma->vm_ops = &videobuf_vm_ops; vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; vma->vm_private_data = map; mem=q->bufs[first]->priv; BUG_ON (!mem); MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM); /* Try to remap memory */ retval=remap_vmalloc_range(vma, mem->vmalloc,0); if (retval<0) { dprintk(1,"mmap: postponing remap_vmalloc_range\n"); mem->vma=kmalloc(sizeof(*vma),GFP_KERNEL); if (!mem->vma) { kfree(map); q->bufs[first]->map=NULL; return -ENOMEM; } memcpy(mem->vma,vma,sizeof(*vma)); } dprintk(1,"mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n", map,q,vma->vm_start,vma->vm_end, (long int) q->bufs[first]->bsize, vma->vm_pgoff,first); videobuf_vm_open(vma); return (0); }
CWE-119
26
mark_trusted_task_thread_func (GTask *task, gpointer source_object, gpointer task_data, GCancellable *cancellable) { MarkTrustedJob *job = task_data; CommonJob *common; common = (CommonJob *) job; nautilus_progress_info_start (job->common.progress); mark_desktop_file_trusted (common, cancellable, job->file, job->interactive); }
CWE-20
0
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; }
CWE-119
26
test_save_copy (const char *origname) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; }
CWE-119
26
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) { struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; struct tipc_aead_key *skey = NULL; u16 key_gen = msg_key_gen(hdr); u16 size = msg_data_sz(hdr); u8 *data = msg_data(hdr); spin_lock(&rx->lock); if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, rx->skey, key_gen, rx->key_gen); goto exit; } /* Allocate memory for the key */ skey = kmalloc(size, GFP_ATOMIC); if (unlikely(!skey)) { pr_err("%s: unable to allocate memory for skey\n", rx->name); goto exit; } /* Copy key from msg data */ skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->keylen); /* Sanity check */ if (unlikely(size != tipc_aead_key_size(skey))) { kfree(skey); skey = NULL; goto exit; } rx->key_gen = key_gen; rx->skey_mode = msg_key_mode(hdr); rx->skey = skey; rx->nokey = 0; mb(); /* for nokey flag */ exit: spin_unlock(&rx->lock); /* Schedule the key attaching on this crypto */ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) return true; return false; }
CWE-20
0
static int read_uids_guids(long long *table_start) { int res, i; int bytes = SQUASHFS_ID_BYTES(sBlk.s.no_ids); int indexes = SQUASHFS_ID_BLOCKS(sBlk.s.no_ids); long long id_index_table[indexes]; TRACE("read_uids_guids: no_ids %d\n", sBlk.s.no_ids); id_table = malloc(bytes); if(id_table == NULL) { ERROR("read_uids_guids: failed to allocate id table\n"); return FALSE; } res = read_fs_bytes(fd, sBlk.s.id_table_start, SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids), id_index_table); if(res == FALSE) { ERROR("read_uids_guids: failed to read id index table\n"); return FALSE; } SQUASHFS_INSWAP_ID_BLOCKS(id_index_table, indexes); /* * id_index_table[0] stores the start of the compressed id blocks. * This by definition is also the end of the previous filesystem * table - this may be the exports table if it is present, or the * fragments table if it isn't. */ *table_start = id_index_table[0]; for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); res = read_block(fd, id_index_table[i], NULL, expected, ((char *) id_table) + i * SQUASHFS_METADATA_SIZE); if(res == FALSE) { ERROR("read_uids_guids: failed to read id table block" "\n"); return FALSE; } } SQUASHFS_INSWAP_INTS(id_table, sBlk.s.no_ids); return TRUE; }
CWE-20
0
error_t ftpClientParsePwdReply(FtpClientContext *context, char_t *path, size_t maxLen) { size_t length; char_t *p; //Search for the last double quote p = strrchr(context->buffer, '\"'); //Failed to parse the response? if(p == NULL) return ERROR_INVALID_SYNTAX; //Split the string *p = '\0'; //Search for the first double quote p = strchr(context->buffer, '\"'); //Failed to parse the response? if(p == NULL) return ERROR_INVALID_SYNTAX; //Retrieve the length of the working directory length = osStrlen(p + 1); //Limit the number of characters to copy length = MIN(length, maxLen); //Copy the string osStrncpy(path, p + 1, length); //Properly terminate the string with a NULL character path[length] = '\0'; //Successful processing return NO_ERROR; }
CWE-20
0
parseuid(const char *s, uid_t *uid) { struct passwd *pw; const char *errstr; if ((pw = getpwnam(s)) != NULL) { *uid = pw->pw_uid; return 0; } #if !defined(__linux__) && !defined(__NetBSD__) *uid = strtonum(s, 0, UID_MAX, &errstr); #else sscanf(s, "%d", uid); #endif if (errstr) return -1; return 0; }
CWE-863
11
void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { #ifdef CONFIG_SCHED_DEBUG /* * We should never call set_task_cpu() on a blocked task, * ttwu() will sort out the placement. */ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); #ifdef CONFIG_LOCKDEP /* * The caller should hold either p->pi_lock or rq->lock, when changing * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. * * sched_move_task() holds both and thus holding either pins the cgroup, * see set_task_rq(). * * Furthermore, all task_rq users should acquire both locks, see * task_rq_lock(). */ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || lockdep_is_held(&task_rq(p)->lock))); #endif #endif trace_sched_migrate_task(p, new_cpu); if (task_cpu(p) != new_cpu) { p->se.nr_migrations++; perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); } __set_task_cpu(p, new_cpu); }
CWE-400
2
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; size_t copied; struct sk_buff *skb; int er; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) { release_sock(sk); return er; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (er < 0) { skb_free_datagram(sk, skb); release_sock(sk); return er; } if (sax != NULL) { memset(sax, 0, sizeof(*sax)); sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); } msg->msg_namelen = sizeof(*sax); skb_free_datagram(sk, skb); release_sock(sk); return copied; }
CWE-20
0
const char * util_acl_to_str(const sc_acl_entry_t *e) { static char line[80], buf[20]; unsigned int acl; if (e == NULL) return "N/A"; line[0] = 0; while (e != NULL) { acl = e->method; switch (acl) { case SC_AC_UNKNOWN: return "N/A"; case SC_AC_NEVER: return "NEVR"; case SC_AC_NONE: return "NONE"; case SC_AC_CHV: strcpy(buf, "CHV"); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "%d", e->key_ref); break; case SC_AC_TERM: strcpy(buf, "TERM"); break; case SC_AC_PRO: strcpy(buf, "PROT"); break; case SC_AC_AUT: strcpy(buf, "AUTH"); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 4, "%d", e->key_ref); break; case SC_AC_SEN: strcpy(buf, "Sec.Env. "); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "#%d", e->key_ref); break; case SC_AC_SCB: strcpy(buf, "Sec.ControlByte "); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "Ox%X", e->key_ref); break; case SC_AC_IDA: strcpy(buf, "PKCS#15 AuthID "); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "#%d", e->key_ref); break; default: strcpy(buf, "????"); break; } strcat(line, buf); strcat(line, " "); e = e->next; } line[strlen(line)-1] = 0; /* get rid of trailing space */ return line; }
CWE-119
26
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, int *peeked, int *off, int *err) { struct sk_buff *skb; long timeo; /* * Caller is allowed not to check sk->sk_err before skb_recv_datagram() */ int error = sock_error(sk); if (error) goto no_packet; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { /* Again only user level code calls this function, so nothing * interrupt level will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ unsigned long cpu_flags; struct sk_buff_head *queue = &sk->sk_receive_queue; spin_lock_irqsave(&queue->lock, cpu_flags); skb_queue_walk(queue, skb) { *peeked = skb->peeked; if (flags & MSG_PEEK) { if (*off >= skb->len) { *off -= skb->len; continue; } skb->peeked = 1; atomic_inc(&skb->users); } else __skb_unlink(skb, queue); spin_unlock_irqrestore(&queue->lock, cpu_flags); return skb; } spin_unlock_irqrestore(&queue->lock, cpu_flags); /* User doesn't want to wait */ error = -EAGAIN; if (!timeo) goto no_packet; } while (!wait_for_packet(sk, err, &timeo)); return NULL; no_packet: *err = error; return NULL; }
CWE-20
0
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
CWE-20
0
error_t dm9000SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t i; size_t length; uint16_t *p; Dm9000Context *context; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > ETH_MAX_FRAME_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Copy user data netBufferRead(context->txBuffer, buffer, offset, length); //A dummy write is required before accessing FIFO dm9000WriteReg(DM9000_REG_MWCMDX, 0); //Select MWCMD register DM9000_INDEX_REG = DM9000_REG_MWCMD; //Point to the beginning of the buffer p = (uint16_t *) context->txBuffer; //Write data to the FIFO using 16-bit mode for(i = length; i > 1; i -= 2) { DM9000_DATA_REG = *(p++); } //Odd number of bytes? if(i > 0) { DM9000_DATA_REG = *((uint8_t *) p); } //Write the number of bytes to send dm9000WriteReg(DM9000_REG_TXPLL, LSB(length)); dm9000WriteReg(DM9000_REG_TXPLH, MSB(length)); //Clear interrupt flag dm9000WriteReg(DM9000_REG_ISR, ISR_PT); //Start data transfer dm9000WriteReg(DM9000_REG_TCR, TCR_TXREQ); //The packet was successfully written to FIFO context->queuedPackets++; //Successful processing return NO_ERROR; }
CWE-20
0
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options *opt; opt = inet_sk(sk)->opt; if (opt == NULL || opt->cipso == 0) return -ENOMSG; return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), secattr); }
CWE-362
18
static inline int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char err_offset = 0; u8 opt_len = opt[1]; u8 opt_iter; if (opt_len < 8) { err_offset = 1; goto out; } if (get_unaligned_be32(&opt[2]) == 0) { err_offset = 2; goto out; } for (opt_iter = 6; opt_iter < opt_len;) { if (opt[opt_iter + 1] > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto out; } opt_iter += opt[opt_iter + 1]; } out: *option = opt + err_offset; return err_offset; }
CWE-400
2
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; vapic = kmap_atomic(vcpu->arch.apic->vapic_page); data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); kunmap_atomic(vapic); apic_set_tpr(vcpu->arch.apic, data & 0xff); }
CWE-20
0
static int atusb_read_reg(struct atusb *atusb, uint8_t reg) { struct usb_device *usb_dev = atusb->usb_dev; int ret; uint8_t value; dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, reg, &value, 1, 1000); return ret >= 0 ? value : ret; }
CWE-119
26
horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { wp += n + stride - 1; /* point to last one */ ip += n + stride - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } }
CWE-119
26
void dm9000WritePhyReg(uint8_t address, uint16_t data) { //Write PHY register address dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address); //Write register value dm9000WriteReg(DM9000_REG_EPDRL, LSB(data)); dm9000WriteReg(DM9000_REG_EPDRH, MSB(data)); //Start the write operation dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRW); //PHY access is still in progress? while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0) { } //Wait 5us minimum usleep(5); //Clear command register dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS); }
CWE-20
0
static int http_read_stream(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int err, new_location, read_ret; int64_t seek_ret; if (!s->hd) return AVERROR_EOF; if (s->end_chunked_post && !s->end_header) { err = http_read_header(h, &new_location); if (err < 0) return err; } if (s->chunksize >= 0) { if (!s->chunksize) { char line[32]; do { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; } while (!*line); /* skip CR LF from last chunk */ s->chunksize = strtoll(line, NULL, 16); av_log(NULL, AV_LOG_TRACE, "Chunked encoding data size: %"PRId64"'\n", s->chunksize); if (!s->chunksize) return 0; } size = FFMIN(size, s->chunksize); } #if CONFIG_ZLIB if (s->compressed) return http_buf_read_compressed(h, buf, size); #endif /* CONFIG_ZLIB */ read_ret = http_buf_read(h, buf, size); if ( (read_ret < 0 && s->reconnect && (!h->is_streamed || s->reconnect_streamed) && s->filesize > 0 && s->off < s->filesize) || (read_ret == 0 && s->reconnect_at_eof && (!h->is_streamed || s->reconnect_streamed))) { int64_t target = h->is_streamed ? 0 : s->off; if (s->reconnect_delay > s->reconnect_delay_max) return AVERROR(EIO); av_log(h, AV_LOG_INFO, "Will reconnect at %"PRId64" error=%s.\n", s->off, av_err2str(read_ret)); av_usleep(1000U*1000*s->reconnect_delay); s->reconnect_delay = 1 + 2*s->reconnect_delay; seek_ret = http_seek_internal(h, target, SEEK_SET, 1); if (seek_ret != target) { av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRId64".\n", target); return read_ret; } read_ret = http_buf_read(h, buf, size); } else s->reconnect_delay = 0; return read_ret; }
CWE-119
26
static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, const struct in6_addr *dst, const struct in6_addr *src) { u32 hash, id; hash = __ipv6_addr_jhash(dst, hashrnd); hash = __ipv6_addr_jhash(src, hash); hash ^= net_hash_mix(net); /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, * set the hight order instead thus minimizing possible future * collisions. */ id = ip_idents_reserve(hash, 1); if (unlikely(!id)) id = 1 << 31; return id; }
CWE-326
9
static void mspack_fmap_free(void *mem) { free(mem); }
CWE-119
26
static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; int id, retval; key_t key = params->key; int msgflg = params->flg; msq = ipc_rcu_alloc(sizeof(*msq)); if (!msq) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { ipc_rcu_putref(msq, ipc_rcu_free); return retval; } /* ipc_addid() locks msq upon success. */ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { ipc_rcu_putref(msq, msg_rcu_free); return id; } msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; msq->q_qbytes = ns->msg_ctlmnb; msq->q_lspid = msq->q_lrpid = 0; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); return msq->q_perm.id; }
CWE-362
18
horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; assert((cc%(4*stride))==0); if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] += wp[0]; wp++) wc -= stride; } while (wc > 0); } }
CWE-119
26
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name; struct ddpehdr *ddp; int copied = 0; int offset = 0; int err = 0; struct sk_buff *skb; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); lock_sock(sk); if (!skb) goto out; /* FIXME: use skb->cb to be able to use shared skbs */ ddp = ddp_hdr(skb); copied = ntohs(ddp->deh_len_hops) & 1023; if (sk->sk_type != SOCK_RAW) { offset = sizeof(*ddp); copied -= offset; } if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied); if (!err) { if (sat) { sat->sat_family = AF_APPLETALK; sat->sat_port = ddp->deh_sport; sat->sat_addr.s_node = ddp->deh_snode; sat->sat_addr.s_net = ddp->deh_snet; } msg->msg_namelen = sizeof(*sat); } skb_free_datagram(sk, skb); /* Free the datagram. */ out: release_sock(sk); return err ? : copied; }
CWE-20
0
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, struct sockaddr_storage *kern_address, int mode) { int tot_len; if (kern_msg->msg_namelen) { if (mode == VERIFY_READ) { int err = move_addr_to_kernel(kern_msg->msg_name, kern_msg->msg_namelen, kern_address); if (err < 0) return err; } kern_msg->msg_name = kern_address; } else kern_msg->msg_name = NULL; tot_len = iov_from_user_compat_to_kern(kern_iov, (struct compat_iovec __user *)kern_msg->msg_iov, kern_msg->msg_iovlen); if (tot_len >= 0) kern_msg->msg_iov = kern_iov; return tot_len; }
CWE-20
0
static int jpc_ppm_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out) { jpc_ppm_t *ppm = &ms->parms.ppm; /* Eliminate compiler warning about unused variables. */ cstate = 0; if (JAS_CAST(uint, jas_stream_write(out, (char *) ppm->data, ppm->len)) != ppm->len) { return -1; } return 0; }
CWE-20
0
int read_file(struct sc_card *card, char *str_path, unsigned char **data, size_t *data_len) { struct sc_path path; struct sc_file *file; unsigned char *p; int ok = 0; int r; size_t len; sc_format_path(str_path, &path); if (SC_SUCCESS != sc_select_file(card, &path, &file)) { goto err; } len = file ? file->size : 4096; p = realloc(*data, len); if (!p) { goto err; } *data = p; *data_len = len; r = sc_read_binary(card, 0, p, len, 0); if (r < 0) goto err; *data_len = r; ok = 1; err: sc_file_free(file); return ok; }
CWE-119
26
static ssize_t environ_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *page; unsigned long src = *ppos; int ret = 0; struct mm_struct *mm = file->private_data; unsigned long env_start, env_end; if (!mm) return 0; page = (char *)__get_free_page(GFP_TEMPORARY); if (!page) return -ENOMEM; ret = 0; if (!atomic_inc_not_zero(&mm->mm_users)) goto free; down_read(&mm->mmap_sem); env_start = mm->env_start; env_end = mm->env_end; up_read(&mm->mmap_sem); while (count > 0) { size_t this_len, max_len; int retval; if (src >= (env_end - env_start)) break; this_len = env_end - (env_start + src); max_len = min_t(size_t, PAGE_SIZE, count); this_len = min(max_len, this_len); retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); if (retval <= 0) { ret = retval; break; } if (copy_to_user(buf, page, retval)) { ret = -EFAULT; break; } ret += retval; src += retval; buf += retval; count -= retval; } *ppos = src; mmput(mm); free: free_page((unsigned long) page); return ret; }
CWE-362
18
long keyctl_set_reqkey_keyring(int reqkey_defl) { struct cred *new; int ret, old_setting; old_setting = current_cred_xxx(jit_keyring); if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) return old_setting; new = prepare_creds(); if (!new) return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: ret = install_thread_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: ret = install_process_keyring_to_cred(new); if (ret < 0) { if (ret != -EEXIST) goto error; ret = 0; } goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: goto set; case KEY_REQKEY_DEFL_NO_CHANGE: case KEY_REQKEY_DEFL_GROUP_KEYRING: default: ret = -EINVAL; goto error; } set: new->jit_keyring = reqkey_defl; commit_creds(new); return old_setting; error: abort_creds(new); return ret; }
CWE-404
30
int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac) { struct cm_id_private *cm_id_priv; cm_id_priv = container_of(id, struct cm_id_private, id); if (smac != NULL) memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac)); if (alt_smac != NULL) memcpy(cm_id_priv->alt_av.smac, alt_smac, sizeof(cm_id_priv->alt_av.smac)); return 0; }
CWE-20
0
horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; unsigned char* cp = (unsigned char*) cp0; assert((cc%stride)==0); if (cc > stride) { cc -= stride; /* * Pipeline the most common cases. */ if (stride == 3) { unsigned int r1, g1, b1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; do { r1 = cp[3]; cp[3] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[4]; cp[4] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[5]; cp[5] = (unsigned char)((b1-b2)&0xff); b2 = b1; cp += 3; } while ((cc -= 3) > 0); } else if (stride == 4) { unsigned int r1, g1, b1, a1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; unsigned int a2 = cp[3]; do { r1 = cp[4]; cp[4] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[5]; cp[5] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[6]; cp[6] = (unsigned char)((b1-b2)&0xff); b2 = b1; a1 = cp[7]; cp[7] = (unsigned char)((a1-a2)&0xff); a2 = a1; cp += 4; } while ((cc -= 4) > 0); } else { cp += cc - 1; do { REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) } while ((cc -= stride) > 0); } } }
CWE-119
26
static void bt_tags_for_each(struct blk_mq_tags *tags, struct blk_mq_bitmap_tags *bt, unsigned int off, busy_tag_iter_fn *fn, void *data, bool reserved) { struct request *rq; int bit, i; if (!tags->rqs) return; for (i = 0; i < bt->map_nr; i++) { struct blk_align_bitmap *bm = &bt->map[i]; for (bit = find_first_bit(&bm->word, bm->depth); bit < bm->depth; bit = find_next_bit(&bm->word, bm->depth, bit + 1)) { rq = blk_mq_tag_to_rq(tags, off + bit); fn(rq, data, reserved); } off += (1 << bt->bits_per_word); } }
CWE-362
18
static RList* entries(RBinFile* bf) { RList* ret = NULL; RBinAddr* addr = NULL; psxexe_header psxheader; if (!(ret = r_list_new ())) { return NULL; } if (!(addr = R_NEW0 (RBinAddr))) { r_list_free (ret); return NULL; } if (r_buf_fread_at (bf->buf, 0, (ut8*)&psxheader, "8c17i", 1) < sizeof (psxexe_header)) { eprintf ("PSXEXE Header truncated\n"); r_list_free (ret); free (addr); return NULL; } addr->paddr = (psxheader.pc0 - psxheader.t_addr) + PSXEXE_TEXTSECTION_OFFSET; addr->vaddr = psxheader.pc0; r_list_append (ret, addr); return ret; }
CWE-400
2
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; if (!name) return ERR_PTR(-ENOENT); mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); type &= mask; alg = crypto_alg_lookup(name, type, mask); if (!alg) { request_module("%s", name); if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & CRYPTO_ALG_NEED_FALLBACK)) request_module("%s-all", name); alg = crypto_alg_lookup(name, type, mask); } if (alg) return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; return crypto_larval_add(name, type, mask); }
CWE-269
6
check_entry_size_and_hooks(struct ipt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
CWE-119
26
static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. */ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); bcm_remove_op(op); } #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; }
CWE-362
18
static int save_dev(blkid_dev dev, FILE *file) { struct list_head *p; if (!dev || dev->bid_name[0] != '/') return 0; DBG(SAVE, ul_debug("device %s, type %s", dev->bid_name, dev->bid_type ? dev->bid_type : "(null)")); fprintf(file, "<device DEVNO=\"0x%04lx\" TIME=\"%ld.%ld\"", (unsigned long) dev->bid_devno, (long) dev->bid_time, (long) dev->bid_utime); if (dev->bid_pri) fprintf(file, " PRI=\"%d\"", dev->bid_pri); list_for_each(p, &dev->bid_tags) { blkid_tag tag = list_entry(p, struct blkid_struct_tag, bit_tags); fprintf(file, " %s=\"%s\"", tag->bit_name,tag->bit_val); } fprintf(file, ">%s</device>\n", dev->bid_name); return 0; }
CWE-77
14
static void server_real_connect(SERVER_REC *server, IPADDR *ip, const char *unix_socket) { GIOChannel *handle; IPADDR *own_ip = NULL; const char *errmsg; char *errmsg2; char ipaddr[MAX_IP_LEN]; int port; g_return_if_fail(ip != NULL || unix_socket != NULL); signal_emit("server connecting", 2, server, ip); if (server->connrec->no_connect) return; if (ip != NULL) { own_ip = ip == NULL ? NULL : (IPADDR_IS_V6(ip) ? server->connrec->own_ip6 : server->connrec->own_ip4); port = server->connrec->proxy != NULL ? server->connrec->proxy_port : server->connrec->port; handle = server->connrec->use_ssl ? net_connect_ip_ssl(ip, port, own_ip, server->connrec->ssl_cert, server->connrec->ssl_pkey, server->connrec->ssl_cafile, server->connrec->ssl_capath, server->connrec->ssl_verify) : net_connect_ip(ip, port, own_ip); } else { handle = net_connect_unix(unix_socket); } if (handle == NULL) { /* failed */ errmsg = g_strerror(errno); errmsg2 = NULL; if (errno == EADDRNOTAVAIL) { if (own_ip != NULL) { /* show the IP which is causing the error */ net_ip2host(own_ip, ipaddr); errmsg2 = g_strconcat(errmsg, ": ", ipaddr, NULL); } server->no_reconnect = TRUE; } if (server->connrec->use_ssl && errno == ENOSYS) server->no_reconnect = TRUE; server->connection_lost = TRUE; server_connect_failed(server, errmsg2 ? errmsg2 : errmsg); g_free(errmsg2); } else { server->handle = net_sendbuffer_create(handle, 0); #ifdef HAVE_OPENSSL if (server->connrec->use_ssl) server_connect_callback_init_ssl(server, handle); else #endif server->connect_tag = g_input_add(handle, G_INPUT_WRITE | G_INPUT_READ, (GInputFunction) server_connect_callback_init, server); } }
CWE-20
0
COMPAT_SYSCALL_DEFINE5(waitid, int, which, compat_pid_t, pid, struct compat_siginfo __user *, infop, int, options, struct compat_rusage __user *, uru) { struct rusage ru; struct waitid_info info = {.status = 0}; long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL); int signo = 0; if (err > 0) { signo = SIGCHLD; err = 0; } if (!err && uru) { /* kernel_waitid() overwrites everything in ru */ if (COMPAT_USE_64BIT_TIME) err = copy_to_user(uru, &ru, sizeof(ru)); else err = put_compat_rusage(&ru, uru); if (err) return -EFAULT; } if (!infop) return err; user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); unsafe_put_user(info.pid, &infop->si_pid, Efault); unsafe_put_user(info.uid, &infop->si_uid, Efault); unsafe_put_user(info.status, &infop->si_status, Efault); user_access_end(); return err; Efault: user_access_end(); return -EFAULT; }
CWE-200
10
header_put_le_3byte (SF_PRIVATE *psf, int x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 3) { psf->header [psf->headindex++] = x ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = (x >> 16) ; } ; } /* header_put_le_3byte */
CWE-119
26
int rm_rf_child(int fd, const char *name, RemoveFlags flags) { /* Removes one specific child of the specified directory */ if (fd < 0) return -EBADF; if (!filename_is_valid(name)) return -EINVAL; if ((flags & (REMOVE_ROOT|REMOVE_MISSING_OK)) != 0) /* Doesn't really make sense here, we are not supposed to remove 'fd' anyway */ return -EINVAL; if (FLAGS_SET(flags, REMOVE_ONLY_DIRECTORIES|REMOVE_SUBVOLUME)) return -EINVAL; return rm_rf_children_inner(fd, name, -1, flags, NULL); }
CWE-674
28
static int raw_cmd_copyout(int cmd, void __user *param, struct floppy_raw_cmd *ptr) { int ret; while (ptr) { ret = copy_to_user(param, ptr, sizeof(*ptr)); if (ret) return -EFAULT; param += sizeof(struct floppy_raw_cmd); if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) { if (ptr->length >= 0 && ptr->length <= ptr->buffer_length) { long length = ptr->buffer_length - ptr->length; ret = fd_copyout(ptr->data, ptr->kernel_data, length); if (ret) return ret; } } ptr = ptr->next; } return 0; }
CWE-200
10
static void update_read_synchronize(rdpUpdate* update, wStream* s) { WINPR_UNUSED(update); Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ /** * The Synchronize Update is an artifact from the * T.128 protocol and should be ignored. */ }
CWE-119
26
static unsigned char *oidc_cache_hash_passphrase(request_rec *r, const char *passphrase) { unsigned char *key = NULL; unsigned int key_len = 0; oidc_jose_error_t err; if (oidc_jose_hash_bytes(r->pool, OIDC_JOSE_ALG_SHA256, (const unsigned char *) passphrase, strlen(passphrase), &key, &key_len, &err) == FALSE) { oidc_error(r, "oidc_jose_hash_bytes returned an error: %s", err.text); return NULL; } return key; }
CWE-330
12
static inline bool is_flush_request(struct request *rq, struct blk_flush_queue *fq, unsigned int tag) { return ((rq->cmd_flags & REQ_FLUSH_SEQ) && fq->flush_rq->tag == tag); }
CWE-362
18
char *url_decode_r(char *to, char *url, size_t size) { char *s = url, // source *d = to, // destination *e = &to[size - 1]; // destination end while(*s && d < e) { if(unlikely(*s == '%')) { if(likely(s[1] && s[2])) { *d++ = from_hex(s[1]) << 4 | from_hex(s[2]); s += 2; } } else if(unlikely(*s == '+')) *d++ = ' '; else *d++ = *s; s++; } *d = '\0'; return to; }
CWE-200
10
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (s->target_offset == sizeof(struct ip6t_entry) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0 && unconditional(&s->ipv6)) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; }
CWE-119
26
static int pad_basic(bn_t m, int *p_len, int m_len, int k_len, int operation) { uint8_t pad = 0; int result = RLC_OK; bn_t t; RLC_TRY { bn_null(t); bn_new(t); switch (operation) { case RSA_ENC: case RSA_SIG: case RSA_SIG_HASH: /* EB = 00 | FF | D. */ bn_zero(m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PAD); /* Make room for the real message. */ bn_lsh(m, m, m_len * 8); break; case RSA_DEC: case RSA_VER: case RSA_VER_HASH: /* EB = 00 | FF | D. */ m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (!bn_is_zero(t)) { result = RLC_ERR; } *p_len = 1; do { (*p_len)++; m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; } while (pad == 0 && m_len > 0); if (pad != RSA_PAD) { result = RLC_ERR; } bn_mod_2b(m, m, (k_len - *p_len) * 8); break; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); } return result; }
CWE-327
3
static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { struct ceph_x_authorizer *au = (void *)a; struct ceph_x_ticket_handler *th; int ret = 0; struct ceph_x_authorize_reply reply; void *p = au->reply_buf; void *end = p + sizeof(au->reply_buf); th = get_ticket_handler(ac, au->service); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) return -EPERM; if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ret = -EPERM; else ret = 0; dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); return ret; }
CWE-119
26
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct compat_timespec __user *timeout) { int datagrams; struct timespec ktspec; if (flags & MSG_CMSG_COMPAT) return -EINVAL; if (COMPAT_USE_64BIT_TIME) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, (struct timespec *) timeout); if (timeout == NULL) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, NULL); if (get_compat_timespec(&ktspec, timeout)) return -EFAULT; datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, &ktspec); if (datagrams > 0 && put_compat_timespec(&ktspec, timeout)) datagrams = -EFAULT; return datagrams; }
CWE-20
0
perf_event_read_event(struct perf_event *event, struct task_struct *task) { struct perf_output_handle handle; struct perf_sample_data sample; struct perf_read_event read_event = { .header = { .type = PERF_RECORD_READ, .misc = 0, .size = sizeof(read_event) + event->read_size, }, .pid = perf_event_pid(event, task), .tid = perf_event_tid(event, task), }; int ret; perf_event_header__init_id(&read_event.header, &sample, event); ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); if (ret) return; perf_output_put(&handle, read_event); perf_output_read(&handle, event); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); }
CWE-400
2
static void rds_tcp_kill_sock(struct net *net) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; rtn->rds_tcp_listen_sock = NULL; rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { list_move_tail(&tc->t_tcp_node, &tmp_list); } else { list_del(&tc->t_tcp_node); tc->t_tcp_node_detached = true; } } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); }
CWE-362
18
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) { struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; if (!is_irq_none(vdev)) return -EINVAL; vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); if (!vdev->ctx) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); kfree(vdev->ctx); return ret; } vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; if (!msix) { /* * Compute the virtual hardware field for max msi vectors - * it is the log base 2 of the number of vectors. */ vdev->msi_qmax = fls(nvec * 2 - 1) - 1; } return 0; }
CWE-119
26
static void do_perf_sw_event(enum perf_type_id type, u32 event_id, u64 nr, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); struct perf_event *event; struct hlist_node *node; struct hlist_head *head; rcu_read_lock(); head = find_swevent_head_rcu(swhash, type, event_id); if (!head) goto end; hlist_for_each_entry_rcu(event, node, head, hlist_entry) { if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_event(event, nr, nmi, data, regs); } end: rcu_read_unlock(); }
CWE-400
2
struct crypto_template *crypto_lookup_template(const char *name) { return try_then_request_module(__crypto_lookup_template(name), "%s", name); }
CWE-269
6
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; if (sk->sk_state & PPPOX_BOUND) { error = -EIO; goto end; } skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &error); if (error < 0) goto end; m->msg_namelen = 0; if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); if (error == 0) { consume_skb(skb); return total_len; } } kfree_skb(skb); end: return error; }
CWE-20
0
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; struct net *net = sock_net(sk); int ret; int chk_addr_ret; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len < sizeof(struct sockaddr_l2tpip)) return -EINVAL; if (addr->l2tp_family != AF_INET) return -EINVAL; ret = -EADDRINUSE; read_lock_bh(&l2tp_ip_lock); if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) goto out_in_use; read_unlock_bh(&l2tp_ip_lock); lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) goto out; chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr); ret = -EADDRNOTAVAIL; if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; if (addr->l2tp_addr.s_addr) inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); sk_add_bind_node(sk, &l2tp_ip_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); ret = 0; sock_reset_flag(sk, SOCK_ZAPPED); out: release_sock(sk); return ret; out_in_use: read_unlock_bh(&l2tp_ip_lock); return ret; }
CWE-362
18
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) { int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { void __user *namep; namep = (void __user __force *) m->msg_name; err = move_addr_to_kernel(namep, m->msg_namelen, address); if (err < 0) return err; } m->msg_name = address; } else { m->msg_name = NULL; } size = m->msg_iovlen * sizeof(struct iovec); if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) return -EFAULT; m->msg_iov = iov; err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { size_t len = iov[ct].iov_len; if (len > INT_MAX - err) { len = INT_MAX - err; iov[ct].iov_len = len; } err += len; } return err; }
CWE-20
0
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { struct rusage r; struct waitid_info info = {.status = 0}; long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); int signo = 0; if (err > 0) { signo = SIGCHLD; err = 0; } if (!err) { if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) return -EFAULT; } if (!infop) return err; user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); unsafe_put_user(info.pid, &infop->si_pid, Efault); unsafe_put_user(info.uid, &infop->si_uid, Efault); unsafe_put_user(info.status, &infop->si_status, Efault); user_access_end(); return err; Efault: user_access_end(); return -EFAULT; }
CWE-200
10
static int ip_options_get_finish(struct net *net, struct ip_options **optp,
                                 struct ip_options *opt, int optlen)
{
    while (optlen & 3)
        opt->__data[optlen++] = IPOPT_END;
    opt->optlen = optlen;
    if (optlen && ip_options_compile(net, opt, NULL)) {
        kfree(opt);
        return -EINVAL;
    }
    kfree(*optp);
    *optp = opt;
    return 0;
}
CWE-362
18
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
    if (keyring->description)
        seq_puts(m, keyring->description);
    else
        seq_puts(m, "[anon]");

    if (key_is_instantiated(keyring)) {
        if (keyring->keys.nr_leaves_on_tree != 0)
            seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
        else
            seq_puts(m, ": empty");
    }
}
CWE-20
0
int main(int argc, const char *argv[])
{
    struct group *grent;
    const char *cmd;
    const char *path;
    int i;
    struct passwd *pw;

    grent = getgrnam(ABUILD_GROUP);
    if (grent == NULL)
        errx(1, "%s: Group not found", ABUILD_GROUP);

    char *name = NULL;
    pw = getpwuid(getuid());
    if (pw)
        name = pw->pw_name;

    if (!is_in_group(grent->gr_gid)) {
        errx(1, "User %s is not a member of group %s\n",
             name ? name : "(unknown)", ABUILD_GROUP);
    }

    if (name == NULL)
        warnx("Could not find username for uid %d\n", getuid());
    setenv("USER", name ?: "", 1);

    cmd = strrchr(argv[0], '/');
    if (cmd)
        cmd++;
    else
        cmd = argv[0];

    cmd = strchr(cmd, '-');
    if (cmd == NULL)
        errx(1, "Calling command has no '-'");
    cmd++;

    path = get_command_path(cmd);
    if (path == NULL)
        errx(1, "%s: Not a valid subcommand", cmd);

    /* we dont allow --allow-untrusted option */
    for (i = 1; i < argc; i++)
        if (strcmp(argv[i], "--allow-untrusted") == 0)
            errx(1, "%s: not allowed option", "--allow-untrusted");

    argv[0] = path;
    /* set our uid to root so bbsuid --install works */
    setuid(0);
    /* set our gid to root so apk commit hooks run with the same gid as for "sudo apk add ..." */
    setgid(0);

    execv(path, (char * const*)argv);
    perror(path);
    return 1;
}
CWE-862
8
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}
CWE-119
26
fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc)
{
    tmsize_t stride = PredictorState(tif)->stride;
    uint32 bps = tif->tif_dir.td_bitspersample / 8;
    tmsize_t wc = cc / bps;
    tmsize_t count = cc;
    uint8 *cp = (uint8 *) cp0;
    uint8 *tmp = (uint8 *)_TIFFmalloc(cc);

    if(cc%(bps*stride)!=0)
    {
        TIFFErrorExt(tif->tif_clientdata, "fpAcc",
                     "%s", "cc%(bps*stride))!=0");
        return 0;
    }

    if (!tmp)
        return 0;

    while (count > stride) {
        REPEAT4(stride, cp[stride] =
                (unsigned char) ((cp[stride] + cp[0]) & 0xff); cp++)
        count -= stride;
    }

    _TIFFmemcpy(tmp, cp0, cc);
    cp = (uint8 *) cp0;
    for (count = 0; count < wc; count++) {
        uint32 byte;
        for (byte = 0; byte < bps; byte++) {
#if WORDS_BIGENDIAN
            cp[bps * count + byte] = tmp[byte * wc + count];
#else
            cp[bps * count + byte] = tmp[(bps - byte - 1) * wc + count];
#endif
        }
    }
    _TIFFfree(tmp);
    return 1;
}
CWE-119
26
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                       size_t len, int noblock, int flags, int *addr_len)
{
    struct inet_sock *inet = inet_sk(sk);
    size_t copied = 0;
    int err = -EOPNOTSUPP;
    struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
    struct sk_buff *skb;

    if (flags & MSG_OOB)
        goto out;

    if (addr_len)
        *addr_len = sizeof(*sin);

    if (flags & MSG_ERRQUEUE) {
        err = ip_recv_error(sk, msg, len);
        goto out;
    }

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb)
        goto out;

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err)
        goto done;

    sock_recv_ts_and_drops(msg, sk, skb);

    /* Copy the address. */
    if (sin) {
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
        sin->sin_port = 0;
        memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
    }
    if (inet->cmsg_flags)
        ip_cmsg_recv(msg, skb);
    if (flags & MSG_TRUNC)
        copied = skb->len;
done:
    skb_free_datagram(sk, skb);
out:
    if (err)
        return err;
    return copied;
}
CWE-200
10
int main(int argc, char **argv)
{
    test_cmp_parameters inParam;
    FILE *fbase=NULL, *ftest=NULL;
    int same = 0;
    char lbase[256];
    char strbase[256];
    char ltest[256];
    char strtest[256];

    if( parse_cmdline_cmp(argc, argv, &inParam) == 1 )
    {
        compare_dump_files_help_display();
        goto cleanup;
    }

    /* Display Parameters*/
    printf("******Parameters********* \n");
    printf(" base_filename = %s\n"
           " test_filename = %s\n",
           inParam.base_filename, inParam.test_filename);
    printf("************************* \n");

    /* open base file */
    printf("Try to open: %s for reading ... ", inParam.base_filename);
    if((fbase = fopen(inParam.base_filename, "rb"))==NULL)
    {
        goto cleanup;
    }
    printf("Ok.\n");

    /* open test file */
    printf("Try to open: %s for reading ... ", inParam.test_filename);
    if((ftest = fopen(inParam.test_filename, "rb"))==NULL)
    {
        goto cleanup;
    }
    printf("Ok.\n");

    while (fgets(lbase, sizeof(lbase), fbase) && fgets(ltest,sizeof(ltest),ftest))
    {
        int nbase = sscanf(lbase, "%255[^\r\n]", strbase);
        int ntest = sscanf(ltest, "%255[^\r\n]", strtest);
        assert( nbase != 255 && ntest != 255 );
        if( nbase != 1 || ntest != 1 )
        {
            fprintf(stderr, "could not parse line from files\n" );
            goto cleanup;
        }
        if( strcmp( strbase, strtest ) != 0 )
        {
            fprintf(stderr,"<%s> vs. <%s>\n", strbase, strtest);
            goto cleanup;
        }
    }

    same = 1;
    printf("\n***** TEST SUCCEED: Files are the same. *****\n");

cleanup:
    /*Close File*/
    if(fbase) fclose(fbase);
    if(ftest) fclose(ftest);

    /* Free memory*/
    free(inParam.base_filename);
    free(inParam.test_filename);

    return same ? EXIT_SUCCESS : EXIT_FAILURE;
}
CWE-119
26
static void put_ucounts(struct ucounts *ucounts)
{
    unsigned long flags;

    if (atomic_dec_and_test(&ucounts->count)) {
        spin_lock_irqsave(&ucounts_lock, flags);
        hlist_del_init(&ucounts->node);
        spin_unlock_irqrestore(&ucounts_lock, flags);

        kfree(ucounts);
    }
}
CWE-362
18
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
                        unsigned int dataoff, unsigned int *timeouts)
{
    return true;
}
CWE-20
0
static void dtls1_clear_queues(SSL *s)
{
    pitem *item = NULL;
    hm_fragment *frag = NULL;
    DTLS1_RECORD_DATA *rdata;

    while( (item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL)
    {
        rdata = (DTLS1_RECORD_DATA *) item->data;
        if (rdata->rbuf.buf)
        {
            OPENSSL_free(rdata->rbuf.buf);
        }
        OPENSSL_free(item->data);
        pitem_free(item);
    }

    while( (item = pqueue_pop(s->d1->processed_rcds.q)) != NULL)
    {
        rdata = (DTLS1_RECORD_DATA *) item->data;
        if (rdata->rbuf.buf)
        {
            OPENSSL_free(rdata->rbuf.buf);
        }
        OPENSSL_free(item->data);
        pitem_free(item);
    }

    while( (item = pqueue_pop(s->d1->buffered_messages)) != NULL)
    {
        frag = (hm_fragment *)item->data;
        OPENSSL_free(frag->fragment);
        OPENSSL_free(frag);
        pitem_free(item);
    }

    while ( (item = pqueue_pop(s->d1->sent_messages)) != NULL)
    {
        frag = (hm_fragment *)item->data;
        OPENSSL_free(frag->fragment);
        OPENSSL_free(frag);
        pitem_free(item);
    }

    while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL)
    {
        frag = (hm_fragment *)item->data;
        OPENSSL_free(frag->fragment);
        OPENSSL_free(frag);
        pitem_free(item);
    }
}
CWE-119
26
static uint32_t scsi_init_iovec(SCSIDiskReq *r)
{
    r->iov.iov_len = MIN(r->sector_count * 512, SCSI_DMA_BUF_SIZE);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
    return r->qiov.size / 512;
}
CWE-119
26
bool_t ksz8851IrqHandler(NetInterface *interface)
{
    bool_t flag;
    size_t n;
    uint16_t ier;
    uint16_t isr;

    //This flag will be set if a higher priority task must be woken
    flag = FALSE;

    //Save IER register value
    ier = ksz8851ReadReg(interface, KSZ8851_REG_IER);
    //Disable interrupts to release the interrupt line
    ksz8851WriteReg(interface, KSZ8851_REG_IER, 0);

    //Read interrupt status register
    isr = ksz8851ReadReg(interface, KSZ8851_REG_ISR);

    //Link status change?
    if((isr & ISR_LCIS) != 0)
    {
        //Disable LCIE interrupt
        ier &= ~IER_LCIE;

        //Set event flag
        interface->nicEvent = TRUE;
        //Notify the TCP/IP stack of the event
        flag |= osSetEventFromIsr(&netEvent);
    }

    //Packet transmission complete?
    if((isr & ISR_TXIS) != 0)
    {
        //Clear interrupt flag
        ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_TXIS);

        //Get the amount of free memory available in the TX FIFO
        n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;

        //Check whether the TX FIFO is available for writing
        if(n >= (ETH_MAX_FRAME_SIZE + 8))
        {
            //Notify the TCP/IP stack that the transmitter is ready to send
            flag |= osSetEventFromIsr(&interface->nicTxEvent);
        }
    }

    //Packet received?
    if((isr & ISR_RXIS) != 0)
    {
        //Disable RXIE interrupt
        ier &= ~IER_RXIE;

        //Set event flag
        interface->nicEvent = TRUE;
        //Notify the TCP/IP stack of the event
        flag |= osSetEventFromIsr(&netEvent);
    }

    //Re-enable interrupts once the interrupt has been serviced
    ksz8851WriteReg(interface, KSZ8851_REG_IER, ier);

    //A higher priority task must be woken?
    return flag;
}
CWE-20
0
videobuf_vm_close(struct vm_area_struct *vma)
{
    struct videobuf_mapping *map = vma->vm_private_data;
    struct videobuf_queue *q = map->q;
    int i;

    dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
            map->count,vma->vm_start,vma->vm_end);

    map->count--;
    if (0 == map->count) {
        dprintk(1,"munmap %p q=%p\n",map,q);
        mutex_lock(&q->lock);
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
            if (NULL == q->bufs[i])
                continue;
            if (q->bufs[i]->map != map)
                continue;
            q->ops->buf_release(q,q->bufs[i]);
            q->bufs[i]->map = NULL;
            q->bufs[i]->baddr = 0;
        }
        mutex_unlock(&q->lock);
        kfree(map);
    }
    return;
}
CWE-119
26
test_compare_files (const char* tgt, const char *rec)
{
    FILE *orig, *recons;
    static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE];
    xoff_t offset = 0;
    size_t i;
    size_t oc, rc;
    xoff_t diffs = 0;

    if ((orig = fopen (tgt, "r")) == NULL)
    {
        XPR(NT "open %s failed\n", tgt);
        return get_errno ();
    }

    if ((recons = fopen (rec, "r")) == NULL)
    {
        XPR(NT "open %s failed\n", rec);
        return get_errno ();
    }

    for (;;)
    {
        oc = fread (obuf, 1, TESTBUFSIZE, orig);
        rc = fread (rbuf, 1, TESTBUFSIZE, recons);

        if (oc != rc)
        {
            return XD3_INTERNAL;
        }

        if (oc == 0)
        {
            break;
        }

        for (i = 0; i < oc; i += 1)
        {
            if (obuf[i] != rbuf[i])
            {
                XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n",
                    (int)i, (int)oc, offset, obuf[i], rbuf[i]);
                diffs++;
                return XD3_INTERNAL;
            }
        }

        offset += oc;
    }

    fclose (orig);
    fclose (recons);

    if (diffs != 0)
    {
        return XD3_INTERNAL;
    }

    return 0;
}
CWE-119
26
dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
    int num, size_t size, off_t fsize, int *flags)
{
    Elf32_Phdr ph32;
    Elf64_Phdr ph64;
    size_t offset, len;
    unsigned char nbuf[BUFSIZ];
    ssize_t bufsize;

    if (size != xph_sizeof) {
        if (file_printf(ms, ", corrupted program header size") == -1)
            return -1;
        return 0;
    }

    /*
     * Loop through all the program headers.
     */
    for ( ; num; num--) {
        if (pread(fd, xph_addr, xph_sizeof, off) == -1) {
            file_badread(ms);
            return -1;
        }
        off += size;

        if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
            /* Perhaps warn here */
            continue;
        }

        if (xph_type != PT_NOTE)
            continue;

        /*
         * This is a PT_NOTE section; loop through all the notes
         * in the section.
         */
        len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf);
        if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) {
            file_badread(ms);
            return -1;
        }
        offset = 0;
        for (;;) {
            if (offset >= (size_t)bufsize)
                break;
            offset = donote(ms, nbuf, offset, (size_t)bufsize,
                clazz, swap, 4, flags);
            if (offset == 0)
                break;
        }
    }
    return 0;
}
CWE-20
0
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
    long tmp = *old;
    *old = atomic_long_cmpxchg(&sem->count, *old, new);
    return *old == tmp;
}
CWE-362
18
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
            struct rf_tech_specific_params_nfca_poll *nfca_poll,
            __u8 *data)
{
    nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
    data += 2;

    nfca_poll->nfcid1_len = *data++;

    pr_debug("sens_res 0x%x, nfcid1_len %d\n",
             nfca_poll->sens_res, nfca_poll->nfcid1_len);

    memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
    data += nfca_poll->nfcid1_len;

    nfca_poll->sel_res_len = *data++;

    if (nfca_poll->sel_res_len != 0)
        nfca_poll->sel_res = *data++;

    pr_debug("sel_res_len %d, sel_res 0x%x\n",
             nfca_poll->sel_res_len, nfca_poll->sel_res);

    return data;
}
CWE-119
26
static ssize_t bat_socket_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
    struct socket_client *socket_client = file->private_data;
    struct socket_packet *socket_packet;
    size_t packet_len;
    int error;

    if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
        return -EAGAIN;

    if ((!buf) || (count < sizeof(struct icmp_packet)))
        return -EINVAL;

    if (!access_ok(VERIFY_WRITE, buf, count))
        return -EFAULT;

    error = wait_event_interruptible(socket_client->queue_wait,
                                     socket_client->queue_len);

    if (error)
        return error;

    spin_lock_bh(&socket_client->lock);

    socket_packet = list_first_entry(&socket_client->queue_list,
                                     struct socket_packet, list);
    list_del(&socket_packet->list);
    socket_client->queue_len--;

    spin_unlock_bh(&socket_client->lock);

    error = copy_to_user(buf, &socket_packet->icmp_packet,
                         socket_packet->icmp_len);

    packet_len = socket_packet->icmp_len;
    kfree(socket_packet);

    if (error)
        return -EFAULT;

    return packet_len;
}
CWE-119
26
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
    struct btrfs_fs_info *fs_info = trans->fs_info;
    struct btrfs_transaction *cur_trans = trans->transaction;

    if (!trans->chunk_bytes_reserved)
        return;

    WARN_ON_ONCE(!list_empty(&trans->new_bgs));

    btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
                            trans->chunk_bytes_reserved, NULL);
    atomic64_sub(trans->chunk_bytes_reserved,
                 &cur_trans->chunk_bytes_reserved);
    cond_wake_up(&cur_trans->chunk_reserve_wait);
    trans->chunk_bytes_reserved = 0;
}
CWE-667
27
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file->f_mapping->host;
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    get_block_t *get_block;

    /*
     * Fallback to buffered I/O if we see an inode without
     * extents.
     */
    if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
        return 0;

    /* Fallback to buffered I/O if we do not support append dio. */
    if (iocb->ki_pos + iter->count > i_size_read(inode) &&
        !ocfs2_supports_append_dio(osb))
        return 0;

    if (iov_iter_rw(iter) == READ)
        get_block = ocfs2_get_block;
    else
        get_block = ocfs2_dio_get_block;

    return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
                                iter, get_block,
                                ocfs2_dio_end_io, NULL, 0);
}
CWE-362
18
static void perf_event_exit_cpu(int cpu)
{
    struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

    perf_event_exit_cpu_context(cpu);

    mutex_lock(&swhash->hlist_mutex);
    swhash->online = false;
    swevent_hlist_release(swhash);
    mutex_unlock(&swhash->hlist_mutex);
}
CWE-362
18
Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){
  Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0);
  if( p ){
    struct SrcList_item *pItem = &pSrc->a[iSrc];
    p->y.pTab = pItem->pTab;
    p->iTable = pItem->iCursor;
    if( p->y.pTab->iPKey==iCol ){
      p->iColumn = -1;
    }else{
      p->iColumn = (ynVar)iCol;
      testcase( iCol==BMS );
      testcase( iCol==BMS-1 );
      pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol);
    }
  }
  return p;
}
CWE-754
31
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
                        struct msghdr *msg, size_t len, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
    int err;

    if (len > ds)
        len = ds;
    else if (len < ds)
        msg->msg_flags |= MSG_TRUNC;

    msg->msg_namelen = 0;

    lock_sock(sk);
    if (ctx->more) {
        ctx->more = 0;
        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
                                         &ctx->completion);
        if (err)
            goto unlock;
    }

    err = memcpy_toiovec(msg->msg_iov, ctx->result, len);

unlock:
    release_sock(sk);

    return err ?: len;
}
CWE-20
0
error_t mqttSnClientSendUnsubscribe(MqttSnClientContext *context,
    const char_t *topicName)
{
    error_t error;
    systime_t time;
    uint16_t topicId;
    MqttSnFlags flags;

    //Initialize status code
    error = NO_ERROR;

    //Reset unused flags
    flags.all = 0;

    //Check whether a predefined topic ID has been registered
    topicId = mqttSnClientFindPredefTopicName(context, topicName);

    //Predefined topic ID found?
    if(topicId != MQTT_SN_INVALID_TOPIC_ID)
    {
        //The UNSUBSCRIBE message contains a predefined topic ID
        flags.topicIdType = MQTT_SN_PREDEFINED_TOPIC_ID;
    }
    else
    {
        //Short topic name?
        if(osStrlen(topicName) == 2 && strchr(topicName, '#') == NULL &&
            strchr(topicName, '+') == NULL)
        {
            //The UNSUBSCRIBE message contains a short topic name
            flags.topicIdType = MQTT_SN_SHORT_TOPIC_NAME;
        }
        else
        {
            //The UNSUBSCRIBE message contains a normal topic name
            flags.topicIdType = MQTT_SN_NORMAL_TOPIC_NAME;
        }

        //Format UNSUBSCRIBE message
        error = mqttSnFormatUnsubscribe(&context->message, flags,
            context->msgId, topicId, topicName);
    }

    //Check status code
    if(!error)
    {
        //Debug message
        TRACE_INFO("Sending UNSUBSCRIBE message (%" PRIuSIZE " bytes)...\r\n",
            context->message.length);

        //Dump the contents of the message for debugging purpose
        mqttSnDumpMessage(context->message.buffer, context->message.length);

        //Send MQTT-SN message
        error = mqttSnClientSendDatagram(context, context->message.buffer,
            context->message.length);

        //Get current time
        time = osGetSystemTime();

        //Save the time at which the message was sent
        context->retransmitStartTime = time;
        context->keepAliveTimestamp = time;

        //Update MQTT-SN client state
        context->state = MQTT_SN_CLIENT_STATE_SENDING_REQ;
        context->msgType = MQTT_SN_MSG_TYPE_UNSUBSCRIBE;
    }

    //Return status code
    return error;
}
CWE-20
0
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
CWE-269
6
static void prefetch_enc(void)
{
    prefetch_table((const void *)encT, sizeof(encT));
}
CWE-668
7