Schema:
    code:       string (lengths 23 to 2.05k)
    label_name: string (lengths 6 to 7)
    label:      int64 (values 0 to 37)
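Each row below pairs one C function ("code") with a CWE class name ("label_name") and its integer class index ("label"). A minimal sketch of how rows with this schema could be iterated using the Hugging Face "datasets" library follows; the JSONL file name is hypothetical, and the real dataset may ship in another container format:

    # A minimal sketch, assuming the rows live in a local JSONL file with the
    # schema above; "cwe_labeled_functions.jsonl" is a hypothetical file name.
    from datasets import load_dataset

    ds = load_dataset("json", data_files="cwe_labeled_functions.jsonl",
                      split="train")

    for row in ds.select(range(3)):
        # Each row pairs a C function ("code") with a CWE class name
        # ("label_name", e.g. "CWE-119") and its integer index ("label", 0-37).
        print(row["label"], row["label_name"], row["code"][:60])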
static struct ip_options *ip_options_get_alloc(const int optlen)
{
    return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
                   GFP_KERNEL);
}
CWE-362
18
void svhandler_flash_erase_sector(void)
{
    uint32_t sector = _param_1;

    // Do not allow firmware to erase bootstrap or bootloader sectors.
    if ((sector == FLASH_BOOTSTRAP_SECTOR) ||
        (sector >= FLASH_BOOT_SECTOR_FIRST && sector <= FLASH_BOOT_SECTOR_LAST)) {
        return;
    }

    // Unlock flash.
    flash_clear_status_flags();
    flash_unlock();

    // Erase the sector.
    flash_erase_sector(sector, FLASH_CR_PROGRAM_X32);

    // Return flash status.
    _param_1 = !!flash_chk_status();
    _param_2 = 0;
    _param_3 = 0;

    // Wait for any write operation to complete.
    flash_wait_for_last_operation();
    // Disable writes to flash.
    FLASH_CR &= ~FLASH_CR_PG;
    // lock flash register
    FLASH_CR |= FLASH_CR_LOCK;
}
CWE-668
7
int mif_validate(jas_stream_t *in)
{
    uchar buf[MIF_MAGICLEN];
    uint_fast32_t magic;
    int i;
    int n;

    assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN);

    /* Read the validation data (i.e., the data used for detecting
       the format). */
    if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) {
        return -1;
    }

    /* Put the validation data back onto the stream, so that the
       stream position will not be changed. */
    for (i = n - 1; i >= 0; --i) {
        if (jas_stream_ungetc(in, buf[i]) == EOF) {
            return -1;
        }
    }

    /* Was enough data read? */
    if (n < MIF_MAGICLEN) {
        return -1;
    }

    /* Compute the signature value. */
    magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) |
        (JAS_CAST(uint_fast32_t, buf[1]) << 16) |
        (JAS_CAST(uint_fast32_t, buf[2]) << 8) |
        buf[3];

    /* Ensure that the signature is correct for this format. */
    if (magic != MIF_MAGIC) {
        return -1;
    }

    return 0;
}
CWE-20
0
const char * util_acl_to_str(const sc_acl_entry_t *e)
{
    static char line[80], buf[20];
    unsigned int acl;

    if (e == NULL)
        return "N/A";
    line[0] = 0;
    while (e != NULL) {
        acl = e->method;
        switch (acl) {
        case SC_AC_UNKNOWN:
            return "N/A";
        case SC_AC_NEVER:
            return "NEVR";
        case SC_AC_NONE:
            return "NONE";
        case SC_AC_CHV:
            strcpy(buf, "CHV");
            if (e->key_ref != SC_AC_KEY_REF_NONE)
                sprintf(buf + 3, "%d", e->key_ref);
            break;
        case SC_AC_TERM:
            strcpy(buf, "TERM");
            break;
        case SC_AC_PRO:
            strcpy(buf, "PROT");
            break;
        case SC_AC_AUT:
            strcpy(buf, "AUTH");
            if (e->key_ref != SC_AC_KEY_REF_NONE)
                sprintf(buf + 4, "%d", e->key_ref);
            break;
        case SC_AC_SEN:
            strcpy(buf, "Sec.Env. ");
            if (e->key_ref != SC_AC_KEY_REF_NONE)
                sprintf(buf + 3, "#%d", e->key_ref);
            break;
        case SC_AC_SCB:
            strcpy(buf, "Sec.ControlByte ");
            if (e->key_ref != SC_AC_KEY_REF_NONE)
                sprintf(buf + 3, "Ox%X", e->key_ref);
            break;
        case SC_AC_IDA:
            strcpy(buf, "PKCS#15 AuthID ");
            if (e->key_ref != SC_AC_KEY_REF_NONE)
                sprintf(buf + 3, "#%d", e->key_ref);
            break;
        default:
            strcpy(buf, "????");
            break;
        }
        strcat(line, buf);
        strcat(line, " ");
        e = e->next;
    }
    line[strlen(line) - 1] = 0; /* get rid of trailing space */
    return line;
}
CWE-119
26
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
    struct desc_struct *desc;
    unsigned long limit;
    short sel;

    sel = get_segment_selector(regs, seg_reg_idx);
    if (sel < 0)
        return 0;

    if (user_64bit_mode(regs) || v8086_mode(regs))
        return -1L;

    if (!sel)
        return 0;

    desc = get_desc(sel);
    if (!desc)
        return 0;

    /*
     * If the granularity bit is set, the limit is given in multiples
     * of 4096. This also means that the 12 least significant bits are
     * not tested when checking the segment limits. In practice,
     * this means that the segment ends in (limit << 12) + 0xfff.
     */
    limit = get_desc_limit(desc);
    if (desc->g)
        limit = (limit << 12) + 0xfff;

    return limit;
}
CWE-362
18
static bool parse_reconnect(struct pool *pool, json_t *val)
{
    char *sockaddr_url, *stratum_port, *tmp;
    char *url, *port, address[256];

    if (opt_disable_client_reconnect) {
        applog(LOG_WARNING, "Stratum client.reconnect forbidden, aborting.");
        return false;
    }

    memset(address, 0, 255);
    url = (char *)json_string_value(json_array_get(val, 0));
    if (!url)
        url = pool->sockaddr_url;
    port = (char *)json_string_value(json_array_get(val, 1));
    if (!port)
        port = pool->stratum_port;
    sprintf(address, "%s:%s", url, port);

    if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
        return false;

    applog(LOG_NOTICE, "Reconnect requested from %s to %s",
           get_pool_name(pool), address);

    clear_pool_work(pool);

    mutex_lock(&pool->stratum_lock);
    __suspend_stratum(pool);
    tmp = pool->sockaddr_url;
    pool->sockaddr_url = sockaddr_url;
    pool->stratum_url = pool->sockaddr_url;
    free(tmp);
    tmp = pool->stratum_port;
    pool->stratum_port = stratum_port;
    free(tmp);
    mutex_unlock(&pool->stratum_lock);

    if (!restart_stratum(pool)) {
        pool_failed(pool);
        return false;
    }

    return true;
}
CWE-119
26
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
    struct key *key;
    key_ref_t key_ref;
    long ret;

    /* find the key first */
    key_ref = lookup_user_key(keyid, 0, 0);
    if (IS_ERR(key_ref)) {
        ret = -ENOKEY;
        goto error;
    }

    key = key_ref_to_ptr(key_ref);

    if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
        ret = -ENOKEY;
        goto error2;
    }

    /* see if we can read it directly */
    ret = key_permission(key_ref, KEY_NEED_READ);
    if (ret == 0)
        goto can_read_key;
    if (ret != -EACCES)
        goto error2;

    /* we can't; see if it's searchable from this process's keyrings
     * - we automatically take account of the fact that it may be
     *   dangling off an instantiation key
     */
    if (!is_key_possessed(key_ref)) {
        ret = -EACCES;
        goto error2;
    }

    /* the key is probably readable - now try to read it */
can_read_key:
    ret = -EOPNOTSUPP;
    if (key->type->read) {
        /* Read the data with the semaphore held (since we might sleep)
         * to protect against the key being updated or revoked.
         */
        down_read(&key->sem);
        ret = key_validate(key);
        if (ret == 0)
            ret = key->type->read(key, buffer, buflen);
        up_read(&key->sem);
    }

error2:
    key_put(key);
error:
    return ret;
}
CWE-20
0
void esp32EthEnableIrq(NetInterface *interface)
{
    //Valid Ethernet PHY or switch driver?
    if(interface->phyDriver != NULL)
    {
        //Enable Ethernet PHY interrupts
        interface->phyDriver->enableIrq(interface);
    }
    else if(interface->switchDriver != NULL)
    {
        //Enable Ethernet switch interrupts
        interface->switchDriver->enableIrq(interface);
    }
    else
    {
        //Just for sanity
    }
}
CWE-20
0
dtls1_buffer_record(SSL *s, record_pqueue *queue, unsigned char *priority)
{
    DTLS1_RECORD_DATA *rdata;
    pitem *item;

    /* Limit the size of the queue to prevent DOS attacks */
    if (pqueue_size(queue->q) >= 100)
        return 0;

    rdata = OPENSSL_malloc(sizeof(DTLS1_RECORD_DATA));
    item = pitem_new(priority, rdata);
    if (rdata == NULL || item == NULL)
    {
        if (rdata != NULL) OPENSSL_free(rdata);
        if (item != NULL) pitem_free(item);
        SSLerr(SSL_F_DTLS1_BUFFER_RECORD, ERR_R_INTERNAL_ERROR);
        return(0);
    }

    rdata->packet = s->packet;
    rdata->packet_length = s->packet_length;
    memcpy(&(rdata->rbuf), &(s->s3->rbuf), sizeof(SSL3_BUFFER));
    memcpy(&(rdata->rrec), &(s->s3->rrec), sizeof(SSL3_RECORD));

    item->data = rdata;

#ifndef OPENSSL_NO_SCTP
    /* Store bio_dgram_sctp_rcvinfo struct */
    if (BIO_dgram_is_sctp(SSL_get_rbio(s)) &&
        (s->state == SSL3_ST_SR_FINISHED_A ||
         s->state == SSL3_ST_CR_FINISHED_A))
    {
        BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SCTP_GET_RCVINFO,
                 sizeof(rdata->recordinfo), &rdata->recordinfo);
    }
#endif

    s->packet = NULL;
    s->packet_length = 0;
    memset(&(s->s3->rbuf), 0, sizeof(SSL3_BUFFER));
    memset(&(s->s3->rrec), 0, sizeof(SSL3_RECORD));

    if (!ssl3_setup_buffers(s))
    {
        SSLerr(SSL_F_DTLS1_BUFFER_RECORD, ERR_R_INTERNAL_ERROR);
        OPENSSL_free(rdata);
        pitem_free(item);
        return(0);
    }

    /* insert should not fail, since duplicates are dropped */
    if (pqueue_insert(queue->q, item) == NULL)
    {
        SSLerr(SSL_F_DTLS1_BUFFER_RECORD, ERR_R_INTERNAL_ERROR);
        OPENSSL_free(rdata);
        pitem_free(item);
        return(0);
    }

    return(1);
}
CWE-119
26
int hns_rcb_get_ring_sset_count(int stringset)
{
    if (stringset == ETH_SS_STATS)
        return HNS_RING_STATIC_REG_NUM;

    return 0;
}
CWE-119
26
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
    unsigned long pmnc;
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc;
    struct pt_regs *regs;
    int idx;

    /*
     * Get and reset the IRQ flags
     */
    pmnc = armv7_pmnc_getreset_flags();

    /*
     * Did an overflow occur?
     */
    if (!armv7_pmnc_has_overflowed(pmnc))
        return IRQ_NONE;

    /*
     * Handle the counter(s) overflow(s)
     */
    regs = get_irq_regs();

    perf_sample_data_init(&data, 0);

    cpuc = &__get_cpu_var(cpu_hw_events);
    for (idx = 0; idx <= armpmu->num_events; ++idx) {
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc;

        if (!test_bit(idx, cpuc->active_mask))
            continue;

        /*
         * We have a single interrupt for all counters. Check that
         * each counter has overflowed before we process it.
         */
        if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
            continue;

        hwc = &event->hw;
        armpmu_event_update(event, hwc, idx, 1);
        data.period = event->hw.last_period;
        if (!armpmu_event_set_period(event, hwc, idx))
            continue;

        if (perf_event_overflow(event, 0, &data, regs))
            armpmu->disable(hwc, idx);
    }

    /*
     * Handle the pending perf events.
     *
     * Note: this call *must* be run with interrupts disabled. For
     * platforms that can have the PMU interrupts raised as an NMI, this
     * will not work.
     */
    irq_work_run();

    return IRQ_HANDLED;
}
CWE-400
2
header_put_marker (SF_PRIVATE *psf, int x)
{
    if (psf->headindex < SIGNED_SIZEOF (psf->header) - 4)
    {
        psf->header [psf->headindex++] = (x >> 24) ;
        psf->header [psf->headindex++] = (x >> 16) ;
        psf->header [psf->headindex++] = (x >> 8) ;
        psf->header [psf->headindex++] = x ;
    } ;
} /* header_put_marker */
CWE-119
26
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
                         struct msghdr *m, size_t total_len, int flags)
{
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    int error = 0;

    if (sk->sk_state & PPPOX_BOUND) {
        error = -EIO;
        goto end;
    }

    skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                            flags & MSG_DONTWAIT, &error);
    if (error < 0)
        goto end;

    m->msg_namelen = 0;

    if (skb) {
        total_len = min_t(size_t, total_len, skb->len);
        error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
        if (error == 0) {
            consume_skb(skb);
            return total_len;
        }
    }

    kfree_skb(skb);
end:
    return error;
}
CWE-20
0
void *ndpGetOption(uint8_t *options, size_t length, uint8_t type)
{
    size_t i;
    NdpOption *option;

    //Point to the very first option of the NDP message
    i = 0;

    //Parse options
    while((i + sizeof(NdpOption)) <= length)
    {
        //Point to the current option
        option = (NdpOption *) (options + i);

        //Nodes must silently discard an NDP message that contains
        //an option with length zero
        if(option->length == 0)
            break;
        //Check option length
        if((i + option->length * 8) > length)
            break;

        //Current option type matches the specified one?
        if(option->type == type || type == NDP_OPT_ANY)
            return option;

        //Jump to the next option
        i += option->length * 8;
    }

    //Specified option type not found
    return NULL;
}
CWE-20
0
static int dynamicGetbuf(gdIOCtxPtr ctx, void *buf, int len)
{
    int rlen, remain;
    dpIOCtxPtr dctx;
    dynamicPtr *dp;

    dctx = (dpIOCtxPtr) ctx;
    dp = dctx->dp;

    remain = dp->logicalSize - dp->pos;
    if(remain >= len) {
        rlen = len;
    } else {
        if(remain == 0) {
            /* 2.0.34: EOF is incorrect. We use 0 for
             * errors and EOF, just like fileGetbuf,
             * which is a simple fread() wrapper.
             * TBB. Original bug report: Daniel Cowgill. */
            return 0; /* NOT EOF */
        }

        rlen = remain;
    }

    memcpy(buf, (void *) ((char *)dp->data + dp->pos), rlen);
    dp->pos += rlen;

    return rlen;
}
CWE-119
26
mm_zalloc(struct mm_master *mm, u_int ncount, u_int size)
{
    if (size == 0 || ncount == 0 || ncount > SIZE_MAX / size)
        fatal("%s: mm_zalloc(%u, %u)", __func__, ncount, size);

    return mm_malloc(mm, size * ncount);
}
CWE-119
26
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
    muscle_private_t* priv = MUSCLE_DATA(card);
    mscfs_t *fs = priv->fs;
    int x;
    int count = 0;

    mscfs_check_cache(priv->fs);

    for(x = 0; x < fs->cache.size; x++) {
        u8* oid = fs->cache.array[x].objectId.id;
        sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
            "FILE: %02X%02X%02X%02X\n",
            oid[0],oid[1],oid[2],oid[3]);
        if(0 == memcmp(fs->currentPath, oid, 2)) {
            buf[0] = oid[2];
            buf[1] = oid[3];
            if(buf[0] == 0x00 && buf[1] == 0x00)
                continue; /* No directories/null names outside of root */
            buf += 2;
            count += 2;
        }
    }
    return count;
}
CWE-119
26
xfs_attr_shortform_addname(xfs_da_args_t *args)
{
    int newsize, forkoff, retval;

    trace_xfs_attr_sf_addname(args);

    retval = xfs_attr_shortform_lookup(args);
    if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
        return retval;
    } else if (retval == -EEXIST) {
        if (args->flags & ATTR_CREATE)
            return retval;
        retval = xfs_attr_shortform_remove(args);
        ASSERT(retval == 0);
    }

    if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
        args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX)
        return -ENOSPC;

    newsize = XFS_ATTR_SF_TOTSIZE(args->dp);
    newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);

    forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize);
    if (!forkoff)
        return -ENOSPC;

    xfs_attr_shortform_add(args, forkoff);
    return 0;
}
CWE-754
31
void ptrace_triggered(struct perf_event *bp, int nmi,
                      struct perf_sample_data *data, struct pt_regs *regs)
{
    struct perf_event_attr attr;

    /*
     * Disable the breakpoint request here since ptrace has defined a
     * one-shot behaviour for breakpoint exceptions in PPC64.
     * The SIGTRAP signal is generated automatically for us in do_dabr().
     * We don't have to do anything about that here
     */
    attr = bp->attr;
    attr.disabled = true;
    modify_user_hw_breakpoint(bp, &attr);
}
CWE-400
2
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
{
    struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
    struct sock *sk = sock->sk;
    struct netlink_sock *nlk = nlk_sk(sk);
    struct sockaddr_nl *addr = msg->msg_name;
    u32 dst_pid;
    u32 dst_group;
    struct sk_buff *skb;
    int err;
    struct scm_cookie scm;

    if (msg->msg_flags&MSG_OOB)
        return -EOPNOTSUPP;

    if (NULL == siocb->scm)
        siocb->scm = &scm;
    err = scm_send(sock, msg, siocb->scm);
    if (err < 0)
        return err;

    if (msg->msg_namelen) {
        err = -EINVAL;
        if (addr->nl_family != AF_NETLINK)
            goto out;
        dst_pid = addr->nl_pid;
        dst_group = ffs(addr->nl_groups);
        err = -EPERM;
        if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
            goto out;
    } else {
        dst_pid = nlk->dst_pid;
        dst_group = nlk->dst_group;
    }

    if (!nlk->pid) {
        err = netlink_autobind(sock);
        if (err)
            goto out;
    }

    err = -EMSGSIZE;
    if (len > sk->sk_sndbuf - 32)
        goto out;
    err = -ENOBUFS;
    skb = alloc_skb(len, GFP_KERNEL);
    if (skb == NULL)
        goto out;

    NETLINK_CB(skb).pid = nlk->pid;
    NETLINK_CB(skb).dst_group = dst_group;
    memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

    err = -EFAULT;
    if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
        kfree_skb(skb);
        goto out;
    }

    err = security_netlink_send(sk, skb);
    if (err) {
        kfree_skb(skb);
        goto out;
    }

    if (dst_group) {
        atomic_inc(&skb->users);
        netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
    }
    err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
    scm_destroy(siocb->scm);
    return err;
}
CWE-287
4
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
    int err;
    struct sk_buff *skb;
    struct sock *sk = sock->sk;

    err = -EIO;
    if (sk->sk_state & PPPOX_BOUND)
        goto end;

    msg->msg_namelen = 0;

    err = 0;
    skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                            flags & MSG_DONTWAIT, &err);
    if (!skb)
        goto end;

    if (len > skb->len)
        len = skb->len;
    else if (len < skb->len)
        msg->msg_flags |= MSG_TRUNC;

    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
    if (likely(err == 0))
        err = len;

    kfree_skb(skb);
end:
    return err;
}
CWE-20
0
static int propagate_one(struct mount *m)
{
    struct mount *child;
    int type;
    /* skip ones added by this propagate_mnt() */
    if (IS_MNT_NEW(m))
        return 0;
    /* skip if mountpoint isn't covered by it */
    if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
        return 0;
    if (peers(m, last_dest)) {
        type = CL_MAKE_SHARED;
    } else {
        struct mount *n, *p;
        bool done;
        for (n = m; ; n = p) {
            p = n->mnt_master;
            if (p == dest_master || IS_MNT_MARKED(p))
                break;
        }
        do {
            struct mount *parent = last_source->mnt_parent;
            if (last_source == first_source)
                break;
            done = parent->mnt_master == p;
            if (done && peers(n, parent))
                break;
            last_source = last_source->mnt_master;
        } while (!done);
        type = CL_SLAVE;
        /* beginning of peer group among the slaves? */
        if (IS_MNT_SHARED(m))
            type |= CL_MAKE_SHARED;
    }

    /* Notice when we are propagating across user namespaces */
    if (m->mnt_ns->user_ns != user_ns)
        type |= CL_UNPRIVILEGED;
    child = copy_tree(last_source, last_source->mnt.mnt_root, type);
    if (IS_ERR(child))
        return PTR_ERR(child);
    child->mnt.mnt_flags &= ~MNT_LOCKED;
    mnt_set_mountpoint(m, mp, child);
    last_dest = m;
    last_source = child;
    if (m->mnt_master != dest_master) {
        read_seqlock_excl(&mount_lock);
        SET_MNT_MARK(m->mnt_master);
        read_sequnlock_excl(&mount_lock);
    }
    hlist_add_head(&child->mnt_hash, list);
    return 0;
}
CWE-400
2
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
    int r;
    struct kvm *kvm = vcpu->kvm;

    vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
    r = vapic_enter(vcpu);
    if (r) {
        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
        return r;
    }

    r = 1;
    while (r > 0) {
        if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
            !vcpu->arch.apf.halted)
            r = vcpu_enter_guest(vcpu);
        else {
            srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
            kvm_vcpu_block(vcpu);
            vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
            if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                kvm_apic_accept_events(vcpu);
                switch(vcpu->arch.mp_state) {
                case KVM_MP_STATE_HALTED:
                    vcpu->arch.pv.pv_unhalted = false;
                    vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
                case KVM_MP_STATE_RUNNABLE:
                    vcpu->arch.apf.halted = false;
                    break;
                case KVM_MP_STATE_INIT_RECEIVED:
                    break;
                default:
                    r = -EINTR;
                    break;
                }
            }
        }

        if (r <= 0)
            break;

        clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
        if (kvm_cpu_has_pending_timer(vcpu))
            kvm_inject_pending_timer_irqs(vcpu);

        if (dm_request_for_irq_injection(vcpu)) {
            r = -EINTR;
            vcpu->run->exit_reason = KVM_EXIT_INTR;
            ++vcpu->stat.request_irq_exits;
        }

        kvm_check_async_pf_completion(vcpu);

        if (signal_pending(current)) {
            r = -EINTR;
            vcpu->run->exit_reason = KVM_EXIT_INTR;
            ++vcpu->stat.signal_exits;
        }
        if (need_resched()) {
            srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
            kvm_resched(vcpu);
            vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        }
    }

    srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);

    vapic_exit(vcpu);

    return r;
}
CWE-20
0
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
    cpumask_clear(&mm->context.cpu_attach_mask);
    atomic_set(&mm->context.attach_count, 0);
    mm->context.flush_mm = 0;
    mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
    mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#ifdef CONFIG_PGSTE
    mm->context.alloc_pgste = page_table_allocate_pgste;
    mm->context.has_pgste = 0;
    mm->context.use_skey = 0;
#endif
    mm->context.asce_limit = STACK_TOP_MAX;
    crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
    return 0;
}
CWE-20
0
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
                                  struct xt_table_info *newinfo,
                                  unsigned int *size,
                                  const unsigned char *base,
                                  const unsigned char *limit,
                                  const unsigned int *hook_entries,
                                  const unsigned int *underflows,
                                  const char *name)
{
    struct xt_entry_target *t;
    struct xt_target *target;
    unsigned int entry_offset;
    int ret, off, h;

    duprintf("check_compat_entry_size_and_hooks %p\n", e);
    if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
        (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
        duprintf("Bad offset %p, limit = %p\n", e, limit);
        return -EINVAL;
    }

    if (e->next_offset < sizeof(struct compat_arpt_entry) +
                         sizeof(struct compat_xt_entry_target)) {
        duprintf("checking: element %p size %u\n",
                 e, e->next_offset);
        return -EINVAL;
    }

    /* For purposes of check_entry casting the compat entry is fine */
    ret = check_entry((struct arpt_entry *)e);
    if (ret)
        return ret;

    off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
    entry_offset = (void *)e - (void *)base;

    t = compat_arpt_get_target(e);
    target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
                                    t->u.user.revision);
    if (IS_ERR(target)) {
        duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
                 t->u.user.name);
        ret = PTR_ERR(target);
        goto out;
    }
    t->u.kernel.target = target;

    off += xt_compat_target_offset(target);
    *size += off;
    ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
    if (ret)
        goto release_target;

    /* Check hooks & underflows */
    for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
        if ((unsigned char *)e - base == hook_entries[h])
            newinfo->hook_entry[h] = hook_entries[h];
        if ((unsigned char *)e - base == underflows[h])
            newinfo->underflow[h] = underflows[h];
    }

    /* Clear counters and comefrom */
    memset(&e->counters, 0, sizeof(e->counters));
    e->comefrom = 0;
    return 0;

release_target:
    module_put(t->u.kernel.target->me);
out:
    return ret;
}
CWE-119
26
int read_file(struct sc_card *card, char *str_path, unsigned char **data,
              size_t *data_len)
{
    struct sc_path path;
    struct sc_file *file;
    unsigned char *p;
    int ok = 0;
    int r;
    size_t len;

    sc_format_path(str_path, &path);
    if (SC_SUCCESS != sc_select_file(card, &path, &file)) {
        goto err;
    }

    len = file ? file->size : 4096;
    p = realloc(*data, len);
    if (!p) {
        goto err;
    }
    *data = p;
    *data_len = len;

    r = sc_read_binary(card, 0, p, len, 0);
    if (r < 0)
        goto err;

    *data_len = r;
    ok = 1;

err:
    sc_file_free(file);
    return ok;
}
CWE-119
26
log2vis_utf8 (PyObject * string, int unicode_length,
              FriBidiParType base_direction, int clean, int reordernsm)
{
    FriBidiChar *logical = NULL;    /* input fribidi unicode buffer */
    FriBidiChar *visual = NULL;     /* output fribidi unicode buffer */
    char *visual_utf8 = NULL;       /* output fribidi UTF-8 buffer */
    FriBidiStrIndex new_len = 0;    /* length of the UTF-8 buffer */
    PyObject *result = NULL;        /* failure */

    /* Allocate fribidi unicode buffers */
    logical = PyMem_New (FriBidiChar, unicode_length + 1);
    if (logical == NULL)
    {
        PyErr_SetString (PyExc_MemoryError,
                         "failed to allocate unicode buffer");
        goto cleanup;
    }

    visual = PyMem_New (FriBidiChar, unicode_length + 1);
    if (visual == NULL)
    {
        PyErr_SetString (PyExc_MemoryError,
                         "failed to allocate unicode buffer");
        goto cleanup;
    }

    /* Convert to unicode and order visually */
    fribidi_set_reorder_nsm(reordernsm);

    fribidi_utf8_to_unicode (PyString_AS_STRING (string),
                             PyString_GET_SIZE (string), logical);

    if (!fribidi_log2vis (logical, unicode_length, &base_direction, visual,
                          NULL, NULL, NULL))
    {
        PyErr_SetString (PyExc_RuntimeError,
                         "fribidi failed to order string");
        goto cleanup;
    }

    /* Cleanup the string if requested */
    if (clean)
        fribidi_remove_bidi_marks (visual, unicode_length, NULL, NULL, NULL);

    /* Allocate fribidi UTF-8 buffer */
    visual_utf8 = PyMem_New(char, (unicode_length * 4) + 1);
    if (visual_utf8 == NULL)
    {
        PyErr_SetString (PyExc_MemoryError,
                         "failed to allocate UTF-8 buffer");
        goto cleanup;
    }

    /* Encode the reordered string and create result string */
    new_len = fribidi_unicode_to_utf8 (visual, unicode_length, visual_utf8);
    result = PyString_FromStringAndSize (visual_utf8, new_len);
    if (result == NULL)
        /* XXX does it raise any error? */
        goto cleanup;

cleanup:
    /* Delete unicode buffers */
    PyMem_Del (logical);
    PyMem_Del (visual);
    PyMem_Del (visual_utf8);

    return result;
}
CWE-119
26
static inline bool key_is_instantiated(const struct key *key)
{
    return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
        !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
}
CWE-20
0
static int jas_iccgetuint64(jas_stream_t *in, jas_iccuint64_t *val)
{
    ulonglong tmp;
    if (jas_iccgetuint(in, 8, &tmp))
        return -1;
    *val = tmp;
    return 0;
}
CWE-20
0
test_compressed_stream_overflow (xd3_stream *stream, int ignore)
{
    int ret;
    int i;
    uint8_t *buf;

    if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; }

    memset (buf, 0, TWO_MEGS_AND_DELTA);
    for (i = 0; i < (2 << 20); i += 256)
    {
        int j;
        int off = mt_random(& static_mtrand) % 10;
        for (j = 0; j < 256; j++) { buf[i + j] = j + off; }
    }

    /* Test overflow of a 32-bit file offset. */
    if (SIZEOF_XOFF_T == 4)
    {
        ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20),
                              (1 << 12) + 1);

        if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow"))
        {
            ret = 0;
        }
        else
        {
            XPR(NT XD3_LIB_ERRMSG (stream, ret));
            stream->msg = "expected overflow condition";
            ret = XD3_INTERNAL;
            goto fail;
        }
    }

    /* Test transfer of exactly 32bits worth of data. */
    if ((ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20),
                               1 << 12)))
    {
        goto fail;
    }

fail:
    free (buf);
    return ret;
}
CWE-119
26
void lpc546xxEthInitDmaDesc(NetInterface *interface)
{
    uint_t i;

    //Initialize TX DMA descriptor list
    for(i = 0; i < LPC546XX_ETH_TX_BUFFER_COUNT; i++)
    {
        //The descriptor is initially owned by the application
        txDmaDesc[i].tdes0 = 0;
        txDmaDesc[i].tdes1 = 0;
        txDmaDesc[i].tdes2 = 0;
        txDmaDesc[i].tdes3 = 0;
    }

    //Initialize TX descriptor index
    txIndex = 0;

    //Initialize RX DMA descriptor list
    for(i = 0; i < LPC546XX_ETH_RX_BUFFER_COUNT; i++)
    {
        //The descriptor is initially owned by the DMA
        rxDmaDesc[i].rdes0 = (uint32_t) rxBuffer[i];
        rxDmaDesc[i].rdes1 = 0;
        rxDmaDesc[i].rdes2 = 0;
        rxDmaDesc[i].rdes3 = ENET_RDES3_OWN | ENET_RDES3_IOC | ENET_RDES3_BUF1V;
    }

    //Initialize RX descriptor index
    rxIndex = 0;

    //Start location of the TX descriptor list
    ENET->DMA_CH[0].DMA_CHX_TXDESC_LIST_ADDR = (uint32_t) &txDmaDesc[0];
    //Length of the transmit descriptor ring
    ENET->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH = LPC546XX_ETH_TX_BUFFER_COUNT - 1;

    //Start location of the RX descriptor list
    ENET->DMA_CH[0].DMA_CHX_RXDESC_LIST_ADDR = (uint32_t) &rxDmaDesc[0];
    //Length of the receive descriptor ring
    ENET->DMA_CH[0].DMA_CHX_RXDESC_RING_LENGTH = LPC546XX_ETH_RX_BUFFER_COUNT - 1;
}
CWE-20
0
int inet_sk_rebuild_header(struct sock *sk)
{
    struct inet_sock *inet = inet_sk(sk);
    struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
    __be32 daddr;
    int err;

    /* Route is OK, nothing to do. */
    if (rt)
        return 0;

    /* Reroute. */
    daddr = inet->inet_daddr;
    if (inet->opt && inet->opt->srr)
        daddr = inet->opt->faddr;
    rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
                               inet->inet_dport, inet->inet_sport,
                               sk->sk_protocol, RT_CONN_FLAGS(sk),
                               sk->sk_bound_dev_if);
    if (!IS_ERR(rt)) {
        err = 0;
        sk_setup_caps(sk, &rt->dst);
    } else {
        err = PTR_ERR(rt);

        /* Routing failed... */
        sk->sk_route_caps = 0;
        /*
         * Other protocols have to map its equivalent state to TCP_SYN_SENT.
         * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
         */
        if (!sysctl_ip_dynaddr ||
            sk->sk_state != TCP_SYN_SENT ||
            (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
            (err = inet_sk_reselect_saddr(sk)) != 0)
            sk->sk_err_soft = -err;
    }

    return err;
}
CWE-362
18
static inline void perf_event_task_sched_out(struct task_struct *task,
                                             struct task_struct *next)
{
    perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

    __perf_event_task_sched_out(task, next);
}
CWE-400
2
static inline void fsnotify_oldname_free(const unsigned char *old_name)
{
    kfree(old_name);
}
CWE-362
18
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
    enum direction dir = decode_direction(insn);
    int size = decode_access_size(insn);

    if(!ok_for_kernel(insn) || dir == both) {
        printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
               regs->pc);
        unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
    } else {
        unsigned long addr = compute_effective_address(regs, insn);
        int err;

        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
        switch (dir) {
        case load:
            err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
                              size, (unsigned long *) addr,
                              decode_signedness(insn));
            break;

        case store:
            err = do_int_store(((insn>>25)&0x1f), size,
                               (unsigned long *) addr, regs);
            break;
        default:
            panic("Impossible kernel unaligned trap.");
            /* Not reached... */
        }
        if (err)
            kernel_mna_trap_fault(regs, insn);
        else
            advance(regs);
    }
}
CWE-400
2
static int __f2fs_set_acl(struct inode *inode, int type,
                          struct posix_acl *acl, struct page *ipage)
{
    int name_index;
    void *value = NULL;
    size_t size = 0;
    int error;

    switch (type) {
    case ACL_TYPE_ACCESS:
        name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
        if (acl) {
            error = posix_acl_equiv_mode(acl, &inode->i_mode);
            if (error < 0)
                return error;
            set_acl_inode(inode, inode->i_mode);
            if (error == 0)
                acl = NULL;
        }
        break;

    case ACL_TYPE_DEFAULT:
        name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
        if (!S_ISDIR(inode->i_mode))
            return acl ? -EACCES : 0;
        break;

    default:
        return -EINVAL;
    }

    if (acl) {
        value = f2fs_acl_to_disk(acl, &size);
        if (IS_ERR(value)) {
            clear_inode_flag(inode, FI_ACL_MODE);
            return (int)PTR_ERR(value);
        }
    }

    error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);

    kfree(value);
    if (!error)
        set_cached_acl(inode, type, acl);

    clear_inode_flag(inode, FI_ACL_MODE);
    return error;
}
CWE-285
23
process_plane(uint8 * in, int width, int height, uint8 * out, int size)
{
    UNUSED(size);
    int indexw;
    int indexh;
    int code;
    int collen;
    int replen;
    int color;
    int x;
    int revcode;
    uint8 * last_line;
    uint8 * this_line;
    uint8 * org_in;
    uint8 * org_out;

    org_in = in;
    org_out = out;
    last_line = 0;
    indexh = 0;
    while (indexh < height)
    {
        out = (org_out + width * height * 4) - ((indexh + 1) * width * 4);
        color = 0;
        this_line = out;
        indexw = 0;
        if (last_line == 0)
        {
            while (indexw < width)
            {
                code = CVAL(in);
                replen = code & 0xf;
                collen = (code >> 4) & 0xf;
                revcode = (replen << 4) | collen;
                if ((revcode <= 47) && (revcode >= 16))
                {
                    replen = revcode;
                    collen = 0;
                }
                while (collen > 0)
                {
                    color = CVAL(in);
                    *out = color;
                    out += 4;
                    indexw++;
                    collen--;
                }
                while (replen > 0)
                {
                    *out = color;
                    out += 4;
                    indexw++;
                    replen--;
                }
            }
        }
        else
        {
            while (indexw < width)
            {
                code = CVAL(in);
                replen = code & 0xf;
                collen = (code >> 4) & 0xf;
                revcode = (replen << 4) | collen;
                if ((revcode <= 47) && (revcode >= 16))
                {
                    replen = revcode;
                    collen = 0;
                }
                while (collen > 0)
                {
                    x = CVAL(in);
                    if (x & 1)
                    {
                        x = x >> 1;
                        x = x + 1;
                        color = -x;
                    }
                    else
                    {
                        x = x >> 1;
                        color = x;
                    }
                    x = last_line[indexw * 4] + color;
                    *out = x;
                    out += 4;
                    indexw++;
                    collen--;
                }
                while (replen > 0)
                {
                    x = last_line[indexw * 4] + color;
                    *out = x;
                    out += 4;
                    indexw++;
                    replen--;
                }
            }
        }
        indexh++;
        last_line = this_line;
    }
    return (int) (in - org_in);
}
CWE-119
26
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st)
{
    struct blkif_response resp;
    unsigned long flags;
    union blkif_back_rings *blk_rings;
    int notify;

    resp.id = id;
    resp.operation = op;
    resp.status = st;

    spin_lock_irqsave(&ring->blk_ring_lock, flags);
    blk_rings = &ring->blk_rings;
    /* Place on the response ring for the relevant domain. */
    switch (ring->blkif->blk_protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(RING_GET_RESPONSE(&blk_rings->native,
                                 blk_rings->native.rsp_prod_pvt),
               &resp, sizeof(resp));
        break;
    case BLKIF_PROTOCOL_X86_32:
        memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
                                 blk_rings->x86_32.rsp_prod_pvt),
               &resp, sizeof(resp));
        break;
    case BLKIF_PROTOCOL_X86_64:
        memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
                                 blk_rings->x86_64.rsp_prod_pvt),
               &resp, sizeof(resp));
        break;
    default:
        BUG();
    }
    blk_rings->common.rsp_prod_pvt++;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
    spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
    if (notify)
        notify_remote_via_irq(ring->irq);
}
CWE-200
10
static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
            struct rf_tech_specific_params_nfcf_poll *nfcf_poll, __u8 *data)
{
    nfcf_poll->bit_rate = *data++;
    nfcf_poll->sensf_res_len = *data++;

    pr_debug("bit_rate %d, sensf_res_len %d\n",
             nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);

    memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len);
    data += nfcf_poll->sensf_res_len;

    return data;
}
CWE-119
26
void enc624j600WritePhyReg(NetInterface *interface, uint8_t address,
    uint16_t data)
{
    //Write the address of the PHY register to write to
    enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR,
        MIREGADR_R8 | address);

    //Write the 16 bits of data into the MIWR register
    enc624j600WriteReg(interface, ENC624J600_REG_MIWR, data);

    //Wait until the PHY register has been written
    while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) & MISTAT_BUSY) != 0)
    {
    }
}
CWE-20
0
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
                         size_t sec_attr_len)
{
    u8 *tmp;

    if (!sc_file_valid(file)) {
        return SC_ERROR_INVALID_ARGUMENTS;
    }

    if (sec_attr == NULL) {
        if (file->sec_attr != NULL)
            free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return 0;
    }

    tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
    if (!tmp) {
        if (file->sec_attr)
            free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return SC_ERROR_OUT_OF_MEMORY;
    }

    file->sec_attr = tmp;
    memcpy(file->sec_attr, sec_attr, sec_attr_len);
    file->sec_attr_len = sec_attr_len;

    return 0;
}
CWE-119
26
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    int copied;
    int rc;

    pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);

    skb = skb_recv_datagram(sk, flags, noblock, &rc);
    if (!skb)
        return rc;

    msg->msg_namelen = 0;

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

    skb_free_datagram(sk, skb);

    return rc ? : copied;
}
CWE-20
0
static int parse_token(char **name, char **value, char **cp)
{
    char *end;

    if (!name || !value || !cp)
        return -BLKID_ERR_PARAM;

    if (!(*value = strchr(*cp, '=')))
        return 0;

    **value = '\0';
    *name = strip_line(*cp);
    *value = skip_over_blank(*value + 1);

    if (**value == '"') {
        end = strchr(*value + 1, '"');
        if (!end) {
            DBG(READ, ul_debug("unbalanced quotes at: %s", *value));
            *cp = *value;
            return -BLKID_ERR_CACHE;
        }
        (*value)++;
        *end = '\0';
        end++;
    } else {
        end = skip_over_word(*value);
        if (*end) {
            *end = '\0';
            end++;
        }
    }
    *cp = end;

    return 1;
}
CWE-77
14
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
    if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                      1, 0, regs, 0);
        return 0;
    }

    return -1; /* Must be something else ... */
}
CWE-400
2
static struct desc_struct *get_desc(unsigned short sel)
{
    struct desc_ptr gdt_desc = {0, 0};
    unsigned long desc_base;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
    if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
        struct desc_struct *desc = NULL;
        struct ldt_struct *ldt;

        /* Bits [15:3] contain the index of the desired entry. */
        sel >>= 3;

        mutex_lock(&current->active_mm->context.lock);
        ldt = current->active_mm->context.ldt;
        if (ldt && sel < ldt->nr_entries)
            desc = &ldt->entries[sel];

        mutex_unlock(&current->active_mm->context.lock);

        return desc;
    }
#endif
    native_store_gdt(&gdt_desc);

    /*
     * Segment descriptors have a size of 8 bytes. Thus, the index is
     * multiplied by 8 to obtain the memory offset of the desired descriptor
     * from the base of the GDT. As bits [15:3] of the segment selector
     * contain the index, it can be regarded as multiplied by 8 already.
     * All that remains is to clear bits [2:0].
     */
    desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);

    if (desc_base > gdt_desc.size)
        return NULL;

    return (struct desc_struct *)(gdt_desc.address + desc_base);
}
CWE-362
18
static void perf_event_mmap_output(struct perf_event *event,
                                   struct perf_mmap_event *mmap_event)
{
    struct perf_output_handle handle;
    struct perf_sample_data sample;
    int size = mmap_event->event_id.header.size;
    int ret;

    perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
    ret = perf_output_begin(&handle, event,
                            mmap_event->event_id.header.size, 0, 0);
    if (ret)
        goto out;

    mmap_event->event_id.pid = perf_event_pid(event, current);
    mmap_event->event_id.tid = perf_event_tid(event, current);

    perf_output_put(&handle, mmap_event->event_id);
    __output_copy(&handle, mmap_event->file_name,
                  mmap_event->file_size);

    perf_event__output_id_sample(event, &handle, &sample);

    perf_output_end(&handle);
out:
    mmap_event->event_id.header.size = size;
}
CWE-400
2
static int snd_timer_start_slave(struct snd_timer_instance *timeri)
{
    unsigned long flags;

    spin_lock_irqsave(&slave_active_lock, flags);
    timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
    if (timeri->master)
        list_add_tail(&timeri->active_list,
                      &timeri->master->slave_active_head);
    spin_unlock_irqrestore(&slave_active_lock, flags);
    return 1; /* delayed start */
}
CWE-362
18
static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int optlen)
{
    struct sock *sk = sock->sk;
    struct l2tp_session *session;
    struct l2tp_tunnel *tunnel;
    struct pppol2tp_session *ps;
    int val;
    int err;

    if (level != SOL_PPPOL2TP)
        return udp_prot.setsockopt(sk, level, optname, optval, optlen);

    if (optlen < sizeof(int))
        return -EINVAL;

    if (get_user(val, (int __user *)optval))
        return -EFAULT;

    err = -ENOTCONN;
    if (sk->sk_user_data == NULL)
        goto end;

    /* Get session context from the socket */
    err = -EBADF;
    session = pppol2tp_sock_to_session(sk);
    if (session == NULL)
        goto end;

    /* Special case: if session_id == 0x0000, treat as operation on tunnel */
    ps = l2tp_session_priv(session);
    if ((session->session_id == 0) &&
        (session->peer_session_id == 0)) {
        err = -EBADF;
        tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
        if (tunnel == NULL)
            goto end_put_sess;

        err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
        sock_put(ps->tunnel_sock);
    } else
        err = pppol2tp_session_setsockopt(sk, session, optname, val);

    err = 0;

end_put_sess:
    sock_put(sk);
end:
    return err;
}
CWE-269
6
static int sock_close(struct inode *inode, struct file *filp)
{
    sock_release(SOCKET_I(inode));
    return 0;
}
CWE-362
18
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnl_link_ifmap map = {
        .mem_start = dev->mem_start,
        .mem_end   = dev->mem_end,
        .base_addr = dev->base_addr,
        .irq       = dev->irq,
        .dma       = dev->dma,
        .port      = dev->if_port,
    };
    if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
        return -EMSGSIZE;

    return 0;
}
CWE-200
10
GetCode_(gdIOCtx *fd, CODE_STATIC_DATA *scd, int code_size, int flag,
         int *ZeroDataBlockP)
{
    int i, j, ret;
    unsigned char count;

    if(flag) {
        scd->curbit = 0;
        scd->lastbit = 0;
        scd->last_byte = 0;
        scd->done = FALSE;
        return 0;
    }

    if((scd->curbit + code_size) >= scd->lastbit) {
        if(scd->done) {
            if(scd->curbit >= scd->lastbit) {
                /* Oh well */
            }
            return -1;
        }
        scd->buf[0] = scd->buf[scd->last_byte - 2];
        scd->buf[1] = scd->buf[scd->last_byte - 1];

        if((count = GetDataBlock(fd, &scd->buf[2], ZeroDataBlockP)) <= 0) {
            scd->done = TRUE;
        }

        scd->last_byte = 2 + count;
        scd->curbit = (scd->curbit - scd->lastbit) + 16;
        scd->lastbit = (2 + count) * 8;
    }

    ret = 0;
    for (i = scd->curbit, j = 0; j < code_size; ++i, ++j) {
        ret |= ((scd->buf[i / 8] & (1 << (i % 8))) != 0) << j;
    }

    scd->curbit += code_size;

    return ret;
}
CWE-119
26
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
    int r = EMULATE_DONE;

    ++vcpu->stat.insn_emulation_fail;
    trace_kvm_emulate_insn_failed(vcpu);
    if (!is_guest_mode(vcpu)) {
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
        vcpu->run->internal.ndata = 0;
        r = EMULATE_FAIL;
    }
    kvm_queue_exception(vcpu, UD_VECTOR);

    return r;
}
CWE-362
18
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    int copied;
    int rc;

    pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);

    skb = skb_recv_datagram(sk, flags, noblock, &rc);
    if (!skb)
        return rc;

    msg->msg_namelen = 0;

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

    skb_free_datagram(sk, skb);

    return rc ? : copied;
}
CWE-20
0
static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *offp)
{
    struct f_hidg *hidg = file->private_data;
    struct usb_request *req;
    unsigned long flags;
    ssize_t status = -ENOMEM;

    if (!access_ok(buffer, count))
        return -EFAULT;

    spin_lock_irqsave(&hidg->write_spinlock, flags);

#define WRITE_COND (!hidg->write_pending)
try_again:
    /* write queue */
    while (!WRITE_COND) {
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
        if (file->f_flags & O_NONBLOCK)
            return -EAGAIN;

        if (wait_event_interruptible_exclusive(
                hidg->write_queue, WRITE_COND))
            return -ERESTARTSYS;

        spin_lock_irqsave(&hidg->write_spinlock, flags);
    }

    hidg->write_pending = 1;
    req = hidg->req;
    count = min_t(unsigned, count, hidg->report_length);

    spin_unlock_irqrestore(&hidg->write_spinlock, flags);
    status = copy_from_user(req->buf, buffer, count);

    if (status != 0) {
        ERROR(hidg->func.config->cdev,
              "copy_from_user error\n");
        status = -EINVAL;
        goto release_write_pending;
    }

    spin_lock_irqsave(&hidg->write_spinlock, flags);

    /* when our function has been disabled by host */
    if (!hidg->req) {
        free_ep_req(hidg->in_ep, req);
        /*
         * TODO
         * Should we fail with error here?
         */
        goto try_again;
    }

    req->status = 0;
    req->zero = 0;
    req->length = count;
    req->complete = f_hidg_req_complete;
    req->context = hidg;

    status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
    if (status < 0) {
        ERROR(hidg->func.config->cdev,
              "usb_ep_queue error on int endpoint %zd\n", status);
        goto release_write_pending_unlocked;
    } else {
        status = count;
    }
    spin_unlock_irqrestore(&hidg->write_spinlock, flags);

    return status;
release_write_pending:
    spin_lock_irqsave(&hidg->write_spinlock, flags);
release_write_pending_unlocked:
    hidg->write_pending = 0;
    spin_unlock_irqrestore(&hidg->write_spinlock, flags);

    wake_up(&hidg->write_queue);

    return status;
}
CWE-667
27
static void unqueue_me_pi(struct futex_q *q)
{
    WARN_ON(plist_node_empty(&q->list));
    plist_del(&q->list, &q->list.plist);

    BUG_ON(!q->pi_state);
    free_pi_state(q->pi_state);
    q->pi_state = NULL;

    spin_unlock(q->lock_ptr);

    drop_futex_key_refs(&q->key);
}
CWE-119
26
void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride,
                        int parity)
{
    int bufsize = JPC_CEILDIVPOW2(numrows, 1);
    jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE];
    jpc_fix_t *buf = splitbuf;
    register jpc_fix_t *srcptr;
    register jpc_fix_t *dstptr;
    register int n;
    register int m;
    int hstartcol;

    /* Get a buffer. */
    if (bufsize > QMFB_SPLITBUFSIZE) {
        if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) {
            /* We have no choice but to commit suicide in this case. */
            abort();
        }
    }

    if (numrows >= 2) {
        hstartcol = (numrows + 1 - parity) >> 1;
        // ORIGINAL (WRONG): m = (parity) ? hstartcol : (numrows - hstartcol);
        m = numrows - hstartcol;

        /* Save the samples destined for the highpass channel. */
        n = m;
        dstptr = buf;
        srcptr = &a[(1 - parity) * stride];
        while (n-- > 0) {
            *dstptr = *srcptr;
            ++dstptr;
            srcptr += stride << 1;
        }
        /* Copy the appropriate samples into the lowpass channel. */
        dstptr = &a[(1 - parity) * stride];
        srcptr = &a[(2 - parity) * stride];
        n = numrows - m - (!parity);
        while (n-- > 0) {
            *dstptr = *srcptr;
            dstptr += stride;
            srcptr += stride << 1;
        }
        /* Copy the saved samples into the highpass channel. */
        dstptr = &a[hstartcol * stride];
        srcptr = buf;
        n = m;
        while (n-- > 0) {
            *dstptr = *srcptr;
            dstptr += stride;
            ++srcptr;
        }
    }

    /* If the split buffer was allocated on the heap, free this memory. */
    if (buf != splitbuf) {
        jas_free(buf);
    }
}
CWE-119
26
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen,
                 unsigned char *out, size_t * out_len)
{
    size_t cipher_len;
    size_t i;
    unsigned char iv[16] = { 0 };
    unsigned char plaintext[4096] = { 0 };
    epass2003_exdata *exdata = NULL;

    if (!card->drv_data)
        return SC_ERROR_INVALID_ARGUMENTS;

    exdata = (epass2003_exdata *)card->drv_data;

    /* no cipher */
    if (in[0] == 0x99)
        return 0;

    /* parse cipher length */
    if (0x01 == in[2] && 0x82 != in[1]) {
        cipher_len = in[1];
        i = 3;
    }
    else if (0x01 == in[3] && 0x81 == in[1]) {
        cipher_len = in[2];
        i = 4;
    }
    else if (0x01 == in[4] && 0x82 == in[1]) {
        cipher_len = in[2] * 0x100;
        cipher_len += in[3];
        i = 5;
    }
    else {
        return -1;
    }

    if (cipher_len < 2 || i + cipher_len > inlen || cipher_len > sizeof plaintext)
        return -1;

    /* decrypt */
    if (KEY_TYPE_AES == exdata->smtype)
        aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1,
                           plaintext);
    else
        des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1,
                         plaintext);

    /* unpadding */
    while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
        cipher_len--;

    if (2 == cipher_len)
        return -1;

    memcpy(out, plaintext, cipher_len - 2);
    *out_len = cipher_len - 2;

    return 0;
}
CWE-119
26
mm_zfree(struct mm_master *mm, void *address)
{
    mm_free(mm, address);
}
CWE-119
26
int handle_popc(u32 insn, struct pt_regs *regs)
{
    u64 value;
    int ret, i, rd = ((insn >> 25) & 0x1f);
    int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    if (insn & 0x2000) {
        maybe_flush_windows(0, 0, rd, from_kernel);
        value = sign_extend_imm13(insn);
    } else {
        maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
        value = fetch_reg(insn & 0x1f, regs);
    }
    for (ret = 0, i = 0; i < 16; i++) {
        ret += popc_helper[value & 0xf];
        value >>= 4;
    }
    if (rd < 16) {
        if (rd)
            regs->u_regs[rd] = ret;
    } else {
        if (test_thread_flag(TIF_32BIT)) {
            struct reg_window32 __user *win32;
            win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
            put_user(ret, &win32->locals[rd - 16]);
        } else {
            struct reg_window __user *win;
            win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
            put_user(ret, &win->locals[rd - 16]);
        }
    }
    advance(regs);
    return 1;
}
CWE-400
2
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
                            struct msghdr *msg, size_t ignored, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct skcipher_ctx *ctx = ask->private;
    unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
        &ctx->req));
    struct skcipher_sg_list *sgl;
    struct scatterlist *sg;
    unsigned long iovlen;
    struct iovec *iov;
    int err = -EAGAIN;
    int used;
    long copied = 0;

    lock_sock(sk);
    msg->msg_namelen = 0;
    for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
         iovlen--, iov++) {
        unsigned long seglen = iov->iov_len;
        char __user *from = iov->iov_base;

        while (seglen) {
            sgl = list_first_entry(&ctx->tsgl,
                                   struct skcipher_sg_list, list);
            sg = sgl->sg;

            while (!sg->length)
                sg++;

            used = ctx->used;
            if (!used) {
                err = skcipher_wait_for_data(sk, flags);
                if (err)
                    goto unlock;
            }

            used = min_t(unsigned long, used, seglen);

            used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
            err = used;
            if (err < 0)
                goto unlock;

            if (ctx->more || used < ctx->used)
                used -= used % bs;

            err = -EINVAL;
            if (!used)
                goto free;

            ablkcipher_request_set_crypt(&ctx->req, sg,
                                         ctx->rsgl.sg, used,
                                         ctx->iv);

            err = af_alg_wait_for_completion(
                ctx->enc ?
                    crypto_ablkcipher_encrypt(&ctx->req) :
                    crypto_ablkcipher_decrypt(&ctx->req),
                &ctx->completion);

free:
            af_alg_free_sg(&ctx->rsgl);

            if (err)
                goto unlock;

            copied += used;
            from += used;
            seglen -= used;
            skcipher_pull_sgl(sk, used);
        }
    }

    err = 0;

unlock:
    skcipher_wmem_wakeup(sk);
    release_sock(sk);

    return copied ?: err;
}
CWE-20
0
static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                               int num)
{
    struct dvb_usb_device *d = i2c_get_adapdata(adap);
    u8 obuf[0x40], ibuf[0x40];

    if (!d)
        return -ENODEV;
    if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
        return -EAGAIN;

    switch (num) {
    case 1:
        switch (msg[0].addr) {
        case SU3000_STREAM_CTRL:
            obuf[0] = msg[0].buf[0] + 0x36;
            obuf[1] = 3;
            obuf[2] = 0;
            if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
                err("i2c transfer failed.");
            break;
        case DW2102_RC_QUERY:
            obuf[0] = 0x10;
            if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
                err("i2c transfer failed.");
            msg[0].buf[1] = ibuf[0];
            msg[0].buf[0] = ibuf[1];
            break;
        default:
            /* always i2c write*/
            obuf[0] = 0x08;
            obuf[1] = msg[0].addr;
            obuf[2] = msg[0].len;

            memcpy(&obuf[3], msg[0].buf, msg[0].len);

            if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
                                   ibuf, 1, 0) < 0)
                err("i2c transfer failed.");
        }
        break;
    case 2:
        /* always i2c read */
        obuf[0] = 0x09;
        obuf[1] = msg[0].len;
        obuf[2] = msg[1].len;
        obuf[3] = msg[0].addr;
        memcpy(&obuf[4], msg[0].buf, msg[0].len);

        if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
                               ibuf, msg[1].len + 1, 0) < 0)
            err("i2c transfer failed.");

        memcpy(msg[1].buf, &ibuf[1], msg[1].len);
        break;
    default:
        warn("more than 2 i2c messages at a time is not handled yet.");
        break;
    }
    mutex_unlock(&d->i2c_mutex);
    return num;
}
CWE-119
26
void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset) {
    bloc = *offset;
    while (node && node->symbol == INTERNAL_NODE) {
        if (get_bit(fin)) {
            node = node->right;
        } else {
            node = node->left;
        }
    }
    if (!node) {
        *ch = 0;
        return;
//      Com_Error(ERR_DROP, "Illegal tree!");
    }
    *ch = node->symbol;
    *offset = bloc;
}
CWE-119
26
static void oidc_scrub_headers(request_rec *r) {
    oidc_cfg *cfg = ap_get_module_config(r->server->module_config,
                                         &auth_openidc_module);

    if (cfg->scrub_request_headers != 0) {

        /* scrub all headers starting with OIDC_ first */
        oidc_scrub_request_headers(r, OIDC_DEFAULT_HEADER_PREFIX,
                                   oidc_cfg_dir_authn_header(r));

        /*
         * then see if the claim headers need to be removed on top of that
         * (i.e. the prefix does not start with the default OIDC_)
         */
        if ((strstr(cfg->claim_prefix, OIDC_DEFAULT_HEADER_PREFIX)
             != cfg->claim_prefix)) {
            oidc_scrub_request_headers(r, cfg->claim_prefix, NULL);
        }
    }
}
CWE-287
4
static int read_public_key(RSA *rsa)
{
    int r;
    sc_path_t path;
    sc_file_t *file;
    u8 buf[2048], *p = buf;
    size_t bufsize, keysize;

    r = select_app_df();
    if (r)
        return 1;
    sc_format_path("I1012", &path);
    r = sc_select_file(card, &path, &file);
    if (r) {
        fprintf(stderr, "Unable to select public key file: %s\n",
                sc_strerror(r));
        return 2;
    }
    bufsize = file->size;
    sc_file_free(file);
    r = sc_read_binary(card, 0, buf, bufsize, 0);
    if (r < 0) {
        fprintf(stderr, "Unable to read public key file: %s\n",
                sc_strerror(r));
        return 2;
    }
    bufsize = r;
    do {
        if (bufsize < 4)
            return 3;
        keysize = (p[0] << 8) | p[1];
        if (keysize == 0)
            break;
        if (keysize < 3)
            return 3;
        if (p[2] == opt_key_num)
            break;
        p += keysize;
        bufsize -= keysize;
    } while (1);
    if (keysize == 0) {
        printf("Key number %d not found.\n", opt_key_num);
        return 2;
    }
    return parse_public_key(p, keysize, rsa);
}
CWE-119
26
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
                      struct msghdr *msg, size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
    size_t copied;
    struct sk_buff *skb;
    int er;

    /*
     * This works for seqpacket too. The receiver has ordered the queue for
     * us! We do one quick check first though
     */
    lock_sock(sk);
    if (sk->sk_state != TCP_ESTABLISHED) {
        release_sock(sk);
        return -ENOTCONN;
    }

    /* Now we can treat all alike */
    if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                 flags & MSG_DONTWAIT, &er)) == NULL) {
        release_sock(sk);
        return er;
    }

    skb_reset_transport_header(skb);
    copied = skb->len;

    if (copied > size) {
        copied = size;
        msg->msg_flags |= MSG_TRUNC;
    }

    er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (er < 0) {
        skb_free_datagram(sk, skb);
        release_sock(sk);
        return er;
    }

    if (sax != NULL) {
        memset(sax, 0, sizeof(*sax));
        sax->sax25_family = AF_NETROM;
        skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
                                         AX25_ADDR_LEN);
    }

    msg->msg_namelen = sizeof(*sax);

    skb_free_datagram(sk, skb);

    release_sock(sk);
    return copied;
}
CWE-20
0
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
                         void *data)
{
    int ret;

    ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                          PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
                          indx, data, size, 100);
    if (ret < 0)
        netif_dbg(pegasus, drv, pegasus->net,
                  "%s returned %d\n", __func__, ret);
    return ret;
}
CWE-119
26
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                    struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    size_t copied;
    int err;

    BT_DBG("sock %p sk %p len %zu", sock, sk, len);

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb) {
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
            msg->msg_namelen = 0;
            return 0;
        }
        return err;
    }

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err == 0) {
        sock_recv_ts_and_drops(msg, sk, skb);

        if (bt_sk(sk)->skb_msg_name)
            bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
                                    &msg->msg_namelen);
        else
            msg->msg_namelen = 0;
    }

    skb_free_datagram(sk, skb);

    return err ? : copied;
}
CWE-20
0
fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc)
{
    tmsize_t stride = PredictorState(tif)->stride;
    uint32 bps = tif->tif_dir.td_bitspersample / 8;
    tmsize_t wc = cc / bps;
    tmsize_t count;
    uint8 *cp = (uint8 *) cp0;
    uint8 *tmp = (uint8 *)_TIFFmalloc(cc);

    if((cc%(bps*stride))!=0)
    {
        TIFFErrorExt(tif->tif_clientdata, "fpDiff",
                     "%s", "(cc%(bps*stride))!=0");
        return 0;
    }

    if (!tmp)
        return 0;

    _TIFFmemcpy(tmp, cp0, cc);
    for (count = 0; count < wc; count++) {
        uint32 byte;
        for (byte = 0; byte < bps; byte++) {
#if WORDS_BIGENDIAN
            cp[byte * wc + count] = tmp[bps * count + byte];
#else
            cp[(bps - byte - 1) * wc + count] =
                tmp[bps * count + byte];
#endif
        }
    }
    _TIFFfree(tmp);

    cp = (uint8 *) cp0;
    cp += cc - stride - 1;
    for (count = cc; count > stride; count -= stride)
        REPEAT4(stride,
                cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff);
                cp--)
    return 1;
}
CWE-119
26
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    int result;
    handle_t *handle = NULL;
    struct super_block *sb = file_inode(vma->vm_file)->i_sb;
    bool write = vmf->flags & FAULT_FLAG_WRITE;

    if (write) {
        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                       EXT4_DATA_TRANS_BLOCKS(sb));
    }

    if (IS_ERR(handle))
        result = VM_FAULT_SIGBUS;
    else
        result = __dax_fault(vma, vmf, ext4_get_block_dax,
                             ext4_end_io_unwritten);

    if (write) {
        if (!IS_ERR(handle))
            ext4_journal_stop(handle);
        sb_end_pagefault(sb);
    }

    return result;
}
CWE-362
18
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
                        struct sockaddr_storage *kern_address, int mode)
{
    int tot_len;

    if (kern_msg->msg_namelen) {
        if (mode == VERIFY_READ) {
            int err = move_addr_to_kernel(kern_msg->msg_name,
                                          kern_msg->msg_namelen,
                                          kern_address);
            if (err < 0)
                return err;
        }
        kern_msg->msg_name = kern_address;
    } else
        kern_msg->msg_name = NULL;

    tot_len = iov_from_user_compat_to_kern(kern_iov,
                   (struct compat_iovec __user *)kern_msg->msg_iov,
                   kern_msg->msg_iovlen);
    if (tot_len >= 0)
        kern_msg->msg_iov = kern_iov;

    return tot_len;
}
CWE-20
0
void dm9000EventHandler(NetInterface *interface)
{
    error_t error;
    uint8_t status;

    //Read interrupt status register
    status = dm9000ReadReg(DM9000_REG_ISR);

    //Check whether the link status has changed?
    if((status & ISR_LNKCHG) != 0)
    {
        //Clear interrupt flag
        dm9000WriteReg(DM9000_REG_ISR, ISR_LNKCHG);

        //Read network status register
        status = dm9000ReadReg(DM9000_REG_NSR);

        //Check link state
        if((status & NSR_LINKST) != 0)
        {
            //Get current speed
            if((status & NSR_SPEED) != 0)
            {
                interface->linkSpeed = NIC_LINK_SPEED_10MBPS;
            }
            else
            {
                interface->linkSpeed = NIC_LINK_SPEED_100MBPS;
            }

            //Read network control register
            status = dm9000ReadReg(DM9000_REG_NCR);

            //Determine the new duplex mode
            if((status & NCR_FDX) != 0)
            {
                interface->duplexMode = NIC_FULL_DUPLEX_MODE;
            }
            else
            {
                interface->duplexMode = NIC_HALF_DUPLEX_MODE;
            }

            //Link is up
            interface->linkState = TRUE;
        }
        else
        {
            //Link is down
            interface->linkState = FALSE;
        }

        //Process link state change event
        nicNotifyLinkChange(interface);
    }

    //Check whether a packet has been received?
    if((status & ISR_PR) != 0)
    {
        //Clear interrupt flag
        dm9000WriteReg(DM9000_REG_ISR, ISR_PR);

        //Process all pending packets
        do
        {
            //Read incoming packet
            error = dm9000ReceivePacket(interface);

            //No more data in the receive buffer?
        } while(error != ERROR_BUFFER_EMPTY);
    }

    //Re-enable LNKCHGI and PRI interrupts
    dm9000WriteReg(DM9000_REG_IMR, IMR_PAR | IMR_LNKCHGI | IMR_PTI | IMR_PRI);
}
CWE-20
0
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                    struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    size_t copied;
    int err;

    BT_DBG("sock %p sk %p len %zu", sock, sk, len);

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb) {
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
            msg->msg_namelen = 0;
            return 0;
        }
        return err;
    }

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err == 0) {
        sock_recv_ts_and_drops(msg, sk, skb);

        if (bt_sk(sk)->skb_msg_name)
            bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
                                    &msg->msg_namelen);
        else
            msg->msg_namelen = 0;
    }

    skb_free_datagram(sk, skb);

    return err ? : copied;
}
CWE-20
0
int snd_timer_close(struct snd_timer_instance *timeri)
{
    struct snd_timer *timer = NULL;
    struct snd_timer_instance *slave, *tmp;

    if (snd_BUG_ON(!timeri))
        return -ENXIO;

    /* force to stop the timer */
    snd_timer_stop(timeri);

    if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
        /* wait, until the active callback is finished */
        spin_lock_irq(&slave_active_lock);
        while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
            spin_unlock_irq(&slave_active_lock);
            udelay(10);
            spin_lock_irq(&slave_active_lock);
        }
        spin_unlock_irq(&slave_active_lock);
        mutex_lock(&register_mutex);
        list_del(&timeri->open_list);
        mutex_unlock(&register_mutex);
    } else {
        timer = timeri->timer;
        if (snd_BUG_ON(!timer))
            goto out;
        /* wait, until the active callback is finished */
        spin_lock_irq(&timer->lock);
        while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
            spin_unlock_irq(&timer->lock);
            udelay(10);
            spin_lock_irq(&timer->lock);
        }
        spin_unlock_irq(&timer->lock);
        mutex_lock(&register_mutex);
        list_del(&timeri->open_list);
        if (timer && list_empty(&timer->open_list_head) &&
            timer->hw.close)
            timer->hw.close(timer);
        /* remove slave links */
        list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
                                 open_list) {
            spin_lock_irq(&slave_active_lock);
            _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
            list_move_tail(&slave->open_list, &snd_timer_slave_list);
            slave->master = NULL;
            slave->timer = NULL;
            spin_unlock_irq(&slave_active_lock);
        }
        mutex_unlock(&register_mutex);
    }
out:
    if (timeri->private_free)
        timeri->private_free(timeri);
    kfree(timeri->owner);
    kfree(timeri);
    if (timer)
        module_put(timer->module);
    return 0;
}
CWE-20
0
mm_sshpam_init_ctx(Authctxt *authctxt)
{
    Buffer m;
    int success;

    debug3("%s", __func__);
    buffer_init(&m);
    buffer_put_cstring(&m, authctxt->user);
    mm_request_send(pmonitor->m_recvfd, MONITOR_REQ_PAM_INIT_CTX, &m);
    debug3("%s: waiting for MONITOR_ANS_PAM_INIT_CTX", __func__);
    mm_request_receive_expect(pmonitor->m_recvfd,
        MONITOR_ANS_PAM_INIT_CTX, &m);
    success = buffer_get_int(&m);
    if (success == 0) {
        debug3("%s: pam_init_ctx failed", __func__);
        buffer_free(&m);
        return (NULL);
    }
    buffer_free(&m);
    return (authctxt);
}
CWE-20
0
t1mac_output_ascii(char *s, int len)
{
  if (blocktyp == POST_BINARY) {
    output_current_post();
    blocktyp = POST_ASCII;
  }
  /* Mac line endings */
  if (len > 0 && s[len-1] == '\n')
    s[len-1] = '\r';
  t1mac_output_data((byte *)s, len);
  if (strncmp(s, "/FontName", 9) == 0) {
    for (s += 9; isspace(*s); s++) ;
    if (*s == '/') {
      const char *t = ++s;
      while (*t && !isspace(*t)) t++;
      free(font_name);
      font_name = (char *)malloc(t - s + 1);
      memcpy(font_name, s, t - s);
      font_name[t - s] = 0;
    }
  }
}
CWE-119
26
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
                         size_t sec_attr_len)
{
    u8 *tmp;

    if (!sc_file_valid(file)) {
        return SC_ERROR_INVALID_ARGUMENTS;
    }

    if (sec_attr == NULL) {
        if (file->sec_attr != NULL)
            free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return 0;
    }

    tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
    if (!tmp) {
        if (file->sec_attr)
            free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return SC_ERROR_OUT_OF_MEMORY;
    }
    file->sec_attr = tmp;
    memcpy(file->sec_attr, sec_attr, sec_attr_len);
    file->sec_attr_len = sec_attr_len;

    return 0;
}
CWE-119
26
static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc,
                                            struct pluto_crypto_req *r,
                                            err_t ugh)
{
    struct dh_continuation *dh = (struct dh_continuation *)pcrc;
    struct msg_digest *md = dh->md;
    struct state *const st = md->st;
    stf_status e;

    DBG(DBG_CONTROLMORE,
        DBG_log("ikev2 parent inR1outI2: calculating g^{xy}, sending I2"));

    if (st == NULL) {
        loglog(RC_LOG_SERIOUS,
               "%s: Request was disconnected from state",
               __FUNCTION__);
        if (dh->md)
            release_md(dh->md);
        return;
    }

    /* XXX should check out ugh */
    passert(ugh == NULL);
    passert(cur_state == NULL);
    passert(st != NULL);

    passert(st->st_suspended_md == dh->md);
    set_suspended(st, NULL); /* no longer connected or suspended */

    set_cur_state(st);

    st->st_calculating = FALSE;

    e = ikev2_parent_inR1outI2_tail(pcrc, r);

    if (dh->md != NULL) {
        complete_v2_state_transition(&dh->md, e);
        if (dh->md)
            release_md(dh->md);
    }
    reset_globals();

    passert(GLOBALS_ARE_RESET());
}
CWE-20
0
static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
        struct perf_sample_data *data, struct pt_regs *regs)
{
    struct task_struct *tsk = current;
    int i;

    for (i = 0; i < 4; i++)
        if (breakinfo[i].enabled)
            tsk->thread.debugreg6 |= (DR_TRAP0 << i);
}
CWE-400
2
void luaD_callnoyield (lua_State *L, StkId func, int nResults) {
  incXCcalls(L);
  if (getCcalls(L) <= CSTACKERR)  /* possible stack overflow? */
    luaE_freeCI(L);
  luaD_call(L, func, nResults);
  decXCcalls(L);
}
CWE-119
26
static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
    struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);

    /* if the deadline is ahead of our clock, nothing to do */
    if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
        return;

    if (cfs_rq->runtime_remaining < 0)
        return;

    /*
     * If the local deadline has passed we have to consider the
     * possibility that our sched_clock is 'fast' and the global deadline
     * has not truly expired.
     *
     * Fortunately we can check determine whether this the case by checking
     * whether the global deadline(cfs_b->expires_seq) has advanced.
     */
    if (cfs_rq->expires_seq == cfs_b->expires_seq) {
        /* extend local deadline, drift is bounded above by 2 ticks */
        cfs_rq->runtime_expires += TICK_NSEC;
    } else {
        /* global deadline is ahead, expiration has passed */
        cfs_rq->runtime_remaining = 0;
    }
}
CWE-400
2
void enc28j60SelectBank(NetInterface *interface, uint16_t address)
{
   uint16_t bank;
   Enc28j60Context *context;

   //Point to the driver context
   context = (Enc28j60Context *) interface->nicContext;

   //Get the bank number from the specified address
   bank = address & REG_BANK_MASK;

   //Rewrite the bank number only if a change is detected
   if(bank != context->currentBank)
   {
      //Select specified bank
      switch(bank)
      {
      case BANK_0:
         //Select bank 0
         enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);
         break;
      case BANK_1:
         //Select bank 1
         enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);
         enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);
         break;
      case BANK_2:
         //Select bank 2
         enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);
         enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);
         break;
      case BANK_3:
         //Select bank 3
         enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);
         break;
      default:
         //Invalid bank
         break;
      }

      //Save bank number
      context->currentBank = bank;
   }
}
CWE-20
0
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
    u64 now;

    if (cfs_b->quota == RUNTIME_INF)
        return;

    now = sched_clock_cpu(smp_processor_id());
    cfs_b->runtime = cfs_b->quota;
    cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
    cfs_b->expires_seq++;
}
CWE-400
2
static inline int add_post_vars(zval *arr, post_var_data_t *vars, zend_bool eof TSRMLS_DC)
{
    uint64_t max_vars = PG(max_input_vars);

    vars->ptr = vars->str.c;
    vars->end = vars->str.c + vars->str.len;
    while (add_post_var(arr, vars, eof TSRMLS_CC)) {
        if (++vars->cnt > max_vars) {
            php_error_docref(NULL TSRMLS_CC, E_WARNING,
                    "Input variables exceeded %" PRIu64 ". "
                    "To increase the limit change max_input_vars in php.ini.",
                    max_vars);
            return FAILURE;
        }
    }

    if (!eof) {
        memmove(vars->str.c, vars->ptr, vars->str.len = vars->end - vars->ptr);
    }
    return SUCCESS;
}
CWE-400
2
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
    ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
    shm_rmid(ns, shp);
    shm_unlock(shp);
    if (!is_file_hugepages(shp->shm_file))
        shmem_lock(shp->shm_file, 0, shp->mlock_user);
    else if (shp->mlock_user)
        user_shm_unlock(file_inode(shp->shm_file)->i_size,
                        shp->mlock_user);
    fput (shp->shm_file);
    ipc_rcu_putref(shp, shm_rcu_free);
}
CWE-362
18
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
        struct msghdr *msg, size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    int copied;
    int err = 0;

    lock_sock(sk);
    /*
     * This works for seqpacket too. The receiver has ordered the
     * queue for us! We do one quick check first though
     */
    if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
        err = -ENOTCONN;
        goto out;
    }

    /* Now we can treat all alike */
    skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                            flags & MSG_DONTWAIT, &err);
    if (skb == NULL)
        goto out;

    if (!ax25_sk(sk)->pidincl)
        skb_pull(skb, 1);    /* Remove PID */

    skb_reset_transport_header(skb);
    copied = skb->len;

    if (copied > size) {
        copied = size;
        msg->msg_flags |= MSG_TRUNC;
    }

    skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

    if (msg->msg_namelen != 0) {
        struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
        ax25_digi digi;
        ax25_address src;
        const unsigned char *mac = skb_mac_header(skb);

        memset(sax, 0, sizeof(struct full_sockaddr_ax25));
        ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
                        &digi, NULL, NULL);
        sax->sax25_family = AF_AX25;
        /* We set this correctly, even though we may not let the
           application know the digi calls further down (because it
           did NOT ask to know them). This could get political... **/
        sax->sax25_ndigis = digi.ndigi;
        sax->sax25_call = src;

        if (sax->sax25_ndigis != 0) {
            int ct;
            struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;

            for (ct = 0; ct < digi.ndigi; ct++)
                fsa->fsa_digipeater[ct] = digi.calls[ct];
        }
        msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
    }

    skb_free_datagram(sk, skb);
    err = copied;

out:
    release_sock(sk);

    return err;
}
CWE-20
0
k5_asn1_full_decode(const krb5_data *code, const struct atype_info *a,
                    void **retrep)
{
    krb5_error_code ret;
    const uint8_t *contents, *remainder;
    size_t clen, rlen;
    taginfo t;

    *retrep = NULL;
    ret = get_tag((uint8_t *)code->data, code->length, &t, &contents,
                  &clen, &remainder, &rlen);
    if (ret)
        return ret;
    /* rlen should be 0, but we don't check it (and due to padding in
     * non-length-preserving enctypes, it will sometimes be nonzero). */
    if (!check_atype_tag(a, &t))
        return ASN1_BAD_ID;
    return decode_atype_to_ptr(&t, contents, clen, a, retrep);
}
CWE-674
28
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
    muscle_private_t* priv = MUSCLE_DATA(card);
    mscfs_t *fs = priv->fs;
    int x;
    int count = 0;

    mscfs_check_cache(priv->fs);

    for(x = 0; x < fs->cache.size; x++) {
        u8* oid = fs->cache.array[x].objectId.id;
        sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
            "FILE: %02X%02X%02X%02X\n",
            oid[0], oid[1], oid[2], oid[3]);
        if(0 == memcmp(fs->currentPath, oid, 2)) {
            buf[0] = oid[2];
            buf[1] = oid[3];
            if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */
            buf += 2;
            count += 2;
        }
    }
    return count;
}
CWE-119
26
static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
{
    struct encrypted_key_payload *epayload = key->payload.data[0];
    struct encrypted_key_payload *new_epayload;
    char *buf;
    char *new_master_desc = NULL;
    const char *format = NULL;
    size_t datalen = prep->datalen;
    int ret = 0;

    if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
        return -ENOKEY;
    if (datalen <= 0 || datalen > 32767 || !prep->data)
        return -EINVAL;

    buf = kmalloc(datalen + 1, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    buf[datalen] = 0;
    memcpy(buf, prep->data, datalen);
    ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL);
    if (ret < 0)
        goto out;

    ret = valid_master_desc(new_master_desc, epayload->master_desc);
    if (ret < 0)
        goto out;

    new_epayload = encrypted_key_alloc(key, epayload->format,
                                       new_master_desc, epayload->datalen);
    if (IS_ERR(new_epayload)) {
        ret = PTR_ERR(new_epayload);
        goto out;
    }

    __ekey_init(new_epayload, epayload->format, new_master_desc,
                epayload->datalen);

    memcpy(new_epayload->iv, epayload->iv, ivsize);
    memcpy(new_epayload->payload_data, epayload->payload_data,
           epayload->payload_datalen);

    rcu_assign_keypointer(key, new_epayload);
    call_rcu(&epayload->rcu, encrypted_rcu_free);
out:
    kzfree(buf);
    return ret;
}
CWE-20
0
monitor_init(void)
{
    struct ssh *ssh = active_state; /* XXX */
    struct monitor *mon;

    mon = xcalloc(1, sizeof(*mon));
    monitor_openfds(mon, 1);

    /* Used to share zlib space across processes */
    if (options.compression) {
        mon->m_zback = mm_create(NULL, MM_MEMSIZE);
        mon->m_zlib = mm_create(mon->m_zback, 20 * MM_MEMSIZE);

        /* Compression needs to share state across borders */
        ssh_packet_set_compress_hooks(ssh, mon->m_zlib,
            (ssh_packet_comp_alloc_func *)mm_zalloc,
            (ssh_packet_comp_free_func *)mm_zfree);
    }

    return mon;
}
CWE-119
26
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
    struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
    int err = 0;

    mutex_lock(&swhash->hlist_mutex);

    if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
        struct swevent_hlist *hlist;

        hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
        if (!hlist) {
            err = -ENOMEM;
            goto exit;
        }
        rcu_assign_pointer(swhash->swevent_hlist, hlist);
    }
    swhash->hlist_refcount++;
exit:
    mutex_unlock(&swhash->hlist_mutex);

    return err;
}
CWE-362
18
psf_asciiheader_printf (SF_PRIVATE *psf, const char *format, ...)
{
    va_list argptr ;
    int maxlen ;
    char *start ;

    maxlen = strlen ((char*) psf->header) ;
    start = ((char*) psf->header) + maxlen ;
    maxlen = sizeof (psf->header) - maxlen ;

    va_start (argptr, format) ;
    vsnprintf (start, maxlen, format, argptr) ;
    va_end (argptr) ;

    /* Make sure the string is properly terminated. */
    start [maxlen - 1] = 0 ;

    psf->headindex = strlen ((char*) psf->header) ;

    return ;
} /* psf_asciiheader_printf */
CWE-119
26
BGD_DECLARE(void) gdImageXbmCtx(gdImagePtr image, char* file_name, int fg, gdIOCtx * out)
{
    int x, y, c, b, sx, sy, p;
    char *name, *f;
    size_t i, l;

    name = file_name;
    if ((f = strrchr(name, '/')) != NULL) name = f+1;
    if ((f = strrchr(name, '\\')) != NULL) name = f+1;
    name = strdup(name);
    if ((f = strrchr(name, '.')) != NULL && !strcasecmp(f, ".XBM")) *f = '\0';
    if ((l = strlen(name)) == 0) {
        free(name);
        name = strdup("image");
    } else {
        for (i=0; i<l; i++) {
            /* only in C-locale isalnum() would work */
            if (!isupper(name[i]) && !islower(name[i]) && !isdigit(name[i])) {
                name[i] = '_';
            }
        }
    }

    gdCtxPrintf(out, "#define %s_width %d\n", name, gdImageSX(image));
    gdCtxPrintf(out, "#define %s_height %d\n", name, gdImageSY(image));
    gdCtxPrintf(out, "static unsigned char %s_bits[] = {\n ", name);

    free(name);

    b = 1;
    p = 0;
    c = 0;
    sx = gdImageSX(image);
    sy = gdImageSY(image);
    for (y = 0; y < sy; y++) {
        for (x = 0; x < sx; x++) {
            if (gdImageGetPixel(image, x, y) == fg) {
                c |= b;
            }
            if ((b == 128) || (x == sx && y == sy)) {
                b = 1;
                if (p) {
                    gdCtxPrintf(out, ", ");
                    if (!(p%12)) {
                        gdCtxPrintf(out, "\n ");
                        p = 12;
                    }
                }
                p++;
                gdCtxPrintf(out, "0x%02X", c);
                c = 0;
            } else {
                b <<= 1;
            }
        }
    }
    gdCtxPrintf(out, "};\n");
}
CWE-119
26
static void show_object(struct object *object, struct strbuf *path,
                        const char *last, void *data)
{
    struct bitmap *base = data;
    int bitmap_pos;

    bitmap_pos = bitmap_position(object->oid.hash);

    if (bitmap_pos < 0) {
        char *name = path_name(path, last);
        bitmap_pos = ext_index_add_object(object, name);
        free(name);
    }

    bitmap_set(base, bitmap_pos);
}
CWE-119
26
static int oidc_cache_crypto_decrypt(request_rec *r, const char *cache_value,
        unsigned char *key, unsigned char **plaintext) {

    int len = -1;

    /* grab the base64url-encoded tag after the "." */
    char *encoded_tag = strstr(cache_value, ".");
    if (encoded_tag == NULL) {
        oidc_error(r,
                "corrupted cache value: no tag separator found in encrypted value");
        return FALSE;
    }

    /* make sure we don't modify the original string since it may be just a pointer into the cache (shm) */
    cache_value = apr_pstrmemdup(r->pool, cache_value,
            strlen(cache_value) - strlen(encoded_tag));
    encoded_tag++;

    /* base64url decode the ciphertext */
    char *d_bytes = NULL;
    int d_len = oidc_base64url_decode(r->pool, &d_bytes, cache_value);

    /* base64url decode the tag */
    char *t_bytes = NULL;
    int t_len = oidc_base64url_decode(r->pool, &t_bytes, encoded_tag);

    /* see if we're still good to go */
    if ((d_len > 0) && (t_len > 0)) {

        /* allocated space for the plaintext */
        *plaintext = apr_pcalloc(r->pool,
                (d_len + EVP_CIPHER_block_size(OIDC_CACHE_CIPHER) - 1));

        /* decrypt the ciphertext providing the tag value */
        len = oidc_cache_crypto_decrypt_impl(r, (unsigned char *) d_bytes,
                d_len, OIDC_CACHE_CRYPTO_GCM_AAD,
                sizeof(OIDC_CACHE_CRYPTO_GCM_AAD), (unsigned char *) t_bytes,
                t_len, key, OIDC_CACHE_CRYPTO_GCM_IV,
                sizeof(OIDC_CACHE_CRYPTO_GCM_IV), *plaintext);

        /* check the result and make sure it is \0 terminated */
        if (len > -1) {
            (*plaintext)[len] = '\0';
        } else {
            *plaintext = NULL;
        }
    }

    return len;
}
CWE-330
12
header_put_le_int (SF_PRIVATE *psf, int x)
{
    if (psf->headindex < SIGNED_SIZEOF (psf->header) - 4)
    {
        psf->header [psf->headindex++] = x ;
        psf->header [psf->headindex++] = (x >> 8) ;
        psf->header [psf->headindex++] = (x >> 16) ;
        psf->header [psf->headindex++] = (x >> 24) ;
    } ;
} /* header_put_le_int */
CWE-119
26
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
    struct request *rq = tags->rqs[tag];
    /* mq_ctx of flush rq is always cloned from the corresponding req */
    struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);

    if (!is_flush_request(rq, fq, tag))
        return rq;

    return fq->flush_rq;
}
CWE-362
18
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
                                     int *errcode)
{
    struct sk_buff *skb;
    gfp_t gfp_mask;
    long timeo;
    int err;

    gfp_mask = sk->sk_allocation;
    if (gfp_mask & __GFP_WAIT)
        gfp_mask |= __GFP_REPEAT;

    timeo = sock_sndtimeo(sk, noblock);
    while (1) {
        err = sock_error(sk);
        if (err != 0)
            goto failure;

        err = -EPIPE;
        if (sk->sk_shutdown & SEND_SHUTDOWN)
            goto failure;

        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
            skb = alloc_skb(header_len, gfp_mask);
            if (skb) {
                int npages;
                int i;

                /* No pages, we're done... */
                if (!data_len)
                    break;

                npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                skb->truesize += data_len;
                skb_shinfo(skb)->nr_frags = npages;
                for (i = 0; i < npages; i++) {
                    struct page *page;

                    page = alloc_pages(sk->sk_allocation, 0);
                    if (!page) {
                        err = -ENOBUFS;
                        skb_shinfo(skb)->nr_frags = i;
                        kfree_skb(skb);
                        goto failure;
                    }

                    __skb_fill_page_desc(skb, i,
                            page, 0,
                            (data_len >= PAGE_SIZE ?
                             PAGE_SIZE : data_len));
                    data_len -= PAGE_SIZE;
                }

                /* Full success... */
                break;
            }
            err = -ENOBUFS;
            goto failure;
        }
        set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        err = -EAGAIN;
        if (!timeo)
            goto failure;
        if (signal_pending(current))
            goto interrupted;
        timeo = sock_wait_for_wmem(sk, timeo);
    }

    skb_set_owner_w(skb, sk);
    return skb;

interrupted:
    err = sock_intr_errno(timeo);
failure:
    *errcode = err;
    return NULL;
}
CWE-20
0
entry_guard_obeys_restriction(const entry_guard_t *guard,
                              const entry_guard_restriction_t *rst)
{
  tor_assert(guard);
  if (! rst)
    return 1; // No restriction? No problem.

  // Only one kind of restriction exists right now
  return tor_memneq(guard->identity, rst->exclude_id, DIGEST_LEN);
}
CWE-200
10
static int msg_cache_check(const char *id, struct BodyCache *bcache, void *data)
{
  struct Context *ctx = (struct Context *) data;
  if (!ctx)
    return -1;

  struct PopData *pop_data = (struct PopData *) ctx->data;
  if (!pop_data)
    return -1;

#ifdef USE_HCACHE
  /* keep hcache file if hcache == bcache */
  if (strcmp(HC_FNAME "." HC_FEXT, id) == 0)
    return 0;
#endif

  for (int i = 0; i < ctx->msgcount; i++)
  {
    /* if the id we get is known for a header: done (i.e. keep in cache) */
    if (ctx->hdrs[i]->data && (mutt_str_strcmp(ctx->hdrs[i]->data, id) == 0))
      return 0;
  }

  /* message not found in context -> remove it from cache
   * return the result of bcache, so we stop upon its first error */
  return mutt_bcache_del(bcache, id);
}
CWE-119
26