code (stringlengths 23-2.05k) | label_name (stringlengths 6-7) | label (int64 0-37) |
---|---|---|
int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
fpu_init(fpu);
task_thread_info(tsk)->status |= TS_USEDFPU;
}
return fpu_emulate(inst, fpu, regs);
} | CWE-400 | 2 |
static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct n_tty_data *ldata = tty->disc_data;
int retval;
switch (cmd) {
case TIOCOUTQ:
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
down_write(&tty->termios_rwsem);
if (L_ICANON(tty))
retval = inq_canon(ldata);
else
retval = read_cnt(ldata);
up_write(&tty->termios_rwsem);
return put_user(retval, (unsigned int __user *) arg);
default:
return n_tty_ioctl_helper(tty, file, cmd, arg);
}
} | CWE-704 | 34 |
error_t ipStringToAddr(const char_t *str, IpAddr *ipAddr)
{
error_t error;
#if (IPV6_SUPPORT == ENABLED)
//IPv6 address?
if(strchr(str, ':'))
{
//IPv6 addresses are 16-byte long
ipAddr->length = sizeof(Ipv6Addr);
//Convert the string to IPv6 address
error = ipv6StringToAddr(str, &ipAddr->ipv6Addr);
}
else
#endif
#if (IPV4_SUPPORT == ENABLED)
//IPv4 address?
if(strchr(str, '.'))
{
//IPv4 addresses are 4-byte long
ipAddr->length = sizeof(Ipv4Addr);
//Convert the string to IPv4 address
error = ipv4StringToAddr(str, &ipAddr->ipv4Addr);
}
else
#endif
//Invalid IP address?
{
//Report an error
error = ERROR_FAILURE;
}
//Return status code
return error;
} | CWE-20 | 0 |
DECLAREwriteFunc(writeBufferToContigTiles)
{
uint32 imagew = TIFFScanlineSize(out);
uint32 tilew = TIFFTileRowSize(out);
int iskew = imagew - tilew;
tsize_t tilesize = TIFFTileSize(out);
tdata_t obuf;
uint8* bufp = (uint8*) buf;
uint32 tl, tw;
uint32 row;
(void) spp;
obuf = _TIFFmalloc(TIFFTileSize(out));
if (obuf == NULL)
return 0;
_TIFFmemset(obuf, 0, tilesize);
(void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl);
(void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw);
for (row = 0; row < imagelength; row += tilelength) {
uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
uint32 colb = 0;
uint32 col;
for (col = 0; col < imagewidth; col += tw) {
/*
* Tile is clipped horizontally. Calculate
* visible portion and skewing factors.
*/
if (colb + tilew > imagew) {
uint32 width = imagew - colb;
int oskew = tilew - width;
cpStripToTile(obuf, bufp + colb, nrow, width,
oskew, oskew + iskew);
} else
cpStripToTile(obuf, bufp + colb, nrow, tilew,
0, iskew);
if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) {
TIFFError(TIFFFileName(out),
"Error, can't write tile at %lu %lu",
(unsigned long) col,
(unsigned long) row);
_TIFFfree(obuf);
return 0;
}
colb += tilew;
}
bufp += nrow * imagew;
}
_TIFFfree(obuf);
return 1;
} | CWE-119 | 26 |
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if (pmc_overflow(val))
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
} | CWE-400 | 2 |
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
goto fail;
}
spin_unlock_irqrestore(&dev->lock, flags);
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
cp2112_gpio_set(chip, offset, value);
return 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
return ret < 0 ? ret : -EIO;
} | CWE-404 | 30 |
PHP_FUNCTION(radius_get_vendor_attr)
{
int res;
const void *data;
int len;
u_int32_t vendor;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &data, &len) == FAILURE) {
return;
}
res = rad_get_vendor_attr(&vendor, &data, (size_t *) &len);
if (res == -1) {
RETURN_FALSE;
} else {
array_init(return_value);
add_assoc_long(return_value, "attr", res);
add_assoc_long(return_value, "vendor", vendor);
add_assoc_stringl(return_value, "data", (char *) data, len, 1);
return;
}
} | CWE-119 | 26 |
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
assert((size_t)CDF_SHORT_SEC_SIZE(h) == len);
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len);
return len;
} | CWE-119 | 26 |
ctcompare(const char *a, /* I - First string */
const char *b) /* I - Second string */
{
int result = 0; /* Result */
while (*a && *b)
{
result |= *a ^ *b;
a ++;
b ++;
}
return (result);
} | CWE-269 | 6 |
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
struct nlattr *nla;
nla = nla_reserve(skb, XFRMA_ALG_AUTH,
sizeof(*algo) + (auth->alg_key_len + 7) / 8);
if (!nla)
return -EMSGSIZE;
algo = nla_data(nla);
strcpy(algo->alg_name, auth->alg_name);
memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
return 0;
} | CWE-200 | 10 |
static int stellaris_enet_load(QEMUFile *f, void *opaque, int version_id)
{
stellaris_enet_state *s = (stellaris_enet_state *)opaque;
int i;
if (version_id != 1)
return -EINVAL;
s->ris = qemu_get_be32(f);
s->im = qemu_get_be32(f);
s->rctl = qemu_get_be32(f);
s->tctl = qemu_get_be32(f);
s->thr = qemu_get_be32(f);
s->mctl = qemu_get_be32(f);
s->mdv = qemu_get_be32(f);
s->mtxd = qemu_get_be32(f);
s->mrxd = qemu_get_be32(f);
s->np = qemu_get_be32(f);
s->tx_fifo_len = qemu_get_be32(f);
qemu_get_buffer(f, s->tx_fifo, sizeof(s->tx_fifo));
for (i = 0; i < 31; i++) {
s->rx[i].len = qemu_get_be32(f);
qemu_get_buffer(f, s->rx[i].data, sizeof(s->rx[i].data));
}
s->next_packet = qemu_get_be32(f);
s->rx_fifo_offset = qemu_get_be32(f);
return 0;
} | CWE-119 | 26 |
BOOL update_recv(rdpUpdate* update, wStream* s)
{
BOOL rc = FALSE;
UINT16 updateType;
rdpContext* context = update->context;
if (Stream_GetRemainingLength(s) < 2)
{
WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 2");
return FALSE;
}
Stream_Read_UINT16(s, updateType); /* updateType (2 bytes) */
WLog_Print(update->log, WLOG_TRACE, "%s Update Data PDU", UPDATE_TYPE_STRINGS[updateType]);
if (!update_begin_paint(update))
goto fail;
switch (updateType)
{
case UPDATE_TYPE_ORDERS:
rc = update_recv_orders(update, s);
break;
case UPDATE_TYPE_BITMAP:
{
BITMAP_UPDATE* bitmap_update = update_read_bitmap_update(update, s);
if (!bitmap_update)
{
WLog_ERR(TAG, "UPDATE_TYPE_BITMAP - update_read_bitmap_update() failed");
goto fail;
}
rc = IFCALLRESULT(FALSE, update->BitmapUpdate, context, bitmap_update);
free_bitmap_update(update->context, bitmap_update);
}
break;
case UPDATE_TYPE_PALETTE:
{
PALETTE_UPDATE* palette_update = update_read_palette(update, s);
if (!palette_update)
{
WLog_ERR(TAG, "UPDATE_TYPE_PALETTE - update_read_palette() failed");
goto fail;
}
rc = IFCALLRESULT(FALSE, update->Palette, context, palette_update);
free_palette_update(context, palette_update);
}
break;
case UPDATE_TYPE_SYNCHRONIZE:
update_read_synchronize(update, s);
rc = IFCALLRESULT(TRUE, update->Synchronize, context);
break;
default:
break;
}
fail:
if (!update_end_paint(update))
rc = FALSE;
if (!rc)
{
WLog_ERR(TAG, "UPDATE_TYPE %s [%" PRIu16 "] failed", update_type_to_string(updateType),
updateType);
return FALSE;
}
return TRUE;
} | CWE-119 | 26 |
static int stellaris_enet_init(SysBusDevice *sbd)
{
DeviceState *dev = DEVICE(sbd);
stellaris_enet_state *s = STELLARIS_ENET(dev);
memory_region_init_io(&s->mmio, OBJECT(s), &stellaris_enet_ops, s,
"stellaris_enet", 0x1000);
sysbus_init_mmio(sbd, &s->mmio);
sysbus_init_irq(sbd, &s->irq);
qemu_macaddr_default_if_unset(&s->conf.macaddr);
s->nic = qemu_new_nic(&net_stellaris_enet_info, &s->conf,
object_get_typename(OBJECT(dev)), dev->id, s);
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
stellaris_enet_reset(s);
register_savevm(dev, "stellaris_enet", -1, 1,
stellaris_enet_save, stellaris_enet_load, s);
return 0;
} | CWE-119 | 26 |
void inet_sock_destruct(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_error_queue);
sk_mem_reclaim(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
pr_err("Attempt to release TCP socket in state %d %p\n",
sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive inet socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);
kfree(inet->opt);
dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
sk_refcnt_debug_dec(sk);
} | CWE-362 | 18 |
smtp_mailaddr(struct mailaddr *maddr, char *line, int mailfrom, char **args,
const char *domain)
{
char *p, *e;
if (line == NULL)
return (0);
if (*line != '<')
return (0);
e = strchr(line, '>');
if (e == NULL)
return (0);
*e++ = '\0';
while (*e == ' ')
e++;
*args = e;
if (!text_to_mailaddr(maddr, line + 1))
return (0);
p = strchr(maddr->user, ':');
if (p != NULL) {
p++;
memmove(maddr->user, p, strlen(p) + 1);
}
if (!valid_localpart(maddr->user) ||
!valid_domainpart(maddr->domain)) {
/* accept empty return-path in MAIL FROM, required for bounces */
if (mailfrom && maddr->user[0] == '\0' && maddr->domain[0] == '\0')
return (1);
/* no user-part, reject */
if (maddr->user[0] == '\0')
return (0);
/* no domain, local user */
if (maddr->domain[0] == '\0') {
(void)strlcpy(maddr->domain, domain,
sizeof(maddr->domain));
return (1);
}
return (0);
}
return (1);
} | CWE-755 | 21 |
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint16_t val;
k->get_config(vdev, vdev->config);
if (addr > (vdev->config_len - sizeof(val)))
return (uint32_t)-1;
val = lduw_p(vdev->config + addr);
return val;
} | CWE-269 | 6 |
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
BUG_ON(list_empty(&u->link));
}
unix_tot_inflight++;
spin_unlock(&unix_gc_lock);
}
} | CWE-119 | 26 |
static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
{
cJSON *current_element = NULL;
if ((object == NULL) || (name == NULL))
{
return NULL;
}
current_element = object->child;
if (case_sensitive)
{
while ((current_element != NULL) && (strcmp(name, current_element->string) != 0))
{
current_element = current_element->next;
}
}
else
{
while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0))
{
current_element = current_element->next;
}
}
return current_element;
} | CWE-754 | 31 |
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
msg->msg_namelen = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (seglen) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = ctx->used;
if (!used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
used = min_t(unsigned long, used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
if (err < 0)
goto unlock;
if (ctx->more || used < ctx->used)
used -= used % bs;
err = -EINVAL;
if (!used)
goto free;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
af_alg_free_sg(&ctx->rsgl);
if (err)
goto unlock;
copied += used;
from += used;
seglen -= used;
skcipher_pull_sgl(sk, used);
}
}
err = 0;
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return copied ?: err;
} | CWE-20 | 0 |
vips_foreign_load_start( VipsImage *out, void *a, void *b )
{
VipsForeignLoad *load = VIPS_FOREIGN_LOAD( b );
VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_GET_CLASS( load );
if( !load->real ) {
if( !(load->real = vips_foreign_load_temp( load )) )
return( NULL );
#ifdef DEBUG
printf( "vips_foreign_load_start: triggering ->load()\n" );
#endif /*DEBUG*/
/* Read the image in. This may involve a long computation and
* will finish with load->real holding the decompressed image.
*
* We want our caller to be able to see this computation on
* @out, so eval signals on ->real need to appear on ->out.
*/
load->real->progress_signal = load->out;
/* Note the load object on the image. Loaders can use
* this to signal invalidate if they hit a load error. See
* vips_foreign_load_invalidate() below.
*/
g_object_set_qdata( G_OBJECT( load->real ),
vips__foreign_load_operation, load );
if( class->load( load ) ||
vips_image_pio_input( load->real ) )
return( NULL );
/* ->header() read the header into @out, load has read the
* image into @real. They must match exactly in size, bands,
* format and coding for the copy to work.
*
* Some versions of ImageMagick give different results between
* Ping and Load for some formats, for example.
*/
if( !vips_foreign_load_iscompat( load->real, out ) )
return( NULL );
/* We have to tell vips that out depends on real. We've set
* the demand hint below, but not given an input there.
*/
vips_image_pipelinev( load->out, load->out->dhint,
load->real, NULL );
}
return( vips_region_new( load->real ) );
} | CWE-362 | 18 |
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
unsigned long flags;
u64 expires;
/* confirm we're still not at a refresh boundary */
raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b->slack_started = false;
if (cfs_b->distribute_running) {
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
runtime = cfs_b->runtime;
expires = cfs_b->runtime_expires;
if (runtime)
cfs_b->distribute_running = 1;
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
if (!runtime)
return;
runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
raw_spin_lock_irqsave(&cfs_b->lock, flags);
if (expires == cfs_b->runtime_expires)
lsub_positive(&cfs_b->runtime, runtime);
cfs_b->distribute_running = 0;
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
} | CWE-400 | 2 |
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
type = instr & TYPE_SWPB;
pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}
if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}
return 0;
} | CWE-400 | 2 |
static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
struct sk_buff *skb;
size_t copied;
int err;
IRDA_DEBUG(4, "%s()\n", __func__);
msg->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
__func__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
/*
* Check if we have previously stopped IrTTP and we know
* have more free space in our rx_queue. If so tell IrTTP
* to start delivering frames again before our rx_queue gets
* empty
*/
if (self->rx_flow == FLOW_STOP) {
if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
}
}
return copied;
} | CWE-20 | 0 |
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
msg->msg_namelen = 0;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
} | CWE-20 | 0 |
static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
unsigned long name_ptr;
u32 total_len;
u32 cur = 0;
u32 this_len;
struct extent_buffer *leaf;
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
if (verify_dir_item(root, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
while (cur < total_len) {
this_len = sizeof(*dir_item) +
btrfs_dir_name_len(leaf, dir_item) +
btrfs_dir_data_len(leaf, dir_item);
name_ptr = (unsigned long)(dir_item + 1);
if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
return dir_item;
cur += this_len;
dir_item = (struct btrfs_dir_item *)((char *)dir_item +
this_len);
}
return NULL;
} | CWE-362 | 18 |
static int jas_iccputsint(jas_stream_t *out, int n, longlong val)
{
ulonglong tmp;
tmp = (val < 0) ? (abort(), 0) : val;
return jas_iccputuint(out, n, tmp);
} | CWE-20 | 0 |
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
msg->msg_namelen = 0;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
} | CWE-20 | 0 |
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
struct ddpehdr *ddp;
int copied = 0;
int offset = 0;
int err = 0;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
lock_sock(sk);
if (!skb)
goto out;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
copied = ntohs(ddp->deh_len_hops) & 1023;
if (sk->sk_type != SOCK_RAW) {
offset = sizeof(*ddp);
copied -= offset;
}
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
if (!err) {
if (sat) {
sat->sat_family = AF_APPLETALK;
sat->sat_port = ddp->deh_sport;
sat->sat_addr.s_node = ddp->deh_snode;
sat->sat_addr.s_net = ddp->deh_snet;
}
msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
out:
release_sock(sk);
return err ? : copied;
} | CWE-20 | 0 |
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag,
uint32_t lun, void *hba_private)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
SCSIRequest *req;
SCSIDiskReq *r;
req = scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun, hba_private);
r = DO_UPCAST(SCSIDiskReq, req, req);
r->iov.iov_base = qemu_blockalign(s->bs, SCSI_DMA_BUF_SIZE);
return req;
} | CWE-119 | 26 |
int hns_ppe_get_sset_count(int stringset)
{
if (stringset == ETH_SS_STATS)
return ETH_PPE_STATIC_NUM;
return 0;
} | CWE-119 | 26 |
static int misaligned_fpu_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
} | CWE-400 | 2 |
static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
/* dock delta_exec before expiring quota (as it could span periods) */
cfs_rq->runtime_remaining -= delta_exec;
expire_cfs_rq_runtime(cfs_rq);
if (likely(cfs_rq->runtime_remaining > 0))
return;
/*
* if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
resched_curr(rq_of(cfs_rq));
} | CWE-400 | 2 |
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, ~0ULL);
data.period = event->hw.last_period;
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
if (perf_event_overflow(event, nmi, &data, regs))
power_pmu_stop(event, 0);
}
} | CWE-400 | 2 |
char *M_fs_path_tmpdir(M_fs_system_t sys_type)
{
char *d = NULL;
char *out = NULL;
M_fs_error_t res;
#ifdef _WIN32
size_t len = M_fs_path_get_path_max(M_FS_SYSTEM_WINDOWS)+1;
d = M_malloc_zero(len);
/* Return is length without NULL. */
if (GetTempPath((DWORD)len, d) >= len) {
M_free(d);
d = NULL;
}
#elif defined(__APPLE__)
d = M_fs_path_mac_tmpdir();
#else
const char *const_temp;
/* Try Unix env var. */
# ifdef HAVE_SECURE_GETENV
const_temp = secure_getenv("TMPDIR");
# else
const_temp = getenv("TMPDIR");
# endif
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
/* Fallback to some "standard" system paths. */
if (d == NULL) {
const_temp = "/tmp";
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
}
if (d == NULL) {
const_temp = "/var/tmp";
if (!M_str_isempty(const_temp) && M_fs_perms_can_access(const_temp, M_FS_FILE_MODE_READ|M_FS_FILE_MODE_WRITE) == M_FS_ERROR_SUCCESS) {
d = M_strdup(const_temp);
}
}
#endif
if (d != NULL) {
res = M_fs_path_norm(&out, d, M_FS_PATH_NORM_ABSOLUTE, sys_type);
if (res != M_FS_ERROR_SUCCESS) {
out = NULL;
}
}
M_free(d);
return out;
} | CWE-732 | 13 |
static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
if (slack_runtime <= 0)
return;
raw_spin_lock(&cfs_b->lock);
if (cfs_b->quota != RUNTIME_INF &&
cfs_rq->runtime_expires == cfs_b->runtime_expires) {
cfs_b->runtime += slack_runtime;
/* we are under rq->lock, defer unthrottling using a timer */
if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
!list_empty(&cfs_b->throttled_cfs_rq))
start_cfs_slack_bandwidth(cfs_b);
}
raw_spin_unlock(&cfs_b->lock);
/* even if it's not valid for return we don't want to try again */
cfs_rq->runtime_remaining -= slack_runtime;
} | CWE-400 | 2 |
receive_carbon(void **state)
{
prof_input("/carbons on");
prof_connect();
assert_true(stbbr_received(
"<iq id='*' type='set'><enable xmlns='urn:xmpp:carbons:2'/></iq>"
));
stbbr_send(
"<presence to='stabber@localhost' from='buddy1@localhost/mobile'>"
"<priority>10</priority>"
"<status>On my mobile</status>"
"</presence>"
);
assert_true(prof_output_exact("Buddy1 (mobile) is online, \"On my mobile\""));
prof_input("/msg Buddy1");
assert_true(prof_output_exact("unencrypted"));
stbbr_send(
"<message type='chat' to='stabber@localhost/profanity' from='buddy1@localhost'>"
"<received xmlns='urn:xmpp:carbons:2'>"
"<forwarded xmlns='urn:xmpp:forward:0'>"
"<message id='prof_msg_7' xmlns='jabber:client' type='chat' lang='en' to='stabber@localhost/profanity' from='buddy1@localhost/mobile'>"
"<body>test carbon from recipient</body>"
"</message>"
"</forwarded>"
"</received>"
"</message>"
);
assert_true(prof_output_regex("Buddy1/mobile: .+test carbon from recipient"));
} | CWE-346 | 16 |
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t owner = B_FALSE;
boolean_t groupmbr = B_FALSE;
boolean_t is_attr;
uid_t uid = crgetuid(cr);
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (1);
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(ZTOV(zdp)->v_type == VDIR));
if (is_attr)
return (1);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED)
return (0);
mutex_enter(&zdp->z_acl_lock);
if (FUID_INDEX(zdp->z_uid) != 0 || FUID_INDEX(zdp->z_gid) != 0) {
goto out_slow;
}
if (uid == zdp->z_uid) {
owner = B_TRUE;
if (zdp->z_mode & S_IXUSR) {
goto out;
} else {
goto out_slow;
}
}
if (groupmember(zdp->z_gid, cr)) {
groupmbr = B_TRUE;
if (zdp->z_mode & S_IXGRP) {
goto out;
} else {
goto out_slow;
}
}
if (!owner && !groupmbr) {
if (zdp->z_mode & S_IXOTH) {
goto out;
}
}
out:
mutex_exit(&zdp->z_acl_lock);
return (0);
out_slow:
mutex_exit(&zdp->z_acl_lock);
return (1);
} | CWE-863 | 11 |
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
struct vsock_sock *vsk,
struct msghdr *msg, size_t len,
int flags)
{
int err;
int noblock;
struct vmci_datagram *dg;
size_t payload_len;
struct sk_buff *skb;
noblock = flags & MSG_DONTWAIT;
if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
return -EOPNOTSUPP;
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
if (err)
return err;
if (!skb)
return -EAGAIN;
dg = (struct vmci_datagram *)skb->data;
if (!dg)
/* err is 0, meaning we read zero bytes. */
goto out;
payload_len = dg->payload_size;
/* Ensure the sk_buff matches the payload size claimed in the packet. */
if (payload_len != skb->len - sizeof(*dg)) {
err = -EINVAL;
goto out;
}
if (payload_len > len) {
payload_len = len;
msg->msg_flags |= MSG_TRUNC;
}
/* Place the datagram payload in the user's iovec. */
err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
payload_len);
if (err)
goto out;
msg->msg_namelen = 0;
if (msg->msg_name) {
struct sockaddr_vm *vm_addr;
/* Provide the address of the sender. */
vm_addr = (struct sockaddr_vm *)msg->msg_name;
vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
msg->msg_namelen = sizeof(*vm_addr);
}
err = payload_len;
out:
skb_free_datagram(&vsk->sk, skb);
return err;
} | CWE-200 | 10 |
static inline void exit_io_context(void)
{
} | CWE-400 | 2 |
R_API st64 r_buf_read_at(RBuffer *b, ut64 addr, ut8 *buf, ut64 len) {
r_return_val_if_fail (b && buf, -1);
st64 o_addr = r_buf_seek (b, 0, R_BUF_CUR);
st64 r = r_buf_seek (b, addr, R_BUF_SET);
if (r < 0) {
return r;
}
r = r_buf_read (b, buf, len);
r_buf_seek (b, o_addr, R_BUF_SET);
return r;
} | CWE-400 | 2 |
static char *get_pid_environ_val(pid_t pid,char *val){
char temp[500];
int i=0;
int foundit=0;
FILE *fp;
sprintf(temp,"/proc/%d/environ",pid);
fp=fopen(temp,"r");
if(fp==NULL)
return NULL;
for(;;){
temp[i]=fgetc(fp);
if(foundit==1 && (temp[i]==0 || temp[i]=='\0' || temp[i]==EOF)){
char *ret;
temp[i]=0;
ret=malloc(strlen(temp)+10);
sprintf(ret,"%s",temp);
fclose(fp);
return ret;
}
switch(temp[i]){
case EOF:
fclose(fp);
return NULL;
case '=':
temp[i]=0;
if(!strcmp(temp,val)){
foundit=1;
}
i=0;
break;
case '\0':
i=0;
break;
default:
i++;
}
}
} | CWE-119 | 26 |
SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in )
{
SQLWCHAR *chr;
int len = 0;
if ( !in )
{
return in;
}
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
len ++;
}
chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 ));
len = 0;
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
chr[ len ] = in[ len ];
len ++;
}
chr[ len ++ ] = 0;
chr[ len ++ ] = 0;
return chr;
} | CWE-119 | 26 |
int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
key->reject_error = -error;
smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
} | CWE-20 | 0 |
char* _multi_string_alloc_and_copy( LPCWSTR in )
{
char *chr;
int len = 0;
if ( !in )
{
return in;
}
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
len ++;
}
chr = malloc( len + 2 );
len = 0;
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
chr[ len ] = 0xFF & in[ len ];
len ++;
}
chr[ len ++ ] = '\0';
chr[ len ++ ] = '\0';
return chr;
} | CWE-119 | 26 |
PHP_FUNCTION(curl_escape)
{
char *str = NULL, *res = NULL;
size_t str_len = 0;
zval *zid;
php_curl *ch;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) {
return;
}
if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) {
RETURN_FALSE;
}
if ((res = curl_easy_escape(ch->cp, str, str_len))) {
RETVAL_STRING(res);
curl_free(res);
} else {
RETURN_FALSE;
}
} | CWE-119 | 26 |
const char * util_acl_to_str(const sc_acl_entry_t *e)
{
static char line[80], buf[20];
unsigned int acl;
if (e == NULL)
return "N/A";
line[0] = 0;
while (e != NULL) {
acl = e->method;
switch (acl) {
case SC_AC_UNKNOWN:
return "N/A";
case SC_AC_NEVER:
return "NEVR";
case SC_AC_NONE:
return "NONE";
case SC_AC_CHV:
strcpy(buf, "CHV");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "%d", e->key_ref);
break;
case SC_AC_TERM:
strcpy(buf, "TERM");
break;
case SC_AC_PRO:
strcpy(buf, "PROT");
break;
case SC_AC_AUT:
strcpy(buf, "AUTH");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 4, "%d", e->key_ref);
break;
case SC_AC_SEN:
strcpy(buf, "Sec.Env. ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
case SC_AC_SCB:
strcpy(buf, "Sec.ControlByte ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "Ox%X", e->key_ref);
break;
case SC_AC_IDA:
strcpy(buf, "PKCS#15 AuthID ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
default:
strcpy(buf, "????");
break;
}
strcat(line, buf);
strcat(line, " ");
e = e->next;
}
line[strlen(line)-1] = 0; /* get rid of trailing space */
return line;
} | CWE-119 | 26 |
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint8_t val;
k->get_config(vdev, vdev->config);
if (addr > (vdev->config_len - sizeof(val)))
return (uint32_t)-1;
val = ldub_p(vdev->config + addr);
return val;
} | CWE-269 | 6 |
bool_t mqttSnClientIsShortTopicName(const char_t *topicName)
{
bool_t res;
//Initialize variable
res = FALSE;
//A short topic name is a topic name that has a fixed length of two octets
if(osStrlen(topicName) == 2)
{
//Ensure the topic name does not contains wildcard characters
if(strchr(topicName, '#') == NULL && strchr(topicName, '+') == NULL)
{
//The short topic name is a valid
res = TRUE;
}
}
//Return TRUE if the specified topic name is a short topic name
return res;
} | CWE-20 | 0 |
PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s)
{
TIFFPredictorState *sp = PredictorState(tif);
assert(sp != NULL);
assert(sp->decoderow != NULL);
assert(sp->decodepfunc != NULL);
if ((*sp->decoderow)(tif, op0, occ0, s)) {
(*sp->decodepfunc)(tif, op0, occ0);
return 1;
} else
return 0;
} | CWE-119 | 26 |
int fpm_unix_resolve_socket_premissions(struct fpm_worker_pool_s *wp) /* {{{ */
{
struct fpm_worker_pool_config_s *c = wp->config;
/* uninitialized */
wp->socket_uid = -1;
wp->socket_gid = -1;
wp->socket_mode = 0666;
if (!c) {
return 0;
}
if (c->listen_owner && *c->listen_owner) {
struct passwd *pwd;
pwd = getpwnam(c->listen_owner);
if (!pwd) {
zlog(ZLOG_SYSERROR, "[pool %s] cannot get uid for user '%s'", wp->config->name, c->listen_owner);
return -1;
}
wp->socket_uid = pwd->pw_uid;
wp->socket_gid = pwd->pw_gid;
}
if (c->listen_group && *c->listen_group) {
struct group *grp;
grp = getgrnam(c->listen_group);
if (!grp) {
zlog(ZLOG_SYSERROR, "[pool %s] cannot get gid for group '%s'", wp->config->name, c->listen_group);
return -1;
}
wp->socket_gid = grp->gr_gid;
}
if (c->listen_mode && *c->listen_mode) {
wp->socket_mode = strtoul(c->listen_mode, 0, 8);
}
return 0;
} | CWE-269 | 6 |
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
return (pgd_t *) crst_table_alloc(mm);
} | CWE-20 | 0 |
static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
long index;
zval *rv, *value = NULL, **tmp;
if (check_inherited && intern->fptr_offset_has) {
zval *offset_tmp = offset;
SEPARATE_ARG_IF_REF(offset_tmp);
zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp);
zval_ptr_dtor(&offset_tmp);
if (rv && zend_is_true(rv)) {
zval_ptr_dtor(&rv);
if (check_empty != 1) {
return 1;
} else if (intern->fptr_offset_get) {
value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC);
}
} else {
if (rv) {
zval_ptr_dtor(&rv);
}
return 0;
}
}
if (!value) {
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
switch(Z_TYPE_P(offset)) {
case IS_STRING:
if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) {
if (check_empty == 2) {
return 1;
}
} else {
return 0;
}
break;
case IS_DOUBLE:
case IS_RESOURCE:
case IS_BOOL:
case IS_LONG:
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) {
if (check_empty == 2) {
return 1;
}
} else {
return 0;
}
break;
default:
zend_error(E_WARNING, "Illegal offset type");
return 0;
}
if (check_empty && check_inherited && intern->fptr_offset_get) {
value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC);
} else {
value = *tmp;
}
}
return check_empty ? zend_is_true(value) : Z_TYPE_P(value) != IS_NULL;
} /* }}} */ | CWE-20 | 0 |
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
PadContext *s = inlink->dst->priv;
AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0],
w + (s->w - s->in_w),
h + (s->h - s->in_h));
int plane;
if (!frame)
return NULL;
frame->width = w;
frame->height = h;
for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
int hsub = s->draw.hsub[plane];
int vsub = s->draw.vsub[plane];
frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
(s->y >> vsub) * frame->linesize[plane];
}
return frame;
} | CWE-119 | 26 |
void flash_option_bytes_init(int boot_from_dfu)
{
uint32_t val = 0xfffff8aa;
if (boot_from_dfu){
val &= ~(1<<27); // nBOOT0 = 0 (boot from system rom)
}
else {
if (solo_is_locked())
{
val = 0xfffff8cc;
}
}
val &= ~(1<<26); // nSWBOOT0 = 0 (boot from nBoot0)
val &= ~(1<<25); // SRAM2_RST = 1 (erase sram on reset)
val &= ~(1<<24); // SRAM2_PE = 1 (parity check en)
if (FLASH->OPTR == val)
{
return;
}
__disable_irq();
while (FLASH->SR & (1<<16))
;
flash_unlock();
if (FLASH->CR & (1<<30))
{
FLASH->OPTKEYR = 0x08192A3B;
FLASH->OPTKEYR = 0x4C5D6E7F;
}
FLASH->OPTR =val;
FLASH->CR |= (1<<17);
while (FLASH->SR & (1<<16))
;
flash_lock();
__enable_irq();
} | CWE-326 | 9 |
void mk_request_free(struct session_request *sr)
{
if (sr->fd_file > 0) {
mk_vhost_close(sr);
}
if (sr->headers.location) {
mk_mem_free(sr->headers.location);
}
if (sr->uri_processed.data != sr->uri.data) {
mk_ptr_free(&sr->uri_processed);
}
if (sr->real_path.data != sr->real_path_static) {
mk_ptr_free(&sr->real_path);
}
} | CWE-20 | 0 |
log2vis_unicode (PyObject * unicode, FriBidiParType base_direction, int clean, int reordernsm)
{
PyObject *logical = NULL; /* input string encoded in utf-8 */
PyObject *visual = NULL; /* output string encoded in utf-8 */
PyObject *result = NULL; /* unicode output string */
int length = PyUnicode_GET_SIZE (unicode);
logical = PyUnicode_AsUTF8String (unicode);
if (logical == NULL)
goto cleanup;
visual = log2vis_utf8 (logical, length, base_direction, clean, reordernsm);
if (visual == NULL)
goto cleanup;
result = PyUnicode_DecodeUTF8 (PyString_AS_STRING (visual),
PyString_GET_SIZE (visual), "strict");
cleanup:
Py_XDECREF (logical);
Py_XDECREF (visual);
return result;
} | CWE-119 | 26 |
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
const struct sk_buff *skb,
int flags, pol_lookup_t lookup)
{
struct rt6_info *rt;
rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
if (rt->dst.error == -EAGAIN) {
ip6_rt_put_flags(rt, flags);
rt = net->ipv6.ip6_null_entry;
if (!(flags | RT6_LOOKUP_F_DST_NOREF))
dst_hold(&rt->dst);
}
return &rt->dst;
} | CWE-755 | 21 |
DECLAREreadFunc(readContigTilesIntoBuffer)
{
int status = 1;
tsize_t tilesize = TIFFTileSize(in);
tdata_t tilebuf;
uint32 imagew = TIFFScanlineSize(in);
uint32 tilew = TIFFTileRowSize(in);
int iskew = imagew - tilew;
uint8* bufp = (uint8*) buf;
uint32 tw, tl;
uint32 row;
(void) spp;
tilebuf = _TIFFmalloc(tilesize);
if (tilebuf == 0)
return 0;
_TIFFmemset(tilebuf, 0, tilesize);
(void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
(void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);
for (row = 0; row < imagelength; row += tl) {
uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
uint32 colb = 0;
uint32 col;
for (col = 0; col < imagewidth; col += tw) {
if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0
&& !ignore) {
TIFFError(TIFFFileName(in),
"Error, can't read tile at %lu %lu",
(unsigned long) col,
(unsigned long) row);
status = 0;
goto done;
}
if (colb + tilew > imagew) {
uint32 width = imagew - colb;
uint32 oskew = tilew - width;
cpStripToTile(bufp + colb,
tilebuf, nrow, width,
oskew + iskew, oskew );
} else
cpStripToTile(bufp + colb,
tilebuf, nrow, tilew,
iskew, 0);
colb += tilew;
}
bufp += imagew * nrow;
}
done:
_TIFFfree(tilebuf);
return status;
} | CWE-119 | 26 |
swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc)
{
uint16* wp = (uint16*) cp0;
tmsize_t wc = cc / 2;
TIFFSwabArrayOfShort(wp, wc);
horAcc16(tif, cp0, cc);
} | CWE-119 | 26 |
void exit_io_context(void)
{
struct io_context *ioc;
task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
task_unlock(current);
if (atomic_dec_and_test(&ioc->nr_tasks)) {
if (ioc->aic && ioc->aic->exit)
ioc->aic->exit(ioc->aic);
cfq_exit(ioc);
put_io_context(ioc);
}
} | CWE-400 | 2 |
static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
rdesc[106] == 0x03) {
hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
rdesc[105] = rdesc[110] = 0x03;
rdesc[106] = rdesc[111] = 0x21;
}
return rdesc;
} | CWE-119 | 26 |
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
void __user *namep;
namep = (void __user __force *) m->msg_name;
err = move_addr_to_kernel(namep, m->msg_namelen,
address);
if (err < 0)
return err;
}
m->msg_name = address;
} else {
m->msg_name = NULL;
}
size = m->msg_iovlen * sizeof(struct iovec);
if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
return -EFAULT;
m->msg_iov = iov;
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
size_t len = iov[ct].iov_len;
if (len > INT_MAX - err) {
len = INT_MAX - err;
iov[ct].iov_len = len;
}
err += len;
}
return err;
} | CWE-20 | 0 |
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
size_t sec_attr_len)
{
u8 *tmp;
if (!sc_file_valid(file)) {
return SC_ERROR_INVALID_ARGUMENTS;
}
if (sec_attr == NULL) {
if (file->sec_attr != NULL)
free(file->sec_attr);
file->sec_attr = NULL;
file->sec_attr_len = 0;
return 0;
}
tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
if (!tmp) {
if (file->sec_attr)
free(file->sec_attr);
file->sec_attr = NULL;
file->sec_attr_len = 0;
return SC_ERROR_OUT_OF_MEMORY;
}
file->sec_attr = tmp;
memcpy(file->sec_attr, sec_attr, sec_attr_len);
file->sec_attr_len = sec_attr_len;
return 0;
} | CWE-119 | 26 |
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len)
{
size_t cipher_len;
size_t i;
unsigned char iv[16] = { 0 };
unsigned char plaintext[4096] = { 0 };
epass2003_exdata *exdata = NULL;
if (!card->drv_data)
return SC_ERROR_INVALID_ARGUMENTS;
exdata = (epass2003_exdata *)card->drv_data;
/* no cipher */
if (in[0] == 0x99)
return 0;
/* parse cipher length */
if (0x01 == in[2] && 0x82 != in[1]) {
cipher_len = in[1];
i = 3;
}
else if (0x01 == in[3] && 0x81 == in[1]) {
cipher_len = in[2];
i = 4;
}
else if (0x01 == in[4] && 0x82 == in[1]) {
cipher_len = in[2] * 0x100;
cipher_len += in[3];
i = 5;
}
else {
return -1;
}
if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext)
return -1;
/* decrypt */
if (KEY_TYPE_AES == exdata->smtype)
aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
else
des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
/* unpadding */
while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
cipher_len--;
if (2 == cipher_len)
return -1;
memcpy(out, plaintext, cipher_len - 2);
*out_len = cipher_len - 2;
return 0;
} | CWE-119 | 26 |
static int ext4_split_unwritten_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path *path,
int flags)
{
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len;
int split_flag = 0, depth;
ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
/*
* It is safe to convert extent to initialized via explicit
* zeroout only if extent is fully insde i_size or new_size.
*/
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= EXT4_EXT_MARK_UNINIT2;
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, path, map, split_flag, flags);
} | CWE-362 | 18 |
static noinline void key_gc_unused_keys(struct list_head *keys)
{
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
security_key_free(key);
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
/* now throw away the key memory */
if (key->type->destroy)
key->type->destroy(key);
kfree(key->description);
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC_X;
#endif
kmem_cache_free(key_jar, key);
}
} | CWE-362 | 18 |
void enc28j60EventHandler(NetInterface *interface)
{
error_t error;
uint16_t status;
uint16_t value;
//Read interrupt status register
status = enc28j60ReadReg(interface, ENC28J60_REG_EIR);
//Check whether the link state has changed
if((status & EIR_LINKIF) != 0)
{
//Clear PHY interrupts flags
enc28j60ReadPhyReg(interface, ENC28J60_PHY_REG_PHIR);
//Clear interrupt flag
enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_LINKIF);
//Read PHY status register
value = enc28j60ReadPhyReg(interface, ENC28J60_PHY_REG_PHSTAT2);
//Check link state
if((value & PHSTAT2_LSTAT) != 0)
{
//Link speed
interface->linkSpeed = NIC_LINK_SPEED_10MBPS;
#if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED)
//Full-duplex mode
interface->duplexMode = NIC_FULL_DUPLEX_MODE;
#else
//Half-duplex mode
interface->duplexMode = NIC_HALF_DUPLEX_MODE;
#endif
//Link is up
interface->linkState = TRUE;
}
else
{
//Link is down
interface->linkState = FALSE;
}
//Process link state change event
nicNotifyLinkChange(interface);
}
//Check whether a packet has been received?
if((status & EIR_PKTIF) != 0)
{
//Clear interrupt flag
enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_PKTIF);
//Process all pending packets
do
{
//Read incoming packet
error = enc28j60ReceivePacket(interface);
//No more data in the receive buffer?
} while(error != ERROR_BUFFER_EMPTY);
}
//Re-enable LINKIE and PKTIE interrupts
enc28j60SetBit(interface, ENC28J60_REG_EIE, EIE_LINKIE | EIE_PKTIE);
} | CWE-20 | 0 |
find_auth_end (FlatpakProxyClient *client, Buffer *buffer)
{
guchar *match;
int i;
/* First try to match any leftover at the start */
if (client->auth_end_offset > 0)
{
gsize left = strlen (AUTH_END_STRING) - client->auth_end_offset;
gsize to_match = MIN (left, buffer->pos);
/* Matched at least up to to_match */
if (memcmp (buffer->data, &AUTH_END_STRING[client->auth_end_offset], to_match) == 0)
{
client->auth_end_offset += to_match;
/* Matched all */
if (client->auth_end_offset == strlen (AUTH_END_STRING))
return to_match;
/* Matched to end of buffer */
return -1;
}
/* Did not actually match at start */
client->auth_end_offset = -1;
}
/* Look for whole match inside buffer */
match = memmem (buffer, buffer->pos,
AUTH_END_STRING, strlen (AUTH_END_STRING));
if (match != NULL)
return match - buffer->data + strlen (AUTH_END_STRING);
/* Record longest prefix match at the end */
for (i = MIN (strlen (AUTH_END_STRING) - 1, buffer->pos); i > 0; i--)
{
if (memcmp (buffer->data + buffer->pos - i, AUTH_END_STRING, i) == 0)
{
client->auth_end_offset = i;
break;
}
}
return -1;
} | CWE-436 | 5 |
uint16_t mesg_id (void) {
static uint16_t id = 0;
if (!id) {
srandom (time (NULL));
id = random ();
}
id++;
if (T.debug > 4)
syslog (LOG_DEBUG, "mesg_id() = %d", id);
return id;
} | CWE-330 | 12 |
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int err = 0;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* Now we can treat all alike */
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
if (!ax25_sk(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (msg->msg_namelen != 0) {
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
application know the digi calls further down (because it
did NOT ask to know them). This could get political... **/
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
return err;
} | CWE-20 | 0 |
static int netbk_set_skb_gso(struct xenvif *vif,
struct sk_buff *skb,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_dbg(vif->dev, "GSO size must not be zero.\n");
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
return 0;
} | CWE-20 | 0 |
apr_byte_t oidc_cache_get(request_rec *r, const char *section, const char *key,
char **value) {
oidc_cfg *cfg = ap_get_module_config(r->server->module_config,
&auth_openidc_module);
int encrypted = oidc_cfg_cache_encrypt(r);
apr_byte_t rc = TRUE;
char *msg = NULL;
oidc_debug(r, "enter: %s (section=%s, decrypt=%d, type=%s)", key, section,
encrypted, cfg->cache->name);
/* see if encryption is turned on */
if (encrypted == 1)
key = oidc_cache_get_hashed_key(r, cfg->crypto_passphrase, key);
/* get the value from the cache */
const char *cache_value = NULL;
if (cfg->cache->get(r, section, key, &cache_value) == FALSE) {
rc = FALSE;
goto out;
}
/* see if it is any good */
if (cache_value == NULL)
goto out;
/* see if encryption is turned on */
if (encrypted == 0) {
*value = apr_pstrdup(r->pool, cache_value);
goto out;
}
rc = (oidc_cache_crypto_decrypt(r, cache_value,
oidc_cache_hash_passphrase(r, cfg->crypto_passphrase),
(unsigned char **) value) > 0);
out:
/* log the result */
msg = apr_psprintf(r->pool, "from %s cache backend for %skey %s",
cfg->cache->name, encrypted ? "encrypted " : "", key);
if (rc == TRUE)
if (*value != NULL)
oidc_debug(r, "cache hit: return %d bytes %s",
*value ? (int )strlen(*value) : 0, msg);
else
oidc_debug(r, "cache miss %s", msg);
else
oidc_warn(r, "error retrieving value %s", msg);
return rc;
} | CWE-330 | 12 |
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial)
{
cac_private_data_t * priv = CAC_DATA(card);
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL);
if (card->serialnr.len) {
*serial = card->serialnr;
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
}
if (priv->cac_id_len) {
serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR);
memcpy(serial->value, priv->cac_id, priv->cac_id_len);
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
}
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND);
} | CWE-119 | 26 |
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
buf[2] = 1 << offset;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
spin_unlock_irqrestore(&dev->lock, flags);
} | CWE-404 | 30 |
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
LIST_HEAD(tmp_list);
struct mount *p;
if (how & UMOUNT_PROPAGATE)
propagate_mount_unlock(mnt);
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
list_move(&p->mnt_list, &tmp_list);
}
/* Hide the mounts from mnt_mounts */
list_for_each_entry(p, &tmp_list, mnt_list) {
list_del_init(&p->mnt_child);
}
	/* Add propagated mounts to the tmp_list */
if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
while (!list_empty(&tmp_list)) {
bool disconnect;
p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
p->mnt_ns = NULL;
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
disconnect = !IS_MNT_LOCKED_AND_LAZY(p);
pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
disconnect ? &unmounted : NULL);
if (mnt_has_parent(p)) {
mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
/* Don't forget about p */
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
}
}
change_mnt_propagation(p, MS_PRIVATE);
}
} | CWE-200 | 10 |
int ip_options_get(struct net *net, struct ip_options **optp,
unsigned char *data, int optlen)
{
struct ip_options *opt = ip_options_get_alloc(optlen);
if (!opt)
return -ENOMEM;
if (optlen)
memcpy(opt->__data, data, optlen);
return ip_options_get_finish(net, optp, opt, optlen);
} | CWE-362 | 18 |
int credential_from_url_gently(struct credential *c, const char *url,
int quiet)
{
const char *at, *colon, *cp, *slash, *host, *proto_end;
credential_clear(c);
/*
* Match one of:
* (1) proto://<host>/...
* (2) proto://<user>@<host>/...
* (3) proto://<user>:<pass>@<host>/...
*/
proto_end = strstr(url, "://");
if (!proto_end)
return 0;
cp = proto_end + 3;
at = strchr(cp, '@');
colon = strchr(cp, ':');
slash = strchrnul(cp, '/');
if (!at || slash <= at) {
/* Case (1) */
host = cp;
}
else if (!colon || at <= colon) {
/* Case (2) */
c->username = url_decode_mem(cp, at - cp);
host = at + 1;
} else {
/* Case (3) */
c->username = url_decode_mem(cp, colon - cp);
c->password = url_decode_mem(colon + 1, at - (colon + 1));
host = at + 1;
}
if (proto_end - url > 0)
c->protocol = xmemdupz(url, proto_end - url);
c->host = url_decode_mem(host, slash - host);
/* Trim leading and trailing slashes from path */
while (*slash == '/')
slash++;
if (*slash) {
char *p;
c->path = url_decode(slash);
p = c->path + strlen(c->path) - 1;
while (p > c->path && *p == '/')
*p-- = '\0';
}
if (check_url_component(url, quiet, "username", c->username) < 0 ||
check_url_component(url, quiet, "password", c->password) < 0 ||
check_url_component(url, quiet, "protocol", c->protocol) < 0 ||
check_url_component(url, quiet, "host", c->host) < 0 ||
check_url_component(url, quiet, "path", c->path) < 0)
return -1;
return 0;
} | CWE-522 | 19 |
vhost_scsi_make_tpg(struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
struct vhost_scsi_tpg *tpg;
unsigned long tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
return ERR_PTR(-EINVAL);
tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM);
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
mutex_lock(&vhost_scsi_mutex);
list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
mutex_unlock(&vhost_scsi_mutex);
return &tpg->se_tpg;
} | CWE-119 | 26 |
parseuid(const char *s, uid_t *uid)
{
struct passwd *pw;
const char *errstr;
if ((pw = getpwnam(s)) != NULL) {
*uid = pw->pw_uid;
return 0;
}
#if !defined(__linux__) && !defined(__NetBSD__)
*uid = strtonum(s, 0, UID_MAX, &errstr);
#else
sscanf(s, "%d", uid);
#endif
if (errstr)
return -1;
return 0;
} | CWE-754 | 31 |
int ecryptfs_privileged_open(struct file **lower_file,
struct dentry *lower_dentry,
struct vfsmount *lower_mnt,
const struct cred *cred)
{
struct ecryptfs_open_req req;
int flags = O_LARGEFILE;
int rc = 0;
init_completion(&req.done);
req.lower_file = lower_file;
req.path.dentry = lower_dentry;
req.path.mnt = lower_mnt;
/* Corresponding dput() and mntput() are done when the
* lower file is fput() when all eCryptfs files for the inode are
* released. */
flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
goto out;
if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
}
mutex_lock(&ecryptfs_kthread_ctl.mux);
if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) {
rc = -EIO;
mutex_unlock(&ecryptfs_kthread_ctl.mux);
printk(KERN_ERR "%s: We are in the middle of shutting down; "
"aborting privileged request to open lower file\n",
__func__);
goto out;
}
list_add_tail(&req.kthread_ctl_list, &ecryptfs_kthread_ctl.req_list);
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
wait_for_completion(&req.done);
if (IS_ERR(*lower_file))
rc = PTR_ERR(*lower_file);
out:
return rc;
} | CWE-119 | 26 |
void faad_resetbits(bitfile *ld, int bits)
{
uint32_t tmp;
int words = bits >> 5;
int remainder = bits & 0x1F;
ld->bytes_left = ld->buffer_size - words*4;
if (ld->bytes_left >= 4)
{
tmp = getdword(&ld->start[words]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n(&ld->start[words], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufa = tmp;
if (ld->bytes_left >= 4)
{
tmp = getdword(&ld->start[words+1]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n(&ld->start[words+1], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufb = tmp;
ld->bits_left = 32 - remainder;
ld->tail = &ld->start[words+2];
/* recheck for reading too many bytes */
ld->error = 0;
// if (ld->bytes_left == 0)
// ld->no_more_reading = 1;
// if (ld->bytes_left < 0)
// ld->error = 1;
} | CWE-119 | 26 |
static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock, int flags,
int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
struct sockaddr_ieee802154 *saddr;
saddr = (struct sockaddr_ieee802154 *)msg->msg_name;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* FIXME: skip headers if necessary ?! */
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_ts_and_drops(msg, sk, skb);
if (saddr) {
saddr->family = AF_IEEE802154;
saddr->addr = mac_cb(skb)->sa;
}
if (addr_len)
*addr_len = sizeof(*saddr);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
} | CWE-20 | 0 |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
} | CWE-20 | 0 |
void IGDstartelt(void * d, const char * name, int l)
{
struct IGDdatas * datas = (struct IGDdatas *)d;
memcpy( datas->cureltname, name, l);
datas->cureltname[l] = '\0';
datas->level++;
if( (l==7) && !memcmp(name, "service", l) ) {
datas->tmp.controlurl[0] = '\0';
datas->tmp.eventsuburl[0] = '\0';
datas->tmp.scpdurl[0] = '\0';
datas->tmp.servicetype[0] = '\0';
}
} | CWE-119 | 26 |
def is_authenticated(self, user, password):
# The content of the file is not cached because reading is generally a
# very cheap operation, and it's useful to get live updates of the
# htpasswd file.
with open(self.filename) as fd:
for line in fd:
line = line.strip()
if line:
login, hash_value = line.split(":")
if login == user:
return self.verify(hash_value, password)
return False | CWE-362 | 18 |
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, "www-authenticate")
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if scheme in challenges:
yield AUTH_SCHEME_CLASSES[scheme](
cred, host, request_uri, headers, response, content, self
) | CWE-400 | 2 |
def login_user(self, username, password, otp, context, **kwargs):
user = authenticate(request=context.get('request'),
username=username, password=password)
if not user:
login_reject.send(sender=self.__class__, username=username, reason='creds')
raise FormattedException(m='incorrect_username_or_password', d={'reason': 'incorrect_username_or_password'},
status_code=HTTP_401_UNAUTHORIZED)
if not user.email_verified and not user.is_superuser:
login_reject.send(sender=self.__class__, username=username, reason='email')
raise FormattedException(m='email_verification_required', d={'reason': 'email_verification_required'},
status_code=HTTP_401_UNAUTHORIZED)
if not user.can_login():
login_reject.send(sender=self.__class__, username=username, reason='closed')
raise FormattedException(m='login_not_open', d={'reason': 'login_not_open'},
status_code=HTTP_401_UNAUTHORIZED)
if user.totp_status == TOTPStatus.ENABLED:
if not otp or otp == '':
login_reject.send(sender=self.__class__, username=username, reason='no_2fa')
raise FormattedException(m='2fa_required', d={'reason': '2fa_required'},
status_code=HTTP_401_UNAUTHORIZED)
totp = pyotp.TOTP(user.totp_secret)
if not totp.verify(otp, valid_window=1):
login_reject.send(sender=self.__class__, username=username, reason='incorrect_2fa')
raise FormattedException(m='incorrect_2fa', d={'reason': 'incorrect_2fa'},
status_code=HTTP_401_UNAUTHORIZED)
login.send(sender=self.__class__, user=user)
return user | CWE-287 | 4 |
def should_deny_admin(self):
return self.totp_status != TOTPStatus.ENABLED and config.get(
"enable_force_admin_2fa"
) | CWE-287 | 4 |
def test_magic_response_http_error():
mw = _get_mw()
req = SplashRequest('http://example.com/foo')
req = mw.process_request(req, None)
resp_data = {
"info": {
"error": "http404",
"message": "Lua error: [string \"function main(splash)\r...\"]:3: http404",
"line_number": 3,
"type": "LUA_ERROR",
"source": "[string \"function main(splash)\r...\"]"
},
"description": "Error happened while executing Lua script",
"error": 400,
"type": "ScriptError"
}
resp = TextResponse("http://mysplash.example.com/execute", status=400,
headers={b'Content-Type': b'application/json'},
body=json.dumps(resp_data).encode('utf8'))
resp = mw.process_response(req, resp, None)
assert resp.data == resp_data
assert resp.status == 404
assert resp.splash_response_status == 400
assert resp.url == "http://example.com/foo" | CWE-200 | 10 |
def test_basic():
# Test Basic Authentication
http = httplib2.Http()
password = tests.gen_password()
handler = tests.http_reflect_with_auth(
allow_scheme="basic", allow_credentials=(("joe", password),)
)
with tests.server_request(handler, request_count=3) as uri:
response, content = http.request(uri, "GET")
assert response.status == 401
http.add_credentials("joe", password)
response, content = http.request(uri, "GET")
assert response.status == 200 | CWE-400 | 2 |
def main(req: func.HttpRequest) -> func.HttpResponse:
response = ok(
Info(
resource_group=get_base_resource_group(),
region=get_base_region(),
subscription=get_subscription(),
versions=versions(),
instance_id=get_instance_id(),
insights_appid=get_insights_appid(),
insights_instrumentation_key=get_insights_instrumentation_key(),
)
)
return response | CWE-285 | 23 |
def __call__(self, value, system):
if 'request' in system:
request = system['request']
mime, encoding = mimetypes.guess_type(value['filename'])
request.response_content_type = mime
if encoding:
request.response_encoding = encoding
f = os.path.join(self.repository_root,
value['filename'][0].lower(),
value['filename'])
if not os.path.exists(f):
dir_ = os.path.join(self.repository_root,
value['filename'][0].lower())
if not os.path.exists(dir_):
os.makedirs(dir_, 0750)
resp = requests.get(value['url'])
with open(f, 'wb') as rf:
rf.write(resp.content)
return resp.content
else:
data = ''
with open(f, 'rb') as rf:
data = ''
while True:
content = rf.read(2<<16)
if not content:
break
data += content
return data | CWE-20 | 0 |
def parse_http_message(kind, buf):
if buf._end:
return None
try:
start_line = buf.readline()
except EOFError:
return None
msg = kind()
msg.raw = start_line
if kind is HttpRequest:
assert re.match(
br".+ HTTP/\d\.\d\r\n$", start_line
), "Start line does not look like HTTP request: " + repr(start_line)
msg.method, msg.uri, msg.proto = start_line.rstrip().decode().split(" ", 2)
assert msg.proto.startswith("HTTP/"), repr(start_line)
elif kind is HttpResponse:
assert re.match(
br"^HTTP/\d\.\d \d+ .+\r\n$", start_line
), "Start line does not look like HTTP response: " + repr(start_line)
msg.proto, msg.status, msg.reason = start_line.rstrip().decode().split(" ", 2)
msg.status = int(msg.status)
assert msg.proto.startswith("HTTP/"), repr(start_line)
else:
raise Exception("Use HttpRequest or HttpResponse .from_{bytes,buffered}")
msg.version = msg.proto[5:]
while True:
line = buf.readline()
msg.raw += line
line = line.rstrip()
if not line:
break
t = line.decode().split(":", 1)
msg.headers[t[0].lower()] = t[1].lstrip()
content_length_string = msg.headers.get("content-length", "")
if content_length_string.isdigit():
content_length = int(content_length_string)
msg.body = msg.body_raw = buf.read(content_length)
elif msg.headers.get("transfer-encoding") == "chunked":
raise NotImplemented
elif msg.version == "1.0":
msg.body = msg.body_raw = buf.readall()
else:
msg.body = msg.body_raw = b""
msg.raw += msg.body_raw
return msg | CWE-400 | 2 |
def main() -> None:
formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=formatter)
parser.add_argument("resource_group")
parser.add_argument("storage_account")
parser.add_argument("admins", type=UUID, nargs="*")
args = parser.parse_args()
client = get_client_from_cli_profile(StorageManagementClient)
storage_keys = client.storage_accounts.list_keys(
args.resource_group, args.storage_account
)
table_service = TableService(
account_name=args.storage_account, account_key=storage_keys.keys[0].value
)
update_admins(table_service, args.resource_group, args.admins) | CWE-346 | 16 |
def _ensureSnapshotsFolder(self, subdir=None):
""" Ensure that the appropriate snapshot folder exists.
"""
path = ['snapshots', self._snapshot_id]
if subdir is not None:
path.extend(subdir.split('/'))
current = self._tool
for element in path:
if element not in current.objectIds():
# No Unicode IDs!
current._setObject(str(element), Folder(element))
current = current._getOb(element)
return current | CWE-200 | 10 |
def resolve_orders(root: models.User, info, **kwargs):
from ..order.types import OrderCountableConnection
def _resolve_orders(orders):
requester = get_user_or_app_from_context(info.context)
if not requester.has_perm(OrderPermissions.MANAGE_ORDERS):
orders = list(
filter(lambda order: order.status != OrderStatus.DRAFT, orders)
)
return create_connection_slice(
orders, info, kwargs, OrderCountableConnection
)
return OrdersByUserLoader(info.context).load(root.id).then(_resolve_orders) | CWE-863 | 11 |
def test_modify_config(self) -> None:
user1 = uuid4()
user2 = uuid4()
# no admins set
self.assertTrue(can_modify_config_impl(InstanceConfig(), UserInfo()))
# with oid, but no admin
self.assertTrue(
can_modify_config_impl(InstanceConfig(), UserInfo(object_id=user1))
)
# is admin
self.assertTrue(
can_modify_config_impl(
InstanceConfig(admins=[user1]), UserInfo(object_id=user1)
)
)
# no user oid set
self.assertFalse(
can_modify_config_impl(InstanceConfig(admins=[user1]), UserInfo())
)
# not an admin
self.assertFalse(
can_modify_config_impl(
InstanceConfig(admins=[user1]), UserInfo(object_id=user2)
)
) | CWE-285 | 23 |
def sync_tree(self):
LOGGER.info("sync tree to host")
self.transfer_inst.tree.remote(self.tree_,
role=consts.HOST,
idx=-1)
"""
federation.remote(obj=self.tree_,
name=self.transfer_inst.tree.name,
tag=self.transfer_inst.generate_transferid(self.transfer_inst.tree),
role=consts.HOST,
idx=-1)
""" | CWE-668 | 7 |
def test_with_admins(self) -> None:
no_admins = InstanceConfig(admins=None)
with_admins = InstanceConfig(admins=[UUID(int=0)])
with_admins_2 = InstanceConfig(admins=[UUID(int=1)])
no_admins.update(with_admins)
self.assertEqual(no_admins.admins, None)
with_admins.update(with_admins_2)
self.assertEqual(with_admins.admins, with_admins_2.admins) | CWE-346 | 16 |
def test_parse_www_authenticate_malformed():
# TODO: test (and fix) header value 'barbqwnbm-bb...:asd' leads to dead loop
with tests.assert_raises(httplib2.MalformedHeader):
httplib2._parse_www_authenticate(
{
"www-authenticate": 'OAuth "Facebook Platform" "invalid_token" "Invalid OAuth access token."'
}
) | CWE-400 | 2 |