code (stringlengths: 12–2.05k) | label_name (stringclasses: 5 values) | label (int64: 0–4) |
---|---|---|
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version)
{
struct hsr_priv *hsr;
struct hsr_port *port;
int res;
hsr = netdev_priv(hsr_dev);
INIT_LIST_HEAD(&hsr->ports);
INIT_LIST_HEAD(&hsr->node_db);
INIT_LIST_HEAD(&hsr->self_node_db);
ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
/* Make sure we recognize frames from ourselves in hsr_rcv() */
res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
slave[1]->dev_addr);
if (res < 0)
return res;
spin_lock_init(&hsr->seqnr_lock);
/* Overflow soon to find bugs easier: */
hsr->sequence_nr = HSR_SEQNR_START;
hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
hsr->protVersion = protocol_version;
/* FIXME: should I modify the value of these?
*
* - hsr_dev->flags - i.e.
* IFF_MASTER/SLAVE?
* - hsr_dev->priv_flags - i.e.
* IFF_EBRIDGE?
* IFF_TX_SKB_SHARING?
* IFF_HSR_MASTER/SLAVE?
*/
/* Make sure the 1st call to netif_carrier_on() gets through */
netif_carrier_off(hsr_dev);
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
if (res)
return res;
res = register_netdevice(hsr_dev);
if (res)
goto fail;
res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
if (res)
goto fail;
res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
if (res)
goto fail;
mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
return 0;
fail:
hsr_for_each_port(hsr, port)
hsr_del_port(port);
return res;
} | Variant | 0 |
NOEXPORT int init_section(int eof, SERVICE_OPTIONS **section_ptr) {
char *errstr;
#ifndef USE_WIN32
(*section_ptr)->option.log_stderr=new_global_options.option.log_stderr;
#endif /* USE_WIN32 */
if(*section_ptr==&new_service_options) {
/* end of global options or inetd mode -> initialize globals */
errstr=parse_global_option(CMD_INITIALIZE, NULL, NULL);
if(errstr) {
s_log(LOG_ERR, "Global options: %s", errstr);
return 1;
}
}
if(*section_ptr!=&new_service_options || eof) {
/* end service section or inetd mode -> initialize service */
if(*section_ptr==&new_service_options)
s_log(LOG_INFO, "Initializing inetd mode configuration");
else
s_log(LOG_INFO, "Initializing service [%s]",
(*section_ptr)->servname);
errstr=parse_service_option(CMD_INITIALIZE, section_ptr, NULL, NULL);
if(errstr) {
if(*section_ptr==&new_service_options)
s_log(LOG_ERR, "Inetd mode: %s", errstr);
else
s_log(LOG_ERR, "Service [%s]: %s",
(*section_ptr)->servname, errstr);
return 1;
}
}
return 0;
} | Base | 1 |
snmp_ber_decode_string_len_buffer(unsigned char *buf, uint32_t *buff_len, const char **str, uint32_t *length)
{
uint8_t type, i, length_bytes;
buf = snmp_ber_decode_type(buf, buff_len, &type);
if(buf == NULL || type != BER_DATA_TYPE_OCTET_STRING) {
/*
* Sanity check
* Invalid type in buffer
*/
return NULL;
}
if((*buf & 0x80) == 0) {
*length = (uint32_t)*buf++;
(*buff_len)--;
} else {
length_bytes = (uint8_t)(*buf++ & 0x7F);
(*buff_len)--;
if(length_bytes > 4) {
/*
* Sanity check
* It will not fit in the uint32_t
*/
return NULL;
}
*length = (uint32_t)*buf++;
(*buff_len)--;
for(i = 1; i < length_bytes; ++i) {
*length <<= 8;
*length |= *buf++;
(*buff_len)--;
}
}
*str = (const char *)buf;
*buff_len -= *length;
return buf + *length;
} | Base | 1 |
int main(int argc, char *argv[])
{
int ret;
struct lxc_lock *lock;
lock = lxc_newlock(NULL, NULL);
if (!lock) {
fprintf(stderr, "%d: failed to get unnamed lock\n", __LINE__);
exit(1);
}
ret = lxclock(lock, 0);
if (ret) {
fprintf(stderr, "%d: failed to take unnamed lock (%d)\n", __LINE__, ret);
exit(1);
}
ret = lxcunlock(lock);
if (ret) {
fprintf(stderr, "%d: failed to put unnamed lock (%d)\n", __LINE__, ret);
exit(1);
}
lxc_putlock(lock);
lock = lxc_newlock("/var/lib/lxc", mycontainername);
if (!lock) {
fprintf(stderr, "%d: failed to get lock\n", __LINE__);
exit(1);
}
struct stat sb;
char *pathname = RUNTIME_PATH "/lock/lxc/var/lib/lxc/";
ret = stat(pathname, &sb);
if (ret != 0) {
fprintf(stderr, "%d: filename %s not created\n", __LINE__,
pathname);
exit(1);
}
lxc_putlock(lock);
test_two_locks();
fprintf(stderr, "all tests passed\n");
exit(ret);
} | Base | 1 |
pci_lintr_release(struct pci_vdev *dev)
{
struct businfo *bi;
struct slotinfo *si;
int pin;
bi = pci_businfo[dev->bus];
assert(bi != NULL);
si = &bi->slotinfo[dev->slot];
for (pin = 1; pin < 4; pin++) {
si->si_intpins[pin].ii_count = 0;
si->si_intpins[pin].ii_pirq_pin = 0;
si->si_intpins[pin].ii_ioapic_irq = 0;
}
} | Base | 1 |
static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) {
RBuffer *fbuf = r_buf_ref (buf);
struct MACH0_(opts_t) opts;
MACH0_(opts_set_default) (&opts, bf);
struct MACH0_(obj_t) *main_mach0 = MACH0_(new_buf) (fbuf, &opts);
if (!main_mach0) {
return false;
}
RRebaseInfo *rebase_info = r_rebase_info_new_from_mach0 (fbuf, main_mach0);
RKernelCacheObj *obj = NULL;
RPrelinkRange *prelink_range = get_prelink_info_range_from_mach0 (main_mach0);
if (!prelink_range) {
goto beach;
}
obj = R_NEW0 (RKernelCacheObj);
if (!obj) {
R_FREE (prelink_range);
goto beach;
}
RCFValueDict *prelink_info = NULL;
if (main_mach0->hdr.filetype != MH_FILESET && prelink_range->range.size) {
prelink_info = r_cf_value_dict_parse (fbuf, prelink_range->range.offset,
prelink_range->range.size, R_CF_OPTION_SKIP_NSDATA);
if (!prelink_info) {
R_FREE (prelink_range);
R_FREE (obj);
goto beach;
}
}
if (!pending_bin_files) {
pending_bin_files = r_list_new ();
if (!pending_bin_files) {
R_FREE (prelink_range);
R_FREE (obj);
R_FREE (prelink_info);
goto beach;
}
}
obj->mach0 = main_mach0;
obj->rebase_info = rebase_info;
obj->prelink_info = prelink_info;
obj->cache_buf = fbuf;
obj->pa2va_exec = prelink_range->pa2va_exec;
obj->pa2va_data = prelink_range->pa2va_data;
R_FREE (prelink_range);
*bin_obj = obj;
r_list_push (pending_bin_files, bf);
if (rebase_info || main_mach0->chained_starts) {
RIO *io = bf->rbin->iob.io;
swizzle_io_read (obj, io);
}
return true;
beach:
r_buf_free (fbuf);
obj->cache_buf = NULL;
MACH0_(mach0_free) (main_mach0);
return false;
} | Base | 1 |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
} | Class | 2 |
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
if (flags & MSG_ERRQUEUE) {
err = ip_recv_error(sk, msg, len);
goto out;
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
} | Class | 2 |
static int rngapi_reset(struct crypto_rng *tfm, const u8 *seed,
unsigned int slen)
{
u8 *buf = NULL;
u8 *src = (u8 *)seed;
int err;
if (slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, seed, slen);
src = buf;
}
err = crypto_old_rng_alg(tfm)->rng_reset(tfm, src, slen);
kzfree(buf);
return err;
} | Base | 1 |
static const char *parse_number( cJSON *item, const char *num )
{
int64_t i = 0;
double f = 0;
int isint = 1;
int sign = 1, scale = 0, subscale = 0, signsubscale = 1;
/* Could use sscanf for this? */
if ( *num == '-' ) {
/* Has sign. */
sign = -1;
++num;
}
if ( *num == '0' )
/* Is zero. */
++num;
if ( *num >= '1' && *num<='9' ) {
/* Number. */
do {
i = ( i * 10 ) + ( *num - '0' );
f = ( f * 10.0 ) + ( *num - '0' );
++num;
} while ( *num >= '0' && *num <= '9' );
}
if ( *num == '.' && num[1] >= '0' && num[1] <= '9' ) {
/* Fractional part. */
isint = 0;
++num;
do {
f = ( f * 10.0 ) + ( *num++ - '0' );
scale--;
} while ( *num >= '0' && *num <= '9' );
}
if ( *num == 'e' || *num == 'E' ) {
/* Exponent. */
isint = 0;
++num;
if ( *num == '+' )
++num;
else if ( *num == '-' ) {
/* With sign. */
signsubscale = -1;
++num;
}
while ( *num >= '0' && *num <= '9' )
subscale = ( subscale * 10 ) + ( *num++ - '0' );
}
/* Put it together. */
if ( isint ) {
/* Int: number = +/- number */
i = sign * i;
item->valueint = i;
item->valuefloat = i;
} else {
/* Float: number = +/- number.fraction * 10^+/- exponent */
f = sign * f * ipow( 10.0, scale + subscale * signsubscale );
item->valueint = f;
item->valuefloat = f;
}
item->type = cJSON_Number;
return num;
} | Base | 1 |
static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer, AppArmor requires that
* the buffer must be null terminated or have size <= PAGE_SIZE -1
* so that AppArmor can null terminate them
*/
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else if (strcmp(command, "permipc") == 0) {
error = aa_setprocattr_permipc(args);
} else {
struct common_audit_data sa;
COMMON_AUDIT_DATA_INIT(&sa, NONE);
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
} else {
/* only support the "current" and "exec" process attributes */
return -EINVAL;
}
if (!error)
error = size;
return error;
} | Class | 2 |
init_connection_options(MYSQL *mysql)
{
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
if (opt_use_ssl)
{
mysql_ssl_set(mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
opt_ssl_capath, opt_ssl_cipher);
mysql_options(mysql, MYSQL_OPT_SSL_CRL, opt_ssl_crl);
mysql_options(mysql, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath);
}
mysql_options(mysql, MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
(char*) &opt_ssl_verify_server_cert);
#endif
if (opt_protocol)
mysql_options(mysql, MYSQL_OPT_PROTOCOL, (char*) &opt_protocol);
#ifdef HAVE_SMEM
if (shared_memory_base_name)
mysql_options(mysql, MYSQL_SHARED_MEMORY_BASE_NAME, shared_memory_base_name);
#endif
} | Base | 1 |
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
/* Load per-mm CR4 state */
load_mm_cr4(next);
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
* Load the LDT, if the LDT is different.
*
* It's possible that prev->context.ldt doesn't match
* the LDT register. This can happen if leave_mm(prev)
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
#endif
}
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* On established mms, the mm_cpumask is only changed
* from irq context, from ptep_clear_flush() while in
* lazy tlb mode, and here. Irqs are blocked during
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
load_mm_ldt(next);
}
}
#endif
} | Class | 2 |
ast_for_funcdef_impl(struct compiling *c, const node *n,
asdl_seq *decorator_seq, int is_async)
{
/* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */
identifier name;
arguments_ty args;
asdl_seq *body;
expr_ty returns = NULL;
int name_i = 1;
node *tc;
string type_comment = NULL;
if (is_async && c->c_feature_version < 5) {
ast_error(c, n,
"Async functions are only supported in Python 3.5 and greater");
return NULL;
}
REQ(n, funcdef);
name = NEW_IDENTIFIER(CHILD(n, name_i));
if (!name)
return NULL;
if (forbidden_name(c, name, CHILD(n, name_i), 0))
return NULL;
args = ast_for_arguments(c, CHILD(n, name_i + 1));
if (!args)
return NULL;
if (TYPE(CHILD(n, name_i+2)) == RARROW) {
returns = ast_for_expr(c, CHILD(n, name_i + 3));
if (!returns)
return NULL;
name_i += 2;
}
if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) {
type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3));
name_i += 1;
}
body = ast_for_suite(c, CHILD(n, name_i + 3));
if (!body)
return NULL;
if (!type_comment && NCH(CHILD(n, name_i + 3)) > 1) {
/* If the function doesn't have a type comment on the same line, check
* if the suite has a type comment in it. */
tc = CHILD(CHILD(n, name_i + 3), 1);
if (TYPE(tc) == TYPE_COMMENT)
type_comment = NEW_TYPE_COMMENT(tc);
}
if (is_async)
return AsyncFunctionDef(name, args, body, decorator_seq, returns,
type_comment, LINENO(n),
n->n_col_offset, c->c_arena);
else
return FunctionDef(name, args, body, decorator_seq, returns,
type_comment, LINENO(n),
n->n_col_offset, c->c_arena);
} | Base | 1 |
static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) {
RDyldCache *cache = R_NEW0 (RDyldCache);
memcpy (cache->magic, "dyldcac", 7);
cache->buf = r_buf_ref (buf);
populate_cache_headers (cache);
if (!cache->hdr) {
r_dyldcache_free (cache);
return false;
}
populate_cache_maps (cache);
if (!cache->maps) {
r_dyldcache_free (cache);
return false;
}
cache->accel = read_cache_accel (cache->buf, cache->hdr, cache->maps);
cache->bins = create_cache_bins (bf, cache);
if (!cache->bins) {
r_dyldcache_free (cache);
return false;
}
cache->locsym = r_dyld_locsym_new (cache);
cache->rebase_infos = get_rebase_infos (bf, cache);
if (cache->rebase_infos) {
if (!rebase_infos_get_slide (cache)) {
if (!pending_bin_files) {
pending_bin_files = r_list_new ();
if (!pending_bin_files) {
r_dyldcache_free (cache);
return false;
}
}
r_list_push (pending_bin_files, bf);
swizzle_io_read (cache, bf->rbin->iob.io);
}
}
*bin_obj = cache;
return true;
} | Base | 1 |
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct perf_event *event;
struct hlist_node *node;
struct hlist_head *head;
rcu_read_lock();
head = find_swevent_head_rcu(swhash, type, event_id);
if (!head)
goto end;
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, nmi, data, regs);
}
end:
rcu_read_unlock();
} | Class | 2 |
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
muscle_private_t* priv = MUSCLE_DATA(card);
mscfs_t *fs = priv->fs;
int x;
int count = 0;
mscfs_check_cache(priv->fs);
for(x = 0; x < fs->cache.size; x++) {
u8* oid= fs->cache.array[x].objectId.id;
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"FILE: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
if(0 == memcmp(fs->currentPath, oid, 2)) {
buf[0] = oid[2];
buf[1] = oid[3];
if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */
buf += 2;
count+=2;
}
}
return count;
} | Class | 2 |
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial)
{
cac_private_data_t * priv = CAC_DATA(card);
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL);
if (card->serialnr.len) {
*serial = card->serialnr;
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
}
if (priv->cac_id_len) {
serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR);
memcpy(serial->value, priv->cac_id, priv->cac_id_len);
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
}
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND);
} | Class | 2 |
static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
struct usb_request *req;
unsigned long flags;
ssize_t status = -ENOMEM;
if (!access_ok(buffer, count))
return -EFAULT;
spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
try_again:
/* write queue */
while (!WRITE_COND) {
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible_exclusive(
hidg->write_queue, WRITE_COND))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->write_spinlock, flags);
}
hidg->write_pending = 1;
req = hidg->req;
count = min_t(unsigned, count, hidg->report_length);
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
status = -EINVAL;
goto release_write_pending;
}
spin_lock_irqsave(&hidg->write_spinlock, flags);
/* when our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, req);
/*
* TODO
* Should we fail with error here?
*/
goto try_again;
}
req->status = 0;
req->zero = 0;
req->length = count;
req->complete = f_hidg_req_complete;
req->context = hidg;
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
goto release_write_pending_unlocked;
} else {
status = count;
}
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
release_write_pending_unlocked:
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
return status;
} | Class | 2 |
static int exists_not_none(PyObject *obj, _Py_Identifier *id)
{
int isnone;
PyObject *attr = _PyObject_GetAttrId(obj, id);
if (!attr) {
PyErr_Clear();
return 0;
}
isnone = attr == Py_None;
Py_DECREF(attr);
return !isnone;
} | Base | 1 |
process_bitmap_updates(STREAM s)
{
uint16 num_updates;
uint16 left, top, right, bottom, width, height;
uint16 cx, cy, bpp, Bpp, compress, bufsize, size;
uint8 *data, *bmpdata;
int i;
logger(Protocol, Debug, "%s()", __func__);
in_uint16_le(s, num_updates);
for (i = 0; i < num_updates; i++)
{
in_uint16_le(s, left);
in_uint16_le(s, top);
in_uint16_le(s, right);
in_uint16_le(s, bottom);
in_uint16_le(s, width);
in_uint16_le(s, height);
in_uint16_le(s, bpp);
Bpp = (bpp + 7) / 8;
in_uint16_le(s, compress);
in_uint16_le(s, bufsize);
cx = right - left + 1;
cy = bottom - top + 1;
logger(Graphics, Debug,
"process_bitmap_updates(), [%d,%d,%d,%d], [%d,%d], bpp=%d, compression=%d",
left, top, right, bottom, width, height, Bpp, compress);
if (!compress)
{
int y;
bmpdata = (uint8 *) xmalloc(width * height * Bpp);
for (y = 0; y < height; y++)
{
in_uint8a(s, &bmpdata[(height - y - 1) * (width * Bpp)],
width * Bpp);
}
ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata);
xfree(bmpdata);
continue;
}
if (compress & 0x400)
{
size = bufsize;
}
else
{
in_uint8s(s, 2); /* pad */
in_uint16_le(s, size);
in_uint8s(s, 4); /* line_size, final_size */
}
in_uint8p(s, data, size);
bmpdata = (uint8 *) xmalloc(width * height * Bpp);
if (bitmap_decompress(bmpdata, width, height, data, size, Bpp))
{
ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata);
}
else
{
logger(Graphics, Warning,
"process_bitmap_updates(), failed to decompress bitmap");
}
xfree(bmpdata);
}
} | Base | 1 |
static ScreenCell *realloc_buffer(VTermScreen *screen, ScreenCell *buffer, int new_rows, int new_cols)
{
ScreenCell *new_buffer = vterm_allocator_malloc(screen->vt, sizeof(ScreenCell) * new_rows * new_cols);
int row, col;
for(row = 0; row < new_rows; row++) {
for(col = 0; col < new_cols; col++) {
ScreenCell *new_cell = new_buffer + row*new_cols + col;
if(buffer && row < screen->rows && col < screen->cols)
*new_cell = buffer[row * screen->cols + col];
else {
new_cell->chars[0] = 0;
new_cell->pen = screen->pen;
}
}
}
if(buffer)
vterm_allocator_free(screen->vt, buffer);
return new_buffer;
} | Base | 1 |
static void rds_tcp_kill_sock(struct net *net)
{
struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct socket *lsock = rtn->rds_tcp_listen_sock;
rtn->rds_tcp_listen_sock = NULL;
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net || !tc->t_sock)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
} else {
list_del(&tc->t_tcp_node);
tc->t_tcp_node_detached = true;
}
}
spin_unlock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
} | Class | 2 |
static int rd_build_device_space(struct rd_dev *rd_dev)
{
u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
} | Class | 2 |
int ssl_init(void) { /* init TLS before parsing configuration file */
#if OPENSSL_VERSION_NUMBER>=0x10100000L
OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS |
OPENSSL_INIT_LOAD_CRYPTO_STRINGS | OPENSSL_INIT_LOAD_CONFIG, NULL);
#else
OPENSSL_config(NULL);
SSL_load_error_strings();
SSL_library_init();
#endif
index_ssl_cli=SSL_get_ex_new_index(0,
"CLI pointer", NULL, NULL, NULL);
index_ssl_ctx_opt=SSL_CTX_get_ex_new_index(0,
"SERVICE_OPTIONS pointer", NULL, NULL, NULL);
index_session_authenticated=SSL_SESSION_get_ex_new_index(0,
"session authenticated", NULL, NULL, NULL);
index_session_connect_address=SSL_SESSION_get_ex_new_index(0,
"session connect address", NULL, cb_dup_addr, cb_free_addr);
if(index_ssl_cli<0 || index_ssl_ctx_opt<0 ||
index_session_authenticated<0 ||
index_session_connect_address<0) {
s_log(LOG_ERR, "Application specific data initialization failed");
return 1;
}
#ifndef OPENSSL_NO_ENGINE
ENGINE_load_builtin_engines();
#endif
#ifndef OPENSSL_NO_DH
dh_params=get_dh2048();
if(!dh_params) {
s_log(LOG_ERR, "Failed to get default DH parameters");
return 1;
}
#endif /* OPENSSL_NO_DH */
return 0;
} | Base | 1 |
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfca_poll *nfca_poll,
__u8 *data)
{
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
nfca_poll->nfcid1_len = *data++;
pr_debug("sens_res 0x%x, nfcid1_len %d\n",
nfca_poll->sens_res, nfca_poll->nfcid1_len);
memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
data += nfca_poll->nfcid1_len;
nfca_poll->sel_res_len = *data++;
if (nfca_poll->sel_res_len != 0)
nfca_poll->sel_res = *data++;
pr_debug("sel_res_len %d, sel_res 0x%x\n",
nfca_poll->sel_res_len,
nfca_poll->sel_res);
return data;
} | Class | 2 |
void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key,
uint8_t *encryptedPrivateKey, uint32_t *enc_len) {
LOG_INFO(__FUNCTION__);
*errString = 0;
*errStatus = UNKNOWN_ERROR;
CHECK_STATE(key);
CHECK_STATE(encryptedPrivateKey);
*errStatus = UNKNOWN_ERROR;
int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN);
CHECK_STATUS2("AES encrypt failed with status %d");
*enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;
SAFE_CHAR_BUF(decryptedKey, BUF_LEN);
status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);
CHECK_STATUS2("trustedDecryptKey failed with status %d");
uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH);
if (decryptedKeyLen == MAX_KEY_LENGTH) {
snprintf(errString, BUF_LEN, "Decrypted key is not null terminated");
LOG_ERROR(errString);
goto clean;
}
*errStatus = -8;
if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) {
snprintf(errString, BUF_LEN, "Decrypted key does not match original key");
LOG_ERROR(errString);
goto clean;
}
SET_SUCCESS
clean:
;
LOG_INFO(__FUNCTION__ );
LOG_INFO("SGX call completed");
} | Base | 1 |
snmp_ber_encode_string_len(unsigned char *out, uint32_t *out_len, const char *str, uint32_t length)
{
uint32_t i;
str += length - 1;
for(i = 0; i < length; ++i) {
(*out_len)++;
*out-- = (uint8_t)*str--;
}
out = snmp_ber_encode_length(out, out_len, length);
out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_OCTET_STRING);
return out;
} | Base | 1 |
static ssize_t drop_sync(QIOChannel *ioc, size_t size)
{
ssize_t ret = 0;
char small[1024];
char *buffer;
buffer = sizeof(small) < size ? small : g_malloc(MIN(65536, size));
while (size > 0) {
ssize_t count = read_sync(ioc, buffer, MIN(65536, size));
if (count <= 0) {
goto cleanup;
}
assert(count <= size);
size -= count;
ret += count;
}
cleanup:
if (buffer != small) {
g_free(buffer);
}
return ret;
} | Variant | 0 |
static pdf_creator_t *new_creator(int *n_elements)
{
pdf_creator_t *daddy;
static const pdf_creator_t creator_template[] =
{
{"Title", ""},
{"Author", ""},
{"Subject", ""},
{"Keywords", ""},
{"Creator", ""},
{"Producer", ""},
{"CreationDate", ""},
{"ModDate", ""},
{"Trapped", ""},
};
daddy = malloc(sizeof(creator_template));
memcpy(daddy, creator_template, sizeof(creator_template));
if (n_elements)
*n_elements = sizeof(creator_template) / sizeof(creator_template[0]);
return daddy;
} | Base | 1 |
resp_get_length(netdissect_options *ndo, register const u_char *bp, int len, const u_char **endp)
{
int result;
u_char c;
int saw_digit;
int neg;
int too_large;
if (len == 0)
goto trunc;
ND_TCHECK(*bp);
too_large = 0;
neg = 0;
if (*bp == '-') {
neg = 1;
bp++;
len--;
}
result = 0;
saw_digit = 0;
for (;;) {
if (len == 0)
goto trunc;
ND_TCHECK(*bp);
c = *bp;
if (!(c >= '0' && c <= '9')) {
if (!saw_digit)
goto invalid;
break;
}
c -= '0';
if (result > (INT_MAX / 10)) {
/* This will overflow an int when we multiply it by 10. */
too_large = 1;
} else {
result *= 10;
if (result == INT_MAX && c > (INT_MAX % 10)) {
/* This will overflow an int when we add c */
too_large = 1;
} else
result += c;
}
bp++;
len--;
saw_digit = 1;
}
if (!saw_digit)
goto invalid;
/*
* OK, the next thing should be \r\n.
*/
if (len == 0)
goto trunc;
ND_TCHECK(*bp);
if (*bp != '\r')
goto invalid;
bp++;
len--;
if (len == 0)
goto trunc;
ND_TCHECK(*bp);
if (*bp != '\n')
goto invalid;
bp++;
len--;
*endp = bp;
if (neg) {
/* -1 means "null", anything else is invalid */
if (too_large || result != 1)
return (-4);
result = -1;
}
return (too_large ? -3 : result);
trunc:
return (-2);
invalid:
return (-5);
} | Base | 1 |
header_put_be_int (SF_PRIVATE *psf, int x)
{ if (psf->headindex < SIGNED_SIZEOF (psf->header) - 4)
{ psf->header [psf->headindex++] = (x >> 24) ;
psf->header [psf->headindex++] = (x >> 16) ;
psf->header [psf->headindex++] = (x >> 8) ;
psf->header [psf->headindex++] = x ;
} ;
} /* header_put_be_int */ | Class | 2 |
parse_user_name(char *user_input, char **ret_username)
{
register char *ptr;
register int index = 0;
char username[PAM_MAX_RESP_SIZE];
/* Set the default value for *ret_username */
*ret_username = NULL;
/*
* Set the initial value for username - this is a buffer holds
* the user name.
*/
bzero((void *)username, PAM_MAX_RESP_SIZE);
/*
* The user_input is guaranteed to be terminated by a null character.
*/
ptr = user_input;
/* Skip all the leading whitespaces if there are any. */
while ((*ptr == ' ') || (*ptr == '\t'))
ptr++;
if (*ptr == '\0') {
/*
* We should never get here since the user_input we got
* in pam_get_user() is not all whitespaces nor just "\0".
*/
return (PAM_BUF_ERR);
}
/*
* username will be the first string we get from user_input
* - we skip leading whitespaces and ignore trailing whitespaces
*/
while (*ptr != '\0') {
if ((*ptr == ' ') || (*ptr == '\t'))
break;
else {
username[index] = *ptr;
index++;
ptr++;
}
}
/* ret_username will be freed in pam_get_user(). */
if ((*ret_username = malloc(index + 1)) == NULL)
return (PAM_BUF_ERR);
(void) strcpy(*ret_username, username);
return (PAM_SUCCESS);
} | Base | 1 |
get_visual_text(
cmdarg_T *cap,
char_u **pp, // return: start of selected text
int *lenp) // return: length of selected text
{
if (VIsual_mode != 'V')
unadjust_for_sel();
if (VIsual.lnum != curwin->w_cursor.lnum)
{
if (cap != NULL)
clearopbeep(cap->oap);
return FAIL;
}
if (VIsual_mode == 'V')
{
*pp = ml_get_curline();
*lenp = (int)STRLEN(*pp);
}
else
{
if (LT_POS(curwin->w_cursor, VIsual))
{
*pp = ml_get_pos(&curwin->w_cursor);
*lenp = VIsual.col - curwin->w_cursor.col + 1;
}
else
{
*pp = ml_get_pos(&VIsual);
*lenp = curwin->w_cursor.col - VIsual.col + 1;
}
if (**pp == NUL)
*lenp = 0;
if (has_mbyte && *lenp > 0)
// Correct the length to include all bytes of the last character.
*lenp += (*mb_ptr2len)(*pp + (*lenp - 1)) - 1;
}
reset_VIsual_and_resel();
return OK;
} | Variant | 0 |
static void iwjpeg_scan_exif_ifd(struct iwjpegrcontext *rctx,
struct iw_exif_state *e, iw_uint32 ifd)
{
unsigned int tag_count;
unsigned int i;
unsigned int tag_pos;
unsigned int tag_id;
unsigned int v;
double v_dbl;
if(ifd<8 || ifd>e->d_len-18) return;
tag_count = iw_get_ui16_e(&e->d[ifd],e->endian);
if(tag_count>1000) return; // Sanity check.
for(i=0;i<tag_count;i++) {
tag_pos = ifd+2+i*12;
if(tag_pos+12 > e->d_len) return; // Avoid overruns.
tag_id = iw_get_ui16_e(&e->d[tag_pos],e->endian);
switch(tag_id) {
case 274: // 274 = Orientation
if(get_exif_tag_int_value(e,tag_pos,&v)) {
rctx->exif_orientation = v;
}
break;
case 296: // 296 = ResolutionUnit
if(get_exif_tag_int_value(e,tag_pos,&v)) {
rctx->exif_density_unit = v;
}
break;
case 282: // 282 = XResolution
if(get_exif_tag_dbl_value(e,tag_pos,&v_dbl)) {
rctx->exif_density_x = v_dbl;
}
break;
case 283: // 283 = YResolution
if(get_exif_tag_dbl_value(e,tag_pos,&v_dbl)) {
rctx->exif_density_y = v_dbl;
}
break;
}
}
} | Base | 1 |
void Huff_Compress(msg_t *mbuf, int offset) {
int i, ch, size;
byte seq[65536];
byte* buffer;
huff_t huff;
size = mbuf->cursize - offset;
buffer = mbuf->data+ + offset;
if (size<=0) {
return;
}
Com_Memset(&huff, 0, sizeof(huff_t));
// Add the NYT (not yet transmitted) node into the tree/list */
huff.tree = huff.lhead = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]);
huff.tree->symbol = NYT;
huff.tree->weight = 0;
huff.lhead->next = huff.lhead->prev = NULL;
huff.tree->parent = huff.tree->left = huff.tree->right = NULL;
seq[0] = (size>>8);
seq[1] = size&0xff;
bloc = 16;
for (i=0; i<size; i++ ) {
ch = buffer[i];
Huff_transmit(&huff, ch, seq); /* Transmit symbol */
Huff_addRef(&huff, (byte)ch); /* Do update */
}
bloc += 8; // next byte
mbuf->cursize = (bloc>>3) + offset;
Com_Memcpy(mbuf->data+offset, seq, (bloc>>3));
} | Class | 2 |
static int cac_read_binary(sc_card_t *card, unsigned int idx,
unsigned char *buf, size_t count, unsigned long flags)
{
cac_private_data_t * priv = CAC_DATA(card);
int r = 0;
u8 *val = NULL;
u8 *cert_ptr;
size_t val_len;
size_t len, cert_len;
u8 cert_type;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
/* if we didn't return it all last time, return the remainder */
if (priv->cached) {
sc_log(card->ctx,
"returning cached value idx=%d count=%"SC_FORMAT_LEN_SIZE_T"u",
idx, count);
if (idx > priv->cache_buf_len) {
LOG_FUNC_RETURN(card->ctx, SC_ERROR_FILE_END_REACHED);
}
len = MIN(count, priv->cache_buf_len-idx);
memcpy(buf, &priv->cache_buf[idx], len);
LOG_FUNC_RETURN(card->ctx, len);
}
sc_log(card->ctx,
"clearing cache idx=%d count=%"SC_FORMAT_LEN_SIZE_T"u",
idx, count);
free(priv->cache_buf);
priv->cache_buf = NULL;
priv->cache_buf_len = 0;
r = cac_cac1_get_certificate(card, &val, &val_len);
if (r < 0)
goto done;
if (val_len < 1) {
r = SC_ERROR_INVALID_DATA;
goto done;
}
cert_type = val[0];
cert_ptr = val + 1;
cert_len = val_len - 1;
/* if the info byte is 1, then the cert is compressed, decompress it */
if ((cert_type & 0x3) == 1) {
#ifdef ENABLE_ZLIB
r = sc_decompress_alloc(&priv->cache_buf, &priv->cache_buf_len,
cert_ptr, cert_len, COMPRESSION_AUTO);
#else
sc_log(card->ctx, "CAC compression not supported, no zlib");
r = SC_ERROR_NOT_SUPPORTED;
#endif
if (r)
goto done;
} else if (cert_len > 0) {
priv->cache_buf = malloc(cert_len);
if (priv->cache_buf == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto done;
}
priv->cache_buf_len = cert_len;
memcpy(priv->cache_buf, cert_ptr, cert_len);
}
/* OK we've read the data, now copy the required portion out to the callers buffer */
priv->cached = 1;
len = MIN(count, priv->cache_buf_len-idx);
if (len && priv->cache_buf)
memcpy(buf, &priv->cache_buf[idx], len);
r = len;
done:
if (val)
free(val);
LOG_FUNC_RETURN(card->ctx, r);
} | Class | 2 |
const char * util_acl_to_str(const sc_acl_entry_t *e)
{
static char line[80], buf[20];
unsigned int acl;
if (e == NULL)
return "N/A";
line[0] = 0;
while (e != NULL) {
acl = e->method;
switch (acl) {
case SC_AC_UNKNOWN:
return "N/A";
case SC_AC_NEVER:
return "NEVR";
case SC_AC_NONE:
return "NONE";
case SC_AC_CHV:
strcpy(buf, "CHV");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "%d", e->key_ref);
break;
case SC_AC_TERM:
strcpy(buf, "TERM");
break;
case SC_AC_PRO:
strcpy(buf, "PROT");
break;
case SC_AC_AUT:
strcpy(buf, "AUTH");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 4, "%d", e->key_ref);
break;
case SC_AC_SEN:
strcpy(buf, "Sec.Env. ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
case SC_AC_SCB:
strcpy(buf, "Sec.ControlByte ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "Ox%X", e->key_ref);
break;
case SC_AC_IDA:
strcpy(buf, "PKCS#15 AuthID ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
default:
strcpy(buf, "????");
break;
}
strcat(line, buf);
strcat(line, " ");
e = e->next;
}
line[strlen(line)-1] = 0; /* get rid of trailing space */
return line;
} | Variant | 0 |
TIFFReadEncodedStrip(TIFF* tif, uint32 strip, void* buf, tmsize_t size)
{
static const char module[] = "TIFFReadEncodedStrip";
TIFFDirectory *td = &tif->tif_dir;
uint32 rowsperstrip;
uint32 stripsperplane;
uint32 stripinplane;
uint16 plane;
uint32 rows;
tmsize_t stripsize;
if (!TIFFCheckRead(tif,0))
return((tmsize_t)(-1));
if (strip>=td->td_nstrips)
{
TIFFErrorExt(tif->tif_clientdata,module,
"%lu: Strip out of range, max %lu",(unsigned long)strip,
(unsigned long)td->td_nstrips);
return((tmsize_t)(-1));
}
/*
* Calculate the strip size according to the number of
* rows in the strip (check for truncated last strip on any
* of the separations).
*/
rowsperstrip=td->td_rowsperstrip;
if (rowsperstrip>td->td_imagelength)
rowsperstrip=td->td_imagelength;
stripsperplane=((td->td_imagelength+rowsperstrip-1)/rowsperstrip);
stripinplane=(strip%stripsperplane);
plane=(uint16)(strip/stripsperplane);
rows=td->td_imagelength-stripinplane*rowsperstrip;
if (rows>rowsperstrip)
rows=rowsperstrip;
stripsize=TIFFVStripSize(tif,rows);
if (stripsize==0)
return((tmsize_t)(-1));
/* shortcut to avoid an extra memcpy() */
if( td->td_compression == COMPRESSION_NONE &&
size!=(tmsize_t)(-1) && size >= stripsize &&
!isMapped(tif) &&
((tif->tif_flags&TIFF_NOREADRAW)==0) )
{
if (TIFFReadRawStrip1(tif, strip, buf, stripsize, module) != stripsize)
return ((tmsize_t)(-1));
if (!isFillOrder(tif, td->td_fillorder) &&
(tif->tif_flags & TIFF_NOBITREV) == 0)
TIFFReverseBits(buf,stripsize);
(*tif->tif_postdecode)(tif,buf,stripsize);
return (stripsize);
}
if ((size!=(tmsize_t)(-1))&&(size<stripsize))
stripsize=size;
if (!TIFFFillStrip(tif,strip))
return((tmsize_t)(-1));
if ((*tif->tif_decodestrip)(tif,buf,stripsize,plane)<=0)
return((tmsize_t)(-1));
(*tif->tif_postdecode)(tif,buf,stripsize);
return(stripsize);
} | Base | 1 |
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, 1, &sample, regs);
} | Class | 2 |
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
size_t copied;
int err;
BT_DBG("sock %p sk %p len %zu", sock, sk, len);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN) {
msg->msg_namelen = 0;
return 0;
}
return err;
}
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err == 0) {
sock_recv_ts_and_drops(msg, sk, skb);
if (bt_sk(sk)->skb_msg_name)
bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
&msg->msg_namelen);
else
msg->msg_namelen = 0;
}
skb_free_datagram(sk, skb);
return err ? : copied;
} | Class | 2 |
receive_carbon(void **state)
{
prof_input("/carbons on");
prof_connect();
assert_true(stbbr_received(
"<iq id='*' type='set'><enable xmlns='urn:xmpp:carbons:2'/></iq>"
));
stbbr_send(
"<presence to='stabber@localhost' from='buddy1@localhost/mobile'>"
"<priority>10</priority>"
"<status>On my mobile</status>"
"</presence>"
);
assert_true(prof_output_exact("Buddy1 (mobile) is online, \"On my mobile\""));
prof_input("/msg Buddy1");
assert_true(prof_output_exact("unencrypted"));
stbbr_send(
"<message type='chat' to='stabber@localhost/profanity' from='buddy1@localhost'>"
"<received xmlns='urn:xmpp:carbons:2'>"
"<forwarded xmlns='urn:xmpp:forward:0'>"
"<message id='prof_msg_7' xmlns='jabber:client' type='chat' lang='en' to='stabber@localhost/profanity' from='buddy1@localhost/mobile'>"
"<body>test carbon from recipient</body>"
"</message>"
"</forwarded>"
"</received>"
"</message>"
);
assert_true(prof_output_regex("Buddy1/mobile: .+test carbon from recipient"));
} | Class | 2 |
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
if (file->f_flags & O_DSYNC)
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
(inode->i_flock->fl_start == 0 &&
inode->i_flock->fl_end == OFFSET_MAX &&
inode->i_flock->fl_type != F_RDLCK)))
return 1;
return 0;
} | Class | 2 |
obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena)
{
PyObject* tmp = NULL;
expr_ty context_expr;
expr_ty optional_vars;
if (_PyObject_HasAttrId(obj, &PyId_context_expr)) {
int res;
tmp = _PyObject_GetAttrId(obj, &PyId_context_expr);
if (tmp == NULL) goto failed;
res = obj2ast_expr(tmp, &context_expr, arena);
if (res != 0) goto failed;
Py_CLEAR(tmp);
} else {
PyErr_SetString(PyExc_TypeError, "required field \"context_expr\" missing from withitem");
return 1;
}
if (exists_not_none(obj, &PyId_optional_vars)) {
int res;
tmp = _PyObject_GetAttrId(obj, &PyId_optional_vars);
if (tmp == NULL) goto failed;
res = obj2ast_expr(tmp, &optional_vars, arena);
if (res != 0) goto failed;
Py_CLEAR(tmp);
} else {
optional_vars = NULL;
}
*out = withitem(context_expr, optional_vars, arena);
return 0;
failed:
Py_XDECREF(tmp);
return 1;
} | Base | 1 |
static void gdCtxPrintf(gdIOCtx * out, const char *format, ...)
{
char buf[4096];
int len;
va_list args;
va_start(args, format);
len = vsnprintf(buf, sizeof(buf)-1, format, args);
va_end(args);
out->putBuf(out, buf, len);
} | Class | 2 |
int build_segment_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct f2fs_sm_info *sm_info;
int err;
sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
if (!sm_info)
return -ENOMEM;
/* init sm info */
sbi->sm_info = sm_info;
sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
if (!test_opt(sbi, LFS))
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
INIT_LIST_HEAD(&sm_info->sit_entry_set);
if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
return err;
}
err = create_discard_cmd_control(sbi);
if (err)
return err;
err = build_sit_info(sbi);
if (err)
return err;
err = build_free_segmap(sbi);
if (err)
return err;
err = build_curseg(sbi);
if (err)
return err;
/* reinit free segmap based on SIT */
build_sit_entries(sbi);
init_free_segmap(sbi);
err = build_dirty_segmap(sbi);
if (err)
return err;
init_min_max_mtime(sbi);
return 0;
} | Base | 1 |
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len)
{
struct inet_sock *inet = inet_sk(sk);
struct {
struct ip_options opt;
char data[40];
} replyopts;
struct ipcm_cookie ipc;
__be32 daddr;
struct rtable *rt = skb_rtable(skb);
if (ip_options_echo(&replyopts.opt, skb))
return;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (replyopts.opt.optlen) {
ipc.opt = &replyopts.opt;
if (ipc.opt->srr)
daddr = replyopts.opt.faddr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
RT_TOS(ip_hdr(skb)->tos),
RT_SCOPE_UNIVERSE, sk->sk_protocol,
ip_reply_arg_flowi_flags(arg),
daddr, rt->rt_spec_dst,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt))
return;
}
/* And let IP do all the hard work.
This chunk is not reenterable, hence spinlock.
Note that it uses the fact, that this function is called
with locally disabled BH and that sk cannot be already spinlocked.
*/
bh_lock_sock(sk);
inet->tos = ip_hdr(skb)->tos;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, &rt, MSG_DONTWAIT);
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((__sum16 *)skb_transport_header(skb) +
arg->csumoffset) = csum_fold(csum_add(skb->csum,
arg->csum));
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
bh_unlock_sock(sk);
ip_rt_put(rt);
} | Class | 2 |
int AES_decrypt_DH(uint8_t *encr_message, uint64_t length, char *message, uint64_t msgLen) {
if (!message) {
LOG_ERROR("Null message in AES_encrypt_DH");
return -1;
}
if (!encr_message) {
LOG_ERROR("Null encr message in AES_encrypt_DH");
return -2;
}
if (length < SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE) {
LOG_ERROR("length < SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE");
return -1;
}
uint64_t len = length - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE;
if (msgLen < len) {
LOG_ERROR("Output buffer not large enough");
return -2;
}
sgx_status_t status = sgx_rijndael128GCM_decrypt(&AES_DH_key,
encr_message + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE, len,
(unsigned char*) message,
encr_message + SGX_AESGCM_MAC_SIZE, SGX_AESGCM_IV_SIZE,
NULL, 0,
(sgx_aes_gcm_128bit_tag_t *)encr_message);
return status;
} | Base | 1 |
GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs)
{
unsigned int i;
GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s;
ISOM_DECREASE_SIZE(ptr, 9)
ptr->long_ids = gf_bs_read_int(bs, 1);
ptr->long_offsets = gf_bs_read_int(bs, 1);
ptr->global_entries = gf_bs_read_int(bs, 1);
ptr->reserved = gf_bs_read_int(bs, 5);
ptr->time_scale = gf_bs_read_u32(bs);
ptr->entry_count = gf_bs_read_u32(bs);
if (ptr->size / ( (ptr->long_offsets ? 16 : 12) ) < ptr->entry_count)
return GF_ISOM_INVALID_FILE;
for (i=0; i<ptr->entry_count; i++) {
GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry));
if (!ae) return GF_OUT_OF_MEM;
ISOM_DECREASE_SIZE(ptr, 8)
ae->time = gf_bs_read_u64(bs);
if (ptr->long_offsets) {
ISOM_DECREASE_SIZE(ptr, 8)
ae->offset = gf_bs_read_u64(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
ae->offset = gf_bs_read_u32(bs);
}
gf_list_insert(ptr->local_access_entries, ae, i);
}
if (ptr->global_entries) {
ISOM_DECREASE_SIZE(ptr, 4)
ptr->global_entry_count = gf_bs_read_u32(bs);
for (i=0; i<ptr->global_entry_count; i++) {
GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry));
if (!ae) return GF_OUT_OF_MEM;
ISOM_DECREASE_SIZE(ptr, 8)
ae->time = gf_bs_read_u64(bs);
if (ptr->long_ids) {
ISOM_DECREASE_SIZE(ptr, 8)
ae->segment = gf_bs_read_u32(bs);
ae->fragment = gf_bs_read_u32(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
ae->segment = gf_bs_read_u16(bs);
ae->fragment = gf_bs_read_u16(bs);
}
if (ptr->long_offsets) {
ISOM_DECREASE_SIZE(ptr, 16)
ae->afra_offset = gf_bs_read_u64(bs);
ae->offset_from_afra = gf_bs_read_u64(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 8)
ae->afra_offset = gf_bs_read_u32(bs);
ae->offset_from_afra = gf_bs_read_u32(bs);
}
gf_list_insert(ptr->global_access_entries, ae, i);
}
}
return GF_OK;
} | Variant | 0 |
validate_commit_metadata (GVariant *commit_data,
const char *ref,
const char *required_metadata,
gsize required_metadata_size,
gboolean require_xa_metadata,
GError **error)
{
g_autoptr(GVariant) commit_metadata = NULL;
g_autoptr(GVariant) xa_metadata_v = NULL;
const char *xa_metadata = NULL;
gsize xa_metadata_size = 0;
commit_metadata = g_variant_get_child_value (commit_data, 0);
if (commit_metadata != NULL)
{
xa_metadata_v = g_variant_lookup_value (commit_metadata,
"xa.metadata",
G_VARIANT_TYPE_STRING);
if (xa_metadata_v)
xa_metadata = g_variant_get_string (xa_metadata_v, &xa_metadata_size);
}
if ((xa_metadata == NULL && require_xa_metadata) ||
(xa_metadata != NULL && (xa_metadata_size != required_metadata_size ||
memcmp (xa_metadata, required_metadata, xa_metadata_size) != 0)))
{
g_set_error (error, G_IO_ERROR, G_IO_ERROR_PERMISSION_DENIED,
_("Commit metadata for %s not matching expected metadata"), ref);
return FALSE;
}
return TRUE;
} | Base | 1 |
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
} | Class | 2 |
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
struct xfrm_replay_state_esn **preplay_esn,
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
if (!rta)
return 0;
up = nla_data(rta);
p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
if (!p)
return -ENOMEM;
pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}
*replay_esn = p;
*preplay_esn = pp;
return 0;
} | Class | 2 |
R_API st64 r_buf_read_at(RBuffer *b, ut64 addr, ut8 *buf, ut64 len) {
r_return_val_if_fail (b && buf, -1);
st64 o_addr = r_buf_seek (b, 0, R_BUF_CUR);
st64 r = r_buf_seek (b, addr, R_BUF_SET);
if (r < 0) {
return r;
}
r = r_buf_read (b, buf, len);
r_buf_seek (b, o_addr, R_BUF_SET);
return r;
} | Class | 2 |
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
int err;
smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
smap->map.numa_node);
if (!smap->elems)
return -ENOMEM;
err = pcpu_freelist_init(&smap->freelist);
if (err)
goto free_elems;
pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
smap->map.max_entries);
return 0;
free_elems:
bpf_map_area_free(smap->elems);
return err;
} | Base | 1 |
LIBOPENMPT_MODPLUG_API unsigned int ModPlug_InstrumentName(ModPlugFile* file, unsigned int qual, char* buff)
{
const char* str;
unsigned int retval;
size_t tmpretval;
if(!file) return 0;
str = openmpt_module_get_instrument_name(file->mod,qual-1);
if(!str){
if(buff){
*buff = '\0';
}
return 0;
}
tmpretval = strlen(str);
if(tmpretval>=INT_MAX){
tmpretval = INT_MAX-1;
}
retval = (int)tmpretval;
if(buff){
memcpy(buff,str,retval+1);
buff[retval] = '\0';
}
openmpt_free_string(str);
return retval;
} | Base | 1 |
createenv(const struct rule *rule)
{
struct env *env;
u_int i;
env = malloc(sizeof(*env));
if (!env)
err(1, NULL);
RB_INIT(&env->root);
env->count = 0;
if (rule->options & KEEPENV) {
extern char **environ;
for (i = 0; environ[i] != NULL; i++) {
struct envnode *node;
const char *e, *eq;
size_t len;
char keybuf[1024];
e = environ[i];
/* ignore invalid or overlong names */
if ((eq = strchr(e, '=')) == NULL || eq == e)
continue;
len = eq - e;
if (len > sizeof(keybuf) - 1)
continue;
memcpy(keybuf, e, len);
keybuf[len] = '\0';
node = createnode(keybuf, eq + 1);
if (RB_INSERT(envtree, &env->root, node)) {
/* ignore any later duplicates */
freenode(node);
} else {
env->count++;
}
}
}
return env;
} | Class | 2 |
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
int err;
if (len > ds)
len = ds;
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
msg->msg_namelen = 0;
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
}
err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
unlock:
release_sock(sk);
return err ?: len;
} | Class | 2 |
static char x2c(char *hex) {
register char digit;
digit = ((hex[0] >= 'A') ? ((hex[0] & 0xdf) - 'A')+10 : (hex[0] - '0'));
digit *= 16;
digit += (hex[1] >= 'A' ? ((hex[1] & 0xdf) - 'A')+10 : (hex[1] - '0'));
return(digit);
} | Base | 1 |
expr_context_name(expr_context_ty ctx)
{
switch (ctx) {
case Load:
return "Load";
case Store:
return "Store";
case Del:
return "Del";
case AugLoad:
return "AugLoad";
case AugStore:
return "AugStore";
case Param:
return "Param";
default:
assert(0);
return "(unknown)";
}
} | Base | 1 |
int secure_decrypt(void *data, unsigned int data_length, int is_signed)
{
at91_aes_key_size_t key_size;
unsigned int cmac_key[8], cipher_key[8];
unsigned int iv[AT91_AES_IV_SIZE_WORD];
unsigned int computed_cmac[AT91_AES_BLOCK_SIZE_WORD];
unsigned int fixed_length;
const unsigned int *cmac;
int rc = -1;
/* Init keys */
init_keys(&key_size, cipher_key, cmac_key, iv);
/* Init periph */
at91_aes_init();
/* Check signature if required */
if (is_signed) {
/* Compute the CMAC */
if (at91_aes_cmac(data_length, data, computed_cmac,
key_size, cmac_key))
goto exit;
/* Check the CMAC */
fixed_length = at91_aes_roundup(data_length);
cmac = (const unsigned int *)((char *)data + fixed_length);
if (!consttime_memequal(cmac, computed_cmac, AT91_AES_BLOCK_SIZE_BYTE))
goto exit;
}
/* Decrypt the whole file */
if (at91_aes_cbc(data_length, data, data, 0,
key_size, cipher_key, iv))
goto exit;
rc = 0;
exit:
/* Reset periph */
at91_aes_cleanup();
/* Reset keys */
memset(cmac_key, 0, sizeof(cmac_key));
memset(cipher_key, 0, sizeof(cipher_key));
memset(iv, 0, sizeof(iv));
return rc;
} | Base | 1 |
static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
strlcpy(racomp.type, "acomp", sizeof(racomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
sizeof(struct crypto_report_acomp), &racomp))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
} | Class | 2 |
static gboolean irssi_ssl_verify(SSL *ssl, SSL_CTX *ctx, X509 *cert)
{
if (SSL_get_verify_result(ssl) != X509_V_OK) {
unsigned char md[EVP_MAX_MD_SIZE];
unsigned int n;
char *str;
g_warning("Could not verify SSL servers certificate:");
if ((str = X509_NAME_oneline(X509_get_subject_name(cert), 0, 0)) == NULL)
g_warning(" Could not get subject-name from peer certificate");
else {
g_warning(" Subject : %s", str);
free(str);
}
if ((str = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0)) == NULL)
g_warning(" Could not get issuer-name from peer certificate");
else {
g_warning(" Issuer : %s", str);
free(str);
}
if (! X509_digest(cert, EVP_md5(), md, &n))
g_warning(" Could not get fingerprint from peer certificate");
else {
char hex[] = "0123456789ABCDEF";
char fp[EVP_MAX_MD_SIZE*3];
if (n < sizeof(fp)) {
unsigned int i;
for (i = 0; i < n; i++) {
fp[i*3+0] = hex[(md[i] >> 4) & 0xF];
fp[i*3+1] = hex[(md[i] >> 0) & 0xF];
fp[i*3+2] = i == n - 1 ? '\0' : ':';
}
g_warning(" MD5 Fingerprint : %s", fp);
}
}
return FALSE;
}
return TRUE;
} | Class | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSequenceRNNParams*>(node->builtin_data);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* recurrent_weights =
GetInput(context, node, kRecurrentWeightsTensor);
const TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
// The hidden_state is a variable input tensor that can be modified.
TfLiteTensor* hidden_state =
const_cast<TfLiteTensor*>(GetInput(context, node, kHiddenStateTensor));
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input_weights->type) {
case kTfLiteFloat32:
return EvalFloat(input, input_weights, recurrent_weights, bias, params,
hidden_state, output);
case kTfLiteUInt8:
case kTfLiteInt8: {
// TODO(mirkov): implement eval with quantized inputs as well.
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* input_quantized = GetTemporary(context, node, 0);
TfLiteTensor* hidden_state_quantized = GetTemporary(context, node, 1);
TfLiteTensor* scaling_factors = GetTemporary(context, node, 2);
TfLiteTensor* accum_scratch = GetTemporary(context, node, 3);
TfLiteTensor* zero_points = GetTemporary(context, node, 4);
TfLiteTensor* row_sums = GetTemporary(context, node, 5);
return EvalHybrid(input, input_weights, recurrent_weights, bias, params,
input_quantized, hidden_state_quantized,
scaling_factors, hidden_state, output, zero_points,
accum_scratch, row_sums, &op_data->compute_row_sums);
}
default:
TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
TfLiteTypeGetName(input_weights->type));
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
void cJSON_AddItemToArray( cJSON *array, cJSON *item )
{
cJSON *c = array->child;
if ( ! item )
return;
if ( ! c ) {
array->child = item;
} else {
while ( c && c->next )
c = c->next;
suffix_object( c, item );
}
} | Base | 1 |
set_lenIV(char *line)
{
char *p = strstr(line, "/lenIV ");
/* Allow lenIV to be negative. Thanks to Tom Kacvinsky <[email protected]> */
if (p && (isdigit(p[7]) || p[7] == '+' || p[7] == '-')) {
lenIV = atoi(p + 7);
}
} | Class | 2 |
static int read_public_key(RSA *rsa)
{
int r;
sc_path_t path;
sc_file_t *file;
u8 buf[2048], *p = buf;
size_t bufsize, keysize;
r = select_app_df();
if (r)
return 1;
sc_format_path("I1012", &path);
r = sc_select_file(card, &path, &file);
if (r) {
fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = file->size;
sc_file_free(file);
r = sc_read_binary(card, 0, buf, bufsize, 0);
if (r < 0) {
fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = r;
do {
if (bufsize < 4)
return 3;
keysize = (p[0] << 8) | p[1];
if (keysize == 0)
break;
if (keysize < 3)
return 3;
if (p[2] == opt_key_num)
break;
p += keysize;
bufsize -= keysize;
} while (1);
if (keysize == 0) {
printf("Key number %d not found.\n", opt_key_num);
return 2;
}
return parse_public_key(p, keysize, rsa);
} | Class | 2 |
PHP_FUNCTION(mb_ereg_search_init)
{
size_t argc = ZEND_NUM_ARGS();
zval *arg_str;
char *arg_pattern = NULL, *arg_options = NULL;
int arg_pattern_len = 0, arg_options_len = 0;
OnigSyntaxType *syntax = NULL;
OnigOptionType option;
if (zend_parse_parameters(argc TSRMLS_CC, "z|ss", &arg_str, &arg_pattern, &arg_pattern_len, &arg_options, &arg_options_len) == FAILURE) {
return;
}
if (argc > 1 && arg_pattern_len == 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty pattern");
RETURN_FALSE;
}
option = MBREX(regex_default_options);
syntax = MBREX(regex_default_syntax);
if (argc == 3) {
option = 0;
_php_mb_regex_init_options(arg_options, arg_options_len, &option, &syntax, NULL);
}
if (argc > 1) {
/* create regex pattern buffer */
if ((MBREX(search_re) = php_mbregex_compile_pattern(arg_pattern, arg_pattern_len, option, MBREX(current_mbctype), syntax TSRMLS_CC)) == NULL) {
RETURN_FALSE;
}
}
if (MBREX(search_str) != NULL) {
zval_ptr_dtor(&MBREX(search_str));
MBREX(search_str) = (zval *)NULL;
}
MBREX(search_str) = arg_str;
Z_ADDREF_P(MBREX(search_str));
SEPARATE_ZVAL_IF_NOT_REF(&MBREX(search_str));
MBREX(search_pos) = 0;
if (MBREX(search_regs) != NULL) {
onig_region_free(MBREX(search_regs), 1);
MBREX(search_regs) = (OnigRegion *) NULL;
}
RETURN_TRUE;
} | Variant | 0 |
build_principal_va(krb5_context context, krb5_principal princ,
unsigned int rlen, const char *realm, va_list ap)
{
krb5_error_code retval = 0;
char *r = NULL;
krb5_data *data = NULL;
krb5_int32 count = 0;
krb5_int32 size = 2; /* initial guess at needed space */
char *component = NULL;
data = malloc(size * sizeof(krb5_data));
if (!data) { retval = ENOMEM; }
if (!retval) {
r = strdup(realm);
if (!r) { retval = ENOMEM; }
}
while (!retval && (component = va_arg(ap, char *))) {
if (count == size) {
krb5_data *new_data = NULL;
size *= 2;
new_data = realloc(data, size * sizeof(krb5_data));
if (new_data) {
data = new_data;
} else {
retval = ENOMEM;
}
}
if (!retval) {
data[count].length = strlen(component);
data[count].data = strdup(component);
if (!data[count].data) { retval = ENOMEM; }
count++;
}
}
if (!retval) {
princ->type = KRB5_NT_UNKNOWN;
princ->magic = KV5M_PRINCIPAL;
princ->realm = make_data(r, rlen);
princ->data = data;
princ->length = count;
r = NULL; /* take ownership */
data = NULL; /* take ownership */
}
if (data) {
while (--count >= 0) {
free(data[count].data);
}
free(data);
}
free(r);
return retval;
} | Base | 1 |
static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
{
struct srpt_device *sdev;
struct srpt_rdma_ch *ch;
struct srpt_send_ioctx *target;
int ret, i;
ret = -EINVAL;
ch = ioctx->ch;
BUG_ON(!ch);
BUG_ON(!ch->sport);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
spin_lock_irq(&sdev->spinlock);
for (i = 0; i < ch->rq_size; ++i) {
target = ch->ioctx_ring[i];
if (target->cmd.se_lun == ioctx->cmd.se_lun &&
target->cmd.tag == tag &&
srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
ret = 0;
/* now let the target core abort &target->cmd; */
break;
}
}
spin_unlock_irq(&sdev->spinlock);
return ret;
} | Base | 1 |
static int read_private_key(RSA *rsa)
{
int r;
sc_path_t path;
sc_file_t *file;
const sc_acl_entry_t *e;
u8 buf[2048], *p = buf;
size_t bufsize, keysize;
r = select_app_df();
if (r)
return 1;
sc_format_path("I0012", &path);
r = sc_select_file(card, &path, &file);
if (r) {
fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r));
return 2;
}
e = sc_file_get_acl_entry(file, SC_AC_OP_READ);
if (e == NULL || e->method == SC_AC_NEVER)
return 10;
bufsize = file->size;
sc_file_free(file);
r = sc_read_binary(card, 0, buf, bufsize, 0);
if (r < 0) {
fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = r;
do {
if (bufsize < 4)
return 3;
keysize = (p[0] << 8) | p[1];
if (keysize == 0)
break;
if (keysize < 3)
return 3;
if (p[2] == opt_key_num)
break;
p += keysize;
bufsize -= keysize;
} while (1);
if (keysize == 0) {
printf("Key number %d not found.\n", opt_key_num);
return 2;
}
return parse_private_key(p, keysize, rsa);
} | Variant | 0 |
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
}
msg->msg_namelen = sizeof(*sax);
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
} | Class | 2 |
generic_ret *init_2_svc(krb5_ui_4 *arg, struct svc_req *rqstp)
{
static generic_ret ret;
gss_buffer_desc client_name,
service_name;
kadm5_server_handle_t handle;
OM_uint32 minor_stat;
const char *errmsg = NULL;
size_t clen, slen;
char *cdots, *sdots;
xdr_free(xdr_generic_ret, &ret);
if ((ret.code = new_server_handle(*arg, rqstp, &handle)))
goto exit_func;
if (! (ret.code = check_handle((void *)handle))) {
ret.api_version = handle->api_version;
}
free_server_handle(handle);
if (setup_gss_names(rqstp, &client_name, &service_name) < 0) {
ret.code = KADM5_FAILURE;
goto exit_func;
}
if (ret.code != 0)
errmsg = krb5_get_error_message(NULL, ret.code);
clen = client_name.length;
trunc_name(&clen, &cdots);
slen = service_name.length;
trunc_name(&slen, &sdots);
/* okay to cast lengths to int because trunc_name limits max value */
krb5_klog_syslog(LOG_NOTICE, _("Request: kadm5_init, %.*s%s, %s, "
"client=%.*s%s, service=%.*s%s, addr=%s, "
"vers=%d, flavor=%d"),
(int)clen, (char *)client_name.value, cdots,
errmsg ? errmsg : _("success"),
(int)clen, (char *)client_name.value, cdots,
(int)slen, (char *)service_name.value, sdots,
client_addr(rqstp->rq_xprt),
ret.api_version & ~(KADM5_API_VERSION_MASK),
rqstp->rq_cred.oa_flavor);
if (errmsg != NULL)
krb5_free_error_message(NULL, errmsg);
gss_release_buffer(&minor_stat, &client_name);
gss_release_buffer(&minor_stat, &service_name);
exit_func:
return(&ret);
} | Base | 1 |
static int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages, bool is_readahead)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
struct f2fs_map_blocks map;
int ret = 0;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
map.m_next_pgofs = NULL;
map.m_next_extent = NULL;
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
for (; nr_pages; nr_pages--) {
if (pages) {
page = list_last_entry(pages, struct page, lru);
prefetchw(&page->flags);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index,
readahead_gfp_mask(mapping)))
goto next_page;
}
ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
&last_block_in_bio, is_readahead);
if (ret) {
SetPageError(page);
zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
}
next_page:
if (pages)
put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return pages ? 0 : ret;
} | Base | 1 |
int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
kuid_t euid;
kgid_t egid;
int id;
int next_id = ids->next_id;
if (size > IPCMNI)
size = IPCMNI;
if (ids->in_use >= size)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
new->deleted = false;
rcu_read_lock();
spin_lock(&new->lock);
id = idr_alloc(&ids->ipcs_idr, new,
(next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
return id;
}
ids->in_use++;
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
if (next_id < 0) {
new->seq = ids->seq++;
if (ids->seq > IPCID_SEQ_MAX)
ids->seq = 0;
} else {
new->seq = ipcid_to_seqx(next_id);
ids->next_id = -1;
}
new->id = ipc_buildid(id, new->seq);
return id;
} | Class | 2 |
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
tfm->__crt_alg->cra_type == &crypto_givcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
skcipher->setkey = alg->setkey;
skcipher->encrypt = alg->encrypt;
skcipher->decrypt = alg->decrypt;
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (alg->init)
return alg->init(skcipher);
return 0;
} | Base | 1 |
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
rdesc[60] = 0xfa;
rdesc[40] = 0xfa;
}
return rdesc;
} | Class | 2 |
char *rfbProcessFileTransferReadBuffer(rfbClientPtr cl, uint32_t length)
{
char *buffer=NULL;
int n=0;
FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, NULL);
/*
rfbLog("rfbProcessFileTransferReadBuffer(%dlen)\n", length);
*/
if (length>0) {
buffer=malloc((uint64_t)length+1);
if (buffer!=NULL) {
if ((n = rfbReadExact(cl, (char *)buffer, length)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessFileTransferReadBuffer: read");
rfbCloseClient(cl);
/* NOTE: don't forget to free(buffer) if you return early! */
if (buffer!=NULL) free(buffer);
return NULL;
}
/* Null Terminate */
buffer[length]=0;
}
}
return buffer;
} | Base | 1 |
test_compare_files (const char* tgt, const char *rec)
{
FILE *orig, *recons;
static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE];
xoff_t offset = 0;
size_t i;
size_t oc, rc;
xoff_t diffs = 0;
if ((orig = fopen (tgt, "r")) == NULL)
{
XPR(NT "open %s failed\n", tgt);
return get_errno ();
}
if ((recons = fopen (rec, "r")) == NULL)
{
XPR(NT "open %s failed\n", rec);
return get_errno ();
}
for (;;)
{
oc = fread (obuf, 1, TESTBUFSIZE, orig);
rc = fread (rbuf, 1, TESTBUFSIZE, recons);
if (oc != rc)
{
return XD3_INTERNAL;
}
if (oc == 0)
{
break;
}
for (i = 0; i < oc; i += 1)
{
if (obuf[i] != rbuf[i])
{
XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n",
(int)i, (int)oc, offset, obuf[i], rbuf[i]);
diffs++;
return XD3_INTERNAL;
}
}
offset += oc;
}
fclose (orig);
fclose (recons);
if (diffs != 0)
{
return XD3_INTERNAL;
}
return 0;
} | Class | 2 |
unsigned long lh_char_hash(const void *k)
{
unsigned int h = 0;
const char* data = (const char*)k;
while( *data!=0 ) h = h*129 + (unsigned int)(*data++) + LH_PRIME;
return h;
} | Class | 2 |
int vfs_open(const struct path *path, struct file *file,
const struct cred *cred)
{
struct dentry *dentry = path->dentry;
struct inode *inode = dentry->d_inode;
file->f_path = *path;
if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
if (IS_ERR(inode))
return PTR_ERR(inode);
}
return do_dentry_open(file, inode, NULL, cred);
} | Pillar | 3 |
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, nmi, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
} | Class | 2 |
static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
struct sk_buff *skb;
size_t copied;
int err;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
if (addr_len)
*addr_len=sizeof(*sin6);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
if (skb_csum_unnecessary(skb)) {
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
/* Copy the address. */
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
}
sock_recv_ts_and_drops(msg, sk, skb);
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
err = copied;
if (flags & MSG_TRUNC)
err = skb->len;
out_free:
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
skb_kill_datagram(sk, skb, flags);
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
goto out;
} | Class | 2 |
static pyc_object *get_set_object(RBuffer *buffer) {
pyc_object *ret = NULL;
bool error = false;
ut32 n = get_ut32 (buffer, &error);
if (n > ST32_MAX) {
eprintf ("bad marshal data (set size out of range)\n");
return NULL;
}
if (error) {
return NULL;
}
ret = get_array_object_generic (buffer, n);
if (!ret) {
return NULL;
}
ret->type = TYPE_SET;
return ret;
} | Base | 1 |
static int jas_iccgetuint(jas_stream_t *in, int n, ulonglong *val)
{
int i;
int c;
ulonglong v;
v = 0;
for (i = n; i > 0; --i) {
if ((c = jas_stream_getc(in)) == EOF)
return -1;
v = (v << 8) | c;
}
*val = v;
return 0;
} | Class | 2 |
ber_parse_header(STREAM s, int tagval, int *length)
{
int tag, len;
if (tagval > 0xff)
{
in_uint16_be(s, tag);
}
else
{
in_uint8(s, tag);
}
if (tag != tagval)
{
logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag);
return False;
}
in_uint8(s, len);
if (len & 0x80)
{
len &= ~0x80;
*length = 0;
while (len--)
next_be(s, *length);
}
else
*length = len;
return s_check(s);
} | Base | 1 |
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_mmio_fragment *frag;
unsigned len;
BUG_ON(!vcpu->mmio_needed);
/* Complete previous fragment */
frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
len = min(8u, frag->len);
if (!vcpu->mmio_is_write)
memcpy(frag->data, run->mmio.data, len);
if (frag->len <= 8) {
/* Switch to the next fragment. */
frag++;
vcpu->mmio_cur_fragment++;
} else {
/* Go forward to the next mmio piece. */
frag->data += len;
frag->gpa += len;
frag->len -= len;
}
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
/* FIXME: return into emulator if single-stepping. */
if (vcpu->mmio_is_write)
return 1;
vcpu->mmio_read_completed = 1;
return complete_emulated_io(vcpu);
}
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
memcpy(run->mmio.data, frag->data, min(8u, frag->len));
run->mmio.len = min(8u, frag->len);
run->mmio.is_write = vcpu->mmio_is_write;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
} | Base | 1 |
cJSON *cJSON_Parse( const char *value )
{
cJSON *c;
ep = 0;
if ( ! ( c = cJSON_New_Item() ) )
return 0; /* memory fail */
if ( ! parse_value( c, skip( value ) ) ) {
cJSON_Delete( c );
return 0;
}
return c;
} | Base | 1 |
static int db_dict_iter_lookup_key_values(struct db_dict_value_iter *iter)
{
struct db_dict_iter_key *key;
string_t *path;
const char *error;
int ret;
/* sort the keys so that we'll first lookup the keys without
default value. if their lookup fails, the user doesn't exist. */
array_sort(&iter->keys, db_dict_iter_key_cmp);
path = t_str_new(128);
str_append(path, DICT_PATH_SHARED);
array_foreach_modifiable(&iter->keys, key) {
if (!key->used)
continue;
str_truncate(path, strlen(DICT_PATH_SHARED));
ret = var_expand(path, key->key->key, iter->var_expand_table, &error);
if (ret <= 0) {
auth_request_log_error(iter->auth_request, AUTH_SUBSYS_DB,
"Failed to expand key %s: %s", key->key->key, error);
return -1;
}
ret = dict_lookup(iter->conn->dict, iter->pool,
str_c(path), &key->value, &error);
if (ret > 0) {
auth_request_log_debug(iter->auth_request, AUTH_SUBSYS_DB,
"Lookup: %s = %s", str_c(path),
key->value);
} else if (ret < 0) {
auth_request_log_error(iter->auth_request, AUTH_SUBSYS_DB,
"Failed to lookup key %s: %s", str_c(path), error);
return -1;
} else if (key->key->default_value != NULL) {
auth_request_log_debug(iter->auth_request, AUTH_SUBSYS_DB,
"Lookup: %s not found, using default value %s",
str_c(path), key->key->default_value);
key->value = key->key->default_value;
} else {
return 0;
}
}
return 1;
} | Class | 2 |
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
prev_state = exception_enter();
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
preempt_conditional_sti(regs);
do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
preempt_conditional_cli(regs);
}
exception_exit(prev_state);
} | Class | 2 |
netsnmp_mibindex_lookup( const char *dirname )
{
int i;
static char tmpbuf[300];
for (i=0; i<_mibindex; i++) {
if ( _mibindexes[i] &&
strcmp( _mibindexes[i], dirname ) == 0) {
snprintf(tmpbuf, sizeof(tmpbuf), "%s/mib_indexes/%d",
get_persistent_directory(), i);
tmpbuf[sizeof(tmpbuf)-1] = 0;
DEBUGMSGTL(("mibindex", "lookup: %s (%d) %s\n", dirname, i, tmpbuf ));
return tmpbuf;
}
}
DEBUGMSGTL(("mibindex", "lookup: (none)\n"));
return NULL;
} | Base | 1 |
gen_hash(codegen_scope *s, node *tree, int val, int limit)
{
int slimit = GEN_VAL_STACK_MAX;
if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX;
int len = 0;
mrb_bool update = FALSE;
while (tree) {
if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) {
if (len > 0) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
}
codegen(s, tree->car->cdr, val);
if (len > 0 || update) {
pop(); pop();
genop_1(s, OP_HASHCAT, cursp());
push();
}
update = TRUE;
len = 0;
}
else {
codegen(s, tree->car->car, val);
codegen(s, tree->car->cdr, val);
len++;
}
tree = tree->cdr;
if (val && cursp() >= slimit) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
update = TRUE;
len = 0;
}
}
if (update) {
if (val && len > 0) {
pop_n(len*2+1);
genop_2(s, OP_HASHADD, cursp(), len);
push();
}
return -1; /* variable length */
}
return len;
} | Base | 1 |
ber_parse_header(STREAM s, int tagval, int *length)
{
int tag, len;
if (tagval > 0xff)
{
in_uint16_be(s, tag);
}
else
{
in_uint8(s, tag);
}
if (tag != tagval)
{
logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag);
return False;
}
in_uint8(s, len);
if (len & 0x80)
{
len &= ~0x80;
*length = 0;
while (len--)
next_be(s, *length);
}
else
*length = len;
return s_check(s);
} | Base | 1 |
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
bool add_on_replace)
{
struct snd_ctl_elem_id id;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
if (!kcontrol)
return -EINVAL;
if (snd_BUG_ON(!card || !kcontrol->info)) {
ret = -EINVAL;
goto error;
}
id = kcontrol->id;
down_write(&card->controls_rwsem);
old = snd_ctl_find_id(card, &id);
if (!old) {
if (add_on_replace)
goto add;
up_write(&card->controls_rwsem);
ret = -EINVAL;
goto error;
}
ret = snd_ctl_remove(card, old);
if (ret < 0) {
up_write(&card->controls_rwsem);
goto error;
}
add:
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
ret = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return ret;
} | Variant | 0 |
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &error);
if (error < 0)
goto end;
m->msg_namelen = 0;
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
}
}
kfree_skb(skb);
end:
return error;
} | Class | 2 |
xfs_attr_shortform_addname(xfs_da_args_t *args)
{
int newsize, forkoff, retval;
trace_xfs_attr_sf_addname(args);
retval = xfs_attr_shortform_lookup(args);
if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
return retval;
} else if (retval == -EEXIST) {
if (args->flags & ATTR_CREATE)
return retval;
retval = xfs_attr_shortform_remove(args);
ASSERT(retval == 0);
}
if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX)
return -ENOSPC;
newsize = XFS_ATTR_SF_TOTSIZE(args->dp);
newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize);
if (!forkoff)
return -ENOSPC;
xfs_attr_shortform_add(args, forkoff);
return 0;
} | Class | 2 |
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
int idx = i->idx;
size_t off = i->iov_offset, orig_sz;
if (unlikely(i->count < size))
size = i->count;
orig_sz = size;
if (size) {
if (off) /* make it relative to the beginning of buffer */
size += off - pipe->bufs[idx].offset;
while (1) {
buf = &pipe->bufs[idx];
if (size <= buf->len)
break;
size -= buf->len;
idx = next_idx(idx, pipe);
}
buf->len = size;
i->idx = idx;
off = i->iov_offset = buf->offset + size;
}
if (off)
idx = next_idx(idx, pipe);
if (pipe->nrbufs) {
int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
/* [curbuf,unused) is in use. Free [idx,unused) */
while (idx != unused) {
pipe_buf_release(pipe, &pipe->bufs[idx]);
idx = next_idx(idx, pipe);
pipe->nrbufs--;
}
}
i->count -= orig_sz;
} | Class | 2 |
mm_answer_pam_init_ctx(int sock, Buffer *m)
{
debug3("%s", __func__);
authctxt->user = buffer_get_string(m, NULL);
sshpam_ctxt = (sshpam_device.init_ctx)(authctxt);
sshpam_authok = NULL;
buffer_clear(m);
if (sshpam_ctxt != NULL) {
monitor_permit(mon_dispatch, MONITOR_REQ_PAM_FREE_CTX, 1);
buffer_put_int(m, 1);
} else {
buffer_put_int(m, 0);
}
mm_request_send(sock, MONITOR_ANS_PAM_INIT_CTX, m);
return (0);
} | Class | 2 |
static int stellaris_enet_init(SysBusDevice *sbd)
{
DeviceState *dev = DEVICE(sbd);
stellaris_enet_state *s = STELLARIS_ENET(dev);
memory_region_init_io(&s->mmio, OBJECT(s), &stellaris_enet_ops, s,
"stellaris_enet", 0x1000);
sysbus_init_mmio(sbd, &s->mmio);
sysbus_init_irq(sbd, &s->irq);
qemu_macaddr_default_if_unset(&s->conf.macaddr);
s->nic = qemu_new_nic(&net_stellaris_enet_info, &s->conf,
object_get_typename(OBJECT(dev)), dev->id, s);
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
stellaris_enet_reset(s);
register_savevm(dev, "stellaris_enet", -1, 1,
stellaris_enet_save, stellaris_enet_load, s);
return 0;
} | Class | 2 |
static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
return F2FS_M_SB(page->mapping);
} | Base | 1 |
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
struct vsock_sock *vsk,
struct msghdr *msg, size_t len,
int flags)
{
int err;
int noblock;
struct vmci_datagram *dg;
size_t payload_len;
struct sk_buff *skb;
noblock = flags & MSG_DONTWAIT;
if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
return -EOPNOTSUPP;
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
if (err)
return err;
if (!skb)
return -EAGAIN;
dg = (struct vmci_datagram *)skb->data;
if (!dg)
/* err is 0, meaning we read zero bytes. */
goto out;
payload_len = dg->payload_size;
/* Ensure the sk_buff matches the payload size claimed in the packet. */
if (payload_len != skb->len - sizeof(*dg)) {
err = -EINVAL;
goto out;
}
if (payload_len > len) {
payload_len = len;
msg->msg_flags |= MSG_TRUNC;
}
/* Place the datagram payload in the user's iovec. */
err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
payload_len);
if (err)
goto out;
msg->msg_namelen = 0;
if (msg->msg_name) {
struct sockaddr_vm *vm_addr;
/* Provide the address of the sender. */
vm_addr = (struct sockaddr_vm *)msg->msg_name;
vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
msg->msg_namelen = sizeof(*vm_addr);
}
err = payload_len;
out:
skb_free_datagram(&vsk->sk, skb);
return err;
} | Class | 2 |