code: string, lengths 12 to 2.05k
label_name: string, 5 classes
label: int64, 0 to 4
static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){ assert( p->nOp>0 || p->aOp==0 ); assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); if( p->nOp ){ assert( p->aOp ); sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment); p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap); } }
Class
2
SPL_METHOD(SplFileInfo, getFilename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { RETURN_STRINGL(intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1); } else { RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } }
Base
1
static void setup_private_mount(const char *snap_name) { uid_t uid = getuid(); gid_t gid = getgid(); char tmpdir[MAX_BUF] = { 0 }; // Create a 0700 base directory, this is the base dir that is // protected from other users. // // Under that basedir, we put a 1777 /tmp dir that is then bind // mounted for the applications to use sc_must_snprintf(tmpdir, sizeof(tmpdir), "/tmp/snap.%s_XXXXXX", snap_name); if (mkdtemp(tmpdir) == NULL) { die("cannot create temporary directory essential for private /tmp"); } // now we create a 1777 /tmp inside our private dir mode_t old_mask = umask(0); char *d = sc_strdup(tmpdir); sc_must_snprintf(tmpdir, sizeof(tmpdir), "%s/tmp", d); free(d); if (mkdir(tmpdir, 01777) != 0) { die("cannot create temporary directory for private /tmp"); } umask(old_mask); // chdir to '/' since the mount won't apply to the current directory char *pwd = get_current_dir_name(); if (pwd == NULL) die("cannot get current working directory"); if (chdir("/") != 0) die("cannot change directory to '/'"); // MS_BIND is there from linux 2.4 sc_do_mount(tmpdir, "/tmp", NULL, MS_BIND, NULL); // MS_PRIVATE needs linux > 2.6.11 sc_do_mount("none", "/tmp", NULL, MS_PRIVATE, NULL); // do the chown after the bind mount to avoid potential shenanigans if (chown("/tmp/", uid, gid) < 0) { die("cannot change ownership of /tmp"); } // chdir to original directory if (chdir(pwd) != 0) die("cannot change current working directory to the original directory"); free(pwd); }
Base
1
GPMF_ERR IsValidSize(GPMF_stream *ms, uint32_t size) // size is in longs not bytes. { if (ms) { int32_t nestsize = (int32_t)ms->nest_size[ms->nest_level]; if (nestsize == 0 && ms->nest_level == 0) nestsize = ms->buffer_size_longs; if (size + 2 <= nestsize) return GPMF_OK; } return GPMF_ERROR_BAD_STRUCTURE; }
Base
1
void print_cfs_stats(struct seq_file *m, int cpu) { struct cfs_rq *cfs_rq, *pos; rcu_read_lock(); for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) print_cfs_rq(m, cpu, cfs_rq); rcu_read_unlock(); }
Base
1
static int check_line_charstring(void) { char *p = line; while (isspace(*p)) p++; return (*p == '/' || (p[0] == 'd' && p[1] == 'u' && p[2] == 'p')); }
Class
2
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) { int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { void __user *namep; namep = (void __user __force *) m->msg_name; err = move_addr_to_kernel(namep, m->msg_namelen, address); if (err < 0) return err; } m->msg_name = address; } else { m->msg_name = NULL; } size = m->msg_iovlen * sizeof(struct iovec); if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) return -EFAULT; m->msg_iov = iov; err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { size_t len = iov[ct].iov_len; if (len > INT_MAX - err) { len = INT_MAX - err; iov[ct].iov_len = len; } err += len; } return err; }
Class
2
static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { return tfm->generate(tfm, src, slen, dst, dlen); }
Base
1
static void spl_filesystem_tree_it_move_forward(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator); object->u.dir.index++; do { spl_filesystem_dir_read(object TSRMLS_CC); } while (spl_filesystem_is_dot(object->u.dir.entry.d_name)); if (object->file_name) { efree(object->file_name); object->file_name = NULL; } if (iterator->current) { zval_ptr_dtor(&iterator->current); iterator->current = NULL; } }
Base
1
int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) { if(!p || !g) /* q is optional */ return 0; BN_free(dh->p); BN_free(dh->q); BN_free(dh->g); dh->p = p; dh->q = q; dh->g = g; if(q) dh->length = BN_num_bits(q); return 1; }
Base
1
void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * FIXME: this means that set/clear TIF_BLOCKSTEP is simply * wrong if task != current, SIGKILL can wakeup the stopped * tracee and set/clear can play with the running task, this * can confuse the next __switch_to_xtra(). */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); }
Class
2
uint16_t dm9000ReadPhyReg(uint8_t address) { //Write PHY register address dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address); //Start the read operation dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRR); //PHY access is still in progress? while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0) { } //Clear command register dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS); //Wait 5us minimum usleep(5); //Return register value return (dm9000ReadReg(DM9000_REG_EPDRH) << 8) | dm9000ReadReg(DM9000_REG_EPDRL); }
Class
2
static int link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, i = 0, nbuf; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } /* * If we have iterated all input buffers or ran out of * output room, break. */ if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) break; ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1)); nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ pipe_buf_get(ipipe, ibuf); obuf = opipe->bufs + nbuf; *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; if (obuf->len > len) obuf->len = len; opipe->nrbufs++; ret += obuf->len; len -= obuf->len; i++; } while (len); /* * return EAGAIN if we have the potential of some data in the * future, otherwise just return 0 */ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ret = -EAGAIN; pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); return ret; }
Variant
0
static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); }
Class
2
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->sign && ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); kfree(ses->auth_key.response); ses->auth_key.response = NULL; if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); goto keygen_exit; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); keygen_exit: if (!ses->server->sign) { kfree(ses->auth_key.response); ses->auth_key.response = NULL; } return rc; }
Base
1
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->position); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.position.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
Base
1
error_t enc624j600UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint16_t hashTable[4]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = enc624j600CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = (crc >> 23) & 0x3F; //Update hash table contents hashTable[k / 16] |= (1 << (k % 16)); } } //Write the hash table to the ENC624J600 controller enc624j600WriteReg(interface, ENC624J600_REG_EHT1, hashTable[0]); enc624j600WriteReg(interface, ENC624J600_REG_EHT2, hashTable[1]); enc624j600WriteReg(interface, ENC624J600_REG_EHT3, hashTable[2]); enc624j600WriteReg(interface, ENC624J600_REG_EHT4, hashTable[3]); //Debug message TRACE_DEBUG(" EHT1 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT1)); TRACE_DEBUG(" EHT2 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT2)); TRACE_DEBUG(" EHT3 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT3)); TRACE_DEBUG(" EHT4 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT4)); //Successful processing return NO_ERROR; }
Class
2
static void spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); object->u.dir.index = 0; if (object->u.dir.dirp) { php_stream_rewinddir(object->u.dir.dirp); } spl_filesystem_dir_read(object TSRMLS_CC); }
Base
1
static int rename_in_ns(int pid, char *oldname, char **newnamep) { int fd = -1, ofd = -1, ret, ifindex = -1; bool grab_newname = false; ofd = lxc_preserve_ns(getpid(), "net"); if (ofd < 0) { fprintf(stderr, "Failed opening network namespace path for '%d'.", getpid()); return -1; } fd = lxc_preserve_ns(pid, "net"); if (fd < 0) { fprintf(stderr, "Failed opening network namespace path for '%d'.", pid); return -1; } if (setns(fd, 0) < 0) { fprintf(stderr, "setns to container network namespace\n"); goto out_err; } close(fd); fd = -1; if (!*newnamep) { grab_newname = true; *newnamep = VETH_DEF_NAME; if (!(ifindex = if_nametoindex(oldname))) { fprintf(stderr, "failed to get netdev index\n"); goto out_err; } } if ((ret = lxc_netdev_rename_by_name(oldname, *newnamep)) < 0) { fprintf(stderr, "Error %d renaming netdev %s to %s in container\n", ret, oldname, *newnamep); goto out_err; } if (grab_newname) { char ifname[IFNAMSIZ], *namep = ifname; if (!if_indextoname(ifindex, namep)) { fprintf(stderr, "Failed to get new netdev name\n"); goto out_err; } *newnamep = strdup(namep); if (!*newnamep) goto out_err; } if (setns(ofd, 0) < 0) { fprintf(stderr, "Error returning to original netns\n"); close(ofd); return -1; } close(ofd); return 0; out_err: if (ofd >= 0) close(ofd); if (setns(ofd, 0) < 0) fprintf(stderr, "Error returning to original network namespace\n"); if (fd >= 0) close(fd); return -1; }
Class
2
static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; struct sockaddr_ieee802154 *saddr; saddr = (struct sockaddr_ieee802154 *)msg->msg_name; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* FIXME: skip headers if necessary ?! */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); if (saddr) { saddr->family = AF_IEEE802154; saddr->addr = mac_cb(skb)->sa; } if (addr_len) *addr_len = sizeof(*saddr); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; }
Class
2
static int add_push_report_sideband_pkt(git_push *push, git_pkt_data *data_pkt, git_buf *data_pkt_buf) { git_pkt *pkt; const char *line, *line_end = NULL; size_t line_len; int error; int reading_from_buf = data_pkt_buf->size > 0; if (reading_from_buf) { /* We had an existing partial packet, so add the new * packet to the buffer and parse the whole thing */ git_buf_put(data_pkt_buf, data_pkt->data, data_pkt->len); line = data_pkt_buf->ptr; line_len = data_pkt_buf->size; } else { line = data_pkt->data; line_len = data_pkt->len; } while (line_len > 0) { error = git_pkt_parse_line(&pkt, line, &line_end, line_len); if (error == GIT_EBUFS) { /* Buffer the data when the inner packet is split * across multiple sideband packets */ if (!reading_from_buf) git_buf_put(data_pkt_buf, line, line_len); error = 0; goto done; } else if (error < 0) goto done; /* Advance in the buffer */ line_len -= (line_end - line); line = line_end; /* When a valid packet with no content has been * read, git_pkt_parse_line does not report an * error, but the pkt pointer has not been set. * Handle this by skipping over empty packets. */ if (pkt == NULL) continue; error = add_push_report_pkt(push, pkt); git_pkt_free(pkt); if (error < 0 && error != GIT_ITEROVER) goto done; } error = 0; done: if (reading_from_buf) git_buf_consume(data_pkt_buf, line_end); return error; }
Base
1
spnego_gss_get_mic( OM_uint32 *minor_status, const gss_ctx_id_t context_handle, gss_qop_t qop_req, const gss_buffer_t message_buffer, gss_buffer_t message_token) { OM_uint32 ret; ret = gss_get_mic(minor_status, context_handle, qop_req, message_buffer, message_token); return (ret); }
Base
1
static M_bool M_fs_check_overwrite_allowed(const char *p1, const char *p2, M_uint32 mode) { M_fs_info_t *info = NULL; char *pold = NULL; char *pnew = NULL; M_fs_type_t type; M_bool ret = M_TRUE; if (mode & M_FS_FILE_MODE_OVERWRITE) return M_TRUE; /* If we're not overwriting we need to verify existance. * * For files we need to check if the file name exists in the * directory it's being copied to. * * For directories we need to check if the directory name * exists in the directory it's being copied to. */ if (M_fs_info(&info, p1, M_FS_PATH_INFO_FLAGS_BASIC) != M_FS_ERROR_SUCCESS) return M_FALSE; type = M_fs_info_get_type(info); M_fs_info_destroy(info); if (type != M_FS_TYPE_DIR) { /* File exists at path. */ if (M_fs_perms_can_access(p2, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { ret = M_FALSE; goto done; } } /* Is dir */ pold = M_fs_path_basename(p1, M_FS_SYSTEM_AUTO); pnew = M_fs_path_join(p2, pnew, M_FS_SYSTEM_AUTO); if (M_fs_perms_can_access(pnew, M_FS_PERMS_MODE_NONE) == M_FS_ERROR_SUCCESS) { ret = M_FALSE; goto done; } done: M_free(pnew); M_free(pold); return ret; }
Class
2
static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) { cJSON *current_element = NULL; if ((object == NULL) || (name == NULL)) { return NULL; } current_element = object->child; if (case_sensitive) { while ((current_element != NULL) && (strcmp(name, current_element->string) != 0)) { current_element = current_element->next; } } else { while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) { current_element = current_element->next; } } return current_element; }
Base
1
IW_IMPL(int) iw_get_input_density(struct iw_context *ctx, double *px, double *py, int *pcode) { *px = 1.0; *py = 1.0; *pcode = ctx->img1.density_code; if(ctx->img1.density_code!=IW_DENSITY_UNKNOWN) { *px = ctx->img1.density_x; *py = ctx->img1.density_y; return 1; } return 0; }
Base
1
Jsi_RC Jsi_RegExpMatch(Jsi_Interp *interp, Jsi_Value *pattern, const char *v, int *rc, Jsi_DString *dStr) { Jsi_Regex *re; int regexec_flags = 0; if (rc) *rc = 0; if (pattern == NULL || pattern->vt != JSI_VT_OBJECT || pattern->d.obj->ot != JSI_OT_REGEXP) return Jsi_LogError("expected pattern"); re = pattern->d.obj->d.robj; regex_t *reg = &re->reg; regmatch_t pos = {}; if (dStr) Jsi_DSInit(dStr); int r = regexec(reg, v, 1, &pos, regexec_flags); if (r >= REG_BADPAT) { char buf[100]; regerror(r, reg, buf, sizeof(buf)); return Jsi_LogError("error while matching pattern: %s", buf); } if (r != REG_NOMATCH) { if (rc) *rc = 1; if (dStr && pos.rm_so >= 0 && pos.rm_eo >= 0 && pos.rm_eo >= pos.rm_so) Jsi_DSAppendLen(dStr, v + pos.rm_so, pos.rm_eo - pos.rm_so); } return JSI_OK; }
Base
1
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { #ifdef HAVE_KSID uint32_t index = FUID_INDEX(fuid); const char *domain; uid_t id; if (index == 0) return (fuid); domain = zfs_fuid_find_by_idx(zfsvfs, index); ASSERT(domain != NULL); if (type == ZFS_OWNER || type == ZFS_ACE_USER) { (void) kidmap_getuidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } else { (void) kidmap_getgidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } return (id); #else /* * The Linux port only supports POSIX IDs, use the passed id. */ return (fuid); #endif /* HAVE_KSID */ }
Class
2
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; int err; BT_DBG("sock %p sk %p len %zu", sock, sk, len); if (flags & (MSG_OOB)) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) { msg->msg_namelen = 0; return 0; } return err; } copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err == 0) { sock_recv_ts_and_drops(msg, sk, skb); if (bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); else msg->msg_namelen = 0; } skb_free_datagram(sk, skb); return err ? : copied; }
Class
2
void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE); if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd); av1->emul_esd = gf_odf_desc_esd_new(2); av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL; av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1; if (btrt) { av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB; av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate; av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate; } if (av1->av1_config) { GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config); if (av1_cfg) { gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_av1_cfg_del(av1_cfg); } } }
Base
1
destroyUserInformationLists(DUL_USERINFO * userInfo) { PRV_SCUSCPROLE * role; role = (PRV_SCUSCPROLE*)LST_Dequeue(&userInfo->SCUSCPRoleList); while (role != NULL) { free(role); role = (PRV_SCUSCPROLE*)LST_Dequeue(&userInfo->SCUSCPRoleList); } LST_Destroy(&userInfo->SCUSCPRoleList); /* extended negotiation */ delete userInfo->extNegList; userInfo->extNegList = NULL; /* user identity negotiation */ delete userInfo->usrIdent; userInfo->usrIdent = NULL; }
Variant
0
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
Variant
0
process_bitmap_updates(STREAM s) { uint16 num_updates; uint16 left, top, right, bottom, width, height; uint16 cx, cy, bpp, Bpp, compress, bufsize, size; uint8 *data, *bmpdata; int i; logger(Protocol, Debug, "%s()", __func__); in_uint16_le(s, num_updates); for (i = 0; i < num_updates; i++) { in_uint16_le(s, left); in_uint16_le(s, top); in_uint16_le(s, right); in_uint16_le(s, bottom); in_uint16_le(s, width); in_uint16_le(s, height); in_uint16_le(s, bpp); Bpp = (bpp + 7) / 8; in_uint16_le(s, compress); in_uint16_le(s, bufsize); cx = right - left + 1; cy = bottom - top + 1; logger(Graphics, Debug, "process_bitmap_updates(), [%d,%d,%d,%d], [%d,%d], bpp=%d, compression=%d", left, top, right, bottom, width, height, Bpp, compress); if (!compress) { int y; bmpdata = (uint8 *) xmalloc(width * height * Bpp); for (y = 0; y < height; y++) { in_uint8a(s, &bmpdata[(height - y - 1) * (width * Bpp)], width * Bpp); } ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); xfree(bmpdata); continue; } if (compress & 0x400) { size = bufsize; } else { in_uint8s(s, 2); /* pad */ in_uint16_le(s, size); in_uint8s(s, 4); /* line_size, final_size */ } in_uint8p(s, data, size); bmpdata = (uint8 *) xmalloc(width * height * Bpp); if (bitmap_decompress(bmpdata, width, height, data, size, Bpp)) { ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); } else { logger(Graphics, Warning, "process_bitmap_updates(), failed to decompress bitmap"); } xfree(bmpdata); } }
Base
1
error_t lpc546xxEthUpdateMacConfig(NetInterface *interface) { uint32_t config; //Read current MAC configuration config = ENET->MAC_CONFIG; //10BASE-T or 100BASE-TX operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config |= ENET_MAC_CONFIG_FES_MASK; } else { config &= ~ENET_MAC_CONFIG_FES_MASK; } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= ENET_MAC_CONFIG_DM_MASK; } else { config &= ~ENET_MAC_CONFIG_DM_MASK; } //Update MAC configuration register ENET->MAC_CONFIG = config; //Successful processing return NO_ERROR; }
Class
2
forbidden_name(struct compiling *c, identifier name, const node *n, int full_checks) { assert(PyUnicode_Check(name)); if (PyUnicode_CompareWithASCIIString(name, "__debug__") == 0) { ast_error(c, n, "assignment to keyword"); return 1; } if (full_checks) { const char * const *p; for (p = FORBIDDEN; *p; p++) { if (PyUnicode_CompareWithASCIIString(name, *p) == 0) { ast_error(c, n, "assignment to keyword"); return 1; } } } return 0; }
Base
1
BGD_DECLARE(void *) gdImageWebpPtr (gdImagePtr im, int *size) { void *rv; gdIOCtx *out = gdNewDynamicCtx(2048, NULL); if (out == NULL) { return NULL; } gdImageWebpCtx(im, out, -1); rv = gdDPExtractData(out, size); out->gd_free(out); return rv; }
Variant
0
static int jas_iccgetuint64(jas_stream_t *in, jas_iccuint64_t *val) { ulonglong tmp; if (jas_iccgetuint(in, 8, &tmp)) return -1; *val = tmp; return 0; }
Class
2
void show_object_with_name(FILE *out, struct object *obj, struct strbuf *path, const char *component) { char *name = path_name(path, component); char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); free(name); }
Class
2
static void put_crypt_info(struct fscrypt_info *ci) { if (!ci) return; key_put(ci->ci_keyring_key); crypto_free_skcipher(ci->ci_ctfm); kmem_cache_free(fscrypt_info_cachep, ci); }
Base
1
ga_init2(garray_T *gap, int itemsize, int growsize) { ga_init(gap); gap->ga_itemsize = itemsize; gap->ga_growsize = growsize; }
Variant
0
int bson_check_string( bson *b, const char *string, const int length ) { return bson_validate_string( b, ( const unsigned char * )string, length, 1, 0, 0 ); }
Base
1
IPV6BuildTestPacket(uint32_t id, uint16_t off, int mf, const char content, int content_len) { Packet *p = NULL; uint8_t *pcontent; IPV6Hdr ip6h; p = SCCalloc(1, sizeof(*p) + default_packet_size); if (unlikely(p == NULL)) return NULL; PACKET_INITIALIZE(p); gettimeofday(&p->ts, NULL); ip6h.s_ip6_nxt = 44; ip6h.s_ip6_hlim = 2; /* Source and dest address - very bogus addresses. */ ip6h.s_ip6_src[0] = 0x01010101; ip6h.s_ip6_src[1] = 0x01010101; ip6h.s_ip6_src[2] = 0x01010101; ip6h.s_ip6_src[3] = 0x01010101; ip6h.s_ip6_dst[0] = 0x02020202; ip6h.s_ip6_dst[1] = 0x02020202; ip6h.s_ip6_dst[2] = 0x02020202; ip6h.s_ip6_dst[3] = 0x02020202; /* copy content_len crap, we need full length */ PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr)); p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p); IPV6_SET_RAW_VER(p->ip6h, 6); /* Fragmentation header. */ IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr)); fh->ip6fh_nxt = IPPROTO_ICMP; fh->ip6fh_ident = htonl(id); fh->ip6fh_offlg = htons((off << 3) | mf); DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0); pcontent = SCCalloc(1, content_len); if (unlikely(pcontent == NULL)) return NULL; memset(pcontent, content, content_len); PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), pcontent, content_len); SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len); SCFree(pcontent); p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len); SET_IPV6_SRC_ADDR(p, &p->src); SET_IPV6_DST_ADDR(p, &p->dst); /* Self test. */ if (IPV6_GET_VER(p) != 6) goto error; if (IPV6_GET_NH(p) != 44) goto error; if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len) goto error; return p; error: fprintf(stderr, "Error building test packet.\n"); if (p != NULL) SCFree(p); return NULL; }
Base
1
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
Class
2
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) { struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; if (!is_irq_none(vdev)) return -EINVAL; vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); if (!vdev->ctx) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); kfree(vdev->ctx); return ret; } vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; if (!msix) { /* * Compute the virtual hardware field for max msi vectors - * it is the log base 2 of the number of vectors. */ vdev->msi_qmax = fls(nvec * 2 - 1) - 1; } return 0; }
Class
2
static int snd_timer_start_slave(struct snd_timer_instance *timeri) { unsigned long flags; spin_lock_irqsave(&slave_active_lock, flags); timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; if (timeri->master) list_add_tail(&timeri->active_list, &timeri->master->slave_active_head); spin_unlock_irqrestore(&slave_active_lock, flags); return 1; /* delayed start */ }
Class
2
int handle_popc(u32 insn, struct pt_regs *regs) { u64 value; int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); } else { maybe_flush_windows(0, insn & 0x1f, rd, from_kernel); value = fetch_reg(insn & 0x1f, regs); } for (ret = 0, i = 0; i < 16; i++) { ret += popc_helper[value & 0xf]; value >>= 4; } if (rd < 16) { if (rd) regs->u_regs[rd] = ret; } else { if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); put_user(ret, &win32->locals[rd - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); put_user(ret, &win->locals[rd - 16]); } } advance(regs); return 1; }
Class
2
char *suhosin_encrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key TSRMLS_DC) { char buffer[4096]; char buffer2[4096]; char *buf = buffer, *buf2 = buffer2, *d, *d_url; int l; if (name_len > sizeof(buffer)-2) { buf = estrndup(name, name_len); } else { memcpy(buf, name, name_len); buf[name_len] = 0; } name_len = php_url_decode(buf, name_len); normalize_varname(buf); name_len = strlen(buf); if (SUHOSIN_G(cookie_plainlist)) { if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) { encrypt_return_plain: if (buf != buffer) { efree(buf); } return estrndup(value, value_len); } } else if (SUHOSIN_G(cookie_cryptlist)) { if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) { goto encrypt_return_plain; } } if (strlen(value) <= sizeof(buffer2)-2) { memcpy(buf2, value, value_len); buf2[value_len] = 0; } else { buf2 = estrndup(value, value_len); } value_len = php_url_decode(buf2, value_len); d = suhosin_encrypt_string(buf2, value_len, buf, name_len, key TSRMLS_CC); d_url = php_url_encode(d, strlen(d), &l); efree(d); if (buf != buffer) { efree(buf); } if (buf2 != buffer2) { efree(buf2); } return d_url; }
Class
2
SPL_METHOD(SplFileObject, eof) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(php_stream_eof(intern->u.file.stream)); } /* }}} */
Base
1
void l2tp_packet_print(const struct l2tp_packet_t *pack, void (*print)(const char *fmt, ...)) { const struct l2tp_attr_t *attr; const struct l2tp_dict_value_t *val; if (pack->hdr.ver == 2) { print("[L2TP tid=%u sid=%u", ntohs(pack->hdr.tid), ntohs(pack->hdr.sid)); log_ppp_debug(" Ns=%u Nr=%u", ntohs(pack->hdr.Ns), ntohs(pack->hdr.Nr)); } else { print("[L2TP cid=%u", pack->hdr.cid); log_ppp_debug(" Ns=%u Nr=%u", ntohs(pack->hdr.Ns), ntohs(pack->hdr.Nr)); } list_for_each_entry(attr, &pack->attrs, entry) { print(" <%s", attr->attr->name); val = l2tp_dict_find_value(attr->attr, attr->val); if (val) print(" %s", val->name); else if (attr->H) print(" (hidden, %hu bytes)", attr->length); else { switch (attr->attr->type) { case ATTR_TYPE_INT16: print(" %i", attr->val.int16); break; case ATTR_TYPE_INT32: print(" %i", attr->val.int32); break; case ATTR_TYPE_STRING: print(" %s", attr->val.string); break; } } print(">"); } print("]\n"); }
Base
1
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, indx, 0, data, size, 500); }
Class
2
new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. */ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } PyTuple_SET_ITEM(c->c_normalize_args, 1, id); id2 = PyObject_Call(c->c_normalize, c->c_normalize_args, NULL); Py_DECREF(id); if (!id2) return NULL; id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; }
Base
1
int __close_fd_get_file(unsigned int fd, struct file **res) { struct files_struct *files = current->files; struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); get_file(file); *res = file; return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); *res = NULL; return -ENOENT; }
Variant
0
static int misaligned_fpu_store(struct pt_regs *regs, __u32 opcode, int displacement_not_indexed, int width_shift, int do_paired_load) { /* Return -1 for a fault, 0 for OK */ int error; int srcreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) { return error; } perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; /* Initialise these to NaNs. */ __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { return -1; } /* 'current' may be the current owner of the FPU state, so context switch the registers into memory so they can be indexed by register number. */ if (last_task_used_math == current) { enable_fpu(); save_fpu(current); disable_fpu(); last_task_used_math = NULL; regs->sr |= SR_FD; } switch (width_shift) { case 2: buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; break; case 3: if (do_paired_load) { buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; } else { #if defined(CONFIG_CPU_LITTLE_ENDIAN) bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg]; buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; #else buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; #endif } break; default: printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } *(__u32*) &buffer = buflo; *(1 + (__u32*) &buffer) = bufhi; if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { return -1; /* fault */ } return 0; } else { die ("Misaligned FPU load inside kernel", regs, 0); return -1; } }
Class
2
static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof TSRMLS_DC) { char *ksep, *vsep, *val; size_t klen, vlen; /* FIXME: string-size_t */ unsigned int new_vlen; if (var->ptr >= var->end) { return 0; } vsep = memchr(var->ptr, '&', var->end - var->ptr); if (!vsep) { if (!eof) { return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen TSRMLS_CC)) { php_register_variable_safe(var->ptr, val, new_vlen, arr TSRMLS_CC); } efree(val); var->ptr = vsep + (vsep != var->end); return 1; }
Class
2
static int get_exif_tag_dbl_value(struct iw_exif_state *e, unsigned int tag_pos, double *pv) { unsigned int field_type; unsigned int value_count; unsigned int value_pos; unsigned int numer, denom; field_type = iw_get_ui16_e(&e->d[tag_pos+2],e->endian); value_count = iw_get_ui32_e(&e->d[tag_pos+4],e->endian); if(value_count!=1) return 0; if(field_type!=5) return 0; // 5=Rational (two uint32's) // A rational is 8 bytes. Since 8>4, it is stored indirectly. First, read // the location where it is stored. value_pos = iw_get_ui32_e(&e->d[tag_pos+8],e->endian); if(value_pos > e->d_len-8) return 0; // Read the actual value. numer = iw_get_ui32_e(&e->d[value_pos ],e->endian); denom = iw_get_ui32_e(&e->d[value_pos+4],e->endian); if(denom==0) return 0; *pv = ((double)numer)/denom; return 1; }
Base
1
static void get_sem_elements(struct sem_data *p) { size_t i; if (!p || !p->sem_nsems || p->sem_perm.id < 0) return; p->elements = xcalloc(p->sem_nsems, sizeof(struct sem_elem)); for (i = 0; i < p->sem_nsems; i++) { struct sem_elem *e = &p->elements[i]; union semun arg = { .val = 0 }; e->semval = semctl(p->sem_perm.id, i, GETVAL, arg); if (e->semval < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETVAL)"); e->ncount = semctl(p->sem_perm.id, i, GETNCNT, arg); if (e->ncount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETNCNT)"); e->zcount = semctl(p->sem_perm.id, i, GETZCNT, arg); if (e->zcount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETZCNT)"); e->pid = semctl(p->sem_perm.id, i, GETPID, arg); if (e->pid < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETPID)"); } }
Base
1
static int bmp_getint32(jas_stream_t *in, int_fast32_t *val) { int n; uint_fast32_t v; int c; for (n = 4, v = 0;;) { if ((c = jas_stream_getc(in)) == EOF) { return -1; } v |= (c << 24); if (--n <= 0) { break; } v >>= 8; } if (val) { *val = v; } return 0; }
Base
1
struct mapped_device *dm_get_from_kobject(struct kobject *kobj) { struct mapped_device *md; md = container_of(kobj, struct mapped_device, kobj_holder.kobj); if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) return NULL; dm_get(md); return md; }
Class
2
void *gdImageJpegPtr (gdImagePtr im, int *size, int quality) { void *rv; gdIOCtx *out = gdNewDynamicCtx (2048, NULL); gdImageJpegCtx (im, out, quality); rv = gdDPExtractData (out, size); out->gd_free (out); return rv; }
Variant
0
static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent outI1: calculated ke+nonce, sending I1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_outI1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_cur_state(); reset_globals(); passert(GLOBALS_ARE_RESET()); }
Class
2
static LUA_FUNCTION(openssl_x509_check_host) { X509 * cert = CHECK_OBJECT(1, X509, "openssl.x509"); if (lua_isstring(L, 2)) { const char *hostname = lua_tostring(L, 2); lua_pushboolean(L, X509_check_host(cert, hostname, strlen(hostname), 0, NULL)); } else { lua_pushboolean(L, 0); } return 1; }
Base
1
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct atm_vcc *vcc; struct sk_buff *skb; int copied, error = -EINVAL; msg->msg_namelen = 0; if (sock->state != SS_CONNECTED) return -ENOTCONN; /* only handle MSG_DONTWAIT and MSG_PEEK */ if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) return -EOPNOTSUPP; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); if (!skb) return error; copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); if (!(flags & MSG_PEEK)) { pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); } skb_free_datagram(sk, skb); return copied; }
Class
2
static VALUE from_document(VALUE klass, VALUE document) { xmlDocPtr doc; xmlRelaxNGParserCtxtPtr ctx; xmlRelaxNGPtr schema; VALUE errors; VALUE rb_schema; Data_Get_Struct(document, xmlDoc, doc); /* In case someone passes us a node. ugh. */ doc = doc->doc; ctx = xmlRelaxNGNewDocParserCtxt(doc); errors = rb_ary_new(); xmlSetStructuredErrorFunc((void *)errors, Nokogiri_error_array_pusher); #ifdef HAVE_XMLRELAXNGSETPARSERSTRUCTUREDERRORS xmlRelaxNGSetParserStructuredErrors( ctx, Nokogiri_error_array_pusher, (void *)errors ); #endif schema = xmlRelaxNGParse(ctx); xmlSetStructuredErrorFunc(NULL, NULL); xmlRelaxNGFreeParserCtxt(ctx); if(NULL == schema) { xmlErrorPtr error = xmlGetLastError(); if(error) Nokogiri_error_raise(NULL, error); else rb_raise(rb_eRuntimeError, "Could not parse document"); return Qnil; } rb_schema = Data_Wrap_Struct(klass, 0, dealloc, schema); rb_iv_set(rb_schema, "@errors", errors); return rb_schema; }
Base
1
horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { wp += n + stride - 1; /* point to last one */ ip += n + stride - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } }
Class
2
static uint get_alen(char *arg, int default_len) { int j; int alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; }
Base
1
NOEXPORT int verify_callback(int preverify_ok, X509_STORE_CTX *callback_ctx) { /* our verify callback function */ SSL *ssl; CLI *c; /* retrieve application specific data */ ssl=X509_STORE_CTX_get_ex_data(callback_ctx, SSL_get_ex_data_X509_STORE_CTX_idx()); c=SSL_get_ex_data(ssl, index_ssl_cli); if(!c->opt->option.verify_chain && !c->opt->option.verify_peer) { s_log(LOG_INFO, "Certificate verification disabled"); return 1; /* accept */ } if(verify_checks(c, preverify_ok, callback_ctx)) { SSL_SESSION *sess=SSL_get1_session(c->ssl); if(sess) { int ok=SSL_SESSION_set_ex_data(sess, index_session_authenticated, (void *)(-1)); SSL_SESSION_free(sess); if(!ok) { sslerror("SSL_SESSION_set_ex_data"); return 0; /* reject */ } } return 1; /* accept */ } if(c->opt->option.client || c->opt->protocol) return 0; /* reject */ if(c->opt->redirect_addr.names) return 1; /* accept */ return 0; /* reject */ }
Base
1
static int v9fs_xattr_set_acl(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { int retval; struct posix_acl *acl; struct v9fs_session_info *v9ses; v9ses = v9fs_dentry2v9ses(dentry); /* * set the attribute on the remote. Without even looking at the * xattr value. We leave it to the server to validate */ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_xattr_set(dentry, handler->name, value, size, flags); if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; if (!inode_owner_or_capable(inode)) return -EPERM; if (value) { /* update the cached acl value */ acl = posix_acl_from_xattr(&init_user_ns, value, size); if (IS_ERR(acl)) return PTR_ERR(acl); else if (acl) { retval = posix_acl_valid(inode->i_sb->s_user_ns, acl); if (retval) goto err_out; } } else acl = NULL; switch (handler->flags) { case ACL_TYPE_ACCESS: if (acl) { umode_t mode = inode->i_mode; retval = posix_acl_equiv_mode(acl, &mode); if (retval < 0) goto err_out; else { struct iattr iattr; if (retval == 0) { /* * ACL can be represented * by the mode bits. So don't * update ACL. */ acl = NULL; value = NULL; size = 0; } /* Updte the mode bits */ iattr.ia_mode = ((mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO)); iattr.ia_valid = ATTR_MODE; /* FIXME should we update ctime ? * What is the following setxattr update the * mode ? */ v9fs_vfs_setattr_dotl(dentry, &iattr); } } break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) { retval = acl ? -EINVAL : 0; goto err_out; } break; default: BUG(); } retval = v9fs_xattr_set(dentry, handler->name, value, size, flags); if (!retval) set_cached_acl(inode, handler->flags, acl); err_out: posix_acl_release(acl); return retval; }
Class
2
bool_t enc28j60IrqHandler(NetInterface *interface) { bool_t flag; uint8_t status; //This flag will be set if a higher priority task must be woken flag = FALSE; //Clear the INTIE bit, immediately after an interrupt event enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_INTIE); //Read interrupt status register status = enc28j60ReadReg(interface, ENC28J60_REG_EIR); //Link status change? if((status & EIR_LINKIF) != 0) { //Disable LINKIE interrupt enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_LINKIE); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet received? if((status & EIR_PKTIF) != 0) { //Disable PKTIE interrupt enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_PKTIE); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((status & (EIR_TXIF | EIE_TXERIE)) != 0) { //Clear interrupt flags enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_TXIF | EIE_TXERIE); //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } //Once the interrupt has been serviced, the INTIE bit //is set again to re-enable interrupts enc28j60SetBit(interface, ENC28J60_REG_EIE, EIE_INTIE); //A higher priority task must be woken? return flag; }
Class
2
static int get_exif_tag_int_value(struct iw_exif_state *e, unsigned int tag_pos, unsigned int *pv) { unsigned int field_type; unsigned int value_count; field_type = iw_get_ui16_e(&e->d[tag_pos+2],e->endian); value_count = iw_get_ui32_e(&e->d[tag_pos+4],e->endian); if(value_count!=1) return 0; if(field_type==3) { // SHORT (uint16) *pv = iw_get_ui16_e(&e->d[tag_pos+8],e->endian); return 1; } else if(field_type==4) { // LONG (uint32) *pv = iw_get_ui32_e(&e->d[tag_pos+8],e->endian); return 1; } return 0; }
Base
1
int snd_timer_close(struct snd_timer_instance *timeri) { struct snd_timer *timer = NULL; struct snd_timer_instance *slave, *tmp; if (snd_BUG_ON(!timeri)) return -ENXIO; /* force to stop the timer */ snd_timer_stop(timeri); if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { /* wait, until the active callback is finished */ spin_lock_irq(&slave_active_lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&slave_active_lock); udelay(10); spin_lock_irq(&slave_active_lock); } spin_unlock_irq(&slave_active_lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); mutex_unlock(&register_mutex); } else { timer = timeri->timer; if (snd_BUG_ON(!timer)) goto out; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&timer->lock); udelay(10); spin_lock_irq(&timer->lock); } spin_unlock_irq(&timer->lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); if (timer && list_empty(&timer->open_list_head) && timer->hw.close) timer->hw.close(timer); /* remove slave links */ list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, open_list) { spin_lock_irq(&slave_active_lock); _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION); list_move_tail(&slave->open_list, &snd_timer_slave_list); slave->master = NULL; slave->timer = NULL; spin_unlock_irq(&slave_active_lock); } mutex_unlock(&register_mutex); } out: if (timeri->private_free) timeri->private_free(timeri); kfree(timeri->owner); kfree(timeri); if (timer) module_put(timer->module); return 0; }
Class
2
SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (_payload) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: kvfree(payload); error2: kfree(description); error: return ret; }
Base
1
static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext3_msg(sb, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; }
Class
2
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; param.channels = channels; param.destroy_on_close = info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE]; if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { hwname = kasprintf(GFP_KERNEL, "%.*s", nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); if (!hwname) return -ENOMEM; param.hwname = hwname; } if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; else param.use_chanctx = (param.channels > 1); if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) param.reg_alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) return -EINVAL; param.regd = hwsim_world_regdom_custom[idx]; } ret = mac80211_hwsim_new_radio(info, &param); kfree(hwname); return ret; }
Base
1
monitor_sync(struct monitor *pmonitor) { if (options.compression) { /* The member allocation is not visible, so sync it */ mm_share_sync(&pmonitor->m_zlib, &pmonitor->m_zback); } }
Class
2
static int init_strtab(ELFOBJ *bin) { r_return_val_if_fail (!bin->strtab, false); if (!bin->shdr) { return false; } Elf_(Half) shstrndx = bin->ehdr.e_shstrndx; if (shstrndx != SHN_UNDEF && !is_shidx_valid (bin, shstrndx)) { return false; } /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[shstrndx].sh_size) { return false; } bin->shstrtab_section = bin->strtab_section = &bin->shdr[shstrndx]; bin->shstrtab_size = bin->shstrtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (bin->shstrtab_section->sh_offset > bin->size) { return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { r_sys_perror ("malloc"); bin->shstrtab = NULL; return false; } int res = r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size); if (res < 1) { R_LOG_ERROR ("read (shstrtab) at 0x%" PFMT64x, (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; }
Base
1
static struct pid *good_sigevent(sigevent_t * event)
{
    struct task_struct *rtn = current->group_leader;

    if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
        (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
         !same_thread_group(rtn, current) ||
         (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
        return NULL;

    if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
        ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
        return NULL;

    return task_pid(rtn);
}
Base
1
prepenv(const struct rule *rule)
{
    static const char *safeset[] = {
        "DISPLAY", "HOME", "LOGNAME", "MAIL",
        "PATH", "TERM", "USER", "USERNAME",
        NULL
    };
    struct env *env;

    env = createenv(rule);

    /* if we started with blank, fill some defaults then apply rules */
    if (!(rule->options & KEEPENV))
        fillenv(env, safeset);
    if (rule->envlist)
        fillenv(env, rule->envlist);

    return flattenenv(env);
}
Class
2
error_t ksz8851UpdateMacAddrFilter(NetInterface *interface)
{
    uint_t i;
    uint_t k;
    uint32_t crc;
    uint16_t hashTable[4];
    MacFilterEntry *entry;

    //Debug message
    TRACE_DEBUG("Updating MAC filter...\r\n");

    //Clear hash table
    osMemset(hashTable, 0, sizeof(hashTable));

    //The MAC address filter contains the list of MAC addresses to accept
    //when receiving an Ethernet frame
    for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++)
    {
        //Point to the current entry
        entry = &interface->macAddrFilter[i];

        //Valid entry?
        if(entry->refCount > 0)
        {
            //Compute CRC over the current MAC address
            crc = ksz8851CalcCrc(&entry->addr, sizeof(MacAddr));
            //Calculate the corresponding index in the table
            k = (crc >> 26) & 0x3F;
            //Update hash table contents
            hashTable[k / 16] |= (1 << (k % 16));
        }
    }

    //Write the hash table to the KSZ8851 controller
    ksz8851WriteReg(interface, KSZ8851_REG_MAHTR0, hashTable[0]);
    ksz8851WriteReg(interface, KSZ8851_REG_MAHTR1, hashTable[1]);
    ksz8851WriteReg(interface, KSZ8851_REG_MAHTR2, hashTable[2]);
    ksz8851WriteReg(interface, KSZ8851_REG_MAHTR3, hashTable[3]);

    //Debug message
    TRACE_DEBUG(" MAHTR0 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR0));
    TRACE_DEBUG(" MAHTR1 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR1));
    TRACE_DEBUG(" MAHTR2 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR2));
    TRACE_DEBUG(" MAHTR3 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR3));

    //Successful processing
    return NO_ERROR;
}
Class
2
static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
{
    switch (mode) {
    case KD_GRAPHICS:
        break;
    case KD_TEXT0:
    case KD_TEXT1:
        mode = KD_TEXT;
        fallthrough;
    case KD_TEXT:
        break;
    default:
        return -EINVAL;
    }

    /* FIXME: this needs the console lock extending */
    if (vc->vc_mode == mode)
        return 0;

    vc->vc_mode = mode;
    if (vc->vc_num != fg_console)
        return 0;

    /* explicitly blank/unblank the screen if switching modes */
    console_lock();
    if (mode == KD_TEXT)
        do_unblank_screen(1);
    else
        do_blank_screen(1);
    console_unlock();

    return 0;
}
Base
1
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
    unsigned long pmnc;
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc;
    struct pt_regs *regs;
    int idx;

    /*
     * Get and reset the IRQ flags
     */
    pmnc = armv7_pmnc_getreset_flags();

    /*
     * Did an overflow occur?
     */
    if (!armv7_pmnc_has_overflowed(pmnc))
        return IRQ_NONE;

    /*
     * Handle the counter(s) overflow(s)
     */
    regs = get_irq_regs();

    perf_sample_data_init(&data, 0);

    cpuc = &__get_cpu_var(cpu_hw_events);
    for (idx = 0; idx <= armpmu->num_events; ++idx) {
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc;

        if (!test_bit(idx, cpuc->active_mask))
            continue;

        /*
         * We have a single interrupt for all counters. Check that
         * each counter has overflowed before we process it.
         */
        if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
            continue;

        hwc = &event->hw;
        armpmu_event_update(event, hwc, idx, 1);
        data.period = event->hw.last_period;
        if (!armpmu_event_set_period(event, hwc, idx))
            continue;

        if (perf_event_overflow(event, 0, &data, regs))
            armpmu->disable(hwc, idx);
    }

    /*
     * Handle the pending perf events.
     *
     * Note: this call *must* be run with interrupts disabled. For
     * platforms that can have the PMU interrupts raised as an NMI, this
     * will not work.
     */
    irq_work_run();

    return IRQ_HANDLED;
}
Class
2
ber_parse_header(STREAM s, int tagval, int *length)
{
    int tag, len;

    if (tagval > 0xff) {
        in_uint16_be(s, tag);
    } else {
        in_uint8(s, tag);
    }

    if (tag != tagval) {
        logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag);
        return False;
    }

    in_uint8(s, len);

    if (len & 0x80) {
        len &= ~0x80;
        *length = 0;
        while (len--)
            next_be(s, *length);
    } else
        *length = len;

    return s_check(s);
}
Class
2
decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len)
{
    return PyBytes_DecodeEscape(s, len, NULL, 0, NULL);
}
Base
1
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                    struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    size_t copied;
    int err;

    BT_DBG("sock %p sk %p len %zu", sock, sk, len);

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb) {
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
            msg->msg_namelen = 0;
            return 0;
        }
        return err;
    }

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err == 0) {
        sock_recv_ts_and_drops(msg, sk, skb);

        if (bt_sk(sk)->skb_msg_name)
            bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen);
        else
            msg->msg_namelen = 0;
    }

    skb_free_datagram(sk, skb);

    return err ? : copied;
}
Class
2
ast2obj_keyword(void* _o)
{
    keyword_ty o = (keyword_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }

    result = PyType_GenericNew(keyword_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_identifier(o->arg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1)
        goto failed;
    Py_DECREF(value);
    value = ast2obj_expr(o->value);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_value, value) == -1)
        goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}
Base
1
nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
                   struct nfs4_acl *acl)
{
    __be32 error;
    int host_error;
    struct dentry *dentry;
    struct inode *inode;
    struct posix_acl *pacl = NULL, *dpacl = NULL;
    unsigned int flags = 0;

    /* Get inode */
    error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR);
    if (error)
        return error;

    dentry = fhp->fh_dentry;
    inode = d_inode(dentry);

    if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
        return nfserr_attrnotsupp;

    if (S_ISDIR(inode->i_mode))
        flags = NFS4_ACL_DIR;

    host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
    if (host_error == -EINVAL)
        return nfserr_attrnotsupp;
    if (host_error < 0)
        goto out_nfserr;

    host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
    if (host_error < 0)
        goto out_release;

    if (S_ISDIR(inode->i_mode)) {
        host_error = inode->i_op->set_acl(inode, dpacl, ACL_TYPE_DEFAULT);
    }

out_release:
    posix_acl_release(pacl);
    posix_acl_release(dpacl);
out_nfserr:
    if (host_error == -EOPNOTSUPP)
        return nfserr_attrnotsupp;
    else
        return nfserrno(host_error);
}
Pillar
3
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                           size_t len, int noblock, int flags, int *addr_len)
{
    struct inet_sock *inet = inet_sk(sk);
    size_t copied = 0;
    int err = -EOPNOTSUPP;
    struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
    struct sk_buff *skb;

    if (flags & MSG_OOB)
        goto out;

    if (addr_len)
        *addr_len = sizeof(*sin);

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb)
        goto out;

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err)
        goto done;

    sock_recv_timestamp(msg, sk, skb);

    /* Copy the address. */
    if (sin) {
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
        sin->sin_port = 0;
        memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
    }
    if (inet->cmsg_flags)
        ip_cmsg_recv(msg, skb);
    if (flags & MSG_TRUNC)
        copied = skb->len;
done:
    skb_free_datagram(sk, skb);
out:
    return err ? err : copied;
}
Class
2
static int cac_cac1_get_certificate(sc_card_t *card, u8 **out_buf, size_t *out_len)
{
    u8 buf[CAC_MAX_SIZE];
    u8 *out_ptr;
    size_t size = 0;
    size_t left = 0;
    size_t len, next_len;
    sc_apdu_t apdu;
    int r = SC_SUCCESS;

    SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);

    /* get the size */
    size = left = *out_buf ? *out_len : sizeof(buf);
    out_ptr = *out_buf ? *out_buf : buf;

    sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, CAC_INS_GET_CERTIFICATE, 0, 0 );
    next_len = MIN(left, 100);
    for (; left > 0; left -= len, out_ptr += len) {
        len = next_len;
        apdu.resp = out_ptr;
        apdu.le = len;
        apdu.resplen = left;

        r = sc_transmit_apdu(card, &apdu);
        if (r < 0) {
            break;
        }
        if (apdu.resplen == 0) {
            r = SC_ERROR_INTERNAL;
            break;
        }
        /* in the old CAC-1, 0x63 means 'more data' in addition to 'pin failed' */
        if (apdu.sw1 != 0x63 || apdu.sw2 < 1) {
            /* we've either finished reading, or hit an error, break */
            r = sc_check_sw(card, apdu.sw1, apdu.sw2);
            left -= len;
            break;
        }
        next_len = MIN(left, apdu.sw2);
    }
    if (r < 0) {
        SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, r);
    }
    r = size - left;
    if (*out_buf == NULL) {
        *out_buf = malloc(r);
        if (*out_buf == NULL) {
            SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_OUT_OF_MEMORY);
        }
        memcpy(*out_buf, buf, r);
    }
    *out_len = r;
    SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, r);
}
Class
2
static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
    unsigned row;
    unsigned col;

    if (evtchn >= xen_evtchn_max_channels())
        return -EINVAL;

    row = EVTCHN_ROW(evtchn);
    col = EVTCHN_COL(evtchn);

    if (evtchn_to_irq[row] == NULL) {
        /* Unallocated irq entries return -1 anyway */
        if (irq == -1)
            return 0;

        evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
        if (evtchn_to_irq[row] == NULL)
            return -ENOMEM;

        clear_evtchn_to_irq_row(row);
    }

    evtchn_to_irq[row][col] = irq;
    return 0;
}
Variant
0
GetCode_(gdIOCtx *fd, CODE_STATIC_DATA *scd, int code_size, int flag, int *ZeroDataBlockP)
{
    int i, j, ret;
    unsigned char count;

    if(flag) {
        scd->curbit = 0;
        scd->lastbit = 0;
        scd->last_byte = 0;
        scd->done = FALSE;
        return 0;
    }

    if((scd->curbit + code_size) >= scd->lastbit) {
        if(scd->done) {
            if(scd->curbit >= scd->lastbit) {
                /* Oh well */
            }
            return -1;
        }

        scd->buf[0] = scd->buf[scd->last_byte - 2];
        scd->buf[1] = scd->buf[scd->last_byte - 1];

        if((count = GetDataBlock(fd, &scd->buf[2], ZeroDataBlockP)) <= 0) {
            scd->done = TRUE;
        }

        scd->last_byte = 2 + count;
        scd->curbit = (scd->curbit - scd->lastbit) + 16;
        scd->lastbit = (2 + count) * 8;
    }

    ret = 0;
    for (i = scd->curbit, j = 0; j < code_size; ++i, ++j) {
        ret |= ((scd->buf[i / 8] & (1 << (i % 8))) != 0) << j;
    }

    scd->curbit += code_size;

    return ret;
}
Class
2
pci_set_cfgdata32(struct pci_vdev *dev, int offset, uint32_t val)
{
    assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
    *(uint32_t *)(dev->cfgdata + offset) = val;
}
Base
1
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
                        struct btrfs_ioctl_get_dev_stats *stats)
{
    struct btrfs_device *dev;
    struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
    int i;

    mutex_lock(&fs_devices->device_list_mutex);
    dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
    mutex_unlock(&fs_devices->device_list_mutex);

    if (!dev) {
        btrfs_warn(fs_info, "get dev_stats failed, device not found");
        return -ENODEV;
    } else if (!dev->dev_stats_valid) {
        btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
        return -ENODEV;
    } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
            if (stats->nr_items > i)
                stats->values[i] =
                    btrfs_dev_stat_read_and_reset(dev, i);
            else
                btrfs_dev_stat_reset(dev, i);
        }
    } else {
        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
            if (stats->nr_items > i)
                stats->values[i] = btrfs_dev_stat_read(dev, i);
    }
    if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
        stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
    return 0;
}
Base
1
static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc,
                                            struct pluto_crypto_req *r,
                                            err_t ugh)
{
    struct dh_continuation *dh = (struct dh_continuation *)pcrc;
    struct msg_digest *md = dh->md;
    struct state *const st = md->st;
    stf_status e;

    DBG(DBG_CONTROLMORE,
        DBG_log("ikev2 parent inR1outI2: calculating g^{xy}, sending I2"));

    if (st == NULL) {
        loglog(RC_LOG_SERIOUS,
               "%s: Request was disconnected from state",
               __FUNCTION__);
        if (dh->md)
            release_md(dh->md);
        return;
    }

    /* XXX should check out ugh */
    passert(ugh == NULL);
    passert(cur_state == NULL);
    passert(st != NULL);

    passert(st->st_suspended_md == dh->md);
    set_suspended(st, NULL); /* no longer connected or suspended */

    set_cur_state(st);

    st->st_calculating = FALSE;

    e = ikev2_parent_inR1outI2_tail(pcrc, r);

    if (dh->md != NULL) {
        complete_v2_state_transition(&dh->md, e);
        if (dh->md)
            release_md(dh->md);
    }
    reset_globals();

    passert(GLOBALS_ARE_RESET());
}
Class
2
static void perf_event_exit_cpu(int cpu)
{
    struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

    perf_event_exit_cpu_context(cpu);

    mutex_lock(&swhash->hlist_mutex);
    swhash->online = false;
    swevent_hlist_release(swhash);
    mutex_unlock(&swhash->hlist_mutex);
}
Variant
0
static void write_version(
    FILE *fp,
    const char *fname,
    const char *dirname,
    xref_t *xref)
{
    long start;
    char *c, *new_fname, data;
    FILE *new_fp;

    start = ftell(fp);

    /* Create file */
    if ((c = strstr(fname, ".pdf")))
        *c = '\0';
    new_fname = malloc(strlen(fname) + strlen(dirname) + 16);
    snprintf(new_fname, strlen(fname) + strlen(dirname) + 16,
             "%s/%s-version-%d.pdf", dirname, fname, xref->version);

    if (!(new_fp = fopen(new_fname, "w"))) {
        ERR("Could not create file '%s'\n", new_fname);
        fseek(fp, start, SEEK_SET);
        free(new_fname);
        return;
    }

    /* Copy original PDF */
    fseek(fp, 0, SEEK_SET);
    while (fread(&data, 1, 1, fp))
        fwrite(&data, 1, 1, new_fp);

    /* Emit an older startxref, refering to an older version. */
    fprintf(new_fp, "\r\nstartxref\r\n%ld\r\n%%%%EOF", xref->start);

    /* Clean */
    fclose(new_fp);
    free(new_fname);
    fseek(fp, start, SEEK_SET);
}
Base
1
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
    u16 offset = sizeof(struct ipv6hdr);
    unsigned int packet_len = skb_tail_pointer(skb) -
                              skb_network_header(skb);
    int found_rhdr = 0;
    *nexthdr = &ipv6_hdr(skb)->nexthdr;

    while (offset <= packet_len) {
        struct ipv6_opt_hdr *exthdr;

        switch (**nexthdr) {

        case NEXTHDR_HOP:
            break;
        case NEXTHDR_ROUTING:
            found_rhdr = 1;
            break;
        case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
            if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
                break;
#endif
            if (found_rhdr)
                return offset;
            break;
        default:
            return offset;
        }

        if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
            return -EINVAL;

        exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
                                         offset);
        offset += ipv6_optlen(exthdr);
        *nexthdr = &exthdr->nexthdr;
    }

    return -EINVAL;
}
Base
1
static int su3000_frontend_attach(struct dvb_usb_adapter *d)
{
    u8 obuf[3] = { 0xe, 0x80, 0 };
    u8 ibuf[] = { 0 };

    if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
        err("command 0x0e transfer failed.");

    obuf[0] = 0xe;
    obuf[1] = 0x02;
    obuf[2] = 1;

    if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
        err("command 0x0e transfer failed.");
    msleep(300);

    obuf[0] = 0xe;
    obuf[1] = 0x83;
    obuf[2] = 0;

    if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
        err("command 0x0e transfer failed.");

    obuf[0] = 0xe;
    obuf[1] = 0x83;
    obuf[2] = 1;

    if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
        err("command 0x0e transfer failed.");

    obuf[0] = 0x51;

    if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
        err("command 0x51 transfer failed.");

    d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
                                  &d->dev->i2c_adap);
    if (d->fe_adap[0].fe == NULL)
        return -EIO;

    if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
                   &dw2104_ts2020_config,
                   &d->dev->i2c_adap)) {
        info("Attached DS3000/TS2020!");
        return 0;
    }

    info("Failed to attach DS3000/TS2020!");
    return -EIO;
}
Class
2
void Huff_offsetTransmit (huff_t *huff, int ch, byte *fout, int *offset) {
    bloc = *offset;
    send(huff->loc[ch], NULL, fout);
    *offset = bloc;
}
Class
2
int insn_get_code_seg_params(struct pt_regs *regs)
{
    struct desc_struct *desc;
    short sel;

    if (v8086_mode(regs))
        /* Address and operand size are both 16-bit. */
        return INSN_CODE_SEG_PARAMS(2, 2);

    sel = get_segment_selector(regs, INAT_SEG_REG_CS);
    if (sel < 0)
        return sel;

    desc = get_desc(sel);
    if (!desc)
        return -EINVAL;

    /*
     * The most significant byte of the Type field of the segment descriptor
     * determines whether a segment contains data or code. If this is a data
     * segment, return error.
     */
    if (!(desc->type & BIT(3)))
        return -EINVAL;

    switch ((desc->l << 1) | desc->d) {
    case 0:
        /*
         * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
         * both 16-bit.
         */
        return INSN_CODE_SEG_PARAMS(2, 2);
    case 1:
        /*
         * Legacy mode. CS.L=0, CS.D=1. Address and operand size are
         * both 32-bit.
         */
        return INSN_CODE_SEG_PARAMS(4, 4);
    case 2:
        /*
         * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit;
         * operand size is 32-bit.
         */
        return INSN_CODE_SEG_PARAMS(4, 8);
    case 3: /* Invalid setting. CS.L=1, CS.D=1 */
        /* fall through */
    default:
        return -EINVAL;
    }
}
Class
2
void cipso_v4_req_delattr(struct request_sock *req)
{
    struct ip_options *opt;
    struct inet_request_sock *req_inet;

    req_inet = inet_rsk(req);
    opt = req_inet->opt;
    if (opt == NULL || opt->cipso == 0)
        return;

    cipso_v4_delopt(&req_inet->opt);
}
Class
2
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
                         size_t sec_attr_len)
{
    u8 *tmp;
    if (!sc_file_valid(file)) {
        return SC_ERROR_INVALID_ARGUMENTS;
    }

    if (sec_attr == NULL) {
        if (file->sec_attr != NULL)
            free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return 0;
    }
    tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
    if (!tmp) {
        if (file->sec_attr)
            free(file->sec_attr);
        file->sec_attr = NULL;
        file->sec_attr_len = 0;
        return SC_ERROR_OUT_OF_MEMORY;
    }
    file->sec_attr = tmp;
    memcpy(file->sec_attr, sec_attr, sec_attr_len);
    file->sec_attr_len = sec_attr_len;

    return 0;
}
Class
2