code: string (lengths 12 to 2.05k)
label_name: string (5 classes)
label: int64 (0 to 4)
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, noblock, &rc); if (!skb) return rc; msg->msg_namelen = 0; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); skb_free_datagram(sk, skb); return rc ? : copied; }
Class
2
void rza1EthEventHandler(NetInterface *interface) { error_t error; //Packet received? if((ETHER.EESR0 & ETHER_EESR0_FR) != 0) { //Clear FR interrupt flag ETHER.EESR0 = ETHER_EESR0_FR; //Process all pending packets do { //Read incoming packet error = rza1EthReceivePacket(interface); //No more data in the receive buffer? } while(error != ERROR_BUFFER_EMPTY); } //Re-enable EDMAC interrupts ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP; }
Class
2
int unlinkat_harder(int dfd, const char *filename, int unlink_flags, RemoveFlags remove_flags) { mode_t old_mode; int r; /* Like unlinkat(), but tries harder: if we get EACCESS we'll try to set the r/w/x bits on the * directory. This is useful if we run unprivileged and have some files where the w bit is * missing. */ if (unlinkat(dfd, filename, unlink_flags) >= 0) return 0; if (errno != EACCES || !FLAGS_SET(remove_flags, REMOVE_CHMOD)) return -errno; r = patch_dirfd_mode(dfd, &old_mode); if (r < 0) return r; if (unlinkat(dfd, filename, unlink_flags) < 0) { r = -errno; /* Try to restore the original access mode if this didn't work */ (void) fchmod(dfd, old_mode); return r; } if (FLAGS_SET(remove_flags, REMOVE_CHMOD_RESTORE) && fchmod(dfd, old_mode) < 0) return -errno; /* If this worked, we won't reset the old mode by default, since we'll need it for other entries too, * and we should destroy the whole thing */ return 0; }
Class
2
static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) { struct bfq_data *bfqd = bfqq->bfqd; enum bfqq_expiration reason; unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); bfq_clear_bfqq_wait_request(bfqq); if (bfqq != bfqd->in_service_queue) { spin_unlock_irqrestore(&bfqd->lock, flags); return; } if (bfq_bfqq_budget_timeout(bfqq)) /* * Also here the queue can be safely expired * for budget timeout without wasting * guarantees */ reason = BFQQE_BUDGET_TIMEOUT; else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) /* * The queue may not be empty upon timer expiration, * because we may not disable the timer when the * first request of the in-service queue arrives * during disk idling. */ reason = BFQQE_TOO_IDLE; else goto schedule_dispatch; bfq_bfqq_expire(bfqd, bfqq, true, reason); schedule_dispatch: spin_unlock_irqrestore(&bfqd->lock, flags); bfq_schedule_dispatch(bfqd);
Variant
0
mcs_parse_domain_params(STREAM s) { int length; ber_parse_header(s, MCS_TAG_DOMAIN_PARAMS, &length); in_uint8s(s, length); return s_check(s); }
Base
1
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; vapic = kmap_atomic(vcpu->arch.apic->vapic_page); data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); kunmap_atomic(vapic); apic_set_tpr(vcpu->arch.apic, data & 0xff); }
Class
2
void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); }
Base
1
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; }
Base
1
void luaT_getvarargs (lua_State *L, CallInfo *ci, StkId where, int wanted) { int i; int nextra = ci->u.l.nextraargs; if (wanted < 0) { wanted = nextra; /* get all extra arguments available */ checkstackp(L, nextra, where); /* ensure stack space */ L->top = where + nextra; /* next instruction will need top */ } for (i = 0; i < wanted && i < nextra; i++) setobjs2s(L, where + i, ci->func - nextra + i); for (; i < wanted; i++) /* complete required results with nil */ setnilvalue(s2v(where + i)); }
Base
1
static ssize_t fuse_fill_write_pages(struct fuse_req *req, struct address_space *mapping, struct iov_iter *ii, loff_t pos) { struct fuse_conn *fc = get_fuse_conn(mapping->host); unsigned offset = pos & (PAGE_CACHE_SIZE - 1); size_t count = 0; int err; req->in.argpages = 1; req->page_descs[0].offset = offset; do { size_t tmp; struct page *page; pgoff_t index = pos >> PAGE_CACHE_SHIFT; size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, iov_iter_count(ii)); bytes = min_t(size_t, bytes, fc->max_write - count); again: err = -EFAULT; if (iov_iter_fault_in_readable(ii, bytes)) break; err = -ENOMEM; page = grab_cache_page_write_begin(mapping, index, 0); if (!page) break; if (mapping_writably_mapped(mapping)) flush_dcache_page(page); tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); flush_dcache_page(page); if (!tmp) { unlock_page(page); page_cache_release(page); bytes = min(bytes, iov_iter_single_seg_count(ii)); goto again; } err = 0; req->pages[req->num_pages] = page; req->page_descs[req->num_pages].length = tmp; req->num_pages++; iov_iter_advance(ii, tmp); count += tmp; pos += tmp; offset += tmp; if (offset == PAGE_CACHE_SIZE) offset = 0; if (!fc->big_writes) break; } while (iov_iter_count(ii) && count < fc->max_write && req->num_pages < req->max_pages && offset == 0); return count > 0 ? count : err; }
Base
1
void ip4_datagram_release_cb(struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ip_options_rcu *inet_opt; __be32 daddr = inet->inet_daddr; struct flowi4 fl4; struct rtable *rt; if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0)) return; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (!IS_ERR(rt)) __sk_dst_set(sk, &rt->dst); rcu_read_unlock(); }
Class
2
delete_buff_tail(buffheader_T *buf, int slen) { int len = (int)STRLEN(buf->bh_curr->b_str); if (len >= slen) { buf->bh_curr->b_str[len - slen] = NUL; buf->bh_space += slen; } }
Base
1
static char *oidc_cache_get_hashed_key(request_rec *r, const char *passphrase, const char *key) { char *input = apr_psprintf(r->pool, "%s:%s", passphrase, key); char *output = NULL; if (oidc_util_hash_string_and_base64url_encode(r, OIDC_JOSE_ALG_SHA256, input, &output) == FALSE) { oidc_error(r, "oidc_util_hash_string_and_base64url_encode returned an error"); return NULL; } return output; }
Class
2
DefragInOrderSimpleTest(void) { Packet *p1 = NULL, *p2 = NULL, *p3 = NULL; Packet *reassembled = NULL; int id = 12; int i; int ret = 0; DefragInit(); p1 = BuildTestPacket(id, 0, 1, 'A', 8); if (p1 == NULL) goto end; p2 = BuildTestPacket(id, 1, 1, 'B', 8); if (p2 == NULL) goto end; p3 = BuildTestPacket(id, 2, 0, 'C', 3); if (p3 == NULL) goto end; if (Defrag(NULL, NULL, p1, NULL) != NULL) goto end; if (Defrag(NULL, NULL, p2, NULL) != NULL) goto end; reassembled = Defrag(NULL, NULL, p3, NULL); if (reassembled == NULL) { goto end; } if (IPV4_GET_HLEN(reassembled) != 20) { goto end; } if (IPV4_GET_IPLEN(reassembled) != 39) { goto end; } /* 20 bytes in we should find 8 bytes of A. */ for (i = 20; i < 20 + 8; i++) { if (GET_PKT_DATA(reassembled)[i] != 'A') { goto end; } } /* 28 bytes in we should find 8 bytes of B. */ for (i = 28; i < 28 + 8; i++) { if (GET_PKT_DATA(reassembled)[i] != 'B') { goto end; } } /* And 36 bytes in we should find 3 bytes of C. */ for (i = 36; i < 36 + 3; i++) { if (GET_PKT_DATA(reassembled)[i] != 'C') goto end; } ret = 1; end: if (p1 != NULL) SCFree(p1); if (p2 != NULL) SCFree(p2); if (p3 != NULL) SCFree(p3); if (reassembled != NULL) SCFree(reassembled); DefragDestroy(); return ret; }
Base
1
vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, struct se_lun *lun, u32 event, u32 reason) { struct vhost_scsi_evt *evt; evt = vhost_scsi_allocate_evt(vs, event, reason); if (!evt) return; if (tpg && lun) { /* TODO: share lun setup code with virtio-scsi.ko */ /* * Note: evt->event is zeroed when we allocate it and * lun[4-7] need to be zero according to virtio-scsi spec. */ evt->event.lun[0] = 0x01; evt->event.lun[1] = tpg->tport_tpgt & 0xFF; if (lun->unpacked_lun >= 256) evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; evt->event.lun[3] = lun->unpacked_lun & 0xFF; } llist_add(&evt->list, &vs->vs_event_list); vhost_work_queue(&vs->dev, &vs->vs_event_work); }
Class
2
int l2tp_packet_send(int sock, struct l2tp_packet_t *pack) { uint8_t *buf = mempool_alloc(buf_pool); struct l2tp_avp_t *avp; struct l2tp_attr_t *attr; uint8_t *ptr; int n; int len = sizeof(pack->hdr); if (!buf) { log_emerg("l2tp: out of memory\n"); return -1; } memset(buf, 0, L2TP_MAX_PACKET_SIZE); ptr = buf + sizeof(pack->hdr); list_for_each_entry(attr, &pack->attrs, entry) { if (len + sizeof(*avp) + attr->length >= L2TP_MAX_PACKET_SIZE) { log_error("l2tp: cann't send packet (exceeds maximum size)\n"); mempool_free(buf); return -1; } avp = (struct l2tp_avp_t *)ptr; avp->type = htons(attr->attr->id); avp->M = attr->M; avp->H = attr->H; avp->length = sizeof(*avp) + attr->length; *(uint16_t *)ptr = htons(*(uint16_t *)ptr); if (attr->H) memcpy(avp->val, attr->val.octets, attr->length); else switch (attr->attr->type) { case ATTR_TYPE_INT16: *(int16_t *)avp->val = htons(attr->val.int16); break; case ATTR_TYPE_INT32: *(int32_t *)avp->val = htonl(attr->val.int32); break; case ATTR_TYPE_INT64: *(uint64_t *)avp->val = htobe64(attr->val.uint64); break; case ATTR_TYPE_STRING: case ATTR_TYPE_OCTETS: memcpy(avp->val, attr->val.string, attr->length); break; } ptr += sizeof(*avp) + attr->length; len += sizeof(*avp) + attr->length; } pack->hdr.length = htons(len); memcpy(buf, &pack->hdr, sizeof(pack->hdr)); n = sendto(sock, buf, ntohs(pack->hdr.length), 0, &pack->addr, sizeof(pack->addr)); mempool_free(buf); if (n < 0) { if (errno == EAGAIN) { if (conf_verbose) log_warn("l2tp: buffer overflow (packet lost)\n"); } else { if (conf_verbose) log_warn("l2tp: sendto: %s\n", strerror(errno)); return -1; } } if (n != ntohs(pack->hdr.length)) { if (conf_verbose) log_warn("l2tp: short write (%i/%i)\n", n, ntohs(pack->hdr.length)); } return 0; }
Base
1
void xenvif_disconnect(struct xenvif *vif) { struct net_device *dev = vif->dev; if (netif_carrier_ok(dev)) { rtnl_lock(); netif_carrier_off(dev); /* discard queued packets */ if (netif_running(dev)) xenvif_down(vif); rtnl_unlock(); xenvif_put(vif); } atomic_dec(&vif->refcnt); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); del_timer_sync(&vif->credit_timeout); if (vif->irq) unbind_from_irqhandler(vif->irq, vif); unregister_netdev(vif->dev); xen_netbk_unmap_frontend_rings(vif); free_netdev(vif->dev); }
Class
2
static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI1outR1: calculated ke+nonce, sending R1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI1outR1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); }
Class
2
mcs_recv_connect_response(STREAM mcs_data) { UNUSED(mcs_data); uint8 result; int length; STREAM s; RD_BOOL is_fastpath; uint8 fastpath_hdr; logger(Protocol, Debug, "%s()", __func__); s = iso_recv(&is_fastpath, &fastpath_hdr); if (s == NULL) return False; ber_parse_header(s, MCS_CONNECT_RESPONSE, &length); ber_parse_header(s, BER_TAG_RESULT, &length); in_uint8(s, result); if (result != 0) { logger(Protocol, Error, "mcs_recv_connect_response(), result=%d", result); return False; } ber_parse_header(s, BER_TAG_INTEGER, &length); in_uint8s(s, length); /* connect id */ mcs_parse_domain_params(s); ber_parse_header(s, BER_TAG_OCTET_STRING, &length); sec_process_mcs_data(s); /* if (length > mcs_data->size) { logger(Protocol, Error, "mcs_recv_connect_response(), expected length=%d, got %d",length, mcs_data->size); length = mcs_data->size; } in_uint8a(s, mcs_data->data, length); mcs_data->p = mcs_data->data; mcs_data->end = mcs_data->data + length; */ return s_check_end(s); }
Base
1
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size) { WebPContext *s = avctx->priv_data; AVPacket pkt; int ret; if (!s->initialized) { ff_vp8_decode_init(avctx); s->initialized = 1; if (s->has_alpha) avctx->pix_fmt = AV_PIX_FMT_YUVA420P; } s->lossless = 0; if (data_size > INT_MAX) { av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n"); return AVERROR_PATCHWELCOME; } av_init_packet(&pkt); pkt.data = data_start; pkt.size = data_size; ret = ff_vp8_decode_frame(avctx, p, got_frame, &pkt); if (ret < 0) return ret; update_canvas_size(avctx, avctx->width, avctx->height); if (s->has_alpha) { ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data, s->alpha_data_size); if (ret < 0) return ret; } return ret; }
Class
2
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) return (void *) nla - (void *) skb->data; return 0; }
Base
1
int mif_validate(jas_stream_t *in) { uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; }
Class
2
SPL_METHOD(SplFileObject, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } /* Do not read the next line to support correct counting with fgetc() if (!intern->current_line) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } */ RETURN_LONG(intern->u.file.current_line_num); } /* }}} */
Base
1
pci_lintr_request(struct pci_vdev *dev) { struct businfo *bi; struct slotinfo *si; int bestpin, bestcount, pin; bi = pci_businfo[dev->bus]; assert(bi != NULL); /* * Just allocate a pin from our slot. The pin will be * assigned IRQs later when interrupts are routed. */ si = &bi->slotinfo[dev->slot]; bestpin = 0; bestcount = si->si_intpins[0].ii_count; for (pin = 1; pin < 4; pin++) { if (si->si_intpins[pin].ii_count < bestcount) { bestpin = pin; bestcount = si->si_intpins[pin].ii_count; } } si->si_intpins[bestpin].ii_count++; dev->lintr.pin = bestpin + 1; pci_set_cfgdata8(dev, PCIR_INTPIN, bestpin + 1); }
Base
1
static void upnp_event_prepare(struct upnp_event_notify * obj) { static const char notifymsg[] = "NOTIFY %s HTTP/1.1\r\n" "Host: %s%s\r\n" #if (UPNP_VERSION_MAJOR == 1) && (UPNP_VERSION_MINOR == 0) "Content-Type: text/xml\r\n" /* UDA v1.0 */ #else "Content-Type: text/xml; charset=\"utf-8\"\r\n" /* UDA v1.1 or later */ #endif "Content-Length: %d\r\n" "NT: upnp:event\r\n" "NTS: upnp:propchange\r\n" "SID: %s\r\n" "SEQ: %u\r\n" "Connection: close\r\n" "Cache-Control: no-cache\r\n" "\r\n" "%.*s\r\n"; char * xml; int l; if(obj->sub == NULL) { obj->state = EError; return; } switch(obj->sub->service) { case EWanCFG: xml = getVarsWANCfg(&l); break; case EWanIPC: xml = getVarsWANIPCn(&l); break; #ifdef ENABLE_L3F_SERVICE case EL3F: xml = getVarsL3F(&l); break; #endif #ifdef ENABLE_6FC_SERVICE case E6FC: xml = getVars6FC(&l); break; #endif #ifdef ENABLE_DP_SERVICE case EDP: xml = getVarsDP(&l); break; #endif default: xml = NULL; l = 0; } obj->buffersize = 1024; obj->buffer = malloc(obj->buffersize); if(!obj->buffer) { syslog(LOG_ERR, "%s: malloc returned NULL", "upnp_event_prepare"); if(xml) { free(xml); } obj->state = EError; return; } obj->tosend = snprintf(obj->buffer, obj->buffersize, notifymsg, obj->path, obj->addrstr, obj->portstr, l+2, obj->sub->uuid, obj->sub->seq, l, xml); if(xml) { free(xml); xml = NULL; } obj->state = ESending; }
Base
1
PyMemoTable_Set(PyMemoTable *self, PyObject *key, Py_ssize_t value) { PyMemoEntry *entry; assert(key != NULL); entry = _PyMemoTable_Lookup(self, key); if (entry->me_key != NULL) { entry->me_value = value; return 0; } Py_INCREF(key); entry->me_key = key; entry->me_value = value; self->mt_used++; /* If we added a key, we can safely resize. Otherwise just return! * If used >= 2/3 size, adjust size. Normally, this quaduples the size. * * Quadrupling the size improves average table sparseness * (reducing collisions) at the cost of some memory. It also halves * the number of expensive resize operations in a growing memo table. * * Very large memo tables (over 50K items) use doubling instead. * This may help applications with severe memory constraints. */ if (!(self->mt_used * 3 >= (self->mt_mask + 1) * 2)) return 0; return _PyMemoTable_ResizeTable(self, (self->mt_used > 50000 ? 2 : 4) * self->mt_used); }
Base
1
compile_lock_unlock( lval_T *lvp, char_u *name_end, exarg_T *eap, int deep, void *coookie) { cctx_T *cctx = coookie; int cc = *name_end; char_u *p = lvp->ll_name; int ret = OK; size_t len; char_u *buf; isntype_T isn = ISN_EXEC; if (cctx->ctx_skip == SKIP_YES) return OK; // Cannot use :lockvar and :unlockvar on local variables. if (p[1] != ':') { char_u *end = find_name_end(p, NULL, NULL, FNE_CHECK_START); if (lookup_local(p, end - p, NULL, cctx) == OK) { char_u *s = p; if (*end != '.' && *end != '[') { emsg(_(e_cannot_lock_unlock_local_variable)); return FAIL; } // For "d.member" put the local variable on the stack, it will be // passed to ex_lockvar() indirectly. if (compile_load(&s, end, cctx, FALSE, FALSE) == FAIL) return FAIL; isn = ISN_LOCKUNLOCK; } } // Checking is done at runtime. *name_end = NUL; len = name_end - p + 20; buf = alloc(len); if (buf == NULL) ret = FAIL; else { char *cmd = eap->cmdidx == CMD_lockvar ? "lockvar" : "unlockvar"; if (deep < 0) vim_snprintf((char *)buf, len, "%s! %s", cmd, p); else vim_snprintf((char *)buf, len, "%s %d %s", cmd, deep, p); ret = generate_EXEC_copy(cctx, isn, buf); vim_free(buf); *name_end = cc; } return ret; }
Variant
0
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir; if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || (((insn >> 30) & 3) != 3)) goto kill_user; dir = decode_direction(insn); if(!ok_for_user(regs, insn, dir)) { goto kill_user; } else { int err, size = decode_access_size(insn); unsigned long addr; if(floating_point_load_or_store_p(insn)) { printk("User FPU load/store unaligned unsupported.\n"); goto kill_user; } addr = compute_effective_address(regs, insn); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), size, (unsigned long *) addr, decode_signedness(insn)); break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs); break; case both: /* * This was supported in 2.4. However, we question * the value of SWAP instruction across word boundaries. */ printk("Unaligned SWAP unsupported.\n"); err = -EFAULT; break; default: unaligned_panic("Impossible user unaligned trap."); goto out; } if (err) goto kill_user; else advance(regs); goto out; } kill_user: user_mna_trap_fault(regs, insn); out: ; }
Class
2
header_put_be_3byte (SF_PRIVATE *psf, int x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 3) { psf->header [psf->headindex++] = (x >> 16) ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = x ; } ; } /* header_put_be_3byte */
Class
2
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; unsigned long flags; if (PCM_RUNTIME_CHECK(substream)) return; runtime = substream->runtime; snd_pcm_stream_lock_irqsave(substream, flags); if (!snd_pcm_running(substream) || snd_pcm_update_hw_ptr0(substream, 1) < 0) goto _end; #ifdef CONFIG_SND_PCM_TIMER if (substream->timer_running) snd_timer_interrupt(substream->timer, 1); #endif _end: snd_pcm_stream_unlock_irqrestore(substream, flags); kill_fasync(&runtime->fasync, SIGIO, POLL_IN); }
Variant
0
R_API char *r_socket_http_get(const char *url, int *code, int *rlen) { char *curl_env = r_sys_getenv ("R2_CURL"); if (curl_env && *curl_env) { char *encoded_url = r_str_escape (url); char *res = r_sys_cmd_strf ("curl '%s'", encoded_url); free (encoded_url); if (res) { if (code) { *code = 200; } if (rlen) { *rlen = strlen (res); } } free (curl_env); return res; } free (curl_env); RSocket *s; int ssl = r_str_startswith (url, "https://"); char *response, *host, *path, *port = "80"; char *uri = strdup (url); if (!uri) { return NULL; } if (code) { *code = 0; } if (rlen) { *rlen = 0; } host = strstr (uri, "://"); if (!host) { free (uri); eprintf ("r_socket_http_get: Invalid URI"); return NULL; } host += 3; port = strchr (host, ':'); if (!port) { port = ssl? "443": "80"; path = host; } else { *port++ = 0; path = port; } path = strchr (path, '/'); if (!path) { path = ""; } else { *path++ = 0; } s = r_socket_new (ssl); if (!s) { eprintf ("r_socket_http_get: Cannot create socket\n"); free (uri); return NULL; } if (r_socket_connect_tcp (s, host, port, 0)) { r_socket_printf (s, "GET /%s HTTP/1.1\r\n" "User-Agent: radare2 "R2_VERSION"\r\n" "Accept: */*\r\n" "Host: %s:%s\r\n" "\r\n", path, host, port); response = r_socket_http_answer (s, code, rlen); } else { eprintf ("Cannot connect to %s:%s\n", host, port); response = NULL; } free (uri); r_socket_free (s); return response; }
Base
1
void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
Base
1
SPL_METHOD(FilesystemIterator, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_FILE_DIR_KEY(intern, SPL_FILE_DIR_KEY_AS_FILENAME)) { RETURN_STRING(intern->u.dir.entry.d_name, 1); } else { spl_filesystem_object_get_file_name(intern TSRMLS_CC); RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } }
Base
1
static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int burst_length; u8 *tx; /* All but the timestamp channel */ burst_length = (indio_dev->num_channels - 1) * sizeof(u16); burst_length += adis->burst->extra_len; adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); if (!adis->buffer) return -ENOMEM; tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(adis->burst->reg_cmd); tx[1] = 0; adis->xfer[0].tx_buf = tx; adis->xfer[0].bits_per_word = 8; adis->xfer[0].len = 2; adis->xfer[1].rx_buf = adis->buffer; adis->xfer[1].bits_per_word = 8; adis->xfer[1].len = burst_length; spi_message_init(&adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg); spi_message_add_tail(&adis->xfer[1], &adis->msg); return 0; }
Variant
0
static void dbEvalSetColumn(DbEvalContext *p, int iCol, Jsi_DString *dStr) { Jsi_Interp *interp = p->jdb->interp; char nbuf[200]; sqlite3_stmt *pStmt = p->pPreStmt->pStmt; switch( sqlite3_column_type(pStmt, iCol) ) { case SQLITE_BLOB: { int bytes = sqlite3_column_bytes(pStmt, iCol); const char *zBlob = (char*)sqlite3_column_blob(pStmt, iCol); if( !zBlob ) { return; } Jsi_DSAppendLen(dStr, zBlob, bytes); return; } case SQLITE_INTEGER: { sqlite_int64 v = sqlite3_column_int64(pStmt, iCol); if (v==0 || v==1) { const char *dectyp = sqlite3_column_decltype(pStmt, iCol); if (dectyp && !Jsi_Strncasecmp(dectyp,"bool", 4)) { Jsi_DSAppend(dStr, (v?"true":"false"), NULL); return; } } #ifdef __WIN32 snprintf(nbuf, sizeof(nbuf), "%" PRId64, (Jsi_Wide)v); #else snprintf(nbuf, sizeof(nbuf), "%lld", v); #endif Jsi_DSAppend(dStr, nbuf, NULL); return; } case SQLITE_FLOAT: { Jsi_NumberToString(interp, sqlite3_column_double(pStmt, iCol), nbuf, sizeof(nbuf)); Jsi_DSAppend(dStr, nbuf, NULL); return; } case SQLITE_NULL: { return; } } const char *str = (char*)sqlite3_column_text(pStmt, iCol ); if (!str) str = p->jdb->optPtr->nullvalue; Jsi_DSAppend(dStr, str?str:"", NULL); }
Base
1
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type) { if (!exp->ex_layout_types) { dprintk("%s: export does not support pNFS\n", __func__); return NULL; } if (!(exp->ex_layout_types & (1 << layout_type))) { dprintk("%s: layout type %d not supported\n", __func__, layout_type); return NULL; } return nfsd4_layout_ops[layout_type]; }
Variant
0
BGD_DECLARE(void *) gdImageWebpPtrEx (gdImagePtr im, int *size, int quality) { void *rv; gdIOCtx *out = gdNewDynamicCtx(2048, NULL); if (out == NULL) { return NULL; } gdImageWebpCtx(im, out, quality); rv = gdDPExtractData(out, size); out->gd_free(out); return rv; }
Variant
0
l2tp_result_code_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++; /* Result Code */ if (length > 2) { /* Error Code (opt) */ ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++; } if (length > 4) { /* Error Message (opt) */ ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length - 4); } }
Base
1
static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width, 16) * 3; aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; }
Class
2
static inline bool f2fs_force_buffered_io(struct inode *inode, struct kiocb *iocb, struct iov_iter *iter) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int rw = iov_iter_rw(iter); if (f2fs_post_read_required(inode)) return true; if (f2fs_is_multi_device(sbi)) return true; /* * for blkzoned device, fallback direct IO to buffered IO, so * all IOs can be serialized by log-structured write. */ if (f2fs_sb_has_blkzoned(sbi)) return true; if (test_opt(sbi, LFS) && (rw == WRITE) && block_unaligned_IO(inode, iocb, iter)) return true; if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) return true;
Base
1
njs_promise_prototype_then(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { njs_int_t ret; njs_value_t *promise, *fulfilled, *rejected, constructor; njs_object_t *object; njs_function_t *function; njs_promise_capability_t *capability; promise = njs_argument(args, 0); if (njs_slow_path(!njs_is_object(promise))) { goto failed; } object = njs_object_proto_lookup(njs_object(promise), NJS_PROMISE, njs_object_t); if (njs_slow_path(object == NULL)) { goto failed; } function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); function->u.native = njs_promise_constructor; njs_set_function(&constructor, function); ret = njs_value_species_constructor(vm, promise, &constructor, &constructor); if (njs_slow_path(ret != NJS_OK)) { return ret; } capability = njs_promise_new_capability(vm, &constructor); if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } fulfilled = njs_arg(args, nargs, 1); rejected = njs_arg(args, nargs, 2); return njs_promise_perform_then(vm, promise, fulfilled, rejected, capability); failed: njs_type_error(vm, "required a promise object"); return NJS_ERROR; }
Base
1
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset) { __wsum csum = skb->csum; if (skb->ip_summed != CHECKSUM_COMPLETE) return; if (offset != 0) csum = csum_sub(csum, csum_partial(skb_transport_header(skb) + tlen, offset, 0)); put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); }
Base
1
error_t lpc546xxEthReceivePacket(NetInterface *interface) { error_t error; size_t n; NetRxAncillary ancillary; //The current buffer is available for reading? if((rxDmaDesc[rxIndex].rdes3 & ENET_RDES3_OWN) == 0) { //FD and LD flags should be set if((rxDmaDesc[rxIndex].rdes3 & ENET_RDES3_FD) != 0 && (rxDmaDesc[rxIndex].rdes3 & ENET_RDES3_LD) != 0) { //Make sure no error occurred if((rxDmaDesc[rxIndex].rdes3 & ENET_RDES3_ES) == 0) { //Retrieve the length of the frame n = rxDmaDesc[rxIndex].rdes3 & ENET_RDES3_PL; //Limit the number of data to read n = MIN(n, LPC546XX_ETH_RX_BUFFER_SIZE); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, rxBuffer[rxIndex], n, &ancillary); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Set the start address of the buffer rxDmaDesc[rxIndex].rdes0 = (uint32_t) rxBuffer[rxIndex]; //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rdes3 = ENET_RDES3_OWN | ENET_RDES3_IOC | ENET_RDES3_BUF1V; //Increment index and wrap around if necessary if(++rxIndex >= LPC546XX_ETH_RX_BUFFER_COUNT) { rxIndex = 0; } } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Clear RBU flag to resume processing ENET->DMA_CH[0].DMA_CHX_STAT = ENET_DMA_CH_DMA_CHX_STAT_RBU_MASK; //Instruct the DMA to poll the receive descriptor list ENET->DMA_CH[0].DMA_CHX_RXDESC_TAIL_PTR = 0; //Return status code return error; }
Class
2
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url, AVDictionary *opts, AVDictionary *opts2, int *is_http) { HLSContext *c = s->priv_data; AVDictionary *tmp = NULL; const char *proto_name = NULL; int ret; av_dict_copy(&tmp, opts, 0); av_dict_copy(&tmp, opts2, 0); if (av_strstart(url, "crypto", NULL)) { if (url[6] == '+' || url[6] == ':') proto_name = avio_find_protocol_name(url + 7); } if (!proto_name) proto_name = avio_find_protocol_name(url); if (!proto_name) return AVERROR_INVALIDDATA; // only http(s) & file are allowed if (!av_strstart(proto_name, "http", NULL) && !av_strstart(proto_name, "file", NULL)) return AVERROR_INVALIDDATA; if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':') ; else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':') ; else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5)) return AVERROR_INVALIDDATA; ret = s->io_open(s, pb, url, AVIO_FLAG_READ, &tmp); if (ret >= 0) { // update cookies on http response with setcookies. char *new_cookies = NULL; if (!(s->flags & AVFMT_FLAG_CUSTOM_IO)) av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies); if (new_cookies) { av_free(c->cookies); c->cookies = new_cookies; } av_dict_set(&opts, "cookies", c->cookies, 0); } av_dict_free(&tmp); if (is_http) *is_http = av_strstart(proto_name, "http", NULL); return ret; }
Class
2
service_info *FindServiceEventURLPath( service_table *table, const char *eventURLPath) { service_info *finger = NULL; uri_type parsed_url; uri_type parsed_url_in; if (table && parse_uri(eventURLPath, strlen(eventURLPath), &parsed_url_in) == HTTP_SUCCESS) { finger = table->serviceList; while (finger) { if (finger->eventURL) { if (parse_uri(finger->eventURL, strlen(finger->eventURL), &parsed_url) == HTTP_SUCCESS) { if (!token_cmp(&parsed_url.pathquery, &parsed_url_in.pathquery)) { return finger; } } } finger = finger->next; } } return NULL; }
Base
1
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_arpt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct arpt_entry *)e); if (ret) return ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - (void *)base; t = compat_arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) goto release_target; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; release_target: module_put(t->u.kernel.target->me); out: return ret; }
Class
2
qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; char nfunc[32]; memset(nfunc, 0, sizeof(nfunc)); memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedi_dbg_log & QEDI_LOG_WARN)) goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), nfunc, line, qedi->host_no, &vaf); else pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); ret: va_end(va); }
Base
1
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; req->started = false; list_del(&req->list); req->remaining = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; if (req->trb) usb_gadget_unmap_request_by_dev(dwc->sysdev, &req->request, req->direction); req->trb = NULL; trace_dwc3_gadget_giveback(req); spin_unlock(&dwc->lock); usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); if (dep->number > 1) pm_runtime_put(dwc->dev); }
Class
2
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) { u64 runtime, runtime_expires; int throttled; /* no need to continue the timer with no bandwidth constraint */ if (cfs_b->quota == RUNTIME_INF) goto out_deactivate; throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred */ if (cfs_b->idle && !throttled) goto out_deactivate; __refill_cfs_bandwidth_runtime(cfs_b); if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; return 0; } /* account preceding periods in which throttling occurred */ cfs_b->nr_throttled += overrun; runtime_expires = cfs_b->runtime_expires; /* * This check is repeated as we are holding onto the new bandwidth while * we unthrottle. This can potentially race with an unthrottled group * trying to acquire new bandwidth from the global pool. This can result * in us over-using our runtime if it is all used during this loop, but * only by limited amounts in that extreme case. */ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; raw_spin_unlock_irqrestore(&cfs_b->lock, flags); /* we can't nest cfs_b->lock while distributing bandwidth */ runtime = distribute_cfs_runtime(cfs_b, runtime, runtime_expires); raw_spin_lock_irqsave(&cfs_b->lock, flags); cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); lsub_positive(&cfs_b->runtime, runtime); } /* * While we are ensured activity in the period following an * unthrottle, this also covers the case in which the new bandwidth is * insufficient to cover the existing bandwidth deficit. (Forcing the * timer to remain active while there are any throttled entities.) */ cfs_b->idle = 0; return 0; out_deactivate: return 1; }
Class
2
static int http_read_stream(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int err, new_location, read_ret; int64_t seek_ret; if (!s->hd) return AVERROR_EOF; if (s->end_chunked_post && !s->end_header) { err = http_read_header(h, &new_location); if (err < 0) return err; } if (s->chunksize >= 0) { if (!s->chunksize) { char line[32]; do { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; } while (!*line); /* skip CR LF from last chunk */ s->chunksize = strtoll(line, NULL, 16); av_log(NULL, AV_LOG_TRACE, "Chunked encoding data size: %"PRId64"'\n", s->chunksize); if (!s->chunksize) return 0; } size = FFMIN(size, s->chunksize); } #if CONFIG_ZLIB if (s->compressed) return http_buf_read_compressed(h, buf, size); #endif /* CONFIG_ZLIB */ read_ret = http_buf_read(h, buf, size); if ( (read_ret < 0 && s->reconnect && (!h->is_streamed || s->reconnect_streamed) && s->filesize > 0 && s->off < s->filesize) || (read_ret == 0 && s->reconnect_at_eof && (!h->is_streamed || s->reconnect_streamed))) { int64_t target = h->is_streamed ? 0 : s->off; if (s->reconnect_delay > s->reconnect_delay_max) return AVERROR(EIO); av_log(h, AV_LOG_INFO, "Will reconnect at %"PRId64" error=%s.\n", s->off, av_err2str(read_ret)); av_usleep(1000U*1000*s->reconnect_delay); s->reconnect_delay = 1 + 2*s->reconnect_delay; seek_ret = http_seek_internal(h, target, SEEK_SET, 1); if (seek_ret != target) { av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRId64".\n", target); return read_ret; } read_ret = http_buf_read(h, buf, size); } else s->reconnect_delay = 0; return read_ret; }
Class
2
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; msg->msg_namelen = 0; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(iocb, sock, msg, len, flags); }
Class
2
process_plane(uint8 * in, int width, int height, uint8 * out, int size) { UNUSED(size); int indexw; int indexh; int code; int collen; int replen; int color; int x; int revcode; uint8 * last_line; uint8 * this_line; uint8 * org_in; uint8 * org_out; org_in = in; org_out = out; last_line = 0; indexh = 0; while (indexh < height) { out = (org_out + width * height * 4) - ((indexh + 1) * width * 4); color = 0; this_line = out; indexw = 0; if (last_line == 0) { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { color = CVAL(in); *out = color; out += 4; indexw++; collen--; } while (replen > 0) { *out = color; out += 4; indexw++; replen--; } } } else { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { x = CVAL(in); if (x & 1) { x = x >> 1; x = x + 1; color = -x; } else { x = x >> 1; color = x; } x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; collen--; } while (replen > 0) { x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; replen--; } } } indexh++; last_line = this_line; } return (int) (in - org_in); }
Base
1
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
Class
2
static int mem_resize(jas_stream_memobj_t *m, int bufsize) { unsigned char *buf; //assert(m->buf_); assert(bufsize >= 0); JAS_DBGLOG(100, ("mem_resize(%p, %d)\n", m, bufsize)); if (!(buf = jas_realloc2(m->buf_, bufsize, sizeof(unsigned char))) && bufsize) { JAS_DBGLOG(100, ("mem_resize realloc failed\n")); return -1; } JAS_DBGLOG(100, ("mem_resize realloc succeeded\n")); m->buf_ = buf; m->bufsize_ = bufsize; return 0; }
Base
1
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) { int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { void __user *namep; namep = (void __user __force *) m->msg_name; err = move_addr_to_kernel(namep, m->msg_namelen, address); if (err < 0) return err; } m->msg_name = address; } else { m->msg_name = NULL; } size = m->msg_iovlen * sizeof(struct iovec); if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) return -EFAULT; m->msg_iov = iov; err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { size_t len = iov[ct].iov_len; if (len > INT_MAX - err) { len = INT_MAX - err; iov[ct].iov_len = len; } err += len; } return err; }
Class
2
static int fill_thread_core_info(struct elf_thread_core_info *t, const struct user_regset_view *view, long signr, size_t *total) { unsigned int i; /* * NT_PRSTATUS is the one special case, because the regset data * goes into the pr_reg field inside the note contents, rather * than being the whole note contents. We fill the reset in here. * We assume that regset 0 is NT_PRSTATUS. */ fill_prstatus(&t->prstatus, t->task, signr); (void) view->regsets[0].get(t->task, &view->regsets[0], 0, sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg, NULL); fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &t->prstatus); *total += notesize(&t->notes[0]); do_thread_regset_writeback(t->task, &view->regsets[0]); /* * Each other regset might generate a note too. For each regset * that has no core_note_type or is inactive, we leave t->notes[i] * all zero and we'll know to skip writing it later. */ for (i = 1; i < view->n; ++i) { const struct user_regset *regset = &view->regsets[i]; do_thread_regset_writeback(t->task, regset); if (regset->core_note_type && (!regset->active || regset->active(t->task, regset))) { int ret; size_t size = regset->n * regset->size; void *data = kmalloc(size, GFP_KERNEL); if (unlikely(!data)) return 0; ret = regset->get(t->task, regset, 0, size, data, NULL); if (unlikely(ret)) kfree(data); else { if (regset->core_note_type != NT_PRFPREG) fill_note(&t->notes[i], "LINUX", regset->core_note_type, size, data); else { t->prstatus.pr_fpvalid = 1; fill_note(&t->notes[i], "CORE", NT_PRFPREG, size, data); } *total += notesize(&t->notes[i]); } } } return 1; }
Base
1
nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, unsigned int hooknum) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; struct nf_nat_range newrange; NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); /* Local packets: make them go to loopback */ if (hooknum == NF_INET_LOCAL_OUT) { newdst = htonl(0x7F000001); } else { struct in_device *indev; struct in_ifaddr *ifa; newdst = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); if (indev != NULL) { ifa = indev->ifa_list; newdst = ifa->ifa_local; } rcu_read_unlock(); if (!newdst) return NF_DROP; } /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newdst; newrange.max_addr.ip = newdst; newrange.min_proto = mr->range[0].min; newrange.max_proto = mr->range[0].max; /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); }
Base
1
tok_new(void) { struct tok_state *tok = (struct tok_state *)PyMem_MALLOC( sizeof(struct tok_state)); if (tok == NULL) return NULL; tok->buf = tok->cur = tok->end = tok->inp = tok->start = NULL; tok->done = E_OK; tok->fp = NULL; tok->input = NULL; tok->tabsize = TABSIZE; tok->indent = 0; tok->indstack[0] = 0; tok->atbol = 1; tok->pendin = 0; tok->prompt = tok->nextprompt = NULL; tok->lineno = 0; tok->level = 0; tok->altwarning = 1; tok->alterror = 1; tok->alttabsize = 1; tok->altindstack[0] = 0; tok->decoding_state = STATE_INIT; tok->decoding_erred = 0; tok->read_coding_spec = 0; tok->enc = NULL; tok->encoding = NULL; tok->cont_line = 0; #ifndef PGEN tok->filename = NULL; tok->decoding_readline = NULL; tok->decoding_buffer = NULL; #endif tok->async_def = 0; tok->async_def_indent = 0; tok->async_def_nl = 0; return tok; }
Base
1
static int mpeg4video_probe(AVProbeData *probe_packet) { uint32_t temp_buffer = -1; int VO = 0, VOL = 0, VOP = 0, VISO = 0, res = 0; int i; for (i = 0; i < probe_packet->buf_size; i++) { temp_buffer = (temp_buffer << 8) + probe_packet->buf[i]; if ((temp_buffer & 0xffffff00) != 0x100) continue; if (temp_buffer == VOP_START_CODE) VOP++; else if (temp_buffer == VISUAL_OBJECT_START_CODE) VISO++; else if (temp_buffer < 0x120) VO++; else if (temp_buffer < 0x130) VOL++; else if (!(0x1AF < temp_buffer && temp_buffer < 0x1B7) && !(0x1B9 < temp_buffer && temp_buffer < 0x1C4)) res++; } if (VOP >= VISO && VOP >= VOL && VO >= VOL && VOL > 0 && res == 0) return AVPROBE_SCORE_EXTENSION; return 0; }
Base
1
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { int ret; ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, indx, &data, 1, 1000); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); return ret; }
Class
2
static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) { struct fib6_result *res = arg->result; struct rt6_info *rt = res->rt6; struct net_device *dev = NULL; if (!rt) return false; if (rt->rt6i_idev) dev = rt->rt6i_idev->dev; /* do not accept result if the route does * not meet the required prefix length */ if (rt->rt6i_dst.plen <= rule->suppress_prefixlen) goto suppress_route; /* do not accept result if the route uses a device * belonging to a forbidden interface group */ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup) goto suppress_route; return false; suppress_route: ip6_rt_put(rt); return true; }
Base
1
void pdf_load_pages_kids(FILE *fp, pdf_t *pdf) { int i, id, dummy; char *buf, *c; long start, sz; start = ftell(fp); /* Load all kids for all xref tables (versions) */ for (i=0; i<pdf->n_xrefs; i++) { if (pdf->xrefs[i].version && (pdf->xrefs[i].end != 0)) { fseek(fp, pdf->xrefs[i].start, SEEK_SET); while (SAFE_F(fp, (fgetc(fp) != 't'))) ; /* Iterate to trailer */ /* Get root catalog */ sz = pdf->xrefs[i].end - ftell(fp); buf = malloc(sz + 1); SAFE_E(fread(buf, 1, sz, fp), sz, "Failed to load /Root.\n"); buf[sz] = '\0'; if (!(c = strstr(buf, "/Root"))) { free(buf); continue; } /* Jump to catalog (root) */ id = atoi(c + strlen("/Root") + 1); free(buf); buf = get_object(fp, id, &pdf->xrefs[i], NULL, &dummy); if (!buf || !(c = strstr(buf, "/Pages"))) { free(buf); continue; } /* Start at the first Pages obj and get kids */ id = atoi(c + strlen("/Pages") + 1); load_kids(fp, id, &pdf->xrefs[i]); free(buf); } } fseek(fp, start, SEEK_SET); }
Base
1
processBatchMultiRuleset(batch_t *pBatch) { ruleset_t *currRuleset; batch_t snglRuleBatch; int i; int iStart; /* start index of partial batch */ int iNew; /* index for new (temporary) batch */ DEFiRet; CHKiRet(batchInit(&snglRuleBatch, pBatch->nElem)); snglRuleBatch.pbShutdownImmediate = pBatch->pbShutdownImmediate; while(1) { /* loop broken inside */ /* search for first unprocessed element */ for(iStart = 0 ; iStart < pBatch->nElem && pBatch->pElem[iStart].state == BATCH_STATE_DISC ; ++iStart) /* just search, no action */; if(iStart == pBatch->nElem) FINALIZE; /* everything processed */ /* prepare temporary batch */ currRuleset = batchElemGetRuleset(pBatch, iStart); iNew = 0; for(i = iStart ; i < pBatch->nElem ; ++i) { if(batchElemGetRuleset(pBatch, i) == currRuleset) { batchCopyElem(&(snglRuleBatch.pElem[iNew++]), &(pBatch->pElem[i])); /* We indicate the element also as done, so it will not be processed again */ pBatch->pElem[i].state = BATCH_STATE_DISC; } } snglRuleBatch.nElem = iNew; /* was left just right by the for loop */ batchSetSingleRuleset(&snglRuleBatch, 1); /* process temp batch */ processBatch(&snglRuleBatch); } batchFree(&snglRuleBatch); finalize_it: RETiRet; }
Base
1
GF_Err gf_isom_get_text_description(GF_ISOFile *movie, u32 trackNumber, u32 descriptionIndex, GF_TextSampleDescriptor **out_desc) { GF_TrackBox *trak; u32 i; Bool is_qt_text = GF_FALSE; GF_Tx3gSampleEntryBox *txt; if (!descriptionIndex || !out_desc) return GF_BAD_PARAM; trak = gf_isom_get_track_from_file(movie, trackNumber); if (!trak || !trak->Media) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } txt = (GF_Tx3gSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, descriptionIndex - 1); if (!txt) return GF_BAD_PARAM; switch (txt->type) { case GF_ISOM_BOX_TYPE_TX3G: break; case GF_ISOM_BOX_TYPE_TEXT: is_qt_text = GF_TRUE; break; default: return GF_BAD_PARAM; } (*out_desc) = (GF_TextSampleDescriptor *) gf_odf_desc_new(GF_ODF_TX3G_TAG); if (! (*out_desc) ) return GF_OUT_OF_MEM; (*out_desc)->back_color = txt->back_color; (*out_desc)->default_pos = txt->default_box; (*out_desc)->default_style = txt->default_style; (*out_desc)->displayFlags = txt->displayFlags; (*out_desc)->vert_justif = txt->vertical_justification; (*out_desc)->horiz_justif = txt->horizontal_justification; if (is_qt_text) { GF_TextSampleEntryBox *qt_txt = (GF_TextSampleEntryBox *) txt; if (qt_txt->textName) { (*out_desc)->font_count = 1; (*out_desc)->fonts = (GF_FontRecord *) gf_malloc(sizeof(GF_FontRecord)); (*out_desc)->fonts[0].fontName = gf_strdup(qt_txt->textName); } } else { (*out_desc)->font_count = txt->font_table->entry_count; (*out_desc)->fonts = (GF_FontRecord *) gf_malloc(sizeof(GF_FontRecord) * txt->font_table->entry_count); for (i=0; i<txt->font_table->entry_count; i++) { (*out_desc)->fonts[i].fontID = txt->font_table->fonts[i].fontID; if (txt->font_table->fonts[i].fontName) (*out_desc)->fonts[i].fontName = gf_strdup(txt->font_table->fonts[i].fontName); } } return GF_OK; }
Base
1
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct scm_timestamping tss; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { struct timespec ts; skb_get_timestampns(skb, &ts); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts), &ts); } } memset(&tss, 0, sizeof(tss)); if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) && ktime_to_timespec_cond(skb->tstamp, tss.ts + 0)) empty = 0; if (shhwtstamps && (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) empty = 0; if (!empty) { put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(tss), &tss); if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, skb->len, skb->data); } }
Base
1
void init_xml_relax_ng()
{
    VALUE nokogiri = rb_define_module("Nokogiri");
    VALUE xml = rb_define_module_under(nokogiri, "XML");
    VALUE klass = rb_define_class_under(xml, "RelaxNG", cNokogiriXmlSchema);

    cNokogiriXmlRelaxNG = klass;

    rb_define_singleton_method(klass, "read_memory", read_memory, 1);
    rb_define_singleton_method(klass, "from_document", from_document, 1);

    rb_define_private_method(klass, "validate_document", validate_document, 1);
}
Base
1
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
    int size, ct, err;

    if (m->msg_namelen) {
        if (mode == VERIFY_READ) {
            void __user *namep;
            namep = (void __user __force *) m->msg_name;
            err = move_addr_to_kernel(namep, m->msg_namelen, address);
            if (err < 0)
                return err;
        }
        m->msg_name = address;
    } else {
        m->msg_name = NULL;
    }

    size = m->msg_iovlen * sizeof(struct iovec);
    if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
        return -EFAULT;

    m->msg_iov = iov;
    err = 0;

    for (ct = 0; ct < m->msg_iovlen; ct++) {
        size_t len = iov[ct].iov_len;

        if (len > INT_MAX - err) {
            len = INT_MAX - err;
            iov[ct].iov_len = len;
        }
        err += len;
    }

    return err;
}
Class
2
IW_IMPL(unsigned int) iw_get_ui16be(const iw_byte *b)
{
    return (b[0]<<8) | b[1];
}
Pillar
3
static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
                       unsigned int cmd, unsigned long arg)
{
    struct n_tty_data *ldata = tty->disc_data;
    int retval;

    switch (cmd) {
    case TIOCOUTQ:
        return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
    case TIOCINQ:
        down_write(&tty->termios_rwsem);
        if (L_ICANON(tty))
            retval = inq_canon(ldata);
        else
            retval = read_cnt(ldata);
        up_write(&tty->termios_rwsem);
        return put_user(retval, (unsigned int __user *) arg);
    default:
        return n_tty_ioctl_helper(tty, file, cmd, arg);
    }
}
Class
2
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    PadContext *s = inlink->dst->priv;
    AVFrame *out;
    int needs_copy = frame_needs_copy(s, in);

    if (needs_copy) {
        av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n");
        out = ff_get_video_buffer(inlink->dst->outputs[0],
                                  FFMAX(inlink->w, s->w),
                                  FFMAX(inlink->h, s->h));
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    } else {
        int i;

        out = in;
        for (i = 0; i < 4 && out->data[i]; i++) {
            int hsub = s->draw.hsub[i];
            int vsub = s->draw.vsub[i];
            out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
                            (s->y >> vsub) * out->linesize[i];
        }
    }

    /* top bar */
    if (s->y) {
        ff_fill_rectangle(&s->draw, &s->color,
                          out->data, out->linesize,
                          0, 0, s->w, s->y);
    }

    /* bottom bar */
    if (s->h > s->y + s->in_h) {
        ff_fill_rectangle(&s->draw, &s->color,
                          out->data, out->linesize,
                          0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
    }

    /* left border */
    ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
                      0, s->y, s->x, in->height);

    if (needs_copy) {
        ff_copy_rectangle2(&s->draw,
                           out->data, out->linesize, in->data, in->linesize,
                           s->x, s->y, 0, 0,
                           in->width, in->height);
    }

    /* right border */
    ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
                      s->x + s->in_w, s->y, s->w - s->x - s->in_w,
                      in->height);

    out->width  = s->w;
    out->height = s->h;

    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(inlink->dst->outputs[0], out);
}
Class
2
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
    if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
        return 0;
    }

    return -1;          /* Must be something else ... */
}
Class
2
escape_xml(const char *text)
{
    static char *escaped;
    static size_t escaped_size;
    char *out;
    size_t len;

    if (!strlen(text))
        return "empty string";

    for (out=escaped, len=0; *text; ++len, ++out, ++text) {
        /* Make sure there's plenty of room for a quoted character */
        if ((len + 8) > escaped_size) {
            char *bigger_escaped;
            escaped_size += 128;
            bigger_escaped = realloc(escaped, escaped_size);
            if (!bigger_escaped) {
                free(escaped);   /* avoid leaking memory */
                escaped = NULL;
                escaped_size = 0;
                /* Error string is cleverly chosen to fail XML validation */
                return ">>> out of memory <<<";
            }
            out = bigger_escaped + len;
            escaped = bigger_escaped;
        }
        switch (*text) {
        case '&':
            strcpy(out, "&amp;");
            len += strlen(out) - 1;
            out = escaped + len;
            break;
        case '<':
            strcpy(out, "&lt;");
            len += strlen(out) - 1;
            out = escaped + len;
            break;
        case '>':
            strcpy(out, "&gt;");
            len += strlen(out) - 1;
            out = escaped + len;
            break;
        default:
            *out = *text;
            break;
        }
    }
    *out = '\x0';  /* NUL terminate the string */
    return escaped;
}
Base
1
IW_IMPL(int) iw_get_i32le(const iw_byte *b)
{
    return (iw_int32)(iw_uint32)(b[0] | (b[1]<<8) | (b[2]<<16) | (b[3]<<24));
}
Pillar
3
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
                                        struct vsock_sock *vsk,
                                        struct msghdr *msg, size_t len,
                                        int flags)
{
    int err;
    int noblock;
    struct vmci_datagram *dg;
    size_t payload_len;
    struct sk_buff *skb;

    noblock = flags & MSG_DONTWAIT;

    if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
        return -EOPNOTSUPP;

    msg->msg_namelen = 0;

    /* Retrieve the head sk_buff from the socket's receive queue. */
    err = 0;
    skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
    if (err)
        return err;

    if (!skb)
        return -EAGAIN;

    dg = (struct vmci_datagram *)skb->data;
    if (!dg)
        /* err is 0, meaning we read zero bytes. */
        goto out;

    payload_len = dg->payload_size;
    /* Ensure the sk_buff matches the payload size claimed in the packet. */
    if (payload_len != skb->len - sizeof(*dg)) {
        err = -EINVAL;
        goto out;
    }

    if (payload_len > len) {
        payload_len = len;
        msg->msg_flags |= MSG_TRUNC;
    }

    /* Place the datagram payload in the user's iovec. */
    err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov, payload_len);
    if (err)
        goto out;

    if (msg->msg_name) {
        struct sockaddr_vm *vm_addr;

        /* Provide the address of the sender. */
        vm_addr = (struct sockaddr_vm *)msg->msg_name;
        vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
        msg->msg_namelen = sizeof(*vm_addr);
    }
    err = payload_len;

out:
    skb_free_datagram(&vsk->sk, skb);
    return err;
}
Class
2
void * gdImageWBMPPtr (gdImagePtr im, int *size, int fg)
{
    void *rv;
    gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
    gdImageWBMPCtx(im, fg, out);
    rv = gdDPExtractData(out, size);
    out->gd_free(out);
    return rv;
}
Variant
0
static void perf_event_mmap_output(struct perf_event *event,
                                   struct perf_mmap_event *mmap_event)
{
    struct perf_output_handle handle;
    struct perf_sample_data sample;
    int size = mmap_event->event_id.header.size;
    int ret;

    perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
    ret = perf_output_begin(&handle, event,
                            mmap_event->event_id.header.size, 0, 0);
    if (ret)
        goto out;

    mmap_event->event_id.pid = perf_event_pid(event, current);
    mmap_event->event_id.tid = perf_event_tid(event, current);

    perf_output_put(&handle, mmap_event->event_id);
    __output_copy(&handle, mmap_event->file_name,
                  mmap_event->file_size);

    perf_event__output_id_sample(event, &handle, &sample);

    perf_output_end(&handle);
out:
    mmap_event->event_id.header.size = size;
}
Class
2
static void Rp_test(js_State *J)
{
    js_Regexp *re;
    const char *text;
    int opts;
    Resub m;

    re = js_toregexp(J, 0);
    text = js_tostring(J, 1);

    opts = 0;
    if (re->flags & JS_REGEXP_G) {
        if (re->last > strlen(text)) {
            re->last = 0;
            js_pushboolean(J, 0);
            return;
        }
        if (re->last > 0) {
            text += re->last;
            opts |= REG_NOTBOL;
        }
    }

    if (!js_regexec(re->prog, text, &m, opts)) {
        if (re->flags & JS_REGEXP_G)
            re->last = re->last + (m.sub[0].ep - text);
        js_pushboolean(J, 1);
        return;
    }

    if (re->flags & JS_REGEXP_G)
        re->last = 0;
    js_pushboolean(J, 0);
}
Class
2
static bool vtable_is_value_in_text_section(RVTableContext *context, ut64 curAddress, ut64 *value)
{
    //value at the current address
    ut64 curAddressValue;
    if (!context->read_addr (context->anal, curAddress, &curAddressValue)) {
        return false;
    }
    //if the value is in text section
    bool ret = vtable_addr_in_text_section (context, curAddressValue);
    if (value) {
        *value = curAddressValue;
    }
    return ret;
}
Base
1
char *url_decode_r(char *to, char *url, size_t size)
{
    char *s = url,           // source
         *d = to,            // destination
         *e = &to[size - 1]; // destination end

    while(*s && d < e) {
        if(unlikely(*s == '%')) {
            if(likely(s[1] && s[2])) {
                *d++ = from_hex(s[1]) << 4 | from_hex(s[2]);
                s += 2;
            }
        }
        else if(unlikely(*s == '+'))
            *d++ = ' ';

        else
            *d++ = *s;

        s++;
    }

    *d = '\0';

    return to;
}
Class
2
int ras_validate(jas_stream_t *in)
{
    uchar buf[RAS_MAGICLEN];
    int i;
    int n;
    uint_fast32_t magic;

    assert(JAS_STREAM_MAXPUTBACK >= RAS_MAGICLEN);

    /* Read the validation data (i.e., the data used for detecting
       the format). */
    if ((n = jas_stream_read(in, buf, RAS_MAGICLEN)) < 0) {
        return -1;
    }

    /* Put the validation data back onto the stream, so that the
       stream position will not be changed. */
    for (i = n - 1; i >= 0; --i) {
        if (jas_stream_ungetc(in, buf[i]) == EOF) {
            return -1;
        }
    }

    /* Did we read enough data? */
    if (n < RAS_MAGICLEN) {
        return -1;
    }

    magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) |
      (JAS_CAST(uint_fast32_t, buf[1]) << 16) |
      (JAS_CAST(uint_fast32_t, buf[2]) << 8) |
      buf[3];

    /* Is the signature correct for the Sun Rasterfile format? */
    if (magic != RAS_MAGIC) {
        return -1;
    }
    return 0;
}
Class
2
static int get_gate_page(struct mm_struct *mm, unsigned long address,
                         unsigned int gup_flags, struct vm_area_struct **vma,
                         struct page **page)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    int ret = -EFAULT;

    /* user gate pages are read-only */
    if (gup_flags & FOLL_WRITE)
        return -EFAULT;
    if (address > TASK_SIZE)
        pgd = pgd_offset_k(address);
    else
        pgd = pgd_offset_gate(mm, address);
    BUG_ON(pgd_none(*pgd));
    p4d = p4d_offset(pgd, address);
    BUG_ON(p4d_none(*p4d));
    pud = pud_offset(p4d, address);
    BUG_ON(pud_none(*pud));
    pmd = pmd_offset(pud, address);
    if (!pmd_present(*pmd))
        return -EFAULT;
    VM_BUG_ON(pmd_trans_huge(*pmd));
    pte = pte_offset_map(pmd, address);
    if (pte_none(*pte))
        goto unmap;
    *vma = get_gate_vma(mm);
    if (!page)
        goto out;
    *page = vm_normal_page(*vma, address, *pte);
    if (!*page) {
        if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
            goto unmap;
        *page = pte_page(*pte);

        /*
         * This should never happen (a device public page in the gate
         * area).
         */
        if (is_device_public_page(*page))
            goto unmap;
    }
    get_page(*page);
out:
    ret = 0;
unmap:
    pte_unmap(pte);
    return ret;
}
Variant
0
mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
                      struct cfg80211_ap_settings *params)
{
    struct ieee_types_header *rate_ie;
    int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
    const u8 *var_pos = params->beacon.head + var_offset;
    int len = params->beacon.head_len - var_offset;
    u8 rate_len = 0;

    rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
    if (rate_ie) {
        memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
        rate_len = rate_ie->len;
    }

    rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
                                       params->beacon.tail,
                                       params->beacon.tail_len);
    if (rate_ie)
        memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);

    return;
}
Base
1
hb_set_invert (hb_set_t *set)
{
    if (unlikely (hb_object_is_immutable (set)))
        return;

    set->invert ();
}
Base
1
static int __init pf_init(void)
{               /* preliminary initialisation */
    struct pf_unit *pf;
    int unit;

    if (disable)
        return -EINVAL;

    pf_init_units();

    if (pf_detect())
        return -ENODEV;
    pf_busy = 0;

    if (register_blkdev(major, name)) {
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
            put_disk(pf->disk);
        return -EBUSY;
    }

    for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
        struct gendisk *disk = pf->disk;

        if (!pf->present)
            continue;
        disk->private_data = pf;
        add_disk(disk);
    }
    return 0;
}
Base
1
static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
{
    struct hlist_head *hashent = ucounts_hashentry(ns, uid);
    struct ucounts *ucounts, *new;

    spin_lock_irq(&ucounts_lock);
    ucounts = find_ucounts(ns, uid, hashent);
    if (!ucounts) {
        spin_unlock_irq(&ucounts_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
            return NULL;

        new->ns = ns;
        new->uid = uid;
        atomic_set(&new->count, 0);

        spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
        if (ucounts) {
            kfree(new);
        } else {
            hlist_add_head(&new->node, hashent);
            ucounts = new;
        }
    }
    if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
        ucounts = NULL;
    spin_unlock_irq(&ucounts_lock);
    return ucounts;
}
Class
2
destroyPresentationContextList(LST_HEAD ** l)
{
    PRV_PRESENTATIONCONTEXTITEM *prvCtx;
    DUL_SUBITEM *subItem;

    if (*l == NULL)
        return;

    prvCtx = (PRV_PRESENTATIONCONTEXTITEM*)LST_Dequeue(l);
    while (prvCtx != NULL) {
        subItem = (DUL_SUBITEM*)LST_Dequeue(&prvCtx->transferSyntaxList);
        while (subItem != NULL) {
            free(subItem);
            subItem = (DUL_SUBITEM*)LST_Dequeue(&prvCtx->transferSyntaxList);
        }
        LST_Destroy(&prvCtx->transferSyntaxList);
        free(prvCtx);
        prvCtx = (PRV_PRESENTATIONCONTEXTITEM*)LST_Dequeue(l);
    }
    LST_Destroy(l);
}
Variant
0
static Jsi_RC NumberToPrecisionCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,
    Jsi_Value **ret, Jsi_Func *funcPtr)
{
    char buf[100];
    int prec = 0, skip = 0;
    Jsi_Number num;
    Jsi_Value *v;
    ChkStringN(_this, funcPtr, v);
    if (Jsi_GetIntFromValue(interp, Jsi_ValueArrayIndex(interp, args, skip), &prec) != JSI_OK)
        return JSI_ERROR;
    if (prec<=0) return JSI_ERROR;
    Jsi_GetDoubleFromValue(interp, v, &num);
    snprintf(buf, sizeof(buf),"%.*" JSI_NUMFFMT, prec, num);
    if (num<0) prec++;
    buf[prec+1] = 0;
    if (buf[prec] == '.') buf[prec] = 0;
    Jsi_ValueMakeStringDup(interp, ret, buf);
    return JSI_OK;
}
Base
1
k5_asn1_full_decode(const krb5_data *code, const struct atype_info *a,
                    void **retrep)
{
    krb5_error_code ret;
    const uint8_t *contents, *remainder;
    size_t clen, rlen;
    taginfo t;

    *retrep = NULL;
    ret = get_tag((uint8_t *)code->data, code->length, &t, &contents,
                  &clen, &remainder, &rlen);
    if (ret)
        return ret;
    /* rlen should be 0, but we don't check it (and due to padding in
     * non-length-preserving enctypes, it will sometimes be nonzero). */
    if (!check_atype_tag(a, &t))
        return ASN1_BAD_ID;
    return decode_atype_to_ptr(&t, contents, clen, a, retrep);
}
Class
2
void beforeSleep(struct aeEventLoop *eventLoop)
{
    REDIS_NOTUSED(eventLoop);
    listNode *ln;
    redisClient *c;

    /* Awake clients that got all the swapped keys they requested */
    if (server.vm_enabled && listLength(server.io_ready_clients)) {
        listIter li;

        listRewind(server.io_ready_clients,&li);
        while((ln = listNext(&li))) {
            c = ln->value;
            struct redisCommand *cmd;

            /* Resume the client. */
            listDelNode(server.io_ready_clients,ln);
            c->flags &= (~REDIS_IO_WAIT);
            server.vm_blocked_clients--;
            aeCreateFileEvent(server.el, c->fd, AE_READABLE,
                readQueryFromClient, c);
            cmd = lookupCommand(c->argv[0]->ptr);
            redisAssert(cmd != NULL);
            call(c,cmd);
            resetClient(c);
            /* There may be more data to process in the input buffer. */
            if (c->querybuf && sdslen(c->querybuf) > 0)
                processInputBuffer(c);
        }
    }
    /* Try to process pending commands for clients that were just unblocked. */
    while (listLength(server.unblocked_clients)) {
        ln = listFirst(server.unblocked_clients);
        redisAssert(ln != NULL);
        c = ln->value;
        listDelNode(server.unblocked_clients,ln);

        /* Process remaining data in the input buffer. */
        if (c->querybuf && sdslen(c->querybuf) > 0)
            processInputBuffer(c);
    }
    /* Write the AOF buffer on disk */
    flushAppendOnlyFile();
}
Class
2
int read_file(struct sc_card *card, char *str_path, unsigned char **data, size_t *data_len)
{
    struct sc_path path;
    struct sc_file *file;
    unsigned char *p;
    int ok = 0;
    int r;
    size_t len;

    sc_format_path(str_path, &path);
    if (SC_SUCCESS != sc_select_file(card, &path, &file)) {
        goto err;
    }

    len = file ? file->size : 4096;
    p = realloc(*data, len);
    if (!p) {
        goto err;
    }
    *data = p;
    *data_len = len;

    r = sc_read_binary(card, 0, p, len, 0);
    if (r < 0)
        goto err;

    *data_len = r;

    ok = 1;

err:
    sc_file_free(file);

    return ok;
}
Class
2
static GCObject **correctgraylist (GCObject **p)
{
    GCObject *curr;
    while ((curr = *p) != NULL) {
        switch (curr->tt) {
            case LUA_VTABLE: case LUA_VUSERDATA: {
                GCObject **next = getgclist(curr);
                if (getage(curr) == G_TOUCHED1) {  /* touched in this cycle? */
                    lua_assert(isgray(curr));
                    gray2black(curr);  /* make it black, for next barrier */
                    changeage(curr, G_TOUCHED1, G_TOUCHED2);
                    p = next;  /* go to next element */
                }
                else {  /* not touched in this cycle */
                    if (!iswhite(curr)) {  /* not white? */
                        lua_assert(isold(curr));
                        if (getage(curr) == G_TOUCHED2)  /* advance from G_TOUCHED2... */
                            changeage(curr, G_TOUCHED2, G_OLD);  /* ... to G_OLD */
                        gray2black(curr);  /* make it black */
                    }
                    /* else, object is white: just remove it from this list */
                    *p = *next;  /* remove 'curr' from gray list */
                }
                break;
            }
            case LUA_VTHREAD: {
                lua_State *th = gco2th(curr);
                lua_assert(!isblack(th));
                if (iswhite(th))  /* new object? */
                    *p = th->gclist;  /* remove from gray list */
                else  /* old threads remain gray */
                    p = &th->gclist;  /* go to next element */
                break;
            }
            default: lua_assert(0);  /* nothing more could be gray here */
        }
    }
    return p;
}
Base
1
static VALUE from_document(VALUE klass, VALUE document)
{
    xmlDocPtr doc;
    xmlSchemaParserCtxtPtr ctx;
    xmlSchemaPtr schema;
    VALUE errors;
    VALUE rb_schema;

    Data_Get_Struct(document, xmlDoc, doc);

    /* In case someone passes us a node. ugh. */
    doc = doc->doc;

    if (has_blank_nodes_p(DOC_NODE_CACHE(doc))) {
        rb_raise(rb_eArgError, "Creating a schema from a document that has blank nodes exposed to Ruby is dangerous");
    }

    ctx = xmlSchemaNewDocParserCtxt(doc);

    errors = rb_ary_new();
    xmlSetStructuredErrorFunc((void *)errors, Nokogiri_error_array_pusher);

#ifdef HAVE_XMLSCHEMASETPARSERSTRUCTUREDERRORS
    xmlSchemaSetParserStructuredErrors(
        ctx,
        Nokogiri_error_array_pusher,
        (void *)errors
    );
#endif

    schema = xmlSchemaParse(ctx);

    xmlSetStructuredErrorFunc(NULL, NULL);
    xmlSchemaFreeParserCtxt(ctx);

    if(NULL == schema) {
        xmlErrorPtr error = xmlGetLastError();
        if(error)
            Nokogiri_error_raise(NULL, error);
        else
            rb_raise(rb_eRuntimeError, "Could not parse document");

        return Qnil;
    }

    rb_schema = Data_Wrap_Struct(klass, 0, dealloc, schema);
    rb_iv_set(rb_schema, "@errors", errors);

    return rb_schema;
    return Qnil;
}
Base
1
init_syntax_once ()
{
    register int c;
    static int done;

    if (done)
        return;

    bzero (re_syntax_table, sizeof re_syntax_table);

    for (c = 'a'; c <= 'z'; c++)
        re_syntax_table[c] = Sword;

    for (c = 'A'; c <= 'Z'; c++)
        re_syntax_table[c] = Sword;

    for (c = '0'; c <= '9'; c++)
        re_syntax_table[c] = Sword;

    re_syntax_table['_'] = Sword;

    done = 1;
}
Base
1
static void handle_do_action(HttpRequest req, HttpResponse res)
{
    Service_T s;
    Action_Type doaction = Action_Ignored;
    const char *action = get_parameter(req, "action");
    const char *token = get_parameter(req, "token");

    if (action) {
        if (is_readonly(req)) {
            send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page");
            return;
        }
        if ((doaction = Util_getAction(action)) == Action_Ignored) {
            send_error(req, res, SC_BAD_REQUEST, "Invalid action \"%s\"", action);
            return;
        }
        for (HttpParameter p = req->params; p; p = p->next) {
            if (IS(p->name, "service")) {
                s = Util_getService(p->value);
                if (! s) {
                    send_error(req, res, SC_BAD_REQUEST, "There is no service named \"%s\"", p->value ? p->value : "");
                    return;
                }
                s->doaction = doaction;
                LogInfo("'%s' %s on user request\n", s->name, action);
            }
        }
        /* Set token for last service only so we'll get it back after all services were handled */
        if (token) {
            Service_T q = NULL;
            for (s = servicelist; s; s = s->next)
                if (s->doaction == doaction)
                    q = s;
            if (q) {
                FREE(q->token);
                q->token = Str_dup(token);
            }
        }
        Run.flags |= Run_ActionPending;
        do_wakeupcall();
    }
}
Compound
4
RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin)
{
    RList *entries = r_list_newf (free);
    if (!entries) {
        return NULL;
    }
    RBinAddr *entry;
    RList *segments = r_bin_ne_get_segments (bin);
    if (!segments) {
        r_list_free (entries);
        return NULL;
    }
    if (bin->ne_header->csEntryPoint) {
        entry = R_NEW0 (RBinAddr);
        if (!entry) {
            r_list_free (entries);
            return NULL;
        }
        entry->bits = 16;
        RBinSection *s = r_list_get_n (segments, bin->ne_header->csEntryPoint - 1);
        entry->paddr = bin->ne_header->ipEntryPoint + (s? s->paddr: 0);
        r_list_append (entries, entry);
    }
    int off = 0;
    while (off < bin->ne_header->EntryTableLength) {
        ut8 bundle_length = *(ut8 *)(bin->entry_table + off);
        if (!bundle_length) {
            break;
        }
        off++;
        ut8 bundle_type = *(ut8 *)(bin->entry_table + off);
        off++;
        int i;
        for (i = 0; i < bundle_length; i++) {
            entry = R_NEW0 (RBinAddr);
            if (!entry) {
                r_list_free (entries);
                return NULL;
            }
            off++;
            if (!bundle_type) { // Skip
                off--;
                free (entry);
                break;
            } else if (bundle_type == 0xFF) { // Moveable
                off += 2;
                ut8 segnum = *(bin->entry_table + off);
                off++;
                ut16 segoff = *(ut16 *)(bin->entry_table + off);
                entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff;
            } else { // Fixed
                entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off);
            }
            off += 2;
            r_list_append (entries, entry);
        }
    }
    r_list_free (segments);
    bin->entries = entries;
    return entries;
}
Base
1
static char *print_value( cJSON *item, int depth, int fmt )
{
    char *out = 0;
    if ( ! item )
        return 0;
    switch ( ( item->type ) & 255 ) {
        case cJSON_NULL:   out = cJSON_strdup( "null" ); break;
        case cJSON_False:  out = cJSON_strdup( "false" ); break;
        case cJSON_True:   out = cJSON_strdup( "true" ); break;
        case cJSON_Number: out = print_number( item ); break;
        case cJSON_String: out = print_string( item ); break;
        case cJSON_Array:  out = print_array( item, depth, fmt ); break;
        case cJSON_Object: out = print_object( item, depth, fmt ); break;
    }
    return out;
}
Base
1
static int crypto_report_one(struct crypto_alg *alg,
                             struct crypto_user_alg *ualg, struct sk_buff *skb)
{
    strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
    strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
            sizeof(ualg->cru_driver_name));
    strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
            sizeof(ualg->cru_module_name));

    ualg->cru_type = 0;
    ualg->cru_mask = 0;
    ualg->cru_flags = alg->cra_flags;
    ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);

    if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
        goto nla_put_failure;
    if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
        struct crypto_report_larval rl;

        strlcpy(rl.type, "larval", sizeof(rl.type));
        if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
                    sizeof(struct crypto_report_larval), &rl))
            goto nla_put_failure;
        goto out;
    }

    if (alg->cra_type && alg->cra_type->report) {
        if (alg->cra_type->report(skb, alg))
            goto nla_put_failure;

        goto out;
    }

    switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
    case CRYPTO_ALG_TYPE_CIPHER:
        if (crypto_report_cipher(skb, alg))
            goto nla_put_failure;

        break;
    case CRYPTO_ALG_TYPE_COMPRESS:
        if (crypto_report_comp(skb, alg))
            goto nla_put_failure;

        break;
    case CRYPTO_ALG_TYPE_ACOMPRESS:
        if (crypto_report_acomp(skb, alg))
            goto nla_put_failure;

        break;
    case CRYPTO_ALG_TYPE_AKCIPHER:
        if (crypto_report_akcipher(skb, alg))
            goto nla_put_failure;

        break;
    case CRYPTO_ALG_TYPE_KPP:
        if (crypto_report_kpp(skb, alg))
            goto nla_put_failure;

        break;
    }

out:
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
Class
2
static int __init ip6_tunnel_init(void)
{
    int err;

    if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) {
        printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
        err = -EAGAIN;
        goto out;
    }

    if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) {
        printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
        err = -EAGAIN;
        goto unreg_ip4ip6;
    }

    err = register_pernet_device(&ip6_tnl_net_ops);
    if (err < 0)
        goto err_pernet;
    return 0;
err_pernet:
    xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
unreg_ip4ip6:
    xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out:
    return err;
}
Class
2
horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc)
{
    TIFFPredictorState* sp = PredictorState(tif);
    tmsize_t stride = sp->stride;
    unsigned char* cp = (unsigned char*) cp0;
    assert((cc%stride)==0);
    if (cc > stride) {
        cc -= stride;
        /*
         * Pipeline the most common cases.
         */
        if (stride == 3) {
            unsigned int r1, g1, b1;
            unsigned int r2 = cp[0];
            unsigned int g2 = cp[1];
            unsigned int b2 = cp[2];
            do {
                r1 = cp[3]; cp[3] = (unsigned char)((r1-r2)&0xff); r2 = r1;
                g1 = cp[4]; cp[4] = (unsigned char)((g1-g2)&0xff); g2 = g1;
                b1 = cp[5]; cp[5] = (unsigned char)((b1-b2)&0xff); b2 = b1;
                cp += 3;
            } while ((cc -= 3) > 0);
        } else if (stride == 4) {
            unsigned int r1, g1, b1, a1;
            unsigned int r2 = cp[0];
            unsigned int g2 = cp[1];
            unsigned int b2 = cp[2];
            unsigned int a2 = cp[3];
            do {
                r1 = cp[4]; cp[4] = (unsigned char)((r1-r2)&0xff); r2 = r1;
                g1 = cp[5]; cp[5] = (unsigned char)((g1-g2)&0xff); g2 = g1;
                b1 = cp[6]; cp[6] = (unsigned char)((b1-b2)&0xff); b2 = b1;
                a1 = cp[7]; cp[7] = (unsigned char)((a1-a2)&0xff); a2 = a1;
                cp += 4;
            } while ((cc -= 4) > 0);
        } else {
            cp += cc - 1;
            do {
                REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--)
            } while ((cc -= stride) > 0);
        }
    }
}
Class
2
static int bson_string_is_db_ref( const unsigned char *string, const int length )
{
    int result = 0;

    if( length >= 4 ) {
        if( string[1] == 'r' && string[2] == 'e' && string[3] == 'f' )
            result = 1;
    }
    else if( length >= 3 ) {
        if( string[1] == 'i' && string[2] == 'd' )
            result = 1;
        else if( string[1] == 'd' && string[2] == 'b' )
            result = 1;
    }

    return result;
}
Base
1