code: string (length 12-2.05k)
label_name: string (5 classes)
label: int64 (0-4)
static int logi_dj_ll_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t count, unsigned char report_type, int reqtype) { struct dj_device *djdev = hid->driver_data; struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev; u8 *out_buf; int ret; if (buf[0] != REPORT_TYPE_LEDS) return -EINVAL; out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC); if (!out_buf) return -ENOMEM; if (count < DJREPORT_SHORT_LENGTH - 2) count = DJREPORT_SHORT_LENGTH - 2; out_buf[0] = REPORT_ID_DJ_SHORT; out_buf[1] = djdev->device_index; memcpy(out_buf + 2, buf, count); ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf, DJREPORT_SHORT_LENGTH, report_type, reqtype); kfree(out_buf); return ret; }
Class
2
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { int n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; }
Base
1
void flash_option_bytes_init(int boot_from_dfu) { uint32_t val = 0xfffff8aa; if (boot_from_dfu){ val &= ~(1<<27); // nBOOT0 = 0 (boot from system rom) } else { if (solo_is_locked()) { val = 0xfffff8cc; } } val &= ~(1<<26); // nSWBOOT0 = 0 (boot from nBoot0) val &= ~(1<<25); // SRAM2_RST = 1 (erase sram on reset) val &= ~(1<<24); // SRAM2_PE = 1 (parity check en) if (FLASH->OPTR == val) { return; } __disable_irq(); while (FLASH->SR & (1<<16)) ; flash_unlock(); if (FLASH->CR & (1<<30)) { FLASH->OPTKEYR = 0x08192A3B; FLASH->OPTKEYR = 0x4C5D6E7F; } FLASH->OPTR =val; FLASH->CR |= (1<<17); while (FLASH->SR & (1<<16)) ; flash_lock(); __enable_irq(); }
Class
2
static int kvm_ioctl_create_device(struct kvm *kvm, struct kvm_create_device *cd) { struct kvm_device_ops *ops = NULL; struct kvm_device *dev; bool test = cd->flags & KVM_CREATE_DEVICE_TEST; int ret; if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) return -ENODEV; ops = kvm_device_ops_table[cd->type]; if (ops == NULL) return -ENODEV; if (test) return 0; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->ops = ops; dev->kvm = kvm; mutex_lock(&kvm->lock); ret = ops->create(dev, cd->type); if (ret < 0) { mutex_unlock(&kvm->lock); kfree(dev); return ret; } list_add(&dev->vm_node, &kvm->devices); mutex_unlock(&kvm->lock); if (ops->init) ops->init(dev); ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { mutex_lock(&kvm->lock); list_del(&dev->vm_node); mutex_unlock(&kvm->lock); ops->destroy(dev); return ret; } kvm_get_kvm(kvm); cd->fd = ret; return 0; }
Class
2
*/ int re_yyget_lineno (yyscan_t yyscanner) { struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if (! YY_CURRENT_BUFFER) return 0; return yylineno;
Base
1
static SDL_Surface* Create_Surface_Solid(int width, int height, SDL_Color fg, Uint32 *color) { const int alignment = Get_Alignement() - 1; SDL_Surface *textbuf; Sint64 size; /* Create a surface with memory: * - pitch is rounded to alignment * - adress is aligned */ void *pixels, *ptr; /* Worse case at the end of line pulling 'alignment' extra blank pixels */ Sint64 pitch = width + alignment; pitch += alignment; pitch &= ~alignment; size = height * pitch + sizeof (void *) + alignment; if (size < 0 || size > SDL_MAX_SINT32) { /* Overflow... */ return NULL; } ptr = SDL_malloc((size_t)size); if (ptr == NULL) { return NULL; } /* address is aligned */ pixels = (void *)(((uintptr_t)ptr + sizeof(void *) + alignment) & ~alignment); ((void **)pixels)[-1] = ptr; textbuf = SDL_CreateRGBSurfaceWithFormatFrom(pixels, width, height, 0, pitch, SDL_PIXELFORMAT_INDEX8); if (textbuf == NULL) { SDL_free(ptr); return NULL; } /* Let SDL handle the memory allocation */ textbuf->flags &= ~SDL_PREALLOC; textbuf->flags |= SDL_SIMD_ALIGNED; /* Initialize with background to 0 */ SDL_memset(pixels, 0, height * pitch); /* Underline/Strikethrough color style */ *color = 1; /* Fill the palette: 1 is foreground */ { SDL_Palette *palette = textbuf->format->palette; palette->colors[0].r = 255 - fg.r; palette->colors[0].g = 255 - fg.g; palette->colors[0].b = 255 - fg.b; palette->colors[1].r = fg.r; palette->colors[1].g = fg.g; palette->colors[1].b = fg.b; palette->colors[1].a = fg.a; } SDL_SetColorKey(textbuf, SDL_TRUE, 0); return textbuf;
Base
1
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name; struct ddpehdr *ddp; int copied = 0; int offset = 0; int err = 0; struct sk_buff *skb; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); lock_sock(sk); if (!skb) goto out; /* FIXME: use skb->cb to be able to use shared skbs */ ddp = ddp_hdr(skb); copied = ntohs(ddp->deh_len_hops) & 1023; if (sk->sk_type != SOCK_RAW) { offset = sizeof(*ddp); copied -= offset; } if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied); if (!err) { if (sat) { sat->sat_family = AF_APPLETALK; sat->sat_port = ddp->deh_sport; sat->sat_addr.s_node = ddp->deh_snode; sat->sat_addr.s_net = ddp->deh_snet; } msg->msg_namelen = sizeof(*sat); } skb_free_datagram(sk, skb); /* Free the datagram. */ out: release_sock(sk); return err ? : copied; }
Class
2
MONGO_EXPORT void __mongo_set_error( mongo *conn, mongo_error_t err, const char *str, int errcode ) { int errstr_size, str_size; conn->err = err; conn->errcode = errcode; if( str ) { str_size = strlen( str ) + 1; errstr_size = str_size > MONGO_ERR_LEN ? MONGO_ERR_LEN : str_size; memcpy( conn->errstr, str, errstr_size ); conn->errstr[errstr_size-1] = '\0'; } }
Base
1
snmp_ber_encode_null(unsigned char *out, uint32_t *out_len, uint8_t type) { (*out_len)++; *out-- = 0x00; out = snmp_ber_encode_type(out, out_len, type); return out; }
Base
1
archive_read_format_rar_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct rar *rar = (struct rar *)(a->format->data); int ret; if (rar->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) { rar->has_encrypted_entries = 0; } if (rar->bytes_unconsumed > 0) { /* Consume as much as the decompressor actually used. */ __archive_read_consume(a, rar->bytes_unconsumed); rar->bytes_unconsumed = 0; } *buff = NULL; if (rar->entry_eof || rar->offset_seek >= rar->unp_size) { *size = 0; *offset = rar->offset; if (*offset < rar->unp_size) *offset = rar->unp_size; return (ARCHIVE_EOF); } switch (rar->compression_method) { case COMPRESS_METHOD_STORE: ret = read_data_stored(a, buff, size, offset); break; case COMPRESS_METHOD_FASTEST: case COMPRESS_METHOD_FAST: case COMPRESS_METHOD_NORMAL: case COMPRESS_METHOD_GOOD: case COMPRESS_METHOD_BEST: ret = read_data_compressed(a, buff, size, offset); if (ret != ARCHIVE_OK && ret != ARCHIVE_WARN) __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context); break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unsupported compression method for RAR file."); ret = ARCHIVE_FATAL; break; } return (ret); }
Variant
0
SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 )); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; chr[ len ++ ] = 0; return chr; }
Class
2
static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices, u64 devid, const u8 *uuid) { struct btrfs_device *dev; list_for_each_entry(dev, &fs_devices->devices, dev_list) { if (dev->devid == devid && (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) { return dev; } } return NULL; }
Base
1
static int jpc_ppm_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in) { jpc_ppm_t *ppm = &ms->parms.ppm; /* Eliminate compiler warning about unused variables. */ cstate = 0; ppm->data = 0; if (ms->len < 1) { goto error; } if (jpc_getuint8(in, &ppm->ind)) { goto error; } ppm->len = ms->len - 1; if (ppm->len > 0) { if (!(ppm->data = jas_malloc(ppm->len))) { goto error; } if (JAS_CAST(uint, jas_stream_read(in, ppm->data, ppm->len)) != ppm->len) { goto error; } } else { ppm->data = 0; } return 0; error: jpc_ppm_destroyparms(ms); return -1; }
Base
1
static bool tailmatch(const char *little, const char *bigone) { size_t littlelen = strlen(little); size_t biglen = strlen(bigone); if(littlelen > biglen) return FALSE; return Curl_raw_equal(little, bigone+biglen-littlelen) ? TRUE : FALSE; }
Class
2
static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
Class
2
t1mac_output_ascii(char *s, int len) { if (blocktyp == POST_BINARY) { output_current_post(); blocktyp = POST_ASCII; } /* Mac line endings */ if (len > 0 && s[len-1] == '\n') s[len-1] = '\r'; t1mac_output_data((byte *)s, len); if (strncmp(s, "/FontName", 9) == 0) { for (s += 9; isspace(*s); s++) ; if (*s == '/') { const char *t = ++s; while (*t && !isspace(*t)) t++; free(font_name); font_name = (char *)malloc(t - s + 1); memcpy(font_name, s, t - s); font_name[t - s] = 0; } } }
Class
2
static int persistent_prepare_exception(struct dm_exception_store *store, struct dm_exception *e) { struct pstore *ps = get_info(store); uint32_t stride; chunk_t next_free; sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); /* Is there enough room ? */ if (size < ((ps->next_free + 1) * store->chunk_size)) return -ENOSPC; e->new_chunk = ps->next_free; /* * Move onto the next free pending, making sure to take * into account the location of the metadata chunks. */ stride = (ps->exceptions_per_area + 1); next_free = ++ps->next_free; if (sector_div(next_free, stride) == 1) ps->next_free++; atomic_inc(&ps->pending_count); return 0; }
Class
2
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pmd_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
Variant
0
static Jsi_RC jsi_ArrayFlatSub(Jsi_Interp *interp, Jsi_Obj* nobj, Jsi_Value *arr, int depth) { int i, n = 0, len = Jsi_ObjGetLength(interp, arr->d.obj); if (len <= 0) return JSI_OK; Jsi_RC rc = JSI_OK; int clen = Jsi_ObjGetLength(interp, nobj); for (i = 0; i < len && rc == JSI_OK; i++) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, arr, i); if (t && depth>0 && Jsi_ValueIsArray(interp, t)) rc = jsi_ArrayFlatSub(interp, nobj, t , depth-1); else if (!Jsi_ValueIsUndef(interp, t)) Jsi_ObjArrayAdd(interp, nobj, t); if ((++n + clen)>interp->maxArrayList) return Jsi_LogError("array size exceeded"); } return rc; }
Base
1
ber_parse_header(STREAM s, int tagval, int *length) { int tag, len; if (tagval > 0xff) { in_uint16_be(s, tag); } else { in_uint8(s, tag); } if (tag != tagval) { logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag); return False; } in_uint8(s, len); if (len & 0x80) { len &= ~0x80; *length = 0; while (len--) next_be(s, *length); } else *length = len; return s_check(s); }
Base
1
static int decode_dds1(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_start = frame; const uint8_t *frame_end = frame + width * height; int mask = 0x10000, bitbuf = 0; int i, v, offset, count, segments; segments = bytestream2_get_le16(gb); while (segments--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; if (mask == 0x10000) { bitbuf = bytestream2_get_le16u(gb); mask = 1; } if (bitbuf & mask) { v = bytestream2_get_le16(gb); offset = (v & 0x1FFF) << 2; count = ((v >> 13) + 2) << 1; if (frame - frame_start < offset || frame_end - frame < count*2 + width) return AVERROR_INVALIDDATA; for (i = 0; i < count; i++) { frame[0] = frame[1] = frame[width] = frame[width + 1] = frame[-offset]; frame += 2; } } else if (bitbuf & (mask << 1)) { v = bytestream2_get_le16(gb)*2; if (frame - frame_end < v) return AVERROR_INVALIDDATA; frame += v; } else { if (frame_end - frame < width + 3) return AVERROR_INVALIDDATA; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; } mask <<= 2; } return 0; }
Class
2
static char* get_private_subtags(const char* loc_name) { char* result =NULL; int singletonPos = 0; int len =0; const char* mod_loc_name =NULL; if( loc_name && (len = strlen(loc_name)>0 ) ){ mod_loc_name = loc_name ; len = strlen(mod_loc_name); while( (singletonPos = getSingletonPos(mod_loc_name))!= -1){ if( singletonPos!=-1){ if( (*(mod_loc_name+singletonPos)=='x') || (*(mod_loc_name+singletonPos)=='X') ){ /* private subtag start found */ if( singletonPos + 2 == len){ /* loc_name ends with '-x-' ; return NULL */ } else{ /* result = mod_loc_name + singletonPos +2; */ result = estrndup(mod_loc_name + singletonPos+2 , (len -( singletonPos +2) ) ); } break; } else{ if( singletonPos + 1 >= len){ /* String end */ break; } else { /* singleton found but not a private subtag , hence check further in the string for the private subtag */ mod_loc_name = mod_loc_name + singletonPos +1; len = strlen(mod_loc_name); } } } } /* end of while */ } return result; }
Base
1
static Jsi_RC jsi_ArrayReduceSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_RC rc = JSI_OK; int curlen, i; Jsi_Obj *obj; Jsi_Value *func, *vpargs, *ini = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nrPtr = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) Jsi_ObjSetLength(interp, obj, 0); Jsi_ObjListifyArray(interp, obj); Jsi_Value *vobjs[4]; int n, rev = (op==2); Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>4) maa = 4; for (n = 0, i = (rev?obj->arrCnt-1:0); (rev?i>=0:i < (int)obj->arrCnt) && rc == JSI_OK; n++, i = (rev?i-1:i+1)) { if (!obj->arr[i]) continue; if (n==0 && !ini) { ini = obj->arr[i]; continue; } vobjs[0] = ini; vobjs[1] = obj->arr[i]; vobjs[2] = (maa>2?Jsi_ValueNewNumber(interp, i):NULL); vobjs[3] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, NULL); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; ini = nrPtr; } if (rc == JSI_OK && ini) Jsi_ValueCopy(interp, *ret, ini); Jsi_DecrRefCount(interp, nrPtr); return rc; }
Base
1
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) { struct super_block *sb; int err; int rc; if (!ext4_handle_valid(handle)) { ext4_put_nojournal(handle); return 0; } if (!handle->h_transaction) { err = jbd2_journal_stop(handle); return handle->h_err ? handle->h_err : err; } sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = jbd2_journal_stop(handle); if (!err) err = rc; if (err) __ext4_std_error(sb, where, line, err); return err; }
Variant
0
int ocfs2_set_acl(handle_t *handle, struct inode *inode, struct buffer_head *di_bh, int type, struct posix_acl *acl, struct ocfs2_alloc_context *meta_ac, struct ocfs2_alloc_context *data_ac) { int name_index; void *value = NULL; size_t size = 0; int ret; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch (type) { case ACL_TYPE_ACCESS: name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { umode_t mode = inode->i_mode; ret = posix_acl_equiv_mode(acl, &mode); if (ret < 0) return ret; if (ret == 0) acl = NULL; ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); if (ret) return ret; } break; case ACL_TYPE_DEFAULT: name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { value = ocfs2_acl_to_xattr(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } if (handle) ret = ocfs2_xattr_set_handle(handle, inode, di_bh, name_index, "", value, size, 0, meta_ac, data_ac); else ret = ocfs2_xattr_set(inode, name_index, "", value, size, 0); kfree(value); return ret; }
Class
2
process_plane(uint8 * in, int width, int height, uint8 * out, int size) { UNUSED(size); int indexw; int indexh; int code; int collen; int replen; int color; int x; int revcode; uint8 * last_line; uint8 * this_line; uint8 * org_in; uint8 * org_out; org_in = in; org_out = out; last_line = 0; indexh = 0; while (indexh < height) { out = (org_out + width * height * 4) - ((indexh + 1) * width * 4); color = 0; this_line = out; indexw = 0; if (last_line == 0) { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { color = CVAL(in); *out = color; out += 4; indexw++; collen--; } while (replen > 0) { *out = color; out += 4; indexw++; replen--; } } } else { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { x = CVAL(in); if (x & 1) { x = x >> 1; x = x + 1; color = -x; } else { x = x >> 1; color = x; } x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; collen--; } while (replen > 0) { x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; replen--; } } } indexh++; last_line = this_line; } return (int) (in - org_in); }
Base
1
GF_Box *encs_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_ENCS); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); tmp->internal_type = GF_ISOM_SAMPLE_ENTRY_MP4S; return (GF_Box *)tmp; }
Base
1
static void oidc_scrub_headers(request_rec *r) { oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module); if (cfg->scrub_request_headers != 0) { /* scrub all headers starting with OIDC_ first */ oidc_scrub_request_headers(r, OIDC_DEFAULT_HEADER_PREFIX, oidc_cfg_dir_authn_header(r)); /* * then see if the claim headers need to be removed on top of that * (i.e. the prefix does not start with the default OIDC_) */ if ((strstr(cfg->claim_prefix, OIDC_DEFAULT_HEADER_PREFIX) != cfg->claim_prefix)) { oidc_scrub_request_headers(r, cfg->claim_prefix, NULL); } } }
Class
2
alloc_limit_assert (char *fn_name, size_t size) { if (alloc_limit && size > alloc_limit) { alloc_limit_failure (fn_name, size); exit (-1); } }
Base
1
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { int result; handle_t *handle = NULL; struct super_block *sb = file_inode(vma->vm_file)->i_sb; bool write = vmf->flags & FAULT_FLAG_WRITE; if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); } if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else result = __dax_fault(vma, vmf, ext4_get_block_dax, ext4_end_io_unwritten); if (write) { if (!IS_ERR(handle)) ext4_journal_stop(handle); sb_end_pagefault(sb); } return result; }
Class
2
shutdown_mib(void) { unload_all_mibs(); if (tree_top) { if (tree_top->label) SNMP_FREE(tree_top->label); SNMP_FREE(tree_top); } tree_head = NULL; Mib = NULL; if (_mibindexes) { int i; for (i = 0; i < _mibindex; ++i) SNMP_FREE(_mibindexes[i]); free(_mibindexes); _mibindex = 0; _mibindex_max = 0; _mibindexes = NULL; } if (Prefix != NULL && Prefix != &Standard_Prefix[0]) SNMP_FREE(Prefix); if (Prefix) Prefix = NULL; SNMP_FREE(confmibs); SNMP_FREE(confmibdir); }
Base
1
int mif_validate(jas_stream_t *in) { uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; }
Base
1
static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void *obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); int len, ret; len = ceph_decode_32(p); if (*p + len > end) return -EINVAL; dout("ceph_x_decrypt len %d\n", len); ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) return -EPERM; *p += len; return olen; }
Class
2
videobuf_vm_close(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; struct videobuf_queue *q = map->q; int i; dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map, map->count,vma->vm_start,vma->vm_end); map->count--; if (0 == map->count) { dprintk(1,"munmap %p q=%p\n",map,q); mutex_lock(&q->lock); for (i = 0; i < VIDEO_MAX_FRAME; i++) { if (NULL == q->bufs[i]) continue; if (q->bufs[i]->map != map) continue; q->ops->buf_release(q,q->bufs[i]); q->bufs[i]->map = NULL; q->bufs[i]->baddr = 0; } mutex_unlock(&q->lock); kfree(map); } return; }
Class
2
vhost_scsi_make_tpg(struct se_wwn *wwn, struct config_group *group, const char *name) { struct vhost_scsi_tport *tport = container_of(wwn, struct vhost_scsi_tport, tport_wwn); struct vhost_scsi_tpg *tpg; unsigned long tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) return ERR_PTR(-EINVAL); tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL); if (!tpg) { pr_err("Unable to allocate struct vhost_scsi_tpg"); return ERR_PTR(-ENOMEM); } mutex_init(&tpg->tv_tpg_mutex); INIT_LIST_HEAD(&tpg->tv_tpg_list); tpg->tport = tport; tpg->tport_tpgt = tpgt; ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; } mutex_lock(&vhost_scsi_mutex); list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list); mutex_unlock(&vhost_scsi_mutex); return &tpg->se_tpg; }
Class
2
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
Class
2
static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len) { struct btrfs_dir_item *dir_item; unsigned long name_ptr; u32 total_len; u32 cur = 0; u32 this_len; struct extent_buffer *leaf; leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); if (verify_dir_item(root, leaf, dir_item)) return NULL; total_len = btrfs_item_size_nr(leaf, path->slots[0]); while (cur < total_len) { this_len = sizeof(*dir_item) + btrfs_dir_name_len(leaf, dir_item) + btrfs_dir_data_len(leaf, dir_item); name_ptr = (unsigned long)(dir_item + 1); if (btrfs_dir_name_len(leaf, dir_item) == name_len && memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) return dir_item; cur += this_len; dir_item = (struct btrfs_dir_item *)((char *)dir_item + this_len); } return NULL; }
Class
2
htmlGetText(tree_t *t) /* I - Tree to pick */ { uchar *s, // String *s2, // New string *tdata = NULL, // Temporary string data *talloc = NULL; // Allocated string data size_t slen, // Length of string tlen; // Length of node string // Loop through all of the nodes in the tree and collect text... slen = 0; s = NULL; while (t != NULL) { if (t->child) tdata = talloc = htmlGetText(t->child); else tdata = t->data; if (tdata != NULL) { // Add the text to this string... tlen = strlen((char *)tdata); if (s) s2 = (uchar *)realloc(s, 1 + slen + tlen); else s2 = (uchar *)malloc(1 + tlen); if (!s2) break; s = s2; memcpy((char *)s + slen, (char *)tdata, tlen); slen += tlen; if (talloc) { free(talloc); talloc = NULL; } } t = t->next; } if (slen) s[slen] = '\0'; if (talloc) free(talloc); return (s); }
Base
1
get_visual_text( cmdarg_T *cap, char_u **pp, // return: start of selected text int *lenp) // return: length of selected text { if (VIsual_mode != 'V') unadjust_for_sel(); if (VIsual.lnum != curwin->w_cursor.lnum) { if (cap != NULL) clearopbeep(cap->oap); return FAIL; } if (VIsual_mode == 'V') { *pp = ml_get_curline(); *lenp = (int)STRLEN(*pp); } else { if (LT_POS(curwin->w_cursor, VIsual)) { *pp = ml_get_pos(&curwin->w_cursor); *lenp = VIsual.col - curwin->w_cursor.col + 1; } else { *pp = ml_get_pos(&VIsual); *lenp = curwin->w_cursor.col - VIsual.col + 1; } if (has_mbyte) // Correct the length to include the whole last character. *lenp += (*mb_ptr2len)(*pp + (*lenp - 1)) - 1; } reset_VIsual_and_resel(); return OK; }
Variant
0
irc_server_set_prefix_modes_chars (struct t_irc_server *server, const char *prefix) { char *pos; int i, length_modes, length_chars; if (!server || !prefix) return; /* free previous values */ if (server->prefix_modes) { free (server->prefix_modes); server->prefix_modes = NULL; } if (server->prefix_chars) { free (server->prefix_chars); server->prefix_chars = NULL; } /* assign new values */ pos = strchr (prefix, ')'); if (pos) { server->prefix_modes = weechat_strndup (prefix + 1, pos - prefix - 1); if (server->prefix_modes) { pos++; length_modes = strlen (server->prefix_modes); length_chars = strlen (pos); server->prefix_chars = malloc (length_modes + 1); if (server->prefix_chars) { for (i = 0; i < length_modes; i++) { server->prefix_chars[i] = (i < length_chars) ? pos[i] : ' '; } server->prefix_chars[length_modes] = '\0'; } else { free (server->prefix_modes); server->prefix_modes = NULL; } } } }
Base
1
static int search_old_relocation(struct reloc_struct_t *reloc_table, ut32 addr_to_patch, int n_reloc) { int i; for (i = 0; i < n_reloc; i++) { if (addr_to_patch == reloc_table[i].data_offset) { return i; } } return -1; }
Class
2
fm_mgr_config_mgr_connect ( fm_config_conx_hdl *hdl, fm_mgr_type_t mgr ) { char s_path[256]; char c_path[256]; char *mgr_prefix; p_hsm_com_client_hdl_t *mgr_hdl; pid_t pid; memset(s_path,0,sizeof(s_path)); memset(c_path,0,sizeof(c_path)); pid = getpid(); switch ( mgr ) { case FM_MGR_SM: mgr_prefix = HSM_FM_SCK_SM; mgr_hdl = &hdl->sm_hdl; break; case FM_MGR_PM: mgr_prefix = HSM_FM_SCK_PM; mgr_hdl = &hdl->pm_hdl; break; case FM_MGR_FE: mgr_prefix = HSM_FM_SCK_FE; mgr_hdl = &hdl->fe_hdl; break; default: return FM_CONF_INIT_ERR; } // Fill in the paths for the server and client sockets. sprintf(s_path,"%s%s%d",HSM_FM_SCK_PREFIX,mgr_prefix,hdl->instance); sprintf(c_path,"%s%s%d_C_%lu",HSM_FM_SCK_PREFIX,mgr_prefix, hdl->instance, (long unsigned)pid); if ( *mgr_hdl == NULL ) { if ( hcom_client_init(mgr_hdl,s_path,c_path,32768) != HSM_COM_OK ) { return FM_CONF_INIT_ERR; } } if ( hcom_client_connect(*mgr_hdl) == HSM_COM_OK ) { hdl->conx_mask |= mgr; return FM_CONF_OK; } return FM_CONF_CONX_ERR; }
Class
2
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; u64 value; u8 freg; int flag; struct fpustate *f = FPUSTATE; if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { int asi = decode_asi(insn, regs); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); value = 0; flag = (freg < 32) ? FPRS_DL : FPRS_DU; if ((asi > ASI_SNFL) || (asi < ASI_P)) goto daex; save_and_clear_fpu(); if (current_thread_info()->fpsaved[0] & flag) value = *(u64 *)&f->regs[freg]; switch (asi) { case ASI_P: case ASI_S: break; case ASI_PL: case ASI_SL: value = __swab64p(&value); break; default: goto daex; } if (put_user (value >> 32, (u32 __user *) sfar) || __put_user ((u32)value, (u32 __user *)(sfar + 4))) goto daex; } else { daex: if (tlb_type == hypervisor) sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); return; } advance(regs); }
Class
2
archive_write_disk_set_acls(struct archive *a, int fd, const char *name, struct archive_acl *abstract_acl, __LA_MODE_T mode) { int ret = ARCHIVE_OK; #if !ARCHIVE_ACL_LIBRICHACL (void)mode; /* UNUSED */ #endif #if ARCHIVE_ACL_LIBRICHACL if ((archive_acl_types(abstract_acl) & ARCHIVE_ENTRY_ACL_TYPE_NFS4) != 0) { ret = set_richacl(a, fd, name, abstract_acl, mode, ARCHIVE_ENTRY_ACL_TYPE_NFS4, "nfs4"); } #if ARCHIVE_ACL_LIBACL else #endif #endif /* ARCHIVE_ACL_LIBRICHACL */ #if ARCHIVE_ACL_LIBACL if ((archive_acl_types(abstract_acl) & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) { if ((archive_acl_types(abstract_acl) & ARCHIVE_ENTRY_ACL_TYPE_ACCESS) != 0) { ret = set_acl(a, fd, name, abstract_acl, ARCHIVE_ENTRY_ACL_TYPE_ACCESS, "access"); if (ret != ARCHIVE_OK) return (ret); } if ((archive_acl_types(abstract_acl) & ARCHIVE_ENTRY_ACL_TYPE_DEFAULT) != 0) ret = set_acl(a, fd, name, abstract_acl, ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, "default"); } #endif /* ARCHIVE_ACL_LIBACL */ return (ret); }
Base
1
get_princs_2_svc(gprincs_arg *arg, struct svc_req *rqstp) { static gprincs_ret ret; char *prime_arg; gss_buffer_desc client_name, service_name; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_gprincs_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } prime_arg = arg->exp; if (prime_arg == NULL) prime_arg = "*"; if (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_LIST, NULL, NULL)) { ret.code = KADM5_AUTH_LIST; log_unauth("kadm5_get_principals", prime_arg, &client_name, &service_name, rqstp); } else { ret.code = kadm5_get_principals((void *)handle, arg->exp, &ret.princs, &ret.count); if( ret.code != 0 ) errmsg = krb5_get_error_message(handle->context, ret.code); log_done("kadm5_get_principals", prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); exit_func: free_server_handle(handle); return &ret; }
Base
1
static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI2outR2: calculating g^{xy}, sending R2")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (dh->md) release_md(dh->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == dh->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI2outR2_tail(pcrc, r); if ( e > STF_FAIL) { /* we do not send a notify because we are the initiator that could be responding to an error notification */ int v2_notify_num = e - STF_FAIL; DBG_log( "ikev2_parent_inI2outR2_tail returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num)); } else if ( e != STF_OK) { DBG_log("ikev2_parent_inI2outR2_tail returned %s", enum_name(&stfstatus_name, e)); } if (dh->md != NULL) { complete_v2_state_transition(&dh->md, e); if (dh->md) release_md(dh->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); }
Class
2
static int nfc_genl_deactivate_target(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; u32 device_idx, target_idx; int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(device_idx); if (!dev) return -ENODEV; target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); rc = nfc_deactivate_target(dev, target_idx, NFC_TARGET_MODE_SLEEP); nfc_put_device(dev); return rc; }
Base
1
static int setup_ttydir_console(const struct lxc_rootfs *rootfs, const struct lxc_console *console, char *ttydir) { char path[MAXPATHLEN], lxcpath[MAXPATHLEN]; int ret; /* create rootfs/dev/<ttydir> directory */ ret = snprintf(path, sizeof(path), "%s/dev/%s", rootfs->mount, ttydir); if (ret >= sizeof(path)) return -1; ret = mkdir(path, 0755); if (ret && errno != EEXIST) { SYSERROR("failed with errno %d to create %s", errno, path); return -1; } INFO("created %s", path); ret = snprintf(lxcpath, sizeof(lxcpath), "%s/dev/%s/console", rootfs->mount, ttydir); if (ret >= sizeof(lxcpath)) { ERROR("console path too long"); return -1; } snprintf(path, sizeof(path), "%s/dev/console", rootfs->mount); ret = unlink(path); if (ret && errno != ENOENT) { SYSERROR("error unlinking %s", path); return -1; } ret = creat(lxcpath, 0660); if (ret==-1 && errno != EEXIST) { SYSERROR("error %d creating %s", errno, lxcpath); return -1; } if (ret >= 0) close(ret); if (console->master < 0) { INFO("no console"); return 0; } if (mount(console->name, lxcpath, "none", MS_BIND, 0)) { ERROR("failed to mount '%s' on '%s'", console->name, lxcpath); return -1; } /* create symlink from rootfs/dev/console to 'lxc/console' */ ret = snprintf(lxcpath, sizeof(lxcpath), "%s/console", ttydir); if (ret >= sizeof(lxcpath)) { ERROR("lxc/console path too long"); return -1; } ret = symlink(lxcpath, path); if (ret) { SYSERROR("failed to create symlink for console"); return -1; } INFO("console has been setup on %s", lxcpath); return 0; }
Base
1
mcs_parse_domain_params(STREAM s) { int length; ber_parse_header(s, MCS_TAG_DOMAIN_PARAMS, &length); in_uint8s(s, length); return s_check(s); }
Base
1
static bigint *sig_verify(BI_CTX *ctx, const uint8_t *sig, int sig_len, bigint *modulus, bigint *pub_exp) { int i, size; bigint *decrypted_bi, *dat_bi; bigint *bir = NULL; uint8_t *block = (uint8_t *)malloc(sig_len); /* decrypt */ dat_bi = bi_import(ctx, sig, sig_len); ctx->mod_offset = BIGINT_M_OFFSET; /* convert to a normal block */ decrypted_bi = bi_mod_power2(ctx, dat_bi, modulus, pub_exp); bi_export(ctx, decrypted_bi, block, sig_len); ctx->mod_offset = BIGINT_M_OFFSET; i = 10; /* start at the first possible non-padded byte */ while (block[i++] && i < sig_len); size = sig_len - i; /* get only the bit we want */ if (size > 0) { int len; const uint8_t *sig_ptr = get_signature(&block[i], &len); if (sig_ptr) { bir = bi_import(ctx, sig_ptr, len); } } free(block); /* save a few bytes of memory */ bi_clear_cache(ctx); return bir; }
Base
1
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *uuid, u8 *fsid) { struct btrfs_device *device; while (fs_devices) { if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) { device = find_device(fs_devices, devid, uuid); if (device) return device; } fs_devices = fs_devices->seed; } return NULL; }
Base
1
MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) { bson_iterator it; bson_oid_t id; bson gte; bson query; bson orderby; bson command; mongo_cursor *cursor; bson_find( &it, gfile->meta, "_id" ); id = *bson_iterator_oid( &it ); bson_init( &query ); bson_append_oid( &query, "files_id", &id ); if ( size == 1 ) { bson_append_int( &query, "n", start ); } else { bson_init( &gte ); bson_append_int( &gte, "$gte", start ); bson_finish( &gte ); bson_append_bson( &query, "n", &gte ); bson_destroy( &gte ); } bson_finish( &query ); bson_init( &orderby ); bson_append_int( &orderby, "n", 1 ); bson_finish( &orderby ); bson_init( &command ); bson_append_bson( &command, "query", &query ); bson_append_bson( &command, "orderby", &orderby ); bson_finish( &command ); cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns, &command, NULL, size, 0, 0 ); bson_destroy( &command ); bson_destroy( &query ); bson_destroy( &orderby ); return cursor; }
Base
1
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); u64 amount = 0, min_amount, expires; int expires_seq; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { start_cfs_bandwidth(cfs_b); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); cfs_b->runtime -= amount; cfs_b->idle = 0; } } expires_seq = cfs_b->expires_seq; expires = cfs_b->runtime_expires; raw_spin_unlock(&cfs_b->lock); cfs_rq->runtime_remaining += amount; /* * we may have advanced our local expiration to account for allowed * spread between our sched_clock and the one on which runtime was * issued. */ if (cfs_rq->expires_seq != expires_seq) { cfs_rq->expires_seq = expires_seq; cfs_rq->runtime_expires = expires; } return cfs_rq->runtime_remaining > 0; }
Class
2
int rm_rf_child(int fd, const char *name, RemoveFlags flags) { /* Removes one specific child of the specified directory */ if (fd < 0) return -EBADF; if (!filename_is_valid(name)) return -EINVAL; if ((flags & (REMOVE_ROOT|REMOVE_MISSING_OK)) != 0) /* Doesn't really make sense here, we are not supposed to remove 'fd' anyway */ return -EINVAL; if (FLAGS_SET(flags, REMOVE_ONLY_DIRECTORIES|REMOVE_SUBVOLUME)) return -EINVAL; return rm_rf_children_inner(fd, name, -1, flags, NULL); }
Class
2
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct extent_tree *et; struct extent_node *en; struct extent_info ei; if (!f2fs_may_extent_tree(inode)) { /* drop largest extent */ if (i_ext && i_ext->len) { i_ext->len = 0; return true; } return false; } et = __grab_extent_tree(inode); if (!i_ext || !i_ext->len) return false; get_extent_info(&ei, i_ext); write_lock(&et->lock); if (atomic_read(&et->node_cnt)) goto out; en = __init_extent_tree(sbi, et, &ei); if (en) { spin_lock(&sbi->extent_lock); list_add_tail(&en->list, &sbi->extent_list); spin_unlock(&sbi->extent_lock); } out: write_unlock(&et->lock); return false; }
Class
2
idn2_to_ascii_4i (const uint32_t * input, size_t inlen, char * output, int flags) { uint32_t *input_u32; uint8_t *input_u8, *output_u8; size_t length; int rc; if (!input) { if (output) *output = 0; return IDN2_OK; } input_u32 = (uint32_t *) malloc ((inlen + 1) * sizeof(uint32_t)); if (!input_u32) return IDN2_MALLOC; u32_cpy (input_u32, input, inlen); input_u32[inlen] = 0; input_u8 = u32_to_u8 (input_u32, inlen + 1, NULL, &length); free (input_u32); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, &output_u8, flags); free (input_u8); if (rc == IDN2_OK) { /* wow, this is ugly, but libidn manpage states: * char * out output zero terminated string that must have room for at * least 63 characters plus the terminating zero. */ if (output) strcpy (output, (const char *) output_u8); free(output_u8); } return rc; }
Base
1
static void vgacon_scrollback_startup(void) { vgacon_scrollback_cur = &vgacon_scrollbacks[0]; vgacon_scrollback_init(0); }
Base
1
void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps) { struct sock *sk = skb->sk; if (!skb_may_tx_timestamp(sk, false)) return; /* Take a reference to prevent skb_orphan() from freeing the socket, * but only if the socket refcount is not zero. */ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { *skb_hwtstamps(skb) = *hwtstamps; __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); sock_put(sk); }
Base
1
void cJSON_AddItemReferenceToObject( cJSON *object, const char *string, cJSON *item ) { cJSON_AddItemToObject( object, string, create_reference( item ) ); }
Base
1
static int install_process_keyring(void) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; ret = install_process_keyring_to_cred(new); if (ret < 0) { abort_creds(new); return ret != -EEXIST ? ret : 0; } return commit_creds(new); }
Class
2
static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf, int bufsize) { /* If this function is being called, the buffer should not have been initialized yet. */ assert(!stream->bufbase_); if (bufmode != JAS_STREAM_UNBUF) { /* The full- or line-buffered mode is being employed. */ if (!buf) { /* The caller has not specified a buffer to employ, so allocate one. */ if ((stream->bufbase_ = jas_malloc(JAS_STREAM_BUFSIZE + JAS_STREAM_MAXPUTBACK))) { stream->bufmode_ |= JAS_STREAM_FREEBUF; stream->bufsize_ = JAS_STREAM_BUFSIZE; } else { /* The buffer allocation has failed. Resort to unbuffered operation. */ stream->bufbase_ = stream->tinybuf_; stream->bufsize_ = 1; } } else { /* The caller has specified a buffer to employ. */ /* The buffer must be large enough to accommodate maximum putback. */ assert(bufsize > JAS_STREAM_MAXPUTBACK); stream->bufbase_ = JAS_CAST(uchar *, buf); stream->bufsize_ = bufsize - JAS_STREAM_MAXPUTBACK; } } else { /* The unbuffered mode is being employed. */ /* A buffer should not have been supplied by the caller. */ assert(!buf); /* Use a trivial one-character buffer. */ stream->bufbase_ = stream->tinybuf_; stream->bufsize_ = 1; } stream->bufstart_ = &stream->bufbase_[JAS_STREAM_MAXPUTBACK]; stream->ptr_ = stream->bufstart_; stream->cnt_ = 0; stream->bufmode_ |= bufmode & JAS_STREAM_BUFMODEMASK; }
Class
2
accept_xsmp_connection (SmsConn sms_conn, GsmXsmpServer *server, unsigned long *mask_ret, SmsCallbacks *callbacks_ret, char **failure_reason_ret) { IceConn ice_conn; GsmXSMPClient *client; /* FIXME: what about during shutdown but before gsm_xsmp_shutdown? */ if (server->priv->xsmp_sockets == NULL) { g_debug ("GsmXsmpServer: In shutdown, rejecting new client"); *failure_reason_ret = strdup (_("Refusing new client connection because the session is currently being shut down\n")); return FALSE; } ice_conn = SmsGetIceConnection (sms_conn); client = ice_conn->context; g_return_val_if_fail (client != NULL, TRUE); gsm_xsmp_client_connect (client, sms_conn, mask_ret, callbacks_ret); return TRUE; }
Base
1
int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype, pgpDigParams * ret) { const uint8_t *p = pkts; const uint8_t *pend = pkts + pktlen; pgpDigParams digp = NULL; struct pgpPkt pkt; int rc = -1; /* assume failure */ while (p < pend) { if (decodePkt(p, (pend - p), &pkt)) break; if (digp == NULL) { if (pkttype && pkt.tag != pkttype) { break; } else { digp = pgpDigParamsNew(pkt.tag); } } if (pgpPrtPkt(&pkt, digp)) break; p += (pkt.body - pkt.head) + pkt.blen; if (pkttype == PGPTAG_SIGNATURE) break; } rc = (digp && (p == pend)) ? 0 : -1; if (ret && rc == 0) { *ret = digp; } else { pgpDigParamsFree(digp); } return rc; }
Base
1
archive_read_format_zip_cleanup(struct archive_read *a) { struct zip *zip; struct zip_entry *zip_entry, *next_zip_entry; zip = (struct zip *)(a->format->data); #ifdef HAVE_ZLIB_H if (zip->stream_valid) inflateEnd(&zip->stream); #endif #if HAVA_LZMA_H && HAVE_LIBLZMA if (zip->zipx_lzma_valid) { lzma_end(&zip->zipx_lzma_stream); } #endif #ifdef HAVE_BZLIB_H if (zip->bzstream_valid) { BZ2_bzDecompressEnd(&zip->bzstream); } #endif free(zip->uncompressed_buffer); if (zip->ppmd8_valid) __archive_ppmd8_functions.Ppmd8_Free(&zip->ppmd8); if (zip->zip_entries) { zip_entry = zip->zip_entries; while (zip_entry != NULL) { next_zip_entry = zip_entry->next; archive_string_free(&zip_entry->rsrcname); free(zip_entry); zip_entry = next_zip_entry; } } free(zip->decrypted_buffer); if (zip->cctx_valid) archive_decrypto_aes_ctr_release(&zip->cctx); if (zip->hctx_valid) archive_hmac_sha1_cleanup(&zip->hctx); free(zip->iv); free(zip->erd); free(zip->v_data); archive_string_free(&zip->format_name); free(zip); (a->format->data) = NULL; return (ARCHIVE_OK); }
Variant
0
uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; }
Class
2
int common_timer_set(struct k_itimer *timr, int flags, struct itimerspec64 *new_setting, struct itimerspec64 *old_setting) { const struct k_clock *kc = timr->kclock; bool sigev_none; ktime_t expires; if (old_setting) common_timer_get(timr, old_setting); /* Prevent rearming by clearing the interval */ timr->it_interval = 0; /* * Careful here. On SMP systems the timer expiry function could be * active and spinning on timr->it_lock. */ if (kc->timer_try_to_cancel(timr) < 0) return TIMER_RETRY; timr->it_active = 0; timr->it_requeue_pending = (timr->it_requeue_pending + 2) & ~REQUEUE_PENDING; timr->it_overrun_last = 0; /* Switch off the timer when it_value is zero */ if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) return 0; timr->it_interval = timespec64_to_ktime(new_setting->it_interval); expires = timespec64_to_ktime(new_setting->it_value); sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); timr->it_active = !sigev_none; return 0; }
Base
1
static void read_module(RBuffer *b, ut64 addr, struct minidump_module *module) { st64 o_addr = r_buf_seek (b, 0, R_BUF_CUR); r_buf_seek (b, addr, R_BUF_SET); module->base_of_image = r_buf_read_le64 (b); module->size_of_image = r_buf_read_le32 (b); module->check_sum = r_buf_read_le32 (b); module->time_date_stamp = r_buf_read_le32 (b); module->module_name_rva = r_buf_read_le32 (b); module->version_info.dw_signature = r_buf_read_le32 (b); module->version_info.dw_struc_version = r_buf_read_le32 (b); module->version_info.dw_file_version_ms = r_buf_read_le32 (b); module->version_info.dw_file_version_ls = r_buf_read_le32 (b); module->version_info.dw_product_version_ms = r_buf_read_le32 (b); module->version_info.dw_product_version_ls = r_buf_read_le32 (b); module->version_info.dw_file_flags_mask = r_buf_read_le32 (b); module->version_info.dw_file_flags = r_buf_read_le32 (b); module->version_info.dw_file_os = r_buf_read_le32 (b); module->version_info.dw_file_type = r_buf_read_le32 (b); module->version_info.dw_file_subtype = r_buf_read_le32 (b); module->version_info.dw_file_date_ms = r_buf_read_le32 (b); module->version_info.dw_file_date_ls = r_buf_read_le32 (b); module->cv_record.data_size = r_buf_read_le32 (b); module->cv_record.rva = r_buf_read_le32 (b); module->misc_record.data_size = r_buf_read_le32 (b); module->misc_record.rva = r_buf_read_le32 (b); module->reserved_0 = r_buf_read_le64 (b); module->reserved_1 = r_buf_read_le64 (b); r_buf_seek (b, o_addr, R_BUF_SET); }
Class
2
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pud_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
Variant
0
int ntlm_read_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->Len > 0) { if ((fields->BufferOffset + fields->Len) > Stream_Length(s)) return -1; fields->Buffer = (PBYTE) malloc(fields->Len); if (!fields->Buffer) return -1; Stream_SetPosition(s, fields->BufferOffset); Stream_Read(s, fields->Buffer, fields->Len); } return 1; }
Base
1
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; unsigned long flags; int ret; spin_lock_irqsave(&dev->lock, flags); buf[0] = CP2112_GPIO_SET; buf[1] = value ? 0xff : 0; buf[2] = 1 << offset; ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) hid_err(hdev, "error setting GPIO values: %d\n", ret); spin_unlock_irqrestore(&dev->lock, flags); }
Class
2
AsyncWith(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncWith_kind; p->v.AsyncWith.items = items; p->v.AsyncWith.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; }
Base
1
static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return PyUnicode_FromUnicode(NULL, 0); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } }
Base
1
static int pfkey_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *skb; int copied, err; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; msg->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; }
Class
2
static Jsi_RC jsi_ArraySliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart; Jsi_Obj *nobj, *obj; Jsi_Value *start = Jsi_ValueArrayIndex(interp, args, 0), *end = Jsi_ValueArrayIndex(interp, args, 1); if (!start) { goto bail; } obj = _this->d.obj; n = Jsi_ObjGetLength(interp, obj); if (Jsi_GetNumberFromValue(interp,start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto done; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { done: Jsi_ValueMakeArrayObject(interp, ret, Jsi_ObjNewType(interp, JSI_OT_ARRAY)); return JSI_OK; } Jsi_Number nend; iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if (nsiz<=0) goto done; Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { rc = Jsi_LogError("index too large: %d", nsiz); goto bail; } int i, m; for (m = 0, i = istart; i <= iend; i++, m++) { if (!obj->arr[i]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, nsiz); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; }
Base
1
pci_get_cfgdata16(struct pci_vdev *dev, int offset) { assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0); return (*(uint16_t *)(dev->cfgdata + offset)); }
Base
1
void rose_start_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; add_timer(&sk->sk_timer); }
Variant
0
double GetGPMFSampleRateAndTimes(size_t handle, GPMF_stream *gs, double rate, uint32_t index, double *in, double *out) { mp4object *mp4 = (mp4object *)handle; if (mp4 == NULL) return 0.0; uint32_t key, insamples; uint32_t repeat, outsamples; GPMF_stream find_stream; if (gs == NULL || mp4->metaoffsets == 0 || mp4->indexcount == 0 || mp4->basemetadataduration == 0 || mp4->meta_clockdemon == 0 || in == NULL || out == NULL) return 0.0; key = GPMF_Key(gs); repeat = GPMF_Repeat(gs); if (rate == 0.0) rate = GetGPMFSampleRate(handle, key, GPMF_SAMPLE_RATE_FAST); if (rate == 0.0) { *in = *out = 0.0; return 0.0; } GPMF_CopyState(gs, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_TOTAL_SAMPLES, GPMF_CURRENT_LEVEL)) { outsamples = BYTESWAP32(*(uint32_t *)GPMF_RawData(&find_stream)); insamples = outsamples - repeat; *in = ((double)insamples / (double)rate); *out = ((double)outsamples / (double)rate); } else { // might too costly in some applications read all the samples to determine the clock jitter, here I return the estimate from the MP4 track. *in = ((double)index * (double)mp4->basemetadataduration / (double)mp4->meta_clockdemon); *out = ((double)(index + 1) * (double)mp4->basemetadataduration / (double)mp4->meta_clockdemon); } return rate; }
Base
1
int MSG_ReadBits( msg_t *msg, int bits ) {
    int value;
    int get;
    qboolean sgn;
    int i, nbits;
//  FILE* fp;

    value = 0;

    if ( bits < 0 ) {
        bits = -bits;
        sgn = qtrue;
    } else {
        sgn = qfalse;
    }

    if (msg->oob) {
        if(bits==8)
        {
            value = msg->data[msg->readcount];
            msg->readcount += 1;
            msg->bit += 8;
        }
        else if(bits==16)
        {
            short temp;

            CopyLittleShort(&temp, &msg->data[msg->readcount]);
            value = temp;
            msg->readcount += 2;
            msg->bit += 16;
        }
        else if(bits==32)
        {
            CopyLittleLong(&value, &msg->data[msg->readcount]);
            msg->readcount += 4;
            msg->bit += 32;
        }
        else
            Com_Error(ERR_DROP, "can't read %d bits", bits);
    } else {
        nbits = 0;
        if (bits&7) {
            nbits = bits&7;
            for(i=0;i<nbits;i++) {
                value |= (Huff_getBit(msg->data, &msg->bit)<<i);
            }
            bits = bits - nbits;
        }
        if (bits) {
//          fp = fopen("c:\\netchan.bin", "a");
            for(i=0;i<bits;i+=8) {
                Huff_offsetReceive (msgHuff.decompressor.tree, &get, msg->data, &msg->bit);
//              fwrite(&get, 1, 1, fp);
                value |= (get<<(i+nbits));
            }
//          fclose(fp);
        }
        msg->readcount = (msg->bit>>3)+1;
    }
    if ( sgn && bits > 0 && bits < 32 ) {
        if ( value & ( 1 << ( bits - 1 ) ) ) {
            value |= -1 ^ ( ( 1 << bits ) - 1 );
        }
    }

    return value;
}
Class
2
void luaT_adjustvarargs (lua_State *L, int nfixparams, CallInfo *ci,
                         const Proto *p) {
    int i;
    int actual = cast_int(L->top - ci->func) - 1;  /* number of arguments */
    int nextra = actual - nfixparams;  /* number of extra arguments */
    ci->u.l.nextraargs = nextra;
    checkstackGC(L, p->maxstacksize + 1);
    /* copy function to the top of the stack */
    setobjs2s(L, L->top++, ci->func);
    /* move fixed parameters to the top of the stack */
    for (i = 1; i <= nfixparams; i++) {
        setobjs2s(L, L->top++, ci->func + i);
        setnilvalue(s2v(ci->func + i));  /* erase original parameter (for GC) */
    }
    ci->func += actual + 1;
    ci->top += actual + 1;
    lua_assert(L->top <= ci->top && ci->top <= L->stack_last);
}
Variant
0
void unix_inflight(struct file *fp)
{
    struct sock *s = unix_get_socket(fp);

    if (s) {
        struct unix_sock *u = unix_sk(s);

        spin_lock(&unix_gc_lock);

        if (atomic_long_inc_return(&u->inflight) == 1) {
            BUG_ON(!list_empty(&u->link));
            list_add_tail(&u->link, &gc_inflight_list);
        } else {
            BUG_ON(list_empty(&u->link));
        }
        unix_tot_inflight++;
        spin_unlock(&unix_gc_lock);
    }
}
Class
2
static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,
    Jsi_Value **ret, Jsi_Func *funcPtr)
{
    if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj))
        return Jsi_LogError("expected array object");
    int i = Jsi_ObjGetLength(interp, _this->d.obj);
    Jsi_ValueMakeNumber(interp, ret, i);
    return JSI_OK;
}
Base
1
static bool parse_reconnect(struct pool *pool, json_t *val)
{
    char *sockaddr_url, *stratum_port, *tmp;
    char *url, *port, address[256];

    memset(address, 0, 255);
    url = (char *)json_string_value(json_array_get(val, 0));
    if (!url)
        url = pool->sockaddr_url;
    else {
        char *dot_pool, *dot_reconnect;

        dot_pool = strchr(pool->sockaddr_url, '.');
        if (!dot_pool) {
            applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", pool->sockaddr_url);
            return false;
        }
        dot_reconnect = strchr(url, '.');
        if (!dot_reconnect) {
            applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", url);
            return false;
        }
        if (strcmp(dot_pool, dot_reconnect)) {
            applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", pool->sockaddr_url);
            return false;
        }
    }

    port = (char *)json_string_value(json_array_get(val, 1));
    if (!port)
        port = pool->stratum_port;

    sprintf(address, "%s:%s", url, port);

    if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
        return false;

    applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address);

    clear_pool_work(pool);

    mutex_lock(&pool->stratum_lock);
    __suspend_stratum(pool);
    tmp = pool->sockaddr_url;
    pool->sockaddr_url = sockaddr_url;
    pool->stratum_url = pool->sockaddr_url;
    free(tmp);

    tmp = pool->stratum_port;
    pool->stratum_port = stratum_port;
    free(tmp);
    mutex_unlock(&pool->stratum_lock);

    if (!restart_stratum(pool)) {
        pool_failed(pool);
        return false;
    }

    return true;
}
Class
2
static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
                               const struct in6_addr *dst,
                               const struct in6_addr *src)
{
    u32 hash, id;

    hash = __ipv6_addr_jhash(dst, hashrnd);
    hash = __ipv6_addr_jhash(src, hash);
    hash ^= net_hash_mix(net);

    /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
     * set the hight order instead thus minimizing possible future
     * collisions.
     */
    id = ip_idents_reserve(hash, 1);
    if (unlikely(!id))
        id = 1 << 31;

    return id;
}
Class
2
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                    struct msghdr *msg, size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    size_t copied;
    int err;

    BT_DBG("sock %p sk %p len %zu", sock, sk, len);

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb) {
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
            msg->msg_namelen = 0;
            return 0;
        }
        return err;
    }

    copied = skb->len;
    if (len < copied) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err == 0) {
        sock_recv_ts_and_drops(msg, sk, skb);

        if (bt_sk(sk)->skb_msg_name)
            bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
                                    &msg->msg_namelen);
        else
            msg->msg_namelen = 0;
    }

    skb_free_datagram(sk, skb);

    return err ? : copied;
}
Class
2
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial)
{
    cac_private_data_t * priv = CAC_DATA(card);

    SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL);
    if (card->serialnr.len) {
        *serial = card->serialnr;
        SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
    }
    if (priv->cac_id_len) {
        serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR);
        memcpy(serial->value, priv->cac_id, priv->cac_id_len);
        SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
    }
    SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND);
}
Class
2
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
    struct k_itimer *timr;
    unsigned long flags;
    int si_private = 0;
    enum hrtimer_restart ret = HRTIMER_NORESTART;

    timr = container_of(timer, struct k_itimer, it.real.timer);
    spin_lock_irqsave(&timr->it_lock, flags);

    timr->it_active = 0;
    if (timr->it_interval != 0)
        si_private = ++timr->it_requeue_pending;

    if (posix_timer_event(timr, si_private)) {
        /*
         * signal was not sent because of sig_ignor
         * we will not get a call back to restart it AND
         * it should be restarted.
         */
        if (timr->it_interval != 0) {
            ktime_t now = hrtimer_cb_get_time(timer);

            /*
             * FIXME: What we really want, is to stop this
             * timer completely and restart it in case the
             * SIG_IGN is removed. This is a non trivial
             * change which involves sighand locking
             * (sigh !), which we don't want to do late in
             * the release cycle.
             *
             * For now we just let timers with an interval
             * less than a jiffie expire every jiffie to
             * avoid softirq starvation in case of SIG_IGN
             * and a very small interval, which would put
             * the timer right back on the softirq pending
             * list. By moving now ahead of time we trick
             * hrtimer_forward() to expire the timer
             * later, while we still maintain the overrun
             * accuracy, but have some inconsistency in
             * the timer_gettime() case. This is at least
             * better than a starved softirq. A more
             * complex fix which solves also another related
             * inconsistency is already in the pipeline.
             */
#ifdef CONFIG_HIGH_RES_TIMERS
            {
                ktime_t kj = NSEC_PER_SEC / HZ;

                if (timr->it_interval < kj)
                    now = ktime_add(now, kj);
            }
#endif
            timr->it_overrun += (unsigned int)
                hrtimer_forward(timer, now, timr->it_interval);
            ret = HRTIMER_RESTART;
            ++timr->it_requeue_pending;
            timr->it_active = 1;
        }
    }

    unlock_timer(timr, flags);
    return ret;
}
Base
1
spnego_gss_set_sec_context_option(
        OM_uint32 *minor_status,
        gss_ctx_id_t *context_handle,
        const gss_OID desired_object,
        const gss_buffer_t value)
{
    OM_uint32 ret;
    ret = gss_set_sec_context_option(minor_status,
                                     context_handle,
                                     desired_object,
                                     value);
    return (ret);
}
Base
1
fp_readl(char *s, int size, struct tok_state *tok)
{
    PyObject* bufobj;
    const char *buf;
    Py_ssize_t buflen;

    /* Ask for one less byte so we can terminate it */
    assert(size > 0);
    size--;

    if (tok->decoding_buffer) {
        bufobj = tok->decoding_buffer;
        Py_INCREF(bufobj);
    }
    else {
        bufobj = PyObject_CallObject(tok->decoding_readline, NULL);
        if (bufobj == NULL)
            goto error;
    }
    if (PyUnicode_CheckExact(bufobj)) {
        buf = PyUnicode_AsUTF8AndSize(bufobj, &buflen);
        if (buf == NULL) {
            goto error;
        }
    }
    else {
        buf = PyByteArray_AsString(bufobj);
        if (buf == NULL) {
            goto error;
        }
        buflen = PyByteArray_GET_SIZE(bufobj);
    }

    Py_XDECREF(tok->decoding_buffer);
    if (buflen > size) {
        /* Too many chars, the rest goes into tok->decoding_buffer */
        tok->decoding_buffer = PyByteArray_FromStringAndSize(buf+size,
                                                             buflen-size);
        if (tok->decoding_buffer == NULL)
            goto error;
        buflen = size;
    }
    else
        tok->decoding_buffer = NULL;

    memcpy(s, buf, buflen);
    s[buflen] = '\0';
    if (buflen == 0) /* EOF */
        s = NULL;
    Py_DECREF(bufobj);
    return s;

error:
    Py_XDECREF(bufobj);
    return error_ret(tok);
}
Base
1
cstrchr(char_u *s, int c)
{
    char_u *p;
    int cc;

    if (!rex.reg_ic || (!enc_utf8 && mb_char2len(c) > 1))
        return vim_strchr(s, c);

    // tolower() and toupper() can be slow, comparing twice should be a lot
    // faster (esp. when using MS Visual C++!).
    // For UTF-8 need to use folded case.
    if (enc_utf8 && c > 0x80)
        cc = utf_fold(c);
    else if (MB_ISUPPER(c))
        cc = MB_TOLOWER(c);
    else if (MB_ISLOWER(c))
        cc = MB_TOUPPER(c);
    else
        return vim_strchr(s, c);

    if (has_mbyte)
    {
        for (p = s; *p != NUL; p += (*mb_ptr2len)(p))
        {
            if (enc_utf8 && c > 0x80)
            {
                if (utf_fold(utf_ptr2char(p)) == cc)
                    return p;
            }
            else if (*p == c || *p == cc)
                return p;
        }
    }
    else
        // Faster version for when there are no multi-byte characters.
        for (p = s; *p != NUL; ++p)
            if (*p == c || *p == cc)
                return p;

    return NULL;
}
Base
1
int AES_decrypt(uint8_t *encr_message, uint64_t length, char *message, uint64_t msgLen)
{
    if (!message) {
        LOG_ERROR("Null message in AES_encrypt");
        return -1;
    }
    if (!encr_message) {
        LOG_ERROR("Null encr message in AES_encrypt");
        return -2;
    }

    if (length < SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE) {
        LOG_ERROR("length < SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE");
        return -1;
    }

    uint64_t len = length - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE;
    if (msgLen < len) {
        LOG_ERROR("Output buffer not large enough");
        return -2;
    }

    sgx_status_t status = sgx_rijndael128GCM_decrypt(&AES_key,
            encr_message + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE,
            len,
            (unsigned char*) message,
            encr_message + SGX_AESGCM_MAC_SIZE, SGX_AESGCM_IV_SIZE,
            NULL, 0,
            (sgx_aes_gcm_128bit_tag_t *)encr_message);

    return status;
}
Base
1
GPMF_ERR IsValidSize(GPMF_stream *ms, uint32_t size) // size is in longs not bytes.
{
    if (ms)
    {
        int32_t nestsize = (int32_t)ms->nest_size[ms->nest_level];
        if (nestsize == 0 && ms->nest_level == 0)
            nestsize = ms->buffer_size_longs;

        if (size + 2 <= nestsize)
            return GPMF_OK;
    }
    return GPMF_ERROR_BAD_STRUCTURE;
}
Base
1
static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
{
    if (likely(i->nr_segs == 1)) {
        i->iov_offset += bytes;
    } else {
        const struct iovec *iov = i->iov;
        size_t base = i->iov_offset;

        while (bytes) {
            int copy = min(bytes, iov->iov_len - base);

            bytes -= copy;
            base += copy;
            if (iov->iov_len == base) {
                iov++;
                base = 0;
            }
        }
        i->iov = iov;
        i->iov_offset = base;
    }
}
Class
2
ripng_print(netdissect_options *ndo, const u_char *dat, unsigned int length)
{
    register const struct rip6 *rp = (const struct rip6 *)dat;
    register const struct netinfo6 *ni;
    register u_int amt;
    register u_int i;
    int j;
    int trunc;

    if (ndo->ndo_snapend < dat)
        return;
    amt = ndo->ndo_snapend - dat;
    i = min(length, amt);
    if (i < (sizeof(struct rip6) - sizeof(struct netinfo6)))
        return;
    i -= (sizeof(struct rip6) - sizeof(struct netinfo6));

    switch (rp->rip6_cmd) {

    case RIP6_REQUEST:
        j = length / sizeof(*ni);
        if (j == 1 && rp->rip6_nets->rip6_metric == HOPCNT_INFINITY6 &&
            IN6_IS_ADDR_UNSPECIFIED(&rp->rip6_nets->rip6_dest)) {
            ND_PRINT((ndo, " ripng-req dump"));
            break;
        }
        if (j * sizeof(*ni) != length - 4)
            ND_PRINT((ndo, " ripng-req %d[%u]:", j, length));
        else
            ND_PRINT((ndo, " ripng-req %d:", j));
        trunc = ((i / sizeof(*ni)) * sizeof(*ni) != i);
        for (ni = rp->rip6_nets; i >= sizeof(*ni); i -= sizeof(*ni), ++ni) {
            if (ndo->ndo_vflag > 1)
                ND_PRINT((ndo, "\n\t"));
            else
                ND_PRINT((ndo, " "));
            rip6_entry_print(ndo, ni, 0);
        }
        break;
    case RIP6_RESPONSE:
        j = length / sizeof(*ni);
        if (j * sizeof(*ni) != length - 4)
            ND_PRINT((ndo, " ripng-resp %d[%u]:", j, length));
        else
            ND_PRINT((ndo, " ripng-resp %d:", j));
        trunc = ((i / sizeof(*ni)) * sizeof(*ni) != i);
        for (ni = rp->rip6_nets; i >= sizeof(*ni); i -= sizeof(*ni), ++ni) {
            if (ndo->ndo_vflag > 1)
                ND_PRINT((ndo, "\n\t"));
            else
                ND_PRINT((ndo, " "));
            rip6_entry_print(ndo, ni, ni->rip6_metric);
        }
        if (trunc)
            ND_PRINT((ndo, "[|ripng]"));
        break;
    default:
        ND_PRINT((ndo, " ripng-%d ?? %u", rp->rip6_cmd, length));
        break;
    }
    if (rp->rip6_vers != RIP6_VERSION)
        ND_PRINT((ndo, " [vers %d]", rp->rip6_vers));
}
Base
1
image_load_jpeg(image_t *img,       /* I - Image pointer */
                FILE    *fp,        /* I - File to load from */
                int     gray,       /* I - 0 = color, 1 = grayscale */
                int     load_data)  /* I - 1 = load image data, 0 = just info */
{
    struct jpeg_decompress_struct cinfo;  /* Decompressor info */
    struct jpeg_error_mgr         jerr;   /* Error handler info */
    JSAMPROW                      row;    /* Sample row pointer */

    jpeg_std_error(&jerr);
    jerr.error_exit = jpeg_error_handler;

    cinfo.err = &jerr;
    jpeg_create_decompress(&cinfo);
    jpeg_stdio_src(&cinfo, fp);
    jpeg_read_header(&cinfo, (boolean)1);

    cinfo.quantize_colors = FALSE;

    if (gray || cinfo.num_components == 1)
    {
        cinfo.out_color_space      = JCS_GRAYSCALE;
        cinfo.out_color_components = 1;
        cinfo.output_components    = 1;
    }
    else if (cinfo.num_components != 3)
    {
        jpeg_destroy_decompress(&cinfo);

        progress_error(HD_ERROR_BAD_FORMAT,
                       "CMYK JPEG files are not supported! (%s)",
                       file_rlookup(img->filename));
        return (-1);
    }
    else
    {
        cinfo.out_color_space      = JCS_RGB;
        cinfo.out_color_components = 3;
        cinfo.output_components    = 3;
    }

    jpeg_calc_output_dimensions(&cinfo);

    img->width  = (int)cinfo.output_width;
    img->height = (int)cinfo.output_height;
    img->depth  = (int)cinfo.output_components;

    if (!load_data)
    {
        jpeg_destroy_decompress(&cinfo);
        return (0);
    }

    img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth));

    if (img->pixels == NULL)
    {
        jpeg_destroy_decompress(&cinfo);
        return (-1);
    }

    jpeg_start_decompress(&cinfo);

    while (cinfo.output_scanline < cinfo.output_height)
    {
        row = (JSAMPROW)(img->pixels +
                         (size_t)cinfo.output_scanline * (size_t)cinfo.output_width *
                         (size_t)cinfo.output_components);
        jpeg_read_scanlines(&cinfo, &row, (JDIMENSION)1);
    }

    jpeg_finish_decompress(&cinfo);
    jpeg_destroy_decompress(&cinfo);

    return (0);
}
Base
1
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
                                 int op_flag,
                                 unsigned int size,
                                 unsigned int __user *tlv)
{
    struct user_element *ue = kcontrol->private_data;
    int change = 0;
    void *new_data;

    if (op_flag > 0) {
        if (size > 1024 * 128)	/* sane value */
            return -EINVAL;

        new_data = memdup_user(tlv, size);
        if (IS_ERR(new_data))
            return PTR_ERR(new_data);
        change = ue->tlv_data_size != size;
        if (!change)
            change = memcmp(ue->tlv_data, new_data, size);
        kfree(ue->tlv_data);
        ue->tlv_data = new_data;
        ue->tlv_data_size = size;
    } else {
        if (! ue->tlv_data_size || ! ue->tlv_data)
            return -ENXIO;
        if (size < ue->tlv_data_size)
            return -ENOSPC;
        if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
            return -EFAULT;
    }
    return change;
}
Class
2
mcs_parse_domain_params(STREAM s)
{
    int length;

    ber_parse_header(s, MCS_TAG_DOMAIN_PARAMS, &length);
    in_uint8s(s, length);

    return s_check(s);
}
Base
1
static uint32_t scsi_init_iovec(SCSIDiskReq *r)
{
    r->iov.iov_len = MIN(r->sector_count * 512, SCSI_DMA_BUF_SIZE);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
    return r->qiov.size / 512;
}
Class
2
void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity)
{
    int bufsize = JPC_CEILDIVPOW2(numrows, 1);
    jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE];
    jpc_fix_t *buf = splitbuf;
    register jpc_fix_t *srcptr;
    register jpc_fix_t *dstptr;
    register int n;
    register int m;
    int hstartcol;

    /* Get a buffer. */
    if (bufsize > QMFB_SPLITBUFSIZE) {
        if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) {
            /* We have no choice but to commit suicide in this case. */
            abort();
        }
    }

    if (numrows >= 2) {
        hstartcol = (numrows + 1 - parity) >> 1;
        // ORIGINAL (WRONG): m = (parity) ? hstartcol : (numrows - hstartcol);
        m = numrows - hstartcol;
        /* Save the samples destined for the highpass channel. */
        n = m;
        dstptr = buf;
        srcptr = &a[(1 - parity) * stride];
        while (n-- > 0) {
            *dstptr = *srcptr;
            ++dstptr;
            srcptr += stride << 1;
        }
        /* Copy the appropriate samples into the lowpass channel. */
        dstptr = &a[(1 - parity) * stride];
        srcptr = &a[(2 - parity) * stride];
        n = numrows - m - (!parity);
        while (n-- > 0) {
            *dstptr = *srcptr;
            dstptr += stride;
            srcptr += stride << 1;
        }
        /* Copy the saved samples into the highpass channel. */
        dstptr = &a[hstartcol * stride];
        srcptr = buf;
        n = m;
        while (n-- > 0) {
            *dstptr = *srcptr;
            dstptr += stride;
            ++srcptr;
        }
    }

    /* If the split buffer was allocated on the heap, free this memory. */
    if (buf != splitbuf) {
        jas_free(buf);
    }
}
Class
2
bool copyaudiodata (AFfilehandle infile, AFfilehandle outfile, int trackid)
{
    int frameSize = afGetVirtualFrameSize(infile, trackid, 1);

    const int kBufferFrameCount = 65536;
    void *buffer = malloc(kBufferFrameCount * frameSize);

    AFframecount totalFrames = afGetFrameCount(infile, AF_DEFAULT_TRACK);
    AFframecount totalFramesWritten = 0;

    bool success = true;

    while (totalFramesWritten < totalFrames)
    {
        AFframecount framesToRead = totalFrames - totalFramesWritten;
        if (framesToRead > kBufferFrameCount)
            framesToRead = kBufferFrameCount;

        AFframecount framesRead = afReadFrames(infile, trackid, buffer,
            framesToRead);

        if (framesRead < framesToRead)
        {
            fprintf(stderr, "Bad read of audio track data.\n");
            success = false;
            break;
        }

        AFframecount framesWritten = afWriteFrames(outfile, trackid, buffer,
            framesRead);

        if (framesWritten < framesRead)
        {
            fprintf(stderr, "Bad write of audio track data.\n");
            success = false;
            break;
        }

        totalFramesWritten += framesWritten;
    }

    free(buffer);

    return success;
}
Base
1
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
    int len;

    if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
        rfcomm_dlc_accept(d);
        msg->msg_namelen = 0;
        return 0;
    }

    len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);

    lock_sock(sk);
    if (!(flags & MSG_PEEK) && len > 0)
        atomic_sub(len, &sk->sk_rmem_alloc);

    if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
        rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
    release_sock(sk);

    return len;
}
Class
2