Columns: code (string, lengths 12 to 2.05k), label_name (string, 5 classes), label (int64, values 0 to 4)
void ip4_datagram_release_cb(struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ip_options_rcu *inet_opt; __be32 daddr = inet->inet_daddr; struct flowi4 fl4; struct rtable *rt; if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0)) return; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (!IS_ERR(rt)) __sk_dst_set(sk, &rt->dst); rcu_read_unlock(); }
Variant
0
static int f2fs_read_data_page(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; int ret = -EAGAIN; trace_f2fs_readpage(page, DATA); /* If the file has inline data, try to read it directly */ if (f2fs_has_inline_data(inode)) ret = f2fs_read_inline_data(inode, page); if (ret == -EAGAIN) ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false); return ret; }
Base
1
smtp_log_to_file(smtp_t *smtp) { FILE *fp = fopen("/tmp/smtp-alert.log", "a"); time_t now; struct tm tm; char time_buf[25]; int time_buf_len; time(&now); localtime_r(&now, &tm); time_buf_len = strftime(time_buf, sizeof time_buf, "%a %b %e %X %Y", &tm); fprintf(fp, "%s: %s -> %s\n" "%*sSubject: %s\n" "%*sBody: %s\n\n", time_buf, global_data->email_from, smtp->email_to, time_buf_len - 7, "", smtp->subject, time_buf_len - 7, "", smtp->body); fclose(fp); free_smtp_all(smtp); }
Base
1
lldp_mgmt_addr_tlv_print(netdissect_options *ndo, const u_char *pptr, u_int len) { uint8_t mgmt_addr_len, intf_num_subtype, oid_len; const u_char *tptr; u_int tlen; char *mgmt_addr; tlen = len; tptr = pptr; if (tlen < 1) { return 0; } mgmt_addr_len = *tptr++; tlen--; if (tlen < mgmt_addr_len) { return 0; } mgmt_addr = lldp_network_addr_print(ndo, tptr, mgmt_addr_len); if (mgmt_addr == NULL) { return 0; } ND_PRINT((ndo, "\n\t Management Address length %u, %s", mgmt_addr_len, mgmt_addr)); tptr += mgmt_addr_len; tlen -= mgmt_addr_len; if (tlen < LLDP_INTF_NUM_LEN) { return 0; } intf_num_subtype = *tptr; ND_PRINT((ndo, "\n\t %s Interface Numbering (%u): %u", tok2str(lldp_intf_numb_subtype_values, "Unknown", intf_num_subtype), intf_num_subtype, EXTRACT_32BITS(tptr + 1))); tptr += LLDP_INTF_NUM_LEN; tlen -= LLDP_INTF_NUM_LEN; /* * The OID is optional. */ if (tlen) { oid_len = *tptr; if (tlen < oid_len) { return 0; } if (oid_len) { ND_PRINT((ndo, "\n\t OID length %u", oid_len)); safeputs(ndo, tptr + 1, oid_len); } } return 1; }
Base
1
sysUpTime_handler(snmp_varbind_t *varbind, uint32_t *oid) { snmp_api_set_time_ticks(varbind, oid, clock_seconds() * 100); }
Base
1
static void mpeg4_encode_gop_header(MpegEncContext *s) { int hours, minutes, seconds; int64_t time; put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); time = s->current_picture_ptr->f->pts; if (s->reordered_input_picture[1]) time = FFMIN(time, s->reordered_input_picture[1]->f->pts); time = time * s->avctx->time_base.num; s->last_time_base = FFUDIV(time, s->avctx->time_base.den); seconds = FFUDIV(time, s->avctx->time_base.den); minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60); hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60); hours = FFUMOD(hours , 24); put_bits(&s->pb, 5, hours); put_bits(&s->pb, 6, minutes); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, seconds); put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); // broken link == NO ff_mpeg4_stuffing(&s->pb); }
Class
2
static void ffs_user_copy_worker(struct work_struct *work) { struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, work); int ret = io_data->req->status ? io_data->req->status : io_data->req->actual; if (io_data->read && ret > 0) { use_mm(io_data->mm); ret = copy_to_iter(io_data->buf, ret, &io_data->data); if (iov_iter_count(&io_data->data)) ret = -EFAULT; unuse_mm(io_data->mm); } io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); if (io_data->ffs->ffs_eventfd && !(io_data->kiocb->ki_flags & IOCB_EVENTFD)) eventfd_signal(io_data->ffs->ffs_eventfd, 1); usb_ep_free_request(io_data->ep, io_data->req); io_data->kiocb->private = NULL; if (io_data->read) kfree(io_data->to_free); kfree(io_data->buf); kfree(io_data); }
Variant
0
sysName_handler(snmp_varbind_t *varbind, uint32_t *oid) { snmp_api_set_string(varbind, oid, "Contiki-NG - "CONTIKI_TARGET_STRING); }
Base
1
static void get_icu_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; char* tag_value = NULL; char* empty_result = ""; int result = 0; char* msg = NULL; UErrorCode status = U_ZERO_ERROR; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name ,&loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } /* Call ICU get */ tag_value = get_icu_value_internal( loc_name , tag_name , &result ,0); /* No value found */ if( result == -1 ) { if( tag_value){ efree( tag_value); } RETURN_STRING( empty_result , TRUE); } /* value found */ if( tag_value){ RETURN_STRING( tag_value , FALSE); } /* Error encountered while fetching the value */ if( result ==0) { spprintf(&msg , 0, "locale_get_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_NULL(); } }
Base
1
static void dtls1_clear_queues(SSL *s) { pitem *item = NULL; hm_fragment *frag = NULL; DTLS1_RECORD_DATA *rdata; while( (item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->processed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->buffered_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->sent_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } }
Class
2
int main(int argc, char **argv) { test_cmp_parameters inParam; FILE *fbase=NULL, *ftest=NULL; int same = 0; char lbase[256]; char strbase[256]; char ltest[256]; char strtest[256]; if( parse_cmdline_cmp(argc, argv, &inParam) == 1 ) { compare_dump_files_help_display(); goto cleanup; } /* Display Parameters*/ printf("******Parameters********* \n"); printf(" base_filename = %s\n" " test_filename = %s\n", inParam.base_filename, inParam.test_filename); printf("************************* \n"); /* open base file */ printf("Try to open: %s for reading ... ", inParam.base_filename); if((fbase = fopen(inParam.base_filename, "rb"))==NULL) { goto cleanup; } printf("Ok.\n"); /* open test file */ printf("Try to open: %s for reading ... ", inParam.test_filename); if((ftest = fopen(inParam.test_filename, "rb"))==NULL) { goto cleanup; } printf("Ok.\n"); while (fgets(lbase, sizeof(lbase), fbase) && fgets(ltest,sizeof(ltest),ftest)) { int nbase = sscanf(lbase, "%255[^\r\n]", strbase); int ntest = sscanf(ltest, "%255[^\r\n]", strtest); assert( nbase != 255 && ntest != 255 ); if( nbase != 1 || ntest != 1 ) { fprintf(stderr, "could not parse line from files\n" ); goto cleanup; } if( strcmp( strbase, strtest ) != 0 ) { fprintf(stderr,"<%s> vs. <%s>\n", strbase, strtest); goto cleanup; } } same = 1; printf("\n***** TEST SUCCEED: Files are the same. *****\n"); cleanup: /*Close File*/ if(fbase) fclose(fbase); if(ftest) fclose(ftest); /* Free memory*/ free(inParam.base_filename); free(inParam.test_filename); return same ? EXIT_SUCCESS : EXIT_FAILURE; }
Class
2
zend_op_array *compile_string(zval *source_string, char *filename TSRMLS_DC) { zend_lex_state original_lex_state; zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array)); zend_op_array *original_active_op_array = CG(active_op_array); zend_op_array *retval; zval tmp; int compiler_result; zend_bool original_in_compilation = CG(in_compilation); if (source_string->value.str.len==0) { efree(op_array); return NULL; } CG(in_compilation) = 1; tmp = *source_string; zval_copy_ctor(&tmp); convert_to_string(&tmp); source_string = &tmp; zend_save_lexical_state(&original_lex_state TSRMLS_CC); if (zend_prepare_string_for_scanning(source_string, filename TSRMLS_CC)==FAILURE) { efree(op_array); retval = NULL; } else { zend_bool orig_interactive = CG(interactive); CG(interactive) = 0; init_op_array(op_array, ZEND_EVAL_CODE, INITIAL_OP_ARRAY_SIZE TSRMLS_CC); CG(interactive) = orig_interactive; CG(active_op_array) = op_array; zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context))); zend_init_compiler_context(TSRMLS_C); BEGIN(ST_IN_SCRIPTING); compiler_result = zendparse(TSRMLS_C); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } if (compiler_result==1) { CG(active_op_array) = original_active_op_array; CG(unclean_shutdown)=1; destroy_op_array(op_array TSRMLS_CC); efree(op_array); retval = NULL; } else { zend_do_return(NULL, 0 TSRMLS_CC); CG(active_op_array) = original_active_op_array; pass_two(op_array TSRMLS_CC); zend_release_labels(0 TSRMLS_CC); retval = op_array; } } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); zval_dtor(&tmp); CG(in_compilation) = original_in_compilation; return retval; }
Class
2
ins_compl_add_infercase( char_u *str_arg, int len, int icase, char_u *fname, int dir, int cont_s_ipos) // next ^X<> will set initial_pos { char_u *str = str_arg; char_u *p; int actual_len; // Take multi-byte characters int actual_compl_length; // into account. int min_len; int flags = 0; if (p_ic && curbuf->b_p_inf && len > 0) { // Infer case of completed part. // Find actual length of completion. if (has_mbyte) { p = str; actual_len = 0; while (*p != NUL) { MB_PTR_ADV(p); ++actual_len; } } else actual_len = len; // Find actual length of original text. if (has_mbyte) { p = compl_orig_text; actual_compl_length = 0; while (*p != NUL) { MB_PTR_ADV(p); ++actual_compl_length; } } else actual_compl_length = compl_length; // "actual_len" may be smaller than "actual_compl_length" when using // thesaurus, only use the minimum when comparing. min_len = actual_len < actual_compl_length ? actual_len : actual_compl_length; str = ins_compl_infercase_gettext(str, actual_len, actual_compl_length, min_len); } if (cont_s_ipos) flags |= CP_CONT_S_IPOS; if (icase) flags |= CP_ICASE; return ins_compl_add(str, len, fname, NULL, NULL, dir, flags, FALSE); }
Variant
0
R_API RSocket *r_socket_accept_timeout(RSocket *s, unsigned int timeout) { fd_set read_fds; fd_set except_fds; FD_ZERO (&read_fds); FD_SET (s->fd, &read_fds); FD_ZERO (&except_fds); FD_SET (s->fd, &except_fds); struct timeval t; t.tv_sec = timeout; t.tv_usec = 0; int r = select (s->fd + 1, &read_fds, NULL, &except_fds, &t); if(r < 0) { perror ("select"); } else if (r > 0 && FD_ISSET (s->fd, &read_fds)) { return r_socket_accept (s); } return NULL; }
Base
1
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining, u64 expires) { struct cfs_rq *cfs_rq; u64 runtime; u64 starting_runtime = remaining; rcu_read_lock(); list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, throttled_list) { struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; rq_lock_irqsave(rq, &rf); if (!cfs_rq_throttled(cfs_rq)) goto next; runtime = -cfs_rq->runtime_remaining + 1; if (runtime > remaining) runtime = remaining; remaining -= runtime; cfs_rq->runtime_remaining += runtime; cfs_rq->runtime_expires = expires; /* we check whether we're throttled above */ if (cfs_rq->runtime_remaining > 0) unthrottle_cfs_rq(cfs_rq); next: rq_unlock_irqrestore(rq, &rf); if (!remaining) break; } rcu_read_unlock(); return starting_runtime - remaining; }
Class
2
cJSON *cJSON_CreateBool( int b ) { cJSON *item = cJSON_New_Item(); if ( item ) item->type = b ? cJSON_True : cJSON_False; return item; }
Base
1
create_vterm(term_T *term, int rows, int cols) { VTerm *vterm; VTermScreen *screen; VTermState *state; VTermValue value; vterm = vterm_new_with_allocator(rows, cols, &vterm_allocator, NULL); term->tl_vterm = vterm; screen = vterm_obtain_screen(vterm); vterm_screen_set_callbacks(screen, &screen_callbacks, term); /* TODO: depends on 'encoding'. */ vterm_set_utf8(vterm, 1); init_default_colors(term); vterm_state_set_default_colors( vterm_obtain_state(vterm), &term->tl_default_color.fg, &term->tl_default_color.bg); if (t_colors >= 16) vterm_state_set_bold_highbright(vterm_obtain_state(vterm), 1); /* Required to initialize most things. */ vterm_screen_reset(screen, 1 /* hard */); /* Allow using alternate screen. */ vterm_screen_enable_altscreen(screen, 1); /* For unix do not use a blinking cursor. In an xterm this causes the * cursor to blink if it's blinking in the xterm. * For Windows we respect the system wide setting. */ #ifdef WIN3264 if (GetCaretBlinkTime() == INFINITE) value.boolean = 0; else value.boolean = 1; #else value.boolean = 0; #endif state = vterm_obtain_state(vterm); vterm_state_set_termprop(state, VTERM_PROP_CURSORBLINK, &value); vterm_state_set_unrecognised_fallbacks(state, &parser_fallbacks, term); }
Base
1
void Curl_detach_connnection(struct Curl_easy *data) { struct connectdata *conn = data->conn; if(conn) Curl_llist_remove(&conn->easyq, &data->conn_queue, NULL); data->conn = NULL; }
Variant
0
static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, struct ib_udata *udata) { int ret = 0; struct hns_roce_ucontext *context; struct hns_roce_ib_alloc_ucontext_resp resp; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); resp.qp_tab_size = hr_dev->caps.num_qps; context = kmalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); ret = hns_roce_uar_alloc(hr_dev, &context->uar); if (ret) goto error_fail_uar_alloc; if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { INIT_LIST_HEAD(&context->page_list); mutex_init(&context->page_mutex); } ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (ret) goto error_fail_copy_to_udata; return &context->ibucontext; error_fail_copy_to_udata: hns_roce_uar_free(hr_dev, &context->uar); error_fail_uar_alloc: kfree(context); return ERR_PTR(ret); }
Class
2
static int on_header_value( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (at && length) { SWITCH(data->header_field) CASE(OGS_SBI_CONTENT_TYPE) if (data->part[data->num_of_part].content_type) ogs_free(data->part[data->num_of_part].content_type); data->part[data->num_of_part].content_type = ogs_strndup(at, length); ogs_assert(data->part[data->num_of_part].content_type); break; CASE(OGS_SBI_CONTENT_ID) if (data->part[data->num_of_part].content_id) ogs_free(data->part[data->num_of_part].content_id); data->part[data->num_of_part].content_id = ogs_strndup(at, length); ogs_assert(data->part[data->num_of_part].content_id); break; DEFAULT ogs_error("Unknown header field [%s]", data->header_field); END } return 0; }
Base
1
static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { struct unix_sock *u = unix_sk(sk); msg->msg_namelen = 0; if (u->addr) { msg->msg_namelen = u->addr->len; memcpy(msg->msg_name, u->addr->name, u->addr->len); } }
Class
2
static int amd_gpio_remove(struct platform_device *pdev) { struct amd_gpio *gpio_dev; gpio_dev = platform_get_drvdata(pdev); gpiochip_remove(&gpio_dev->gc); pinctrl_unregister(gpio_dev->pctrl); return 0; }
Variant
0
mm_sshpam_init_ctx(Authctxt *authctxt) { Buffer m; int success; debug3("%s", __func__); buffer_init(&m); buffer_put_cstring(&m, authctxt->user); mm_request_send(pmonitor->m_recvfd, MONITOR_REQ_PAM_INIT_CTX, &m); debug3("%s: waiting for MONITOR_ANS_PAM_INIT_CTX", __func__); mm_request_receive_expect(pmonitor->m_recvfd, MONITOR_ANS_PAM_INIT_CTX, &m); success = buffer_get_int(&m); if (success == 0) { debug3("%s: pam_init_ctx failed", __func__); buffer_free(&m); return (NULL); } buffer_free(&m); return (authctxt); }
Class
2
static const char *parse_value( cJSON *item, const char *value ) { if ( ! value ) return 0; /* Fail on null. */ if ( ! strncmp( value, "null", 4 ) ) { item->type = cJSON_NULL; return value + 4; } if ( ! strncmp( value, "false", 5 ) ) { item->type = cJSON_False; return value + 5; } if ( ! strncmp( value, "true", 4 ) ) { item->type = cJSON_True; item->valueint = 1; return value + 4; } if ( *value == '\"' ) return parse_string( item, value ); if ( *value == '-' || ( *value >= '0' && *value <= '9' ) ) return parse_number( item, value ); if ( *value == '[' ) return parse_array( item, value ); if ( *value == '{' ) return parse_object( item, value ); /* Fail. */ ep = value; return 0; }
Base
1
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; u16 dcid, flags; u8 rsp[64]; struct sock *sk; int len; dcid = __le16_to_cpu(req->dcid); flags = __le16_to_cpu(req->flags); BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid); if (!sk) return -ENOENT; if (sk->sk_state == BT_DISCONN) goto unlock; /* Reject if config buffer is too small. */ len = cmd_len - sizeof(*req); if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) { l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, L2CAP_CONF_REJECT, flags), rsp); goto unlock; } /* Store config. */ memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len); l2cap_pi(sk)->conf_len += len; if (flags & 0x0001) { /* Incomplete config. Send empty response. */ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, L2CAP_CONF_SUCCESS, 0x0001), rsp); goto unlock; } /* Complete config. */ len = l2cap_parse_conf_req(sk, rsp); if (len < 0) goto unlock; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); /* Reset config buffer. */ l2cap_pi(sk)->conf_len = 0; if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE)) goto unlock; if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { sk->sk_state = BT_CONNECTED; l2cap_chan_ready(sk); goto unlock; } if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { u8 buf[64]; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(sk, buf), buf); } unlock: bh_unlock_sock(sk); return 0; }
Base
1
static int oidc_cache_crypto_encrypt(request_rec *r, const char *plaintext, unsigned char *key, char **result) { char *encoded = NULL, *p = NULL, *e_tag = NULL; unsigned char *ciphertext = NULL; int plaintext_len, ciphertext_len, encoded_len, e_tag_len; unsigned char tag[OIDC_CACHE_TAG_LEN]; /* allocate space for the ciphertext */ plaintext_len = strlen(plaintext) + 1; ciphertext = apr_pcalloc(r->pool, (plaintext_len + EVP_CIPHER_block_size(OIDC_CACHE_CIPHER))); ciphertext_len = oidc_cache_crypto_encrypt_impl(r, (unsigned char *) plaintext, plaintext_len, OIDC_CACHE_CRYPTO_GCM_AAD, sizeof(OIDC_CACHE_CRYPTO_GCM_AAD), key, OIDC_CACHE_CRYPTO_GCM_IV, sizeof(OIDC_CACHE_CRYPTO_GCM_IV), ciphertext, tag, sizeof(tag)); /* base64url encode the resulting ciphertext */ encoded_len = oidc_base64url_encode(r, &encoded, (const char *) ciphertext, ciphertext_len, 1); if (encoded_len > 0) { p = encoded; /* base64url encode the tag */ e_tag_len = oidc_base64url_encode(r, &e_tag, (const char *) tag, OIDC_CACHE_TAG_LEN, 1); /* now allocated space for the concatenated base64url encoded ciphertext and tag */ encoded = apr_pcalloc(r->pool, encoded_len + 1 + e_tag_len + 1); memcpy(encoded, p, encoded_len); p = encoded + encoded_len; *p = OIDC_CHAR_DOT; p++; /* append the tag in the buffer */ memcpy(p, e_tag, e_tag_len); encoded_len += e_tag_len + 1; /* make sure the result is \0 terminated */ encoded[encoded_len] = '\0'; *result = encoded; } return encoded_len; }
Class
2
diff_buf_delete(buf_T *buf) { int i; tabpage_T *tp; FOR_ALL_TABPAGES(tp) { i = diff_buf_idx_tp(buf, tp); if (i != DB_COUNT) { tp->tp_diffbuf[i] = NULL; tp->tp_diff_invalid = TRUE; if (tp == curtab) diff_redraw(TRUE); } } }
Base
1
static int msg_cache_check(const char *id, struct BodyCache *bcache, void *data) { struct Context *ctx = (struct Context *) data; if (!ctx) return -1; struct PopData *pop_data = (struct PopData *) ctx->data; if (!pop_data) return -1; #ifdef USE_HCACHE /* keep hcache file if hcache == bcache */ if (strcmp(HC_FNAME "." HC_FEXT, id) == 0) return 0; #endif for (int i = 0; i < ctx->msgcount; i++) { /* if the id we get is known for a header: done (i.e. keep in cache) */ if (ctx->hdrs[i]->data && (mutt_str_strcmp(ctx->hdrs[i]->data, id) == 0)) return 0; } /* message not found in context -> remove it from cache * return the result of bcache, so we stop upon its first error */ return mutt_bcache_del(bcache, id); }
Class
2
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, int len, bool more) { struct page *page = virt_to_head_page(data); int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; offset += q->buf_offset; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, q->buf_size); if (more) return; q->rx_head = NULL; dev->drv->rx_skb(dev, q - dev->q_rx, skb); }
Base
1
writefile(const char *name, struct string *s) { FILE *f; int ret; f = fopen(name, "w"); if (!f) { warn("open %s:", name); return -1; } ret = 0; if (fwrite(s->s, 1, s->n, f) != s->n || fflush(f) != 0) { warn("write %s:", name); ret = -1; } fclose(f); return ret; }
Base
1
static int __get_data_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create, int flag, pgoff_t *next_pgofs) { struct f2fs_map_blocks map; int err; map.m_lblk = iblock; map.m_len = bh->b_size >> inode->i_blkbits; map.m_next_pgofs = next_pgofs; err = f2fs_map_blocks(inode, &map, create, flag); if (!err) { map_bh(bh, inode->i_sb, map.m_pblk); bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; bh->b_size = map.m_len << inode->i_blkbits; } return err; }
Base
1
static void copyIPv6IfDifferent(void * dest, const void * src) { if(dest != src) { memcpy(dest, src, sizeof(struct in6_addr)); } }
Base
1
process_bitmap_updates(STREAM s) { uint16 num_updates; uint16 left, top, right, bottom, width, height; uint16 cx, cy, bpp, Bpp, compress, bufsize, size; uint8 *data, *bmpdata; int i; logger(Protocol, Debug, "%s()", __func__); in_uint16_le(s, num_updates); for (i = 0; i < num_updates; i++) { in_uint16_le(s, left); in_uint16_le(s, top); in_uint16_le(s, right); in_uint16_le(s, bottom); in_uint16_le(s, width); in_uint16_le(s, height); in_uint16_le(s, bpp); Bpp = (bpp + 7) / 8; in_uint16_le(s, compress); in_uint16_le(s, bufsize); cx = right - left + 1; cy = bottom - top + 1; logger(Graphics, Debug, "process_bitmap_updates(), [%d,%d,%d,%d], [%d,%d], bpp=%d, compression=%d", left, top, right, bottom, width, height, Bpp, compress); if (!compress) { int y; bmpdata = (uint8 *) xmalloc(width * height * Bpp); for (y = 0; y < height; y++) { in_uint8a(s, &bmpdata[(height - y - 1) * (width * Bpp)], width * Bpp); } ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); xfree(bmpdata); continue; } if (compress & 0x400) { size = bufsize; } else { in_uint8s(s, 2); /* pad */ in_uint16_le(s, size); in_uint8s(s, 4); /* line_size, final_size */ } in_uint8p(s, data, size); bmpdata = (uint8 *) xmalloc(width * height * Bpp); if (bitmap_decompress(bmpdata, width, height, data, size, Bpp)) { ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); } else { logger(Graphics, Warning, "process_bitmap_updates(), failed to decompress bitmap"); } xfree(bmpdata); } }
Base
1
cmdline_insert_reg(int *gotesc UNUSED) { int i; int c; #ifdef USE_ON_FLY_SCROLL dont_scroll = TRUE; // disallow scrolling here #endif putcmdline('"', TRUE); ++no_mapping; ++allow_keys; i = c = plain_vgetc(); // CTRL-R <char> if (i == Ctrl_O) i = Ctrl_R; // CTRL-R CTRL-O == CTRL-R CTRL-R if (i == Ctrl_R) c = plain_vgetc(); // CTRL-R CTRL-R <char> extra_char = NUL; --no_mapping; --allow_keys; #ifdef FEAT_EVAL /* * Insert the result of an expression. * Need to save the current command line, to be able to enter * a new one... */ new_cmdpos = -1; if (c == '=') { if (ccline.cmdfirstc == '=' // can't do this recursively || cmdline_star > 0) // or when typing a password { beep_flush(); c = ESC; } else c = get_expr_register(); } #endif if (c != ESC) // use ESC to cancel inserting register { cmdline_paste(c, i == Ctrl_R, FALSE); #ifdef FEAT_EVAL // When there was a serious error abort getting the // command line. if (aborting()) { *gotesc = TRUE; // will free ccline.cmdbuff after // putting it in history return GOTO_NORMAL_MODE; } #endif KeyTyped = FALSE; // Don't do p_wc completion. #ifdef FEAT_EVAL if (new_cmdpos >= 0) { // set_cmdline_pos() was used if (new_cmdpos > ccline.cmdlen) ccline.cmdpos = ccline.cmdlen; else ccline.cmdpos = new_cmdpos; } #endif } // remove the double quote redrawcmd(); // The text has been stuffed, the command line didn't change yet. return CMDLINE_NOT_CHANGED; }
Variant
0
static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); }
Variant
0
check_lnums(int do_curwin) { win_T *wp; tabpage_T *tp; FOR_ALL_TAB_WINDOWS(tp, wp) if ((do_curwin || wp != curwin) && wp->w_buffer == curbuf) { // save the original cursor position and topline wp->w_save_cursor.w_cursor_save = wp->w_cursor; wp->w_save_cursor.w_topline_save = wp->w_topline; if (wp->w_cursor.lnum > curbuf->b_ml.ml_line_count) wp->w_cursor.lnum = curbuf->b_ml.ml_line_count; if (wp->w_topline > curbuf->b_ml.ml_line_count) wp->w_topline = curbuf->b_ml.ml_line_count; // save the corrected cursor position and topline wp->w_save_cursor.w_cursor_corr = wp->w_cursor; wp->w_save_cursor.w_topline_corr = wp->w_topline; } }
Variant
0
void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; struct net *net = sock_net(asoc->base.sk); bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->proto_unreach_timer, jiffies + (HZ/20))) sctp_association_hold(asoc); goto out_unlock; } /* Is this structure just waiting around for us to actually * get destroyed? */ if (asoc->base.dead) goto out_unlock; sctp_do_sm(net, SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: bh_unlock_sock(asoc->base.sk); sctp_association_put(asoc); }
Class
2
SPL_METHOD(SplDoublyLinkedList, offsetSet) { zval *zindex, *value; spl_dllist_object *intern; if (zend_parse_parameters(ZEND_NUM_ARGS(), "zz", &zindex, &value) == FAILURE) { return; } intern = Z_SPLDLLIST_P(getThis()); if (Z_TYPE_P(zindex) == IS_NULL) { /* $obj[] = ... */ spl_ptr_llist_push(intern->llist, value); } else { /* $obj[$foo] = ... */ zend_long index; spl_ptr_llist_element *element; index = spl_offset_convert_to_long(zindex); if (index < 0 || index >= intern->llist->count) { zval_ptr_dtor(value); zend_throw_exception(spl_ce_OutOfRangeException, "Offset invalid or out of range", 0); return; } element = spl_ptr_llist_offset(intern->llist, index, intern->flags & SPL_DLLIST_IT_LIFO); if (element != NULL) { /* call dtor on the old element as in spl_ptr_llist_pop */ if (intern->llist->dtor) { intern->llist->dtor(element); } /* the element is replaced, delref the old one as in * SplDoublyLinkedList::pop() */ zval_ptr_dtor(&element->data); ZVAL_COPY_VALUE(&element->data, value); /* new element, call ctor as in spl_ptr_llist_push */ if (intern->llist->ctor) { intern->llist->ctor(element); } } else { zval_ptr_dtor(value); zend_throw_exception(spl_ce_OutOfRangeException, "Offset invalid", 0); return; } } } /* }}} */
Variant
0
static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); complete(&task->slow_task->completion); }
Class
2
header_put_le_8byte (SF_PRIVATE *psf, sf_count_t x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 8) { psf->header [psf->headindex++] = x ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = (x >> 16) ; psf->header [psf->headindex++] = (x >> 24) ; psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = 0 ; psf->header [psf->headindex++] = 0 ; } ; } /* header_put_le_8byte */
Class
2
static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; }
Variant
0
static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { bool src_known = tnum_subreg_is_const(src_reg->var_off); bool dst_known = tnum_subreg_is_const(dst_reg->var_off); struct tnum var32_off = tnum_subreg(dst_reg->var_off); s32 smin_val = src_reg->smin_value; u32 umin_val = src_reg->umin_value; /* Assuming scalar64_min_max_or will be called so it is safe * to skip updating register for known case. */ if (src_known && dst_known) return; /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); dst_reg->u32_max_value = var32_off.value | var32_off.mask; if (dst_reg->s32_min_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->s32_min_value = S32_MIN; dst_reg->s32_max_value = S32_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->s32_min_value = dst_reg->umin_value; dst_reg->s32_max_value = dst_reg->umax_value; } }
Base
1
Ta3Grammar_FindDFA(grammar *g, int type) { dfa *d; #if 1 /* Massive speed-up */ d = &g->g_dfa[type - NT_OFFSET]; assert(d->d_type == type); return d; #else /* Old, slow version */ int i; for (i = g->g_ndfas, d = g->g_dfa; --i >= 0; d++) { if (d->d_type == type) return d; } assert(0); /* NOTREACHED */ #endif }
Base
1
ber_parse_header(STREAM s, int tagval, int *length) { int tag, len; if (tagval > 0xff) { in_uint16_be(s, tag); } else { in_uint8(s, tag); } if (tag != tagval) { logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag); return False; } in_uint8(s, len); if (len & 0x80) { len &= ~0x80; *length = 0; while (len--) next_be(s, *length); } else *length = len; return s_check(s); }
Base
1
static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; }
Base
1
static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); }
Class
2
static void TreeTest(Jsi_Interp* interp) { Jsi_Tree *st, *wt, *mt; Jsi_TreeEntry *hPtr, *hPtr2; bool isNew, i; Jsi_TreeSearch srch; struct tdata { int n; int m; } t1, t2; char nbuf[100]; wt = Jsi_TreeNew(interp, JSI_KEYS_ONEWORD, NULL); mt = Jsi_TreeNew(interp, sizeof(struct tdata), NULL); Jsi_TreeSet(wt, wt,(void*)0x88); Jsi_TreeSet(wt, mt,(void*)0x99); printf("WT: %p\n", Jsi_TreeGet(wt, mt)); printf("WT2: %p\n", Jsi_TreeGet(wt, wt)); Jsi_TreeDelete(wt); t1.n = 0; t1.m = 1; t2.n = 1; t2.m = 2; Jsi_TreeSet(mt, &t1,(void*)0x88); Jsi_TreeSet(mt, &t2,(void*)0x99); Jsi_TreeSet(mt, &t2,(void*)0x98); printf("CT: %p\n", Jsi_TreeGet(mt, &t1)); printf("CT2: %p\n", Jsi_TreeGet(mt, &t2)); Jsi_TreeDelete(mt); st = Jsi_TreeNew(interp, JSI_KEYS_STRING, NULL); hPtr = Jsi_TreeEntryNew(st, "bob", &isNew); Jsi_TreeValueSet(hPtr, (void*)99); Jsi_TreeSet(st, "zoe",(void*)77); hPtr2 = Jsi_TreeSet(st, "ted",(void*)55); Jsi_TreeSet(st, "philip",(void*)66); Jsi_TreeSet(st, "alice",(void*)77); puts("SRCH"); for (hPtr=Jsi_TreeSearchFirst(st,&srch, JSI_TREE_ORDER_IN, NULL); hPtr; hPtr=Jsi_TreeSearchNext(&srch)) mycall(st, hPtr, NULL); Jsi_TreeSearchDone(&srch); puts("IN"); Jsi_TreeWalk(st, mycall, NULL, JSI_TREE_ORDER_IN); puts("PRE"); Jsi_TreeWalk(st, mycall, NULL, JSI_TREE_ORDER_PRE); puts("POST"); Jsi_TreeWalk(st, mycall, NULL, JSI_TREE_ORDER_POST); puts("LEVEL"); Jsi_TreeWalk(st, mycall, NULL, JSI_TREE_ORDER_LEVEL); Jsi_TreeEntryDelete(hPtr2); puts("INDEL"); Jsi_TreeWalk(st, mycall, NULL, 0); for (i=0; i<1000; i++) { snprintf(nbuf, sizeof(nbuf), "name%d", i); Jsi_TreeSet(st, nbuf,(void*)i); } Jsi_TreeWalk(st, mycall, NULL, 0); for (i=0; i<1000; i++) { Jsi_TreeEntryDelete(st->root); } puts("OK"); Jsi_TreeWalk(st, mycall, NULL, 0); Jsi_TreeDelete(st); }
Base
1
tabstop_set(char_u *var, int **array) { int valcount = 1; int t; char_u *cp; if (var[0] == NUL || (var[0] == '0' && var[1] == NUL)) { *array = NULL; return TRUE; } for (cp = var; *cp != NUL; ++cp) { if (cp == var || cp[-1] == ',') { char_u *end; if (strtol((char *)cp, (char **)&end, 10) <= 0) { if (cp != end) emsg(_(e_positive)); else emsg(_(e_invarg)); return FALSE; } } if (VIM_ISDIGIT(*cp)) continue; if (cp[0] == ',' && cp > var && cp[-1] != ',' && cp[1] != NUL) { ++valcount; continue; } emsg(_(e_invarg)); return FALSE; } *array = ALLOC_MULT(int, valcount + 1); if (*array == NULL) return FALSE; (*array)[0] = valcount; t = 1; for (cp = var; *cp != NUL;) { (*array)[t++] = atoi((char *)cp); while (*cp != NUL && *cp != ',') ++cp; if (*cp != NUL) ++cp; } return TRUE; }
Variant
0
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rcomp)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; }
Class
2
static int ndp_sock_recv(struct ndp *ndp) { struct ndp_msg *msg; enum ndp_msg_type msg_type; size_t len; int err; msg = ndp_msg_alloc(); if (!msg) return -ENOMEM; len = ndp_msg_payload_maxlen(msg); err = myrecvfrom6(ndp->sock, msg->buf, &len, 0, &msg->addrto, &msg->ifindex); if (err) { err(ndp, "Failed to receive message"); goto free_msg; } dbg(ndp, "rcvd from: %s, ifindex: %u", str_in6_addr(&msg->addrto), msg->ifindex); if (len < sizeof(*msg->icmp6_hdr)) { warn(ndp, "rcvd icmp6 packet too short (%luB)", len); err = 0; goto free_msg; } err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type); if (err) { err = 0; goto free_msg; } ndp_msg_init(msg, msg_type); ndp_msg_payload_len_set(msg, len); if (!ndp_msg_check_valid(msg)) { warn(ndp, "rcvd invalid ND message"); err = 0; goto free_msg; } dbg(ndp, "rcvd %s, len: %zuB", ndp_msg_type_info(msg_type)->strabbr, len); if (!ndp_msg_check_opts(msg)) { err = 0; goto free_msg; } err = ndp_call_handlers(ndp, msg);; free_msg: ndp_msg_destroy(msg); return err; }
Pillar
3
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, struct sockaddr_storage *kern_address, int mode) { int tot_len; if (kern_msg->msg_namelen) { if (mode == VERIFY_READ) { int err = move_addr_to_kernel(kern_msg->msg_name, kern_msg->msg_namelen, kern_address); if (err < 0) return err; } kern_msg->msg_name = kern_address; } else kern_msg->msg_name = NULL; tot_len = iov_from_user_compat_to_kern(kern_iov, (struct compat_iovec __user *)kern_msg->msg_iov, kern_msg->msg_iovlen); if (tot_len >= 0) kern_msg->msg_iov = kern_iov; return tot_len; }
Class
2
mcs_recv_connect_response(STREAM mcs_data) { UNUSED(mcs_data); uint8 result; int length; STREAM s; RD_BOOL is_fastpath; uint8 fastpath_hdr; logger(Protocol, Debug, "%s()", __func__); s = iso_recv(&is_fastpath, &fastpath_hdr); if (s == NULL) return False; ber_parse_header(s, MCS_CONNECT_RESPONSE, &length); ber_parse_header(s, BER_TAG_RESULT, &length); in_uint8(s, result); if (result != 0) { logger(Protocol, Error, "mcs_recv_connect_response(), result=%d", result); return False; } ber_parse_header(s, BER_TAG_INTEGER, &length); in_uint8s(s, length); /* connect id */ mcs_parse_domain_params(s); ber_parse_header(s, BER_TAG_OCTET_STRING, &length); sec_process_mcs_data(s); /* if (length > mcs_data->size) { logger(Protocol, Error, "mcs_recv_connect_response(), expected length=%d, got %d",length, mcs_data->size); length = mcs_data->size; } in_uint8a(s, mcs_data->data, length); mcs_data->p = mcs_data->data; mcs_data->end = mcs_data->data + length; */ return s_check_end(s); }
Base
1
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, struct nlattr *rp) { struct xfrm_replay_state_esn *up; if (!replay_esn || !rp) return 0; up = nla_data(rp); if (xfrm_replay_state_esn_len(replay_esn) != xfrm_replay_state_esn_len(up)) return -EINVAL; return 0; }
Class
2
process_bitmap_updates(STREAM s) { uint16 num_updates; uint16 left, top, right, bottom, width, height; uint16 cx, cy, bpp, Bpp, compress, bufsize, size; uint8 *data, *bmpdata; int i; logger(Protocol, Debug, "%s()", __func__); in_uint16_le(s, num_updates); for (i = 0; i < num_updates; i++) { in_uint16_le(s, left); in_uint16_le(s, top); in_uint16_le(s, right); in_uint16_le(s, bottom); in_uint16_le(s, width); in_uint16_le(s, height); in_uint16_le(s, bpp); Bpp = (bpp + 7) / 8; in_uint16_le(s, compress); in_uint16_le(s, bufsize); cx = right - left + 1; cy = bottom - top + 1; logger(Graphics, Debug, "process_bitmap_updates(), [%d,%d,%d,%d], [%d,%d], bpp=%d, compression=%d", left, top, right, bottom, width, height, Bpp, compress); if (!compress) { int y; bmpdata = (uint8 *) xmalloc(width * height * Bpp); for (y = 0; y < height; y++) { in_uint8a(s, &bmpdata[(height - y - 1) * (width * Bpp)], width * Bpp); } ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); xfree(bmpdata); continue; } if (compress & 0x400) { size = bufsize; } else { in_uint8s(s, 2); /* pad */ in_uint16_le(s, size); in_uint8s(s, 4); /* line_size, final_size */ } in_uint8p(s, data, size); bmpdata = (uint8 *) xmalloc(width * height * Bpp); if (bitmap_decompress(bmpdata, width, height, data, size, Bpp)) { ui_paint_bitmap(left, top, cx, cy, width, height, bmpdata); } else { logger(Graphics, Warning, "process_bitmap_updates(), failed to decompress bitmap"); } xfree(bmpdata); } }
Base
1
static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct sk_buff *skb; /* Allocate memory for receiving command response data */ skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for command response data.\n"); return -ENOMEM; } skb_put(skb, MWIFIEX_UPLD_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE)) return -1; card->cmdrsp_buf = skb; return 0; }
Variant
0
mrb_realloc(mrb_state *mrb, void *p, size_t len) { void *p2; p2 = mrb_realloc_simple(mrb, p, len); if (len == 0) return p2; if (p2 == NULL) { mrb_free(mrb, p); mrb->gc.out_of_memory = TRUE; mrb_raise_nomemory(mrb); } else { mrb->gc.out_of_memory = FALSE; } return p2; }
Variant
0
gss_delete_sec_context (minor_status, context_handle, output_token) OM_uint32 * minor_status; gss_ctx_id_t * context_handle; gss_buffer_t output_token; { OM_uint32 status; gss_union_ctx_id_t ctx; status = val_del_sec_ctx_args(minor_status, context_handle, output_token); if (status != GSS_S_COMPLETE) return (status); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) *context_handle; if (GSSINT_CHK_LOOP(ctx)) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); status = gssint_delete_internal_sec_context(minor_status, ctx->mech_type, &ctx->internal_ctx_id, output_token); if (status) return status; /* now free up the space for the union context structure */ free(ctx->mech_type->elements); free(ctx->mech_type); free(*context_handle); *context_handle = GSS_C_NO_CONTEXT; return (GSS_S_COMPLETE); }
Variant
0
dump_threads(void) { FILE *fp; char time_buf[26]; element e; vrrp_t *vrrp; char *file_name; file_name = make_file_name("/tmp/thread_dump.dat", "vrrp", #if HAVE_DECL_CLONE_NEWNET global_data->network_namespace, #else NULL, #endif global_data->instance_name); fp = fopen(file_name, "a"); FREE(file_name); set_time_now(); ctime_r(&time_now.tv_sec, time_buf); fprintf(fp, "\n%.19s.%6.6ld: Thread dump\n", time_buf, time_now.tv_usec); dump_thread_data(master, fp); fprintf(fp, "alloc = %lu\n", master->alloc); fprintf(fp, "\n"); LIST_FOREACH(vrrp_data->vrrp, vrrp, e) { ctime_r(&vrrp->sands.tv_sec, time_buf); fprintf(fp, "VRRP instance %s, sands %.19s.%6.6lu, status %s\n", vrrp->iname, time_buf, vrrp->sands.tv_usec, vrrp->state == VRRP_STATE_INIT ? "INIT" : vrrp->state == VRRP_STATE_BACK ? "BACKUP" : vrrp->state == VRRP_STATE_MAST ? "MASTER" : vrrp->state == VRRP_STATE_FAULT ? "FAULT" : vrrp->state == VRRP_STATE_STOP ? "STOP" : vrrp->state == VRRP_DISPATCHER ? "DISPATCHER" : "unknown"); } fclose(fp); }
Base
1
ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, 0 /* is_async */); }
Base
1
static int sdp_parse_fmtp_config_h264(AVFormatContext *s, AVStream *stream, PayloadContext *h264_data, const char *attr, const char *value) { AVCodecParameters *par = stream->codecpar; if (!strcmp(attr, "packetization-mode")) { av_log(s, AV_LOG_DEBUG, "RTP Packetization Mode: %d\n", atoi(value)); h264_data->packetization_mode = atoi(value); /* * Packetization Mode: * 0 or not present: Single NAL mode (Only nals from 1-23 are allowed) * 1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed. * 2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), * and 29 (FU-B) are allowed. */ if (h264_data->packetization_mode > 1) av_log(s, AV_LOG_ERROR, "Interleaved RTP mode is not supported yet.\n"); } else if (!strcmp(attr, "profile-level-id")) { if (strlen(value) == 6) parse_profile_level_id(s, h264_data, value); } else if (!strcmp(attr, "sprop-parameter-sets")) { int ret; if (value[strlen(value) - 1] == ',') { av_log(s, AV_LOG_WARNING, "Missing PPS in sprop-parameter-sets, ignoring\n"); return 0; } par->extradata_size = 0; av_freep(&par->extradata); ret = ff_h264_parse_sprop_parameter_sets(s, &par->extradata, &par->extradata_size, value); av_log(s, AV_LOG_DEBUG, "Extradata set to %p (size: %d)\n", par->extradata, par->extradata_size); return ret; } return 0; }
Class
2
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); struct sock *sk; struct inet_sock *inet; __be32 daddr; if (ip_options_echo(&icmp_param->replyopts, skb)) return; sk = icmp_xmit_lock(net); if (sk == NULL) return; inet = inet_sk(sk); icmp_param->data.icmph.checksum = 0; inet->tos = ip_hdr(skb)->tos; daddr = ipc.addr = rt->rt_src; ipc.opt = NULL; ipc.tx_flags = 0; if (icmp_param->replyopts.optlen) { ipc.opt = &icmp_param->replyopts; if (ipc.opt->srr) daddr = icmp_param->replyopts.faddr; } { struct flowi4 fl4 = { .daddr = daddr, .saddr = rt->rt_spec_dst, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_proto = IPPROTO_ICMP, }; security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; } if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type, icmp_param->data.icmph.code)) icmp_push_reply(icmp_param, &ipc, &rt); ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); }
Class
2
SWFInput_readSBits(SWFInput input, int number) { int num = SWFInput_readBits(input, number); if ( num & (1<<(number-1)) ) return num - (1<<number); else return num; }
Base
1
static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ cfs_rq->runtime_remaining -= delta_exec; expire_cfs_rq_runtime(cfs_rq); if (likely(cfs_rq->runtime_remaining > 0)) return; /* * if we're unable to extend our runtime we resched so that the active * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) resched_curr(rq_of(cfs_rq)); }
Class
2
void gdImageGifCtx(gdImagePtr im, gdIOCtxPtr out) { gdImagePtr pim = 0, tim = im; int interlace, BitsPerPixel; interlace = im->interlace; if (im->trueColor) { /* Expensive, but the only way that produces an acceptable result: mix down to a palette based temporary image. */ pim = gdImageCreatePaletteFromTrueColor(im, 1, 256); if (!pim) { return; } tim = pim; } BitsPerPixel = colorstobpp(tim->colorsTotal); /* All set, let's do it. */ GIFEncode( out, tim->sx, tim->sy, tim->interlace, 0, tim->transparent, BitsPerPixel, tim->red, tim->green, tim->blue, tim); if (pim) { /* Destroy palette based temporary image. */ gdImageDestroy( pim); } }
Variant
0
service_info *FindServiceControlURLPath( service_table *table, const char *controlURLPath) { service_info *finger = NULL; uri_type parsed_url; uri_type parsed_url_in; if (table && parse_uri(controlURLPath, strlen(controlURLPath), &parsed_url_in) == HTTP_SUCCESS) { finger = table->serviceList; while (finger) { if (finger->controlURL) { if (parse_uri(finger->controlURL, strlen(finger->controlURL), &parsed_url) == HTTP_SUCCESS) { if (!token_cmp(&parsed_url.pathquery, &parsed_url_in.pathquery)) { return finger; } } } finger = finger->next; } } return NULL; }
Base
1
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; if (sk->sk_state & PPPOX_BOUND) { error = -EIO; goto end; } skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &error); if (error < 0) goto end; m->msg_namelen = 0; if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); if (error == 0) { consume_skb(skb); return total_len; } } kfree_skb(skb); end: return error; }
Class
2
_client_protocol_timeout (GsmXSMPClient *client) { g_debug ("GsmXSMPClient: client_protocol_timeout for client '%s' in ICE status %d", client->priv->description, IceConnectionStatus (client->priv->ice_connection)); gsm_client_set_status (GSM_CLIENT (client), GSM_CLIENT_FAILED); gsm_client_disconnected (GSM_CLIENT (client)); return FALSE; }
Base
1
void jpc_qmfb_split_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = splitbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int m; int hstartcol; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartcol = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartcol : (numrows - hstartcol); m = numrows - hstartcol; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += JPC_QMFB_COLGRPSIZE; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartcol * stride]; srcptr = buf; n = m; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += JPC_QMFB_COLGRPSIZE; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } }
Class
2
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int ret; int copylen; ret = -EOPNOTSUPP; if (m->msg_flags&MSG_OOB) goto read_error; m->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; copylen = skb->len; if (len < copylen) { m->msg_flags |= MSG_TRUNC; copylen = len; } ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); if (ret) goto out_free; ret = (flags & MSG_TRUNC) ? skb->len : copylen; out_free: skb_free_datagram(sk, skb); caif_check_flow_release(sk); return ret; read_error: return ret; }
Class
2
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int qhead; int err = 0; tu = file->private_data; unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; goto _error; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (tu->disconnected) { err = -ENODEV; goto _error; } if (signal_pending(current)) { err = -ERESTARTSYS; goto _error; } } qhead = tu->qhead++; tu->qhead %= tu->queue_size; tu->qused--; spin_unlock_irq(&tu->qlock); mutex_lock(&tu->ioctl_lock); if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[qhead], sizeof(struct snd_timer_tread))) err = -EFAULT; } else { if (copy_to_user(buffer, &tu->queue[qhead], sizeof(struct snd_timer_read))) err = -EFAULT; } mutex_unlock(&tu->ioctl_lock); spin_lock_irq(&tu->qlock); if (err < 0) goto _error; result += unit; buffer += unit; } _error: spin_unlock_irq(&tu->qlock); return result > 0 ? result : err; }
Class
2
static int usb_enumerate_device_otg(struct usb_device *udev) { int err = 0; #ifdef CONFIG_USB_OTG /* * OTG-aware devices on OTG-capable root hubs may be able to use SRP, * to wake us after we've powered off VBUS; and HNP, switching roles * "host" to "peripheral". The OTG descriptor helps figure this out. */ if (!udev->bus->is_b_host && udev->config && udev->parent == udev->bus->root_hub) { struct usb_otg_descriptor *desc = NULL; struct usb_bus *bus = udev->bus; unsigned port1 = udev->portnum; /* descriptor may appear anywhere in config */ err = __usb_get_extra_descriptor(udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), USB_DT_OTG, (void **) &desc); if (err || !(desc->bmAttributes & USB_OTG_HNP)) return 0; dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n", (port1 == bus->otg_port) ? "" : "non-"); /* enable HNP before suspend, it's simpler */ if (port1 == bus->otg_port) { bus->b_hnp_enable = 1; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_B_HNP_ENABLE, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) { /* * OTG MESSAGE: report errors here, * customize to match your product. */ dev_err(&udev->dev, "can't set HNP mode: %d\n", err); bus->b_hnp_enable = 0; } } else if (desc->bLength == sizeof (struct usb_otg_descriptor)) { /* Set a_alt_hnp_support for legacy otg device */ err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_A_ALT_HNP_SUPPORT, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) dev_err(&udev->dev, "set a_alt_hnp_support failed: %d\n", err); } } #endif return err; }
Class
2
process_plane(uint8 * in, int width, int height, uint8 * out, int size) { UNUSED(size); int indexw; int indexh; int code; int collen; int replen; int color; int x; int revcode; uint8 * last_line; uint8 * this_line; uint8 * org_in; uint8 * org_out; org_in = in; org_out = out; last_line = 0; indexh = 0; while (indexh < height) { out = (org_out + width * height * 4) - ((indexh + 1) * width * 4); color = 0; this_line = out; indexw = 0; if (last_line == 0) { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { color = CVAL(in); *out = color; out += 4; indexw++; collen--; } while (replen > 0) { *out = color; out += 4; indexw++; replen--; } } } else { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { x = CVAL(in); if (x & 1) { x = x >> 1; x = x + 1; color = -x; } else { x = x >> 1; color = x; } x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; collen--; } while (replen > 0) { x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; replen--; } } } indexh++; last_line = this_line; } return (int) (in - org_in); }
Base
1
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
    struct unix_sock *u = unix_sk(sk);

    msg->msg_namelen = 0;
    if (u->addr) {
        msg->msg_namelen = u->addr->len;
        memcpy(msg->msg_name, u->addr->name, u->addr->len);
    }
}
Class
2
void esp32EthDisableIrq(NetInterface *interface)
{
    //Valid Ethernet PHY or switch driver?
    if(interface->phyDriver != NULL)
    {
        //Disable Ethernet PHY interrupts
        interface->phyDriver->disableIrq(interface);
    }
    else if(interface->switchDriver != NULL)
    {
        //Disable Ethernet switch interrupts
        interface->switchDriver->disableIrq(interface);
    }
    else
    {
        //Just for sanity
    }
}
Class
2
get_html_data (MAPI_Attr *a)
{
    VarLenData **body = XCALLOC(VarLenData*, a->num_values + 1);
    int j;

    for (j = 0; j < a->num_values; j++)
    {
        body[j] = XMALLOC(VarLenData, 1);

        body[j]->len = a->values[j].len;
        body[j]->data = CHECKED_XCALLOC(unsigned char, a->values[j].len);
        memmove (body[j]->data, a->values[j].data.buf, body[j]->len);
    }
    return body;
}
Base
1
obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena)
{
    PyObject* tmp = NULL;
    identifier arg;
    expr_ty value;

    if (exists_not_none(obj, &PyId_arg)) {
        int res;
        tmp = _PyObject_GetAttrId(obj, &PyId_arg);
        if (tmp == NULL) goto failed;
        res = obj2ast_identifier(tmp, &arg, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    } else {
        arg = NULL;
    }
    if (_PyObject_HasAttrId(obj, &PyId_value)) {
        int res;
        tmp = _PyObject_GetAttrId(obj, &PyId_value);
        if (tmp == NULL) goto failed;
        res = obj2ast_expr(tmp, &value, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    } else {
        PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from keyword");
        return 1;
    }
    *out = keyword(arg, value, arena);
    return 0;
failed:
    Py_XDECREF(tmp);
    return 1;
}
Base
1
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
    if (prev->se.on_rq)
        update_rq_clock(rq);
    rq->skip_clock_update = 0;
    prev->sched_class->put_prev_task(rq, prev);
}
Base
1
isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length, u_int caplen)
{
    if (caplen <= 1) { /* enough bytes on the wire ? */
        ND_PRINT((ndo, "|OSI"));
        return;
    }

    if (ndo->ndo_eflag)
        ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ",
                  tok2str(nlpid_values, "Unknown", *p), *p));

    switch (*p) {

    case NLPID_CLNP:
        if (!clnp_print(ndo, p, length))
            print_unknown_data(ndo, p, "\n\t", caplen);
        break;

    case NLPID_ESIS:
        esis_print(ndo, p, length);
        return;

    case NLPID_ISIS:
        if (!isis_print(ndo, p, length))
            print_unknown_data(ndo, p, "\n\t", caplen);
        break;

    case NLPID_NULLNS:
        ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length));
        break;

    case NLPID_Q933:
        q933_print(ndo, p + 1, length - 1);
        break;

    case NLPID_IP:
        ip_print(ndo, p + 1, length - 1);
        break;

    case NLPID_IP6:
        ip6_print(ndo, p + 1, length - 1);
        break;

    case NLPID_PPP:
        ppp_print(ndo, p + 1, length - 1);
        break;

    default:
        if (!ndo->ndo_eflag)
            ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p));
        ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length));
        if (caplen > 1)
            print_unknown_data(ndo, p, "\n\t", caplen);
        break;
    }
}
Base
1
static int handle_pte_fault(struct mm_struct *mm,
            struct vm_area_struct *vma, unsigned long address,
            pte_t *pte, pmd_t *pmd, unsigned int flags)
{
    pte_t entry;
    spinlock_t *ptl;

    /*
     * some architectures can have larger ptes than wordsize,
     * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
     * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
     * The code below just needs a consistent view for the ifs and
     * we later double check anyway with the ptl lock held. So here
     * a barrier will do.
     */
    entry = *pte;
    barrier();
    if (!pte_present(entry)) {
        if (pte_none(entry)) {
            if (vma->vm_ops) {
                if (likely(vma->vm_ops->fault))
                    return do_fault(mm, vma, address, pte,
                            pmd, flags, entry);
            }
            return do_anonymous_page(mm, vma, address,
                         pte, pmd, flags);
        }
        return do_swap_page(mm, vma, address,
                    pte, pmd, flags, entry);
    }

    if (pte_protnone(entry))
        return do_numa_page(mm, vma, address, entry, pte, pmd);

    ptl = pte_lockptr(mm, pmd);
    spin_lock(ptl);
    if (unlikely(!pte_same(*pte, entry)))
        goto unlock;
    if (flags & FAULT_FLAG_WRITE) {
        if (!pte_write(entry))
            return do_wp_page(mm, vma, address,
                    pte, pmd, ptl, entry);
        entry = pte_mkdirty(entry);
    }
    entry = pte_mkyoung(entry);
    if (ptep_set_access_flags(vma, address, pte, entry,
                  flags & FAULT_FLAG_WRITE)) {
        update_mmu_cache(vma, address, pte);
    } else {
        /*
         * This is needed only for protection faults but the arch code
         * is not yet telling us if this is a protection fault or not.
         * This still avoids useless tlb flushes for .text page faults
         * with threads.
         */
        if (flags & FAULT_FLAG_WRITE)
            flush_tlb_fix_spurious_fault(vma, address);
    }
unlock:
    pte_unmap_unlock(pte, ptl);
    return 0;
}
Class
2
static Jsi_RC SysGetEnvCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,
    Jsi_Value **ret, Jsi_Func *funcPtr)
{
    extern char **environ;
    char *cp;
    int i;
    if (interp->isSafe)
        return Jsi_LogError("no getenv in safe mode");
    Jsi_Value *v = Jsi_ValueArrayIndex(interp, args, 0);
    if (v != NULL) {
        const char *fnam = Jsi_ValueString(interp, v, NULL);
        if (!fnam)
            return Jsi_LogError("arg1: expected string 'name'");
        cp = getenv(fnam);
        if (cp != NULL) {
            Jsi_ValueMakeStringDup(interp, ret, cp);
        }
        return JSI_OK;
    }

    /* Single object containing result members. */
    Jsi_Value *vres;
    Jsi_Obj *ores = Jsi_ObjNew(interp);
    Jsi_Value *nnv;
    char *val, nam[200];
    //Jsi_ObjIncrRefCount(interp, ores);
    vres = Jsi_ValueMakeObject(interp, NULL, ores);
    //Jsi_IncrRefCount(interp, vres);
    for (i=0; ; i++) {
        int n;
        cp = environ[i];
        if (cp == 0 || ((val = Jsi_Strchr(cp, '='))==NULL))
            break;
        n = val-cp+1;
        if (n>=(int)sizeof(nam))
            n = sizeof(nam)-1;
        Jsi_Strncpy(nam, cp, n);
        val = val+1;
        nnv = Jsi_ValueMakeStringDup(interp, NULL, val);
        Jsi_ObjInsert(interp, ores, nam, nnv, 0);
    }
    Jsi_ValueReplace(interp, ret, vres);
    return JSI_OK;
}
Base
1
static void iwjpeg_scan_exif(struct iwjpegrcontext *rctx,
    const iw_byte *d, size_t d_len)
{
    struct iw_exif_state e;
    iw_uint32 ifd;

    if(d_len<8) return;

    iw_zeromem(&e,sizeof(struct iw_exif_state));
    e.d = d;
    e.d_len = d_len;

    e.endian = d[0]=='I' ? IW_ENDIAN_LITTLE : IW_ENDIAN_BIG;

    ifd = iw_get_ui32_e(&d[4],e.endian);

    iwjpeg_scan_exif_ifd(rctx,&e,ifd);
}
Base
1
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                 unsigned long size, bool kernel, bool pinned,
                 struct virtio_gpu_object **bo_ptr)
{
    struct virtio_gpu_object *bo;
    enum ttm_bo_type type;
    size_t acc_size;
    int ret;

    if (kernel)
        type = ttm_bo_type_kernel;
    else
        type = ttm_bo_type_device;
    *bo_ptr = NULL;

    acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
                       sizeof(struct virtio_gpu_object));

    bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;
    size = roundup(size, PAGE_SIZE);
    ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
    if (ret != 0)
        return ret;
    bo->dumb = false;
    virtio_gpu_init_ttm_placement(bo, pinned);

    ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
              &bo->placement, 0, !kernel, NULL, acc_size,
              NULL, NULL, &virtio_gpu_ttm_bo_destroy);
    /* ttm_bo_init failure will call the destroy */
    if (ret != 0)
        return ret;

    *bo_ptr = bo;
    return 0;
}
Base
1
mcs_recv_connect_response(STREAM mcs_data)
{
    UNUSED(mcs_data);
    uint8 result;
    int length;
    STREAM s;
    RD_BOOL is_fastpath;
    uint8 fastpath_hdr;

    logger(Protocol, Debug, "%s()", __func__);

    s = iso_recv(&is_fastpath, &fastpath_hdr);

    if (s == NULL)
        return False;

    ber_parse_header(s, MCS_CONNECT_RESPONSE, &length);

    ber_parse_header(s, BER_TAG_RESULT, &length);
    in_uint8(s, result);
    if (result != 0)
    {
        logger(Protocol, Error, "mcs_recv_connect_response(), result=%d", result);
        return False;
    }

    ber_parse_header(s, BER_TAG_INTEGER, &length);
    in_uint8s(s, length);   /* connect id */
    mcs_parse_domain_params(s);

    ber_parse_header(s, BER_TAG_OCTET_STRING, &length);

    sec_process_mcs_data(s);
    /*
       if (length > mcs_data->size)
       {
           logger(Protocol, Error, "mcs_recv_connect_response(), expected length=%d, got %d",
                  length, mcs_data->size);
           length = mcs_data->size;
       }

       in_uint8a(s, mcs_data->data, length);
       mcs_data->p = mcs_data->data;
       mcs_data->end = mcs_data->data + length;
     */
    return s_check_end(s);
}
Base
1
_prolog_error(batch_job_launch_msg_t *req, int rc)
{
    char *err_name_ptr, err_name[256], path_name[MAXPATHLEN];
    char *fmt_char;
    int fd;

    if (req->std_err || req->std_out) {
        if (req->std_err)
            strncpy(err_name, req->std_err, sizeof(err_name));
        else
            strncpy(err_name, req->std_out, sizeof(err_name));
        if ((fmt_char = strchr(err_name, (int) '%')) &&
            (fmt_char[1] == 'j') && !strchr(fmt_char+1, (int) '%')) {
            char tmp_name[256];
            fmt_char[1] = 'u';
            snprintf(tmp_name, sizeof(tmp_name), err_name, req->job_id);
            strncpy(err_name, tmp_name, sizeof(err_name));
        }
    } else {
        snprintf(err_name, sizeof(err_name), "slurm-%u.out", req->job_id);
    }
    err_name_ptr = err_name;
    if (err_name_ptr[0] == '/')
        snprintf(path_name, MAXPATHLEN, "%s", err_name_ptr);
    else if (req->work_dir)
        snprintf(path_name, MAXPATHLEN, "%s/%s",
             req->work_dir, err_name_ptr);
    else
        snprintf(path_name, MAXPATHLEN, "/%s", err_name_ptr);

    if ((fd = open(path_name, (O_CREAT|O_APPEND|O_WRONLY), 0644)) == -1) {
        error("Unable to open %s: %s",
              path_name, slurm_strerror(errno));
        return;
    }
    snprintf(err_name, sizeof(err_name),
         "Error running slurm prolog: %d\n", WEXITSTATUS(rc));
    safe_write(fd, err_name, strlen(err_name));
    if (fchown(fd, (uid_t) req->uid, (gid_t) req->gid) == -1) {
        snprintf(err_name, sizeof(err_name),
             "Couldn't change fd owner to %u:%u: %m\n",
             req->uid, req->gid);
    }
rwfail:
    close(fd);
}
Pillar
3
PJ_DEF(pj_status_t) pjmedia_rtcp_fb_parse_rpsi(
                    const void *buf,
                    pj_size_t length,
                    pjmedia_rtcp_fb_rpsi *rpsi)
{
    pjmedia_rtcp_common *hdr = (pjmedia_rtcp_common*) buf;
    pj_uint8_t *p;
    pj_uint8_t padlen;
    pj_size_t rpsi_len;

    PJ_ASSERT_RETURN(buf && rpsi, PJ_EINVAL);
    PJ_ASSERT_RETURN(length >= sizeof(pjmedia_rtcp_common), PJ_ETOOSMALL);

    /* RPSI uses pt==RTCP_PSFB and FMT==3 */
    if (hdr->pt != RTCP_PSFB || hdr->count != 3)
        return PJ_ENOTFOUND;

    rpsi_len = (pj_ntohs((pj_uint16_t)hdr->length)-2) * 4;
    if (length < rpsi_len + 12)
        return PJ_ETOOSMALL;

    p = (pj_uint8_t*)hdr + sizeof(*hdr);
    padlen = *p++;
    rpsi->pt = (*p++ & 0x7F);
    rpsi->rpsi_bit_len = rpsi_len*8 - 16 - padlen;
    pj_strset(&rpsi->rpsi, (char*)p, (rpsi->rpsi_bit_len + 7)/8);

    return PJ_SUCCESS;
}
Base
1
cJSON *cJSON_CreateObject( void )
{
    cJSON *item = cJSON_New_Item();
    if ( item )
        item->type = cJSON_Object;
    return item;
}
Base
1
_Unpickler_MemoPut(UnpicklerObject *self, Py_ssize_t idx, PyObject *value)
{
    PyObject *old_item;

    if (idx >= self->memo_size) {
        if (_Unpickler_ResizeMemoList(self, idx * 2) < 0)
            return -1;
        assert(idx < self->memo_size);
    }
    Py_INCREF(value);
    old_item = self->memo[idx];
    self->memo[idx] = value;
    if (old_item != NULL) {
        Py_DECREF(old_item);
    }
    else {
        self->memo_len++;
    }
    return 0;
}
Base
1
static MagickPixelPacket **AcquirePixelThreadSet(const Image *images)
{
    const Image *next;

    MagickPixelPacket **pixels;

    register ssize_t i, j;

    size_t columns, number_threads;

    number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
    pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
        sizeof(*pixels));
    if (pixels == (MagickPixelPacket **) NULL)
        return((MagickPixelPacket **) NULL);
    (void) memset(pixels,0,number_threads*sizeof(*pixels));
    columns=images->columns;
    for (next=images; next != (Image *) NULL; next=next->next)
        columns=MagickMax(next->columns,columns);
    for (i=0; i < (ssize_t) number_threads; i++)
    {
        pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns,
            sizeof(**pixels));
        if (pixels[i] == (MagickPixelPacket *) NULL)
            return(DestroyPixelThreadSet(pixels));
        for (j=0; j < (ssize_t) columns; j++)
            GetMagickPixelPacket(images,&pixels[i][j]);
    }
    return(pixels);
}
Base
1
static u8 BS_ReadByte(GF_BitStream *bs)
{
    Bool is_eos;
    if (bs->bsmode == GF_BITSTREAM_READ) {
        u8 res;
        if (bs->position >= bs->size) {
            if (bs->EndOfStream) bs->EndOfStream(bs->par);
            if (!bs->overflow_state) bs->overflow_state = 1;
            return 0;
        }
        res = bs->original[bs->position++];
        if (bs->remove_emul_prevention_byte) {
            if ((bs->nb_zeros==2) && (res==0x03) && (bs->position<bs->size) && (bs->original[bs->position]<0x04)) {
                bs->nb_zeros = 0;
                res = bs->original[bs->position++];
            }
            if (!res) bs->nb_zeros++;
            else bs->nb_zeros = 0;
        }
        return res;
    }
    if (bs->cache_write)
        bs_flush_write_cache(bs);

    is_eos = gf_feof(bs->stream);
    /*we are in FILE mode, test for end of file*/
    if (!is_eos || bs->cache_read) {
        u8 res;
        Bool loc_eos=GF_FALSE;
        assert(bs->position<=bs->size);
        bs->position++;
        res = gf_bs_load_byte(bs, &loc_eos);
        if (loc_eos) goto bs_eof;

        if (bs->remove_emul_prevention_byte) {
            if ((bs->nb_zeros==2) && (res==0x03) && (bs->position<bs->size)) {
                u8 next = gf_bs_load_byte(bs, &loc_eos);
                if (next < 0x04) {
                    bs->nb_zeros = 0;
                    res = next;
                    bs->position++;
                } else {
                    gf_bs_seek(bs, bs->position);
                }
            }
            if (!res) bs->nb_zeros++;
            else bs->nb_zeros = 0;
        }
        return res;
    }

bs_eof:
    if (bs->EndOfStream) {
        bs->EndOfStream(bs->par);
        if (!bs->overflow_state) bs->overflow_state = 1;
    } else {
        GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[BS] Attempt to overread bitstream\n"));
    }
    assert(bs->position <= 1+bs->size);
    return 0;
}
Base
1
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
    /* through the first node_set .parent
     * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
    return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}
Base
1
void * pvPortMalloc( size_t xWantedSize )
{
    void * pvReturn = NULL;
    static uint8_t * pucAlignedHeap = NULL;

    /* Ensure that blocks are always aligned to the required number of bytes. */
    #if ( portBYTE_ALIGNMENT != 1 )
    {
        if( xWantedSize & portBYTE_ALIGNMENT_MASK )
        {
            /* Byte alignment required. */
            xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
        }
    }
    #endif

    vTaskSuspendAll();
    {
        if( pucAlignedHeap == NULL )
        {
            /* Ensure the heap starts on a correctly aligned boundary. */
            pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
        }

        /* Check there is enough room left for the allocation. */
        if( ( ( xNextFreeByte + xWantedSize ) < configADJUSTED_HEAP_SIZE ) &&
            ( ( xNextFreeByte + xWantedSize ) > xNextFreeByte ) ) /* Check for overflow. */
        {
            /* Return the next free byte then increment the index past this
             * block. */
            pvReturn = pucAlignedHeap + xNextFreeByte;
            xNextFreeByte += xWantedSize;
        }

        traceMALLOC( pvReturn, xWantedSize );
    }
    ( void ) xTaskResumeAll();

    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
    {
        if( pvReturn == NULL )
        {
            extern void vApplicationMallocFailedHook( void );
            vApplicationMallocFailedHook();
        }
    }
    #endif

    return pvReturn;
}
Class
2
int dsOpen(void) {
    struct stat sb;
    int retval;
    char *path = server.diskstore_path;

    if ((retval = stat(path,&sb) == -1) && errno != ENOENT) {
        redisLog(REDIS_WARNING, "Error opening disk store at %s: %s",
            path, strerror(errno));
        return REDIS_ERR;
    }

    /* Directory already in place. Assume everything is ok. */
    if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK;

    /* File exists but it's not a directory */
    if (retval == 0 && !S_ISDIR(sb.st_mode)) {
        redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path);
        return REDIS_ERR;
    }

    /* New disk store, create the directory structure now, as creating
     * them in a lazy way is not a good idea, after very few insertions
     * we'll need most of the 65536 directories anyway. */
    if (mkdir(path) == -1) {
        redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s",
            path, strerror(errno));
        return REDIS_ERR;
    }
    return REDIS_OK;
}
Class
2
gsm_xsmp_client_disconnect (GsmXSMPClient *client)
{
    if (client->priv->watch_id > 0) {
        g_source_remove (client->priv->watch_id);
    }

    if (client->priv->conn != NULL) {
        SmsCleanUp (client->priv->conn);
    }

    if (client->priv->ice_connection != NULL) {
        IceSetShutdownNegotiation (client->priv->ice_connection, FALSE);
        IceCloseConnection (client->priv->ice_connection);
    }

    if (client->priv->protocol_timeout > 0) {
        g_source_remove (client->priv->protocol_timeout);
    }
}
Base
1
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    int v, i;

    if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
        if (length > 256 || !(s->state & PNG_PLTE))
            return AVERROR_INVALIDDATA;

        for (i = 0; i < length; i++) {
            v = bytestream2_get_byte(&s->gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
        }
    } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
        if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
            (s->color_type == PNG_COLOR_TYPE_RGB && length != 6))
            return AVERROR_INVALIDDATA;

        for (i = 0; i < length / 2; i++) {
            /* only use the least significant bits */
            v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);

            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
            else
                s->transparent_color_be[i] = v;
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&s->gb, 4); /* crc */
    s->has_trns = 1;

    return 0;
}
Base
1
static int print_media_desc(const pjmedia_sdp_media *m, char *buf, pj_size_t len)
{
    char *p = buf;
    char *end = buf+len;
    unsigned i;
    int printed;

    /* check length for the "m=" line. */
    if (len < (pj_size_t)m->desc.media.slen+m->desc.transport.slen+12+24) {
        return -1;
    }
    *p++ = 'm';     /* m= */
    *p++ = '=';
    pj_memcpy(p, m->desc.media.ptr, m->desc.media.slen);
    p += m->desc.media.slen;
    *p++ = ' ';
    printed = pj_utoa(m->desc.port, p);
    p += printed;
    if (m->desc.port_count > 1) {
        *p++ = '/';
        printed = pj_utoa(m->desc.port_count, p);
        p += printed;
    }
    *p++ = ' ';
    pj_memcpy(p, m->desc.transport.ptr, m->desc.transport.slen);
    p += m->desc.transport.slen;
    for (i=0; i<m->desc.fmt_count; ++i) {
        *p++ = ' ';
        pj_memcpy(p, m->desc.fmt[i].ptr, m->desc.fmt[i].slen);
        p += m->desc.fmt[i].slen;
    }
    *p++ = '\r';
    *p++ = '\n';

    /* print connection info, if present. */
    if (m->conn) {
        printed = print_connection_info(m->conn, p, (int)(end-p));
        if (printed < 0) {
            return -1;
        }
        p += printed;
    }

    /* print optional bandwidth info. */
    for (i=0; i<m->bandw_count; ++i) {
        printed = (int)print_bandw(m->bandw[i], p, end-p);
        if (printed < 0) {
            return -1;
        }
        p += printed;
    }

    /* print attributes. */
    for (i=0; i<m->attr_count; ++i) {
        printed = (int)print_attr(m->attr[i], p, end-p);
        if (printed < 0) {
            return -1;
        }
        p += printed;
    }

    return (int)(p-buf);
}
Base
1
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
    struct inet_sock *inet = inet_sk(sk);
    struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
    struct net *net = sock_net(sk);
    int ret;
    int chk_addr_ret;

    if (!sock_flag(sk, SOCK_ZAPPED))
        return -EINVAL;
    if (addr_len < sizeof(struct sockaddr_l2tpip))
        return -EINVAL;
    if (addr->l2tp_family != AF_INET)
        return -EINVAL;

    ret = -EADDRINUSE;
    read_lock_bh(&l2tp_ip_lock);
    if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
                  sk->sk_bound_dev_if, addr->l2tp_conn_id))
        goto out_in_use;

    read_unlock_bh(&l2tp_ip_lock);

    lock_sock(sk);
    if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
        goto out;

    chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
    ret = -EADDRNOTAVAIL;
    if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
        chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
        goto out;

    if (addr->l2tp_addr.s_addr)
        inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
    if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
        inet->inet_saddr = 0;  /* Use device */
    sk_dst_reset(sk);

    l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

    write_lock_bh(&l2tp_ip_lock);
    sk_add_bind_node(sk, &l2tp_ip_bind_table);
    sk_del_node_init(sk);
    write_unlock_bh(&l2tp_ip_lock);

    ret = 0;
    sock_reset_flag(sk, SOCK_ZAPPED);

out:
    release_sock(sk);

    return ret;

out_in_use:
    read_unlock_bh(&l2tp_ip_lock);

    return ret;
}
Class
2
apr_byte_t oidc_cache_set(request_rec *r, const char *section, const char *key,
        const char *value, apr_time_t expiry) {

    oidc_cfg *cfg = ap_get_module_config(r->server->module_config,
            &auth_openidc_module);
    int encrypted = oidc_cfg_cache_encrypt(r);
    char *encoded = NULL;
    apr_byte_t rc = FALSE;
    char *msg = NULL;

    oidc_debug(r,
            "enter: %s (section=%s, len=%d, encrypt=%d, ttl(s)=%" APR_TIME_T_FMT ", type=%s)",
            key, section, value ? (int )strlen(value) : 0, encrypted,
            apr_time_sec(expiry - apr_time_now()), cfg->cache->name);

    /* see if we need to encrypt */
    if (encrypted == 1) {

        key = oidc_cache_get_hashed_key(r, cfg->crypto_passphrase, key);
        if (key == NULL)
            goto out;

        if (value != NULL) {
            if (oidc_cache_crypto_encrypt(r, value,
                    oidc_cache_hash_passphrase(r, cfg->crypto_passphrase),
                    &encoded) <= 0)
                goto out;
            value = encoded;
        }
    }

    /* store the resulting value in the cache */
    rc = cfg->cache->set(r, section, key, value, expiry);

out:

    /* log the result */
    msg = apr_psprintf(r->pool, "%d bytes in %s cache backend for %skey %s",
            (value ? (int) strlen(value) : 0),
            (cfg->cache->name ? cfg->cache->name : ""),
            (encrypted ? "encrypted " : ""), (key ? key : ""));
    if (rc == TRUE)
        oidc_debug(r, "successfully stored %s", msg);
    else
        oidc_warn(r, "could NOT store %s", msg);

    return rc;
}
Class
2
ber_parse_header(STREAM s, int tagval, int *length)
{
    int tag, len;

    if (tagval > 0xff)
    {
        in_uint16_be(s, tag);
    }
    else
    {
        in_uint8(s, tag);
    }

    if (tag != tagval)
    {
        logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag);
        return False;
    }

    in_uint8(s, len);

    if (len & 0x80)
    {
        len &= ~0x80;
        *length = 0;
        while (len--)
            next_be(s, *length);
    }
    else
        *length = len;

    return s_check(s);
}
Base
1
static inline bool is_flush_request(struct request *rq,
                    struct blk_flush_queue *fq, unsigned int tag)
{
    return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
            fq->flush_rq->tag == tag);
}
Class
2
new_identifier(const char *n, struct compiling *c)
{
    PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL);
    if (!id)
        return NULL;
    /* PyUnicode_DecodeUTF8 should always return a ready string. */
    assert(PyUnicode_IS_READY(id));
    /* Check whether there are non-ASCII characters in the
       identifier; if so, normalize to NFKC. */
    if (!PyUnicode_IS_ASCII(id)) {
        PyObject *id2;
        if (!c->c_normalize && !init_normalization(c)) {
            Py_DECREF(id);
            return NULL;
        }
        PyTuple_SET_ITEM(c->c_normalize_args, 1, id);
        id2 = PyObject_Call(c->c_normalize, c->c_normalize_args, NULL);
        Py_DECREF(id);
        if (!id2)
            return NULL;
        id = id2;
    }
    PyUnicode_InternInPlace(&id);
    if (PyArena_AddPyObject(c->c_arena, id) < 0) {
        Py_DECREF(id);
        return NULL;
    }
    return id;
}
Base
1