Columns:
code: string, lengths 23 to 2.05k
label_name: string, lengths 6 to 7
label: int64, values 0 to 37
static void *__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { if (dev == NULL) { WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); return NULL; } if (IS_ENABLED(CONFIG_ZONE_DMA) && dev->coherent_dma_mask <= DMA_BIT_MASK(32)) flags |= GFP_DMA; if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { struct page *page; void *addr; size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, get_order(size)); if (!page) return NULL; *dma_handle = phys_to_dma(dev, page_to_phys(page)); addr = page_address(page); if (flags & __GFP_ZERO) memset(addr, 0, size); return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); } }
CWE-200
10
static int update_discovery_filter(struct btd_adapter *adapter) { struct mgmt_cp_start_service_discovery *sd_cp; GSList *l; DBG(""); if (discovery_filter_to_mgmt_cp(adapter, &sd_cp)) { btd_error(adapter->dev_id, "discovery_filter_to_mgmt_cp returned error"); return -ENOMEM; } for (l = adapter->discovery_list; l; l = g_slist_next(l)) { struct discovery_client *client = l->data; if (!client->discovery_filter) continue; if (client->discovery_filter->discoverable) break; } set_discovery_discoverable(adapter, l ? true : false); /* * If filters are equal, then don't update scan, except for when * starting discovery. */ if (filters_equal(adapter->current_discovery_filter, sd_cp) && adapter->discovering != 0) { DBG("filters were equal, deciding to not restart the scan."); g_free(sd_cp); return 0; } g_free(adapter->current_discovery_filter); adapter->current_discovery_filter = sd_cp; trigger_start_discovery(adapter, 0); return -EINPROGRESS; }
CWE-863
11
header_put_be_3byte (SF_PRIVATE *psf, int x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 3) { psf->header [psf->headindex++] = (x >> 16) ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = x ; } ; } /* header_put_be_3byte */
CWE-119
26
Sfdouble_t sh_strnum(Shell_t *shp, const char *str, char **ptr, int mode) { Sfdouble_t d; char *last; if (*str == 0) { if (ptr) *ptr = (char *)str; return 0; } errno = 0; d = number(str, &last, shp->inarith ? 0 : 10, NULL); if (*last) { if (*last != '.' || last[1] != '.') { d = strval(shp, str, &last, arith, mode); Varsubscript = true; } if (!ptr && *last && mode > 0) errormsg(SH_DICT, ERROR_exit(1), e_lexbadchar, *last, str); } else if (!d && *str == '-') { d = -0.0; } if (ptr) *ptr = last; return d; }
CWE-77
14
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; req->started = false; list_del(&req->list); req->remaining = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; if (req->trb) usb_gadget_unmap_request_by_dev(dwc->sysdev, &req->request, req->direction); req->trb = NULL; trace_dwc3_gadget_giveback(req); spin_unlock(&dwc->lock); usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); if (dep->number > 1) pm_runtime_put(dwc->dev); }
CWE-667
27
sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */
CWE-119
26
void traverse_commit_list(struct rev_info *revs, show_commit_fn show_commit, show_object_fn show_object, void *data) { int i; struct commit *commit; struct strbuf base; strbuf_init(&base, PATH_MAX); while ((commit = get_revision(revs)) != NULL) { /* * an uninteresting boundary commit may not have its tree * parsed yet, but we are not going to show them anyway */ if (commit->tree) add_pending_tree(revs, commit->tree); show_commit(commit, data); } for (i = 0; i < revs->pending.nr; i++) { struct object_array_entry *pending = revs->pending.objects + i; struct object *obj = pending->item; const char *name = pending->name; const char *path = pending->path; if (obj->flags & (UNINTERESTING | SEEN)) continue; if (obj->type == OBJ_TAG) { obj->flags |= SEEN; show_object(obj, NULL, name, data); continue; } if (!path) path = ""; if (obj->type == OBJ_TREE) { process_tree(revs, (struct tree *)obj, show_object, &base, path, data); continue; } if (obj->type == OBJ_BLOB) { process_blob(revs, (struct blob *)obj, show_object, NULL, path, data); continue; } die("unknown pending object %s (%s)", oid_to_hex(&obj->oid), name); } object_array_clear(&revs->pending); strbuf_release(&base); }
CWE-119
26
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) { struct futex_hash_bucket *hb; get_futex_key_refs(&q->key); hb = hash_futex(&q->key); q->lock_ptr = &hb->lock; spin_lock(&hb->lock); return hb; }
CWE-119
26
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; }
CWE-20
0
static int mxf_read_primer_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset) { MXFContext *mxf = arg; int item_num = avio_rb32(pb); int item_len = avio_rb32(pb); if (item_len != 18) { avpriv_request_sample(pb, "Primer pack item length %d", item_len); return AVERROR_PATCHWELCOME; } if (item_num > 65536) { av_log(mxf->fc, AV_LOG_ERROR, "item_num %d is too large\n", item_num); return AVERROR_INVALIDDATA; } if (mxf->local_tags) av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple primer packs\n"); av_free(mxf->local_tags); mxf->local_tags_count = 0; mxf->local_tags = av_calloc(item_num, item_len); if (!mxf->local_tags) return AVERROR(ENOMEM); mxf->local_tags_count = item_num; avio_read(pb, mxf->local_tags, item_num*item_len); return 0; }
CWE-20
0
static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); uint32_t plane_checksum[4] = {0}, checksum = 0; int i, plane, vsub = desc->log2_chroma_h; for (plane = 0; plane < 4 && frame->data[plane]; plane++) { int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane); uint8_t *data = frame->data[plane]; int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h; if (linesize < 0) return linesize; for (i = 0; i < h; i++) { plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize); checksum = av_adler32_update(checksum, data, linesize); data += frame->linesize[plane]; } } av_log(ctx, AV_LOG_INFO, "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" " "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c " "checksum:%08X plane_checksum:[%08X", inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame), desc->name, frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den, frame->width, frame->height, !frame->interlaced_frame ? 'P' : /* Progressive */ frame->top_field_first ? 'T' : 'B', /* Top / Bottom */ frame->key_frame, av_get_picture_type_char(frame->pict_type), checksum, plane_checksum[0]); for (plane = 1; plane < 4 && frame->data[plane]; plane++) av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]); av_log(ctx, AV_LOG_INFO, "]\n"); return ff_filter_frame(inlink->dst->outputs[0], frame); }
CWE-119
26
static gboolean irssi_ssl_verify(SSL *ssl, SSL_CTX *ctx, X509 *cert) { if (SSL_get_verify_result(ssl) != X509_V_OK) { unsigned char md[EVP_MAX_MD_SIZE]; unsigned int n; char *str; g_warning("Could not verify SSL servers certificate:"); if ((str = X509_NAME_oneline(X509_get_subject_name(cert), 0, 0)) == NULL) g_warning(" Could not get subject-name from peer certificate"); else { g_warning(" Subject : %s", str); free(str); } if ((str = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0)) == NULL) g_warning(" Could not get issuer-name from peer certificate"); else { g_warning(" Issuer : %s", str); free(str); } if (! X509_digest(cert, EVP_md5(), md, &n)) g_warning(" Could not get fingerprint from peer certificate"); else { char hex[] = "0123456789ABCDEF"; char fp[EVP_MAX_MD_SIZE*3]; if (n < sizeof(fp)) { unsigned int i; for (i = 0; i < n; i++) { fp[i*3+0] = hex[(md[i] >> 4) & 0xF]; fp[i*3+1] = hex[(md[i] >> 0) & 0xF]; fp[i*3+2] = i == n - 1 ? '\0' : ':'; } g_warning(" MD5 Fingerprint : %s", fp); } } return FALSE; } return TRUE; }
CWE-20
0
horizontalDifference16(unsigned short *ip, int n, int stride, unsigned short *wp, uint16 *From14) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; /* assumption is unsigned pixel values */ #undef CLAMP #define CLAMP(v) From14[(v) >> 2] mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } }
CWE-119
26
int read_file(struct sc_card *card, char *str_path, unsigned char **data, size_t *data_len) { struct sc_path path; struct sc_file *file; unsigned char *p; int ok = 0; int r; size_t len; sc_format_path(str_path, &path); if (SC_SUCCESS != sc_select_file(card, &path, &file)) { goto err; } len = file ? file->size : 4096; p = realloc(*data, len); if (!p) { goto err; } *data = p; *data_len = len; r = sc_read_binary(card, 0, p, len, 0); if (r < 0) goto err; *data_len = r; ok = 1; err: sc_file_free(file); return ok; }
CWE-119
26
static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes) { if (likely(i->nr_segs == 1)) { i->iov_offset += bytes; } else { const struct iovec *iov = i->iov; size_t base = i->iov_offset; while (bytes) { int copy = min(bytes, iov->iov_len - base); bytes -= copy; base += copy; if (iov->iov_len == base) { iov++; base = 0; } } i->iov = iov; i->iov_offset = base; } }
CWE-20
0
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; msg->msg_namelen = 0; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(iocb, sock, msg, len, flags); }
CWE-20
0
static int __perf_event_overflow(struct perf_event *event, int nmi, int throttle, struct perf_sample_data *data, struct pt_regs *regs) { int events = atomic_read(&event->event_limit); struct hw_perf_event *hwc = &event->hw; int ret = 0; /* * Non-sampling counters might still use the PMI to fold short * hardware counters, ignore those. */ if (unlikely(!is_sampling_event(event))) return 0; if (unlikely(hwc->interrupts >= max_samples_per_tick)) { if (throttle) { hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(event, 0); ret = 1; } } else hwc->interrupts++; if (event->attr.freq) { u64 now = perf_clock(); s64 delta = now - hwc->freq_time_stamp; hwc->freq_time_stamp = now; if (delta > 0 && delta < 2*TICK_NSEC) perf_adjust_period(event, delta, hwc->last_period); } /* * XXX event_limit might not quite work as expected on inherited * events */ event->pending_kill = POLL_IN; if (events && atomic_dec_and_test(&event->event_limit)) { ret = 1; event->pending_kill = POLL_HUP; if (nmi) { event->pending_disable = 1; irq_work_queue(&event->pending); } else perf_event_disable(event); } if (event->overflow_handler) event->overflow_handler(event, nmi, data, regs); else perf_event_output(event, nmi, data, regs); if (event->fasync && event->pending_kill) { if (nmi) { event->pending_wakeup = 1; irq_work_queue(&event->pending); } else perf_event_wakeup(event); } return ret; }
CWE-400
2
static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p = key->payload.data[0]; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kfree(datablob); kfree(new_o); return ret; }
CWE-269
6
ParseNameValue(const char * buffer, int bufsize, struct NameValueParserData * data) { struct xmlparser parser; data->l_head = NULL; data->portListing = NULL; data->portListingLength = 0; /* init xmlparser object */ parser.xmlstart = buffer; parser.xmlsize = bufsize; parser.data = data; parser.starteltfunc = NameValueParserStartElt; parser.endeltfunc = NameValueParserEndElt; parser.datafunc = NameValueParserGetData; parser.attfunc = 0; parsexml(&parser); }
CWE-119
26
int my_redel(const char *org_name, const char *tmp_name, myf MyFlags) { int error=1; DBUG_ENTER("my_redel"); DBUG_PRINT("my",("org_name: '%s' tmp_name: '%s' MyFlags: %d", org_name,tmp_name,MyFlags)); if (my_copystat(org_name,tmp_name,MyFlags) < 0) goto end; if (MyFlags & MY_REDEL_MAKE_BACKUP) { char name_buff[FN_REFLEN+20]; char ext[20]; ext[0]='-'; get_date(ext+1,2+4,(time_t) 0); strmov(strend(ext),REDEL_EXT); if (my_rename(org_name, fn_format(name_buff, org_name, "", ext, 2), MyFlags)) goto end; } else if (my_delete_allow_opened(org_name, MyFlags)) goto end; if (my_rename(tmp_name,org_name,MyFlags)) goto end; error=0; end: DBUG_RETURN(error); } /* my_redel */
CWE-362
18
int get_evtchn_to_irq(evtchn_port_t evtchn) { if (evtchn >= xen_evtchn_max_channels()) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; }
CWE-362
18
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
CWE-119
26
aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) { const AIFF_CAF_CHANNEL_MAP * map_info ; unsigned channel_bitmap, channel_decriptions, bytesread ; int layout_tag ; bytesread = psf_binheader_readf (psf, "444", &layout_tag, &channel_bitmap, &channel_decriptions) ; if ((map_info = aiff_caf_of_channel_layout_tag (layout_tag)) == NULL) return 0 ; psf_log_printf (psf, " Tag : %x\n", layout_tag) ; if (map_info) psf_log_printf (psf, " Layout : %s\n", map_info->name) ; if (bytesread < dword) psf_binheader_readf (psf, "j", dword - bytesread) ; if (map_info->channel_map != NULL) { size_t chanmap_size = psf->sf.channels * sizeof (psf->channel_map [0]) ; free (psf->channel_map) ; if ((psf->channel_map = malloc (chanmap_size)) == NULL) return SFE_MALLOC_FAILED ; memcpy (psf->channel_map, map_info->channel_map, chanmap_size) ; } ; return 0 ; } /* aiff_read_chanmap */
CWE-119
26
static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); }
CWE-119
26
error_t httpClientSetUri(HttpClientContext *context, const char_t *uri) { size_t m; size_t n; char_t *p; char_t *q; //Check parameters if(context == NULL || uri == NULL) return ERROR_INVALID_PARAMETER; //The resource name must not be empty if(uri[0] == '\0') return ERROR_INVALID_PARAMETER; //Check HTTP request state if(context->requestState != HTTP_REQ_STATE_FORMAT_HEADER) return ERROR_WRONG_STATE; //Make sure the buffer contains a valid HTTP request if(context->bufferLen > HTTP_CLIENT_BUFFER_SIZE) return ERROR_INVALID_SYNTAX; //Properly terminate the string with a NULL character context->buffer[context->bufferLen] = '\0'; //The Request-Line begins with a method token p = strchr(context->buffer, ' '); //Any parsing error? if(p == NULL) return ERROR_INVALID_SYNTAX; //The method token is followed by the Request-URI p++; //Point to the end of the Request-URI q = strpbrk(p, " ?"); //Any parsing error? if(q == NULL) return ERROR_INVALID_SYNTAX; //Compute the length of the current URI m = q - p; //Compute the length of the new URI n = osStrlen(uri); //Make sure the buffer is large enough to hold the new resource name if((context->bufferLen + n - m) > HTTP_CLIENT_BUFFER_SIZE) return ERROR_BUFFER_OVERFLOW; //Make room for the new resource name osMemmove(p + n, q, context->buffer + context->bufferLen + 1 - q); //Copy the new resource name osStrncpy(p, uri, n); //Adjust the length of the request header context->bufferLen = context->bufferLen + n - m; //Successful processing return NO_ERROR; }
CWE-20
0
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; BT_DBG("sock %p, sk %p", sock, sk); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == BT_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) return err; msg->msg_namelen = 0; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: hci_sock_cmsg(sk, msg, skb); break; case HCI_CHANNEL_USER: case HCI_CHANNEL_CONTROL: case HCI_CHANNEL_MONITOR: sock_recv_timestamp(msg, sk, skb); break; } skb_free_datagram(sk, skb); return err ? : copied; }
CWE-20
0
horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2) { int32 r1, g1, b1, a1, r2, g2, b2, a2, mask; float fltsize = Fltsize; #define CLAMP(v) ( (v<(float)0.) ? 0 \ : (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \ : (v>(float)24.2) ? 2047 \ : LogK1*log(v*LogK2) + 0.5 ) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); a2 = wp[3] = (uint16) CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--) } } }
CWE-119
26
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) { struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; struct request *first_rq = list_first_entry(pending, struct request, flush.list); struct request *flush_rq = fq->flush_rq; /* C1 described at the top of this file */ if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) return false; /* C2 and C3 */ if (!list_empty(&fq->flush_data_in_flight) && time_before(jiffies, fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) return false; /* * Issue flush and toggle pending_idx. This makes pending_idx * different from running_idx, which means flush is in flight. */ fq->flush_pending_idx ^= 1; blk_rq_init(q, flush_rq); /* * Borrow tag from the first request since they can't * be in flight at the same time. */ if (q->mq_ops) { flush_rq->mq_ctx = first_rq->mq_ctx; flush_rq->tag = first_rq->tag; } flush_rq->cmd_type = REQ_TYPE_FS; flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; return blk_flush_queue_rq(flush_rq, false); }
CWE-362
18
static void ptrace_triggered(struct perf_event *bp, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { int i; struct thread_struct *thread = &(current->thread); /* * Store in the virtual DR6 register the fact that the breakpoint * was hit so the thread's debugger will see it. */ for (i = 0; i < HBP_NUM; i++) { if (thread->ptrace_bps[i] == bp) break; } thread->debugreg6 |= (DR_TRAP0 << i); }
CWE-400
2
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) { Sg_request *srp; int val; unsigned int ms; val = 0; list_for_each_entry(srp, &sfp->rq_list, entry) { if (val > SG_MAX_QUEUE) break; memset(&rinfo[val], 0, SZ_SG_REQ_INFO); rinfo[val].req_state = srp->done + 1; rinfo[val].problem = srp->header.masked_status & srp->header.host_status & srp->header.driver_status; if (srp->done) rinfo[val].duration = srp->header.duration; else { ms = jiffies_to_msecs(jiffies); rinfo[val].duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; } rinfo[val].orphan = srp->orphan; rinfo[val].sg_io_owned = srp->sg_io_owned; rinfo[val].pack_id = srp->header.pack_id; rinfo[val].usr_ptr = srp->header.usr_ptr; val++; } }
CWE-200
10
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct atm_vcc *vcc; struct sk_buff *skb; int copied, error = -EINVAL; msg->msg_namelen = 0; if (sock->state != SS_CONNECTED) return -ENOTCONN; /* only handle MSG_DONTWAIT and MSG_PEEK */ if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) return -EOPNOTSUPP; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); if (!skb) return error; copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); if (!(flags & MSG_PEEK)) { pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); } skb_free_datagram(sk, skb); return copied; }
CWE-20
0
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining, u64 expires) { struct cfs_rq *cfs_rq; u64 runtime; u64 starting_runtime = remaining; rcu_read_lock(); list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, throttled_list) { struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; rq_lock_irqsave(rq, &rf); if (!cfs_rq_throttled(cfs_rq)) goto next; runtime = -cfs_rq->runtime_remaining + 1; if (runtime > remaining) runtime = remaining; remaining -= runtime; cfs_rq->runtime_remaining += runtime; cfs_rq->runtime_expires = expires; /* we check whether we're throttled above */ if (cfs_rq->runtime_remaining > 0) unthrottle_cfs_rq(cfs_rq); next: rq_unlock_irqrestore(rq, &rf); if (!remaining) break; } rcu_read_unlock(); return starting_runtime - remaining; }
CWE-400
2
static int inet_sk_reselect_saddr(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); __be32 old_saddr = inet->inet_saddr; __be32 daddr = inet->inet_daddr; struct flowi4 fl4; struct rtable *rt; __be32 new_saddr; if (inet->opt && inet->opt->srr) daddr = inet->opt->faddr; /* Query new route. */ rt = ip_route_connect(&fl4, daddr, 0, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, sk->sk_protocol, inet->inet_sport, inet->inet_dport, sk, false); if (IS_ERR(rt)) return PTR_ERR(rt); sk_setup_caps(sk, &rt->dst); new_saddr = rt->rt_src; if (new_saddr == old_saddr) return 0; if (sysctl_ip_dynaddr > 1) { printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n", __func__, &old_saddr, &new_saddr); } inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; /* * XXX The only one ugly spot where we need to * XXX really change the sockets identity after * XXX it has entered the hashes. -DaveM * * Besides that, it does not check for connection * uniqueness. Wait for troubles. */ __sk_prot_rehash(sk); return 0; }
CWE-362
18
static void perf_event_output(struct perf_event *event, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_output_handle handle; struct perf_event_header header; /* protect the callchain buffers */ rcu_read_lock(); perf_prepare_sample(&header, data, event, regs); if (perf_output_begin(&handle, event, header.size, nmi, 1)) goto exit; perf_output_sample(&handle, &header, data, event); perf_output_end(&handle); exit: rcu_read_unlock(); }
CWE-400
2
u32 cdk_pk_get_keyid(cdk_pubkey_t pk, u32 * keyid) { u32 lowbits = 0; byte buf[24]; if (pk && (!pk->keyid[0] || !pk->keyid[1])) { if (pk->version < 4 && is_RSA(pk->pubkey_algo)) { byte p[MAX_MPI_BYTES]; size_t n; n = MAX_MPI_BYTES; _gnutls_mpi_print(pk->mpi[0], p, &n); pk->keyid[0] = p[n - 8] << 24 | p[n - 7] << 16 | p[n - 6] << 8 | p[n - 5]; pk->keyid[1] = p[n - 4] << 24 | p[n - 3] << 16 | p[n - 2] << 8 | p[n - 1]; } else if (pk->version == 4) { cdk_pk_get_fingerprint(pk, buf); pk->keyid[0] = _cdk_buftou32(buf + 12); pk->keyid[1] = _cdk_buftou32(buf + 16); } } lowbits = pk ? pk->keyid[1] : 0; if (keyid && pk) { keyid[0] = pk->keyid[0]; keyid[1] = pk->keyid[1]; } return lowbits; }
CWE-119
26
static int check_submodule_url(const char *url) { const char *curl_url; if (looks_like_command_line_option(url)) return -1; if (submodule_url_is_relative(url)) { /* * This could be appended to an http URL and url-decoded; * check for malicious characters. */ char *decoded = url_decode(url); int has_nl = !!strchr(decoded, '\n'); free(decoded); if (has_nl) return -1; } else if (url_to_curl_url(url, &curl_url)) { struct credential c = CREDENTIAL_INIT; int ret = credential_from_url_gently(&c, curl_url, 1); credential_clear(&c); return ret; } return 0; }
CWE-522
19
apr_byte_t oidc_cache_set(request_rec *r, const char *section, const char *key, const char *value, apr_time_t expiry) { oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module); int encrypted = oidc_cfg_cache_encrypt(r); char *encoded = NULL; apr_byte_t rc = FALSE; char *msg = NULL; oidc_debug(r, "enter: %s (section=%s, len=%d, encrypt=%d, ttl(s)=%" APR_TIME_T_FMT ", type=%s)", key, section, value ? (int )strlen(value) : 0, encrypted, apr_time_sec(expiry - apr_time_now()), cfg->cache->name); /* see if we need to encrypt */ if (encrypted == 1) { key = oidc_cache_get_hashed_key(r, cfg->crypto_passphrase, key); if (key == NULL) goto out; if (value != NULL) { if (oidc_cache_crypto_encrypt(r, value, oidc_cache_hash_passphrase(r, cfg->crypto_passphrase), &encoded) <= 0) goto out; value = encoded; } } /* store the resulting value in the cache */ rc = cfg->cache->set(r, section, key, value, expiry); out: /* log the result */ msg = apr_psprintf(r->pool, "%d bytes in %s cache backend for %skey %s", (value ? (int) strlen(value) : 0), (cfg->cache->name ? cfg->cache->name : ""), (encrypted ? "encrypted " : ""), (key ? key : "")); if (rc == TRUE) oidc_debug(r, "successfully stored %s", msg); else oidc_warn(r, "could NOT store %s", msg); return rc; }
CWE-330
12
static void scsi_dma_restart_bh(void *opaque) { SCSIDiskState *s = opaque; SCSIRequest *req; SCSIDiskReq *r; qemu_bh_delete(s->bh); s->bh = NULL; QTAILQ_FOREACH(req, &s->qdev.requests, next) { r = DO_UPCAST(SCSIDiskReq, req, req); if (r->status & SCSI_REQ_STATUS_RETRY) { int status = r->status; int ret; r->status &= ~(SCSI_REQ_STATUS_RETRY | SCSI_REQ_STATUS_RETRY_TYPE_MASK); switch (status & SCSI_REQ_STATUS_RETRY_TYPE_MASK) { case SCSI_REQ_STATUS_RETRY_READ: scsi_read_data(&r->req); break; case SCSI_REQ_STATUS_RETRY_WRITE: scsi_write_data(&r->req); break; case SCSI_REQ_STATUS_RETRY_FLUSH: ret = scsi_disk_emulate_command(r, r->iov.iov_base); if (ret == 0) { scsi_req_complete(&r->req, GOOD); } } } } }
CWE-119
26
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { int ret; if (!test_bit(HCI_UP, &hdev->flags)) return -ENETDOWN; /* Serialize all requests */ hci_req_sync_lock(hdev); ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); hci_req_sync_unlock(hdev); return ret; }
CWE-362
18
bool_t enc624j600IrqHandler(NetInterface *interface) { bool_t flag; uint16_t status; //This flag will be set if a higher priority task must be woken flag = FALSE; //Clear the INTIE bit, immediately after an interrupt event enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_INTIE); //Read interrupt status register status = enc624j600ReadReg(interface, ENC624J600_REG_EIR); //Link status change? if((status & EIR_LINKIF) != 0) { //Disable LINKIE interrupt enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_LINKIE); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet received? if((status & EIR_PKTIF) != 0) { //Disable PKTIE interrupt enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_PKTIE); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((status & (EIR_TXIF | EIR_TXABTIF)) != 0) { //Clear interrupt flags enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_TXIF | EIR_TXABTIF); //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } //Once the interrupt has been serviced, the INTIE bit //is set again to re-enable interrupts enc624j600SetBit(interface, ENC624J600_REG_EIE, EIE_INTIE); //A higher priority task must be woken? return flag; }
CWE-20
0
add_link_ref( struct link_ref **references, const uint8_t *name, size_t name_size) { struct link_ref *ref = calloc(1, sizeof(struct link_ref)); if (!ref) return NULL; ref->id = hash_link_ref(name, name_size); ref->next = references[ref->id % REF_TABLE_SIZE]; references[ref->id % REF_TABLE_SIZE] = ref; return ref; }
CWE-327
3
int prepare_binprm(struct linux_binprm *bprm) { struct inode *inode = file_inode(bprm->file); umode_t mode = inode->i_mode; int retval; /* clear any previous set[ug]id data from a previous binary */ bprm->cred->euid = current_euid(); bprm->cred->egid = current_egid(); if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) && !task_no_new_privs(current) && kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) && kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) { /* Set-uid? */ if (mode & S_ISUID) { bprm->per_clear |= PER_CLEAR_ON_SETID; bprm->cred->euid = inode->i_uid; } /* Set-gid? */ /* * If setgid is set but no group execute bit then this * is a candidate for mandatory locking, not a setgid * executable. */ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { bprm->per_clear |= PER_CLEAR_ON_SETID; bprm->cred->egid = inode->i_gid; } } /* fill in binprm security blob */ retval = security_bprm_set_creds(bprm); if (retval) return retval; bprm->cred_prepared = 1; memset(bprm->buf, 0, BINPRM_BUF_SIZE); return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE); }
CWE-362
18
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { int err; int noblock; struct vmci_datagram *dg; size_t payload_len; struct sk_buff *skb; noblock = flags & MSG_DONTWAIT; if (flags & MSG_OOB || flags & MSG_ERRQUEUE) return -EOPNOTSUPP; msg->msg_namelen = 0; /* Retrieve the head sk_buff from the socket's receive queue. */ err = 0; skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); if (err) return err; if (!skb) return -EAGAIN; dg = (struct vmci_datagram *)skb->data; if (!dg) /* err is 0, meaning we read zero bytes. */ goto out; payload_len = dg->payload_size; /* Ensure the sk_buff matches the payload size claimed in the packet. */ if (payload_len != skb->len - sizeof(*dg)) { err = -EINVAL; goto out; } if (payload_len > len) { payload_len = len; msg->msg_flags |= MSG_TRUNC; } /* Place the datagram payload in the user's iovec. */ err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov, payload_len); if (err) goto out; if (msg->msg_name) { struct sockaddr_vm *vm_addr; /* Provide the address of the sender. */ vm_addr = (struct sockaddr_vm *)msg->msg_name; vsock_addr_init(vm_addr, dg->src.context, dg->src.resource); msg->msg_namelen = sizeof(*vm_addr); } err = payload_len; out: skb_free_datagram(&vsk->sk, skb); return err; }
CWE-20
0
static int do_cmd (xd3_stream *stream, const char *buf) { int ret; if ((ret = system (buf)) != 0) { if (WIFEXITED (ret)) { stream->msg = "command exited non-zero"; IF_DEBUG1 (XPR(NT "command was: %s\n", buf)); } else { stream->msg = "abnormal command termination"; } return XD3_INTERNAL; } return 0; }
CWE-119
26
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct pt_regs hot_regs; if (static_branch(&perf_swevent_enabled[event_id])) { if (!regs) { perf_fetch_caller_regs(&hot_regs); regs = &hot_regs; } __perf_sw_event(event_id, nr, nmi, regs, addr); } }
CWE-400
2
header_seek (SF_PRIVATE *psf, sf_count_t position, int whence) { switch (whence) { case SEEK_SET : if (position > SIGNED_SIZEOF (psf->header)) { /* Too much header to cache so just seek instead. */ psf_fseek (psf, position, whence) ; return ; } ; if (position > psf->headend) psf->headend += psf_fread (psf->header + psf->headend, 1, position - psf->headend, psf) ; psf->headindex = position ; break ; case SEEK_CUR : if (psf->headindex + position < 0) break ; if (psf->headindex >= SIGNED_SIZEOF (psf->header)) { psf_fseek (psf, position, whence) ; return ; } ; if (psf->headindex + position <= psf->headend) { psf->headindex += position ; break ; } ; if (psf->headindex + position > SIGNED_SIZEOF (psf->header)) { /* Need to jump this without caching it. */ psf->headindex = psf->headend ; psf_fseek (psf, position, SEEK_CUR) ; break ; } ; psf->headend += psf_fread (psf->header + psf->headend, 1, position - (psf->headend - psf->headindex), psf) ; psf->headindex = psf->headend ; break ; case SEEK_END : default : psf_log_printf (psf, "Bad whence param in header_seek().\n") ; break ; } ; return ; } /* header_seek */
CWE-119
26
PGTYPESdate_from_asc(char *str, char **endptr) { date dDate; fsec_t fsec; struct tm tt, *tm = &tt; int dtype; int nf; char *field[MAXDATEFIELDS]; int ftype[MAXDATEFIELDS]; char lowstr[MAXDATELEN + 1]; char *realptr; char **ptr = (endptr != NULL) ? endptr : &realptr; bool EuroDates = FALSE; errno = 0; if (strlen(str) >= sizeof(lowstr)) { errno = PGTYPES_DATE_BAD_DATE; return INT_MIN; } if (ParseDateTime(str, lowstr, field, ftype, &nf, ptr) != 0 || DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, EuroDates) != 0) { errno = PGTYPES_DATE_BAD_DATE; return INT_MIN; } switch (dtype) { case DTK_DATE: break; case DTK_EPOCH: if (GetEpochTime(tm) < 0) { errno = PGTYPES_DATE_BAD_DATE; return INT_MIN; } break; default: errno = PGTYPES_DATE_BAD_DATE; return INT_MIN; } dDate = (date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - date2j(2000, 1, 1)); return dDate; }
CWE-119
26
mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; struct sockaddr_mISDN *maddr; int copied, err; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n", __func__, (int)len, flags, _pms(sk)->ch.nr, sk->sk_protocol); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == MISDN_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (!skb) return err; if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { msg->msg_namelen = sizeof(struct sockaddr_mISDN); maddr = (struct sockaddr_mISDN *)msg->msg_name; maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) { maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff; maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff; maddr->sapi = mISDN_HEAD_ID(skb) & 0xff; } else { maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } } else { if (msg->msg_namelen) printk(KERN_WARNING "%s: too small namelen %d\n", __func__, msg->msg_namelen); msg->msg_namelen = 0; } copied = skb->len + MISDN_HEADER_LEN; if (len < copied) { if (flags & MSG_PEEK) atomic_dec(&skb->users); else skb_queue_head(&sk->sk_receive_queue, skb); return -ENOSPC; } memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb), MISDN_HEADER_LEN); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); mISDN_sock_cmsg(sk, msg, skb); skb_free_datagram(sk, skb); return err ? : copied; }
CWE-20
0
static int sched_read_attr(struct sched_attr __user *uattr, struct sched_attr *attr, unsigned int usize) { int ret; if (!access_ok(VERIFY_WRITE, uattr, usize)) return -EFAULT; /* * If we're handed a smaller struct than we know of, * ensure all the unknown bits are 0 - i.e. old * user-space does not get uncomplete information. */ if (usize < sizeof(*attr)) { unsigned char *addr; unsigned char *end; addr = (void *)attr + usize; end = (void *)attr + sizeof(*attr); for (; addr < end; addr++) { if (*addr) goto err_size; } attr->size = usize; } ret = copy_to_user(uattr, attr, usize); if (ret) return -EFAULT; out: return ret; err_size: ret = -E2BIG; goto out; }
CWE-200
10
static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; struct sk_buff *skb; size_t copied; int err; if (flags & MSG_OOB) return -EOPNOTSUPP; if (addr_len) *addr_len=sizeof(*sin6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (err) goto out_free; /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) err = skb->len; out_free: skb_free_datagram(sk, skb); out: return err; csum_copy_err: skb_kill_datagram(sk, skb, flags); /* Error for blocking case is chosen to masquerade as some normal condition. */ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; goto out; }
CWE-20
0
static int __init ip6_tunnel_init(void) { int err; if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) { printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); err = -EAGAIN; goto out; } if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) { printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); err = -EAGAIN; goto unreg_ip4ip6; } err = register_pernet_device(&ip6_tnl_net_ops); if (err < 0) goto err_pernet; return 0; err_pernet: xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); unreg_ip4ip6: xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); out: return err; }
CWE-362
18
error_t enc624j600UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint16_t hashTable[4]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = enc624j600CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = (crc >> 23) & 0x3F; //Update hash table contents hashTable[k / 16] |= (1 << (k % 16)); } } //Write the hash table to the ENC624J600 controller enc624j600WriteReg(interface, ENC624J600_REG_EHT1, hashTable[0]); enc624j600WriteReg(interface, ENC624J600_REG_EHT2, hashTable[1]); enc624j600WriteReg(interface, ENC624J600_REG_EHT3, hashTable[2]); enc624j600WriteReg(interface, ENC624J600_REG_EHT4, hashTable[3]); //Debug message TRACE_DEBUG(" EHT1 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT1)); TRACE_DEBUG(" EHT2 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT2)); TRACE_DEBUG(" EHT3 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT3)); TRACE_DEBUG(" EHT4 = %04" PRIX16 "\r\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT4)); //Successful processing return NO_ERROR; }
CWE-20
0
static inline bool unconditional(const struct ipt_ip *ip) { static const struct ipt_ip uncond; return memcmp(ip, &uncond, sizeof(uncond)) == 0; #undef FWINV }
CWE-119
26
void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct perf_sample_data data; int rctx; preempt_disable_notrace(); rctx = perf_swevent_get_recursion_context(); if (rctx < 0) return; perf_sample_data_init(&data, addr); do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); perf_swevent_put_recursion_context(rctx); preempt_enable_notrace(); }
CWE-400
2
static void mpeg4_encode_gop_header(MpegEncContext *s) { int hours, minutes, seconds; int64_t time; put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); time = s->current_picture_ptr->f->pts; if (s->reordered_input_picture[1]) time = FFMIN(time, s->reordered_input_picture[1]->f->pts); time = time * s->avctx->time_base.num; s->last_time_base = FFUDIV(time, s->avctx->time_base.den); seconds = FFUDIV(time, s->avctx->time_base.den); minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60); hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60); hours = FFUMOD(hours , 24); put_bits(&s->pb, 5, hours); put_bits(&s->pb, 6, minutes); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, seconds); put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); // broken link == NO ff_mpeg4_stuffing(&s->pb); }
CWE-20
0
static void ptrace_hbptriggered(struct perf_event *bp, int unused, struct perf_sample_data *data, struct pt_regs *regs) { struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); long num; int i; siginfo_t info; for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) if (current->thread.debug.hbp[i] == bp) break; num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); info.si_signo = SIGTRAP; info.si_errno = (int)num; info.si_code = TRAP_HWBKPT; info.si_addr = (void __user *)(bkpt->trigger); force_sig_info(SIGTRAP, &info, current); }
CWE-400
2
static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = key->payload.data[0]; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); }
CWE-20
0
void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); unix_tot_inflight--; spin_unlock(&unix_gc_lock); } }
CWE-119
26
header_put_be_int (SF_PRIVATE *psf, int x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 4) { psf->header [psf->headindex++] = (x >> 24) ; psf->header [psf->headindex++] = (x >> 16) ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = x ; } ; } /* header_put_be_int */
CWE-119
26
int regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) { Resub scratch; int i; if (!sub) sub = &scratch; sub->nsub = prog->nsub; for (i = 0; i < MAXSUB; ++i) sub->sub[i].sp = sub->sub[i].ep = NULL; return !match(prog->start, sp, sp, prog->flags | eflags, sub); }
CWE-674
28
static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) { char *name = path_name(path, last); bitmap_pos = ext_index_add_object(object, name); free(name); } bitmap_set(base, bitmap_pos); }
CWE-119
26
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( &ctx->req)); struct skcipher_sg_list *sgl; struct scatterlist *sg; unsigned long iovlen; struct iovec *iov; int err = -EAGAIN; int used; long copied = 0; lock_sock(sk); msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; while (seglen) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; used = ctx->used; if (!used) { err = skcipher_wait_for_data(sk, flags); if (err) goto unlock; } used = min_t(unsigned long, used, seglen); used = af_alg_make_sg(&ctx->rsgl, from, used, 1); err = used; if (err < 0) goto unlock; if (ctx->more || used < ctx->used) used -= used % bs; err = -EINVAL; if (!used) goto free; ablkcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, ctx->iv); err = af_alg_wait_for_completion( ctx->enc ? crypto_ablkcipher_encrypt(&ctx->req) : crypto_ablkcipher_decrypt(&ctx->req), &ctx->completion); free: af_alg_free_sg(&ctx->rsgl); if (err) goto unlock; copied += used; from += used; seglen -= used; skcipher_pull_sgl(sk, used); } } err = 0; unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return copied ?: err; }
CWE-20
0
static void scsi_read_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF("Read buf_len=%zd\n", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF("Read sector_count=%d\n", r->sector_count); if (r->sector_count == 0) { /* This also clears the sense buffer for REQUEST SENSE. */ scsi_req_complete(&r->req, GOOD); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_read_complete(r, -EINVAL); return; } n = r->sector_count; if (n > SCSI_DMA_BUF_SIZE / 512) n = SCSI_DMA_BUF_SIZE / 512; if (s->tray_open) { scsi_read_complete(r, -ENOMEDIUM); } r->iov.iov_len = n * 512; qemu_iovec_init_external(&r->qiov, &r->iov, 1); bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } }
CWE-119
26
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) { struct user *dummy = NULL; addr_t offset; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy->regs.psw.mask) { unsigned long mask = PSW_MASK_USER; mask |= is_ri_task(child) ? PSW_MASK_RI : 0; if ((data & ~mask) != PSW_USER_BITS) return -EINVAL; if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) return -EINVAL; } *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower * half of the value and write the upper 32 bit to * acrs[15]. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else #endif *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ task_pt_regs(child)->orig_gpr2 = data; } else if (addr < (addr_t) &dummy->regs.fp_regs) { /* * prevent writes of padding hole between * orig_gpr2 and fp_regs on s390. */ return 0; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy->regs.fp_regs.fpc) if ((unsigned int) data != 0 || test_fp_ctl(data >> (BITS_PER_LONG - 32))) return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * Handle access to the per_info structure. */ addr -= (addr_t) &dummy->regs.per_info; __poke_user_per(child, addr, data); } return 0; }
CWE-269
6
uint16_t dm9000ReadPhyReg(uint8_t address)
{
   //Write PHY register address
   dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address);

   //Start the read operation
   dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRR);

   //PHY access is still in progress?
   while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0)
   {
   }

   //Clear command register
   dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS);
   //Wait 5us minimum
   usleep(5);

   //Return register value
   return (dm9000ReadReg(DM9000_REG_EPDRH) << 8) | dm9000ReadReg(DM9000_REG_EPDRL);
}
CWE-20
0
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	int len;

	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
		rfcomm_dlc_accept(d);
		msg->msg_namelen = 0;
		return 0;
	}

	len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);

	lock_sock(sk);
	if (!(flags & MSG_PEEK) && len > 0)
		atomic_sub(len, &sk->sk_rmem_alloc);

	if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
		rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
	release_sock(sk);

	return len;
}
CWE-20
0
GIT_INLINE(bool) verify_dotgit_ntfs(git_repository *repo, const char *path, size_t len)
{
	git_buf *reserved = git_repository__reserved_names_win32;
	size_t reserved_len = git_repository__reserved_names_win32_len;
	size_t start = 0, i;

	if (repo)
		git_repository__reserved_names(&reserved, &reserved_len, repo, true);

	for (i = 0; i < reserved_len; i++) {
		git_buf *r = &reserved[i];

		if (len >= r->size && strncasecmp(path, r->ptr, r->size) == 0) {
			start = r->size;
			break;
		}
	}

	if (!start)
		return true;

	/* Reject paths like ".git\" */
	if (path[start] == '\\')
		return false;

	/* Reject paths like '.git ' or '.git.' */
	for (i = start; i < len; i++) {
		if (path[i] != ' ' && path[i] != '.')
			return true;
	}

	return false;
}
CWE-706
29
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; size_t copied; struct sk_buff *skb; int er; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) { release_sock(sk); return er; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (er < 0) { skb_free_datagram(sk, skb); release_sock(sk); return er; } if (sax != NULL) { memset(sax, 0, sizeof(*sax)); sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); } msg->msg_namelen = sizeof(*sax); skb_free_datagram(sk, skb); release_sock(sk); return copied; }
CWE-20
0
static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, unsigned long arg) { struct vbg_session *session = filp->private_data; size_t returned_size, size; struct vbg_ioctl_hdr hdr; bool is_vmmdev_req; int ret = 0; void *buf; if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) return -EFAULT; if (hdr.version != VBG_IOCTL_HDR_VERSION) return -EINVAL; if (hdr.size_in < sizeof(hdr) || (hdr.size_out && hdr.size_out < sizeof(hdr))) return -EINVAL; size = max(hdr.size_in, hdr.size_out); if (_IOC_SIZE(req) && _IOC_SIZE(req) != size) return -EINVAL; if (size > SZ_16M) return -E2BIG; /* * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || req == VBG_IOCTL_VMMDEV_REQUEST_BIG; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); else buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, (void *)arg, hdr.size_in)) { ret = -EFAULT; goto out; } if (hdr.size_in < size) memset(buf + hdr.size_in, 0, size - hdr.size_in); ret = vbg_core_ioctl(session, req, buf); if (ret) goto out; returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out; if (returned_size > size) { vbg_debug("%s: too much output data %zu > %zu\n", __func__, returned_size, size); returned_size = size; } if (copy_to_user((void *)arg, buf, returned_size) != 0) ret = -EFAULT; out: if (is_vmmdev_req) vbg_req_free(buf, size); else kfree(buf); return ret; }
CWE-362
18
asmlinkage void do_ade(struct pt_regs *regs) { unsigned int __user *pc; mm_segment_t seg; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? * Or are we running in MIPS16 mode? */ if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) goto sigbus; pc = (unsigned int __user *) exception_epc(regs); if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SIGNAL) goto sigbus; else if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); /* * Do branch emulation only if we didn't forward the exception. * This is all so but ugly ... */ seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); set_fs(seg); return; sigbus: die_if_kernel("Kernel unaligned instruction access", regs); force_sig(SIGBUS, current); /* * XXX On return from the signal handler we should advance the epc */ }
CWE-400
2
void jpc_qmfb_split_colgrp(jpc_fix_t *a, int numrows, int stride, int parity)
{

	int bufsize = JPC_CEILDIVPOW2(numrows, 1);
	jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE];
	jpc_fix_t *buf = splitbuf;
	jpc_fix_t *srcptr;
	jpc_fix_t *dstptr;
	register jpc_fix_t *srcptr2;
	register jpc_fix_t *dstptr2;
	register int n;
	register int i;
	int m;
	int hstartcol;

	/* Get a buffer. */
	if (bufsize > QMFB_SPLITBUFSIZE) {
		if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) {
			/* We have no choice but to commit suicide in this case. */
			abort();
		}
	}

	if (numrows >= 2) {
		hstartcol = (numrows + 1 - parity) >> 1;
		// ORIGINAL (WRONG): m = (parity) ? hstartcol : (numrows - hstartcol);
		m = numrows - hstartcol;

		/* Save the samples destined for the highpass channel. */
		n = m;
		dstptr = buf;
		srcptr = &a[(1 - parity) * stride];
		while (n-- > 0) {
			dstptr2 = dstptr;
			srcptr2 = srcptr;
			for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
				*dstptr2 = *srcptr2;
				++dstptr2;
				++srcptr2;
			}
			dstptr += JPC_QMFB_COLGRPSIZE;
			srcptr += stride << 1;
		}
		/* Copy the appropriate samples into the lowpass channel. */
		dstptr = &a[(1 - parity) * stride];
		srcptr = &a[(2 - parity) * stride];
		n = numrows - m - (!parity);
		while (n-- > 0) {
			dstptr2 = dstptr;
			srcptr2 = srcptr;
			for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
				*dstptr2 = *srcptr2;
				++dstptr2;
				++srcptr2;
			}
			dstptr += stride;
			srcptr += stride << 1;
		}
		/* Copy the saved samples into the highpass channel. */
		dstptr = &a[hstartcol * stride];
		srcptr = buf;
		n = m;
		while (n-- > 0) {
			dstptr2 = dstptr;
			srcptr2 = srcptr;
			for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
				*dstptr2 = *srcptr2;
				++dstptr2;
				++srcptr2;
			}
			dstptr += stride;
			srcptr += JPC_QMFB_COLGRPSIZE;
		}
	}

	/* If the split buffer was allocated on the heap, free this memory. */
	if (buf != splitbuf) {
		jas_free(buf);
	}

}
CWE-119
26
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
    const cdf_header_t *h, cdf_secid_t id)
{
	assert((size_t)CDF_SEC_SIZE(h) == len);
	return cdf_read(info, (off_t)CDF_SEC_POS(h, id),
	    ((char *)buf) + offs, len);
}
CWE-119
26
int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
	kuid_t euid;
	kgid_t egid;
	int id;
	int next_id = ids->next_id;

	if (size > IPCMNI)
		size = IPCMNI;

	if (ids->in_use >= size)
		return -ENOSPC;

	idr_preload(GFP_KERNEL);

	spin_lock_init(&new->lock);
	new->deleted = false;
	rcu_read_lock();
	spin_lock(&new->lock);

	id = idr_alloc(&ids->ipcs_idr, new,
		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
		       GFP_NOWAIT);
	idr_preload_end();
	if (id < 0) {
		spin_unlock(&new->lock);
		rcu_read_unlock();
		return id;
	}

	ids->in_use++;

	current_euid_egid(&euid, &egid);
	new->cuid = new->uid = euid;
	new->gid = new->cgid = egid;

	if (next_id < 0) {
		new->seq = ids->seq++;
		if (ids->seq > IPCID_SEQ_MAX)
			ids->seq = 0;
	} else {
		new->seq = ipcid_to_seqx(next_id);
		ids->next_id = -1;
	}

	new->id = ipc_buildid(id, new->seq);
	return id;
}
CWE-362
18
void traverse_commit_list(struct rev_info *revs, show_commit_fn show_commit, show_object_fn show_object, void *data) { int i; struct commit *commit; struct strbuf base; strbuf_init(&base, PATH_MAX); while ((commit = get_revision(revs)) != NULL) { /* * an uninteresting boundary commit may not have its tree * parsed yet, but we are not going to show them anyway */ if (commit->tree) add_pending_tree(revs, commit->tree); show_commit(commit, data); } for (i = 0; i < revs->pending.nr; i++) { struct object_array_entry *pending = revs->pending.objects + i; struct object *obj = pending->item; const char *name = pending->name; const char *path = pending->path; if (obj->flags & (UNINTERESTING | SEEN)) continue; if (obj->type == OBJ_TAG) { obj->flags |= SEEN; show_object(obj, NULL, name, data); continue; } if (!path) path = ""; if (obj->type == OBJ_TREE) { process_tree(revs, (struct tree *)obj, show_object, &base, path, data); continue; } if (obj->type == OBJ_BLOB) { process_blob(revs, (struct blob *)obj, show_object, NULL, path, data); continue; } die("unknown pending object %s (%s)", oid_to_hex(&obj->oid), name); } object_array_clear(&revs->pending); strbuf_release(&base); }
CWE-119
26
static grub_err_t read_foo (struct grub_disk *disk, grub_disk_addr_t sector, grub_size_t size, char *buf)
{
	if (disk != NULL) {
		const int blocksize = 512; // unhardcode 512
		int ret;
		RIOBind *iob = disk->data;
		if (bio) iob = bio;
		//printf ("io %p\n", file->root->iob.io);
		ret = iob->read_at (iob->io, delta+(blocksize*sector),
			(ut8*)buf, size*blocksize);
		if (ret == -1)
			return 1;
		//printf ("DISK PTR = %p\n", disk->data);
		//printf ("\nBUF: %x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
	} else eprintf ("oops. no disk\n");
	return 0; // 0 is ok
}
CWE-119
26
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; struct dwc3_request *req; int starting; int ret; u32 cmd; if (!dwc3_calc_trbs_left(dep)) return 0; starting = !(dep->flags & DWC3_EP_BUSY); dwc3_prepare_trbs(dep); req = next_request(&dep->started_list); if (!req) { dep->flags |= DWC3_EP_PENDING_REQUEST; return 0; } memset(&params, 0, sizeof(params)); if (starting) { params.param0 = upper_32_bits(req->trb_dma); params.param1 = lower_32_bits(req->trb_dma); cmd = DWC3_DEPCMD_STARTTRANSFER; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) cmd |= DWC3_DEPCMD_PARAM(dep->frame_number); } else { cmd = DWC3_DEPCMD_UPDATETRANSFER | DWC3_DEPCMD_PARAM(dep->resource_index); } ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); if (ret < 0) { /* * FIXME we need to iterate over the list of requests * here and stop, unmap, free and del each of the linked * requests instead of what we do now. */ if (req->trb) memset(req->trb, 0, sizeof(struct dwc3_trb)); dep->queued_requests--; dwc3_gadget_giveback(dep, req, ret); return ret; } dep->flags |= DWC3_EP_BUSY; if (starting) { dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); WARN_ON_ONCE(!dep->resource_index); } return 0; }
CWE-667
27
static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width, 16) * 3; aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; }
CWE-119
26
static int v9fs_xattr_set_acl(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { int retval; struct posix_acl *acl; struct v9fs_session_info *v9ses; v9ses = v9fs_dentry2v9ses(dentry); /* * set the attribute on the remote. Without even looking at the * xattr value. We leave it to the server to validate */ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_xattr_set(dentry, handler->name, value, size, flags); if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; if (!inode_owner_or_capable(inode)) return -EPERM; if (value) { /* update the cached acl value */ acl = posix_acl_from_xattr(&init_user_ns, value, size); if (IS_ERR(acl)) return PTR_ERR(acl); else if (acl) { retval = posix_acl_valid(inode->i_sb->s_user_ns, acl); if (retval) goto err_out; } } else acl = NULL; switch (handler->flags) { case ACL_TYPE_ACCESS: if (acl) { umode_t mode = inode->i_mode; retval = posix_acl_equiv_mode(acl, &mode); if (retval < 0) goto err_out; else { struct iattr iattr; if (retval == 0) { /* * ACL can be represented * by the mode bits. So don't * update ACL. */ acl = NULL; value = NULL; size = 0; } /* Updte the mode bits */ iattr.ia_mode = ((mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO)); iattr.ia_valid = ATTR_MODE; /* FIXME should we update ctime ? * What is the following setxattr update the * mode ? */ v9fs_vfs_setattr_dotl(dentry, &iattr); } } break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) { retval = acl ? -EINVAL : 0; goto err_out; } break; default: BUG(); } retval = v9fs_xattr_set(dentry, handler->name, value, size, flags); if (!retval) set_cached_acl(inode, handler->flags, acl); err_out: posix_acl_release(acl); return retval; }
CWE-285
23
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
				 int op_flag,
				 unsigned int size,
				 unsigned int __user *tlv)
{
	struct user_element *ue = kcontrol->private_data;
	int change = 0;
	void *new_data;

	if (op_flag > 0) {
		if (size > 1024 * 128)	/* sane value */
			return -EINVAL;

		new_data = memdup_user(tlv, size);
		if (IS_ERR(new_data))
			return PTR_ERR(new_data);
		change = ue->tlv_data_size != size;
		if (!change)
			change = memcmp(ue->tlv_data, new_data, size);
		kfree(ue->tlv_data);
		ue->tlv_data = new_data;
		ue->tlv_data_size = size;
	} else {
		if (! ue->tlv_data_size || ! ue->tlv_data)
			return -ENXIO;
		if (size < ue->tlv_data_size)
			return -ENOSPC;
		if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
			return -EFAULT;
	}
	return change;
}
CWE-362
18
static int write_empty_blocks(struct page *page, unsigned from, unsigned to, int mode) { struct inode *inode = page->mapping->host; unsigned start, end, next, blksize; sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); int ret; blksize = 1 << inode->i_blkbits; next = end = 0; while (next < from) { next += blksize; block++; } start = next; do { next += blksize; ret = needs_empty_write(block, inode); if (unlikely(ret < 0)) return ret; if (ret == 0) { if (end) { ret = __block_write_begin(page, start, end - start, gfs2_block_map); if (unlikely(ret)) return ret; ret = empty_write_end(page, start, end, mode); if (unlikely(ret)) return ret; end = 0; } start = next; } else end = next; block++; } while (next < to); if (end) { ret = __block_write_begin(page, start, end - start, gfs2_block_map); if (unlikely(ret)) return ret; ret = empty_write_end(page, start, end, mode); if (unlikely(ret)) return ret; } return 0; }
CWE-119
26
static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name; struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; lock_sock(sk); /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -ENOTCONN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) goto out; ipx = ipx_hdr(skb); copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr); if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, copied); if (rc) goto out_free; if (skb->tstamp.tv64) sk->sk_stamp = skb->tstamp; msg->msg_namelen = sizeof(*sipx); if (sipx) { sipx->sipx_family = AF_IPX; sipx->sipx_port = ipx->ipx_source.sock; memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN); sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; sipx->sipx_type = ipx->ipx_type; sipx->sipx_zero = 0; } rc = copied; out_free: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; }
CWE-20
0
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;
	}
	return rdesc;
}
CWE-119
26
header_put_le_short (SF_PRIVATE *psf, int x)
{	if (psf->headindex < SIGNED_SIZEOF (psf->header) - 2)
	{	psf->header [psf->headindex++] = x ;
		psf->header [psf->headindex++] = (x >> 8) ;
		} ;
} /* header_put_le_short */
CWE-119
26
static void sample_hbp_handler(struct perf_event *bp, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "%s value is changed\n", ksym_name);
	dump_stack();
	printk(KERN_INFO "Dump stack from sample_hbp_handler\n");
}
CWE-400
2
static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent outI1: calculated ke+nonce, sending I1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_outI1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_cur_state(); reset_globals(); passert(GLOBALS_ARE_RESET()); }
CWE-20
0
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
CWE-362
18
ZEND_API zend_op_array *compile_file(zend_file_handle *file_handle, int type TSRMLS_DC) { zend_lex_state original_lex_state; zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array)); zend_op_array *original_active_op_array = CG(active_op_array); zend_op_array *retval=NULL; int compiler_result; zend_bool compilation_successful=0; znode retval_znode; zend_bool original_in_compilation = CG(in_compilation); retval_znode.op_type = IS_CONST; retval_znode.u.constant.type = IS_LONG; retval_znode.u.constant.value.lval = 1; Z_UNSET_ISREF(retval_znode.u.constant); Z_SET_REFCOUNT(retval_znode.u.constant, 1); zend_save_lexical_state(&original_lex_state TSRMLS_CC); retval = op_array; /* success oriented */ if (open_file_for_scanning(file_handle TSRMLS_CC)==FAILURE) { if (type==ZEND_REQUIRE) { zend_message_dispatcher(ZMSG_FAILED_REQUIRE_FOPEN, file_handle->filename TSRMLS_CC); zend_bailout(); } else { zend_message_dispatcher(ZMSG_FAILED_INCLUDE_FOPEN, file_handle->filename TSRMLS_CC); } compilation_successful=0; } else { init_op_array(op_array, ZEND_USER_FUNCTION, INITIAL_OP_ARRAY_SIZE TSRMLS_CC); CG(in_compilation) = 1; CG(active_op_array) = op_array; zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context))); zend_init_compiler_context(TSRMLS_C); compiler_result = zendparse(TSRMLS_C); zend_do_return(&retval_znode, 0 TSRMLS_CC); CG(in_compilation) = original_in_compilation; if (compiler_result==1) { /* parser error */ zend_bailout(); } compilation_successful=1; } if (retval) { CG(active_op_array) = original_active_op_array; if (compilation_successful) { pass_two(op_array TSRMLS_CC); zend_release_labels(0 TSRMLS_CC); } else { efree(op_array); retval = NULL; } } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); return retval; }
CWE-20
0
static int readContigStripsIntoBuffer (TIFF* in, uint8* buf) { uint8* bufp = buf; int32 bytes_read = 0; uint32 strip, nstrips = TIFFNumberOfStrips(in); uint32 stripsize = TIFFStripSize(in); uint32 rows = 0; uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps); tsize_t scanline_size = TIFFScanlineSize(in); if (scanline_size == 0) { TIFFError("", "TIFF scanline size is zero!"); return 0; } for (strip = 0; strip < nstrips; strip++) { bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1); rows = bytes_read / scanline_size; if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize)) TIFFError("", "Strip %d: read %lu bytes, strip size %lu", (int)strip + 1, (unsigned long) bytes_read, (unsigned long)stripsize); if (bytes_read < 0 && !ignore) { TIFFError("", "Error reading strip %lu after %lu rows", (unsigned long) strip, (unsigned long)rows); return 0; } bufp += bytes_read; } return 1; } /* end readContigStripsIntoBuffer */
CWE-119
26
static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
                        const AVFrame *src, int field)
{
    int plane;
    for (plane = 0; plane < 4 && src->data[plane]; plane++)
        av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
                            src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
                            get_width(fm, src, plane), get_height(fm, src, plane) / 2);
}
CWE-119
26
createenv(const struct rule *rule)
{
	struct env *env;
	u_int i;

	env = malloc(sizeof(*env));
	if (!env)
		err(1, NULL);
	RB_INIT(&env->root);
	env->count = 0;

	if (rule->options & KEEPENV) {
		extern char **environ;

		for (i = 0; environ[i] != NULL; i++) {
			struct envnode *node;
			const char *e, *eq;
			size_t len;
			char keybuf[1024];

			e = environ[i];

			/* ignore invalid or overlong names */
			if ((eq = strchr(e, '=')) == NULL || eq == e)
				continue;
			len = eq - e;
			if (len > sizeof(keybuf) - 1)
				continue;
			memcpy(keybuf, e, len);
			keybuf[len] = '\0';

			node = createnode(keybuf, eq + 1);
			if (RB_INSERT(envtree, &env->root, node)) {
				/* ignore any later duplicates */
				freenode(node);
			} else {
				env->count++;
			}
		}
	}

	return env;
}
CWE-909
25
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
	struct snd_timer_user *tu;
	int err;

	err = nonseekable_open(inode, file);
	if (err < 0)
		return err;

	tu = kzalloc(sizeof(*tu), GFP_KERNEL);
	if (tu == NULL)
		return -ENOMEM;
	spin_lock_init(&tu->qlock);
	init_waitqueue_head(&tu->qchange_sleep);
	mutex_init(&tu->tread_sem);
	tu->ticks = 1;
	tu->queue_size = 128;
	tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
			    GFP_KERNEL);
	if (tu->queue == NULL) {
		kfree(tu);
		return -ENOMEM;
	}
	file->private_data = tu;
	return 0;
}
CWE-362
18
do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
            const unsigned char *ax)
{
#ifdef USE_AMD64_ASM
  return _gcry_aes_amd64_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds,
                                       &dec_tables);
#elif defined(USE_ARM_ASM)
  return _gcry_aes_arm_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds,
                                     &dec_tables);
#else
  return do_decrypt_fn (ctx, bx, ax);
#endif /*!USE_ARM_ASM && !USE_AMD64_ASM*/
}
CWE-668
7
int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
{
	u16 *op;
	int size;
	unicode_t u;

	op = pwcs;
	while (*s && len > 0) {
		if (*s & 0x80) {
			size = utf8_to_utf32(s, len, &u);
			if (size < 0)
				return -EINVAL;

			if (u >= PLANE_SIZE) {
				u -= PLANE_SIZE;
				*op++ = (wchar_t) (SURROGATE_PAIR |
						((u >> 10) & SURROGATE_BITS));
				*op++ = (wchar_t) (SURROGATE_PAIR |
						SURROGATE_LOW |
						(u & SURROGATE_BITS));
			} else {
				*op++ = (wchar_t) u;
			}
			s += size;
			len -= size;
		} else {
			*op++ = *s++;
			len--;
		}
	}
	return op - pwcs;
}
CWE-119
26
int unlinkat_harder(int dfd, const char *filename, int unlink_flags, RemoveFlags remove_flags) { mode_t old_mode; int r; /* Like unlinkat(), but tries harder: if we get EACCESS we'll try to set the r/w/x bits on the * directory. This is useful if we run unprivileged and have some files where the w bit is * missing. */ if (unlinkat(dfd, filename, unlink_flags) >= 0) return 0; if (errno != EACCES || !FLAGS_SET(remove_flags, REMOVE_CHMOD)) return -errno; r = patch_dirfd_mode(dfd, &old_mode); if (r < 0) return r; if (unlinkat(dfd, filename, unlink_flags) < 0) { r = -errno; /* Try to restore the original access mode if this didn't work */ (void) fchmod(dfd, old_mode); return r; } if (FLAGS_SET(remove_flags, REMOVE_CHMOD_RESTORE) && fchmod(dfd, old_mode) < 0) return -errno; /* If this worked, we won't reset the old mode by default, since we'll need it for other entries too, * and we should destroy the whole thing */ return 0; }
CWE-674
28