Dataset schema: code (string, 12 to 2.05k chars), label_name (string, 6 to 8 chars), label (int64, 0 to 95). Each record below is a (code, label_name, label) triple: a flattened function body, its CWE category, and the integer class id.
int TLSInStream::pos() { return offset + ptr - start; }
CWE-787
24
void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, int xstart, int ystart, int xend, int yend) { jas_matrix_bindsub(s, s1, ystart - s1->ystart_, xstart - s1->xstart_, yend - s1->ystart_ - 1, xend - s1->xstart_ - 1); }
CWE-190
19
TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_TYPES_EQ(context, op_context.axis->type, kTfLiteInt32); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); // Leaves work to Eval if axis is not constant; else resizes output. if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(op_context.output); SetTensorToDynamic(resolved_axis); return kTfLiteOk; } resolved_axis->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); return kTfLiteOk; }
CWE-787
24
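Note on the TensorFlow Lite records in this dump: many carry CWE-787/CWE-125 labels because helpers such as GetInput, GetOutput, and GetTemporary can return null pointers that the kernels then dereference unchecked. A minimal sketch of the hardened idiom found in newer TFLite kernels (GetInputSafe/GetOutputSafe exist in recent kernel_util.h; exact placement here is illustrative, not any one kernel's actual patch):

    // Hedged sketch: null-propagating accessors instead of raw GetInput/GetOutput.
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
    TfLiteTensor* output;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output));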
void writeBytes(const void* data, int length) { check(length); memcpy(ptr, data, length); ptr += length; }
CWE-787
24
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // reduce_mean requires a buffer to store intermediate sum result. OpContext op_context(context, node); if (op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt16) { const double real_multiplier = static_cast<double>(op_context.input->params.scale) / static_cast<double>(op_context.output->params.scale); int exponent; QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent); data->shift = exponent; } TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(temp_sum); return kTfLiteOk; } temp_sum->allocation_type = kTfLiteArenaRw; return ResizeTempSum(context, &op_context, temp_sum); }
CWE-125
47
int jas_matrix_resize(jas_matrix_t *matrix, int numrows, int numcols) { int size; int i; size = numrows * numcols; if (size > matrix->datasize_ || numrows > matrix->maxrows_) { return -1; } matrix->numrows_ = numrows; matrix->numcols_ = numcols; for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[numcols * i]; } return 0; }
CWE-190
19
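The CWE-190 label above fits the unchecked product: size = numrows * numcols is a signed int multiply that can wrap, after which the size > matrix->datasize_ guard passes with a bogus value. A minimal sketch of an overflow-safe pre-check (variable names match the snippet; the guard itself is an assumption, not JasPer's actual patch):

    /* Hedged sketch: reject sizes that cannot be represented in int
     * before forming the numrows * numcols product. Requires <limits.h>. */
    if (numrows < 0 || numcols < 0 ||
        (numcols != 0 && numrows > INT_MAX / numcols)) {
        return -1;
    }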
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input1->type) { case kTfLiteInt32: { return EvalImpl<int32_t>(context, data->requires_broadcast, input1, input2, output); } case kTfLiteFloat32: { return EvalImpl<float>(context, data->requires_broadcast, input1, input2, output); } default: { context->ReportError(context, "Type '%s' is not supported by floor_div.", TfLiteTypeGetName(input1->type)); return kTfLiteError; } } }
CWE-125
47
void RemoteFsDevice::serviceRemoved(const QString &name) { if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) { sub=tr("Not Available"); updateStatus(); } }
CWE-22
2
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < NumOutputs(node); ++i) { SetTensorToDynamic(GetOutput(context, node, i)); } return kTfLiteOk; }
CWE-787
24
explicit DataFormatDimMapOp(OpKernelConstruction* context) : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, errors::InvalidArgument(strings::StrCat( "Source format must of length 4 or 5, received " "src_format = ", src_format))); OP_REQUIRES( context, dst_format.size() == 4 || dst_format.size() == 5, errors::InvalidArgument(strings::StrCat( "Destination format must of length 4 or 5, received dst_format = ", dst_format))); dst_idx_ = Tensor(DT_INT32, {static_cast<int64>(src_format.size())}); for (int i = 0; i < src_format.size(); ++i) { for (int j = 0; j < dst_format.size(); ++j) { if (dst_format[j] == src_format[i]) { dst_idx_.vec<int>()(i) = j; break; } } } }
CWE-125
47
void readBytes(void* data, int length) { U8* dataPtr = (U8*)data; U8* dataEnd = dataPtr + length; while (dataPtr < dataEnd) { int n = check(1, dataEnd - dataPtr); memcpy(dataPtr, ptr, n); ptr += n; dataPtr += n; } }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalAdd<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_OK(context, EvalAddQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { TF_LITE_UNSUPPORTED_TYPE(context, output->type, "Add"); } return kTfLiteOk; }
CWE-787
24
static UINT printer_process_irp_write(PRINTER_DEVICE* printer_dev, IRP* irp) { UINT32 Length; UINT64 Offset; rdpPrintJob* printjob = NULL; UINT error = CHANNEL_RC_OK; Stream_Read_UINT32(irp->input, Length); Stream_Read_UINT64(irp->input, Offset); Stream_Seek(irp->input, 20); /* Padding */ if (printer_dev->printer) printjob = printer_dev->printer->FindPrintJob(printer_dev->printer, irp->FileId); if (!printjob) { irp->IoStatus = STATUS_UNSUCCESSFUL; Length = 0; } else { error = printjob->Write(printjob, Stream_Pointer(irp->input), Length); } if (error) { WLog_ERR(TAG, "printjob->Write failed with error %" PRIu32 "!", error); return error; } Stream_Write_UINT32(irp->output, Length); Stream_Write_UINT8(irp->output, 0); /* Padding */ return irp->Complete(irp); }
CWE-125
47
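Consistent with the CWE-125 label, the parsing above consumes 32 bytes (4 + 8 + 20) and then trusts Length without checking how much input remains, so printjob->Write can read past the stream. A hedged sketch of the kind of guard FreeRDP's hardened paths use (Stream_GetRemainingLength is a real FreeRDP API; the exact placement is illustrative):

    if (Stream_GetRemainingLength(irp->input) < 32)
        return ERROR_INVALID_DATA;
    Stream_Read_UINT32(irp->input, Length);
    Stream_Read_UINT64(irp->input, Offset);
    Stream_Seek(irp->input, 20); /* Padding */
    if (Length > Stream_GetRemainingLength(irp->input))
        return ERROR_INVALID_DATA;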
Http::FilterTrailersStatus Context::onResponseTrailers() { if (!wasm_->onResponseTrailers_) { return Http::FilterTrailersStatus::Continue; } if (wasm_->onResponseTrailers_(this, id_).u64_ == 0) { return Http::FilterTrailersStatus::Continue; } return Http::FilterTrailersStatus::StopIteration; }
CWE-476
46
bool DefaultCertValidator::matchSubjectAltName( X509* cert, const std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>& subject_alt_name_matchers) { bssl::UniquePtr<GENERAL_NAMES> san_names( static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr))); if (san_names == nullptr) { return false; } for (const GENERAL_NAME* general_name : san_names.get()) { const std::string san = Utility::generalNameAsString(general_name); for (auto& config_san_matcher : subject_alt_name_matchers) { // For DNS SAN, if the StringMatcher type is exact, we have to follow DNS matching semantics. if (general_name->type == GEN_DNS && config_san_matcher.matcher().match_pattern_case() == envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact ? Utility::dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san)) : config_san_matcher.match(san)) { return true; } } } return false; }
CWE-295
52
int GetS8 (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos >= m_nLen ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; if ( nRes & 0x80 ) nRes |= ~0xff; return nRes; }
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* cond_tensor = GetInput(context, node, kInputConditionTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (cond_tensor->type != kTfLiteBool) { context->ReportError(context, "Condition tensor must be of type bool, but saw '%s'.", TfLiteTypeGetName(cond_tensor->type)); return kTfLiteError; } // As output will be a 2D tensor of indices, use int64 to be consistent with // tensorflow. output->type = kTfLiteInt64; // Exit early if cond is a non-const tensor. Set output tensor to dynamic so // output size can be determined in Eval. if (!IsConstantTensor(cond_tensor)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, cond_tensor, output); }
CWE-787
24
static bool php_mb_parse_encoding(const Variant& encoding, mbfl_encoding ***return_list, int *return_size, bool persistent) { bool ret; if (encoding.isArray()) { ret = php_mb_parse_encoding_array(encoding.toArray(), return_list, return_size, persistent ? 1 : 0); } else { String enc = encoding.toString(); ret = php_mb_parse_encoding_list(enc.data(), enc.size(), return_list, return_size, persistent ? 1 : 0); } if (!ret) { if (return_list && *return_list) { free(*return_list); *return_list = nullptr; } return_size = 0; } return ret; }
CWE-763
61
Jsi_Value *jsi_ValueObjKeyAssign(Jsi_Interp *interp, Jsi_Value *target, Jsi_Value *keyval, Jsi_Value *value, int flag) { int arrayindex = -1; if (keyval->vt == JSI_VT_NUMBER && Jsi_NumberIsInteger(keyval->d.num) && keyval->d.num >= 0) { arrayindex = (int)keyval->d.num; } /* TODO: array["1"] also extern the length of array */ if (arrayindex >= 0 && arrayindex < MAX_ARRAY_LIST && target->vt == JSI_VT_OBJECT && target->d.obj->arr) { return jsi_ObjArraySetDup(interp, target->d.obj, value, arrayindex); } const char *kstr = Jsi_ValueToString(interp, keyval, NULL); #if (defined(JSI_HAS___PROTO__) && JSI_HAS___PROTO__==2) if (Jsi_Strcmp(kstr, "__proto__")==0) { Jsi_Obj *obj = target->d.obj; obj->__proto__ = Jsi_ValueDup(interp, value); //obj->clearProto = 1; return obj->__proto__; } #endif Jsi_Value *v = Jsi_ValueNew1(interp); if (value) Jsi_ValueCopy(interp, v, value); jsi_ValueObjSet(interp, target, kstr, v, flag, (Jsi_ValueIsStringKey(interp, keyval)? JSI_OM_ISSTRKEY:0)); Jsi_DecrRefCount(interp, v); return v; }
CWE-190
19
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output_values = GetOutput(context, node, kOutputValues); TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes); if (IsDynamicTensor(output_values)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } const TfLiteTensor* top_k = GetInput(context, node, kInputTopK); const int32 k = top_k->data.i32[0]; // The tensor can have more than 2 dimensions or even be a vector, the code // anyway calls the internal dimension as row; const TfLiteTensor* input = GetInput(context, node, kInputTensor); const int32 row_size = input->dims->data[input->dims->size - 1]; int32 num_rows = 1; for (int i = 0; i < input->dims->size - 1; ++i) { num_rows *= input->dims->data[i]; } switch (output_values->type) { case kTfLiteFloat32: TopK(row_size, num_rows, GetTensorData<float>(input), k, output_indexes->data.i32, GetTensorData<float>(output_values)); break; case kTfLiteUInt8: TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32, output_values->data.uint8); break; case kTfLiteInt8: TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32, output_values->data.int8); break; case kTfLiteInt32: TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32, output_values->data.i32); break; case kTfLiteInt64: TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32, output_values->data.i64); break; default: TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.", TfLiteTypeGetName(output_values->type)); return kTfLiteError; } return kTfLiteOk; }
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = kTfLiteInt32; // By design, the input shape is always known at the time of Prepare, even // if the preceding op that generates |input| is dynamic. Thus, we can // always compute the rank immediately, without waiting for Eval. SetTensorToPersistentRo(output); // Rank produces a 0-D int32 Tensor representing the rank of input. TfLiteIntArray* output_size = TfLiteIntArrayCreate(0); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size)); TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0); // Immediately propagate the known rank to the output tensor. This allows // downstream ops that rely on the value to use it during prepare. if (output->type == kTfLiteInt32) { int32_t* output_data = GetTensorData<int32_t>(output); *output_data = NumDimensions(input); } else { return kTfLiteError; } return kTfLiteOk; }
CWE-125
47
AP4_AvccAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("Configuration Version", m_ConfigurationVersion); const char* profile_name = GetProfileName(m_Profile); if (profile_name) { inspector.AddField("Profile", profile_name); } else { inspector.AddField("Profile", m_Profile); } inspector.AddField("Profile Compatibility", m_ProfileCompatibility, AP4_AtomInspector::HINT_HEX); inspector.AddField("Level", m_Level); inspector.AddField("NALU Length Size", m_NaluLengthSize); for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) { inspector.AddField("Sequence Parameter", m_SequenceParameters[i].GetData(), m_SequenceParameters[i].GetDataSize()); } for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) { inspector.AddField("Picture Parameter", m_PictureParameters[i].GetData(), m_PictureParameters[i].GetDataSize()); } return AP4_SUCCESS; }
CWE-476
46
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); TfLiteTensor* output_tensor = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), NumDimensions(input_tensor)); if (!IsConstantTensor(padding_matrix)) { SetTensorToDynamic(output_tensor); return kTfLiteOk; } // We have constant padding, so we can infer output size. auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } return context->ResizeTensor(context, output_tensor, output_size.release()); }
CWE-125
47
String HHVM_FUNCTION(ldap_escape, const String& value, const String& ignores /* = "" */, int flags /* = 0 */) { char esc[256] = {}; if (flags & k_LDAP_ESCAPE_FILTER) { // llvm.org/bugs/show_bug.cgi?id=18389 esc['*'*1u] = esc['('*1u] = esc[')'*1u] = esc['\0'*1u] = esc['\\'*1u] = 1; } if (flags & k_LDAP_ESCAPE_DN) { esc[','*1u] = esc['='*1u] = esc['+'*1u] = esc['<'*1u] = esc['\\'*1u] = 1; esc['>'*1u] = esc[';'*1u] = esc['"'*1u] = esc['#'*1u] = 1; } if (!flags) { memset(esc, 1, sizeof(esc)); } for (int i = 0; i < ignores.size(); i++) { esc[(unsigned char)ignores[i]] = 0; } char hex[] = "0123456789abcdef"; String result(3 * value.size(), ReserveString); char *rdata = result.get()->mutableData(), *r = rdata; for (int i = 0; i < value.size(); i++) { auto c = (unsigned char)value[i]; if (esc[c]) { *r++ = '\\'; *r++ = hex[c >> 4]; *r++ = hex[c & 0xf]; } else { *r++ = c; } } result.setSize(r - rdata); return result; }
CWE-787
24
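The CWE-787 label matches the reservation above: 3 * value.size() is computed in int, so a near-INT_MAX input wraps and the escape loop writes past the undersized buffer. A minimal sketch of a pre-check (the error call is illustrative, not HHVM's exact fix; requires <climits>):

    // Hedged sketch: bound the worst-case 3x expansion before reserving.
    if (value.size() > INT_MAX / 3) {
      raise_error("ldap_escape: value too long");
    }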
selReadStream(FILE *fp) { char *selname; char linebuf[L_BUF_SIZE]; l_int32 sy, sx, cy, cx, i, j, version, ignore; SEL *sel; PROCNAME("selReadStream"); if (!fp) return (SEL *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, " Sel Version %d\n", &version) != 1) return (SEL *)ERROR_PTR("not a sel file", procName, NULL); if (version != SEL_VERSION_NUMBER) return (SEL *)ERROR_PTR("invalid sel version", procName, NULL); if (fgets(linebuf, L_BUF_SIZE, fp) == NULL) return (SEL *)ERROR_PTR("error reading into linebuf", procName, NULL); selname = stringNew(linebuf); sscanf(linebuf, " ------ %s ------", selname); if (fscanf(fp, " sy = %d, sx = %d, cy = %d, cx = %d\n", &sy, &sx, &cy, &cx) != 4) { LEPT_FREE(selname); return (SEL *)ERROR_PTR("dimensions not read", procName, NULL); } if ((sel = selCreate(sy, sx, selname)) == NULL) { LEPT_FREE(selname); return (SEL *)ERROR_PTR("sel not made", procName, NULL); } selSetOrigin(sel, cy, cx); for (i = 0; i < sy; i++) { ignore = fscanf(fp, " "); for (j = 0; j < sx; j++) ignore = fscanf(fp, "%1d", &sel->data[i][j]); ignore = fscanf(fp, "\n"); } ignore = fscanf(fp, "\n"); LEPT_FREE(selname); return sel; }
CWE-787
24
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); }
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (input->type != kTfLiteFloat32) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Ceil"); } optimized_ops::Ceil(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; }
CWE-125
47
MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_CODE ); }
CWE-190
19
folly::Optional<Param> ReadRecordLayer::readEvent( folly::IOBufQueue& socketBuf) { if (!unparsedHandshakeData_.empty()) { auto param = decodeHandshakeMessage(unparsedHandshakeData_); if (param) { VLOG(8) << "Received handshake message " << toString(boost::apply_visitor(EventVisitor(), *param)); return param; } } while (true) { // Read one record. We read one record at a time since records could cause // a change in the record layer. auto message = read(socketBuf); if (!message) { return folly::none; } if (!unparsedHandshakeData_.empty() && message->type != ContentType::handshake) { throw std::runtime_error("spliced handshake data"); } switch (message->type) { case ContentType::alert: { auto alert = decode<Alert>(std::move(message->fragment)); if (alert.description == AlertDescription::close_notify) { return Param(CloseNotify(socketBuf.move())); } else { return Param(std::move(alert)); } } case ContentType::handshake: { unparsedHandshakeData_.append(std::move(message->fragment)); auto param = decodeHandshakeMessage(unparsedHandshakeData_); if (param) { VLOG(8) << "Received handshake message " << toString(boost::apply_visitor(EventVisitor(), *param)); return param; } else { // If we read handshake data but didn't have enough to get a full // message we immediately try to read another record. // TODO: add limits on number of records we buffer continue; } } case ContentType::application_data: return Param(AppData(std::move(message->fragment))); default: throw std::runtime_error("unknown content type"); } } }
CWE-770
37
void Transform::interpolate_nearestneighbour( RawTile& in, unsigned int resampled_width, unsigned int resampled_height ){ // Pointer to input buffer unsigned char *input = (unsigned char*) in.data; int channels = in.channels; unsigned int width = in.width; unsigned int height = in.height; // Pointer to output buffer unsigned char *output; // Create new buffer if size is larger than input size bool new_buffer = false; if( resampled_width*resampled_height > in.width*in.height ){ new_buffer = true; output = new unsigned char[(unsigned long long)resampled_width*resampled_height*in.channels]; } else output = (unsigned char*) in.data; // Calculate our scale float xscale = (float)width / (float)resampled_width; float yscale = (float)height / (float)resampled_height; for( unsigned int j=0; j<resampled_height; j++ ){ for( unsigned int i=0; i<resampled_width; i++ ){ // Indexes in the current pyramid resolution and resampled spaces // Make sure to limit our input index to the image surface unsigned long ii = (unsigned int) floorf(i*xscale); unsigned long jj = (unsigned int) floorf(j*yscale); unsigned long pyramid_index = (unsigned int) channels * ( ii + jj*width ); unsigned long long resampled_index = (unsigned long long)(i + j*resampled_width)*channels; for( int k=0; k<in.channels; k++ ){ output[resampled_index+k] = input[pyramid_index+k]; } } } // Delete original buffer if( new_buffer ) delete[] (unsigned char*) input; // Correctly set our Rawtile info in.width = resampled_width; in.height = resampled_height; in.dataLength = resampled_width * resampled_height * channels * (in.bpc/8); in.data = output; }
CWE-190
19
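For the CWE-190 label above, note that the allocation guard multiplies two unsigned ints: resampled_width*resampled_height can wrap, new_buffer stays false, and the resample loops then write through the smaller input buffer. A sketch of a 64-bit comparison using the snippet's own variables:

    // Hedged sketch: compare pixel counts in 64 bits so the 32-bit
    // products cannot wrap and silently reuse a too-small buffer.
    unsigned long long out_pixels = (unsigned long long) resampled_width * resampled_height;
    unsigned long long in_pixels  = (unsigned long long) in.width * in.height;
    bool new_buffer = out_pixels > in_pixels;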
jas_matrix_t *jas_matrix_create(int numrows, int numcols) { jas_matrix_t *matrix; int i; if (numrows < 0 || numcols < 0) { return 0; } if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) { return 0; } matrix->flags_ = 0; matrix->numrows_ = numrows; matrix->numcols_ = numcols; matrix->rows_ = 0; matrix->maxrows_ = numrows; matrix->data_ = 0; matrix->datasize_ = numrows * numcols; if (matrix->maxrows_ > 0) { if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_, sizeof(jas_seqent_t *)))) { jas_matrix_destroy(matrix); return 0; } } if (matrix->datasize_ > 0) { if (!(matrix->data_ = jas_alloc2(matrix->datasize_, sizeof(jas_seqent_t)))) { jas_matrix_destroy(matrix); return 0; } } for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[i * matrix->numcols_]; } for (i = 0; i < matrix->datasize_; ++i) { matrix->data_[i] = 0; } matrix->xstart_ = 0; matrix->ystart_ = 0; matrix->xend_ = matrix->numcols_; matrix->yend_ = matrix->numrows_; return matrix; }
CWE-190
19
void combine_list(String & res, const StringList & in) { res.clear(); StringListEnumeration els = in.elements_obj(); const char * s = 0; while ( (s = els.next()) != 0) { for (; *s; ++s) { if (*s == ':') res.append('\\'); res.append(*s); } res.append(':'); } if (res.back() == ':') res.pop_back(); }
CWE-125
47
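The CWE-125 label corresponds to the final res.back() call: when the input list yields no characters, res is empty and back()/pop_back() touch memory before the buffer. A one-line guard sketch (assuming the String type reports its size(), as it does elsewhere in aspell):

    // Hedged sketch: only trim the trailing ':' if something was appended.
    if (res.size() != 0 && res.back() == ':')
        res.pop_back();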
void Context::onDownstreamConnectionClose(PeerType peer_type) { if (wasm_->onDownstreamConnectionClose_) { wasm_->onDownstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type)); } }
CWE-476
46
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
CWE-125
47
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->outputs->size) { const int tensor_index = node->outputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; }
CWE-125
47
int FileInStream::pos() { if (!file) throw Exception("File is not open"); return ftell(file) + ptr - b; }
CWE-787
24
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
CWE-125
47
HexOutStream::overrun(int itemSize, int nItems) { if (itemSize > bufSize) throw Exception("HexOutStream overrun: max itemSize exceeded"); writeBuffer(); if (itemSize * nItems > end - ptr) nItems = (end - ptr) / itemSize; return nItems; }
CWE-787
24
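The CWE-787 label fits the itemSize * nItems product, which can overflow int and defeat the clamp. A division-based sketch that never forms the product (assuming itemSize >= 1, which the preceding bufSize check implies):

    // Hedged sketch: clamp via division so no overflowing product is formed.
    if (nItems > (end - ptr) / itemSize)
      nItems = (end - ptr) / itemSize;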
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight = GetInput(context, node, 2); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output = GetOutput(context, node, 0); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); }
CWE-125
47
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->inputs->size) { const int tensor_index = node->inputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; }
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { int num_inputs = NumInputs(node); TF_LITE_ENSURE(context, num_inputs >= 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input1->type; // Check that all input tensors have the same shape and type. for (int i = kInputTensor1 + 1; i < num_inputs; ++i) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE(context, HaveSameShapes(input1, input)); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type); } // Use the first input node's dimension to be the dimension of the output // node. TfLiteIntArray* input1_dims = input1->dims; TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims); return context->ResizeTensor(context, output, output_dims); }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* cond_tensor = GetInput(context, node, kInputConditionTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, cond_tensor, output)); } TfLiteIntArray* dims = cond_tensor->dims; if (dims->size == 0) { // Scalar tensors are not supported. TF_LITE_KERNEL_LOG(context, "Where op requires condition w/ rank > 0"); return kTfLiteError; } reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor), GetTensorData<bool>(cond_tensor), GetTensorData<int64_t>(output)); return kTfLiteOk; }
CWE-125
47
inline bool operator ==(const MaskedIP& l, const MaskedIP& r) { auto shift = std::max((l.v6 ? 128 : 32) - l.prefix, (r.v6 ? 128 : 32) - r.prefix); ceph_assert(shift > 0); return (l.addr >> shift) == (r.addr >> shift); }
CWE-617
51
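CWE-617 here is the reachable assertion: a full-length prefix (32 for IPv4, 128 for IPv6) is valid input but makes shift zero, so ceph_assert(shift > 0) aborts the process. A sketch that tolerates the boundary instead of asserting:

    // Hedged sketch: a shift of 0 (full-prefix match) is legitimate input;
    // compare the addresses directly rather than asserting.
    auto shift = std::max((l.v6 ? 128 : 32) - l.prefix, (r.v6 ? 128 : 32) - r.prefix);
    if (shift == 0)
      return l.addr == r.addr;
    return (l.addr >> shift) == (r.addr >> shift);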
void CxImage::Startup(uint32_t imagetype) { //init pointers pDib = pSelection = pAlpha = NULL; ppLayers = ppFrames = NULL; //init structures memset(&head,0,sizeof(BITMAPINFOHEADER)); memset(&info,0,sizeof(CXIMAGEINFO)); //init default attributes info.dwType = imagetype; info.fQuality = 90.0f; info.nAlphaMax = 255; info.nBkgndIndex = -1; info.bEnabled = true; info.nJpegScale = 1; SetXDPI(CXIMAGE_DEFAULT_DPI); SetYDPI(CXIMAGE_DEFAULT_DPI); int16_t test = 1; info.bLittleEndianHost = (*((char *) &test) == 1); }
CWE-770
37
Variant HHVM_FUNCTION(mcrypt_get_block_size, const String& cipher, const Variant& module /* = null_string */) { MCRYPT td = mcrypt_module_open((char*)cipher.data(), (char*)MCG(algorithms_dir).data(), (char*)module.asCStrRef().data(), (char*)MCG(modes_dir).data()); if (td == MCRYPT_FAILED) { MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_block_size"); return false; } int64_t ret = mcrypt_enc_get_block_size(td); mcrypt_module_close(td); return ret; }
CWE-843
43
void CharCodeToUnicode::addMapping(CharCode code, char *uStr, int n, int offset) { CharCode oldLen, i; Unicode u; char uHex[5]; int j; if (code >= mapLen) { oldLen = mapLen; mapLen = (code + 256) & ~255; map = (Unicode *)greallocn(map, mapLen, sizeof(Unicode)); for (i = oldLen; i < mapLen; ++i) { map[i] = 0; } } if (n <= 4) { if (sscanf(uStr, "%x", &u) != 1) { error(-1, "Illegal entry in ToUnicode CMap"); return; } map[code] = u + offset; } else { if (sMapLen >= sMapSize) { sMapSize = sMapSize + 16; sMap = (CharCodeToUnicodeString *) greallocn(sMap, sMapSize, sizeof(CharCodeToUnicodeString)); } map[code] = 0; sMap[sMapLen].c = code; sMap[sMapLen].len = n / 4; for (j = 0; j < sMap[sMapLen].len && j < maxUnicodeString; ++j) { strncpy(uHex, uStr + j*4, 4); uHex[4] = '\0'; if (sscanf(uHex, "%x", &sMap[sMapLen].u[j]) != 1) { error(-1, "Illegal entry in ToUnicode CMap"); } } sMap[sMapLen].u[sMap[sMapLen].len - 1] += offset; ++sMapLen; } }
CWE-120
44
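The CWE-120 label appears to point at the string branch: len is set to n / 4 with no upper bound, while the u[] array holds at most maxUnicodeString entries, so the final u[len - 1] += offset write can land past the array even though the copy loop itself is bounded. A hedged one-line clamp (illustrative; not necessarily the upstream patch):

    // Hedged sketch: keep len within the u[] capacity before indexing u[len - 1].
    sMap[sMapLen].len = (n / 4 <= maxUnicodeString) ? n / 4 : maxUnicodeString;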
int length() const { return m_str ? m_str->size() : 0; }
CWE-190
19
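For the CWE-190 label above: size() returns an unsigned size_t, and the implicit narrowing to int overflows once the string passes INT_MAX bytes. A sketch of a saturating accessor (assumes only that m_str->size() is unsigned; requires <limits>):

    // Hedged sketch: saturate instead of silently wrapping on 2 GiB+ strings.
    int length() const {
      if (!m_str) return 0;
      const size_t n = m_str->size();
      const size_t cap = static_cast<size_t>(std::numeric_limits<int>::max());
      return static_cast<int>(n > cap ? cap : n);
    }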
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); int32_t* out_buf = GetOutput(context, node, 0)->data.i32; const TfLiteTensor* hash = GetInput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 1); const TfLiteTensor* weight = NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2); switch (params->type) { case kTfLiteLshProjectionDense: DenseLshProjection(hash, input, weight, out_buf); break; case kTfLiteLshProjectionSparse: SparseLshProjection(hash, input, weight, out_buf); break; default: return kTfLiteError; } return kTfLiteOk; }
CWE-125
47
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
CWE-125
47
void AuthChecker::PassUserInfoOnSuccess() { char *json_buf = auth::WriteUserInfoToJson(user_info_); if (json_buf == nullptr) { return; } char *base64_json_buf = auth::esp_base64_encode( json_buf, strlen(json_buf), true, false, true /*padding*/); context_->request()->AddHeaderToBackend(auth::kEndpointApiUserInfo, base64_json_buf); auth::esp_grpc_free(json_buf); auth::esp_grpc_free(base64_json_buf); TRACE(trace_span_) << "Authenticated."; trace_span_.reset(); on_done_(Status::OK); }
CWE-290
85
static int16_t decodeSample(ms_adpcm_state &state, uint8_t code, const int16_t *coefficient) { int linearSample = (state.sample1 * coefficient[0] + state.sample2 * coefficient[1]) >> 8; linearSample += ((code & 0x08) ? (code - 0x10) : code) * state.delta; linearSample = clamp(linearSample, MIN_INT16, MAX_INT16); int delta = (state.delta * adaptationTable[code]) >> 8; if (delta < 16) delta = 16; state.delta = delta; state.sample2 = state.sample1; state.sample1 = linearSample; return static_cast<int16_t>(linearSample); }
CWE-190
19
void RemoteFsDevice::load() { if (RemoteFsDevice::constSambaAvahiProtocol==details.url.scheme()) { // Start Avahi listener... Avahi::self(); QUrlQuery q(details.url); if (q.hasQueryItem(constServiceNameQuery)) { details.serviceName=q.queryItemValue(constServiceNameQuery); } if (!details.serviceName.isEmpty()) { AvahiService *srv=Avahi::self()->getService(details.serviceName); if (!srv || srv->getHost().isEmpty()) { sub=tr("Not Available"); } else { sub=tr("Available"); } } connect(Avahi::self(), SIGNAL(serviceAdded(QString)), SLOT(serviceAdded(QString))); connect(Avahi::self(), SIGNAL(serviceRemoved(QString)), SLOT(serviceRemoved(QString))); } if (isConnected()) { setAudioFolder(); readOpts(settingsFileName(), opts, true); rescan(false); // Read from cache if we have it! } }
CWE-22
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalDiv<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8) { TF_LITE_ENSURE_OK( context, EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { context->ReportError( context, "Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.", output->type); return kTfLiteError; } return kTfLiteOk; }
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); output->type = input->type; TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_size); }
CWE-787
24
TfLiteRegistration AddOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; reg.custom_name = "my_add"; reg.builtin_code = tflite::BuiltinOperator_CUSTOM; reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { // Set output size to input size const TfLiteTensor* input1 = GetInput(context, node, 0); const TfLiteTensor* input2 = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, input1->dims->size, input2->dims->size); for (int i = 0; i < input1->dims->size; ++i) { TF_LITE_ENSURE_EQ(context, input1->dims->data[i], input2->dims->data[i]); } TF_LITE_ENSURE_STATUS(context->ResizeTensor( context, output, TfLiteIntArrayCopy(input1->dims))); return kTfLiteOk; }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { // Copy input data to output data. const TfLiteTensor* a0 = GetInput(context, node, 0); TF_LITE_ENSURE(context, a0); TF_LITE_ENSURE(context, a0->data.f); const TfLiteTensor* a1 = GetInput(context, node, 1); TF_LITE_ENSURE(context, a1); TF_LITE_ENSURE(context, a1->data.f); TfLiteTensor* out = GetOutput(context, node, 0); TF_LITE_ENSURE(context, out); TF_LITE_ENSURE(context, out->data.f); int num = a0->dims->data[0]; for (int i = 0; i < num; i++) { out->data.f[i] = a0->data.f[i] + a1->data.f[i]; } return kTfLiteOk; }; return reg; }
CWE-125
47
TfLiteStatus PrepareHashtableImport(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputResourceIdTensor); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1); const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor); const TfLiteTensor* value_tensor = GetInput(context, node, kValueTensor); TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 && value_tensor->type == kTfLiteString) || (key_tensor->type == kTfLiteString && value_tensor->type == kTfLiteInt64)); // TODO(b/144731295): Tensorflow lookup ops support 1-D vector in storing // values. TF_LITE_ENSURE(context, HaveSameShapes(key_tensor, value_tensor)); return kTfLiteOk; }
CWE-125
47
void RemoteFsDevice::serviceAdded(const QString &name) { if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) { sub=tr("Available"); updateStatus(); } }
CWE-22
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* value = GetInput(context, node, kValueTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { const TfLiteTensor* dims = GetInput(context, node, kDimsTensor); TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output)); } #define TF_LITE_FILL(data_type) \ reference_ops::Fill(GetTensorShape(value), GetTensorData<data_type>(value), \ GetTensorShape(output), \ GetTensorData<data_type>(output)) switch (output->type) { case kTfLiteInt32: TF_LITE_FILL(int32_t); break; case kTfLiteInt64: TF_LITE_FILL(int64_t); break; case kTfLiteFloat32: TF_LITE_FILL(float); break; case kTfLiteBool: TF_LITE_FILL(bool); break; case kTfLiteString: FillString(value, output); break; default: context->ReportError( context, "Fill only currently supports int32, int64, float32, bool, string " "for input 1, got %d.", value->type); return kTfLiteError; } #undef TF_LITE_FILL return kTfLiteOk; }
CWE-787
24
void AllocateDataSet(cmsIT8* it8) { TABLE* t = GetTable(it8); if (t -> Data) return; // Already allocated t-> nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS")); t-> nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS")); t-> Data = (char**)AllocChunk (it8, ((cmsUInt32Number) t->nSamples + 1) * ((cmsUInt32Number) t->nPatches + 1) *sizeof (char*)); if (t->Data == NULL) { SynError(it8, "AllocateDataSet: Unable to allocate data array"); } }
CWE-787
24
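The CWE-787 label lines up with the unvalidated atoi results: hostile NUMBER_OF_FIELDS/NUMBER_OF_SETS values make the 32-bit (nSamples + 1) * (nPatches + 1) product wrap, AllocChunk returns an undersized array, and later cell writes go out of bounds. A hedged range check in the spirit of the Little-CMS hardening (the exact limits are illustrative):

    // Hedged sketch: bound both counts before computing the allocation size.
    if (t->nSamples < 0 || t->nSamples > 0x7ffe ||
        t->nPatches < 0 || t->nPatches > 0x7ffe) {
        SynError(it8, "AllocateDataSet: too many fields or sets");
        return;
    }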
bool WindowsServiceControl::install( const QString& filePath, const QString& displayName ) { m_serviceHandle = CreateService( m_serviceManager, // SCManager database WindowsCoreFunctions::toConstWCharArray( m_name ), // name of service WindowsCoreFunctions::toConstWCharArray( displayName ),// name to display SERVICE_ALL_ACCESS, // desired access SERVICE_WIN32_OWN_PROCESS, // service type SERVICE_AUTO_START, // start type SERVICE_ERROR_NORMAL, // error control type WindowsCoreFunctions::toConstWCharArray( filePath ), // service's binary nullptr, // no load ordering group nullptr, // no tag identifier L"Tcpip\0RpcSs\0\0", // dependencies nullptr, // LocalSystem account nullptr ); // no password if( m_serviceHandle == nullptr ) { const auto error = GetLastError(); if( error == ERROR_SERVICE_EXISTS ) { vCritical() << qUtf8Printable( tr( "The service \"%1\" is already installed." ).arg( m_name ) ); } else { vCritical() << qUtf8Printable( tr( "The service \"%1\" could not be installed." ).arg( m_name ) ); } return false; } SC_ACTION serviceActions; serviceActions.Delay = 10000; serviceActions.Type = SC_ACTION_RESTART; SERVICE_FAILURE_ACTIONS serviceFailureActions; serviceFailureActions.dwResetPeriod = 0; serviceFailureActions.lpRebootMsg = nullptr; serviceFailureActions.lpCommand = nullptr; serviceFailureActions.lpsaActions = &serviceActions; serviceFailureActions.cActions = 1; ChangeServiceConfig2( m_serviceHandle, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailureActions ); // Everything went fine vInfo() << qUtf8Printable( tr( "The service \"%1\" has been installed successfully." ).arg( m_name ) ); return true; }
CWE-428
77
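CWE-428 (unquoted search path): filePath is handed to CreateService unquoted, so a path like C:\Program Files\App\service.exe lets Windows try C:\Program.exe first. A minimal sketch of the usual mitigation (assumes filePath contains no embedded quotes):

    // Hedged sketch: quote the binary path before registering the service,
    // then pass the quoted form to CreateService().
    const QString quotedFilePath = QStringLiteral( "\"%1\"" ).arg( filePath );
    // ... WindowsCoreFunctions::toConstWCharArray( quotedFilePath ) ...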
ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); }
CWE-125
47
static void bson_append( bson *b, const void *data, int len ) { memcpy( b->cur , data , len ); b->cur += len; }
CWE-190
19
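The CWE-190 label on this helper reflects that it copies len bytes with no capacity check, and callers reach it with attacker-influenced lengths. A hedged bounds-checked variant (bson_append_checked is a hypothetical name; assumes the legacy driver layout where b->data is the base pointer and b->dataSize the allocation size, with the driver's usual BSON_OK/BSON_ERROR status codes):

    static int bson_append_checked( bson *b, const void *data, size_t len ) {
        /* Hedged sketch: refuse appends that would run past the buffer. */
        size_t used = ( size_t )( b->cur - b->data );
        if ( used > ( size_t )b->dataSize || len > ( size_t )b->dataSize - used )
            return BSON_ERROR;
        memcpy( b->cur, data, len );
        b->cur += len;
        return BSON_OK;
    }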
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* dims = GetInput(context, node, kDimsTensor); const TfLiteTensor* value = GetInput(context, node, kValueTensor); // Make sure the 1st input tensor is 1-D. TF_LITE_ENSURE_EQ(context, NumDimensions(dims), 1); // Make sure the 1st input tensor is int32 or int64. const auto dtype = dims->type; TF_LITE_ENSURE(context, dtype == kTfLiteInt32 || dtype == kTfLiteInt64); // Make sure the 2nd input tensor is a scalar. TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = value->type; if (IsConstantTensor(dims)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; }
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); OpContext op_context(context, node); TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits); auto input_type = op_context.input->type; TF_LITE_ENSURE(context, input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || input_type == kTfLiteInt16 || input_type == kTfLiteInt32 || input_type == kTfLiteInt64 || input_type == kTfLiteInt8); for (int i = 0; i < NumOutputs(node); ++i) { GetOutput(context, node, i)->type = input_type; } auto size_splits = op_context.size_splits; TF_LITE_ENSURE_EQ(context, NumDimensions(size_splits), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), NumElements(size_splits)); // If we know the contents of the 'size_splits' tensor and the 'axis' tensor, // resize all outputs. Otherwise, wait until Eval(). if (IsConstantTensor(op_context.size_splits) && IsConstantTensor(op_context.axis)) { return ResizeOutputTensors(context, node, op_context.input, op_context.size_splits, op_context.axis); } else { return UseDynamicOutputTensors(context, node); } }
CWE-787
24
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (tuple[index].has_value()) { return Status(errors::InvalidArgument( "The tensor for index '", index, "' for key '", key.scalar<int64>()(), "' was already initialized '", dtypes_.size(), "'.")); } return Status::OK(); }
CWE-824
65
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): Our current implementations rely on the input being 4D, // and the size being 1D tensor with exactly 2 elements. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_TYPES_EQ(context, size->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2); output->type = input->type; if (!IsConstantTensor(size)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, input, size, output); }
CWE-787
24
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalAddN<float>(context, node); } else if (output->type == kTfLiteInt32) { EvalAddN<int32_t>(context, node); } else { context->ReportError(context, "AddN only supports FLOAT32|INT32 now, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* cond_tensor = GetInput(context, node, kInputConditionTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, cond_tensor, output)); } TfLiteIntArray* dims = cond_tensor->dims; if (dims->size == 0) { // Scalar tensors are not supported. TF_LITE_KERNEL_LOG(context, "Where op requires condition w/ rank > 0"); return kTfLiteError; } reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor), GetTensorData<bool>(cond_tensor), GetTensorData<int64_t>(output)); return kTfLiteOk; }
CWE-787
24
bool AES_GCM_DecryptContext::Decrypt( const void *pEncryptedDataAndTag, size_t cbEncryptedDataAndTag, const void *pIV, void *pPlaintextData, uint32 *pcbPlaintextData, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { unsigned long long pcbPlaintextData_longlong; const int nDecryptResult = crypto_aead_aes256gcm_decrypt_afternm( static_cast<unsigned char*>( pPlaintextData ), &pcbPlaintextData_longlong, nullptr, static_cast<const unsigned char*>( pEncryptedDataAndTag ), cbEncryptedDataAndTag, static_cast<const unsigned char*>( pAdditionalAuthenticationData ), cbAuthenticationData, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbPlaintextData = pcbPlaintextData_longlong; return nDecryptResult == 0; }
CWE-787
24
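The CWE-787 label matches the missing output-capacity check: crypto_aead_aes256gcm_decrypt_afternm writes up to ciphertext-minus-tag bytes into pPlaintextData, but *pcbPlaintextData (the caller's buffer size) is never consulted before the call. A sketch of a pre-flight check (crypto_aead_aes256gcm_ABYTES is libsodium's 16-byte tag size; placement is illustrative):

    // Hedged sketch: bail out if the caller's buffer cannot hold the
    // worst-case plaintext (ciphertext length minus the GCM tag).
    if ( cbEncryptedDataAndTag < crypto_aead_aes256gcm_ABYTES ||
         *pcbPlaintextData < cbEncryptedDataAndTag - crypto_aead_aes256gcm_ABYTES )
        return false;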
RestStatus RestAuthHandler::execute() { auto const type = _request->requestType(); if (type != rest::RequestType::POST) { generateError(rest::ResponseCode::METHOD_NOT_ALLOWED, TRI_ERROR_HTTP_METHOD_NOT_ALLOWED); return RestStatus::DONE; } bool parseSuccess = false; VPackSlice slice = this->parseVPackBody(parseSuccess); if (!parseSuccess) { // error already set return RestStatus::DONE; } if (!slice.isObject()) { return badRequest(); } VPackSlice usernameSlice = slice.get("username"); VPackSlice passwordSlice = slice.get("password"); if (!usernameSlice.isString() || !passwordSlice.isString()) { return badRequest(); } _username = usernameSlice.copyString(); std::string const password = passwordSlice.copyString(); auth::UserManager* um = AuthenticationFeature::instance()->userManager(); if (um == nullptr) { std::string msg = "This server does not support users"; LOG_TOPIC("2e7d4", ERR, Logger::AUTHENTICATION) << msg; generateError(rest::ResponseCode::UNAUTHORIZED, TRI_ERROR_HTTP_UNAUTHORIZED, msg); } else if (um->checkPassword(_username, password)) { VPackBuilder resultBuilder; { VPackObjectBuilder b(&resultBuilder); std::string jwt = generateJwt(_username, password); resultBuilder.add("jwt", VPackValue(jwt)); } _isValid = true; generateDocument(resultBuilder.slice(), true, &VPackOptions::Defaults); } else { // mop: rfc 2616 10.4.2 (if credentials wrong 401) generateError(rest::ResponseCode::UNAUTHORIZED, TRI_ERROR_HTTP_UNAUTHORIZED, "Wrong credentials"); } return RestStatus::DONE; }
CWE-613
7
TEST_P(SslSocketTest, Ipv6San) { const std::string client_ctx_yaml = R"EOF( common_tls_context: validation_context: trusted_ca: filename: "{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem" match_subject_alt_names: exact: "::1" )EOF"; const std::string server_ctx_yaml = R"EOF( common_tls_context: tls_certificates: certificate_chain: filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem" private_key: filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem" )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); testUtil(test_options); }
CWE-295
52
TfLiteStatus EvalHashtable(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, node->user_data != nullptr); const auto* params = reinterpret_cast<const TfLiteHashtableParams*>(node->user_data); // The resource id is generated based on the given table name. const int resource_id = std::hash<std::string>{}(params->table_name); TfLiteTensor* resource_handle_tensor = GetOutput(context, node, kResourceHandleTensor); auto* resource_handle_data = GetTensorData<std::int32_t>(resource_handle_tensor); resource_handle_data[0] = resource_id; Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); auto& resources = subgraph->resources(); resource::CreateHashtableResourceIfNotAvailable( &resources, resource_id, params->key_dtype, params->value_dtype); return kTfLiteOk; }
CWE-125
47
Java_org_tensorflow_lite_InterpreterTest_getNativeHandleForDelegate( JNIEnv* env, jclass clazz) { // A simple op which outputs a tensor with values of 7. static TfLiteRegistration registration = { .init = nullptr, .free = nullptr, .prepare = [](TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = tflite::GetInput(context, node, 0); TfLiteTensor* output = tflite::GetOutput(context, node, 0); TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims); output->type = kTfLiteFloat32; return context->ResizeTensor(context, output, output_dims); }, .invoke = [](TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = tflite::GetOutput(context, node, 0); std::fill(output->data.f, output->data.f + tflite::NumElements(output), 7.0f); return kTfLiteOk; }, .profiling_string = nullptr, .builtin_code = 0, .custom_name = "", .version = 1, }; static TfLiteDelegate delegate = { .data_ = nullptr, .Prepare = [](TfLiteContext* context, TfLiteDelegate* delegate) -> TfLiteStatus { TfLiteIntArray* execution_plan; TF_LITE_ENSURE_STATUS( context->GetExecutionPlan(context, &execution_plan)); context->ReplaceNodeSubsetsWithDelegateKernels( context, registration, execution_plan, delegate); // Now bind delegate buffer handles for all tensors. for (size_t i = 0; i < context->tensors_size; ++i) { context->tensors[i].delegate = delegate; context->tensors[i].buffer_handle = static_cast<int>(i); } return kTfLiteOk; }, .CopyFromBufferHandle = nullptr, .CopyToBufferHandle = nullptr, .FreeBufferHandle = nullptr, .flags = kTfLiteDelegateFlagsAllowDynamicTensors, }; return reinterpret_cast<jlong>(&delegate); }
CWE-787
24
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n) { int dy = y1 - y0; int adx = x1 - x0; int ady = abs(dy); int base; int x=x0,y=y0; int err = 0; int sy; #ifdef STB_VORBIS_DIVIDE_TABLE if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) { if (dy < 0) { base = -integer_divide_table[ady][adx]; sy = base-1; } else { base = integer_divide_table[ady][adx]; sy = base+1; } } else { base = dy / adx; if (dy < 0) sy = base - 1; else sy = base+1; } #else base = dy / adx; if (dy < 0) sy = base - 1; else sy = base+1; #endif ady -= abs(base) * adx; if (x1 > n) x1 = n; if (x < x1) { LINE_OP(output[x], inverse_db_table[y]); for (++x; x < x1; ++x) { err += ady; if (err >= adx) { err -= adx; y += sy; } else y += base; LINE_OP(output[x], inverse_db_table[y]); } } }
CWE-125
47
TEST_P(WasmTest, DivByZero) { Stats::IsolatedStoreImpl stats_store; Api::ApiPtr api = Api::createApiForTest(stats_store); Upstream::MockClusterManager cluster_manager; Event::DispatcherPtr dispatcher(api->allocateDispatcher()); auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); NiceMock<LocalInfo::MockLocalInfo> local_info; auto name = ""; auto root_id = ""; auto vm_id = ""; auto vm_configuration = ""; auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>( name, root_id, vm_id, envoy::api::v2::core::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>( absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, plugin, scope, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/wasm/test_data/segv_cpp.wasm")); EXPECT_FALSE(code.empty()); auto context = std::make_unique<TestContext>(wasm.get()); EXPECT_CALL(*context, scriptLog_(spdlog::level::err, Eq("before div by zero"))); EXPECT_TRUE(wasm->initialize(code, false)); wasm->setContext(context.get()); if (GetParam() == "v8") { EXPECT_THROW_WITH_MESSAGE( context->onLog(), Extensions::Common::Wasm::WasmException, "Function: proxy_onLog failed: Uncaught RuntimeError: divide by zero"); } else if (GetParam() == "wavm") { EXPECT_THROW_WITH_REGEX(context->onLog(), Extensions::Common::Wasm::WasmException, "Function: proxy_onLog failed: wavm.integerDivideByZeroOrOverflow.*"); } else { ASSERT_FALSE(true); // Neither of the above was matched. } }
CWE-476
46
TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); // Calculate num_entries per path for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; // Resize the decoded outputs. TfLiteTensor* indices = GetOutput(context, node, p); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values = GetOutput(context, node, p + top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; }
CWE-787
24
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
  HardSwishData* data = static_cast<HardSwishData*>(node->user_data);

  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
CWE-125
47
bool createBLSShare(const string &blsKeyName, const char *s_shares, const char *encryptedKeyHex) {
    CHECK_STATE(s_shares);
    CHECK_STATE(encryptedKeyHex);

    vector<char> errMsg(BUF_LEN,0);
    int errStatus = 0;

    uint64_t decKeyLen;
    SAFE_UINT8_BUF(encr_bls_key,BUF_LEN);
    SAFE_UINT8_BUF(encr_key,BUF_LEN);

    if (!hex2carray(encryptedKeyHex, &decKeyLen, encr_key, BUF_LEN)) {
        throw SGXException(INVALID_HEX, "Invalid encryptedKeyHex");
    }

    uint32_t enc_bls_len = 0;

    sgx_status_t status = trustedCreateBlsKeyAES(eid, &errStatus, errMsg.data(), s_shares,
                                                 encr_key, decKeyLen, encr_bls_key, &enc_bls_len);

    HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data());

    SAFE_CHAR_BUF(hexBLSKey,2 * BUF_LEN)

    carray2Hex(encr_bls_key, enc_bls_len, hexBLSKey, 2 * BUF_LEN);

    SGXWalletServer::writeDataToDB(blsKeyName, hexBLSKey);

    return true;
}
CWE-787
24
int Archive::Read(void *Data,size_t Size)
{
  size_t Result;
  if (QOpen.Read(Data,Size,Result))
    return (int)Result;
  return File::Read(Data,Size);
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

  const TfLiteType type = input1->type;
  if (type != kTfLiteInt32 && type != kTfLiteFloat32) {
    TF_LITE_KERNEL_LOG(context, "Unsupported data type %s.",
                       TfLiteTypeGetName(type));
    return kTfLiteError;
  }
  output->type = type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  return context->ResizeTensor(context, output, output_size);
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

  const bool requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, params->activation, output, &data->output_activation_min,
        &data->output_activation_max));
    double real_multiplier =
        input1->params.scale * input2->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }

  return context->ResizeTensor(context, output, output_size);
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  static const int kOutputUniqueTensor = 0;
  static const int kOutputIndexTensor = 1;

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output_unique_tensor =
      GetOutput(context, node, kOutputUniqueTensor);
  TfLiteTensor* output_index_tensor =
      GetOutput(context, node, kOutputIndexTensor);

  // The op only supports 1D input.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
  TfLiteIntArray* output_index_shape = TfLiteIntArrayCopy(input->dims);
  // The unique values are determined during evaluation, so we don't know yet
  // the size of the output tensor.
  SetTensorToDynamic(output_unique_tensor);
  return context->ResizeTensor(context, output_index_tensor,
                               output_index_shape);
}
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* lookup = GetInput(context, node, 0);
  const TfLiteTensor* value = GetInput(context, node, 1);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (value->type) {
    case kTfLiteFloat32:
      return EvalSimple(context, node, lookup, value, output);
    case kTfLiteUInt8:
    case kTfLiteInt8:
      if (output->type == kTfLiteFloat32) {
        return EvalHybrid(context, node, lookup, value, output);
      } else {
        return EvalSimple(context, node, lookup, value, output);
      }
    default:
      context->ReportError(context, "Type not currently supported.");
      return kTfLiteError;
  }
}
CWE-787
24
R_API ut8 *r_bin_java_get_attr_buf(RBinJavaObj *bin, ut64 sz, const ut64 offset, const ut8 *buf, const ut64 len) {
	ut8 *attr_buf = NULL;
	int pending = len - offset;
	const ut8 *a_buf = offset + buf;
	attr_buf = (ut8 *) calloc (pending + 1, 1);
	if (!attr_buf) {
		eprintf ("Unable to allocate enough bytes (0x%04"PFMT64x ") to read in the attribute.\n", sz);
		return attr_buf;
	}
	memcpy (attr_buf, a_buf, pending); // sz+1);
	return attr_buf;
}
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      return EvalImpl<kernel_type, kTfLiteFloat32>(context, node);
    case kTfLiteUInt8:
      return EvalImpl<kernel_type, kTfLiteUInt8>(context, node);
    case kTfLiteInt8:
      return EvalImpl<kernel_type, kTfLiteInt8>(context, node);
    case kTfLiteInt16:
      return EvalImpl<kernel_type, kTfLiteInt16>(context, node);
    default:
      context->ReportError(context, "Type %d not currently supported.",
                           input->type);
      return kTfLiteError;
  }
}
CWE-787
24
TfLiteStatus Gather(const TfLiteGatherParams& params, const TfLiteTensor* input,
                    const TfLiteTensor* positions, TfLiteTensor* output) {
  tflite::GatherParams op_params;
  op_params.axis = params.axis;
  op_params.batch_dims = params.batch_dims;
  optimized_ops::Gather(op_params, GetTensorShape(input),
                        GetTensorData<InputT>(input), GetTensorShape(positions),
                        GetTensorData<PositionsT>(positions),
                        GetTensorShape(output), GetTensorData<InputT>(output));
  return kTfLiteOk;
}
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* lookup = GetInput(context, node, 0);
  TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
  TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);

  const TfLiteTensor* value = GetInput(context, node, 1);
  TF_LITE_ENSURE(context, NumDimensions(value) >= 2);

  TfLiteTensor* output = GetOutput(context, node, 0);
  TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value));

  outputSize->data[0] = SizeOfDimension(lookup, 0);
  outputSize->data[1] = SizeOfDimension(value, 1);

  for (int i = 2; i < NumDimensions(value); i++) {
    outputSize->data[i] = SizeOfDimension(value, i);
  }
  return context->ResizeTensor(context, output, outputSize);
}
CWE-787
24
void Compute(OpKernelContext* context) override {
  const Tensor& data = context->input(0);
  const Tensor& segment_ids = context->input(1);
  const Tensor& num_segments = context->input(2);
  if (!UnsortedSegmentReductionDoValidation(this, context, data, segment_ids,
                                            num_segments)) {
    return;
  }
  const auto segment_flat = segment_ids.flat<Index>();
  const Index output_rows = internal::SubtleMustCopy(static_cast<Index>(
      num_segments.dtype() == DT_INT32 ? num_segments.scalar<int32>()()
                                       : num_segments.scalar<int64>()()));
  OP_REQUIRES(context, output_rows >= 0,
              errors::InvalidArgument("Input num_segments == ", output_rows,
                                      " must not be negative."));
  TensorShape output_shape;
  output_shape.AddDim(output_rows);
  for (int i = segment_ids.dims(); i < data.dims(); i++) {
    output_shape.AddDim(data.dim_size(i));
  }
  Tensor* output = nullptr;
  OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
  auto output_flat = output->flat_outer_dims<T>();
  auto data_ptr = data.template flat<T>().data();
  reduction_functor_(context, output_rows, segment_ids.shape(), segment_flat,
                     data.NumElements(), data_ptr, output_flat);
}
CWE-681
59
int ZlibOutStream::overrun(int itemSize, int nItems)
{
#ifdef ZLIBOUT_DEBUG
  vlog.debug("overrun");
#endif

  if (itemSize > bufSize)
    throw Exception("ZlibOutStream overrun: max itemSize exceeded");

  checkCompressionLevel();

  while (end - ptr < itemSize) {
    zs->next_in = start;
    zs->avail_in = ptr - start;

    deflate(Z_NO_FLUSH);

    // output buffer not full
    if (zs->avail_in == 0) {
      offset += ptr - start;
      ptr = start;
    } else {
      // but didn't consume all the data? try shifting what's left to the
      // start of the buffer.
      vlog.info("z out buf not full, but in data not consumed");
      memmove(start, zs->next_in, ptr - zs->next_in);
      offset += zs->next_in - start;
      ptr -= zs->next_in - start;
    }
  }

  if (itemSize * nItems > end - ptr)
    nItems = (end - ptr) / itemSize;

  return nItems;
}
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (output->type == kTfLiteFloat32) {
    EvalAddN<float>(context, node);
  } else if (output->type == kTfLiteInt32) {
    EvalAddN<int32_t>(context, node);
  } else {
    context->ReportError(context,
                         "AddN only supports FLOAT32|INT32 now, got %s.",
                         TfLiteTypeGetName(output->type));
    return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-125
47
void FdOutStream::flush()
{
  while (sentUpTo < ptr) {
    int n = writeWithTimeout((const void*) sentUpTo,
                             ptr - sentUpTo,
                             blocking? timeoutms : 0);

    // Timeout?
    if (n == 0) {
      // If non-blocking then we're done here
      if (!blocking)
        break;

      throw TimedOut();
    }

    sentUpTo += n;
    offset += n;
  }

  // Managed to flush everything?
  if (sentUpTo == ptr)
    ptr = sentUpTo = start;
}
CWE-787
24
RemoteFsDevice::RemoteFsDevice(MusicLibraryModel *m, const Details &d)
    : FsDevice(m, d.name, createUdi(d.name))
    , mountToken(0)
    , currentMountStatus(false)
    , details(d)
    , proc(0)
    , mounterIface(0)
    , messageSent(false)
{
    // details.path=Utils::fixPath(details.path);
    setup();
    icn=MonoIcon::icon(details.isLocalFile()
                           ? FontAwesome::foldero
                           : constSshfsProtocol==details.url.scheme()
                               ? FontAwesome::linux_os
                               : FontAwesome::windows, Utils::monoIconColor());
}
CWE-22
2
static Variant HHVM_FUNCTION(bcsqrt, const String& operand,
                             int64_t scale /* = -1 */) {
  if (scale < 0) scale = BCG(bc_precision);
  bc_num result;
  bc_init_num(&result);
  SCOPE_EXIT { bc_free_num(&result); };
  php_str2num(&result, (char*)operand.data());
  Variant ret;
  if (bc_sqrt(&result, scale) != 0) {
    if (result->n_scale > scale) {
      result->n_scale = scale;
    }
    ret = String(bc_num2str(result), AttachString);
  } else {
    raise_warning("Square root of negative number");
  }
  return ret;
}
CWE-190
19
static NTLM_AV_PAIR* ntlm_av_pair_next(NTLM_AV_PAIR* pAvPair, size_t* pcbAvPair)
{
	size_t offset;

	if (!pcbAvPair)
		return NULL;

	if (!ntlm_av_pair_check(pAvPair, *pcbAvPair))
		return NULL;

	offset = ntlm_av_pair_get_next_offset(pAvPair);
	*pcbAvPair -= offset;
	return (NTLM_AV_PAIR*)((PBYTE)pAvPair + offset);
}
CWE-125
47
bool Scanner::fill(size_t need)
{
    if (eof) return false;

    pop_finished_files();

    DASSERT(bot <= tok && tok <= lim);
    size_t free = static_cast<size_t>(tok - bot);
    size_t copy = static_cast<size_t>(lim - tok);

    if (free >= need) {
        memmove(bot, tok, copy);
        shift_ptrs_and_fpos(-static_cast<ptrdiff_t>(free));
    }
    else {
        BSIZE += std::max(BSIZE, need);
        char * buf = new char[BSIZE + YYMAXFILL];
        if (!buf) fatal("out of memory");

        memmove(buf, tok, copy);
        shift_ptrs_and_fpos(buf - bot);
        delete [] bot;
        bot = buf;

        free = BSIZE - copy;
    }

    if (!read(free)) {
        eof = lim;
        memset(lim, 0, YYMAXFILL);
        lim += YYMAXFILL;
    }

    return true;
}
CWE-787
24
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
  for (int i = 0; i < NumOutputs(node); ++i) {
    SetTensorToDynamic(GetOutput(context, node, i));
  }
  return kTfLiteOk;
}
CWE-125
47
int64_t MemFile::readImpl(char *buffer, int64_t length) {
  assertx(m_len != -1);
  assertx(length > 0);
  int64_t remaining = m_len - m_cursor;
  if (remaining < length) length = remaining;
  if (length > 0) {
    memcpy(buffer, (const void *)(m_data + m_cursor), length);
  }
  m_cursor += length;
  return length;
}
CWE-787
24
void reposition(int pos) { ptr = start + pos; }
CWE-787
24