Dataset schema — code: string (length 12 to 2.05k); label_name: string (length 6 to 8); label: int64 (range 0 to 95).
AP4_AvccAtom::AP4_AvccAtom(AP4_UI32 size, const AP4_UI08* payload) :
    AP4_Atom(AP4_ATOM_TYPE_AVCC, size)
{
    // make a copy of our configuration bytes
    unsigned int payload_size = size-AP4_ATOM_HEADER_SIZE;
    m_RawBytes.SetData(payload, payload_size);

    // parse the payload
    m_ConfigurationVersion = payload[0];
    m_Profile = payload[1];
    m_ProfileCompatibility = payload[2];
    m_Level = payload[3];
    m_NaluLengthSize = 1+(payload[4]&3);
    AP4_UI08 num_seq_params = payload[5]&31;
    m_SequenceParameters.EnsureCapacity(num_seq_params);
    unsigned int cursor = 6;
    for (unsigned int i=0; i<num_seq_params; i++) {
        m_SequenceParameters.Append(AP4_DataBuffer());
        AP4_UI16 param_length = AP4_BytesToInt16BE(&payload[cursor]);
        m_SequenceParameters[i].SetData(&payload[cursor]+2, param_length);
        cursor += 2+param_length;
    }
    AP4_UI08 num_pic_params = payload[cursor++];
    m_PictureParameters.EnsureCapacity(num_pic_params);
    for (unsigned int i=0; i<num_pic_params; i++) {
        m_PictureParameters.Append(AP4_DataBuffer());
        AP4_UI16 param_length = AP4_BytesToInt16BE(&payload[cursor]);
        m_PictureParameters[i].SetData(&payload[cursor]+2, param_length);
        cursor += 2+param_length;
    }
}
CWE-125
47
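The CWE-125 label matches the unchecked parsing above: payload_size, the parameter-set counts, and each 16-bit length field are taken on faith, so a truncated 'avcC' box can walk cursor past the end of payload. A minimal bounds-checked read helper, as a sketch only (this is not Bento4's actual patch):

    // Illustrative helper: refuse any 2-byte read that would cross the buffer end.
    static bool SafeReadU16BE(const AP4_UI08* payload, unsigned int payload_size,
                              unsigned int cursor, AP4_UI16* out) {
        if (payload_size < 2 || cursor > payload_size - 2) return false;
        *out = (AP4_UI16)((payload[cursor] << 8) | payload[cursor + 1]);
        return true;
    }

Each length read in the loops would then be gated on a check like this before cursor advances.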
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
                      TfLiteNode* node) {
  // Map from value, to index in the unique elements vector.
  // Note that we prefer to use map than unordered_map as it showed less
  // increase in the binary size.
  std::map<T, int> unique_values;
  TfLiteTensor* output_indexes = GetOutput(context, node, 1);
  std::vector<T> output_values;
  I* indexes = GetTensorData<I>(output_indexes);
  const T* data = GetTensorData<T>(input);
  const int num_elements = NumElements(input);

  for (int i = 0; i < num_elements; ++i) {
    const auto element_it = unique_values.find(data[i]);
    if (element_it != unique_values.end()) {
      indexes[i] = element_it->second;
    } else {
      const int unique_index = unique_values.size();
      unique_values[data[i]] = unique_index;
      indexes[i] = unique_index;
      output_values.push_back(data[i]);
    }
  }
  // Allocate output tensor.
  TfLiteTensor* unique_output = GetOutput(context, node, 0);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(NumDimensions(input)), TfLiteIntArrayFree);
  shape->data[0] = unique_values.size();
  TF_LITE_ENSURE_STATUS(
      context->ResizeTensor(context, unique_output, shape.release()));
  // Set the values in the output tensor.
  T* output_unique_values = GetTensorData<T>(unique_output);
  for (int i = 0; i < output_values.size(); ++i) {
    output_unique_values[i] = output_values[i];
  }
  return kTfLiteOk;
}
CWE-125
47
R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp) {
	ut64 sz = 0;
	if (evp == NULL) {
		return sz;
	}
	// evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur);
	sz += 2;
	// evp->value = r_bin_java_element_value_new (bin, offset+2);
	if (evp->value) {
		sz += r_bin_java_element_value_calc_size (evp->value);
	}
	return sz;
}
CWE-805
63
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  bool requires_broadcast = !HaveSameShapes(input1, input2);
  switch (input1->type) {
    case kTfLiteFloat32:
      Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output,
                                                       requires_broadcast);
      break;
    case kTfLiteInt32:
      Comparison<int32_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt64:
      Comparison<int64_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteUInt8:
      ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt8:
      ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    default:
      context->ReportError(
          context, "Does not support type %d, requires float|int|uint8",
          input1->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (type == kGenericOptimized) {
    optimized_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
                         GetTensorShape(output), GetTensorData<float>(output));
  } else {
    reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
                         GetTensorShape(output), GetTensorData<float>(output));
  }
  return kTfLiteOk;
}
CWE-787
24
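Many of the TfLite kernels in this set share the failure mode behind their CWE-787/CWE-125 labels: GetInput/GetOutput can return nullptr for a malformed model, and nothing validates shapes before the kernel writes through the output pointer. A hardened skeleton, as an illustrative sketch rather than the upstream fix:

    TfLiteStatus SafeFloorEval(TfLiteContext* context, TfLiteNode* node) {
      const TfLiteTensor* input = GetInput(context, node, kInputTensor);
      TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
      TF_LITE_ENSURE(context, input != nullptr);   // reject malformed models
      TF_LITE_ENSURE(context, output != nullptr);
      TF_LITE_ENSURE_EQ(context, input->bytes, output->bytes);  // no OOB write
      reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
                           GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    }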
TfLiteRegistration CopyOpRegistration() {
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    // Set output size to input size
    const TfLiteTensor* tensor0 = GetInput(context, node, 0);
    TfLiteTensor* tensor1 = GetOutput(context, node, 0);
    TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
    return context->ResizeTensor(context, tensor1, newSize);
  };

  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    CallReporting* call_reporting =
        static_cast<CallReporting*>(node->builtin_data);
    // Copy input data to output data.
    const TfLiteTensor* a0 = GetInput(context, node, 0);
    TfLiteTensor* a1 = GetOutput(context, node, 0);
    int num = a0->dims->data[0];
    for (int i = 0; i < num; i++) {
      a1->data.f[i] = a0->data.f[i];
    }
    call_reporting->Record();
    return kTfLiteOk;
  };
  return reg;
}
CWE-787
24
R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut64 offset = 0;
	RBinJavaAttrInfo *attr = NULL;
	attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	offset += 6;
	if (attr && sz >= offset) {
		attr->type = R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR;
		attr->info.annotation_default_attr.default_value =
			r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset);
		if (attr->info.annotation_default_attr.default_value) {
			offset += attr->info.annotation_default_attr.default_value->size;
		}
	}
	r_bin_java_print_annotation_default_attr_summary (attr);
	return attr;
}
CWE-805
63
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameNotMatched) {
  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
      "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
  envoy::type::matcher::v3::StringMatcher matcher;
  matcher.MergeFrom(TestUtility::createRegexMatcher(".*.foo.com"));
  std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
      subject_alt_name_matchers;
  subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
  EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(),
                                                         subject_alt_name_matchers));
}
CWE-295
52
void Logger::addMessage(const QString &message, const Log::MsgType &type)
{
    QWriteLocker locker(&lock);

    Log::Msg temp = { msgCounter++, QDateTime::currentMSecsSinceEpoch(), type, message };
    m_messages.push_back(temp);

    if (m_messages.size() >= MAX_LOG_MESSAGES)
        m_messages.pop_front();

    emit newLogMessage(temp);
}
CWE-79
1
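The CWE-79 label suggests the stored message can later reach an HTML-rendering log view unescaped (my reading of the label, not an upstream statement). One mitigation is to escape at the point the message enters the log:

    // Sketch: neutralize HTML metacharacters in attacker-influenced text
    // (such as peer-supplied names) before the message is stored.
    Log::Msg temp = { msgCounter++, QDateTime::currentMSecsSinceEpoch(),
                      type, message.toHtmlEscaped() };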
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  // Check that the inputs and outputs have the right sizes and types.
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output_values->type);

  const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
  TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32);

  // Set output dynamic if the input is not const.
  if (IsConstantTensor(top_k)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  } else {
    TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
    TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
    SetTensorToDynamic(output_indexes);
    SetTensorToDynamic(output_values);
  }
  return kTfLiteOk;
}
CWE-125
47
void ImplPolygon::ImplSplit( sal_uInt16 nPos, sal_uInt16 nSpace, ImplPolygon const * pInitPoly )
{
    //Can't fit this in :-(, throw ?
    if (mnPoints + nSpace > USHRT_MAX)
        return;

    const sal_uInt16 nNewSize = mnPoints + nSpace;
    const std::size_t nSpaceSize = static_cast<std::size_t>(nSpace) * sizeof(Point);

    if( nPos >= mnPoints )
    {
        // Append at the back
        nPos = mnPoints;
        ImplSetSize( nNewSize );

        if( pInitPoly )
        {
            memcpy( mpPointAry + nPos, pInitPoly->mpPointAry, nSpaceSize );

            if( pInitPoly->mpFlagAry )
                memcpy( mpFlagAry + nPos, pInitPoly->mpFlagAry, nSpace );
        }
    }
    else
    {
        const sal_uInt16 nSecPos = nPos + nSpace;
        const sal_uInt16 nRest = mnPoints - nPos;
        Point* pNewAry = reinterpret_cast<Point*>(new char[ static_cast<std::size_t>(nNewSize) * sizeof(Point) ]);

        memcpy( pNewAry, mpPointAry, nPos * sizeof( Point ) );

        if( pInitPoly )
            memcpy( pNewAry + nPos, pInitPoly->mpPointAry, nSpaceSize );
        else
            memset( pNewAry + nPos, 0, nSpaceSize );

        memcpy( pNewAry + nSecPos, mpPointAry + nPos, nRest * sizeof( Point ) );
        delete[] reinterpret_cast<char*>(mpPointAry);

        // consider FlagArray
        if( mpFlagAry )
        {
            PolyFlags* pNewFlagAry = new PolyFlags[ nNewSize ];

            memcpy( pNewFlagAry, mpFlagAry, nPos );

            if( pInitPoly && pInitPoly->mpFlagAry )
                memcpy( pNewFlagAry + nPos, pInitPoly->mpFlagAry, nSpace );
            else
                memset( pNewFlagAry + nPos, 0, nSpace );

            memcpy( pNewFlagAry + nSecPos, mpFlagAry + nPos, nRest );
            delete[] mpFlagAry;
            mpFlagAry = pNewFlagAry;
        }

        mpPointAry = pNewAry;
        mnPoints = nNewSize;
    }
}
CWE-787
24
int pnm_validate(jas_stream_t *in)
{
	uchar buf[2];
	int i;
	int n;

	assert(JAS_STREAM_MAXPUTBACK >= 2);

	/* Read the first two characters that constitute the signature. */
	if ((n = jas_stream_read(in, buf, 2)) < 0) {
		return -1;
	}
	/* Put these characters back to the stream. */
	for (i = n - 1; i >= 0; --i) {
		if (jas_stream_ungetc(in, buf[i]) == EOF) {
			return -1;
		}
	}
	/* Did we read enough data? */
	if (n < 2) {
		return -1;
	}
	/* Is this the correct signature for a PNM file? */
	if (buf[0] == 'P' && isdigit(buf[1])) {
		return 0;
	}
	return -1;
}
CWE-190
19
TEST_P(SslSocketTest, Ipv4San) {
  const std::string client_ctx_yaml = R"EOF(
common_tls_context:
  validation_context:
    trusted_ca:
      filename: "{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem"
    match_subject_alt_names:
      exact: "127.0.0.1"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
common_tls_context:
  tls_certificates:
    certificate_chain:
      filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem"
    private_key:
      filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem"
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
  testUtil(test_options);
}
CWE-295
52
TfLiteRegistration AddOpRegistration() {
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

  reg.custom_name = "my_add";
  reg.builtin_code = tflite::BuiltinOperator_CUSTOM;

  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    // Set output size to input size
    const TfLiteTensor* input1 = GetInput(context, node, 0);
    const TfLiteTensor* input2 = GetInput(context, node, 1);
    TfLiteTensor* output = GetOutput(context, node, 0);

    TF_LITE_ENSURE_EQ(context, input1->dims->size, input2->dims->size);
    for (int i = 0; i < input1->dims->size; ++i) {
      TF_LITE_ENSURE_EQ(context, input1->dims->data[i], input2->dims->data[i]);
    }

    TF_LITE_ENSURE_STATUS(context->ResizeTensor(
        context, output, TfLiteIntArrayCopy(input1->dims)));
    return kTfLiteOk;
  };

  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    // Copy input data to output data.
    const TfLiteTensor* a0 = GetInput(context, node, 0);
    TF_LITE_ENSURE(context, a0);
    TF_LITE_ENSURE(context, a0->data.f);
    const TfLiteTensor* a1 = GetInput(context, node, 1);
    TF_LITE_ENSURE(context, a1);
    TF_LITE_ENSURE(context, a1->data.f);
    TfLiteTensor* out = GetOutput(context, node, 0);
    TF_LITE_ENSURE(context, out);
    TF_LITE_ENSURE(context, out->data.f);
    int num = a0->dims->data[0];
    for (int i = 0; i < num; i++) {
      out->data.f[i] = a0->data.f[i] + a1->data.f[i];
    }
    return kTfLiteOk;
  };
  return reg;
}
CWE-787
24
ResponsePtr Server::ServeStatic(RequestPtr request) {
  assert(request->method() == methods::kGet);

  if (doc_root_.empty()) {
    LOG_INFO("The doc root was not specified");
    return {};
  }

  fs::path path = doc_root_ / request->url().path();

  try {
    // NOTE: FileBody might throw Error::kFileError.
    auto body = std::make_shared<FileBody>(path, file_chunk_size_);

    auto response = std::make_shared<Response>(Status::kOK);

    std::string extension = path.extension().string();
    response->SetContentType(media_types::FromExtension(extension), "");

    // NOTE: Gzip compression is not supported.
    response->SetBody(body, true);

    return response;

  } catch (const Error& error) {
    LOG_ERRO("File error: %s", error.message().c_str());
    return {};
  }
}
CWE-22
2
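For this CWE-22 row the issue is visible in the path join: doc_root_ / request->url().path() resolves "../" segments, so a crafted URL can escape the doc root. A canonicalize-and-prefix-check guard, sketched with hypothetical naming (IsUnderRoot is not part of the project):

    #include <algorithm>
    #include <filesystem>
    namespace fs = std::filesystem;

    // Returns true only if |candidate| stays inside |root| after
    // canonicalization, so "../" tricks cannot escape the doc root.
    bool IsUnderRoot(const fs::path& root, const fs::path& candidate) {
      const auto canon_root = fs::weakly_canonical(root);
      const auto canon_path = fs::weakly_canonical(candidate);
      return std::mismatch(canon_root.begin(), canon_root.end(),
                           canon_path.begin(), canon_path.end())
                 .first == canon_root.end();
    }

ServeStatic would call this after building |path| and return an error response on failure.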
void PngImg::InitStorage_() {
    rowPtrs_.resize(info_.height, nullptr);
    data_ = new png_byte[info_.height * info_.rowbytes];

    for(size_t i = 0; i < info_.height; ++i) {
        rowPtrs_[i] = data_ + i * info_.rowbytes;
    }
}
CWE-787
24
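The CWE-787 label lines up with the unchecked multiplication above: info_.height * info_.rowbytes can wrap, producing a short allocation that the row loop then overruns. An overflow-checked variant, a sketch under that assumption (needs <limits> and <new>):

    void PngImg::InitStorage_() {
        const size_t h = info_.height;
        const size_t rb = info_.rowbytes;
        if (h != 0 && rb > std::numeric_limits<size_t>::max() / h)
            throw std::bad_alloc();  // refuse a wrapping size computation
        rowPtrs_.resize(h, nullptr);
        data_ = new png_byte[h * rb];
        for (size_t i = 0; i < h; ++i)
            rowPtrs_[i] = data_ + i * rb;
    }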
void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
                               TfLitePoolParams* params, OpData* data,
                               const TfLiteTensor* input,
                               TfLiteTensor* output) {
  int32_t activation_min;
  int32_t activation_max;
  (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                          &activation_min, &activation_max);
#define TF_LITE_AVERAGE_POOL(type)                                         \
  tflite::PoolParams op_params;                                            \
  op_params.stride_height = params->stride_height;                        \
  op_params.stride_width = params->stride_width;                          \
  op_params.filter_height = params->filter_height;                        \
  op_params.filter_width = params->filter_width;                          \
  op_params.padding_values.height = data->padding.height;                 \
  op_params.padding_values.width = data->padding.width;                   \
  op_params.quantized_activation_min = activation_min;                    \
  op_params.quantized_activation_max = activation_max;                    \
  type::AveragePool(op_params, GetTensorShape(input),                     \
                    GetTensorData<uint8_t>(input), GetTensorShape(output), \
                    GetTensorData<uint8_t>(output))
  if (kernel_type == kReference) {
    TF_LITE_AVERAGE_POOL(reference_ops);
  } else {
    TF_LITE_AVERAGE_POOL(optimized_ops);
  }
#undef TF_LITE_AVERAGE_POOL
}
CWE-835
42
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
   int dy = y1 - y0;
   int adx = x1 - x0;
   int ady = abs(dy);
   int base;
   int x=x0,y=y0;
   int err = 0;
   int sy;

#ifdef STB_VORBIS_DIVIDE_TABLE
   if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
      if (dy < 0) {
         base = -integer_divide_table[ady][adx];
         sy = base-1;
      } else {
         base = integer_divide_table[ady][adx];
         sy = base+1;
      }
   } else {
      base = dy / adx;
      if (dy < 0) sy = base - 1;
      else        sy = base+1;
   }
#else
   base = dy / adx;
   if (dy < 0) sy = base - 1;
   else        sy = base+1;
#endif
   ady -= abs(base) * adx;
   if (x1 > n) x1 = n;
   if (x < x1) {
      LINE_OP(output[x], inverse_db_table[y]);
      for (++x; x < x1; ++x) {
         err += ady;
         if (err >= adx) {
            err -= adx;
            y += sy;
         } else
            y += base;
         LINE_OP(output[x], inverse_db_table[y]);
      }
   }
}
CWE-369
60
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const TfLiteTensor* input = GetInput(context, node, 0);
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
      break;
    case kTfLiteUInt8:
      MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input,
                                         output);
      break;
    case kTfLiteInt8:
      MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
                                        output);
      break;
    case kTfLiteInt16:
      MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
                                         output);
      break;
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-787
24
bool MemFile::seek(int64_t offset, int whence /* = SEEK_SET */) {
  assertx(m_len != -1);

  if (whence == SEEK_CUR) {
    if (offset > 0 && offset < bufferedLen()) {
      setReadPosition(getReadPosition() + offset);
      setPosition(getPosition() + offset);
      return true;
    }
    offset += getPosition();
    whence = SEEK_SET;
  }

  // invalidate the current buffer
  setWritePosition(0);
  setReadPosition(0);
  if (whence == SEEK_SET) {
    m_cursor = offset;
  } else {
    assertx(whence == SEEK_END);
    m_cursor = m_len + offset;
  }
  setPosition(m_cursor);
  return true;
}
CWE-190
19
void DefaultEnv::Initialize()
{
  sLog = new Log();
  SetUpLog();

  sEnv = new DefaultEnv();
  sForkHandler = new ForkHandler();
  sFileTimer = new FileTimer();
  sPlugInManager = new PlugInManager();

  sPlugInManager->ProcessEnvironmentSettings();
  sForkHandler->RegisterFileTimer( sFileTimer );

  //--------------------------------------------------------------------------
  // MacOSX library loading is completely moronic. We cannot dlopen a library
  // from a thread other than a main thread, so we-pre dlopen all the
  // libraries that we may potentially want.
  //--------------------------------------------------------------------------
#ifdef __APPLE__
  char *errBuff = new char[1024];

  const char *libs[] =
  {
    "libXrdSeckrb5.so",
    "libXrdSecgsi.so",
    "libXrdSecgsiAuthzVO.so",
    "libXrdSecgsiGMAPDN.so",
    "libXrdSecgsiGMAPLDAP.so",
    "libXrdSecpwd.so",
    "libXrdSecsss.so",
    "libXrdSecunix.so",
    0
  };

  for( int i = 0; libs[i]; ++i )
  {
    sLog->Debug( UtilityMsg, "Attempting to pre-load: %s", libs[i] );
    bool ok = XrdOucPreload( libs[i], errBuff, 1024 );
    if( !ok )
      sLog->Error( UtilityMsg, "Unable to pre-load %s: %s", libs[i], errBuff );
  }
  delete [] errBuff;
#endif
}
CWE-78
6
TEST_P(SslSocketTest, FailedClientAuthSanVerificationNoClientCert) {
  const std::string client_ctx_yaml = R"EOF(
common_tls_context:
)EOF";

  const std::string server_ctx_yaml = R"EOF(
common_tls_context:
  tls_certificates:
    certificate_chain:
      filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
    private_key:
      filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
  validation_context:
    trusted_ca:
      filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
    match_subject_alt_names:
      exact: "example.com"
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());
  testUtil(test_options.setExpectedServerStats("ssl.fail_verify_no_cert"));
}
CWE-295
52
MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name,
                                       const char *value, int len ) {
    return bson_append_string_base( b, name, value, len, BSON_SYMBOL );
}
CWE-190
19
jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms, int clrspc)
{
	jas_image_t *image;
	uint_fast32_t rawsize;
	uint_fast32_t inmem;
	int cmptno;
	jas_image_cmptparm_t *cmptparm;

	if (!(image = jas_image_create0())) {
		return 0;
	}

	image->clrspc_ = clrspc;
	image->maxcmpts_ = numcmpts;
	image->inmem_ = true;

	/* Allocate memory for the per-component information. */
	if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_,
	  sizeof(jas_image_cmpt_t *)))) {
		jas_image_destroy(image);
		return 0;
	}
	/* Initialize in case of failure. */
	for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) {
		image->cmpts_[cmptno] = 0;
	}

	/* Compute the approximate raw size of the image. */
	rawsize = 0;
	for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
	  ++cmptparm) {
		rawsize += cmptparm->width * cmptparm->height *
		  (cmptparm->prec + 7) / 8;
	}
	/* Decide whether to buffer the image data in memory, based on the
	  raw size of the image. */
	inmem = (rawsize < JAS_IMAGE_INMEMTHRESH);

	/* Create the individual image components. */
	for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
	  ++cmptparm) {
		if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx,
		  cmptparm->tly, cmptparm->hstep, cmptparm->vstep,
		  cmptparm->width, cmptparm->height, cmptparm->prec,
		  cmptparm->sgnd, inmem))) {
			jas_image_destroy(image);
			return 0;
		}
		++image->numcmpts_;
	}

	/* Determine the bounding box for all of the components on the
	  reference grid (i.e., the image area) */
	jas_image_setbbox(image);

	return image;
}
CWE-190
19
int TLSOutStream::overrun(int itemSize, int nItems)
{
  if (itemSize > bufSize)
    throw Exception("TLSOutStream overrun: max itemSize exceeded");

  flush();

  if (itemSize * nItems > end - ptr)
    nItems = (end - ptr) / itemSize;

  return nItems;
}
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, 0);
  TfLiteTensor* hits = GetOutput(context, node, 1);
  const TfLiteTensor* lookup = GetInput(context, node, 0);
  const TfLiteTensor* key = GetInput(context, node, 1);
  const TfLiteTensor* value = GetInput(context, node, 2);

  const int num_rows = SizeOfDimension(value, 0);
  const int row_bytes = value->bytes / num_rows;
  void* pointer = nullptr;
  DynamicBuffer buf;

  for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
    int idx = -1;
    pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows,
                      sizeof(int32_t), greater);
    if (pointer != nullptr) {
      idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) /
            sizeof(int32_t);
    }

    if (idx >= num_rows || idx < 0) {
      if (output->type == kTfLiteString) {
        buf.AddString(nullptr, 0);
      } else {
        memset(output->data.raw + i * row_bytes, 0, row_bytes);
      }
      hits->data.uint8[i] = 0;
    } else {
      if (output->type == kTfLiteString) {
        buf.AddString(GetString(value, idx));
      } else {
        memcpy(output->data.raw + i * row_bytes,
               value->data.raw + idx * row_bytes, row_bytes);
      }
      hits->data.uint8[i] = 1;
    }
  }
  if (output->type == kTfLiteString) {
    buf.WriteToTensorAsVector(output);
  }
  return kTfLiteOk;
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  output->type = input2->type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  if (output->type == kTfLiteUInt8) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, params->activation, output, &data->output_activation_min,
        &data->output_activation_max));
    const double real_multiplier =
        input1->params.scale / (input2->params.scale * output->params.scale);
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }

  return context->ResizeTensor(context, output, output_size);
}
CWE-787
24
Status CalculateOutputIndex(OpKernelContext* context, int dimension,
                            const vector<INDEX_TYPE>& parent_output_index,
                            INDEX_TYPE output_index_multiplier,
                            INDEX_TYPE output_size,
                            vector<INDEX_TYPE>* result) {
  const RowPartitionTensor row_partition_tensor =
      GetRowPartitionTensor(context, dimension);
  auto partition_type = GetRowPartitionTypeByDimension(dimension);
  switch (partition_type) {
    case RowPartitionType::VALUE_ROWIDS:
      CalculateOutputIndexValueRowID(
          context, row_partition_tensor, parent_output_index,
          output_index_multiplier, output_size, result);
      return tensorflow::Status::OK();
    case RowPartitionType::ROW_SPLITS:
      if (row_partition_tensor.size() - 1 > parent_output_index.size()) {
        return errors::InvalidArgument(
            "Row partition size is greater than output size: ",
            row_partition_tensor.size() - 1, " > ",
            parent_output_index.size());
      }
      CalculateOutputIndexRowSplit(
          context, row_partition_tensor, parent_output_index,
          output_index_multiplier, output_size, result);
      return tensorflow::Status::OK();
    default:
      return errors::InvalidArgument(
          "Unsupported partition type:",
          RowPartitionTypeToString(partition_type));
  }
}
CWE-131
88
String UTF16BEDecoder::to_utf8(const StringView& input)
{
    StringBuilder builder(input.length() / 2);
    for (size_t i = 0; i < input.length(); i += 2) {
        u16 code_point = (input[i] << 8) | input[i + 1];
        builder.append_code_point(code_point);
    }
    return builder.to_string();
}
CWE-120
44
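The CWE-120 label is consistent with an odd-length input: on the last iteration the loop reads input[i + 1] one byte past the end. Clamping the loop to whole 16-bit units is the obvious guard (a sketch, not necessarily the project's actual patch):

    for (size_t i = 0; i + 1 < input.length(); i += 2) {
        u16 code_point = (input[i] << 8) | input[i + 1];
        builder.append_code_point(code_point);
    }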
Result ZipFile::uncompressEntry (int index, const File& targetDirectory, bool shouldOverwriteFiles)
{
    auto* zei = entries.getUnchecked (index);

   #if JUCE_WINDOWS
    auto entryPath = zei->entry.filename;
   #else
    auto entryPath = zei->entry.filename.replaceCharacter ('\\', '/');
   #endif

    if (entryPath.isEmpty())
        return Result::ok();

    auto targetFile = targetDirectory.getChildFile (entryPath);

    if (entryPath.endsWithChar ('/') || entryPath.endsWithChar ('\\'))
        return targetFile.createDirectory(); // (entry is a directory, not a file)

    std::unique_ptr<InputStream> in (createStreamForEntry (index));

    if (in == nullptr)
        return Result::fail ("Failed to open the zip file for reading");

    if (targetFile.exists())
    {
        if (! shouldOverwriteFiles)
            return Result::ok();

        if (! targetFile.deleteFile())
            return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName());
    }

    if (! targetFile.getParentDirectory().createDirectory())
        return Result::fail ("Failed to create target folder: " + targetFile.getParentDirectory().getFullPathName());

    if (zei->entry.isSymbolicLink)
    {
        String originalFilePath (in->readEntireStreamAsString()
                                    .replaceCharacter (L'/', File::getSeparatorChar()));

        if (! File::createSymbolicLink (targetFile, originalFilePath, true))
            return Result::fail ("Failed to create symbolic link: " + originalFilePath);
    }
    else
    {
        FileOutputStream out (targetFile);

        if (out.failedToOpen())
            return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName());

        out << *in;
    }

    targetFile.setCreationTime (zei->entry.fileTime);
    targetFile.setLastModificationTime (zei->entry.fileTime);
    targetFile.setLastAccessTime (zei->entry.fileTime);

    return Result::ok();
}
CWE-22
2
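This row has the classic zip-slip shape behind its CWE-22 label: an entry named "../../x" makes targetFile resolve outside targetDirectory. A containment check right after the join (illustrative; JUCE's actual patch may differ):

    auto targetFile = targetDirectory.getChildFile (entryPath);

    // Reject entries whose resolved path escapes the extraction root.
    if (! targetFile.isAChildOf (targetDirectory))
        return Result::fail ("Entry path escapes target directory: " + entryPath);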
TfLiteStatus PrepareHashtable(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 0);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  TF_LITE_ENSURE(context, node->user_data != nullptr);
  const auto* params =
      reinterpret_cast<const TfLiteHashtableParams*>(node->user_data);

  TF_LITE_ENSURE(context, !params->table_name.empty());
  TF_LITE_ENSURE(context, (params->key_dtype == kTfLiteInt64 &&
                           params->value_dtype == kTfLiteString) ||
                              (params->key_dtype == kTfLiteString &&
                               params->value_dtype == kTfLiteInt64));

  TfLiteTensor* resource_handle_tensor =
      GetOutput(context, node, kResourceHandleTensor);
  TF_LITE_ENSURE(context, resource_handle_tensor != nullptr);
  TF_LITE_ENSURE_EQ(context, resource_handle_tensor->type, kTfLiteInt32);

  TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
  outputSize->data[0] = 1;
  return context->ResizeTensor(context, resource_handle_tensor, outputSize);
}
CWE-125
47
TEST_F(QuantizedConv2DTest, OddPadding) {
  const int stride = 2;
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());

  const int depth = 1;
  const int image_width = 4;
  const int image_height = 4;
  const int image_batch_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({image_batch_count, image_height, image_width, depth}),
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});

  const int filter_size = 3;
  const int filter_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({filter_size, filter_size, depth, filter_count}),
      {1, 2, 3, 4, 5, 6, 7, 8, 9});

  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});

  TF_ASSERT_OK(RunOpKernel());

  const int expected_width = image_width / stride;
  const int expected_height = (image_height * filter_count) / stride;
  Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
                                          expected_width, filter_count}));
  test::FillValues<qint32>(&expected, {348, 252, 274, 175});
  test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
CWE-476
46
void AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
                               TfLitePoolParams* params, OpData* data,
                               const TfLiteTensor* input,
                               TfLiteTensor* output) {
  int32_t activation_min;
  int32_t activation_max;
  CalculateActivationRangeQuantized(context, params->activation, output,
                                    &activation_min, &activation_max);
#define TF_LITE_AVERAGE_POOL(type)                                         \
  tflite::PoolParams op_params;                                            \
  op_params.stride_height = params->stride_height;                        \
  op_params.stride_width = params->stride_width;                          \
  op_params.filter_height = params->filter_height;                        \
  op_params.filter_width = params->filter_width;                          \
  op_params.padding_values.height = data->padding.height;                 \
  op_params.padding_values.width = data->padding.width;                   \
  op_params.quantized_activation_min = activation_min;                    \
  op_params.quantized_activation_max = activation_max;                    \
  type::AveragePool(op_params, GetTensorShape(input),                     \
                    GetTensorData<int16_t>(input), GetTensorShape(output), \
                    GetTensorData<int16_t>(output))
  TF_LITE_AVERAGE_POOL(reference_integer_ops);
#undef TF_LITE_AVERAGE_POOL
}
CWE-835
42
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
  return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
}
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const CTCBeamSearchDecoderParams* option =
      reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
  const int top_paths = option->top_paths;
  TF_LITE_ENSURE(context, option->beam_width >= top_paths);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  // The outputs should be top_paths * 3 + 1.
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);

  const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
  // TensorFlow only supports float.
  TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
  const int batch_size = SizeOfDimension(inputs, 1);

  const TfLiteTensor* sequence_length =
      GetInput(context, node, kSequenceLengthTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
  TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
  // TensorFlow only supports int32.
  TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32);

  // Resize decoded outputs.
  // Do not resize indices & values cause we don't know the values yet.
  for (int i = 0; i < top_paths; ++i) {
    TfLiteTensor* indices = GetOutput(context, node, i);
    SetTensorToDynamic(indices);
    TfLiteTensor* values = GetOutput(context, node, i + top_paths);
    SetTensorToDynamic(values);
    TfLiteTensor* output_shape = GetOutput(context, node, i + 2 * top_paths);
    SetTensorToDynamic(output_shape);
  }

  // Resize log probability outputs.
  TfLiteTensor* log_probability_output =
      GetOutput(context, node, top_paths * 3);
  TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
  log_probability_output_shape_array->data[0] = batch_size;
  log_probability_output_shape_array->data[1] = top_paths;
  return context->ResizeTensor(context, log_probability_output,
                               log_probability_output_shape_array);
}
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
  FillDiagHelper(input, diag, output);
  return kTfLiteOk;
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  auto* params = reinterpret_cast<TfLiteShapeParams*>(node->builtin_data);
  switch (params->out_type) {
    case kTfLiteInt32:
      output->type = kTfLiteInt32;
      break;
    case kTfLiteInt64:
      output->type = kTfLiteInt64;
      break;
    default:
      context->ReportError(context, "Unknown shape output data type: %d",
                           params->out_type);
      return kTfLiteError;
  }

  // By design, the input shape is always known at the time of Prepare, even
  // if the preceding op that generates |input| is dynamic. Thus, we can
  // always compute the shape immediately, without waiting for Eval.
  SetTensorToPersistentRo(output);

  // Shape always produces a 1-dimensional output tensor, where each output
  // element is the length of the corresponding input tensor's dimension.
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
  output_size->data[0] = NumDimensions(input);
  TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));

  TFLITE_DCHECK_EQ(NumDimensions(output), 1);
  TFLITE_DCHECK_EQ(SizeOfDimension(output, 0), NumDimensions(input));

  // Immediately propagate the known shape to the output tensor. This allows
  // downstream ops that rely on the value to use it during prepare.
  switch (output->type) {
    case kTfLiteInt32:
      ExtractShape(input, GetTensorData<int32_t>(output));
      break;
    case kTfLiteInt64:
      ExtractShape(input, GetTensorData<int64_t>(output));
      break;
    default:
      return kTfLiteError;
  }

  return kTfLiteOk;
}
CWE-125
47
CharArray(int len) {
    buf = new char[len]();
}
CWE-787
24
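A hedged reading of the CWE-787 label on this row: a non-positive or overflowed len reaches new char[len] unchecked. A validated variant (sketch only, using <stdexcept>):

    CharArray(int len) {
        if (len <= 0)
            throw std::length_error("CharArray: non-positive length");
        buf = new char[static_cast<size_t>(len)]();
    }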
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  static const int kOutputUniqueTensor = 0;
  static const int kOutputIndexTensor = 1;

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output_unique_tensor =
      GetOutput(context, node, kOutputUniqueTensor);
  TfLiteTensor* output_index_tensor =
      GetOutput(context, node, kOutputIndexTensor);

  // The op only supports 1D input.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
  TfLiteIntArray* output_index_shape = TfLiteIntArrayCopy(input->dims);
  // The unique values are determined during evaluation, so we don't know yet
  // the size of the output tensor.
  SetTensorToDynamic(output_unique_tensor);
  return context->ResizeTensor(context, output_index_tensor,
                               output_index_shape);
}
CWE-787
24
static int lookup1_values(int entries, int dim)
{
   int r = (int) floor(exp((float) log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)   // (int) cast for MinGW warning;
      ++r;                                              // floor() to avoid _ftol() when non-CRT
   assert(pow((float) r+1, dim) > entries);
   assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
   return r;
}
CWE-125
47
int PackLinuxElf32::canUnpack()
{
    if (super::canUnpack()) {
        return true;
    }
    if (Elf32_Ehdr::ET_DYN==get_te16(&ehdri.e_type)) {
        PackLinuxElf32help1(fi);
    }
    return false;
}
CWE-476
46
void ResourceHandle::FromProto(const ResourceHandleProto& proto) {
  set_device(proto.device());
  set_container(proto.container());
  set_name(proto.name());
  set_hash_code(proto.hash_code());
  set_maybe_type_name(proto.maybe_type_name());
  std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
  for (const auto& dtype_and_shape : proto.dtypes_and_shapes()) {
    DataType dtype = dtype_and_shape.dtype();
    PartialTensorShape shape(dtype_and_shape.shape());
    dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{dtype, shape});
  }
  dtypes_and_shapes_ = std::move(dtypes_and_shapes);
}
CWE-617
51
int jas_seq2d_output(jas_matrix_t *matrix, FILE *out)
{
#define MAXLINELEN	80
	int i;
	int j;
	jas_seqent_t x;
	char buf[MAXLINELEN + 1];
	char sbuf[MAXLINELEN + 1];
	int n;

	fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_seq2d_xstart(matrix),
	  jas_seq2d_ystart(matrix));
	fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_matrix_numcols(matrix),
	  jas_matrix_numrows(matrix));

	buf[0] = '\0';
	for (i = 0; i < jas_matrix_numrows(matrix); ++i) {
		for (j = 0; j < jas_matrix_numcols(matrix); ++j) {
			x = jas_matrix_get(matrix, i, j);
			sprintf(sbuf, "%s%4ld", (strlen(buf) > 0) ? " " : "",
			  JAS_CAST(long, x));
			n = JAS_CAST(int, strlen(buf));
			if (n + JAS_CAST(int, strlen(sbuf)) > MAXLINELEN) {
				fputs(buf, out);
				fputs("\n", out);
				buf[0] = '\0';
			}
			strcat(buf, sbuf);
			if (j == jas_matrix_numcols(matrix) - 1) {
				fputs(buf, out);
				fputs("\n", out);
				buf[0] = '\0';
			}
		}
	}
	fputs(buf, out);
	return 0;
}
CWE-190
19
size_t jsuGetFreeStack() {
#ifdef ARM
  void *frame = __builtin_frame_address(0);
  size_t stackPos = (size_t)((char*)frame);
  size_t stackEnd = (size_t)((char*)&LINKER_END_VAR);
  if (stackPos < stackEnd) return 0; // should never happen, but just in case of overflow!
  return stackPos - stackEnd;
#elif defined(LINUX)
  // On linux, we set STACK_BASE from `main`.
  char ptr; // this is on the stack
  extern void *STACK_BASE;
  uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr);
  return 1000000 - count; // give it 1 megabyte of stack
#else
  // stack depth seems pretty platform-specific :( Default to a value that disables it
  return 1000000; // no stack depth check on this platform
#endif
}
CWE-190
19
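The CWE-190 label points at the LINUX branch: once the stack has grown past 1 MB, 1000000 - count wraps around to a huge unsigned value and the free-stack check is defeated. A clamped version (sketch):

    uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr);
    if (count >= 1000000) return 0;  // stack (nearly) exhausted: report none free
    return 1000000 - count;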
EntropyParser::EntropyParser(class Frame *frame,class Scan *scan)
  : JKeeper(scan->EnvironOf()), m_pScan(scan), m_pFrame(frame)
{
  m_ucCount = scan->ComponentsInScan();

  // The residual scan uses all components here, not just for, but
  // it does not require the component count either.
  for(volatile UBYTE i = 0;i < m_ucCount && i < 4;i++) {
    JPG_TRY {
      m_pComponent[i] = scan->ComponentOf(i);
    } JPG_CATCH {
      m_pComponent[i] = NULL;
    } JPG_ENDTRY;
  }

  m_ulRestartInterval  = m_pFrame->TablesOf()->RestartIntervalOf();
  m_usNextRestartMarker = 0xffd0;
  m_ulMCUsToGo         = m_ulRestartInterval;
  m_bSegmentIsValid    = true;
  m_bScanForDNL        = (m_pFrame->HeightOf() == 0)?true:false;
  m_bDNLFound          = false;
}
CWE-476
46
void ZlibInStream::setUnderlying(InStream* is, int bytesIn_)
{
  underlying = is;
  bytesIn = bytesIn_;
  ptr = end = start;
}
CWE-787
24
bool load_face(Face & face, unsigned int options)
{
#ifdef GRAPHITE2_TELEMETRY
    telemetry::category _misc_cat(face.tele.misc);
#endif
    Face::Table silf(face, Tag::Silf, 0x00050000);
    if (silf)
        options &= ~gr_face_dumbRendering;
    else if (!(options & gr_face_dumbRendering))
        return false;

    if (!face.readGlyphs(options))
        return false;

    if (silf)
    {
        if (!face.readFeatures() || !face.readGraphite(silf))
        {
#if !defined GRAPHITE2_NTRACING
            if (global_log)
            {
                *global_log << json::object
                    << "type" << "fontload"
                    << "failure" << face.error()
                    << "context" << face.error_context()
                    << json::close;
            }
#endif
            return false;
        }
        else
            return true;
    }
    else
        return options & gr_face_dumbRendering;
}
CWE-476
46
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
  const Tensor* tensor = GetTensorFromHandle(h, status);
  TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
  TensorReference tensor_ref(*tensor);  // This will call buf_->Ref()

  auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
  tf_dlm_tensor_ctx->reference = tensor_ref;

  DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
  dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
  dlm_tensor->deleter = &DLManagedTensorDeleter;
  dlm_tensor->dl_tensor.ctx = GetDlContext(h, status);
  int ndim = tensor->dims();
  dlm_tensor->dl_tensor.ndim = ndim;
  dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status);
  dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status);

  std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
  std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
  shape_arr->resize(ndim);
  stride_arr->resize(ndim, 1);
  for (int i = 0; i < ndim; i++) {
    (*shape_arr)[i] = tensor->dim_size(i);
  }
  for (int i = ndim - 2; i >= 0; --i) {
    (*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
  }

  dlm_tensor->dl_tensor.shape = &(*shape_arr)[0];
  // There are two ways to represent compact row-major data
  // 1) nullptr indicates tensor is compact and row-majored.
  // 2) fill in the strides array as the real case for compact row-major data.
  // Here we choose option 2, since some frameworks didn't handle the strides
  // argument properly.
  dlm_tensor->dl_tensor.strides = &(*stride_arr)[0];
  dlm_tensor->dl_tensor.byte_offset =
      0;  // TF doesn't handle the strides and byte_offsets here
  return static_cast<void*>(dlm_tensor);
}
CWE-908
48
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, GetInput(context, node, 0)->type,
                          kTfLiteString);
  TF_LITE_ENSURE_TYPES_EQ(context, GetOutput(context, node, 0)->type,
                          kTfLiteString);
  return kTfLiteOk;
}
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
  output->type = input->type;

  TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
  return context->ResizeTensor(context, output, output_size);
}
CWE-787
24
otError Commissioner::AddJoiner(const Mac::ExtAddress *aEui64, const char *aPskd, uint32_t aTimeout)
{
    otError error = OT_ERROR_NO_BUFS;

    VerifyOrExit(mState == OT_COMMISSIONER_STATE_ACTIVE, error = OT_ERROR_INVALID_STATE);
    VerifyOrExit(strlen(aPskd) <= Dtls::kPskMaxLength, error = OT_ERROR_INVALID_ARGS);
    RemoveJoiner(aEui64, 0); // remove immediately

    for (Joiner *joiner = &mJoiners[0]; joiner < OT_ARRAY_END(mJoiners); joiner++)
    {
        if (joiner->mValid)
        {
            continue;
        }

        if (aEui64 != NULL)
        {
            joiner->mEui64 = *aEui64;
            joiner->mAny   = false;
        }
        else
        {
            joiner->mAny = true;
        }

        (void)strlcpy(joiner->mPsk, aPskd, sizeof(joiner->mPsk));
        joiner->mValid          = true;
        joiner->mExpirationTime = TimerMilli::GetNow() + Time::SecToMsec(aTimeout);

        UpdateJoinerExpirationTimer();

        SendCommissionerSet();

        otLogInfoMeshCoP("Added Joiner (%s, %s)",
                         (aEui64 != NULL) ? aEui64->ToString().AsCString() : "*", aPskd);

        ExitNow(error = OT_ERROR_NONE);
    }

exit:
    return error;
}
CWE-787
24
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                const TfLiteTensor* data,
                                const TfLiteTensor* segment_ids,
                                TfLiteTensor* output) {
  int max_index = -1;
  const int segment_id_size = segment_ids->dims->data[0];
  if (segment_id_size > 0) {
    max_index = segment_ids->data.i32[segment_id_size - 1];
  }
  const int data_rank = NumDimensions(data);
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
  output_shape->data[0] = max_index + 1;
  for (int i = 1; i < data_rank; ++i) {
    output_shape->data[i] = data->dims->data[i];
  }
  return context->ResizeTensor(context, output, output_shape);
}
CWE-770
37
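The CWE-770 label fits the trust placed in the final segment id: a negative or huge value drives the output allocation directly. Validating the ids before sizing bounds the allocation (illustrative sketch, not necessarily the upstream patch):

    for (int i = 0; i < segment_id_size; ++i) {
      const int32_t id = segment_ids->data.i32[i];
      TF_LITE_ENSURE(context, id >= 0);  // ids must be non-negative
      if (i > 0) {
        // ids must be sorted so the last one really is the maximum
        TF_LITE_ENSURE(context, id >= segment_ids->data.i32[i - 1]);
      }
    }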
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
                         GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      OpData* data = reinterpret_cast<OpData*>(node->user_data);
      EvalUsingLookupTable(data, input, output);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Only float32 and int8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
CWE-787
24
int FdInStream::overrun(int itemSize, int nItems, bool wait)
{
  if (itemSize > bufSize)
    throw Exception("FdInStream overrun: max itemSize exceeded");

  if (end - ptr != 0)
    memmove(start, ptr, end - ptr);

  offset += ptr - start;
  end -= ptr - start;
  ptr = start;

  int bytes_to_read;
  while (end < start + itemSize) {
    bytes_to_read = start + bufSize - end;
    if (!timing) {
      // When not timing, we must be careful not to read too much
      // extra data into the buffer. Otherwise, the line speed
      // estimation might stay at zero for a long time: All reads
      // during timing=1 can be satisfied without calling
      // readWithTimeoutOrCallback. However, reading only 1 or 2 bytes
      // bytes is ineffecient.
      bytes_to_read = vncmin(bytes_to_read, vncmax(itemSize*nItems, 8));
    }
    int n = readWithTimeoutOrCallback((U8*)end, bytes_to_read, wait);
    if (n == 0) return 0;
    end += n;
  }

  if (itemSize * nItems > end - ptr)
    nItems = (end - ptr) / itemSize;

  return nItems;
}
CWE-787
24
Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td,
                                           const String& key,
                                           const String& iv) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }

  int max_key_size = mcrypt_enc_get_key_size(pm->m_td);
  int iv_size = mcrypt_enc_get_iv_size(pm->m_td);

  if (key.empty()) {
    raise_warning("Key size is 0");
  }

  unsigned char *key_s = (unsigned char *)malloc(key.size());
  memset(key_s, 0, key.size());

  unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1);
  memset(iv_s, 0, iv_size + 1);

  int key_size;
  if (key.size() > max_key_size) {
    raise_warning("Key size too large; supplied length: %d, max: %d",
                  key.size(), max_key_size);
    key_size = max_key_size;
  } else {
    key_size = key.size();
  }
  memcpy(key_s, key.data(), key.size());

  if (iv.size() != iv_size) {
    raise_warning("Iv size incorrect; supplied length: %d, needed: %d",
                  iv.size(), iv_size);
  }
  memcpy(iv_s, iv.data(), std::min(iv_size, iv.size()));

  mcrypt_generic_deinit(pm->m_td);
  int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s);

  /* If this function fails, close the mcrypt module to prevent crashes
   * when further functions want to access this resource */
  if (result < 0) {
    pm->close();
    switch (result) {
      case -3:
        raise_warning("Key length incorrect");
        break;
      case -4:
        raise_warning("Memory allocation error");
        break;
      case -1:
      default:
        raise_warning("Unknown error");
        break;
    }
  } else {
    pm->m_init = true;
  }

  free(iv_s);
  free(key_s);
  return result;
}
CWE-190
19
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  bool requires_broadcast = !HaveSameShapes(input1, input2);
  switch (input1->type) {
    case kTfLiteFloat32:
      Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output,
                                                       requires_broadcast);
      break;
    case kTfLiteInt32:
      Comparison<int32_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt64:
      Comparison<int64_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteUInt8:
      ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt8:
      ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    default:
      context->ReportError(
          context, "Does not support type %d, requires float|int|uint8",
          input1->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-787
24
explicit UnravelIndexOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
CWE-190
19
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
    EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
  } else if (output->type == kTfLiteUInt8) {
    TF_LITE_ENSURE_OK(
        context, EvalQuantized<kernel_type>(context, node, params, data,
                                            input1, input2, output));
  } else {
    context->ReportError(
        context,
        "Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
        output->type);
    return kTfLiteError;
  }

  return kTfLiteOk;
}
CWE-125
47
Java_org_tensorflow_lite_InterpreterTest_getNativeHandleForDelegate(
    JNIEnv* env, jclass clazz) {
  // A simple op which outputs a tensor with values of 7.
  static TfLiteRegistration registration = {
      .init = nullptr,
      .free = nullptr,
      .prepare =
          [](TfLiteContext* context, TfLiteNode* node) {
            const TfLiteTensor* input = tflite::GetInput(context, node, 0);
            TfLiteTensor* output = tflite::GetOutput(context, node, 0);
            TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims);
            output->type = kTfLiteFloat32;
            return context->ResizeTensor(context, output, output_dims);
          },
      .invoke =
          [](TfLiteContext* context, TfLiteNode* node) {
            TfLiteTensor* output = tflite::GetOutput(context, node, 0);
            std::fill(output->data.f,
                      output->data.f + tflite::NumElements(output), 7.0f);
            return kTfLiteOk;
          },
      .profiling_string = nullptr,
      .builtin_code = 0,
      .custom_name = "",
      .version = 1,
  };
  static TfLiteDelegate delegate = {
      .data_ = nullptr,
      .Prepare =
          [](TfLiteContext* context,
             TfLiteDelegate* delegate) -> TfLiteStatus {
        TfLiteIntArray* execution_plan;
        TF_LITE_ENSURE_STATUS(
            context->GetExecutionPlan(context, &execution_plan));
        context->ReplaceNodeSubsetsWithDelegateKernels(
            context, registration, execution_plan, delegate);
        // Now bind delegate buffer handles for all tensors.
        for (size_t i = 0; i < context->tensors_size; ++i) {
          context->tensors[i].delegate = delegate;
          context->tensors[i].buffer_handle = static_cast<int>(i);
        }
        return kTfLiteOk;
      },
      .CopyFromBufferHandle = nullptr,
      .CopyToBufferHandle = nullptr,
      .FreeBufferHandle = nullptr,
      .flags = kTfLiteDelegateFlagsAllowDynamicTensors,
  };
  return reinterpret_cast<jlong>(&delegate);
}
CWE-125
47
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      size_t elements = input->bytes / sizeof(float);
      const float* in = GetTensorData<float>(input);
      const float* in_end = in + elements;
      float* out = GetTensorData<float>(output);
      for (; in < in_end; in++, out++)
        *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
CWE-787
24
bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration,
                                 const TfLiteNode* node,
                                 TfLiteContext* context) {
  if (node->builtin_data == nullptr) return false;
  const auto* fc_params =
      reinterpret_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);
  const int kInput = 0;
  const int kWeights = 1;
  const int kBias = 2;

  if (fc_params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) {
    return false;
  }
  const TfLiteTensor* input = GetInput(context, node, kInput);
  const TfLiteTensor* weights = GetInput(context, node, kWeights);

  if (!IsFloatType(input->type)) {
    return false;
  }
  if (!IsFloatType(weights->type) || !IsConstantTensor(weights)) {
    return false;
  }
  // Core ML 2 only supports single-batch fully connected layer, thus
  // dimensions except the last one should be 1.
  if (input->dims->data[input->dims->size - 1] != NumElements(input)) {
    return false;
  }

  if (node->inputs->size > 2) {
    const TfLiteTensor* bias = GetInput(context, node, kBias);
    if (!IsFloatType(bias->type) || !IsConstantTensor(bias)) {
      return false;
    }
  }

  TfLiteFusedActivation activation = fc_params->activation;
  if (activation == kTfLiteActSignBit) {
    return false;
  }
  return true;
}
CWE-125
47
gdImagePtr gdImageCreateTrueColor (int sx, int sy)
{
  int i;
  gdImagePtr im;

  if (overflow2(sx, sy)) {
    return NULL;
  }

  if (overflow2(sizeof(unsigned char *), sy)) {
    return NULL;
  }
  if (overflow2(sizeof(int) + sizeof(unsigned char), sx * sy)) {
    return NULL;
  }

  // Check for OOM before doing a potentially large allocation.
  auto allocsz = sizeof(gdImage)
    + sy * (sizeof(int *) + sizeof(unsigned char *))
    + sx * sy * (sizeof(int) + sizeof(unsigned char));
  if (UNLIKELY(precheckOOM(allocsz))) {
    // Don't throw here because GD might need to do its own cleanup.
    return NULL;
  }

  im = (gdImage *) gdMalloc(sizeof(gdImage));
  memset(im, 0, sizeof(gdImage));
  im->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
  im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
  im->polyInts = 0;
  im->polyAllocated = 0;
  im->brush = 0;
  im->tile = 0;
  im->style = 0;
  for (i = 0; i < sy; i++) {
    im->tpixels[i] = (int *) gdCalloc(sx, sizeof(int));
    im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
  }
  im->sx = sx;
  im->sy = sy;
  im->transparent = (-1);
  im->interlace = 0;
  im->trueColor = 1;
  /* 2.0.2: alpha blending is now on by default, and saving of alpha is
   * off by default. This allows font antialiasing to work as expected
   * on the first try in JPEGs -- quite important -- and also allows
   * for smaller PNGs when saving of alpha channel is not really
   * desired, which it usually isn't! */
  im->saveAlphaFlag = 0;
  im->alphaBlendingFlag = 1;
  im->thick = 1;
  im->AA = 0;
  im->AA_polygon = 0;
  im->cx1 = 0;
  im->cy1 = 0;
  im->cx2 = im->sx - 1;
  im->cy2 = im->sy - 1;
  im->interpolation = NULL;
  im->interpolation_id = GD_BILINEAR_FIXED;
  return im;
}
CWE-22
2
bool read(ReadonlyBytes buffer)
{
    auto fields_size = sizeof(EndOfCentralDirectory) - sizeof(u8*);
    if (buffer.size() < fields_size)
        return false;
    if (memcmp(buffer.data(), end_of_central_directory_signature,
               sizeof(end_of_central_directory_signature)) != 0)
        return false;
    memcpy(reinterpret_cast<void*>(&disk_number),
           buffer.data() + sizeof(end_of_central_directory_signature),
           fields_size);
    comment = buffer.data() + sizeof(end_of_central_directory_signature) + fields_size;
    return true;
}
CWE-120
44
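The CWE-120 label matches the off-by-signature bound above: the size check ignores the 4-byte signature that the memcpy then skips over, so the copy can run past the end of the buffer. The check should cover both parts (sketch):

    if (buffer.size() < sizeof(end_of_central_directory_signature) + fields_size)
        return false;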
Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td,
                                           const String& key,
                                           const String& iv) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }

  int max_key_size = mcrypt_enc_get_key_size(pm->m_td);
  int iv_size = mcrypt_enc_get_iv_size(pm->m_td);

  if (key.empty()) {
    raise_warning("Key size is 0");
  }

  unsigned char *key_s = (unsigned char *)malloc(key.size());
  memset(key_s, 0, key.size());

  unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1);
  memset(iv_s, 0, iv_size + 1);

  int key_size;
  if (key.size() > max_key_size) {
    raise_warning("Key size too large; supplied length: %d, max: %d",
                  key.size(), max_key_size);
    key_size = max_key_size;
  } else {
    key_size = key.size();
  }
  memcpy(key_s, key.data(), key.size());

  if (iv.size() != iv_size) {
    raise_warning("Iv size incorrect; supplied length: %d, needed: %d",
                  iv.size(), iv_size);
  }
  memcpy(iv_s, iv.data(), std::min(iv_size, iv.size()));

  mcrypt_generic_deinit(pm->m_td);
  int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s);

  /* If this function fails, close the mcrypt module to prevent crashes
   * when further functions want to access this resource */
  if (result < 0) {
    pm->close();
    switch (result) {
      case -3:
        raise_warning("Key length incorrect");
        break;
      case -4:
        raise_warning("Memory allocation error");
        break;
      case -1:
      default:
        raise_warning("Unknown error");
        break;
    }
  } else {
    pm->m_init = true;
  }

  free(iv_s);
  free(key_s);
  return result;
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const CTCBeamSearchDecoderParams* option =
      reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
  const int top_paths = option->top_paths;
  TF_LITE_ENSURE(context, option->beam_width >= top_paths);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  // The outputs should be top_paths * 3 + 1.
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);

  const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
  // TensorFlow only supports float.
  TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
  const int batch_size = SizeOfDimension(inputs, 1);

  const TfLiteTensor* sequence_length =
      GetInput(context, node, kSequenceLengthTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
  TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
  // TensorFlow only supports int32.
  TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32);

  // Resize decoded outputs.
  // Do not resize indices & values cause we don't know the values yet.
  for (int i = 0; i < top_paths; ++i) {
    TfLiteTensor* indices = GetOutput(context, node, i);
    SetTensorToDynamic(indices);
    TfLiteTensor* values = GetOutput(context, node, i + top_paths);
    SetTensorToDynamic(values);
    TfLiteTensor* output_shape = GetOutput(context, node, i + 2 * top_paths);
    SetTensorToDynamic(output_shape);
  }

  // Resize log probability outputs.
  TfLiteTensor* log_probability_output =
      GetOutput(context, node, top_paths * 3);
  TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
  log_probability_output_shape_array->data[0] = batch_size;
  log_probability_output_shape_array->data[1] = top_paths;
  return context->ResizeTensor(context, log_probability_output,
                               log_probability_output_shape_array);
}
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output_index_tensor = GetOutput(context, node, 1);
  TF_LITE_ENSURE_EQ(context, NumElements(output_index_tensor),
                    NumElements(input));

  switch (input->type) {
    case kTfLiteInt8:
      TF_LITE_ENSURE_STATUS(EvalImpl<int8_t>(context, input, node));
      break;
    case kTfLiteInt16:
      TF_LITE_ENSURE_STATUS(EvalImpl<int16_t>(context, input, node));
      break;
    case kTfLiteInt32:
      TF_LITE_ENSURE_STATUS(EvalImpl<int32_t>(context, input, node));
      break;
    case kTfLiteInt64:
      TF_LITE_ENSURE_STATUS(EvalImpl<int64_t>(context, input, node));
      break;
    case kTfLiteFloat32:
      TF_LITE_ENSURE_STATUS(EvalImpl<float>(context, input, node));
      break;
    case kTfLiteUInt8:
      TF_LITE_ENSURE_STATUS(EvalImpl<uint8_t>(context, input, node));
      break;
    default:
      context->ReportError(context, "Currently Unique doesn't support type: %s",
                           TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-787
24
MemInStream(const void* data, int len, bool deleteWhenDone_=false)
  : start((const U8*)data), deleteWhenDone(deleteWhenDone_)
{
  ptr = start;
  end = start + len;
}
CWE-787
24
inline int StringData::size() const { return m_len; }
CWE-125
47
int jas_memdump(FILE *out, void *data, size_t len)
{
    size_t i;
    size_t j;
    uchar *dp;
    dp = data;
    for (i = 0; i < len; i += 16) {
        fprintf(out, "%04zx:", i);
        for (j = 0; j < 16; ++j) {
            if (i + j < len) {
                fprintf(out, " %02x", dp[i + j]);
            }
        }
        fprintf(out, "\n");
    }
    return 0;
}
CWE-190
19
std::string encodeBase64(const std::string& input) {
  using namespace boost::archive::iterators;
  using b64it = base64_from_binary<transform_width<const char*, 6, 8>>;

  auto data = input.data();
  std::string encoded(b64it(data), b64it(data + (input.length())));
  encoded.append((3 - (input.length() % 3)) % 3, '=');
  return encoded;
}
CWE-787
24
QInt16() {}
CWE-908
48
vector <string> genECDSAKey() {
    vector<char> errMsg(BUF_LEN, 0);
    int errStatus = 0;
    vector <uint8_t> encr_pr_key(BUF_LEN, 0);
    vector<char> pub_key_x(BUF_LEN, 0);
    vector<char> pub_key_y(BUF_LEN, 0);

    uint32_t enc_len = 0;

    sgx_status_t status = trustedGenerateEcdsaKeyAES(eid, &errStatus,
                                                     errMsg.data(), encr_pr_key.data(), &enc_len,
                                                     pub_key_x.data(), pub_key_y.data());

    HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data());

    vector <string> keys(3);
    vector<char> hexEncrKey(BUF_LEN * 2, 0);
    carray2Hex(encr_pr_key.data(), enc_len, hexEncrKey.data(), BUF_LEN * 2);

    keys.at(0) = hexEncrKey.data();
    keys.at(1) = string(pub_key_x.data()) + string(pub_key_y.data());

    vector<unsigned char> randBuffer(32, 0);
    fillRandomBuffer(randBuffer);

    vector<char> rand_str(BUF_LEN, 0);
    carray2Hex(randBuffer.data(), 32, rand_str.data(), BUF_LEN);

    keys.at(2) = rand_str.data();

    CHECK_STATE(keys.at(2).size() == 64);

    return keys;
}
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
  int32_t* out_buf = GetOutput(context, node, 0)->data.i32;
  const TfLiteTensor* hash = GetInput(context, node, 0);
  const TfLiteTensor* input = GetInput(context, node, 1);
  const TfLiteTensor* weight =
      NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2);

  switch (params->type) {
    case kTfLiteLshProjectionDense:
      DenseLshProjection(hash, input, weight, out_buf);
      break;
    case kTfLiteLshProjectionSparse:
      SparseLshProjection(hash, input, weight, out_buf);
      break;
    default:
      return kTfLiteError;
  }

  return kTfLiteOk;
}
CWE-787
24
MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null,
                                      const bson_iterator *elem ) {
    bson_iterator next = *elem;
    int size;

    bson_iterator_next( &next );
    size = next.cur - elem->cur;

    if ( name_or_null == NULL ) {
        if( bson_ensure_space( b, size ) == BSON_ERROR )
            return BSON_ERROR;
        bson_append( b, elem->cur, size );
    }
    else {
        int data_size = size - 2 - strlen( bson_iterator_key( elem ) );
        bson_append_estart( b, elem->cur[0], name_or_null, data_size );
        bson_append( b, bson_iterator_value( elem ), data_size );
    }

    return BSON_OK;
}
CWE-190
19
void *jas_realloc(void *ptr, size_t size)
{
    void *result;
    JAS_DBGLOG(101, ("jas_realloc called with %x,%zu\n", ptr, size));
    result = realloc(ptr, size);
    JAS_DBGLOG(100, ("jas_realloc(%p, %zu) -> %p\n", ptr, size, result));
    return result;
}
CWE-190
19
bool hex2carray(const char *_hex, uint64_t *_bin_len,
                uint8_t *_bin, uint64_t _max_length) {
    CHECK_STATE(_hex);
    CHECK_STATE(_bin);
    CHECK_STATE(_bin_len);

    int len = strnlen(_hex, 2 * _max_length + 1);
    CHECK_STATE(len != 2 * _max_length + 1);
    CHECK_STATE(len <= 2 * _max_length);

    if (len == 0 && len % 2 == 1)
        return false;

    *_bin_len = len / 2;

    for (int i = 0; i < len / 2; i++) {
        int high = char2int((char) _hex[i * 2]);
        int low = char2int((char) _hex[i * 2 + 1]);

        if (high < 0 || low < 0) {
            return false;
        }

        _bin[i] = (unsigned char) (high * 16 + low);
    }

    return true;
}
CWE-787
24
const String& setSize(int len) {
  assertx(m_str);
  m_str->setSize(len);
  return *this;
}
CWE-125
47
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* seq_lengths_tensor =
      GetInput(context, node, kSeqLengthsTensor);
  const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor);

  auto* params =
      reinterpret_cast<TfLiteReverseSequenceParams*>(node->builtin_data);
  int seq_dim = params->seq_dim;
  int batch_dim = params->batch_dim;

  TF_LITE_ENSURE(context, seq_dim >= 0);
  TF_LITE_ENSURE(context, batch_dim >= 0);
  TF_LITE_ENSURE(context, seq_dim != batch_dim);
  TF_LITE_ENSURE(context, seq_dim < NumDimensions(input));
  TF_LITE_ENSURE(context, batch_dim < NumDimensions(input));
  TF_LITE_ENSURE_EQ(context, SizeOfDimension(seq_lengths_tensor, 0),
                    SizeOfDimension(input, batch_dim));
  for (int i = 0; i < NumDimensions(seq_lengths_tensor); ++i) {
    TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim));
  }

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  reference_ops::ReverseSequence<T, TS>(
      seq_lengths, seq_dim, batch_dim, GetTensorShape(input),
      GetTensorData<T>(input), GetTensorShape(output),
      GetTensorData<T>(output));

  return kTfLiteOk;
}
CWE-787
24
TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // Use LUT to handle quantized elu path.
  if (input->type == kTfLiteInt8) {
    PopulateLookupTable<int8_t>(data, input, output, [](float value) {
      return value < 0.0 ? std::exp(value) - 1.0f : value;
    });
  }
  return GenericPrepare(context, node);
}
CWE-787
24
TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  bool requires_broadcast = !HaveSameShapes(input1, input2);
  switch (input1->type) {
    case kTfLiteFloat32:
      Comparison<float, reference_ops::LessFn>(input1, input2, output,
                                               requires_broadcast);
      break;
    case kTfLiteInt32:
      Comparison<int32_t, reference_ops::LessFn>(input1, input2, output,
                                                 requires_broadcast);
      break;
    case kTfLiteInt64:
      Comparison<int64_t, reference_ops::LessFn>(input1, input2, output,
                                                 requires_broadcast);
      break;
    case kTfLiteUInt8:
      ComparisonQuantized<uint8_t, reference_ops::LessFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt8:
      ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output,
                                                         requires_broadcast);
      break;
    default:
      context->ReportError(context,
                           "Does not support type %d, requires float|int|uint8",
                           input1->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-787
24
void BlockCodec::runPull()
{
    AFframecount framesToRead = m_outChunk->frameCount;
    AFframecount framesRead = 0;

    assert(framesToRead % m_framesPerPacket == 0);
    int blockCount = framesToRead / m_framesPerPacket;

    // Read the compressed data.
    ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount);
    int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0;

    // Decompress into m_outChunk.
    for (int i=0; i<blocksRead; i++)
    {
        decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket,
            static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount);

        framesRead += m_framesPerPacket;
    }

    m_track->nextfframe += framesRead;

    assert(tell() == m_track->fpos_next_frame);

    if (framesRead < framesToRead)
        reportReadError(framesRead, framesToRead);

    m_outChunk->frameCount = framesRead;
}
CWE-190
19
void TileManager::crop( RawTile *ttt ){

  int tw = image->getTileWidth();
  int th = image->getTileHeight();

  if( loglevel >= 5 ){
    *logfile << "TileManager :: Edge tile: Base size: " << tw << "x" << th
             << ": This tile: " << ttt->width << "x" << ttt->height << endl;
  }

  // Create a new buffer, fill it with the old data, then copy
  // back the cropped part into the RawTile buffer
  int len = tw * th * ttt->channels * (ttt->bpc/8);
  unsigned char* buffer = (unsigned char*) malloc( len );
  unsigned char* src_ptr = (unsigned char*) memcpy( buffer, ttt->data, len );
  unsigned char* dst_ptr = (unsigned char*) ttt->data;

  // Copy one scanline at a time
  len = ttt->width * ttt->channels * (ttt->bpc/8);
  for( unsigned int i=0; i<ttt->height; i++ ){
    memcpy( dst_ptr, src_ptr, len );
    dst_ptr += len;
    src_ptr += tw * ttt->channels * (ttt->bpc/8);
  }

  free( buffer );

  // Reset the data length
  len = ttt->width * ttt->height * ttt->channels * (ttt->bpc/8);
  ttt->dataLength = len;
  ttt->padded = false;
}
CWE-190
19
void onComplete(const Status& status, ContextImpl& context) const override {
  auto& completion_state = context.getCompletionState(this);
  if (completion_state.is_completed_) {
    return;
  }

  // If any of children is OK, return OK
  if (Status::Ok == status) {
    completion_state.is_completed_ = true;
    completeWithStatus(status, context);
    return;
  }

  // Then wait for all children to be done.
  if (++completion_state.number_completed_children_ == verifiers_.size()) {
    // Aggregate all children status into a final status.
    // JwtMissing should be treated differently than other failure status
    // since it simply means there is not Jwt token for the required provider.
    // If there is a failure status other than JwtMissing in the children,
    // it should be used as the final status.
    Status final_status = Status::JwtMissed;
    for (const auto& it : verifiers_) {
      // If a Jwt is extracted from a location not specified by the required provider,
      // the authenticator returns JwtUnknownIssuer. It should be treated the same as
      // JwtMissed.
      Status child_status = context.getCompletionState(it.get()).status_;
      if (child_status != Status::JwtMissed && child_status != Status::JwtUnknownIssuer) {
        final_status = child_status;
      }
    }

    if (is_allow_missing_or_failed_) {
      final_status = Status::Ok;
    } else if (is_allow_missing_ && final_status == Status::JwtMissed) {
      final_status = Status::Ok;
    }
    completion_state.is_completed_ = true;
    completeWithStatus(final_status, context);
  }
}
CWE-303
89
TfLiteRegistration GetPassthroughOpRegistration() {
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.init = [](TfLiteContext* context, const char*, size_t) -> void* {
    auto* first_new_tensor = new int;
    context->AddTensors(context, 2, first_new_tensor);
    return first_new_tensor;
  };
  reg.free = [](TfLiteContext* context, void* buffer) {
    delete static_cast<int*>(buffer);
  };
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    auto* first_new_tensor = static_cast<int*>(node->user_data);

    const TfLiteTensor* tensor0 = GetInput(context, node, 0);
    TfLiteTensor* tensor1 = GetOutput(context, node, 0);

    TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
    TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, tensor1, newSize));

    TfLiteIntArrayFree(node->temporaries);
    node->temporaries = TfLiteIntArrayCreate(2);
    for (int i = 0; i < 2; ++i) {
      node->temporaries->data[i] = *(first_new_tensor) + i;
    }

    auto setup_temporary = [&](int id) {
      TfLiteTensor* tmp = &context->tensors[id];
      tmp->type = kTfLiteFloat32;
      tmp->allocation_type = kTfLiteArenaRw;
      return context->ResizeTensor(context, tmp,
                                   TfLiteIntArrayCopy(tensor0->dims));
    };
    TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[0]));
    TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[1]));

    return kTfLiteOk;
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0 = GetInput(context, node, 0);

    auto populate = [&](int id) {
      TfLiteTensor* t = &context->tensors[id];
      int num = a0->dims->data[0];
      for (int i = 0; i < num; i++) {
        t->data.f[i] = a0->data.f[i];
      }
    };

    populate(node->outputs->data[0]);
    populate(node->temporaries->data[0]);
    populate(node->temporaries->data[1]);
    return kTfLiteOk;
  };

  return reg;
}
CWE-787
24
selaGetCombName(SELA *sela, l_int32 size, l_int32 direction)
{
char *selname;
char combname[L_BUF_SIZE];
l_int32 i, nsels, sx, sy, found;
SEL *sel;

    PROCNAME("selaGetCombName");

    if (!sela)
        return (char *)ERROR_PTR("sela not defined", procName, NULL);
    if (direction != L_HORIZ && direction != L_VERT)
        return (char *)ERROR_PTR("invalid direction", procName, NULL);

        /* Derive the comb name we're looking for */
    if (direction == L_HORIZ)
        snprintf(combname, L_BUF_SIZE, "sel_comb_%dh", size);
    else  /* direction == L_VERT */
        snprintf(combname, L_BUF_SIZE, "sel_comb_%dv", size);

    found = FALSE;
    nsels = selaGetCount(sela);
    for (i = 0; i < nsels; i++) {
        sel = selaGetSel(sela, i);
        selGetParameters(sel, &sy, &sx, NULL, NULL);
        if (sy != 1 && sx != 1)  /* 2-D; not a comb */
            continue;
        selname = selGetName(sel);
        if (!strcmp(selname, combname)) {
            found = TRUE;
            break;
        }
    }

    if (found)
        return stringNew(selname);
    else
        return (char *)ERROR_PTR("sel not found", procName, NULL);
}
CWE-787
24
ResourceHandle::ResourceHandle(const ResourceHandleProto& proto) { FromProto(proto); }
CWE-617
51
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* start = GetInput(context, node, kStartTensor);
  const TfLiteTensor* limit = GetInput(context, node, kLimitTensor);
  const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor);
  // Make sure all the inputs are scalars.
  TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0);
  TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0);
  TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0);

  // Currently only supports int32 and float.
  // TODO(b/117912892): Support quantization as well.
  const auto dtype = start->type;
  if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32) {
    context->ReportError(context, "Unknown index output data type: %s",
                         TfLiteTypeGetName(dtype));
    return kTfLiteError;
  }

  TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype);
  TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  output->type = dtype;

  if (IsConstantTensor(start) && IsConstantTensor(limit) &&
      IsConstantTensor(delta)) {
    return ResizeOutput(context, start, limit, delta, output);
  }

  SetTensorToDynamic(output);
  return kTfLiteOk;
}
CWE-125
47
PlainPasswd::PlainPasswd(int len) : CharArray(len) { }
CWE-787
24
PackLinuxElf64::elf_find_dynamic(unsigned int key) const
{
    Elf64_Dyn const *dynp= dynseg;
    if (dynp)
    for (; (unsigned)((char const *)dynp - (char const *)dynseg) < sz_dynseg
            && Elf64_Dyn::DT_NULL!=dynp->d_tag; ++dynp)
    if (get_te64(&dynp->d_tag)==key) {
        upx_uint64_t const t= elf_get_offset_from_address(get_te64(&dynp->d_val));
        if (t) {
            return &((unsigned char const *)file_image)[(size_t)t];
        }
        break;
    }
    return 0;
}
CWE-190
19
optional<ARN> ARN::parse(const string& s, bool wildcards) {
  static const char str_wild[] = "arn:([^:]*):([^:]*):([^:]*):([^:]*):([^:]*)";
  static const regex rx_wild(str_wild, sizeof(str_wild) - 1,
                             ECMAScript | optimize);
  static const char str_no_wild[]
    = "arn:([^:*]*):([^:*]*):([^:*]*):([^:*]*):([^:*]*)";
  static const regex rx_no_wild(str_no_wild, sizeof(str_no_wild) - 1,
                                ECMAScript | optimize);

  smatch match;

  if ((s == "*") && wildcards) {
    return ARN(Partition::wildcard, Service::wildcard, "*", "*", "*");
  } else if (regex_match(s, match, wildcards ? rx_wild : rx_no_wild)) {
    ceph_assert(match.size() == 6);

    ARN a;
    {
      auto p = to_partition(match[1], wildcards);
      if (!p)
        return none;
      a.partition = *p;
    }
    {
      auto s = to_service(match[2], wildcards);
      if (!s) {
        return none;
      }
      a.service = *s;
    }

    a.region = match[3];
    a.account = match[4];
    a.resource = match[5];

    return a;
  }
  return none;
}
CWE-617
51
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* dims = GetInput(context, node, kDimsTensor);
  const TfLiteTensor* value = GetInput(context, node, kValueTensor);

  // Make sure the 1st input tensor is 1-D.
  TF_LITE_ENSURE_EQ(context, NumDimensions(dims), 1);

  // Make sure the 1st input tensor is int32 or int64.
  const auto dtype = dims->type;
  TF_LITE_ENSURE(context, dtype == kTfLiteInt32 || dtype == kTfLiteInt64);

  // Make sure the 2nd input tensor is a scalar.
  TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  output->type = value->type;

  if (IsConstantTensor(dims)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
  } else {
    SetTensorToDynamic(output);
  }
  return kTfLiteOk;
}
CWE-125
47
const FieldID& activeUnionMemberId(const void* object, ptrdiff_t offset) {
  return *reinterpret_cast<const FieldID*>(
      offset + static_cast<const char*>(object));
}
CWE-763
61
Status DoCompute(OpKernelContext* ctx) {
  tensorflow::ResourceTagger tag(kTFDataResourceTag,
                                 ctx->op_kernel().type_string());
  tstring filename;
  TF_RETURN_IF_ERROR(
      ParseScalarArgument<tstring>(ctx, "filename", &filename));
  tstring compression_type;
  TF_RETURN_IF_ERROR(ParseScalarArgument<tstring>(ctx, "compression_type",
                                                  &compression_type));
  std::unique_ptr<WritableFile> file;
  TF_RETURN_IF_ERROR(ctx->env()->NewWritableFile(filename, &file));
  auto writer = absl::make_unique<io::RecordWriter>(
      file.get(),
      io::RecordWriterOptions::CreateRecordWriterOptions(compression_type));

  DatasetBase* dataset;
  TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));

  IteratorContext::Params params(ctx);
  FunctionHandleCache function_handle_cache(params.flr);
  params.function_handle_cache = &function_handle_cache;
  ResourceMgr resource_mgr;
  params.resource_mgr = &resource_mgr;
  CancellationManager cancellation_manager(ctx->cancellation_manager());
  params.cancellation_manager = &cancellation_manager;

  IteratorContext iter_ctx(std::move(params));
  DatasetBase* finalized_dataset;
  TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));

  std::unique_ptr<IteratorBase> iterator;
  TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
      &iter_ctx, /*parent=*/nullptr, "ToTFRecordOpIterator", &iterator));

  std::vector<Tensor> components;
  components.reserve(finalized_dataset->output_dtypes().size());
  bool end_of_sequence;
  do {
    TF_RETURN_IF_ERROR(
        iterator->GetNext(&iter_ctx, &components, &end_of_sequence));
    if (!end_of_sequence) {
      TF_RETURN_IF_ERROR(
          writer->WriteRecord(components[0].scalar<tstring>()()));
    }
    components.clear();
  } while (!end_of_sequence);
  return Status::OK();
}
CWE-787
24
string gen_dkg_poly(int _t) {
    vector<char> errMsg(BUF_LEN, 0);
    int errStatus = 0;
    uint32_t enc_len = 0;

    vector <uint8_t> encrypted_dkg_secret(BUF_LEN, 0);

    sgx_status_t status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(),
                                                 encrypted_dkg_secret.data(), &enc_len, _t);

    HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data());

    uint64_t length = enc_len;

    vector<char> hexEncrPoly(BUF_LEN, 0);
    CHECK_STATE(encrypted_dkg_secret.size() >= length);
    carray2Hex(encrypted_dkg_secret.data(), length, hexEncrPoly.data(), BUF_LEN);

    string result(hexEncrPoly.data());

    return result;
}
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers);

  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  }

  switch (output->type) {
    case kTfLiteFloat32:
      Tile<float>(*(input->dims), input, multipliers, output);
      break;
    case kTfLiteUInt8:
      Tile<uint8_t>(*(input->dims), input, multipliers, output);
      break;
    case kTfLiteInt32:
      Tile<int32_t>(*(input->dims), input, multipliers, output);
      break;
    case kTfLiteInt64:
      Tile<int64_t>(*(input->dims), input, multipliers, output);
      break;
    case kTfLiteString: {
      DynamicBuffer buffer;
      TileString(*(input->dims), input, multipliers, &buffer, output);
      buffer.WriteToTensor(output, /*new_shape=*/nullptr);
      break;
    }
    case kTfLiteBool:
      Tile<bool>(*(input->dims), input, multipliers, output);
      break;
    default:
      context->ReportError(context, "Type '%s' is not supported by tile.",
                           TfLiteTypeGetName(output->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
CWE-125
47
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
                      TfLiteNode* node) {
  // Map from value, to index in the unique elements vector.
  // Note that we prefer to use map than unordered_map as it showed less
  // increase in the binary size.
  std::map<T, int> unique_values;
  TfLiteTensor* output_indexes = GetOutput(context, node, 1);
  std::vector<T> output_values;
  I* indexes = GetTensorData<I>(output_indexes);
  const T* data = GetTensorData<T>(input);
  const int num_elements = NumElements(input);

  for (int i = 0; i < num_elements; ++i) {
    const auto element_it = unique_values.find(data[i]);
    if (element_it != unique_values.end()) {
      indexes[i] = element_it->second;
    } else {
      const int unique_index = unique_values.size();
      unique_values[data[i]] = unique_index;
      indexes[i] = unique_index;
      output_values.push_back(data[i]);
    }
  }

  // Allocate output tensor.
  TfLiteTensor* unique_output = GetOutput(context, node, 0);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(NumDimensions(input)), TfLiteIntArrayFree);
  shape->data[0] = unique_values.size();
  TF_LITE_ENSURE_STATUS(
      context->ResizeTensor(context, unique_output, shape.release()));

  // Set the values in the output tensor.
  T* output_unique_values = GetTensorData<T>(unique_output);
  for (int i = 0; i < output_values.size(); ++i) {
    output_unique_values[i] = output_values[i];
  }
  return kTfLiteOk;
}
CWE-787
24
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
  for (int i = 0; i < NumOutputs(node); ++i) {
    SetTensorToDynamic(GetOutput(context, node, i));
  }
  return kTfLiteOk;
}
CWE-125
47
void* sspi_SecureHandleGetUpperPointer(SecHandle* handle)
{
    void* pointer;

    if (!handle)
        return NULL;

    pointer = (void*) ~((size_t) handle->dwUpper);

    return pointer;
}
CWE-476
46
Http::FilterMetadataStatus Context::onResponseMetadata() {
  if (!wasm_->onResponseMetadata_) {
    return Http::FilterMetadataStatus::Continue;
  }
  if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) {
    return Http::FilterMetadataStatus::Continue;
  }
  return Http::FilterMetadataStatus::Continue;  // This is currently the only return code.
}
CWE-476
46
void CalculateOutputIndexValueRowID(
    const RowPartitionTensor& value_rowids,
    const vector<INDEX_TYPE>& parent_output_index,
    INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
    vector<INDEX_TYPE>* result) {
  const INDEX_TYPE index_size = value_rowids.size();
  result->reserve(index_size);
  if (index_size == 0) {
    return;
  }

  INDEX_TYPE current_output_column = 0;
  INDEX_TYPE current_value_rowid = value_rowids(0);
  DCHECK_LT(current_value_rowid, parent_output_index.size());
  INDEX_TYPE current_output_index = parent_output_index[current_value_rowid];
  result->push_back(current_output_index);
  for (INDEX_TYPE i = 1; i < index_size; ++i) {
    INDEX_TYPE next_value_rowid = value_rowids(i);
    if (next_value_rowid == current_value_rowid) {
      if (current_output_index >= 0) {
        ++current_output_column;
        if (current_output_column < output_size) {
          current_output_index += output_index_multiplier;
        } else {
          current_output_index = -1;
        }
      }
    } else {
      current_output_column = 0;
      current_value_rowid = next_value_rowid;
      DCHECK_LT(next_value_rowid, parent_output_index.size());
      current_output_index = parent_output_index[next_value_rowid];
    }
    result->push_back(current_output_index);
  }
  DCHECK_EQ(result->size(), value_rowids.size());
}
CWE-131
88