code (string, 12–2.05k chars) | label_name (string, 6–8 chars) | label (int64, 0–95) |
---|---|---|
ObfuscatedPasswd::ObfuscatedPasswd(const PlainPasswd& plainPwd) : CharArray(8), length(8) {
int l = strlen(plainPwd.buf), i;
for (i=0; i<8; i++)
buf[i] = i<l ? plainPwd.buf[i] : 0;
deskey(d3desObfuscationKey, EN0);
des((rdr::U8*)buf, (rdr::U8*)buf);
} | CWE-787 | 24 |
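The CWE-787 label on this row fits the constructor's trust in plainPwd.buf: strlen assumes NUL termination, and nothing ties the scan to the 8-byte destination beyond the loop bound. A minimal hardened sketch of just the copy, assuming a POSIX strnlen is available (not the project's actual fix):

#include <string.h>

// Hedged sketch: bound the source scan at the destination size so a
// non-terminated password buffer cannot be over-read.
static void copy_password_bounded(char dst[8], const char *src) {
    size_t l = strnlen(src, 8);          /* never scan past 8 bytes */
    for (size_t i = 0; i < 8; i++)
        dst[i] = i < l ? src[i] : 0;
}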
PackLinuxElf32::elf_find_dynamic(unsigned int key) const
{
Elf32_Dyn const *dynp= dynseg;
if (dynp)
for (; (unsigned)((char const *)dynp - (char const *)dynseg) < sz_dynseg
&& Elf32_Dyn::DT_NULL!=dynp->d_tag; ++dynp) if (get_te32(&dynp->d_tag)==key) {
unsigned const t= elf_get_offset_from_address(get_te32(&dynp->d_val));
if (t) {
return t + file_image;
}
break;
}
return 0;
} | CWE-190 | 19 |
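For the CWE-190 label: t + file_image adds an attacker-influenced offset to a base pointer with no upper bound, so a crafted d_val can wrap the arithmetic or point outside the mapped file. A hedged guard, assuming a file_size value for the mapped length is available (not UPX's actual fix):

#include <stddef.h>

/* Sketch only: reject offsets at or beyond the mapped image size before
   turning them into pointers. */
static char const *checked_image_ptr(char const *file_image,
                                     size_t file_size, unsigned off)
{
    if (off == 0 || off >= file_size)
        return 0;                /* out of range: treat as "not found" */
    return file_image + off;
}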
static int lookup1_values(int entries, int dim)
{
int r = (int) floor(exp((float) log((float) entries) / dim));
if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
++r; // floor() to avoid _ftol() when non-CRT
assert(pow((float) r+1, dim) > entries);
assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
return r;
} | CWE-369 | 60 |
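The CWE-369 label points at log((float) entries) / dim: a codebook header with dim == 0 divides by zero, and entries == 0 feeds log(0). A guarded sketch, assuming the caller can treat a negative return as an error:

#include <assert.h>
#include <math.h>

/* Hedged sketch: validate the divisor and operand before the float math. */
static int lookup1_values_checked(int entries, int dim)
{
   if (dim <= 0 || entries <= 0) return -1;   /* assumed error contract */
   int r = (int) floor(exp(log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)
      ++r;
   assert((int) floor(pow((float) r, dim)) <= entries);
   return r;
}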
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
const TfLiteTensor* bias =
(NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr;
TFLITE_DCHECK_EQ(input_type, input->type);
switch (input_type) { // Already know in/out types are same.
case kTfLiteFloat32:
if (filter->type == kTfLiteFloat32) {
return EvalFloat<kernel_type>(context, node, params, data, input,
filter, bias, output);
} else if (filter->type == kTfLiteInt8) {
return EvalHybridPerChannel<kernel_type>(context, node, params, data,
input, filter, bias, output);
} else {
TF_LITE_KERNEL_LOG(
context, "Type %s with filter type %s not currently supported.",
TfLiteTypeGetName(input->type), TfLiteTypeGetName(filter->type));
return kTfLiteError;
}
break;
case kTfLiteUInt8:
return EvalQuantized<kernel_type>(context, node, params, data, input,
filter, bias, output);
break;
case kTfLiteInt8:
return EvalQuantizedPerChannel<kernel_type>(context, node, params, data,
input, filter, bias, output);
break;
case kTfLiteInt16:
return EvalQuantizedPerChannel16x8(params, data, input, filter, bias,
output);
break;
default:
context->ReportError(context, "Type %d not currently supported.",
input->type);
return kTfLiteError;
}
} | CWE-125 | 47 |
TfLiteRegistration CancelOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
// Set output size to the input size in CancelOp::Prepare(). The body exists
// only so Prepare has a framework to exercise; the input and output tensors
// are not otherwise used.
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* in_tensor = GetInput(context, node, 0);
TfLiteTensor* out_tensor = GetOutput(context, node, 0);
TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
return context->ResizeTensor(context, out_tensor, new_size);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
cancellation_data_.is_cancelled = true;
return kTfLiteOk;
};
return reg;
} | CWE-125 | 47 |
req::ptr<XMLDocumentData> doc() const { return m_node->doc(); } | CWE-22 | 2 |
static optional<Principal> parse_principal(CephContext* cct, TokenID t,
string&& s) {
// Wildcard!
if ((t == TokenID::AWS) && (s == "*")) {
return Principal::wildcard();
// Do nothing for now.
} else if (t == TokenID::CanonicalUser) {
// AWS ARNs
} else if (t == TokenID::AWS) {
auto a = ARN::parse(s);
if (!a) {
if (std::none_of(s.begin(), s.end(),
[](const char& c) {
return (c == ':') || (c == '/');
})) {
// Since tenants are simply prefixes, there's no really good
// way to see if one exists or not. So we return the thing and
// let them try to match against it.
return Principal::tenant(std::move(s));
}
}
if (a->resource == "root") {
return Principal::tenant(std::move(a->account));
}
static const char rx_str[] = "([^/]*)/(.*)";
static const regex rx(rx_str, sizeof(rx_str) - 1,
ECMAScript | optimize);
smatch match;
if (regex_match(a->resource, match, rx)) {
ceph_assert(match.size() == 3);
if (match[1] == "user") {
return Principal::user(std::move(a->account),
match[2]);
}
if (match[1] == "role") {
return Principal::role(std::move(a->account),
match[2]);
}
}
}
ldout(cct, 0) << "Supplied principal is discarded: " << s << dendl;
return boost::none;
} | CWE-617 | 51 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* value = GetInput(context, node, kValueTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
const TfLiteTensor* dims = GetInput(context, node, kDimsTensor);
TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
}
#define TF_LITE_FILL(data_type) \
reference_ops::Fill(GetTensorShape(value), GetTensorData<data_type>(value), \
GetTensorShape(output), \
GetTensorData<data_type>(output))
switch (output->type) {
case kTfLiteInt32:
TF_LITE_FILL(int32_t);
break;
case kTfLiteInt64:
TF_LITE_FILL(int64_t);
break;
case kTfLiteFloat32:
TF_LITE_FILL(float);
break;
case kTfLiteBool:
TF_LITE_FILL(bool);
break;
case kTfLiteString:
FillString(value, output);
break;
default:
context->ReportError(
context,
"Fill only currently supports int32, int64, float32, bool, string "
"for input 1, got %d.",
value->type);
return kTfLiteError;
}
#undef TF_LITE_FILL
return kTfLiteOk;
} | CWE-125 | 47 |
static inline ulong encode_twos_comp(long n, int prec)
{
ulong result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
if (n < 0) {
result = -n;
result = (result ^ 0xffffffffUL) + 1;
result &= (1 << prec) - 1;
} else {
result = n;
}
return result;
} | CWE-190 | 19 |
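For the CWE-190 label: (1 << prec) - 1 is computed in int, which is undefined once prec reaches the width of int, and the assert only bounds prec from below. A sketch of the mask computation done in unsigned arithmetic (the prec <= 32 bound is an assumption):

#include <assert.h>

/* Hedged sketch: form the mask in unsigned long so the shift cannot
   overflow a signed int. */
static unsigned long twos_comp_mask(int prec)
{
    assert(prec >= 2 && prec <= 32);
    return (prec >= 32) ? 0xffffffffUL : ((1UL << prec) - 1);
}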
int64_t MemFile::readImpl(char *buffer, int64_t length) {
assertx(m_len != -1);
assertx(length > 0);
int64_t remaining = m_len - m_cursor;
if (remaining < length) length = remaining;
if (length > 0) {
memcpy(buffer, (const void *)(m_data + m_cursor), length);
}
m_cursor += length;
return length;
} | CWE-787 | 24 |
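The CWE-787 label matches the unchecked subtraction: once m_cursor has moved past m_len, remaining goes negative, length is set negative, and the cursor keeps drifting further out of range. A sketch of just the clamping arithmetic:

#include <algorithm>
#include <cstdint>

// Hedged sketch: never let the remaining byte count go negative when the
// cursor is already at or past the end of the in-memory buffer.
int64_t clamp_read_length(int64_t file_len, int64_t cursor, int64_t requested) {
  int64_t remaining = std::max<int64_t>(file_len - cursor, 0);
  return std::min(requested, remaining);
}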
void * alloc_bottom(size_t size) {
byte * tmp = bottom;
bottom += size;
if (bottom > top) {new_chunk(); tmp = bottom; bottom += size;}
return tmp;
} | CWE-787 | 24 |
std::string queueloader::get_filename(const std::string& str) {
std::string fn = ctrl->get_dlpath();
if (fn[fn.length()-1] != NEWSBEUTER_PATH_SEP[0])
fn.append(NEWSBEUTER_PATH_SEP);
char buf[1024];
snprintf(buf, sizeof(buf), "%s", str.c_str());
char * base = basename(buf);
if (!base || strlen(base) == 0) {
char lbuf[128];
time_t t = time(nullptr);
strftime(lbuf, sizeof(lbuf), "%Y-%b-%d-%H%M%S.unknown", localtime(&t));
fn.append(lbuf);
} else {
fn.append(base);
}
return fn;
} | CWE-78 | 6 |
int DummyOutStream::overrun(int itemSize, int nItems)
{
flush();
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | CWE-787 | 24 |
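For the CWE-787 label: itemSize * nItems is evaluated in int and can overflow before the comparison, letting an oversized request pass the clamp. An overflow-free sketch that divides instead of multiplying (itemSize > 0 is an assumption):

#include <cstddef>

// Hedged sketch: clamp the item count without ever forming itemSize * nItems.
static int clampItems(int itemSize, int nItems, std::ptrdiff_t avail) {
  if (itemSize <= 0) return 0;                  // defensive assumption
  std::ptrdiff_t maxItems = avail / itemSize;
  return nItems > maxItems ? static_cast<int>(maxItems) : nItems;
}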
int size() const {
return m_str ? m_str->size() : 0;
} | CWE-190 | 19 |
inline size_t codepoint_length(const char *s8, size_t l) {
if (l) {
auto b = static_cast<uint8_t>(s8[0]);
if ((b & 0x80) == 0) {
return 1;
} else if ((b & 0xE0) == 0xC0) {
return 2;
} else if ((b & 0xF0) == 0xE0) {
return 3;
} else if ((b & 0xF8) == 0xF0) {
return 4;
}
}
return 0;
} | CWE-125 | 47 |
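The CWE-125 label fits because the length is derived from the lead byte alone: a truncated multi-byte sequence at the end of the buffer makes callers advance past l. A checked sketch (reporting 0 for a truncated sequence is an assumed contract):

#include <cstddef>
#include <cstdint>

// Hedged sketch: also verify the declared sequence length fits in the buffer.
inline size_t codepoint_length_checked(const char *s8, size_t l) {
  size_t n = 0;
  if (l) {
    auto b = static_cast<uint8_t>(s8[0]);
    if ((b & 0x80) == 0)         n = 1;
    else if ((b & 0xE0) == 0xC0) n = 2;
    else if ((b & 0xF0) == 0xE0) n = 3;
    else if ((b & 0xF8) == 0xF0) n = 4;
  }
  return n <= l ? n : 0;   // truncated sequence: report 0
}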
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
if (IsDynamicTensor(output_values)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
const int32 k = top_k->data.i32[0];
// The tensor can be a vector or have more than 2 dimensions; either way, the
// code treats the innermost dimension as the row.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const int32 row_size = input->dims->data[input->dims->size - 1];
int32 num_rows = 1;
for (int i = 0; i < input->dims->size - 1; ++i) {
num_rows *= input->dims->data[i];
}
switch (output_values->type) {
case kTfLiteFloat32:
TopK(row_size, num_rows, GetTensorData<float>(input), k,
output_indexes->data.i32, GetTensorData<float>(output_values));
break;
case kTfLiteUInt8:
TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32,
output_values->data.uint8);
break;
case kTfLiteInt8:
TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32,
output_values->data.int8);
break;
case kTfLiteInt32:
TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32,
output_values->data.i32);
break;
case kTfLiteInt64:
TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32,
output_values->data.i64);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.",
TfLiteTypeGetName(output_values->type));
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
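The CWE-787 label lines up with k being read straight from a tensor and never validated against row_size, so TopK can write k entries per row into outputs sized for fewer. A sketch of the missing check using TFLite's TF_LITE_ENSURE macro (placement before the switch is an assumption):

#include <cstdint>

// Hedged sketch: validate k before dispatching on the output type.
static TfLiteStatus CheckTopKArgs(TfLiteContext* context, int32_t k,
                                  int32_t row_size) {
  TF_LITE_ENSURE(context, k >= 0);
  TF_LITE_ENSURE(context, k <= row_size);
  return kTfLiteOk;
}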
static float *get_window(vorb *f, int len)
{
len <<= 1;
if (len == f->blocksize_0) return f->window[0];
if (len == f->blocksize_1) return f->window[1];
assert(0);
return NULL;
} | CWE-476 | 46 |
bool MemFile::seek(int64_t offset, int whence /* = SEEK_SET */) {
assertx(m_len != -1);
if (whence == SEEK_CUR) {
if (offset > 0 && offset < bufferedLen()) {
setReadPosition(getReadPosition() + offset);
setPosition(getPosition() + offset);
return true;
}
offset += getPosition();
whence = SEEK_SET;
}
// invalidate the current buffer
setWritePosition(0);
setReadPosition(0);
if (whence == SEEK_SET) {
m_cursor = offset;
} else {
assertx(whence == SEEK_END);
m_cursor = m_len + offset;
}
setPosition(m_cursor);
return true;
} | CWE-787 | 24 |
std::string DecodeUnsafe(string_view encoded) {
std::string raw;
if (Decode(encoded, &raw)) {
return raw;
}
return ToString(encoded);
} | CWE-22 | 2 |
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteBool:
Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteFloat32:
Comparison<float, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::NotEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteString:
ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2,
output, requires_broadcast);
break;
default:
context->ReportError(
context,
"Does not support type %d, requires bool|float|int|uint8|string",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TEST_CASE_METHOD(TestFixture, "AES encrypt/decrypt", "[aes-encrypt-decrypt]") {
int errStatus = 0;
vector<char> errMsg(BUF_LEN, 0);
uint32_t encLen;
string key = SAMPLE_AES_KEY;
vector <uint8_t> encrypted_key(BUF_LEN, 0);
PRINT_SRC_LINE
auto status = trustedEncryptKeyAES(eid, &errStatus, errMsg.data(), key.c_str(), encrypted_key.data(), &encLen);
REQUIRE(status == 0);
REQUIRE(errStatus == 0);
vector<char> decr_key(BUF_LEN, 0);
PRINT_SRC_LINE
status = trustedDecryptKeyAES(eid, &errStatus, errMsg.data(), encrypted_key.data(), encLen, decr_key.data());
REQUIRE(status == 0);
REQUIRE(errStatus == 0);
REQUIRE(key.compare(decr_key.data()) == 0);
} | CWE-787 | 24 |
virtual size_t Read(void *buffer, size_t size, size_t count)
{
if (!m_fp) return 0;
return fread(buffer, size, count, m_fp);
}
| CWE-770 | 37 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = kTfLiteInt32;
// By design, the input shape is always known at the time of Prepare, even
// if the preceding op that generates |input| is dynamic. Thus, we can
// always compute the rank immediately, without waiting for Eval.
SetTensorToPersistentRo(output);
// Rank produces a 0-D int32 Tensor representing the rank of input.
TfLiteIntArray* output_size = TfLiteIntArrayCreate(0);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0);
// Immediately propagate the known rank to the output tensor. This allows
// downstream ops that rely on the value to use it during prepare.
if (output->type == kTfLiteInt32) {
int32_t* output_data = GetTensorData<int32_t>(output);
*output_data = NumDimensions(input);
} else {
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) {
if (!wasm_->onUpstreamData_) {
return Network::FilterStatus::Continue;
}
auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length),
static_cast<uint32_t>(end_of_stream));
// TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
} | CWE-476 | 46 |
void FrameFactory::rebuildAggregateFrames(ID3v2::Tag *tag) const
{
if(tag->header()->majorVersion() < 4 &&
tag->frameList("TDRC").size() == 1 &&
tag->frameList("TDAT").size() == 1)
{
TextIdentificationFrame *tdrc =
static_cast<TextIdentificationFrame *>(tag->frameList("TDRC").front());
UnknownFrame *tdat = static_cast<UnknownFrame *>(tag->frameList("TDAT").front());
if(tdrc->fieldList().size() == 1 &&
tdrc->fieldList().front().size() == 4 &&
tdat->data().size() >= 5)
{
String date(tdat->data().mid(1), String::Type(tdat->data()[0]));
if(date.length() == 4) {
tdrc->setText(tdrc->toString() + '-' + date.substr(2, 2) + '-' + date.substr(0, 2));
if(tag->frameList("TIME").size() == 1) {
UnknownFrame *timeframe = static_cast<UnknownFrame *>(tag->frameList("TIME").front());
if(timeframe->data().size() >= 5) {
String time(timeframe->data().mid(1), String::Type(timeframe->data()[0]));
if(time.length() == 4) {
tdrc->setText(tdrc->toString() + 'T' + time.substr(0, 2) + ':' + time.substr(2, 2));
}
}
}
}
}
}
} | CWE-434 | 5 |
generatePreview (const char inFileName[],
float exposure,
int previewWidth,
int &previewHeight,
Array2D <PreviewRgba> &previewPixels)
{
//
// Read the input file
//
RgbaInputFile in (inFileName);
Box2i dw = in.dataWindow();
float a = in.pixelAspectRatio();
int w = dw.max.x - dw.min.x + 1;
int h = dw.max.y - dw.min.y + 1;
Array2D <Rgba> pixels (h, w);
in.setFrameBuffer (ComputeBasePointer (&pixels[0][0], dw), 1, w);
in.readPixels (dw.min.y, dw.max.y);
//
// Make a preview image
//
previewHeight = max (int (h / (w * a) * previewWidth + .5f), 1);
previewPixels.resizeErase (previewHeight, previewWidth);
float fx = (previewWidth > 0)? (float (w - 1) / (previewWidth - 1)): 1;
float fy = (previewHeight > 0)? (float (h - 1) / (previewHeight - 1)): 1;
float m = Math<float>::pow (2.f, IMATH_NAMESPACE::clamp (exposure + 2.47393f, -20.f, 20.f));
for (int y = 0; y < previewHeight; ++y)
{
for (int x = 0; x < previewWidth; ++x)
{
PreviewRgba &preview = previewPixels[y][x];
const Rgba &pixel = pixels[int (y * fy + .5f)][int (x * fx + .5f)];
preview.r = gamma (pixel.r, m);
preview.g = gamma (pixel.g, m);
preview.b = gamma (pixel.b, m);
preview.a = int (IMATH_NAMESPACE::clamp (pixel.a * 255.f, 0.f, 255.f) + .5f);
}
}
} | CWE-476 | 46 |
int length() { return ptr - start; } | CWE-787 | 24 |
void Compute(OpKernelContext* context) override {
const float in_min = context->input(2).flat<float>()(0);
const float in_max = context->input(3).flat<float>()(0);
ImageResizerState st(align_corners_, false);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
// Return if the output is empty.
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(
context->input(0).tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>());
ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min,
in_max, half_pixel_centers_, &output_data);
Tensor* out_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min));
out_min->flat<float>()(0) = in_min;
Tensor* out_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max));
out_max->flat<float>()(0) = in_max;
} | CWE-787 | 24 |
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
const auto* params =
reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
double alpha_multiplier =
input->params.scale * params->alpha / output->params.scale;
QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
&data->output_shift_alpha);
double identity_multiplier = input->params.scale / output->params.scale;
QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
&data->output_shift_identity);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-125 | 47 |
MONGO_EXPORT void *bson_malloc( int size ) {
void *p;
p = bson_malloc_func( size );
bson_fatal_msg( !!p, "malloc() failed" );
return p;
} | CWE-190 | 19 |
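For the CWE-190 label: the int size parameter lets an overflowed or negative byte count reach the allocator. A hedged sketch that switches to size_t and calls malloc directly (the project's allocator hook may differ; bson_fatal_msg is taken from the snippet above):

#include <stdlib.h>

/* Sketch only: unsigned size, and zero is rejected before the allocation. */
void *bson_malloc_checked(size_t size) {
    void *p = (size > 0) ? malloc(size) : NULL;
    bson_fatal_msg(!!p, "malloc() failed");
    return p;
}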
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
int dy = y1 - y0;
int adx = x1 - x0;
int ady = abs(dy);
int base;
int x=x0,y=y0;
int err = 0;
int sy;
#ifdef STB_VORBIS_DIVIDE_TABLE
if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
if (dy < 0) {
base = -integer_divide_table[ady][adx];
sy = base-1;
} else {
base = integer_divide_table[ady][adx];
sy = base+1;
}
} else {
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
}
#else
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
#endif
ady -= abs(base) * adx;
if (x1 > n) x1 = n;
if (x < x1) {
LINE_OP(output[x], inverse_db_table[y]);
for (++x; x < x1; ++x) {
err += ady;
if (err >= adx) {
err -= adx;
y += sy;
} else
y += base;
LINE_OP(output[x], inverse_db_table[y]);
}
}
} | CWE-908 | 48 |
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
static const double kBeta = 1.0;
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
data->params.table = data->f_table;
optimized_ops::PopulateSoftmaxLookupTable(&data->params,
input->params.scale, kBeta);
data->params.zero_point = output->params.zero_point;
data->params.scale = output->params.scale;
}
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
static const int kScaledDiffIntegerBits = 5;
tflite::PreprocessLogSoftmaxScalingExp(
kBeta, input->params.scale, kScaledDiffIntegerBits,
&data->input_multiplier, &data->input_left_shift,
&data->reverse_scaling_divisor, &data->reverse_scaling_right_shift);
data->reverse_scaling_right_shift *= -1;
data->diff_min =
-1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
data->input_left_shift);
}
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-125 | 47 |
Pl_ASCIIHexDecoder::flush()
{
if (this->pos == 0)
{
QTC::TC("libtests", "Pl_ASCIIHexDecoder no-op flush");
return;
}
int b[2];
for (int i = 0; i < 2; ++i)
{
if (this->inbuf[i] >= 'A')
{
b[i] = this->inbuf[i] - 'A' + 10;
}
else
{
b[i] = this->inbuf[i] - '0';
}
}
unsigned char ch = static_cast<unsigned char>((b[0] << 4) + b[1]);
QTC::TC("libtests", "Pl_ASCIIHexDecoder partial flush",
(this->pos == 2) ? 0 : 1);
getNext()->write(&ch, 1);
this->pos = 0;
this->inbuf[0] = '0';
this->inbuf[1] = '0';
this->inbuf[2] = '\0';
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// TODO(ahentz): Our current implementations rely on the inputs being 4D.
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
// ResizeBilinear creates a float tensor even when the input is made of
// integers.
output->type = input->type;
if (!IsConstantTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
// Ensure params are valid.
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (params->half_pixel_centers && params->align_corners) {
context->ReportError(
context, "If half_pixel_centers is True, align_corners must be False.");
return kTfLiteError;
}
return ResizeOutputTensor(context, input, size, output);
} | CWE-125 | 47 |
void CommandData::ParseArg(wchar *Arg)
{
if (IsSwitch(*Arg) && !NoMoreSwitches)
if (Arg[1]=='-' && Arg[2]==0)
NoMoreSwitches=true;
else
ProcessSwitch(Arg+1);
else
if (*Command==0)
{
wcsncpy(Command,Arg,ASIZE(Command));
*Command=toupperw(*Command);
// 'I' and 'S' commands can contain case sensitive strings after
// the first character, so we must not modify their case.
// 'S' can contain an SFX name, whose case matters on Unix.
if (*Command!='I' && *Command!='S')
wcsupper(Command);
}
else
if (*ArcName==0)
wcsncpyz(ArcName,Arg,ASIZE(ArcName));
else
{
// Check if last character is the path separator.
size_t Length=wcslen(Arg);
wchar EndChar=Length==0 ? 0:Arg[Length-1];
bool EndSeparator=IsDriveDiv(EndChar) || IsPathDiv(EndChar);
wchar CmdChar=toupperw(*Command);
bool Add=wcschr(L"AFUM",CmdChar)!=NULL;
bool Extract=CmdChar=='X' || CmdChar=='E';
if (EndSeparator && !Add)
wcsncpyz(ExtrPath,Arg,ASIZE(ExtrPath));
else
if ((Add || CmdChar=='T') && (*Arg!='@' || ListMode==RCLM_REJECT_LISTS))
FileArgs.AddString(Arg);
else
{
FindData FileData;
bool Found=FindFile::FastFind(Arg,&FileData);
if ((!Found || ListMode==RCLM_ACCEPT_LISTS) &&
ListMode!=RCLM_REJECT_LISTS && *Arg=='@' && !IsWildcard(Arg))
{
FileLists=true;
ReadTextFile(Arg+1,&FileArgs,false,true,FilelistCharset,true,true,true);
}
else
if (Found && FileData.IsDir && Extract && *ExtrPath==0)
{
wcsncpyz(ExtrPath,Arg,ASIZE(ExtrPath));
AddEndSlash(ExtrPath,ASIZE(ExtrPath));
}
else
FileArgs.AddString(Arg);
}
}
} | CWE-787 | 24 |
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
int dy = y1 - y0;
int adx = x1 - x0;
int ady = abs(dy);
int base;
int x=x0,y=y0;
int err = 0;
int sy;
#ifdef STB_VORBIS_DIVIDE_TABLE
if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
if (dy < 0) {
base = -integer_divide_table[ady][adx];
sy = base-1;
} else {
base = integer_divide_table[ady][adx];
sy = base+1;
}
} else {
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
}
#else
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
#endif
ady -= abs(base) * adx;
if (x1 > n) x1 = n;
if (x < x1) {
LINE_OP(output[x], inverse_db_table[y]);
for (++x; x < x1; ++x) {
err += ady;
if (err >= adx) {
err -= adx;
y += sy;
} else
y += base;
LINE_OP(output[x], inverse_db_table[y]);
}
}
} | CWE-787 | 24 |
ObfuscatedPasswd::ObfuscatedPasswd(int len) : CharArray(len), length(len) {
} | CWE-787 | 24 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
switch (input->type) {
case kTfLiteFloat32:
return EvalImpl<kernel_type, kTfLiteFloat32>(context, node);
case kTfLiteUInt8:
return EvalImpl<kernel_type, kTfLiteUInt8>(context, node);
case kTfLiteInt8:
return EvalImpl<kernel_type, kTfLiteInt8>(context, node);
case kTfLiteInt16:
return EvalImpl<kernel_type, kTfLiteInt16>(context, node);
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | CWE-125 | 47 |
static QSvgNode *createPathNode(QSvgNode *parent,
const QXmlStreamAttributes &attributes,
QSvgHandler *)
{
QStringView data = attributes.value(QLatin1String("d"));
QPainterPath qpath;
qpath.setFillRule(Qt::WindingFill);
//XXX do error handling
parsePathDataFast(data, qpath);
QSvgNode *path = new QSvgPath(parent, qpath);
return path;
} | CWE-787 | 24 |
ALWAYS_INLINE String serialize_impl(const Variant& value,
const SerializeOptions& opts) {
switch (value.getType()) {
case KindOfClass:
case KindOfLazyClass:
case KindOfPersistentString:
case KindOfString: {
auto const str =
isStringType(value.getType()) ? value.getStringData() :
isClassType(value.getType()) ? classToStringHelper(value.toClassVal()) :
lazyClassToStringHelper(value.toLazyClassVal());
auto const size = str->size();
if (size >= RuntimeOption::MaxSerializedStringSize) {
throw Exception("Size of serialized string (%d) exceeds max", size);
}
StringBuffer sb;
sb.append("s:");
sb.append(size);
sb.append(":\"");
sb.append(str->data(), size);
sb.append("\";");
return sb.detach();
}
case KindOfResource:
return s_Res;
case KindOfUninit:
case KindOfNull:
case KindOfBoolean:
case KindOfInt64:
case KindOfFunc:
case KindOfPersistentVec:
case KindOfVec:
case KindOfPersistentDict:
case KindOfDict:
case KindOfPersistentKeyset:
case KindOfKeyset:
case KindOfPersistentDArray:
case KindOfDArray:
case KindOfPersistentVArray:
case KindOfVArray:
case KindOfDouble:
case KindOfObject:
case KindOfClsMeth:
case KindOfRClsMeth:
case KindOfRFunc:
case KindOfRecord:
break;
}
VariableSerializer vs(VariableSerializer::Type::Serialize);
if (opts.keepDVArrays) vs.keepDVArrays();
if (opts.forcePHPArrays) vs.setForcePHPArrays();
if (opts.warnOnHackArrays) vs.setHackWarn();
if (opts.warnOnPHPArrays) vs.setPHPWarn();
if (opts.ignoreLateInit) vs.setIgnoreLateInit();
if (opts.serializeProvenanceAndLegacy) vs.setSerializeProvenanceAndLegacy();
// Keep the count so recursive calls to serialize() embed references properly.
return vs.serialize(value, true, true);
} | CWE-125 | 47 |
TypedValue HHVM_FUNCTION(substr_compare,
const String& main_str,
const String& str,
int offset,
int length /* = INT_MAX */,
bool case_insensitivity /* = false */) {
int s1_len = main_str.size();
int s2_len = str.size();
if (length <= 0) {
raise_warning("The length must be greater than zero");
return make_tv<KindOfBoolean>(false);
}
if (offset < 0) {
offset = s1_len + offset;
if (offset < 0) offset = 0;
}
if (offset >= s1_len) {
raise_warning("The start position cannot exceed initial string length");
return make_tv<KindOfBoolean>(false);
}
int cmp_len = s1_len - offset;
if (cmp_len < s2_len) cmp_len = s2_len;
if (cmp_len > length) cmp_len = length;
const char *s1 = main_str.data();
if (case_insensitivity) {
return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len));
}
return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len));
} | CWE-190 | 19 |
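The CWE-190 label on this row (and the CWE-125 label on its duplicate further down) trace to cmp_len: after being raised to s2_len it can exceed the bytes left in main_str, so the compare reads past the buffer. A sketch of a bounded length computation, the final clamp being the missing piece:

#include <algorithm>

// Hedged sketch: the comparison length can never exceed what remains in the
// main string after `offset` (offset < s1_len is established by the caller).
inline int safe_cmp_len(int s1_len, int s2_len, int offset, int length) {
  int avail = s1_len - offset;
  int cmp_len = std::max(avail, s2_len);   // original behavior
  cmp_len = std::min(cmp_len, length);     // original behavior
  return std::min(cmp_len, avail);         // the missing clamp
}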
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32);
op_data->max_diff = op_data->tolerance * op_context.input->params.scale;
switch (op_context.input->type) {
case kTfLiteUInt8:
case kTfLiteInt8:
op_data->max_diff *= (1 << 8);
break;
case kTfLiteInt16:
op_data->max_diff *= (1 << 16);
break;
default:
break;
}
// Allocate tensor to store the dequantized inputs.
if (op_data->cache_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &op_data->cache_tensor_id));
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->cache_tensor_id;
TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0);
dequantized->type = op_context.ref->type;
dequantized->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(
context, dequantized,
TfLiteIntArrayCopy(op_context.input->dims)));
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) {
// Multiplying a * b where a and b are size_t cannot result in overflow in a
// size_t accumulator if both numbers have no non-zero bits in their upper
// half.
constexpr size_t size_t_bits = 8 * sizeof(size_t);
constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2;
*product = a * b;
// If neither integer has non-zero bits in its upper half, the product can't
// overflow. Otherwise, check using slow division.
if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) {
if (a != 0 && *product / a != b) return kTfLiteError;
}
return kTfLiteOk;
} | CWE-190 | 19 |
TfLiteStatus SimpleOpEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = tflite::GetInput(context, node, /*index=*/0);
const TfLiteTensor* input2 = tflite::GetInput(context, node, /*index=*/1);
TfLiteTensor* output = GetOutput(context, node, /*index=*/0);
int32_t* output_data = output->data.i32;
*output_data = *(input1->data.i32) + *(input2->data.i32);
return kTfLiteOk;
} | CWE-125 | 47 |
int HexInStream::overrun(int itemSize, int nItems, bool wait) {
if (itemSize > bufSize)
throw Exception("HexInStream overrun: max itemSize exceeded");
if (end - ptr != 0)
memmove(start, ptr, end - ptr);
end -= ptr - start;
offset += ptr - start;
ptr = start;
while (end < ptr + itemSize) {
int n = in_stream.check(2, 1, wait);
if (n == 0) return 0;
const U8* iptr = in_stream.getptr();
const U8* eptr = in_stream.getend();
int length = min((eptr - iptr)/2, start + bufSize - end);
U8* optr = (U8*) end;
for (int i=0; i<length; i++) {
int v = 0;
readHexAndShift(iptr[i*2], &v);
readHexAndShift(iptr[i*2+1], &v);
optr[i] = v;
}
in_stream.setptr(iptr + length*2);
end += length;
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | CWE-787 | 24 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue);
int resource_id = input_resource_id_tensor->data.i32[0];
auto& resources = subgraph->resources();
resource::CreateResourceVariableIfNotAvailable(&resources, resource_id);
auto* variable = resource::GetResourceVariable(&resources, resource_id);
TF_LITE_ENSURE(context, variable != nullptr);
variable->AssignFrom(input_value_tensor);
return kTfLiteOk;
} | CWE-787 | 24 |
void QuickOpen::Load(uint64 BlockPos)
{
if (!Loaded) // If loading for the first time, perform additional initialization.
{
SeekPos=Arc->Tell();
UnsyncSeekPos=false;
SaveFilePos SavePos(*Arc);
Arc->Seek(BlockPos,SEEK_SET);
if (Arc->ReadHeader()==0 || Arc->GetHeaderType()!=HEAD_SERVICE ||
!Arc->SubHead.CmpName(SUBHEAD_TYPE_QOPEN))
return;
QLHeaderPos=Arc->CurBlockPos;
RawDataStart=Arc->Tell();
RawDataSize=Arc->SubHead.UnpSize;
Loaded=true; // Set only after all file processing calls like Tell, Seek, ReadHeader.
}
if (Arc->SubHead.Encrypted)
{
RAROptions *Cmd=Arc->GetRAROptions();
#ifndef RAR_NOCRYPT
if (Cmd->Password.IsSet())
Crypt.SetCryptKeys(false,CRYPT_RAR50,&Cmd->Password,Arc->SubHead.Salt,
Arc->SubHead.InitV,Arc->SubHead.Lg2Count,
Arc->SubHead.HashKey,Arc->SubHead.PswCheck);
else
#endif
return;
}
RawDataPos=0;
ReadBufSize=0;
ReadBufPos=0;
LastReadHeader.Reset();
LastReadHeaderPos=0;
ReadBuffer();
} | CWE-787 | 24 |
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) {
// Creates a temp index to iterate through input data.
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(3);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
scratch_tensor->type = kTfLiteInt32;
scratch_tensor->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
index_size->data[0] = NumDimensions(op_context->input);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, scratch_tensor, index_size));
// Creates a temp tensor to store resolved axis given input data.
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
resolved_axis->type = kTfLiteInt32;
// Creates a temp tensor to store temp sums when calculating mean.
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
switch (op_context->input->type) {
case kTfLiteFloat32:
temp_sum->type = kTfLiteFloat32;
break;
case kTfLiteInt32:
temp_sum->type = kTfLiteInt64;
break;
case kTfLiteInt64:
temp_sum->type = kTfLiteInt64;
break;
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt16:
temp_sum->type = kTfLiteInt32;
break;
case kTfLiteBool:
temp_sum->type = kTfLiteBool;
break;
default:
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
MONGO_EXPORT int bson_append_finish_object( bson *b ) {
char *start;
int i;
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b , 0 );
start = b->data + b->stack[ --b->stackPos ];
i = b->cur - start;
bson_little_endian32( start, &i );
return BSON_OK;
} | CWE-190 | 19 |
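For the CWE-190 label: --b->stackPos is applied without checking that an object is actually open, so an unmatched finish indexes stack[-1]. The same logic with the missing depth guard (a sketch; names follow the snippet, and the BSON_ERROR contract is assumed):

MONGO_EXPORT int bson_append_finish_object_checked( bson *b ) {
    char *start;
    int i;
    if ( b->stackPos <= 0 ) return BSON_ERROR;   /* the missing depth check */
    if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
    bson_append_byte( b, 0 );
    start = b->data + b->stack[ --b->stackPos ];
    i = b->cur - start;
    bson_little_endian32( start, &i );
    return BSON_OK;
}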
void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
int pad_width, int pad_height, int filter_width,
int filter_height, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
filter_width, filter_height, output_activation_min,
output_activation_max, output_data, output_dims);
} | CWE-835 | 42 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
// TODO(b/128934713): Add support for fixed-point per-channel quantization.
// Currently this only supports affine per-layer quantization.
TF_LITE_ENSURE_EQ(context, output->quantization.type,
kTfLiteAffineQuantization);
const auto* affine_quantization =
static_cast<TfLiteAffineQuantization*>(output->quantization.params);
TF_LITE_ENSURE(context, affine_quantization);
TF_LITE_ENSURE(context, affine_quantization->scale);
TF_LITE_ENSURE(context, affine_quantization->scale->size == 1);
if (input->type == kTfLiteFloat32) {
// Quantize use case.
TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16);
} else {
// Requantize use case.
if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE(
context, output->type == kTfLiteInt8 || output->type == kTfLiteInt16);
} else {
TF_LITE_ENSURE(context,
input->type == kTfLiteInt8 || input->type == kTfLiteUInt8);
TF_LITE_ENSURE(
context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8);
}
const double effective_output_scale =
static_cast<double>(input->params.scale) /
static_cast<double>(output->params.scale);
QuantizeMultiplier(effective_output_scale, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-787 | 24 |
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
if (context->tensors != nullptr) {
return &context->tensors[node->inputs->data[index]];
} else {
return context->GetTensor(context, node->inputs->data[index]);
}
} | CWE-125 | 47 |
void TestJlCompress::extractDir_data()
{
QTest::addColumn<QString>("zipName");
QTest::addColumn<QStringList>("fileNames");
QTest::newRow("simple") << "jlextdir.zip" << (
QStringList() << "test0.txt" << "testdir1/test1.txt"
<< "testdir2/test2.txt" << "testdir2/subdir/test2sub.txt");
QTest::newRow("separate dir") << "sepdir.zip" << (
QStringList() << "laj/" << "laj/lajfile.txt");
} | CWE-22 | 2 |
TypedValue HHVM_FUNCTION(substr_compare,
const String& main_str,
const String& str,
int offset,
int length /* = INT_MAX */,
bool case_insensitivity /* = false */) {
int s1_len = main_str.size();
int s2_len = str.size();
if (length <= 0) {
raise_warning("The length must be greater than zero");
return make_tv<KindOfBoolean>(false);
}
if (offset < 0) {
offset = s1_len + offset;
if (offset < 0) offset = 0;
}
if (offset >= s1_len) {
raise_warning("The start position cannot exceed initial string length");
return make_tv<KindOfBoolean>(false);
}
int cmp_len = s1_len - offset;
if (cmp_len < s2_len) cmp_len = s2_len;
if (cmp_len > length) cmp_len = length;
const char *s1 = main_str.data();
if (case_insensitivity) {
return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len));
}
return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len));
} | CWE-125 | 47 |
void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices,
int level, int prev_idx, int* src_data_ptr,
T* dest_data) {
if (level == indices.size()) {
int orig_rank = dense_shape_.size();
std::vector<int> orig_idx;
orig_idx.resize(orig_rank);
int i = 0;
for (; i < orig_idx.size(); i++) {
int orig_dim = traversal_order_[i];
orig_idx[orig_dim] = indices[i];
}
for (; i < indices.size(); i++) {
const int block_idx = traversal_order_[i] - orig_rank;
const int orig_dim = block_map_[block_idx];
orig_idx[orig_dim] =
orig_idx[orig_dim] * block_size_[block_idx] + indices[i];
}
dest_data[GetFlattenedIndex(orig_idx, dense_shape_)] =
src_data[*src_data_ptr];
*src_data_ptr = *src_data_ptr + 1;
return;
}
const int metadata_idx = 2 * level;
const int shape_of_level = dim_metadata_[metadata_idx][0];
if (format_[level] == kTfLiteDimDense) {
for (int i = 0; i < shape_of_level; i++) {
indices[level] = i;
Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i,
src_data_ptr, dest_data);
}
} else {
const auto& array_segments = dim_metadata_[metadata_idx];
const auto& array_indices = dim_metadata_[metadata_idx + 1];
for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1];
i++) {
indices[level] = array_indices[i];
Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data);
}
}
} | CWE-125 | 47 |
bool logToUSDT(const Array& bt) {
std::lock_guard<std::mutex> lock(usdt_mutex);
memset(&bt_slab, 0, sizeof(bt_slab));
int i = 0;
IterateVNoInc(
bt.get(),
[&](TypedValue tv) -> bool {
if (i >= strobelight::kMaxStackframes) {
return true;
}
assertx(isArrayLikeType(type(tv)));
ArrayData* bt_frame = val(tv).parr;
strobelight::backtrace_frame_t* frame = &bt_slab.frames[i];
auto const line = bt_frame->get(s_line.get());
if (line.is_init()) {
assertx(isIntType(type(line)));
frame->line = val(line).num;
}
auto const file_name = bt_frame->get(s_file.get());
if (file_name.is_init()) {
assertx(isStringType(type(file_name)));
strncpy(frame->file_name,
val(file_name).pstr->data(),
std::min(val(file_name).pstr->size(), strobelight::kFileNameMax));
frame->file_name[strobelight::kFileNameMax - 1] = '\0';
}
auto const class_name = bt_frame->get(s_class.get());
if (class_name.is_init()) {
assertx(isStringType(type(class_name)));
strncpy(frame->class_name,
val(class_name).pstr->data(),
std::min(val(class_name).pstr->size(), strobelight::kClassNameMax));
frame->class_name[strobelight::kClassNameMax - 1] = '\0';
}
auto const function_name = bt_frame->get(s_function.get());
if (function_name.is_init()) {
assertx(isStringType(type(function_name)));
strncpy(frame->function,
val(function_name).pstr->data(),
std::min(val(function_name).pstr->size(),
strobelight::kFunctionMax));
frame->function[strobelight::kFunctionMax - 1] = '\0';
}
i++;
return false;
}
);
bt_slab.len = i;
// Allow BPF to read the now-formatted stacktrace
FOLLY_SDT_WITH_SEMAPHORE(hhvm, hhvm_stack, &bt_slab);
return true;
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
switch (input->type) {
case kTfLiteFloat32:
return EvalImpl<kernel_type, kTfLiteFloat32>(context, node);
case kTfLiteUInt8:
return EvalImpl<kernel_type, kTfLiteUInt8>(context, node);
case kTfLiteInt8:
return EvalImpl<kernel_type, kTfLiteInt8>(context, node);
case kTfLiteInt16:
return EvalImpl<kernel_type, kTfLiteInt16>(context, node);
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
// TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't
// fully support 0-output ops yet. Currently it works if we manually craft
// a TFLite graph that contains variable ops. Note:
// * The TFLite Converter needs to be changed to be able to produce an op
// with 0 outputs.
// * The delegation code needs to be changed to handle 0-output ops. However,
// everything still works fine when variable ops aren't used.
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);
return kTfLiteOk;
} | CWE-787 | 24 |
TEST_CASE_METHOD(TestFixture, "ECDSA AES key gen", "[ecdsa-aes-key-gen]") {
vector<char> errMsg(BUF_LEN, 0);
int errStatus = 0;
vector <uint8_t> encrPrivKey(BUF_LEN, 0);
vector<char> pubKeyX(BUF_LEN, 0);
vector<char> pubKeyY(BUF_LEN, 0);
uint32_t encLen = 0;
PRINT_SRC_LINE
auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen,
pubKeyX.data(),
pubKeyY.data());
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
} | CWE-787 | 24 |
void TightDecoder::FilterGradient(const rdr::U8* inbuf,
const PixelFormat& pf, PIXEL_T* outbuf,
int stride, const Rect& r)
{
int x, y, c;
static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3];
static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3];
rdr::U8 pix[3];
int est[3];
memset(prevRow, 0, sizeof(prevRow));
// Set up shortcut variables
int rectHeight = r.height();
int rectWidth = r.width();
for (y = 0; y < rectHeight; y++) {
/* First pixel in a row */
pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1);
for (c = 0; c < 3; c++)
pix[c] += prevRow[c];
memcpy(thisRow, pix, sizeof(pix));
pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);
/* Remaining pixels of a row */
for (x = 1; x < rectWidth; x++) {
for (c = 0; c < 3; c++) {
est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c];
if (est[c] > 255) {
est[c] = 255;
} else if (est[c] < 0) {
est[c] = 0;
}
}
pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1);
for (c = 0; c < 3; c++)
pix[c] += est[c];
memcpy(&thisRow[x*3], pix, sizeof(pix));
pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1);
}
memcpy(prevRow, thisRow, sizeof(prevRow));
}
} | CWE-787 | 24 |
otError Commissioner::GeneratePskc(const char * aPassPhrase,
const char * aNetworkName,
const Mac::ExtendedPanId &aExtPanId,
Pskc & aPskc)
{
otError error = OT_ERROR_NONE;
const char *saltPrefix = "Thread";
uint8_t salt[OT_PBKDF2_SALT_MAX_LEN];
uint16_t saltLen = 0;
VerifyOrExit((strlen(aPassPhrase) >= OT_COMMISSIONING_PASSPHRASE_MIN_SIZE) &&
(strlen(aPassPhrase) <= OT_COMMISSIONING_PASSPHRASE_MAX_SIZE),
error = OT_ERROR_INVALID_ARGS);
memset(salt, 0, sizeof(salt));
memcpy(salt, saltPrefix, strlen(saltPrefix));
saltLen += static_cast<uint16_t>(strlen(saltPrefix));
memcpy(salt + saltLen, aExtPanId.m8, sizeof(aExtPanId));
saltLen += OT_EXT_PAN_ID_SIZE;
memcpy(salt + saltLen, aNetworkName, strlen(aNetworkName));
saltLen += static_cast<uint16_t>(strlen(aNetworkName));
otPbkdf2Cmac(reinterpret_cast<const uint8_t *>(aPassPhrase), static_cast<uint16_t>(strlen(aPassPhrase)),
reinterpret_cast<const uint8_t *>(salt), saltLen, 16384, OT_PSKC_MAX_SIZE, aPskc.m8);
exit:
return error;
} | CWE-787 | 24 |
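The CWE-787 label matches the salt assembly: aNetworkName is copied with no bound, so a long name overruns the fixed salt[OT_PBKDF2_SALT_MAX_LEN] array. A hedged helper that refuses any append past the buffer's capacity (independent of whatever OpenThread's actual fix does):

#include <cstdint>
#include <cstring>

// Sketch only: append to the salt iff the source still fits.
static bool AppendToSalt(uint8_t *salt, uint16_t &saltLen, size_t saltCap,
                         const void *src, size_t srcLen)
{
    if (saltLen > saltCap || srcLen > saltCap - saltLen)
        return false;                       // would overflow the salt buffer
    memcpy(salt + saltLen, src, srcLen);
    saltLen += static_cast<uint16_t>(srcLen);
    return true;
}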
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// There are two ways in which the 'output' can be made dynamic: it could be
// a string tensor, or its shape cannot be calculated during Prepare(). In
// either case, we now have all the information to calculate its shape.
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
// Note that string tensors are always "dynamic" in the sense that their size
// is not known until we have all the content. This applies even when their
// shape is known ahead of time. As a result, a string tensor is never given
// any memory by ResizeOutput(), and we need to do it manually here. Since
// reshape doesn't change the data, the output tensor needs exactly as many
// bytes as the input tensor.
if (output->type == kTfLiteString) {
auto bytes_required = input->bytes;
TfLiteTensorRealloc(bytes_required, output);
output->bytes = bytes_required;
}
memcpy(output->data.raw, input->data.raw, input->bytes);
return kTfLiteOk;
} | CWE-125 | 47 |
static TfLiteRegistration DelegateRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// If tensors are resized, the runtime should propagate shapes automatically
// when the corresponding flag is set. Ensure the values are correct.
// Output 0 should be dynamic.
TfLiteTensor* output0 = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, IsDynamicTensor(output0));
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
TF_LITE_ENSURE(context, input->dims->size == output1->dims->size);
TF_LITE_ENSURE(context, input->dims->data[0] == output1->dims->data[0]);
return kTfLiteOk;
};
return reg;
} | CWE-787 | 24 |
int FdOutStream::overrun(int itemSize, int nItems)
{
if (itemSize > bufSize)
throw Exception("FdOutStream overrun: max itemSize exceeded");
// First try to get rid of the data we have
flush();
// Still not enough space?
if (itemSize > end - ptr) {
// Can we shuffle things around?
// (don't do this if it gains us less than 25%)
if ((sentUpTo - start > bufSize / 4) &&
(itemSize < bufSize - (ptr - sentUpTo))) {
memmove(start, sentUpTo, ptr - sentUpTo);
ptr = start + (ptr - sentUpTo);
sentUpTo = start;
} else {
// Have to get rid of more data, so turn off non-blocking
// for a bit...
bool realBlocking;
realBlocking = blocking;
blocking = true;
flush();
blocking = realBlocking;
}
}
// Can we fit all the items asked for?
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | CWE-787 | 24 |
void create_test_key() {
int errStatus = 0;
vector<char> errMsg(1024, 0);
uint32_t enc_len;
SAFE_UINT8_BUF(encrypted_key, BUF_LEN);
string key = TEST_VALUE;
sgx_status_t status = trustedEncryptKeyAES(eid, &errStatus, errMsg.data(), key.c_str(), encrypted_key, &enc_len);
HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data());
vector<char> hexEncrKey(2 * enc_len + 1, 0);
carray2Hex(encrypted_key, enc_len, hexEncrKey.data(), 2 * enc_len + 1);
LevelDB::getLevelDb()->writeDataUnique("TEST_KEY", hexEncrKey.data());
} | CWE-787 | 24 |
static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx,
int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep,
int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd,
uint_fast32_t inmem)
{
jas_image_cmpt_t *cmpt;
size_t size;
cmpt = 0;
if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) {
goto error;
}
if (!jas_safe_intfast32_add(tlx, width, 0) ||
!jas_safe_intfast32_add(tly, height, 0)) {
goto error;
}
if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) {
goto error;
}
cmpt->type_ = JAS_IMAGE_CT_UNKNOWN;
cmpt->tlx_ = tlx;
cmpt->tly_ = tly;
cmpt->hstep_ = hstep;
cmpt->vstep_ = vstep;
cmpt->width_ = width;
cmpt->height_ = height;
cmpt->prec_ = depth;
cmpt->sgnd_ = sgnd;
cmpt->stream_ = 0;
cmpt->cps_ = (depth + 7) / 8;
// Compute the number of samples in the image component, while protecting
// against overflow.
// size = cmpt->width_ * cmpt->height_ * cmpt->cps_;
if (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) ||
!jas_safe_size_mul(size, cmpt->cps_, &size)) {
goto error;
}
cmpt->stream_ = (inmem) ? jas_stream_memopen2(0, size) :
jas_stream_tmpfile();
if (!cmpt->stream_) {
goto error;
}
/* Zero the component data. This isn't necessary, but it is
convenient for debugging purposes. */
/* Note: conversion of size - 1 to long can overflow */
if (size > 0) {
if (size - 1 > LONG_MAX) {
goto error;
}
if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 ||
jas_stream_putc(cmpt->stream_, 0) == EOF ||
jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) {
goto error;
}
}
return cmpt;
error:
if (cmpt) {
jas_image_cmpt_destroy(cmpt);
}
return 0;
} | CWE-190 | 19 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, NumDimensions(input) <= 4);
TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 ||
output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.scale, (1. / 128.));
if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 128);
}
if (output->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
}
// TODO(ahentz): For some reason our implementations don't support
// activations.
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
} | CWE-787 | 24 |
Status check_index_ordering(const Tensor& indices) {
auto findices = indices.flat<int>();
for (std::size_t i = 0; i < findices.dimension(0) - 1; ++i) {
if (findices(i) < findices(i + 1)) {
continue;
}
return Status(
errors::InvalidArgument("Indices are not strictly ordered"));
}
return Status::OK();
} | CWE-824 | 65 |
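The CWE-824 label follows from the loop bound: dimension(0) is signed, so for an empty tensor dimension(0) - 1 is -1, and comparing it with the unsigned std::size_t i wraps to a huge bound and walks off the buffer. The fix pattern in plain C++, sketched over std::vector rather than the TensorFlow types:

#include <cstdint>
#include <vector>

// Hedged sketch: keep the comparison in signed space so an empty input
// yields zero iterations instead of a wrapped-around bound.
bool strictly_increasing(const std::vector<int>& v) {
  for (int64_t i = 0; i + 1 < static_cast<int64_t>(v.size()); ++i) {
    if (v[i] >= v[i + 1]) return false;
  }
  return true;
}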
TEST_F(QuantizedConv2DTest, OddPaddingBatch) {
const int stride = 2;
TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 4;
const int image_batch_count = 3;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<quint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
const int expected_height = (image_height * filter_count) / stride;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(&expected, {348, 252, 274, 175, //
348, 252, 274, 175, //
348, 252, 274, 175});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
} | CWE-476 | 46 |
bool logToUSDT(const Array& bt) {
std::lock_guard<std::mutex> lock(usdt_mutex);
memset(&bt_slab, 0, sizeof(bt_slab));
int i = 0;
IterateVNoInc(
bt.get(),
[&](TypedValue tv) -> bool {
if (i >= strobelight::kMaxStackframes) {
return true;
}
assertx(isArrayLikeType(type(tv)));
ArrayData* bt_frame = val(tv).parr;
strobelight::backtrace_frame_t* frame = &bt_slab.frames[i];
auto const line = bt_frame->get(s_line.get());
if (line.is_init()) {
assertx(isIntType(type(line)));
frame->line = val(line).num;
}
auto const file_name = bt_frame->get(s_file.get());
if (file_name.is_init()) {
assertx(isStringType(type(file_name)));
strncpy(frame->file_name,
val(file_name).pstr->data(),
std::min(val(file_name).pstr->size(), strobelight::kFileNameMax));
frame->file_name[strobelight::kFileNameMax - 1] = '\0';
}
auto const class_name = bt_frame->get(s_class.get());
if (class_name.is_init()) {
assertx(isStringType(type(class_name)));
strncpy(frame->class_name,
val(class_name).pstr->data(),
std::min(val(class_name).pstr->size(), strobelight::kClassNameMax));
frame->class_name[strobelight::kClassNameMax - 1] = '\0';
}
auto const function_name = bt_frame->get(s_function.get());
if (function_name.is_init()) {
assertx(isStringType(type(function_name)));
strncpy(frame->function,
val(function_name).pstr->data(),
std::min(val(function_name).pstr->size(),
strobelight::kFunctionMax));
frame->function[strobelight::kFunctionMax - 1] = '\0';
}
i++;
return false;
}
);
bt_slab.len = i;
// Allow BPF to read the now-formatted stacktrace
FOLLY_SDT_WITH_SEMAPHORE(hhvm, hhvm_stack, &bt_slab);
return true;
} | CWE-190 | 19 |
void Compute(OpKernelContext* ctx) override {
const Tensor& handle = ctx->input(0);
const string& name = handle.scalar<tstring>()();
Tensor val;
OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val));
ctx->set_output(0, val);
} | CWE-476 | 46 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
case kTfLiteFloat32:
reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
break;
default:
context->ReportError(
context,
"Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
int length() const {
return m_str ? m_str->size() : 0;
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// TODO(ahentz): Our current implementations rely on the inputs being 4D.
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
// ResizeBilinear creates a float tensor even when the input is made of
// integers.
output->type = input->type;
if (!IsConstantTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
// Ensure params are valid.
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (params->half_pixel_centers && params->align_corners) {
context->ReportError(
context, "If half_pixel_centers is True, align_corners must be False.");
return kTfLiteError;
}
return ResizeOutputTensor(context, input, size, output);
} | CWE-787 | 24 |
TfLiteStatus EvalHashtable(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, node->user_data != nullptr);
const auto* params =
reinterpret_cast<const TfLiteHashtableParams*>(node->user_data);
// The resource id is generated based on the given table name.
const int resource_id = std::hash<std::string>{}(params->table_name);
TfLiteTensor* resource_handle_tensor =
GetOutput(context, node, kResourceHandleTensor);
auto* resource_handle_data =
GetTensorData<std::int32_t>(resource_handle_tensor);
resource_handle_data[0] = resource_id;
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
resource::CreateHashtableResourceIfNotAvailable(
&resources, resource_id, params->key_dtype, params->value_dtype);
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const int num_elements = NumElements(input);
switch (input->type) {
case kTfLiteInt64:
memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t));
break;
case kTfLiteInt32:
memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t));
break;
case kTfLiteFloat32:
memset(GetTensorData<float>(output), 0, num_elements * sizeof(float));
break;
default:
context->ReportError(context,
"ZerosLike only currently supports int64, int32, "
"and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::GreaterFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::GreaterFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::GreaterFn>(
input1, input2, output, requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
break;
case kTfLiteUInt8:
MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt8:
MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt16:
MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
void *jas_malloc(size_t size)
{
void *result;
JAS_DBGLOG(101, ("jas_malloc called with %zu\n", size));
result = malloc(size);
JAS_DBGLOG(100, ("jas_malloc(%zu) -> %p\n", size, result));
return result;
} | CWE-190 | 19 |
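jas_malloc itself just forwards to malloc; the CWE-190 label points at callers computing nmemb * size products that can wrap before the call. A sketch of an overflow-checked sibling (JasPer later grew helpers of this shape; the exact name here is illustrative):

#include <stdint.h>
#include <stdlib.h>

void *jas_alloc2(size_t nmemb, size_t size)
{
	/* Refuse products that would wrap around SIZE_MAX. */
	if (nmemb && size > SIZE_MAX / nmemb) {
		return NULL;
	}
	return jas_malloc(nmemb * size);
}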
MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name,
const char *code, int len, const bson *scope ) {
int sl, size;
if ( !scope ) return BSON_ERROR;
sl = len + 1;
size = 4 + 4 + sl + bson_size( scope );
if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &size );
bson_append32( b, &sl );
bson_append( b, code, sl );
bson_append( b, scope->data, bson_size( scope ) );
return BSON_OK;
} | CWE-190 | 19 |
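Here size = 4 + 4 + sl + bson_size(scope) is plain int arithmetic on a caller-supplied len, so a large len wraps size negative before bson_append_estart ever sees it. A hedged guard (the helper and its -1 convention are assumptions, not the mongo-c-driver patch verbatim):

#include <limits.h>

/* Illustrative only: compute the code-w-scope size, or -1 on overflow. */
static int code_w_scope_size( int len, const bson *scope ) {
    int scope_size = bson_size( scope );
    if ( len < 0 || len > INT_MAX - 1 - 4 - 4 - scope_size )
        return -1;               /* 4 + 4 + (len+1) + scope would wrap */
    return 4 + 4 + ( len + 1 ) + scope_size;
}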
TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context,
TfLiteNode* node, OpData* op_data) {
// Get the input tensors
const TfLiteTensor* input_box_encodings =
GetInput(context, node, kInputTensorBoxEncodings);
const TfLiteTensor* input_class_predictions =
GetInput(context, node, kInputTensorClassPredictions);
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0],
kBatchSize);
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes);
const int num_classes_with_background =
input_class_predictions->dims->data[2];
TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1));
TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes));
const TfLiteTensor* scores;
switch (input_class_predictions->type) {
case kTfLiteUInt8: {
TfLiteTensor* temporary_scores = &context->tensors[op_data->scores_index];
DequantizeClassPredictions(input_class_predictions, num_boxes,
num_classes_with_background, temporary_scores);
scores = temporary_scores;
} break;
case kTfLiteFloat32:
scores = input_class_predictions;
break;
default:
// Unsupported type.
return kTfLiteError;
}
if (op_data->use_regular_non_max_suppression)
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper(
context, node, op_data, GetTensorData<float>(scores)));
else
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassFastHelper(
context, node, op_data, GetTensorData<float>(scores)));
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteUnpackParams* data =
reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
switch (input->type) {
case kTfLiteFloat32: {
UnpackImpl<float>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt32: {
UnpackImpl<int32_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteUInt8: {
UnpackImpl<uint8_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt8: {
UnpackImpl<int8_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteBool: {
UnpackImpl<bool>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt16: {
UnpackImpl<int16_t>(context, node, input, data->num, data->axis);
break;
}
default: {
context->ReportError(context, "Type '%s' is not supported by unpack.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
return kTfLiteOk;
} | CWE-787 | 24 |
void PrivateThreadPoolDatasetOp::MakeDatasetFromOptions(OpKernelContext* ctx,
DatasetBase* input,
int32_t num_threads,
DatasetBase** output) {
OP_REQUIRES(ctx, num_threads >= 0,
errors::InvalidArgument("`num_threads` must be >= 0"));
*output = new Dataset(ctx,
DatasetContext(DatasetContext::Params(
{PrivateThreadPoolDatasetOp::kDatasetType,
PrivateThreadPoolDatasetOp::kDatasetOp})),
input, num_threads);
} | CWE-770 | 37 |
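The check only rejects negative values, so a model can still request an arbitrarily large private pool; that is how the CWE-770 (allocation without limits) label reads. A sketch with an upper bound (the cap value is an assumption, not TensorFlow's):

constexpr int32_t kMaxPrivateThreads = 512;  // illustrative cap only
OP_REQUIRES(ctx, num_threads >= 0 && num_threads <= kMaxPrivateThreads,
            errors::InvalidArgument("`num_threads` must be in [0, ",
                                    kMaxPrivateThreads, "]"));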
const String& setSize(int len) {
assertx(m_str);
m_str->setSize(len);
return *this;
} | CWE-125 | 47 |
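setSize forwards len to the underlying StringData with no range check, matching the CWE-125 label here (and the CWE-787 label on the duplicate row further down). A hedged variant, assuming the underlying StringData exposes a capacity() accessor:

const String& setSize(int len) {
  assertx(m_str);
  // Assumed guard: reject sizes outside the buffer actually allocated.
  always_assert(len >= 0 &&
                static_cast<uint32_t>(len) <= m_str->capacity());
  m_str->setSize(len);
  return *this;
}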
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  // Reinterpret the opaque data provided by the user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt32 && type != kTfLiteFloat32 && type != kTfLiteInt64) {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | CWE-125 | 47 |
static int lookup1_values(int entries, int dim)
{
int r = (int) floor(exp((float) log((float) entries) / dim));
if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
++r; // floor() to avoid _ftol() when non-CRT
assert(pow((float) r+1, dim) > entries);
assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
return r;
} | CWE-617 | 51 |
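CWE-617 flags the reachable assert(): a crafted Vorbis setup header can violate the postcondition, and in release builds (asserts compiled out) execution continues with a bad r. The upstream-style repair turns both asserts into recoverable failures; a sketch (the -1 error convention is an assumption about the caller):

#include <math.h>

static int lookup1_values(int entries, int dim)
{
   int r = (int) floor(exp((float) log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)
      ++r;
   if (pow((float) r+1, dim) <= entries)           return -1; /* was assert */
   if ((int) floor(pow((float) r, dim)) > entries) return -1; /* was assert */
   return r;
}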
int ZlibInStream::pos()
{
return offset + ptr - start;
} | CWE-787 | 24 |
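offset + ptr - start narrows a pointer-width quantity into int, so positions past 2 GiB wrap negative; that is one plausible reading of the CWE-787 label on this three-line accessor. A sketch keeping the result pointer-width (the signature change is illustrative, not TigerVNC's actual patch):

size_t ZlibInStream::pos()
{
  // Keep the pointer difference in a pointer-width type end to end.
  return offset + static_cast<size_t>(ptr - start);
}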
TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node,
bool (*func)(bool, bool)) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>(
GetTensorShape(input1), GetTensorData<bool>(input1),
GetTensorShape(input2), GetTensorData<bool>(input2),
GetTensorShape(output), GetTensorData<bool>(output), func);
} else {
reference_ops::BinaryFunction<bool, bool, bool>(
GetTensorShape(input1), GetTensorData<bool>(input1),
GetTensorShape(input2), GetTensorData<bool>(input2),
GetTensorShape(output), GetTensorData<bool>(output), func);
}
return kTfLiteOk;
} | CWE-787 | 24 |
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
} | CWE-617 | 51 |
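The CHECK(input != nullptr) aborts the whole process when a graph names a non-existent input, which is the CWE-617 reachable-assertion reading of this row. The conservative repair degrades to "not safe to remove" instead of crashing:

const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
if (input == nullptr) {
  VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
  return false;  // malformed graph: keep the node rather than CHECK-fail
}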
TfLiteRegistration CancelOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
// Set output size to the input size in CancelOp::Prepare(). Code exists to
// have a framework in Prepare. The input and output tensors are not used.
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* in_tensor = GetInput(context, node, 0);
TfLiteTensor* out_tensor = GetOutput(context, node, 0);
TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
return context->ResizeTensor(context, out_tensor, new_size);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
cancellation_data_.is_cancelled = true;
return kTfLiteOk;
};
return reg;
} | CWE-787 | 24 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteType output_type = GetOutput(context, node, kOutputTensor)->type;
  switch (output_type) {  // Already know in/out types are same.
case kTfLiteFloat32:
EvalUnquantized<float>(context, node);
break;
case kTfLiteInt32:
EvalUnquantized<int32_t>(context, node);
break;
case kTfLiteUInt8:
EvalQuantizedUInt8(context, node);
break;
case kTfLiteInt8:
EvalUnquantized<int8_t>(context, node);
break;
case kTfLiteInt64:
EvalUnquantized<int64_t>(context, node);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Op Concatenation does not currently support Type '%s'.",
TfLiteTypeGetName(output_type));
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
bool AdminRequestHandler::handleDumpStaticStringsRequest(
const std::string& /*cmd*/, const std::string& filename) {
auto const& list = lookupDefinedStaticStrings();
std::ofstream out(filename.c_str());
SCOPE_EXIT { out.close(); };
for (auto item : list) {
out << formatStaticString(item);
if (RuntimeOption::EvalPerfDataMap) {
auto const len = std::min<size_t>(item->size(), 255);
std::string str(item->data(), len);
// Only print the first line (up to 255 characters). Since we want '\0' in
// the list of characters to avoid, we need to use the version of
// `find_first_of()' with explicit length.
auto cutOffPos = str.find_first_of("\r\n", 0, 3);
if (cutOffPos != std::string::npos) str.erase(cutOffPos);
Debug::DebugInfo::recordDataMap(item->mutableData(),
item->mutableData() + item->size(),
"Str-" + str);
}
}
return true;
} | CWE-22 | 2 |
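filename flows straight from the admin request into std::ofstream, so a caller can write the static-string dump anywhere the server process can, hence CWE-22. A hedged pre-check (the policy and helper name are assumptions, not HHVM's admin-server API):

#include <string>

// Illustrative only: constrain dump targets to one directory and
// reject traversal sequences before opening the stream.
static bool isSafeDumpPath(const std::string& filename) {
  return filename.rfind("/tmp/", 0) == 0 &&          // assumed allowed prefix
         filename.find("..") == std::string::npos;   // no traversal
}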
static TfLiteRegistration DelegateRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// If tensors are resized, the runtime should propagate shapes
// automatically if correct flag is set. Ensure values are correct.
// Output 0 should be dynamic.
TfLiteTensor* output0 = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, IsDynamicTensor(output0));
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
TF_LITE_ENSURE(context, input->dims->size == output1->dims->size);
TF_LITE_ENSURE(context, input->dims->data[0] == output1->dims->data[0]);
return kTfLiteOk;
};
return reg;
} | CWE-125 | 47 |
TEST(DefaultCertValidatorTest, TestMultiLevelMatch) {
// san_multiple_dns_cert matches *.example.com
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir "
"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.set_exact("foo.api.example.com");
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
subject_alt_name_matchers;
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers));
} | CWE-295 | 52 |
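The test pins down the single-label wildcard rule: *.example.com must reject foo.api.example.com. A validator that lets '*' span dots accepts certificates for unrelated hosts, which is the CWE-295 failure mode. A minimal sketch of the intended rule (the helper name is an assumption):

#include <string>

// Illustrative: "*.example.com" covers exactly one extra label.
static bool wildcardMatchesHost(const std::string& pattern,
                                const std::string& host) {
  if (pattern.rfind("*.", 0) != 0) return pattern == host;
  const size_t dot = host.find('.');
  if (dot == std::string::npos) return false;
  // api.example.com -> suffix "example.com" matches the pattern suffix;
  // foo.api.example.com -> suffix "api.example.com" does not.
  return host.substr(dot + 1) == pattern.substr(2);
}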
R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut32 i = 0;
ut64 offset = 0;
if (buf_offset + 8 > sz) {
return NULL;
}
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (attr) {
attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR;
attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free);
for (i = 0; i < attr->info.annotation_array.num_annotations; i++) {
if (offset >= sz) {
break;
}
RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset);
if (annotation) {
offset += annotation->size;
r_list_append (attr->info.annotation_array.annotations, (void *) annotation);
}
}
attr->size = offset;
}
return attr;
} | CWE-788 | 87 |
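The header check guards buf_offset + 8 against sz, but the two-byte R_BIN_JAVA_USHORT read lands at offset 6 within the attribute, and the per-annotation loop only tests offset >= sz before handing buffer + offset onward, so a read one past the end remains possible (CWE-788). A sketch of the tighter bound (cleanup helper is an assumed name, error handling simplified):

/* Illustrative: require the full ushort before reading it. */
if (offset + 2 > sz) {
	r_bin_java_attribute_free (attr);   /* assumed cleanup helper */
	return NULL;
}
attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;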
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} break;
// TODO(renjieliu): We may revisit the quantization calculation logic,
// the unbounded upper limit is actually hard to quantize.
case kTfLiteUInt8: {
QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
case kTfLiteInt8: {
QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 & int8/uint8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
const String& setSize(int len) {
assertx(m_str);
m_str->setSize(len);
return *this;
} | CWE-787 | 24 |
QUInt16() {} | CWE-908 | 48 |
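An empty default constructor leaves the wrapped integer indeterminate, which is exactly CWE-908 (use of uninitialized resource). The one-line remedy, assuming the type wraps a single value field the way TensorFlow's quantized scalar types do:

#include <cstdint>

struct QUInt16 {
  QUInt16() : value(0) {}                      // value-initialize instead of
  explicit QUInt16(uint16_t v) : value(v) {}   // leaving the field garbage
  uint16_t value;
};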
void Context::onLog() {
if (wasm_->onLog_) {
wasm_->onLog_(this, id_);
}
} | CWE-476 | 46 |
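Only the callback pointer is tested; wasm_ itself is dereferenced first, so a context that outlives (or never had) its Wasm owner crashes here, matching the CWE-476 label. The guarded sketch:

void Context::onLog() {
  // Assumption from the label: wasm_ may be null for a detached context.
  if (wasm_ != nullptr && wasm_->onLog_) {
    wasm_->onLog_(this, id_);
  }
}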
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
} break;
case kTfLiteUInt8: {
QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
return kTfLiteOk;
} break;
case kTfLiteInt8: {
QuantizedReluX<int8_t>(-1, 1, input, output, data);
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8 supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | CWE-125 | 47 |