code (string, 12-2.05k chars) | label_name (string, 6-8 chars) | label (int64, 0-95)
---|---|---|
static inline long decode_twos_comp(ulong c, int prec)
{
long result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1)));
return result;
} | CWE-190 | 19 |
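The CWE-190 label on the row above comes from `1 << (prec - 1)`: the shift is evaluated in `int` and overflows once `prec` reaches the width of `int`. A minimal sketch of an overflow-free decode, assuming `prec` stays strictly below the width of `ulong`; the assert is illustrative, not the dataset's fix:

```c
#include <assert.h>
#include <limits.h>
#include <stddef.h>

typedef unsigned long ulong; /* alias as used in the row */

/* Decode a prec-bit two's-complement value from the low bits of c.
   All masking is done in unsigned arithmetic, so no shift overflows;
   keeping prec strictly below the width of ulong also sidesteps the
   edge case where the sign-bit weight is unrepresentable in long. */
static inline long decode_twos_comp_checked(ulong c, int prec)
{
    assert(prec >= 2 && (size_t)prec < sizeof(ulong) * CHAR_BIT);
    ulong sign_bit  = 1UL << (prec - 1);
    ulong magnitude = c & (sign_bit - 1);
    return (c & sign_bit) ? (long)magnitude - (long)sign_bit
                          : (long)magnitude;
}
```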
Http::FilterTrailersStatus Context::onRequestTrailers() {
if (!wasm_->onRequestTrailers_) {
return Http::FilterTrailersStatus::Continue;
}
if (wasm_->onRequestTrailers_(this, id_).u64_ == 0) {
return Http::FilterTrailersStatus::Continue;
}
return Http::FilterTrailersStatus::StopIteration;
} | CWE-476 | 46 |
write(Protocol_* iprot, const StructInfo& structInfo, const void* object) {
DCHECK(object);
size_t written = iprot->writeStructBegin(structInfo.name);
if (UNLIKELY(structInfo.unionExt != nullptr)) {
const FieldInfo* end = structInfo.fieldInfos + structInfo.numFields;
const auto& unionId =
activeUnionMemberId(object, structInfo.unionExt->unionTypeOffset);
const FieldInfo* found = std::lower_bound(
structInfo.fieldInfos,
end,
unionId,
[](const FieldInfo& lhs, FieldID rhs) { return lhs.id < rhs; });
if (found < end && found->id == unionId) {
const OptionalThriftValue value = getValue(*found->typeInfo, object);
if (value.hasValue()) {
written += writeField(iprot, *found, value.value());
} else if (found->typeInfo->type == protocol::TType::T_STRUCT) {
written += iprot->writeFieldBegin(
found->name, found->typeInfo->type, found->id);
written += iprot->writeStructBegin(found->name);
written += iprot->writeStructEnd();
written += iprot->writeFieldStop();
written += iprot->writeFieldEnd();
}
}
} else {
for (std::int16_t index = 0; index < structInfo.numFields; index++) {
const auto& fieldInfo = structInfo.fieldInfos[index];
if (fieldInfo.isUnqualified || fieldInfo.issetOffset == 0 ||
fieldIsSet(object, fieldInfo.issetOffset)) {
const OptionalThriftValue value =
getValue(*fieldInfo.typeInfo, getMember(fieldInfo, object));
if (value.hasValue()) {
written += writeField(iprot, fieldInfo, value.value());
}
}
}
}
written += iprot->writeFieldStop();
written += iprot->writeStructEnd();
return written;
} | CWE-763 | 61 |
QInt8() {} | CWE-908 | 48 |
TEST(BasicInterpreter, AllocateTwice) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
TfLiteQuantizationParams quantized;
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quantized),
kTfLiteOk);
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* tensor0 = GetInput(context, node, 0);
TfLiteTensor* tensor1 = GetOutput(context, node, 0);
TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
return context->ResizeTensor(context, tensor1, newSize);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* a0 = GetInput(context, node, 0);
TfLiteTensor* a1 = GetOutput(context, node, 0);
int num = a0->dims->data[0];
for (int i = 0; i < num; i++) {
a1->data.f[i] = a0->data.f[i];
}
return kTfLiteOk;
};
ASSERT_EQ(
interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, ®),
kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
char* old_tensor0_ptr = interpreter.tensor(0)->data.raw;
char* old_tensor1_ptr = interpreter.tensor(1)->data.raw;
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(old_tensor0_ptr, interpreter.tensor(0)->data.raw);
ASSERT_EQ(old_tensor1_ptr, interpreter.tensor(1)->data.raw);
} | CWE-787 | 24 |
static String HHVM_FUNCTION(bcpow, const String& left, const String& right,
int64_t scale /* = -1 */) {
if (scale < 0) scale = BCG(bc_precision);
bc_num first, second, result;
bc_init_num(&first);
bc_init_num(&second);
bc_init_num(&result);
SCOPE_EXIT {
bc_free_num(&first);
bc_free_num(&second);
bc_free_num(&result);
};
php_str2num(&first, (char*)left.data());
php_str2num(&second, (char*)right.data());
bc_raise(first, second, &result, scale);
if (result->n_scale > scale) {
result->n_scale = scale;
}
String ret(bc_num2str(result), AttachString);
return ret;
} | CWE-190 | 19 |
void jas_matrix_asl(jas_matrix_t *matrix, int n)
{
int i;
int j;
jas_seqent_t *rowstart;
int rowstep;
jas_seqent_t *data;
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
//*data <<= n;
*data = jas_seqent_asl(*data, n);
}
}
}
} | CWE-190 | 19 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
switch (op_context.output->type) {
case kTfLiteFloat32:
TFLiteOperation<kernel_type, float, OpType>(context, node, op_context);
break;
case kTfLiteUInt8:
TFLiteOperation<kernel_type, uint8_t, OpType>(context, node,
op_context);
break;
case kTfLiteInt8:
TFLiteOperation<kernel_type, int8_t, OpType>(context, node, op_context);
break;
case kTfLiteInt32:
TFLiteOperation<kernel_type, int32_t, OpType>(context, node,
op_context);
break;
case kTfLiteInt64:
TFLiteOperation<kernel_type, int64_t, OpType>(context, node,
op_context);
break;
case kTfLiteInt16:
TFLiteOperation<kernel_type, int16_t, OpType>(context, node,
op_context);
break;
default:
context->ReportError(context,
"Type %d is currently not supported by Maximum.",
op_context.output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
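This row and the later TFLite `Prepare`/`Eval` rows share one labeled pattern: `GetInput`/`GetOutput` return raw pointers that can be null for a malformed model, and the kernels dereference them unchecked. A hedged sketch of the status-returning accessor pattern (in the style of TFLite's `GetInputSafe`/`GetOutputSafe`), shown once here rather than repeated per row:

```cpp
// Fetch tensors through status-returning accessors so a malformed model
// fails the op cleanly instead of dereferencing a null tensor pointer.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  // ... per-type dispatch as in the row, now over non-null tensors ...
  return kTfLiteOk;
}
```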
TfLiteStatus PrepareHashtableImport(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1);
const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor);
const TfLiteTensor* value_tensor = GetInput(context, node, kValueTensor);
TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 &&
value_tensor->type == kTfLiteString) ||
(key_tensor->type == kTfLiteString &&
value_tensor->type == kTfLiteInt64));
  // TODO(b/144731295): TensorFlow lookup ops support 1-D vectors for storing
  // values.
TF_LITE_ENSURE(context, HaveSameShapes(key_tensor, value_tensor));
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus EvalHashtableSize(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
int resource_id = input_resource_id_tensor->data.i32[0];
TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor);
auto* output_data = GetTensorData<std::int64_t>(output_tensor);
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
auto* lookup = resource::GetHashtableResource(&resources, resource_id);
TF_LITE_ENSURE(context, lookup != nullptr);
output_data[0] = lookup->Size();
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  // Reinterpret the opaque data provided by the user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteBool) {
context->ReportError(context, "Logical ops only support bool type.");
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
ruy::profiler::ScopeLabel label("SquaredDifference");
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32) {
EvalSquaredDifference<float>(context, node, data, input1, input2, output);
} else if (output->type == kTfLiteInt32) {
EvalSquaredDifference<int32_t>(context, node, data, input1, input2, output);
} else {
context->ReportError(
context,
"SquaredDifference only supports FLOAT32 and INT32 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputValue);
SetTensorToDynamic(output);
return kTfLiteOk;
} | CWE-787 | 24 |
char *uwsgi_expand_path(char *dir, int dir_len, char *ptr) {
char src[PATH_MAX + 1];
memcpy(src, dir, dir_len);
src[dir_len] = 0;
char *dst = ptr;
if (!dst)
dst = uwsgi_malloc(PATH_MAX + 1);
if (!realpath(src, dst)) {
uwsgi_error_realpath(src);
if (!ptr)
free(dst);
return NULL;
}
return dst;
} | CWE-787 | 24 |
void FormatConverter<T>::InitSparseToDenseConverter(
std::vector<int> shape, std::vector<int> traversal_order,
std::vector<TfLiteDimensionType> format, std::vector<int> dense_size,
std::vector<std::vector<int>> segments,
std::vector<std::vector<int>> indices, std::vector<int> block_map) {
dense_shape_ = std::move(shape);
traversal_order_ = std::move(traversal_order);
block_map_ = std::move(block_map);
format_ = std::move(format);
dense_size_ = 1;
for (int i = 0; i < dense_shape_.size(); i++) {
dense_size_ *= dense_shape_[i];
}
dim_metadata_.resize(2 * format_.size());
for (int i = 0; i < format_.size(); i++) {
if (format_[i] == kTfLiteDimDense) {
dim_metadata_[2 * i] = {dense_size[i]};
} else {
dim_metadata_[2 * i] = std::move(segments[i]);
dim_metadata_[2 * i + 1] = std::move(indices[i]);
}
}
int original_rank = dense_shape_.size();
int block_dim = 0;
blocked_shape_.resize(original_rank);
block_size_.resize(block_map_.size());
for (int i = 0; i < original_rank; i++) {
if (block_dim < block_map_.size() && block_map_[block_dim] == i) {
int orig_dim = traversal_order_[original_rank + block_dim];
block_size_[block_dim] = dense_size[orig_dim];
blocked_shape_[i] = dense_shape_[i] / dense_size[orig_dim];
block_dim++;
} else {
blocked_shape_[i] = dense_shape_[i];
}
}
} | CWE-787 | 24 |
unsigned char *base64decode(const char *buf, size_t *size)
{
if (!buf || !size) return NULL;
size_t len = (*size > 0) ? *size : strlen(buf);
if (len <= 0) return NULL;
unsigned char *outbuf = (unsigned char*)malloc((len/4)*3+3);
const char *ptr = buf;
int p = 0;
size_t l = 0;
do {
ptr += strspn(ptr, "\r\n\t ");
if (*ptr == '\0' || ptr >= buf+len) {
break;
}
l = strcspn(ptr, "\r\n\t ");
if (l > 3 && ptr+l <= buf+len) {
p+=base64decode_block(outbuf+p, ptr, l);
ptr += l;
} else {
break;
}
} while (1);
outbuf[p] = 0;
*size = p;
return outbuf;
} | CWE-125 | 47 |
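The `strspn`/`strcspn` calls in the row above assume a NUL-terminated buffer, so the scan can run past `buf + len` when the caller passes an unterminated slice (the CWE-125 label). A hedged sketch of a length-bounded scan; `base64decode_block` is assumed to exist as in the row, and the output-buffer sizing mirrors the row rather than being re-derived:

```c
#include <stdlib.h>

/* Assumed to exist as in the row: decodes len base64 chars into out. */
int base64decode_block(unsigned char *out, const char *in, size_t len);

static int is_b64_space(char c)
{
    return c == '\r' || c == '\n' || c == '\t' || c == ' ';
}

unsigned char *base64decode_bounded(const char *buf, size_t *size)
{
    if (!buf || !size || *size == 0) return NULL;
    const char *ptr = buf, *end = buf + *size;
    unsigned char *outbuf = malloc((*size / 4) * 3 + 3);
    if (!outbuf) return NULL;
    size_t p = 0;
    while (ptr < end) {
        while (ptr < end && is_b64_space(*ptr))  /* bounded whitespace skip */
            ptr++;
        const char *run = ptr;
        while (ptr < end && !is_b64_space(*ptr) && *ptr != '\0')
            ptr++;
        size_t l = (size_t)(ptr - run);
        if (l <= 3)                              /* mirror the row's bail-out */
            break;
        p += (size_t)base64decode_block(outbuf + p, run, l);
    }
    outbuf[p] = '\0';
    *size = p;
    return outbuf;
}
```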
void RequestContext::SetApiKeyHeader() {
request_->AddHeaderToBackend(kDefaultApiKeyHeaderName, api_key_);
} | CWE-290 | 85 |
void RemoteFsDevice::unmount()
{
if (details.isLocalFile()) {
return;
}
if (!isConnected() || proc) {
return;
}
if (messageSent) {
return;
}
if (constSambaProtocol==details.url.scheme() || constSambaAvahiProtocol==details.url.scheme()) {
mounter()->umount(mountPoint(details, false), getpid());
setStatusMessage(tr("Disconnecting..."));
messageSent=true;
return;
}
QString cmd;
QStringList args;
if (!details.isLocalFile()) {
QString mp=mountPoint(details, false);
if (!mp.isEmpty()) {
cmd=Utils::findExe("fusermount");
if (!cmd.isEmpty()) {
args << QLatin1String("-u") << QLatin1String("-z") << mp;
} else {
emit error(tr("\"fusermount\" is not installed!"));
}
}
}
if (!cmd.isEmpty()) {
setStatusMessage(tr("Disconnecting..."));
proc=new QProcess(this);
proc->setProperty("unmount", true);
connect(proc, SIGNAL(finished(int)), SLOT(procFinished(int)));
proc->start(cmd, args, QIODevice::ReadOnly);
}
} | CWE-22 | 2 |
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs,
const OpDef& op_def) {
FullTypeDef ft;
ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < op_def.output_arg_size(); i++) {
auto* t = ft.add_args();
*t = op_def.output_arg(i).experimental_full_type();
// Resolve dependent types. The convention for op registrations is to use
// attributes as type variables.
// See https://www.tensorflow.org/guide/create_op#type_polymorphism.
// Once the op signature can be defined entirely in FullType, this
// convention can be deprecated.
//
// Note: While this code performs some basic verifications, it generally
// assumes consistent op defs and attributes. If more complete
// verifications are needed, they should be done by separately, and in a
// way that can be reused for type inference.
for (int j = 0; j < t->args_size(); j++) {
auto* arg = t->mutable_args(i);
if (arg->type_id() == TFT_VAR) {
const auto* attr = attrs.Find(arg->s());
if (attr == nullptr) {
return Status(
error::INVALID_ARGUMENT,
absl::StrCat("Could not find an attribute for key ", arg->s()));
}
if (attr->value_case() == AttrValue::kList) {
const auto& attr_list = attr->list();
arg->set_type_id(TFT_PRODUCT);
for (int i = 0; i < attr_list.type_size(); i++) {
map_dtype_to_tensor(attr_list.type(i), arg->add_args());
}
} else if (attr->value_case() == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), arg);
} else {
return Status(error::UNIMPLEMENTED,
absl::StrCat("unknown attribute type",
attrs.DebugString(), " key=", arg->s()));
}
arg->clear_s();
}
}
}
return ft;
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* lookup = GetInput(context, node, 0);
const TfLiteTensor* value = GetInput(context, node, 1);
TfLiteTensor* output = GetOutput(context, node, 0);
switch (value->type) {
case kTfLiteFloat32:
return EvalSimple(context, node, lookup, value, output);
case kTfLiteUInt8:
case kTfLiteInt8:
if (output->type == kTfLiteFloat32) {
return EvalHybrid(context, node, lookup, value, output);
} else {
return EvalSimple(context, node, lookup, value, output);
}
default:
context->ReportError(context, "Type not currently supported.");
return kTfLiteError;
}
} | CWE-125 | 47 |
Function *ESTreeIRGen::genGeneratorFunction(
Identifier originalName,
Variable *lazyClosureAlias,
ESTree::FunctionLikeNode *functionNode) {
assert(functionNode && "Function AST cannot be null");
// Build the outer function which creates the generator.
// Does not have an associated source range.
auto *outerFn = Builder.createGeneratorFunction(
originalName,
Function::DefinitionKind::ES5Function,
ESTree::isStrict(functionNode->strictness),
/* insertBefore */ nullptr);
auto *innerFn = genES5Function(
genAnonymousLabelName(originalName.isValid() ? originalName.str() : ""),
lazyClosureAlias,
functionNode,
true);
{
FunctionContext outerFnContext{this, outerFn, functionNode->getSemInfo()};
emitFunctionPrologue(
functionNode,
Builder.createBasicBlock(outerFn),
InitES5CaptureState::Yes,
DoEmitParameters::No);
// Create a generator function, which will store the arguments.
auto *gen = Builder.createCreateGeneratorInst(innerFn);
if (!hasSimpleParams(functionNode)) {
// If there are non-simple params, step the inner function once to
// initialize them.
Value *next = Builder.createLoadPropertyInst(gen, "next");
Builder.createCallInst(next, gen, {});
}
emitFunctionEpilogue(gen);
}
return outerFn;
} | CWE-787 | 24 |
unsigned int GetU32LE (int nPos, bool *pbSuccess)
{
//*pbSuccess = true;
if ( nPos < 0 || nPos + 3 >= m_nLen )
{
*pbSuccess = false;
return 0;
}
unsigned int nRes = m_sFile[nPos + 3];
nRes = (nRes << 8) + m_sFile[nPos + 2];
nRes = (nRes << 8) + m_sFile[nPos + 1];
nRes = (nRes << 8) + m_sFile[nPos + 0];
return nRes;
} | CWE-787 | 24 |
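Separately from its label, the guard in the row above computes `nPos + 3`, which can itself wrap for `nPos` near `INT_MAX` and slip past the check. A hedged drop-in sketch of the same test with the addition removed; it assumes `m_nLen` is a non-negative length:

```cpp
// Overflow-free equivalent of (nPos < 0 || nPos + 3 >= m_nLen).
if (nPos < 0 || m_nLen < 4 || nPos >= m_nLen - 3)
{
    *pbSuccess = false;
    return 0;
}
```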
inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node,
std::function<T(T)> func,
TfLiteType expected_type) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type);
const int64_t num_elements = NumElements(input);
const T* in_data = GetTensorData<T>(input);
T* out_data = GetTensorData<T>(output);
for (int64_t i = 0; i < num_elements; ++i) {
out_data[i] = func(in_data[i]);
}
return kTfLiteOk;
} | CWE-125 | 47 |
MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) {
if ( type == BSON_BIN_BINARY_OLD ) {
int subtwolen = len + 4;
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &subtwolen );
bson_append_byte( b, type );
bson_append32( b, &len );
bson_append( b, str, len );
}
else {
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &len );
bson_append_byte( b, type );
bson_append( b, str, len );
}
return BSON_OK;
} | CWE-190 | 19 |
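The CWE-190 label on the row above covers `len + 4` and the `4 + 1 + 4 + len` element-size expressions, which wrap for `len` close to `INT_MAX`. A minimal guard sketch, assumed to run before `bson_append_estart`:

```c
#include <limits.h>

/* Reject lengths whose framing overhead (4-byte length prefix, 1 type
   byte, optional 4-byte old-style subtype length) would wrap int math. */
static int bson_binary_len_ok(int len)
{
    return len >= 0 && len <= INT_MAX - (4 + 1 + 4);
}
```

Used, for instance, as `if (!bson_binary_len_ok(len)) return BSON_ERROR;` at the top of the function.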
static TfLiteRegistration DynamicCopyOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Output 0 is dynamic
TfLiteTensor* output0 = GetOutput(context, node, 0);
SetTensorToDynamic(output0);
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(
context, output1, TfLiteIntArrayCopy(input->dims)));
return kTfLiteOk;
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
// Not implemented since this isn't required in testing.
return kTfLiteOk;
};
return reg;
} | CWE-787 | 24 |
std::string addEmoji(const Proxy &node, const RegexMatchConfigs &emoji_array, extra_settings &ext)
{
std::string real_rule, ret;
for(const RegexMatchConfig &x : emoji_array)
{
if(!x.Script.empty())
{
std::string result;
script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx)
{
std::string script = x.Script;
if(startsWith(script, "path:"))
script = fileGet(script.substr(5), true);
try
{
ctx.eval(script);
auto getEmoji = (std::function<std::string(const Proxy&)>) ctx.eval("getEmoji");
ret = getEmoji(node);
if(!ret.empty())
result = ret + " " + node.Remark;
}
catch (qjs::exception)
{
script_print_stack(ctx);
}
}, global.scriptCleanContext);
if(!result.empty())
return result;
continue;
}
if(x.Replace.empty())
continue;
if(applyMatcher(x.Match, real_rule, node) && real_rule.size() && regFind(node.Remark, real_rule))
return x.Replace + " " + node.Remark;
}
return node.Remark;
} | CWE-434 | 5 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
} | CWE-787 | 24 |
void nego_process_negotiation_response(rdpNego* nego, wStream* s)
{
UINT16 length;
WLog_DBG(TAG, "RDP_NEG_RSP");
if (Stream_GetRemainingLength(s) < 7)
{
WLog_ERR(TAG, "Invalid RDP_NEG_RSP");
nego->state = NEGO_STATE_FAIL;
return;
}
Stream_Read_UINT8(s, nego->flags);
Stream_Read_UINT16(s, length);
Stream_Read_UINT32(s, nego->SelectedProtocol);
nego->state = NEGO_STATE_FINAL;
} | CWE-125 | 47 |
void Context::onDone() {
if (wasm_->onDone_) {
wasm_->onDone_(this, id_);
}
} | CWE-476 | 46 |
void Compute(OpKernelContext* ctx) override {
auto x = ctx->input(0);
auto i = ctx->input(1);
auto v = ctx->input(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(i.shape()),
errors::InvalidArgument("i must be a vector. ",
i.shape().DebugString()));
OP_REQUIRES(ctx, x.dims() == v.dims(),
errors::InvalidArgument(
"x and v shape doesn't match (ranks differ): ",
x.shape().DebugString(), " vs. ", v.shape().DebugString()));
for (int i = 1; i < x.dims(); ++i) {
OP_REQUIRES(
ctx, x.dim_size(i) == v.dim_size(i),
errors::InvalidArgument("x and v shape doesn't match at index ", i,
" : ", x.shape().DebugString(), " vs. ",
v.shape().DebugString()));
}
OP_REQUIRES(ctx, i.dim_size(0) == v.dim_size(0),
errors::InvalidArgument(
"i and x shape doesn't match at index 0: ",
i.shape().DebugString(), " vs. ", v.shape().DebugString()));
Tensor y = x; // This creates an alias intentionally.
// Skip processing if tensors are empty.
if (x.NumElements() > 0 || v.NumElements() > 0) {
OP_REQUIRES_OK(ctx, DoCompute(ctx, i, v, &y));
}
ctx->set_output(0, y);
} | CWE-369 | 60 |
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
if (index >= 0 && index < node->inputs->size) {
const int tensor_index = node->inputs->data[index];
if (tensor_index != kTfLiteOptionalTensor) {
if (context->tensors != nullptr) {
return &context->tensors[tensor_index];
} else {
return context->GetTensor(context, tensor_index);
}
}
}
return nullptr;
} | CWE-787 | 24 |
inline void StringData::setSize(int len) {
assertx(!isImmutable() && !hasMultipleRefs());
assertx(len >= 0 && len <= capacity());
mutableData()[len] = 0;
m_lenAndHash = len;
assertx(m_hash == 0);
assertx(checkSane());
} | CWE-190 | 19 |
void DCR_CLASS dcr_cam_xyz_coeff (DCRAW* p, double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < p->colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < p->colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
p->pre_mul[i] = 1 / (float)num;
}
dcr_pseudoinverse (cam_rgb, inverse, p->colors);
for (p->raw_color = i=0; i < 3; i++)
for (j=0; j < p->colors; j++)
p->rgb_cam[i][j] = (float)inverse[j][i];
}
| CWE-770 | 37 |
ALWAYS_INLINE String serialize_impl(const Variant& value,
const SerializeOptions& opts) {
switch (value.getType()) {
case KindOfClass:
case KindOfLazyClass:
case KindOfPersistentString:
case KindOfString: {
auto const str =
isStringType(value.getType()) ? value.getStringData() :
isClassType(value.getType()) ? classToStringHelper(value.toClassVal()) :
lazyClassToStringHelper(value.toLazyClassVal());
auto const size = str->size();
if (size >= RuntimeOption::MaxSerializedStringSize) {
throw Exception("Size of serialized string (%d) exceeds max", size);
}
StringBuffer sb;
sb.append("s:");
sb.append(size);
sb.append(":\"");
sb.append(str->data(), size);
sb.append("\";");
return sb.detach();
}
case KindOfResource:
return s_Res;
case KindOfUninit:
case KindOfNull:
case KindOfBoolean:
case KindOfInt64:
case KindOfFunc:
case KindOfPersistentVec:
case KindOfVec:
case KindOfPersistentDict:
case KindOfDict:
case KindOfPersistentKeyset:
case KindOfKeyset:
case KindOfPersistentDArray:
case KindOfDArray:
case KindOfPersistentVArray:
case KindOfVArray:
case KindOfDouble:
case KindOfObject:
case KindOfClsMeth:
case KindOfRClsMeth:
case KindOfRFunc:
case KindOfRecord:
break;
}
VariableSerializer vs(VariableSerializer::Type::Serialize);
if (opts.keepDVArrays) vs.keepDVArrays();
if (opts.forcePHPArrays) vs.setForcePHPArrays();
if (opts.warnOnHackArrays) vs.setHackWarn();
if (opts.warnOnPHPArrays) vs.setPHPWarn();
if (opts.ignoreLateInit) vs.setIgnoreLateInit();
if (opts.serializeProvenanceAndLegacy) vs.setSerializeProvenanceAndLegacy();
// Keep the count so recursive calls to serialize() embed references properly.
return vs.serialize(value, true, true);
} | CWE-190 | 19 |
TightDecoder::FilterGradient24(const rdr::U8 *inbuf,
const PixelFormat& pf, PIXEL_T* outbuf,
int stride, const Rect& r)
{
int x, y, c;
rdr::U8 prevRow[TIGHT_MAX_WIDTH*3];
rdr::U8 thisRow[TIGHT_MAX_WIDTH*3];
rdr::U8 pix[3];
int est[3];
memset(prevRow, 0, sizeof(prevRow));
// Set up shortcut variables
int rectHeight = r.height();
int rectWidth = r.width();
for (y = 0; y < rectHeight; y++) {
/* First pixel in a row */
for (c = 0; c < 3; c++) {
pix[c] = inbuf[y*rectWidth*3+c] + prevRow[c];
thisRow[c] = pix[c];
}
pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);
/* Remaining pixels of a row */
for (x = 1; x < rectWidth; x++) {
for (c = 0; c < 3; c++) {
est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c];
if (est[c] > 0xff) {
est[c] = 0xff;
} else if (est[c] < 0) {
est[c] = 0;
}
pix[c] = inbuf[(y*rectWidth+x)*3+c] + est[c];
thisRow[x*3+c] = pix[c];
}
pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1);
}
memcpy(prevRow, thisRow, sizeof(prevRow));
}
} | CWE-787 | 24 |
TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node,
bool is_string_allowed) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Don't support string.
if (!is_string_allowed) {
TF_LITE_ENSURE(context, input1->type != kTfLiteString);
}
  // Currently only supports tensors of the same type.
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = kTfLiteBool;
bool requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | CWE-125 | 47 |
jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x)
{
jas_matrix_t *y;
int i;
int j;
y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x),
jas_seq2d_xend(x), jas_seq2d_yend(x));
assert(y);
for (i = 0; i < x->numrows_; ++i) {
for (j = 0; j < x->numcols_; ++j) {
*jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j);
}
}
return y;
} | CWE-190 | 19 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = input->dims->data[1];
output_size->data[2] = input->dims->data[2];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
} | CWE-787 | 24 |
MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) {
int ds;
if ( i->first ) {
i->first = 0;
return ( bson_type )( *i->cur );
}
switch ( bson_iterator_type( i ) ) {
case BSON_EOO:
return BSON_EOO; /* don't advance */
case BSON_UNDEFINED:
case BSON_NULL:
ds = 0;
break;
case BSON_BOOL:
ds = 1;
break;
case BSON_INT:
ds = 4;
break;
case BSON_LONG:
case BSON_DOUBLE:
case BSON_TIMESTAMP:
case BSON_DATE:
ds = 8;
break;
case BSON_OID:
ds = 12;
break;
case BSON_STRING:
case BSON_SYMBOL:
case BSON_CODE:
ds = 4 + bson_iterator_int_raw( i );
break;
case BSON_BINDATA:
ds = 5 + bson_iterator_int_raw( i );
break;
case BSON_OBJECT:
case BSON_ARRAY:
case BSON_CODEWSCOPE:
ds = bson_iterator_int_raw( i );
break;
case BSON_DBREF:
ds = 4+12 + bson_iterator_int_raw( i );
break;
case BSON_REGEX: {
const char *s = bson_iterator_value( i );
const char *p = s;
p += strlen( p )+1;
p += strlen( p )+1;
ds = p-s;
break;
}
default: {
char msg[] = "unknown type: 000000000000";
bson_numstr( msg+14, ( unsigned )( i->cur[0] ) );
bson_fatal_msg( 0, msg );
return 0;
}
}
i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds;
return ( bson_type )( *i->cur );
} | CWE-190 | 19 |
bool WideToCharMap(const wchar *Src,char *Dest,size_t DestSize,bool &Success)
{
// String with inconvertible characters mapped to private use Unicode area
// must have the mark code somewhere.
if (wcschr(Src,(wchar)MappedStringMark)==NULL)
return false;
Success=true;
uint SrcPos=0,DestPos=0;
while (Src[SrcPos]!=0 && DestPos<DestSize-MB_CUR_MAX)
{
if (uint(Src[SrcPos])==MappedStringMark)
{
SrcPos++;
continue;
}
// For security reasons do not restore low ASCII codes, so mapping cannot
// be used to hide control codes like path separators.
if (uint(Src[SrcPos])>=MapAreaStart+0x80 && uint(Src[SrcPos])<MapAreaStart+0x100)
Dest[DestPos++]=char(uint(Src[SrcPos++])-MapAreaStart);
else
{
mbstate_t ps;
memset(&ps,0,sizeof(ps));
if (wcrtomb(Dest+DestPos,Src[SrcPos],&ps)==-1)
{
Dest[DestPos]='_';
Success=false;
}
SrcPos++;
memset(&ps,0,sizeof(ps));
int Length=mbrlen(Dest+DestPos,MB_CUR_MAX,&ps);
DestPos+=Max(Length,1);
}
}
Dest[Min(DestPos,DestSize-1)]=0;
return true;
} | CWE-787 | 24 |
bool HexInStream::hexStrToBin(const char* s, char** data, int* length) {
int l=strlen(s);
if ((l % 2) == 0) {
delete [] *data;
*data = 0; *length = 0;
if (l == 0)
return true;
*data = new char[l/2];
*length = l/2;
for(int i=0;i<l;i+=2) {
int byte = 0;
if (!readHexAndShift(s[i], &byte) ||
!readHexAndShift(s[i+1], &byte))
goto decodeError;
(*data)[i/2] = byte;
}
return true;
}
decodeError:
delete [] *data;
*data = 0;
*length = 0;
return false;
} | CWE-787 | 24 |
void PngImg::InitStorage_() {
rowPtrs_.resize(info_.height, nullptr);
data_ = new png_byte[info_.height * info_.rowbytes];
for(size_t i = 0; i < info_.height; ++i) {
rowPtrs_[i] = data_ + i * info_.rowbytes;
}
} | CWE-190 | 19 |
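`new png_byte[info_.height * info_.rowbytes]` in the row above multiplies two attacker-influenced sizes, so the product can wrap before allocation (the CWE-190 label). A hedged sketch of a checked multiply, assuming both fields are unsigned integral as in libpng:

```cpp
#include <cstddef>
#include <limits>
#include <stdexcept>

// Multiply two sizes, throwing instead of silently wrapping.
static std::size_t checked_mul(std::size_t a, std::size_t b)
{
    if (b != 0 && a > std::numeric_limits<std::size_t>::max() / b)
        throw std::length_error("image dimensions overflow size_t");
    return a * b;
}
```

The allocation then becomes `data_ = new png_byte[checked_mul(info_.height, info_.rowbytes)];`, with the row-pointer loop unchanged.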
decode_rt_routing_info(netdissect_options *ndo,
const u_char *pptr, char *buf, u_int buflen)
{
uint8_t route_target[8];
u_int plen;
ND_TCHECK(pptr[0]);
plen = pptr[0]; /* get prefix length */
if (0 == plen) {
snprintf(buf, buflen, "default route target");
return 1;
}
if (32 > plen)
return -1;
plen-=32; /* adjust prefix length */
if (64 < plen)
return -1;
memset(&route_target, 0, sizeof(route_target));
ND_TCHECK2(pptr[1], (plen + 7) / 8);
memcpy(&route_target, &pptr[1], (plen + 7) / 8);
if (plen % 8) {
((u_char *)&route_target)[(plen + 7) / 8 - 1] &=
((0xff00 >> (plen % 8)) & 0xff);
}
snprintf(buf, buflen, "origin AS: %s, route target %s",
as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+1)),
bgp_vpn_rd_print(ndo, (u_char *)&route_target));
return 5 + (plen + 7) / 8;
trunc:
return -2;
} | CWE-125 | 47 |
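`ND_TCHECK2(pptr[1], (plen + 7) / 8)` in the row above only covers the prefix bytes, while `EXTRACT_32BITS(pptr + 1)` always reads four of them, so prefixes shorter than 25 bits are over-read (the CWE-125 label). A hedged sketch of the one missing check, reusing the decoder's own bounds macro:

```c
/* Sketch: require the fixed 4-byte origin-AS field to be captured before
   EXTRACT_32BITS(pptr + 1) dereferences it, independent of plen. */
ND_TCHECK2(pptr[1], 4);
```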
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// TODO(ahentz): these two checks would make the new implementation
// incompatible with some existing models, where params is not specified. It
// is OK not to have them because toco would have set input and output types
// to match the parameters.
// auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data);
// TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type);
// TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type);
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-125 | 47 |
TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::LessEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::LessEqualFn>(
input1, input2, output, requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
void Compute(OpKernelContext* ctx) override {
const Tensor& handle = ctx->input(0);
const string& name = handle.scalar<tstring>()();
OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name));
} | CWE-476 | 46 |
const String& setSize(int len) {
assertx(m_str);
m_str->setSize(len);
return *this;
} | CWE-787 | 24 |
void RestAuthHandler::shutdownExecute(bool isFinalized) noexcept {
try {
if (_isValid) {
events::LoggedIn(*_request, _username);
} else {
events::CredentialsBad(*_request, _username);
}
} catch (...) {
}
RestVocbaseBaseHandler::shutdownExecute(isFinalized);
} | CWE-613 | 7 |
TEST_P(SslSPIFFECertValidatorIntegrationTest, ServerRsaSPIFFEValidatorSANNotMatch) {
auto typed_conf = new envoy::config::core::v3::TypedExtensionConfig();
TestUtility::loadFromYaml(TestEnvironment::substitute(R"EOF(
name: envoy.tls.cert_validator.spiffe
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig
trust_domains:
- name: lyft.com
trust_bundle:
filename: "{{ test_rundir }}/test/config/integration/certs/cacert.pem"
)EOF"),
*typed_conf);
custom_validator_config_ = typed_conf;
envoy::type::matcher::v3::StringMatcher matcher;
matcher.set_prefix("spiffe://example.com/");
// The cert has "DNS.1 = lyft.com" but SPIFFE validator must ignore SAN types other than URI.
matcher.set_prefix("www.lyft.com");
san_matchers_ = {matcher};
initialize();
auto conn = makeSslClientConnection({});
if (tls_version_ == envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2) {
auto codec = makeRawHttpConnection(std::move(conn), absl::nullopt);
EXPECT_FALSE(codec->connected());
} else {
auto codec = makeHttpConnection(std::move(conn));
ASSERT_TRUE(codec->waitForDisconnect());
codec->close();
}
checkVerifyErrorCouter(1);
} | CWE-295 | 52 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const int num_elements = NumElements(input);
TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output));
switch (input->type) {
case kTfLiteInt64:
return copyToTensor(context, input->data.i64, output, num_elements);
case kTfLiteInt32:
return copyToTensor(context, input->data.i32, output, num_elements);
case kTfLiteUInt8:
return copyToTensor(context, input->data.uint8, output, num_elements);
case kTfLiteFloat32:
return copyToTensor(context, GetTensorData<float>(input), output,
num_elements);
case kTfLiteBool:
return copyToTensor(context, input->data.b, output, num_elements);
case kTfLiteComplex64:
return copyToTensor(
context, reinterpret_cast<std::complex<float>*>(input->data.c64),
output, num_elements);
default:
// Unsupported type.
TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast");
}
return kTfLiteOk;
} | CWE-787 | 24 |
void CommandData::ProcessCommand()
{
#ifndef SFX_MODULE
const wchar *SingleCharCommands=L"FUADPXETK";
if (Command[0]!=0 && Command[1]!=0 && wcschr(SingleCharCommands,Command[0])!=NULL || *ArcName==0)
OutHelp(*Command==0 ? RARX_SUCCESS:RARX_USERERROR); // Return 'success' for 'rar' without parameters.
const wchar *ArcExt=GetExt(ArcName);
#ifdef _UNIX
if (ArcExt==NULL && (!FileExist(ArcName) || IsDir(GetFileAttr(ArcName))))
wcsncatz(ArcName,L".rar",ASIZE(ArcName));
#else
if (ArcExt==NULL)
wcsncatz(ArcName,L".rar",ASIZE(ArcName));
#endif
// Treat arcname.part1 as arcname.part1.rar.
if (ArcExt!=NULL && wcsnicomp(ArcExt,L".part",5)==0 && IsDigit(ArcExt[5]) &&
!FileExist(ArcName))
{
wchar Name[NM];
wcsncpyz(Name,ArcName,ASIZE(Name));
wcsncatz(Name,L".rar",ASIZE(Name));
if (FileExist(Name))
wcsncpyz(ArcName,Name,ASIZE(ArcName));
}
if (wcschr(L"AFUMD",*Command)==NULL)
{
if (GenerateArcName)
GenerateArchiveName(ArcName,ASIZE(ArcName),GenerateMask,false);
StringList ArcMasks;
ArcMasks.AddString(ArcName);
ScanTree Scan(&ArcMasks,Recurse,SaveSymLinks,SCAN_SKIPDIRS);
FindData FindData;
while (Scan.GetNext(&FindData)==SCAN_SUCCESS)
AddArcName(FindData.Name);
}
else
AddArcName(ArcName);
#endif
switch(Command[0])
{
case 'P':
case 'X':
case 'E':
case 'T':
case 'I':
{
CmdExtract Extract(this);
Extract.DoExtract();
}
break;
#ifndef SILENT
case 'V':
case 'L':
ListArchive(this);
break;
default:
OutHelp(RARX_USERERROR);
#endif
}
if (!BareOutput)
mprintf(L"\n");
} | CWE-787 | 24 |
void runTest() override
{
beginTest ("ZIP");
ZipFile::Builder builder;
StringArray entryNames { "first", "second", "third" };
HashMap<String, MemoryBlock> blocks;
for (auto& entryName : entryNames)
{
auto& block = blocks.getReference (entryName);
MemoryOutputStream mo (block, false);
mo << entryName;
mo.flush();
builder.addEntry (new MemoryInputStream (block, false), 9, entryName, Time::getCurrentTime());
}
MemoryBlock data;
MemoryOutputStream mo (data, false);
builder.writeToStream (mo, nullptr);
MemoryInputStream mi (data, false);
ZipFile zip (mi);
expectEquals (zip.getNumEntries(), entryNames.size());
for (auto& entryName : entryNames)
{
auto* entry = zip.getEntry (entryName);
std::unique_ptr<InputStream> input (zip.createStreamForEntry (*entry));
expectEquals (input->readEntireStreamAsString(), entryName);
}
}
| CWE-22 | 2 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32);
op_data->max_diff = op_data->tolerance * op_context.input->params.scale;
switch (op_context.input->type) {
case kTfLiteUInt8:
case kTfLiteInt8:
op_data->max_diff *= (1 << 8);
break;
case kTfLiteInt16:
op_data->max_diff *= (1 << 16);
break;
default:
break;
}
// Allocate tensor to store the dequantized inputs.
if (op_data->cache_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &op_data->cache_tensor_id));
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->cache_tensor_id;
TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0);
dequantized->type = op_context.ref->type;
dequantized->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(
context, dequantized,
TfLiteIntArrayCopy(op_context.input->dims)));
return kTfLiteOk;
} | CWE-787 | 24 |
void writeStats(Array& /*ret*/) override {
fprintf(stderr, "writeStats start\n");
// RetSame: the return value is the same instance every time
// HasThis: call has a this argument
// AllSame: all returns were the same data even though args are different
// MemberCount: number of different arg sets (including this)
fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis "
"AllSame MemberCount\n");
for (auto& me : m_memos) {
if (me.second.m_ignore) continue;
if (me.second.m_count == 1) continue;
int min_ser_len = 999999999;
int max_ser_len = 0;
int count = 0;
int member_count = 0;
bool all_same = true;
if (me.second.m_has_this) {
bool any_multiple = false;
auto& fr = me.second.m_member_memos.begin()->second.m_return_value;
member_count = me.second.m_member_memos.size();
for (auto& mme : me.second.m_member_memos) {
if (mme.second.m_return_value != fr) all_same = false;
count += mme.second.m_count;
auto ser_len = mme.second.m_return_value.length();
min_ser_len = std::min(min_ser_len, ser_len);
max_ser_len = std::max(max_ser_len, ser_len);
if (mme.second.m_count > 1) any_multiple = true;
}
if (!any_multiple && !all_same) continue;
} else {
min_ser_len = max_ser_len = me.second.m_return_value.length();
count = me.second.m_count;
all_same = me.second.m_ret_tv_same;
}
fprintf(stderr, "%d %s %d %d %s %s %s %d\n",
count, me.first.data(),
min_ser_len, max_ser_len,
me.second.m_ret_tv_same ? " true" : "false",
me.second.m_has_this ? " true" : "false",
all_same ? " true" : "false",
member_count
);
}
fprintf(stderr, "writeStats end\n");
} | CWE-787 | 24 |
Status OpLevelCostEstimator::PredictAvgPool(const OpContext& op_context,
NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
// x: op_info.inputs(0)
ConvolutionDimensions dims = OpDimensionsFromInputs(
op_info.inputs(0).shape(), op_info, &found_unknown_shapes);
// kx * ky - 1 additions and 1 multiplication per output.
int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * dims.kx * dims.ky;
node_costs->num_compute_ops = ops;
int64_t input_size;
if (dims.ky >= dims.sy) {
input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
} else { // dims.ky < dims.sy
// vertical stride is larger than vertical kernel; assuming row-major
// format, skip unnecessary rows (or read every kx rows per sy rows, as the
// others are not used for output).
const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
}
node_costs->num_input_bytes_accessed = {input_size};
const int64_t output_size =
CalculateOutputSize(op_info, &found_unknown_shapes);
node_costs->num_output_bytes_accessed = {output_size};
node_costs->max_memory = output_size;
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return Status::OK();
} | CWE-369 | 60 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Ensure validity of input tensor and its dimension.
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context,
begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64);
TF_LITE_ENSURE(context,
size->type == kTfLiteInt32 || size->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, NumDimensions(begin), 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, NumElements(begin), NumElements(size));
TF_LITE_ENSURE_MSG(context, NumDimensions(input) <= kMaxDim,
"Slice op only supports 1D-4D input arrays.");
// Postpone allocation of output if any of the indexing tensors is not
// constant
if (!(IsConstantTensor(begin) && IsConstantTensor(size))) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputShape(context, input, begin, size, output);
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Always postpone sizing string tensors, even if we could in principle
// calculate their shapes now. String tensors don't benefit from having their
// shapes precalculated because the actual memory can only be allocated after
// we know all the content.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type != kTfLiteString) {
if (NumInputs(node) == 1 ||
IsConstantTensor(GetInput(context, node, kShapeTensor))) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
} else {
SetTensorToDynamic(output);
}
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis = GetInput(context, node, kAxisTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(axis), 1);
TF_LITE_ENSURE(context, NumDimensions(input) >= NumElements(axis));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 &&
input->type != kTfLiteInt64 && input->type != kTfLiteBool) {
context->ReportError(context, "Type '%s' is not supported by reverse.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
if (axis->type != kTfLiteInt32) {
context->ReportError(context, "Axis Type '%s' is not supported by reverse.",
TfLiteTypeGetName(axis->type));
return kTfLiteError;
}
// TODO(renjieliu): support multi-axis case.
if (NumElements(axis) > 1) {
context->ReportError(context, "Current does not support more than 1 axis.");
}
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
return context->ResizeTensor(context, output, output_shape);
} | CWE-787 | 24 |
bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration,
const TfLiteNode* node,
TfLiteContext* context) {
if (node->builtin_data == nullptr) return false;
const auto* fc_params =
reinterpret_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);
const int kInput = 0;
const int kWeights = 1;
const int kBias = 2;
if (fc_params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) {
return false;
}
const TfLiteTensor* input = GetInput(context, node, kInput);
const TfLiteTensor* weights = GetInput(context, node, kWeights);
if (!IsFloatType(input->type)) {
return false;
}
if (!IsFloatType(weights->type) || !IsConstantTensor(weights)) {
return false;
}
  // Core ML 2 only supports a single-batch fully connected layer, so all
  // dimensions except the last one should be 1.
if (input->dims->data[input->dims->size - 1] != NumElements(input)) {
return false;
}
if (node->inputs->size > 2) {
const TfLiteTensor* bias = GetInput(context, node, kBias);
if (!IsFloatType(bias->type) || !IsConstantTensor(bias)) {
return false;
}
}
TfLiteFusedActivation activation = fc_params->activation;
if (activation == kTfLiteActSignBit) {
return false;
}
return true;
} | CWE-787 | 24 |
void writeStats(Array& /*ret*/) override {
fprintf(stderr, "writeStats start\n");
// RetSame: the return value is the same instance every time
// HasThis: call has a this argument
// AllSame: all returns were the same data even though args are different
// MemberCount: number of different arg sets (including this)
fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis "
"AllSame MemberCount\n");
for (auto& me : m_memos) {
if (me.second.m_ignore) continue;
if (me.second.m_count == 1) continue;
int min_ser_len = 999999999;
int max_ser_len = 0;
int count = 0;
int member_count = 0;
bool all_same = true;
if (me.second.m_has_this) {
bool any_multiple = false;
auto& fr = me.second.m_member_memos.begin()->second.m_return_value;
member_count = me.second.m_member_memos.size();
for (auto& mme : me.second.m_member_memos) {
if (mme.second.m_return_value != fr) all_same = false;
count += mme.second.m_count;
auto ser_len = mme.second.m_return_value.length();
min_ser_len = std::min(min_ser_len, ser_len);
max_ser_len = std::max(max_ser_len, ser_len);
if (mme.second.m_count > 1) any_multiple = true;
}
if (!any_multiple && !all_same) continue;
} else {
min_ser_len = max_ser_len = me.second.m_return_value.length();
count = me.second.m_count;
all_same = me.second.m_ret_tv_same;
}
fprintf(stderr, "%d %s %d %d %s %s %s %d\n",
count, me.first.data(),
min_ser_len, max_ser_len,
me.second.m_ret_tv_same ? " true" : "false",
me.second.m_has_this ? " true" : "false",
all_same ? " true" : "false",
member_count
);
}
fprintf(stderr, "writeStats end\n");
} | CWE-125 | 47 |
ServerSecurityFeature::ServerSecurityFeature(application_features::ApplicationServer& server)
: ApplicationFeature(server, "ServerSecurity"),
_enableFoxxApi(true),
_enableFoxxStore(true),
_hardenedRestApi(false) {
setOptional(false);
startsAfter<application_features::GreetingsFeaturePhase>();
} | CWE-918 | 16 |
int size() const {
return m_str ? m_str->size() : 0;
} | CWE-125 | 47 |
TEST_CASE_METHOD(TestFixture, "ECDSA AES keygen and signature test", "[ecdsa-aes-key-sig-gen]") {
vector<char> errMsg(BUF_LEN, 0);
int errStatus = 0;
vector <uint8_t> encrPrivKey(BUF_LEN, 0);
vector<char> pubKeyX(BUF_LEN, 0);
vector<char> pubKeyY(BUF_LEN, 0);
uint32_t encLen = 0;
PRINT_SRC_LINE
auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen,
pubKeyX.data(),
pubKeyY.data());
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
string hex = SAMPLE_HEX_HASH;
vector<char> signatureR(BUF_LEN, 0);
vector<char> signatureS(BUF_LEN, 0);
uint8_t signatureV = 0;
for (int i = 0; i < 50; i++) {
PRINT_SRC_LINE
status = trustedEcdsaSignAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), encLen,
hex.data(),
signatureR.data(),
signatureS.data(), &signatureV, 16);
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
}
} | CWE-787 | 24 |
Http::FilterDataStatus Context::onResponseBody(int body_buffer_length, bool end_of_stream) {
if (!wasm_->onResponseBody_) {
return Http::FilterDataStatus::Continue;
}
switch (wasm_
->onResponseBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
static_cast<uint32_t>(end_of_stream))
.u64_) {
case 0:
return Http::FilterDataStatus::Continue;
case 1:
return Http::FilterDataStatus::StopIterationAndBuffer;
case 2:
return Http::FilterDataStatus::StopIterationAndWatermark;
default:
return Http::FilterDataStatus::StopIterationNoBuffer;
}
} | CWE-476 | 46 |
ZlibInStream::ZlibInStream(int bufSize_)
: underlying(0), bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0),
zs(NULL), bytesIn(0)
{
ptr = end = start = new U8[bufSize];
init();
} | CWE-787 | 24 |
const std::string& get_tenant() const {
ceph_assert(t != Wildcard);
return u.tenant;
} | CWE-617 | 51 |
const char *enc_untrusted_inet_ntop(int af, const void *src, char *dst,
socklen_t size) {
if (!src || !dst) {
errno = EFAULT;
return nullptr;
}
size_t src_size = 0;
if (af == AF_INET) {
src_size = sizeof(struct in_addr);
} else if (af == AF_INET6) {
src_size = sizeof(struct in6_addr);
} else {
errno = EAFNOSUPPORT;
return nullptr;
}
MessageWriter input;
input.Push<int>(TokLinuxAfFamily(af));
input.PushByReference(Extent{reinterpret_cast<const char *>(src), src_size});
input.Push(size);
MessageReader output;
const auto status = NonSystemCallDispatcher(
::asylo::host_call::kInetNtopHandler, &input, &output);
CheckStatusAndParamCount(status, output, "enc_untrusted_inet_ntop", 2);
auto result = output.next();
int klinux_errno = output.next<int>();
if (result.empty()) {
errno = FromkLinuxErrorNumber(klinux_errno);
return nullptr;
}
memcpy(dst, result.data(),
std::min(static_cast<size_t>(size),
static_cast<size_t>(INET6_ADDRSTRLEN)));
return dst;
} | CWE-125 | 47 |
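The final `memcpy` in the row above clamps against the caller's `size` and `INET6_ADDRSTRLEN` but never against `result.size()`, so a short host response is over-read (the CWE-125 label). A hedged sketch with the third bound added, assuming the `Extent`-like `result` exposes `size()`:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstring>

// Copy at most dst_cap bytes, additionally bounded by what the host
// actually returned (src_len) and the protocol maximum (proto_max).
static size_t bounded_copy(char *dst, size_t dst_cap,
                           const char *src, size_t src_len, size_t proto_max)
{
    const size_t n = std::min({dst_cap, src_len, proto_max});
    memcpy(dst, src, n);
    return n;
}
```

Called as `bounded_copy(dst, size, result.data(), result.size(), INET6_ADDRSTRLEN)` in place of the row's memcpy.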
int ZlibInStream::overrun(int itemSize, int nItems, bool wait)
{
if (itemSize > bufSize)
throw Exception("ZlibInStream overrun: max itemSize exceeded");
if (end - ptr != 0)
memmove(start, ptr, end - ptr);
offset += ptr - start;
end -= ptr - start;
ptr = start;
while (end - ptr < itemSize) {
if (!decompress(wait))
return 0;
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | CWE-787 | 24 |
void *bson_realloc( void *ptr, int size ) {
void *p;
p = bson_realloc_func( ptr, size );
bson_fatal_msg( !!p, "realloc() failed" );
return p;
} | CWE-190 | 19 |
MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
} | CWE-190 | 19 |
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
AverageEvalFloat<kernel_type>(context, node, params, data, input, output);
break;
case kTfLiteUInt8:
AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt8:
AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt16:
AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
friend H AbslHashValue(H h, const TensorKey& k) {
const uint8* d = static_cast<uint8*>(k.data());
size_t s = k.AllocatedBytes();
std::vector<uint8> vec;
vec.reserve(s);
for (int i = 0; i < s; i++) {
vec.push_back(d[i]);
}
return H::combine(std::move(h), s);
} | CWE-120 | 44 |
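The row above walks `AllocatedBytes()` with an `int` index and then combines only the length, never the copied bytes, into the hash. A hedged sketch of the in-class friend using Abseil's contiguous combinator, which hashes the byte range with no copy; the `TensorKey` accessors are assumed to behave as in the row:

```cpp
// In-class friend, mirroring the row; combine_contiguous hashes the byte
// range directly, with no std::vector copy and no int/size_t mismatch.
template <typename H>
friend H AbslHashValue(H h, const TensorKey& k) {
  const uint8* d = static_cast<uint8*>(k.data());
  const size_t s = k.AllocatedBytes();
  return H::combine(H::combine_contiguous(std::move(h), d, s), s);
}
```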
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* start = GetInput(context, node, kStartTensor);
const TfLiteTensor* limit = GetInput(context, node, kLimitTensor);
const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutput(context, start, limit, delta, output));
}
switch (output->type) {
case kTfLiteInt32: {
EvalImpl<int32_t>(start, delta, output);
break;
}
case kTfLiteFloat32: {
EvalImpl<float>(start, delta, output);
break;
}
default: {
context->ReportError(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} | CWE-787 | 24 |
size_t CxMemFile::Read(void *buffer, size_t size, size_t count)
{
if (buffer==NULL) return 0;
if (m_pBuffer==NULL) return 0;
if (m_Position >= (int32_t)m_Size){
m_bEOF = true;
return 0;
}
int32_t nCount = (int32_t)(count*size);
if (nCount == 0) return 0;
int32_t nRead;
if (m_Position + nCount > (int32_t)m_Size){
m_bEOF = true;
nRead = (m_Size - m_Position);
} else
nRead = nCount;
memcpy(buffer, m_pBuffer + m_Position, nRead);
m_Position += nRead;
return (size_t)(nRead/size);
}
| CWE-770 | 37 |
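`(int32_t)(count * size)` in the row above truncates the 64-bit product, so a huge request wraps into a small positive byte count while the caller still believes `count` items were handled (tied to the CWE-770 label). A hedged sketch of the size math kept entirely in `size_t`:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Clamp a (count * size)-byte request against the bytes remaining in the
// buffer, without truncating through a signed 32-bit intermediate.
static size_t bytes_to_read(size_t pos, size_t total, size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return 0;                            // product would wrap
    const size_t want  = count * size;
    const size_t avail = pos < total ? total - pos : 0;
    return std::min(want, avail);
}
```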
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers);
const int num_dimensions = NumDimensions(input);
const int num_multipliers = NumElements(multipliers);
TF_LITE_ENSURE_EQ(context, num_dimensions, num_multipliers);
switch (multipliers->type) {
case kTfLiteInt32:
return context->ResizeTensor(
context, output,
MultiplyShapeDims<int32_t>(*input->dims, multipliers,
num_dimensions));
case kTfLiteInt64:
return context->ResizeTensor(
context, output,
MultiplyShapeDims<int64_t>(*input->dims, multipliers,
num_dimensions));
default:
context->ReportError(
context, "Multipliers of type '%s' are not supported by tile.",
TfLiteTypeGetName(multipliers->type));
return kTfLiteError;
}
} | CWE-787 | 24 |
std::string TarFileReader::extract(const string &_path) {
if (_path.empty()) THROW("path cannot be empty");
if (!hasMore()) THROW("No more tar files");
string path = _path;
if (SystemUtilities::isDirectory(path)) path += "/" + getFilename();
LOG_DEBUG(5, "Extracting: " << path);
return extract(*SystemUtilities::oopen(path));
} | CWE-22 | 2 |
static size_t ntlm_av_pair_get_next_offset(NTLM_AV_PAIR* pAvPair)
{
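	// NOTE: the AV pair's length field is trusted with no check against the
	// bytes actually remaining in the buffer, so the offset returned here can
	// steer callers past the end of the list (CWE-125).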
return ntlm_av_pair_get_len(pAvPair) + sizeof(NTLM_AV_PAIR);
} | CWE-125 | 47 |
TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context,
TfLiteNode* node, OpData* op_data) {
// Get the input tensors
const TfLiteTensor* input_box_encodings =
GetInput(context, node, kInputTensorBoxEncodings);
const TfLiteTensor* input_class_predictions =
GetInput(context, node, kInputTensorClassPredictions);
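  // NOTE: dims->data[1] and dims->data[2] below are read without verifying
  // that the tensors are non-null or even rank 3 (CWE-787).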
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0],
kBatchSize);
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes);
const int num_classes_with_background =
input_class_predictions->dims->data[2];
TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1));
TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes));
const TfLiteTensor* scores;
switch (input_class_predictions->type) {
case kTfLiteUInt8: {
TfLiteTensor* temporary_scores = &context->tensors[op_data->scores_index];
DequantizeClassPredictions(input_class_predictions, num_boxes,
num_classes_with_background, temporary_scores);
scores = temporary_scores;
} break;
case kTfLiteFloat32:
scores = input_class_predictions;
break;
default:
// Unsupported type.
return kTfLiteError;
}
if (op_data->use_regular_non_max_suppression)
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper(
context, node, op_data, GetTensorData<float>(scores)));
else
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassFastHelper(
context, node, op_data, GetTensorData<float>(scores)));
return kTfLiteOk;
} | CWE-787 | 24 |
_forceinline void Unpack::CopyString(uint Length,uint Distance)
{
size_t SrcPtr=UnpPtr-Distance;
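  // NOTE: the fast path below assumes Distance <= UnpPtr and
  // Length <= MAX_LZ_MATCH; corrupted input violating either assumption lets
  // the unmasked copy run outside Window (CWE-787).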
if (SrcPtr<MaxWinSize-MAX_LZ_MATCH && UnpPtr<MaxWinSize-MAX_LZ_MATCH)
{
    // If we are not close to the end of the window, we do not need to waste
    // time on "& MaxWinMask" pointer protection.
byte *Src=Window+SrcPtr;
byte *Dest=Window+UnpPtr;
UnpPtr+=Length;
#ifdef FAST_MEMCPY
if (Distance<Length) // Overlapping strings
#endif
while (Length>=8)
{
Dest[0]=Src[0];
Dest[1]=Src[1];
Dest[2]=Src[2];
Dest[3]=Src[3];
Dest[4]=Src[4];
Dest[5]=Src[5];
Dest[6]=Src[6];
Dest[7]=Src[7];
Src+=8;
Dest+=8;
Length-=8;
}
#ifdef FAST_MEMCPY
else
while (Length>=8)
{
// In theory we still could overlap here.
// Supposing Distance == MaxWinSize - 1 we have memcpy(Src, Src + 1, 8).
// But for real RAR archives Distance <= MaxWinSize - MAX_LZ_MATCH
// always, so overlap here is impossible.
// This memcpy expanded inline by MSVC. We could also use uint64
// assignment, which seems to provide about the same speed.
memcpy(Dest,Src,8);
Src+=8;
Dest+=8;
Length-=8;
}
#endif
// Unroll the loop for 0 - 7 bytes left. Note that we use nested "if"s.
if (Length>0) { Dest[0]=Src[0];
if (Length>1) { Dest[1]=Src[1];
if (Length>2) { Dest[2]=Src[2];
if (Length>3) { Dest[3]=Src[3];
if (Length>4) { Dest[4]=Src[4];
if (Length>5) { Dest[5]=Src[5];
if (Length>6) { Dest[6]=Src[6]; } } } } } } } // Close all nested "if"s.
}
else
while (Length-- > 0) // Slow copying with all possible precautions.
{
Window[UnpPtr]=Window[SrcPtr++ & MaxWinMask];
      // We need UnpPtr to stay masked after the loop exits, so this must not
      // be replaced with 'Window[UnpPtr++ & MaxWinMask]'
UnpPtr=(UnpPtr+1) & MaxWinMask;
}
} | CWE-787 | 24 |
gdImagePtr gdImageCreateTrueColor (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof(unsigned char *), sy)) {
return NULL;
}
if (overflow2(sizeof(int) + sizeof(unsigned char), sx * sy)) {
return NULL;
}
// Check for OOM before doing a potentially large allocation.
auto allocsz = sizeof(gdImage)
+ sy * (sizeof(int *) + sizeof(unsigned char *))
+ sx * sy * (sizeof(int) + sizeof(unsigned char));
if (UNLIKELY(precheckOOM(allocsz))) {
// Don't throw here because GD might need to do its own cleanup.
return NULL;
}
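	// NOTE: sx * sy inside the overflow2() call above is evaluated in int and
	// can itself wrap before being tested, and none of the gdMalloc/gdCalloc
	// results below are NULL-checked before use (CWE-787).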
im = (gdImage *) gdMalloc(sizeof(gdImage));
memset(im, 0, sizeof(gdImage));
im->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; i < sy; i++) {
im->tpixels[i] = (int *) gdCalloc(sx, sizeof(int));
im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
}
im->sx = sx;
im->sy = sy;
im->transparent = (-1);
im->interlace = 0;
im->trueColor = 1;
/* 2.0.2: alpha blending is now on by default, and saving of alpha is
* off by default. This allows font antialiasing to work as expected
* on the first try in JPEGs -- quite important -- and also allows
* for smaller PNGs when saving of alpha channel is not really
* desired, which it usually isn't!
*/
im->saveAlphaFlag = 0;
im->alphaBlendingFlag = 1;
im->thick = 1;
im->AA = 0;
im->AA_polygon = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
} | CWE-787 | 24 |
ALWAYS_INLINE String serialize_impl(const Variant& value,
const SerializeOptions& opts) {
switch (value.getType()) {
case KindOfClass:
case KindOfLazyClass:
case KindOfPersistentString:
case KindOfString: {
auto const str =
isStringType(value.getType()) ? value.getStringData() :
isClassType(value.getType()) ? classToStringHelper(value.toClassVal()) :
lazyClassToStringHelper(value.toLazyClassVal());
auto const size = str->size();
if (size >= RuntimeOption::MaxSerializedStringSize) {
throw Exception("Size of serialized string (%d) exceeds max", size);
}
StringBuffer sb;
sb.append("s:");
sb.append(size);
sb.append(":\"");
sb.append(str->data(), size);
sb.append("\";");
return sb.detach();
}
case KindOfResource:
return s_Res;
case KindOfUninit:
case KindOfNull:
case KindOfBoolean:
case KindOfInt64:
case KindOfFunc:
case KindOfPersistentVec:
case KindOfVec:
case KindOfPersistentDict:
case KindOfDict:
case KindOfPersistentKeyset:
case KindOfKeyset:
case KindOfPersistentDArray:
case KindOfDArray:
case KindOfPersistentVArray:
case KindOfVArray:
case KindOfDouble:
case KindOfObject:
case KindOfClsMeth:
case KindOfRClsMeth:
case KindOfRFunc:
case KindOfRecord:
break;
}
VariableSerializer vs(VariableSerializer::Type::Serialize);
if (opts.keepDVArrays) vs.keepDVArrays();
if (opts.forcePHPArrays) vs.setForcePHPArrays();
if (opts.warnOnHackArrays) vs.setHackWarn();
if (opts.warnOnPHPArrays) vs.setPHPWarn();
if (opts.ignoreLateInit) vs.setIgnoreLateInit();
if (opts.serializeProvenanceAndLegacy) vs.setSerializeProvenanceAndLegacy();
// Keep the count so recursive calls to serialize() embed references properly.
return vs.serialize(value, true, true);
} | CWE-125 | 47 |
int ecall_restore(const char *input, uint64_t input_len, char **output,
uint64_t *output_len) {
if (!asylo::primitives::TrustedPrimitives::IsOutsideEnclave(input,
input_len) ||
!asylo::primitives::TrustedPrimitives::IsOutsideEnclave(
output_len, sizeof(uint64_t))) {
asylo::primitives::TrustedPrimitives::BestEffortAbort(
"ecall_restore: input/output found to not be in untrusted memory.");
}
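  // NOTE: the output pointer itself is never run through IsOutsideEnclave,
  // and BestEffortAbort is exactly that -- best effort -- so execution can
  // continue to the write through output below (CWE-787).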
int result = 0;
size_t tmp_output_len;
try {
result = asylo::Restore(input, static_cast<size_t>(input_len), output,
&tmp_output_len);
} catch (...) {
LOG(FATAL) << "Uncaught exception in enclave";
}
if (output_len) {
*output_len = static_cast<uint64_t>(tmp_output_len);
}
return result;
} | CWE-787 | 24 |
void groupGenerate(const std::string &rule, std::vector<Proxy> &nodelist, string_array &filtered_nodelist, bool add_direct, extra_settings &ext)
{
std::string real_rule;
if(startsWith(rule, "[]") && add_direct)
{
filtered_nodelist.emplace_back(rule.substr(2));
}
#ifndef NO_JS_RUNTIME
else if(startsWith(rule, "script:"))
{
script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx){
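        // NOTE: the path after "script:" comes from the (possibly remote)
        // config and is read and eval'd with no restriction on what file it
        // may point to.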
std::string script = fileGet(rule.substr(7), true);
try
{
ctx.eval(script);
auto filter = (std::function<std::string(const std::vector<Proxy>&)>) ctx.eval("filter");
std::string result_list = filter(nodelist);
filtered_nodelist = split(regTrim(result_list), "\n");
}
catch (qjs::exception)
{
script_print_stack(ctx);
}
}, global.scriptCleanContext);
}
#endif // NO_JS_RUNTIME
else
{
for(Proxy &x : nodelist)
{
if(applyMatcher(rule, real_rule, x) && (real_rule.empty() || regFind(x.Remark, real_rule)) && std::find(filtered_nodelist.begin(), filtered_nodelist.end(), x.Remark) == filtered_nodelist.end())
filtered_nodelist.emplace_back(x.Remark);
}
}
} | CWE-434 | 5 |
TfLiteStatus ReverseSequenceHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* seq_lengths_tensor =
GetInput(context, node, kSeqLengthsTensor);
switch (seq_lengths_tensor->type) {
case kTfLiteInt32: {
return ReverseSequenceImpl<T, int32_t>(context, node);
}
case kTfLiteInt64: {
return ReverseSequenceImpl<T, int64_t>(context, node);
}
default: {
context->ReportError(
context,
"Seq_lengths type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(seq_lengths_tensor->type));
return kTfLiteError;
}
}
return kTfLiteOk;
} | CWE-787 | 24 |
TEST_CASE_METHOD(TestFixture, "DKG AES public shares test", "[dkg-aes-pub-shares]") {
vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0);
vector<char> errMsg(BUF_LEN, 0);
int errStatus = 0;
uint32_t encLen = 0;
unsigned t = 32, n = 32;
PRINT_SRC_LINE
auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, n);
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
vector<char> errMsg1(BUF_LEN, 0);
char colon = ':';
vector<char> pubShares(10000, 0);
PRINT_SRC_LINE
status = trustedGetPublicSharesAES(eid, &errStatus, errMsg1.data(),
encryptedDKGSecret.data(), encLen, pubShares.data(), t, n);
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
vector <string> g2Strings = splitString(pubShares.data(), ',');
vector <libff::alt_bn128_G2> pubSharesG2;
for (u_int64_t i = 0; i < g2Strings.size(); i++) {
vector <string> coeffStr = splitString(g2Strings.at(i).c_str(), ':');
pubSharesG2.push_back(TestUtils::vectStringToG2(coeffStr));
}
vector<char> secret(BUF_LEN, 0);
PRINT_SRC_LINE
status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen,
(uint8_t *) secret.data());
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
signatures::Dkg dkgObj(t, n);
vector <libff::alt_bn128_Fr> poly = TestUtils::splitStringToFr(secret.data(), colon);
vector <libff::alt_bn128_G2> pubSharesDkg = dkgObj.VerificationVector(poly);
for (uint32_t i = 0; i < pubSharesDkg.size(); i++) {
libff::alt_bn128_G2 el = pubSharesDkg.at(i);
el.to_affine_coordinates();
}
REQUIRE(pubSharesG2 == pubSharesDkg);
} | CWE-787 | 24 |
inline void StringData::setSize(int len) {
assertx(!isImmutable() && !hasMultipleRefs());
assertx(len >= 0 && len <= capacity());
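  // NOTE: assertx() compiles away in release builds, so len reaches the write
  // below unvalidated (CWE-787).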
mutableData()[len] = 0;
m_lenAndHash = len;
assertx(m_hash == 0);
assertx(checkSane());
} | CWE-787 | 24 |
TEST_F(SingleAllowMissingInOrListTest, MissingIssToken) {
EXPECT_CALL(mock_cb_, onComplete(Status::Ok));
auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ES256WithoutIssToken}};
context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);
verifier_->verify(context_);
EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));
} | CWE-303 | 89 |
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameDNSMatched) {
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.MergeFrom(TestUtility::createRegexMatcher(".*.example.com"));
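  // NOTE: the dot before "example" is unescaped, so this regex also accepts
  // names such as "evilexample.com" -- the over-broad SAN match this CWE-295
  // sample captures.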
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
subject_alt_name_matchers;
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers));
} | CWE-295 | 52 |
Jsi_Value *Jsi_ValueArrayIndex(Jsi_Interp *interp, Jsi_Value *args, int index)
{
Jsi_Obj *obj = args->d.obj;
Jsi_Value *v;
assert(args->vt == JSI_VT_OBJECT);
if (obj->isarrlist && obj->arr)
return ((index < 0 || (uint)index >= obj->arrCnt) ? NULL : obj->arr[index]);
char unibuf[100];
Jsi_NumberItoA10(index, unibuf, sizeof(unibuf));
v = Jsi_TreeObjGetValue(args->d.obj, unibuf, 0);
return v;
} | CWE-120 | 44 |
static unsigned HuffmanTree_makeFromFrequencies(HuffmanTree* tree, const unsigned* frequencies,
size_t mincodes, size_t numcodes, unsigned maxbitlen)
{
unsigned error = 0;
while(!frequencies[numcodes - 1] && numcodes > mincodes) numcodes--; /*trim zeroes*/
tree->maxbitlen = maxbitlen;
tree->numcodes = (unsigned)numcodes; /*number of symbols*/
tree->lengths = (unsigned*)realloc(tree->lengths, numcodes * sizeof(unsigned));
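  /* NOTE: assigning realloc() straight back to tree->lengths loses the old
  block if the call fails -- likely the unchecked-return-value concern behind
  this sample's CWE-252 label. */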
if(!tree->lengths) return 83; /*alloc fail*/
/*initialize all lengths to 0*/
memset(tree->lengths, 0, numcodes * sizeof(unsigned));
error = lodepng_huffman_code_lengths(tree->lengths, frequencies, numcodes, maxbitlen);
if(!error) error = HuffmanTree_makeFromLengths2(tree);
return error;
} | CWE-252 | 49 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
FillDiagHelper(input, output);
return kTfLiteOk;
} | CWE-125 | 47 |
void CWebServer::GetFloorplanImage(WebEmSession & session, const request& req, reply & rep)
{
std::string idx = request::findValue(&req, "idx");
if (idx == "") {
return;
}
std::vector<std::vector<std::string> > result;
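			// NOTE: idx comes straight from the request and is pasted into
			// the SQL below as a bare %s -- textbook SQL injection (CWE-89);
			// it should be bound as a parameter or validated as numeric.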
result = m_sql.safe_queryBlob("SELECT Image FROM Floorplans WHERE ID=%s", idx.c_str());
if (result.empty())
return;
reply::set_content(&rep, result[0][0].begin(), result[0][0].end());
std::string oname = "floorplan";
if (result[0][0].size() > 10)
{
if (result[0][0][0] == 'P')
oname += ".png";
else if (result[0][0][0] == -1)
oname += ".jpg";
else if (result[0][0][0] == 'B')
oname += ".bmp";
else if (result[0][0][0] == 'G')
oname += ".gif";
}
reply::add_header_attachment(&rep, oname);
} | CWE-89 | 0 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteTensor* hits = GetOutput(context, node, 1);
const TfLiteTensor* lookup = GetInput(context, node, 0);
const TfLiteTensor* key = GetInput(context, node, 1);
const TfLiteTensor* value = GetInput(context, node, 2);
const int num_rows = SizeOfDimension(value, 0);
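  // NOTE: nothing guarantees num_rows > 0 before the division below, and the
  // tensor lookups above are all unchecked -- either can push this kernel out
  // of bounds (CWE-125).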
const int row_bytes = value->bytes / num_rows;
void* pointer = nullptr;
DynamicBuffer buf;
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int idx = -1;
pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows,
sizeof(int32_t), greater);
if (pointer != nullptr) {
idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) /
sizeof(int32_t);
}
if (idx >= num_rows || idx < 0) {
if (output->type == kTfLiteString) {
buf.AddString(nullptr, 0);
} else {
memset(output->data.raw + i * row_bytes, 0, row_bytes);
}
hits->data.uint8[i] = 0;
} else {
if (output->type == kTfLiteString) {
buf.AddString(GetString(value, idx));
} else {
memcpy(output->data.raw + i * row_bytes,
value->data.raw + idx * row_bytes, row_bytes);
}
hits->data.uint8[i] = 1;
}
}
if (output->type == kTfLiteString) {
buf.WriteToTensorAsVector(output);
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input1->type) {
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
} | CWE-787 | 24 |
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
int dy = y1 - y0;
int adx = x1 - x0;
int ady = abs(dy);
int base;
int x=x0,y=y0;
int err = 0;
int sy;
#ifdef STB_VORBIS_DIVIDE_TABLE
if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
if (dy < 0) {
base = -integer_divide_table[ady][adx];
sy = base-1;
} else {
base = integer_divide_table[ady][adx];
sy = base+1;
}
} else {
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
}
#else
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
#endif
ady -= abs(base) * adx;
if (x1 > n) x1 = n;
if (x < x1) {
LINE_OP(output[x], inverse_db_table[y]);
for (++x; x < x1; ++x) {
err += ady;
if (err >= adx) {
err -= adx;
y += sy;
} else
y += base;
LINE_OP(output[x], inverse_db_table[y]);
}
}
} | CWE-617 | 51 |
QInt32() {} | CWE-908 | 48 |
void nego_process_negotiation_failure(rdpNego* nego, wStream* s)
{
BYTE flags;
UINT16 length;
UINT32 failureCode;
WLog_DBG(TAG, "RDP_NEG_FAILURE");
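	// NOTE: seven bytes are consumed with no prior Stream_GetRemainingLength()
	// guard, so a truncated PDU makes these reads run past the stream buffer
	// (CWE-125).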
Stream_Read_UINT8(s, flags);
Stream_Read_UINT16(s, length);
Stream_Read_UINT32(s, failureCode);
switch (failureCode)
{
case SSL_REQUIRED_BY_SERVER:
WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER");
break;
case SSL_NOT_ALLOWED_BY_SERVER:
WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER");
nego->sendNegoData = TRUE;
break;
case SSL_CERT_NOT_ON_SERVER:
WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER");
nego->sendNegoData = TRUE;
break;
case INCONSISTENT_FLAGS:
WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS");
break;
case HYBRID_REQUIRED_BY_SERVER:
WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER");
break;
default:
WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode);
break;
}
nego->state = NEGO_STATE_FAIL;
} | CWE-125 | 47 |
TfLiteRegistration OkOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  // Set the output size to the input size in OkOp::Prepare(). The body exists
  // only to exercise the framework in Prepare; the input and output tensors
  // are not otherwise used.
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* in_tensor = GetInput(context, node, 0);
TfLiteTensor* out_tensor = GetOutput(context, node, 0);
TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
return context->ResizeTensor(context, out_tensor, new_size);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
};
return reg;
} | CWE-787 | 24 |
void * alloc_top(size_t size) {
top -= size;
if (top < bottom) {new_chunk(); top -= size;}
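    // NOTE: the subtraction is not re-checked after new_chunk(), so a request
    // larger than a whole chunk still leaves top below bottom and the caller
    // writing out of bounds (CWE-787).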
return top;
} | CWE-787 | 24 |
UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) {
if(!isWritable() || srcLength == 0 || srcChars == NULL) {
return *this;
}
// Perform all remaining operations relative to srcChars + srcStart.
// From this point forward, do not use srcStart.
srcChars += srcStart;
if(srcLength < 0) {
// get the srcLength if necessary
if((srcLength = u_strlen(srcChars)) == 0) {
return *this;
}
}
int32_t oldLength = length();
int32_t newLength = oldLength + srcLength;
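  // NOTE: this is 32-bit signed addition; two sufficiently long strings
  // overflow newLength before any capacity check runs (CWE-190 -- the ICU
  // doAppend overflow fixed as CVE-2020-10531).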
// Check for append onto ourself
const UChar* oldArray = getArrayStart();
if (isBufferWritable() &&
oldArray < srcChars + srcLength &&
srcChars < oldArray + oldLength) {
// Copy into a new UnicodeString and start over
UnicodeString copy(srcChars, srcLength);
if (copy.isBogus()) {
setToBogus();
return *this;
}
return doAppend(copy.getArrayStart(), 0, srcLength);
}
// optimize append() onto a large-enough, owned string
if((newLength <= getCapacity() && isBufferWritable()) ||
cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) {
UChar *newArray = getArrayStart();
// Do not copy characters when
// UChar *buffer=str.getAppendBuffer(...);
// is followed by
// str.append(buffer, length);
// or
// str.appendString(buffer, length)
// or similar.
if(srcChars != newArray + oldLength) {
us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength);
}
setLength(newLength);
}
return *this;
} | CWE-190 | 19 |