code (string, 12–2.05k chars) | label_name (string, 6–8 chars) | label (int64, 0–95)
---|---|---|
TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node,
bool (*func)(bool, bool)) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>(
GetTensorShape(input1), GetTensorData<bool>(input1),
GetTensorShape(input2), GetTensorData<bool>(input2),
GetTensorShape(output), GetTensorData<bool>(output), func);
} else {
reference_ops::BinaryFunction<bool, bool, bool>(
GetTensorShape(input1), GetTensorData<bool>(input1),
GetTensorShape(input2), GetTensorData<bool>(input2),
GetTensorShape(output), GetTensorData<bool>(output), func);
}
return kTfLiteOk;
} | CWE-125 | 47 |
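Most of the TFLite kernels in this dump share one root cause behind their CWE-125/CWE-787 labels: `GetInput`/`GetOutput` return `nullptr` when a malformed model references a missing tensor, and the pointer is dereferenced unchecked. A minimal hardened sketch of the function above, assuming the `GetInputSafe`/`GetOutputSafe` accessors from newer `kernel_util.h` are available; the dispatch body is unchanged:

```cpp
TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node,
                         bool (*func)(bool, bool)) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  if (data->requires_broadcast) {
    reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>(
        GetTensorShape(input1), GetTensorData<bool>(input1),
        GetTensorShape(input2), GetTensorData<bool>(input2),
        GetTensorShape(output), GetTensorData<bool>(output), func);
  } else {
    reference_ops::BinaryFunction<bool, bool, bool>(
        GetTensorShape(input1), GetTensorData<bool>(input1),
        GetTensorShape(input2), GetTensorData<bool>(input2),
        GetTensorShape(output), GetTensorData<bool>(output), func);
  }
  return kTfLiteOk;
}
```

The same null-check discipline applies to the other `GetInput`/`GetOutput` call sites in the rows below (the comparison, neg, mul, and prepare kernels), so the pattern is shown only once here.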
R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp) {
ut64 sz = 0;
if (evp == NULL) {
return sz;
}
// evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
// evp->value = r_bin_java_element_value_new (bin, offset+2);
if (evp->value) {
sz += r_bin_java_element_value_calc_size (evp->value);
}
return sz;
} | CWE-788 | 87 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3);
TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input_wav->type, output->type);
TF_LITE_ENSURE_TYPES_EQ(context, input_rate->type, kTfLiteInt32);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
output_size->data[0] = input_wav->dims->data[0];
output_size->data[1] = input_wav->dims->data[1];
output_size->data[2] = params->dct_coefficient_count;
return context->ResizeTensor(context, output, output_size);
} | CWE-787 | 24 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (output->type) {
case kTfLiteInt32: {
// TensorFlow does not support negative exponents for int32.
TF_LITE_ENSURE_OK(context, CheckValue(context, input2));
PowImpl<int32_t>(input1, input2, output, data->requires_broadcast);
break;
}
case kTfLiteFloat32: {
PowImpl<float>(input1, input2, output, data->requires_broadcast);
break;
}
default: {
context->ReportError(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus EvalHashtableFind(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
int resource_id = input_resource_id_tensor->data.i32[0];
const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor);
const TfLiteTensor* default_value_tensor =
GetInput(context, node, kDefaultValueTensor);
TfLiteTensor* output_tensor = GetOutput(context, node, 0);
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
auto* lookup = resource::GetHashtableResource(&resources, resource_id);
TF_LITE_ENSURE(context, lookup != nullptr);
TF_LITE_ENSURE_STATUS(
lookup->CheckKeyAndValueTypes(context, key_tensor, output_tensor));
auto result =
lookup->Lookup(context, key_tensor, output_tensor, default_value_tensor);
return result;
} | CWE-787 | 24 |
TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::LessEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::LessEqualFn>(
input1, input2, output, requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
auto data_type = output->type;
TF_LITE_ENSURE(context,
data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 ||
data_type == kTfLiteInt8 || data_type == kTfLiteInt32 ||
data_type == kTfLiteInt64);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const int block_size = params->block_size;
const int input_height = input->dims->data[1];
const int input_width = input->dims->data[2];
int output_height = input_height / block_size;
int output_width = input_width / block_size;
TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size);
TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = output_height;
output_size->data[2] = output_width;
output_size->data[3] = input->dims->data[3] * block_size * block_size;
return context->ResizeTensor(context, output, output_size);
} | CWE-125 | 47 |
ALWAYS_INLINE String serialize_impl(const Variant& value,
const SerializeOptions& opts) {
switch (value.getType()) {
case KindOfClass:
case KindOfLazyClass:
case KindOfPersistentString:
case KindOfString: {
auto const str =
isStringType(value.getType()) ? value.getStringData() :
isClassType(value.getType()) ? classToStringHelper(value.toClassVal()) :
lazyClassToStringHelper(value.toLazyClassVal());
auto const size = str->size();
if (size >= RuntimeOption::MaxSerializedStringSize) {
throw Exception("Size of serialized string (%d) exceeds max", size);
}
StringBuffer sb;
sb.append("s:");
sb.append(size);
sb.append(":\"");
sb.append(str->data(), size);
sb.append("\";");
return sb.detach();
}
case KindOfResource:
return s_Res;
case KindOfUninit:
case KindOfNull:
case KindOfBoolean:
case KindOfInt64:
case KindOfFunc:
case KindOfPersistentVec:
case KindOfVec:
case KindOfPersistentDict:
case KindOfDict:
case KindOfPersistentKeyset:
case KindOfKeyset:
case KindOfPersistentDArray:
case KindOfDArray:
case KindOfPersistentVArray:
case KindOfVArray:
case KindOfDouble:
case KindOfObject:
case KindOfClsMeth:
case KindOfRClsMeth:
case KindOfRFunc:
case KindOfRecord:
break;
}
VariableSerializer vs(VariableSerializer::Type::Serialize);
if (opts.keepDVArrays) vs.keepDVArrays();
if (opts.forcePHPArrays) vs.setForcePHPArrays();
if (opts.warnOnHackArrays) vs.setHackWarn();
if (opts.warnOnPHPArrays) vs.setPHPWarn();
if (opts.ignoreLateInit) vs.setIgnoreLateInit();
if (opts.serializeProvenanceAndLegacy) vs.setSerializeProvenanceAndLegacy();
// Keep the count so recursive calls to serialize() embed references properly.
return vs.serialize(value, true, true);
} | CWE-787 | 24 |
static unsigned short get_ushort(const unsigned char *data)
{
unsigned short val = *(const unsigned short *)data;
#ifdef OPJ_BIG_ENDIAN
val = ((val & 0xffU) << 8) | (val >> 8);
#endif
return val;
} | CWE-787 | 24 |
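Beyond endianness, this helper has two hazards: the cast produces an unaligned type-punned load, and nothing stops the two-byte read from running past the end of the buffer (the CWE label). A hedged sketch; the `remaining` parameter is an assumption added for illustration, since the original signature carries no length information:

```cpp
#include <string.h>

static int get_ushort_checked(const unsigned char *data, size_t remaining,
                              unsigned short *out)
{
    unsigned short val;
    if (remaining < sizeof(val)) return 0;   /* not enough bytes left */
    memcpy(&val, data, sizeof(val));         /* alignment-safe load */
#ifdef OPJ_BIG_ENDIAN
    val = (unsigned short)(((val & 0xffU) << 8) | (val >> 8));
#endif
    *out = val;
    return 1;
}
```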
Status OpLevelCostEstimator::PredictMaxPool(const OpContext& op_context,
NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
// x: op_info.inputs(0)
ConvolutionDimensions dims = OpDimensionsFromInputs(
op_info.inputs(0).shape(), op_info, &found_unknown_shapes);
// kx * ky - 1 comparisons per output (when kx * ky > 1),
// or 1 copy per output (when kx * ky == 1).
int per_output_ops = dims.kx * dims.ky == 1 ? 1 : dims.kx * dims.ky - 1;
int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * per_output_ops;
node_costs->num_compute_ops = ops;
int64_t input_size = 0;
if (dims.ky >= dims.sy) {
input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
} else { // dims.ky < dims.sy
// Vertical stride is larger than vertical kernel; assuming row-major
format, skip unnecessary rows (or read every ky rows per sy rows, as the
// others are not used for output).
const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
}
node_costs->num_input_bytes_accessed = {input_size};
const int64_t output_size =
CalculateOutputSize(op_info, &found_unknown_shapes);
node_costs->num_output_bytes_accessed = {output_size};
node_costs->max_memory = output_size;
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return Status::OK();
} | CWE-369 | 60 |
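The divide-by-zero (CWE-369) is hidden inside `OpDimensionsFromInputs`, which computes `ox`/`oy` by dividing by pooling strides taken from untrusted op attributes. A guard sketch to run before those divisions; the variable names and the `errors::InvalidArgument` plumbing are assumptions about how the helper could be restructured to report failure:

```cpp
// Reject non-positive strides before they become divisors; otherwise
// ox = ix / stride_x (and the oy counterpart) faults on a crafted graph.
if (stride_x <= 0 || stride_y <= 0) {
  return errors::InvalidArgument("Stride must be positive for ",
                                 op_info.op());
}
```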
int TLSInStream::readTLS(U8* buf, int len, bool wait)
{
int n;
n = in->check(1, 1, wait);
if (n == 0)
return 0;
n = gnutls_record_recv(session, (void *) buf, len);
if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN)
return 0;
if (n < 0) throw TLSException("readTLS", n);
return n;
} | CWE-787 | 24 |
static void jsiDumpInstr(Jsi_Interp *interp, jsi_Pstate *ps, Jsi_Value *_this,
jsi_TryList *trylist, jsi_OpCode *ip, Jsi_OpCodes *opcodes)
{
int i;
char buf[200];
jsi_code_decode(interp, ip, ip - opcodes->codes, buf, sizeof(buf));
Jsi_Printf(interp, jsi_Stderr, "%p: %-30.200s : THIS=%s, STACK=[", ip, buf, jsi_evalprint(_this));
for (i = 0; i < interp->framePtr->Sp; ++i) {
Jsi_Printf(interp, jsi_Stderr, "%s%s", (i>0?", ":""), jsi_evalprint(_jsi_STACKIDX(i)));
}
Jsi_Printf(interp, jsi_Stderr, "]");
if (ip->fname) {
const char *fn = ip->fname, *cp = Jsi_Strrchr(fn, '/');
if (cp) fn = cp+1;
Jsi_Printf(interp, jsi_Stderr, ", %s:%d", fn, ip->Line);
}
Jsi_Printf(interp, jsi_Stderr, "\n");
jsi_TryList *tlt = trylist;
for (i = 0; tlt; tlt = tlt->next) i++;
if (ps->last_exception)
Jsi_Printf(interp, jsi_Stderr, "TL: %d, excpt: %s\n", i, jsi_evalprint(ps->last_exception));
} | CWE-120 | 44 |
void PropertiesWidget::loadTorrentInfos(BitTorrent::TorrentHandle *const torrent)
{
clear();
m_torrent = torrent;
downloaded_pieces->setTorrent(m_torrent);
pieces_availability->setTorrent(m_torrent);
if (!m_torrent) return;
// Save path
updateSavePath(m_torrent);
// Hash
hash_lbl->setText(m_torrent->hash());
PropListModel->model()->clear();
if (m_torrent->hasMetadata()) {
// Creation date
lbl_creationDate->setText(m_torrent->creationDate().toString(Qt::DefaultLocaleShortDate));
label_total_size_val->setText(Utils::Misc::friendlyUnit(m_torrent->totalSize()));
// Comment
comment_text->setText(Utils::Misc::parseHtmlLinks(m_torrent->comment()));
// URL seeds
loadUrlSeeds();
label_created_by_val->setText(m_torrent->creator());
// List files in torrent
PropListModel->model()->setupModelData(m_torrent->info());
filesList->setExpanded(PropListModel->index(0, 0), true);
// Load file priorities
PropListModel->model()->updateFilesPriorities(m_torrent->filePriorities());
}
// Load dynamic data
loadDynamicData();
} | CWE-79 | 1 |
int64_t OpLevelCostEstimator::CalculateTensorSize(
const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
int64_t count = CalculateTensorElementCount(tensor, found_unknown_shapes);
int size = DataTypeSize(BaseType(tensor.dtype()));
VLOG(2) << "Count: " << count << " DataTypeSize: " << size;
return count * size;
} | CWE-190 | 19 |
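`count * size` is a classic CWE-190: both factors can be influenced through the graph, and the product silently wraps. TensorFlow provides an overflow-checked multiply, `MultiplyWithoutOverflow`, which returns -1 on overflow; a sketch of the guarded version under that assumption:

```cpp
#include "tensorflow/core/util/overflow.h"

int64_t OpLevelCostEstimator::CalculateTensorSize(
    const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
  int64_t count = CalculateTensorElementCount(tensor, found_unknown_shapes);
  int size = DataTypeSize(BaseType(tensor.dtype()));
  VLOG(2) << "Count: " << count << " DataTypeSize: " << size;
  int64_t tensor_size = MultiplyWithoutOverflow(count, size);
  if (tensor_size < 0) {
    // Propagate "unknown" instead of a wrapped size.
    *found_unknown_shapes = true;
    return -1;
  }
  return tensor_size;
}
```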
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
const double real_multiplier =
input1->params.scale / (input2->params.scale * output->params.scale);
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output, output_size);
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* seq_lengths = GetInput(context, node, kSeqLengthsTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1);
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 &&
input->type != kTfLiteInt64) {
context->ReportError(context,
"Type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
if (seq_lengths->type != kTfLiteInt32 && seq_lengths->type != kTfLiteInt64) {
context->ReportError(
context, "Seq_lengths type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(seq_lengths->type));
return kTfLiteError;
}
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
return context->ResizeTensor(context, output, output_shape);
} | CWE-125 | 47 |
Variant HHVM_METHOD(XMLReader, expand,
const Variant& basenode /* = null */) {
auto* data = Native::data<XMLReader>(this_);
req::ptr<XMLDocumentData> doc;
xmlDocPtr docp = nullptr;
SYNC_VM_REGS_SCOPED();
if (!basenode.isNull()) {
auto dombasenode = Native::data<DOMNode>(basenode.toObject());
doc = dombasenode->doc();
docp = doc->docp();
if (docp == nullptr) {
raise_warning("Invalid State Error");
return false;
}
}
if (data->m_ptr) {
xmlNodePtr node = xmlTextReaderExpand(data->m_ptr);
if (node == nullptr) {
raise_warning("An Error Occurred while expanding");
return false;
} else {
xmlNodePtr nodec = xmlDocCopyNode(node, docp, 1);
if (nodec == nullptr) {
raise_notice("Cannot expand this node type");
return false;
} else {
return php_dom_create_object(nodec, doc);
}
}
}
raise_warning("Load Data before trying to read");
return false;
} | CWE-787 | 24 |
TypedValue HHVM_FUNCTION(substr_compare,
const String& main_str,
const String& str,
int offset,
int length /* = INT_MAX */,
bool case_insensitivity /* = false */) {
int s1_len = main_str.size();
int s2_len = str.size();
if (length <= 0) {
raise_warning("The length must be greater than zero");
return make_tv<KindOfBoolean>(false);
}
if (offset < 0) {
offset = s1_len + offset;
if (offset < 0) offset = 0;
}
if (offset >= s1_len) {
raise_warning("The start position cannot exceed initial string length");
return make_tv<KindOfBoolean>(false);
}
int cmp_len = s1_len - offset;
if (cmp_len < s2_len) cmp_len = s2_len;
if (cmp_len > length) cmp_len = length;
const char *s1 = main_str.data();
if (case_insensitivity) {
return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len));
}
return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len));
} | CWE-787 | 24 |
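The clamp `if (cmp_len < s2_len) cmp_len = s2_len;` can raise the comparison length above the bytes remaining in `main_str`, so the compare reads past the buffer. A hedged sketch that caps every length at its own buffer; it simplifies PHP's exact comparison semantics and is illustrative only:

```cpp
#include <algorithm>

int remaining = s1_len - offset;            // bytes left in main_str
int cmp_len = std::min(length, remaining);  // never read past main_str
if (case_insensitivity) {
  // bstrcasecmp takes a separate length per buffer, so cap each one.
  return tvReturn(bstrcasecmp(s1 + offset, cmp_len,
                              str.data(), std::min(cmp_len, s2_len)));
}
return tvReturn(string_ncmp(s1 + offset, str.data(),
                            std::min(cmp_len, s2_len)));
```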
void Logger::addPeer(const QString &ip, bool blocked, const QString &reason)
{
QWriteLocker locker(&lock);
Log::Peer temp = { peerCounter++, QDateTime::currentMSecsSinceEpoch(), ip, blocked, reason };
m_peers.push_back(temp);
if (m_peers.size() >= MAX_LOG_MESSAGES)
m_peers.pop_front();
emit newLogPeer(temp);
} | CWE-79 | 1 |
AP4_HdlrAtom::AP4_HdlrAtom(AP4_UI32 size,
AP4_UI08 version,
AP4_UI32 flags,
AP4_ByteStream& stream) :
AP4_Atom(AP4_ATOM_TYPE_HDLR, size, version, flags)
{
AP4_UI32 predefined;
stream.ReadUI32(predefined);
stream.ReadUI32(m_HandlerType);
stream.ReadUI32(m_Reserved[0]);
stream.ReadUI32(m_Reserved[1]);
stream.ReadUI32(m_Reserved[2]);
// read the name unless it is empty
if (size < AP4_FULL_ATOM_HEADER_SIZE+20) return;
AP4_UI32 name_size = size-(AP4_FULL_ATOM_HEADER_SIZE+20);
char* name = new char[name_size+1];
if (name == NULL) return;
stream.Read(name, name_size);
name[name_size] = '\0'; // force a null termination
// handle a special case: the Quicktime files have a pascal
// string here, but ISO MP4 files have a C string.
// we try to detect a pascal encoding and correct it.
if (name[0] == name_size-1) {
m_HandlerName = name+1;
} else {
m_HandlerName = name;
}
delete[] name;
} | CWE-125 | 47 |
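Two issues feed the CWE-125 label: the result of `stream.Read` is ignored, so a truncated atom leaves `name` partly uninitialized, and the pascal-string probe reads `name[0]` even when `name_size` is 0. A sketch of the constructor's tail with both guarded; `AP4_FAILED` is Bento4's usual result-check macro:

```cpp
AP4_Result result = stream.Read(name, name_size);
if (AP4_FAILED(result)) {
    delete[] name;
    return;                          // truncated atom: keep name empty
}
name[name_size] = '\0';              // force a null termination
if (name_size > 0 && name[0] == name_size - 1) {
    m_HandlerName = name + 1;        // Quicktime pascal string
} else {
    m_HandlerName = name;            // ISO C string
}
delete[] name;
```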
TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::LessFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// TODO(ahentz): these two checks would make the new implementation
// incompatible with some existing models, where params is not specified. It
// is OK not to have them because toco would have set input and output types
// to match the parameters.
// auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data);
// TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type);
// TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type);
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-787 | 24 |
FdOutStream::FdOutStream(int fd_, bool blocking_, int timeoutms_, int bufSize_)
: fd(fd_), blocking(blocking_), timeoutms(timeoutms_),
bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0)
{
ptr = start = sentUpTo = new U8[bufSize];
end = start + bufSize;
gettimeofday(&lastWrite, NULL);
} | CWE-787 | 24 |
setSanMatchers(std::vector<envoy::type::matcher::v3::StringMatcher> san_matchers) {
san_matchers_ = san_matchers;
return *this;
} | CWE-295 | 52 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
case kTfLiteFloat32:
reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
break;
default:
context->ReportError(
context,
"Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
int resource_id = input_resource_id_tensor->data.i32[0];
auto& resources = subgraph->resources();
auto* variable = resource::GetResourceVariable(&resources, resource_id);
TF_LITE_ENSURE(context, variable != nullptr);
TfLiteTensor* variable_tensor = variable->GetTensor();
TfLiteTensor* output = GetOutput(context, node, kOutputValue);
TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(
context, output, TfLiteIntArrayCopy(variable_tensor->dims)));
memcpy(output->data.raw, variable_tensor->data.raw, output->bytes);
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type != kTfLiteComplex64) {
context->ReportError(context,
"Type '%s' for output is not supported by rfft2d.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
// Resize the output tensor if the fft_length tensor is not constant.
// Otherwise, check if the output shape is correct.
if (!IsConstantTensor(fft_length)) {
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
} else {
int num_dims_output = NumDimensions(output);
const RuntimeShape output_shape = GetTensorShape(output);
TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
TF_LITE_ENSURE(context, num_dims_output >= 2);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
fft_length_data[0]);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
fft_length_data[1] / 2 + 1);
}
return Rfft2dHelper(context, node);
} | CWE-787 | 24 |
TfLiteStatus StoreAllDecodedSequences(
TfLiteContext* context,
const std::vector<std::vector<std::vector<int>>>& sequences,
TfLiteNode* node, int top_paths) {
const int32_t batch_size = sequences.size();
std::vector<int32_t> num_entries(top_paths, 0);
// Calculate num_entries per path
for (const auto& batch_s : sequences) {
TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
for (int p = 0; p < top_paths; ++p) {
num_entries[p] += batch_s[p].size();
}
}
for (int p = 0; p < top_paths; ++p) {
const int32_t p_num = num_entries[p];
// Resize the decoded outputs.
TfLiteTensor* indices = GetOutput(context, node, p);
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));
TfLiteTensor* values = GetOutput(context, node, p + top_paths);
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));
TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths);
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));
int32_t max_decoded = 0;
int32_t offset = 0;
int32_t* indices_data = GetTensorData<int32_t>(indices);
int32_t* values_data = GetTensorData<int32_t>(values);
int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
for (int b = 0; b < batch_size; ++b) {
auto& p_batch = sequences[b][p];
int32_t num_decoded = p_batch.size();
max_decoded = std::max(max_decoded, num_decoded);
std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
indices_data[offset * 2] = b;
indices_data[offset * 2 + 1] = t;
}
}
decoded_shape_data[0] = batch_size;
decoded_shape_data[1] = max_decoded;
}
return kTfLiteOk;
} | CWE-125 | 47 |
QUInt8() {} | CWE-908 | 48 |
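The empty default constructor is the whole CWE-908: the wrapped integer is left indeterminate. Assuming the class holds a single integral member named `value` (as Eigen-style quantized wrappers do), the sketch is one initializer:

```cpp
QUInt8() : value(0) {}  // zero-initialize instead of leaving it indeterminate
```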
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
auto* params = reinterpret_cast<TfLiteShapeParams*>(node->builtin_data);
switch (params->out_type) {
case kTfLiteInt32:
output->type = kTfLiteInt32;
break;
case kTfLiteInt64:
output->type = kTfLiteInt64;
break;
default:
context->ReportError(context, "Unknown shape output data type: %d",
params->out_type);
return kTfLiteError;
}
// By design, the input shape is always known at the time of Prepare, even
// if the preceding op that generates |input| is dynamic. Thus, we can
// always compute the shape immediately, without waiting for Eval.
SetTensorToPersistentRo(output);
// Shape always produces a 1-dimensional output tensor, where each output
// element is the length of the corresponding input tensor's dimension.
TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
output_size->data[0] = NumDimensions(input);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
TFLITE_DCHECK_EQ(NumDimensions(output), 1);
TFLITE_DCHECK_EQ(SizeOfDimension(output, 0), NumDimensions(input));
// Immediately propagate the known shape to the output tensor. This allows
// downstream ops that rely on the value to use it during prepare.
switch (output->type) {
case kTfLiteInt32:
ExtractShape(input, GetTensorData<int32_t>(output));
break;
case kTfLiteInt64:
ExtractShape(input, GetTensorData<int64_t>(output));
break;
default:
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-787 | 24 |
DeletionConfirmationDlg(QWidget *parent, const int &size, const QString &name, bool defaultDeleteFiles): QDialog(parent) {
setupUi(this);
if (size == 1)
label->setText(tr("Are you sure you want to delete '%1' from the transfer list?", "Are you sure you want to delete 'ubuntu-linux-iso' from the transfer list?").arg(name));
else
label->setText(tr("Are you sure you want to delete these %1 torrents from the transfer list?", "Are you sure you want to delete these 5 torrents from the transfer list?").arg(QString::number(size)));
// Icons
lbl_warn->setPixmap(GuiIconProvider::instance()->getIcon("dialog-warning").pixmap(lbl_warn->height()));
lbl_warn->setFixedWidth(lbl_warn->height());
rememberBtn->setIcon(GuiIconProvider::instance()->getIcon("object-locked"));
move(Utils::Misc::screenCenter(this));
checkPermDelete->setChecked(defaultDeleteFiles || Preferences::instance()->deleteTorrentFilesAsDefault());
connect(checkPermDelete, SIGNAL(clicked()), this, SLOT(updateRememberButtonState()));
buttonBox->button(QDialogButtonBox::Cancel)->setFocus();
} | CWE-79 | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
FrontendReset(data->state);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (data->out_float) {
GenerateFeatures<float>(data, input, output);
} else {
GenerateFeatures<int32>(data, input, output);
}
return kTfLiteOk;
} | CWE-787 | 24 |
CFontFileBase(char *sFile, int nLen, bool bFreeFileData)
{
m_sFileData = m_sFile = (unsigned char *)sFile;
m_nLen = nLen;
m_bFreeFileData = bFreeFileData;
m_nPos = 0;
} | CWE-787 | 24 |
int FileInStream::overrun(int itemSize, int nItems, bool wait)
{
if (itemSize > (int)sizeof(b))
throw Exception("FileInStream overrun: max itemSize exceeded");
if (end - ptr != 0)
memmove(b, ptr, end - ptr);
end -= ptr - b;
ptr = b;
while (end < b + itemSize) {
size_t n = fread((U8 *)end, b + sizeof(b) - end, 1, file);
if (n == 0) {
if (ferror(file))
throw SystemException("fread", errno);
if (feof(file))
throw EndOfStream();
return 0;
}
end += b + sizeof(b) - end;
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | CWE-787 | 24 |
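Two defects interact in the read loop above: `fread`'s size and count arguments are transposed (one item of N bytes, so any short read reports 0), and `end` is then advanced by the full remaining capacity rather than by what was actually read, leaving the stream pointing at uninitialized bytes. A sketch of the conventional repair:

```cpp
while (end < b + itemSize) {
  // Byte-sized items: a short read still returns the byte count.
  size_t n = fread((U8 *)end, 1, b + sizeof(b) - end, file);
  if (n == 0) {
    if (ferror(file))
      throw SystemException("fread", errno);
    if (feof(file))
      throw EndOfStream();
    return 0;
  }
  end += n;  // advance by what was actually read
}
```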
req::ptr<XMLDocumentData> doc() const { return m_node->doc(); } | CWE-787 | 24 |
int Read(void* pDestBuffer, int nSize)
{
if ( m_nPos + nSize >= m_nLen )
nSize = m_nLen - m_nPos - 1;
memcpy( pDestBuffer, (m_sFile + m_nPos), nSize );
m_nPos += nSize;
return nSize;
} | CWE-787 | 24 |
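The clamp `nSize = m_nLen - m_nPos - 1` is wrong twice: it silently drops the last byte, and once `m_nPos` reaches `m_nLen` it makes `nSize` negative, which `memcpy` receives as an enormous `size_t` (the CWE-787). A bounded sketch:

```cpp
int Read(void* pDestBuffer, int nSize)
{
    if (nSize < 0 || m_nPos >= m_nLen)
        return 0;                     // nothing left to read
    if (nSize > m_nLen - m_nPos)
        nSize = m_nLen - m_nPos;      // clamp to the bytes remaining
    memcpy(pDestBuffer, m_sFile + m_nPos, nSize);
    m_nPos += nSize;
    return nSize;
}
```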
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err)
{
char *colon;
char *host = NULL;
#ifdef HAVE_IPV6
char *p;
if (*(str) == '[' && str_len > 1) {
/* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */
p = memchr(str + 1, ']', str_len - 2);
if (!p || *(p + 1) != ':') {
if (get_err) {
*err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str);
}
return NULL;
}
*portno = atoi(p + 2);
return estrndup(str + 1, p - str - 1);
}
#endif
if (str_len) {
colon = memchr(str, ':', str_len - 1);
} else {
colon = NULL;
}
if (colon) {
*portno = atoi(colon + 1);
host = estrndup(str, colon - str);
} else {
if (get_err) {
*err = strpprintf(0, "Failed to parse address \"%s\"", str);
}
return NULL;
}
return host;
} | CWE-918 | 16 |
TestCertificateValidationContextConfig(
envoy::config::core::v3::TypedExtensionConfig config, bool allow_expired_certificate = false,
std::vector<envoy::type::matcher::v3::StringMatcher> san_matchers = {})
: allow_expired_certificate_(allow_expired_certificate), api_(Api::createApiForTest()),
custom_validator_config_(config), san_matchers_(san_matchers){}; | CWE-295 | 52 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-125 | 47 |
TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context,
TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
// Make sure that the input is in uint8_t with at least 1 data entry.
const TfLiteTensor* input = tflite::GetInput(context, node, kInputTensor);
if (input->type != kTfLiteUInt8) return kTfLiteError;
if (NumElements(input->dims) == 0) return kTfLiteError;
// Allocate a temporary buffer with the same size of input for sorting.
TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
context, sizeof(uint8_t) * NumElements(input->dims),
&data->sorting_buffer));
// We can interleave scratch / persistent buffer allocation.
data->invoke_count = reinterpret_cast<int*>(
context->AllocatePersistentBuffer(context, sizeof(int)));
*data->invoke_count = 0;
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteBool:
Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteFloat32:
Comparison<float, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::NotEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteString:
ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2,
output, requires_broadcast);
break;
default:
context->ReportError(
context,
"Does not support type %d, requires bool|float|int|uint8|string",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
bool ResourceHandle::ParseFromString(const string& s) {
ResourceHandleProto proto;
const bool status = proto.ParseFromString(s);
if (status) FromProto(proto);
return status;
} | CWE-617 | 51 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
ruy::profiler::ScopeLabel label("SquaredDifference");
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32) {
EvalSquaredDifference<float>(context, node, data, input1, input2, output);
} else if (output->type == kTfLiteInt32) {
EvalSquaredDifference<int32_t>(context, node, data, input1, input2, output);
} else {
context->ReportError(
context,
"SquaredDifference only supports FLOAT32 and INT32 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input1->type) {
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalMul<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
context->ReportError(context,
"Mul only supports FLOAT32, INT32 and quantized UINT8,"
" INT8 and INT16 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs,
const OpDef& op_def) {
FullTypeDef ft;
ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < op_def.output_arg_size(); i++) {
auto* t = ft.add_args();
*t = op_def.output_arg(i).experimental_full_type();
// Resolve dependent types. The convention for op registrations is to use
// attributes as type variables.
// See https://www.tensorflow.org/guide/create_op#type_polymorphism.
// Once the op signature can be defined entirely in FullType, this
// convention can be deprecated.
//
// Note: While this code performs some basic verifications, it generally
// assumes consistent op defs and attributes. If more complete
// verifications are needed, they should be done by separately, and in a
// way that can be reused for type inference.
for (int j = 0; j < t->args_size(); j++) {
auto* arg = t->mutable_args(i);
if (arg->type_id() == TFT_VAR) {
const auto* attr = attrs.Find(arg->s());
if (attr == nullptr) {
return Status(
error::INVALID_ARGUMENT,
absl::StrCat("Could not find an attribute for key ", arg->s()));
}
if (attr->value_case() == AttrValue::kList) {
const auto& attr_list = attr->list();
arg->set_type_id(TFT_PRODUCT);
for (int i = 0; i < attr_list.type_size(); i++) {
map_dtype_to_tensor(attr_list.type(i), arg->add_args());
}
} else if (attr->value_case() == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), arg);
} else {
return Status(error::UNIMPLEMENTED,
absl::StrCat("unknown attribute type",
attrs.DebugString(), " key=", arg->s()));
}
arg->clear_s();
}
}
}
return ft;
} | CWE-787 | 24 |
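The CWE-787 here is an index mix-up: the middle loop iterates `j` over `t->args_size()` but writes through `t->mutable_args(i)` with the outer output-argument index, and the innermost loop shadows `i` as well. A sketch of the corrected loop body (same structure, fixed indices; not a verbatim upstream patch):

```cpp
for (int j = 0; j < t->args_size(); j++) {
  auto* arg = t->mutable_args(j);  // index with j, not the outer i
  if (arg->type_id() != TFT_VAR) continue;
  const auto* attr = attrs.Find(arg->s());
  if (attr == nullptr) {
    return Status(error::INVALID_ARGUMENT,
                  absl::StrCat("Could not find an attribute for key ",
                               arg->s()));
  }
  if (attr->value_case() == AttrValue::kList) {
    const auto& attr_list = attr->list();
    arg->set_type_id(TFT_PRODUCT);
    for (int k = 0; k < attr_list.type_size(); k++) {  // no shadowing
      map_dtype_to_tensor(attr_list.type(k), arg->add_args());
    }
  } else if (attr->value_case() == AttrValue::kType) {
    map_dtype_to_tensor(attr->type(), arg);
  } else {
    return Status(error::UNIMPLEMENTED,
                  absl::StrCat("unknown attribute type ",
                               attrs.DebugString(), " key=", arg->s()));
  }
  arg->clear_s();
}
```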
bool RepeatedAttrDefEqual(
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
std::unordered_map<string, const OpDef::AttrDef*> a1_set;
for (const OpDef::AttrDef& def : a1) {
DCHECK(a1_set.find(def.name()) == a1_set.end())
<< "AttrDef names must be unique, but '" << def.name()
<< "' appears more than once";
a1_set[def.name()] = &def;
}
for (const OpDef::AttrDef& def : a2) {
auto iter = a1_set.find(def.name());
if (iter == a1_set.end()) return false;
if (!AttrDefEqual(*iter->second, def)) return false;
a1_set.erase(iter);
}
if (!a1_set.empty()) return false;
return true;
} | CWE-617 | 51 |
void * alloc_top(size_t size, size_t align)
{loop:
top -= size;
align_top(align);
if (top < bottom) {new_chunk(); goto loop;}
return top;
} | CWE-787 | 24 |
TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithWrongTransportSocket) {
const std::string yaml = TestEnvironment::substitute(R"EOF(
address:
socket_address:
address: 127.0.0.1
protocol: UDP
port_value: 1234
filter_chains:
- filter_chain_match:
transport_protocol: "quic"
filters: []
transport_socket:
name: envoy.transport_sockets.quic
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificates:
- certificate_chain:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"
private_key:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"
validation_context:
trusted_ca:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
match_subject_alt_names:
- exact: localhost
- exact: 127.0.0.1
udp_listener_config:
quic_options: {}
)EOF",
Network::Address::IpVersion::v4);
envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml);
#if defined(ENVOY_ENABLE_QUIC)
EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException,
"wrong transport socket config specified for quic transport socket");
#else
EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException,
"QUIC is configured but not enabled in the build.");
#endif
} | CWE-295 | 52 |
CString CWebSock::GetSkinPath(const CString& sSkinName) {
CString sRet = CZNC::Get().GetZNCPath() + "/webskins/" + sSkinName;
if (!CFile::IsDir(sRet)) {
sRet = CZNC::Get().GetCurPath() + "/webskins/" + sSkinName;
if (!CFile::IsDir(sRet)) {
sRet = CString(_SKINDIR_) + "/" + sSkinName;
}
}
return sRet + "/";
} | CWE-22 | 2 |
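Nothing validates `sSkinName`, so a request for a skin like `../../etc` walks out of every candidate directory (CWE-22). A hedged sketch that falls back to the default skin on suspicious names; the rejection policy and fallback name are assumptions, not ZNC's actual patch:

```cpp
CString CWebSock::GetSkinPath(const CString& sSkinName) {
    CString sSkin = sSkinName;
    // Path separators or ".." would escape the webskins roots.
    if (sSkin.empty() || sSkin.find("/") != CString::npos ||
        sSkin.find("..") != CString::npos) {
        sSkin = "_default_";
    }
    CString sRet = CZNC::Get().GetZNCPath() + "/webskins/" + sSkin;
    if (!CFile::IsDir(sRet)) {
        sRet = CZNC::Get().GetCurPath() + "/webskins/" + sSkin;
        if (!CFile::IsDir(sRet)) {
            sRet = CString(_SKINDIR_) + "/" + sSkin;
        }
    }
    return sRet + "/";
}
```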
HexOutStream::HexOutStream(OutStream& os, int buflen)
: out_stream(os), offset(0), bufSize(buflen ? buflen : DEFAULT_BUF_LEN)
{
if (bufSize % 2)
bufSize--;
ptr = start = new U8[bufSize];
end = start + bufSize;
} | CWE-787 | 24 |
TfLiteStatus EvalHashtableImport(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
const int resource_id = input_resource_id_tensor->data.i32[0];
const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor);
const TfLiteTensor* value_tensor = GetInput(context, node, kValueTensor);
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
auto* lookup = resource::GetHashtableResource(&resources, resource_id);
TF_LITE_ENSURE(context, lookup != nullptr);
TF_LITE_ENSURE_STATUS(
lookup->CheckKeyAndValueTypes(context, key_tensor, value_tensor));
// The hashtable resource will only be initialized once; attempting to
// initialize it multiple times is a no-op.
auto result = lookup->Import(context, key_tensor, value_tensor);
return result;
} | CWE-125 | 47 |
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
} break;
case kTfLiteInt8: {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
EvalUsingLookupTable(data, input, output);
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 and int8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | CWE-125 | 47 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const int num_elements = NumElements(input);
switch (input->type) {
case kTfLiteInt64:
memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t));
break;
case kTfLiteInt32:
memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t));
break;
case kTfLiteFloat32:
memset(GetTensorData<float>(output), 0, num_elements * sizeof(float));
break;
default:
context->ReportError(context,
"ZerosLike only currently supports int64, int32, "
"and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-125 | 47 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Ensure validity of input tensor and its dimension.
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context,
begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64);
TF_LITE_ENSURE(context,
size->type == kTfLiteInt32 || size->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, NumDimensions(begin), 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, NumElements(begin), NumElements(size));
TF_LITE_ENSURE_MSG(context, NumDimensions(input) <= kMaxDim,
"Slice op only supports 1D-4D input arrays.");
// Postpone allocation of output if any of the indexing tensors is not
// constant
if (!(IsConstantTensor(begin) && IsConstantTensor(size))) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputShape(context, input, begin, size, output);
} | CWE-787 | 24 |
inline void pad(int bytes) {
while (bytes-- > 0) writeU8(0);
} | CWE-787 | 24 |
static float *get_window(vorb *f, int len)
{
len <<= 1;
if (len == f->blocksize_0) return f->window[0];
if (len == f->blocksize_1) return f->window[1];
assert(0);
return NULL;
} | CWE-369 | 60 |
bool logToUSDT(const Array& bt) {
std::lock_guard<std::mutex> lock(usdt_mutex);
memset(&bt_slab, 0, sizeof(bt_slab));
int i = 0;
IterateVNoInc(
bt.get(),
[&](TypedValue tv) -> bool {
if (i >= strobelight::kMaxStackframes) {
return true;
}
assertx(isArrayLikeType(type(tv)));
ArrayData* bt_frame = val(tv).parr;
strobelight::backtrace_frame_t* frame = &bt_slab.frames[i];
auto const line = bt_frame->get(s_line.get());
if (line.is_init()) {
assertx(isIntType(type(line)));
frame->line = val(line).num;
}
auto const file_name = bt_frame->get(s_file.get());
if (file_name.is_init()) {
assertx(isStringType(type(file_name)));
strncpy(frame->file_name,
val(file_name).pstr->data(),
std::min(val(file_name).pstr->size(), strobelight::kFileNameMax));
frame->file_name[strobelight::kFileNameMax - 1] = '\0';
}
auto const class_name = bt_frame->get(s_class.get());
if (class_name.is_init()) {
assertx(isStringType(type(class_name)));
strncpy(frame->class_name,
val(class_name).pstr->data(),
std::min(val(class_name).pstr->size(), strobelight::kClassNameMax));
frame->class_name[strobelight::kClassNameMax - 1] = '\0';
}
auto const function_name = bt_frame->get(s_function.get());
if (function_name.is_init()) {
assertx(isStringType(type(function_name)));
strncpy(frame->function,
val(function_name).pstr->data(),
std::min(val(function_name).pstr->size(),
strobelight::kFunctionMax));
frame->function[strobelight::kFunctionMax - 1] = '\0';
}
i++;
return false;
}
);
bt_slab.len = i;
// Allow BPF to read the now-formatted stacktrace
FOLLY_SDT_WITH_SEMAPHORE(hhvm, hhvm_stack, &bt_slab);
return true;
} | CWE-125 | 47 |
void * alloc_bottom(size_t size, size_t align)
{loop:
align_bottom(align);
byte * tmp = bottom;
bottom += size;
if (bottom > top) {new_chunk(); goto loop;}
return tmp;
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Resize the output tensor.
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1);
for (int i = 0; i < input_dims_size; i++) {
output_shape->data[i] = input_dims->data[i];
}
// Last dimension in the output is the same as the last dimension in the
// input.
output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1];
output->type = input->type;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
return kTfLiteOk;
} | CWE-125 | 47 |
inline bool checkNoWait(int length) { return check(length, 1, false)!=0; } | CWE-787 | 24 |
Jsi_RC jsi_evalcode(jsi_Pstate *ps, Jsi_Func *func, Jsi_OpCodes *opcodes,
jsi_ScopeChain *scope, Jsi_Value *fargs,
Jsi_Value *_this,
Jsi_Value **vret)
{
Jsi_Interp *interp = ps->interp;
if (interp->exited)
return JSI_ERROR;
Jsi_RC rc;
jsi_Frame frame = *interp->framePtr;
frame.parent = interp->framePtr;
interp->framePtr = &frame;
frame.parent->child = interp->framePtr = &frame;
frame.ps = ps;
frame.ingsc = scope;
frame.incsc = fargs;
frame.inthis = _this;
frame.opcodes = opcodes;
frame.fileName = ((func && func->script)?func->script:interp->curFile);
frame.funcName = interp->curFunction;
frame.dirName = interp->curDir;
if (frame.fileName && frame.fileName == frame.parent->fileName)
frame.logflag = frame.parent->logflag;
else
frame.logflag = 0;
frame.level = frame.parent->level+1;
frame.evalFuncPtr = func;
frame.arguments = NULL;
// if (func && func->strict)
// frame.strict = 1;
if (interp->curIp)
frame.parent->line = interp->curIp->Line;
frame.ip = interp->curIp;
interp->refCount++;
interp->level++;
Jsi_IncrRefCount(interp, fargs);
rc = jsi_evalcode_sub(ps, opcodes, scope, fargs, _this, *vret);
Jsi_DecrRefCount(interp, fargs);
if (interp->didReturn == 0 && !interp->exited) {
if ((interp->evalFlags&JSI_EVAL_RETURN)==0)
Jsi_ValueMakeUndef(interp, vret);
/*if (interp->framePtr->Sp != oldSp) //TODO: at some point after memory refs???
Jsi_LogBug("Stack not balance after execute script");*/
}
if (frame.arguments)
Jsi_DecrRefCount(interp, frame.arguments);
interp->didReturn = 0;
interp->refCount--;
interp->level--;
interp->framePtr = frame.parent;
interp->framePtr->child = NULL;
interp->curIp = frame.ip;
if (interp->exited)
rc = JSI_ERROR;
return rc;
} | CWE-120 | 44 |
void BinaryParameter::setParam(const void* v, int len) {
LOCK_CONFIG;
if (immutable) return;
vlog.debug("set %s(Binary)", getName());
delete [] value; value = 0;
if (len) {
value = new char[len];
length = len;
memcpy(value, v, len);
}
} | CWE-787 | 24 |
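When `len` is 0 the old buffer is freed and `value` nulled, but `length` keeps its stale count, so a later reader pairs a null pointer with a nonzero length. The missing branch, as a sketch:

```cpp
if (len) {
  value = new char[len];
  length = len;
  memcpy(value, v, len);
} else {
  length = 0;  // keep length consistent with the freed buffer
}
```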
TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool);
return PrepareSimple(context, node);
} | CWE-787 | 24 |
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values,
const FunctionDef::ArgAttrs* arg_attrs,
bool ints_on_device,
int64_t resource_arg_unique_id) {
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(
ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
CHECK_GE(dtypes.size(), size_t{1});
int arg_index = result_.nodes.size();
TF_RETURN_IF_ERROR(
AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes}));
// Creates dtypes.size() nodes in the graph.
for (size_t i = 0; i < dtypes.size(); ++i) {
TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i),
{true, arg_index, 0, false, {dtypes[i]}}));
DCHECK_EQ(arg_index, result_.nodes.size());
string name = arg_def.name();
if (dtypes.size() > 1) {
strings::StrAppend(&name, "_", i);
}
NodeDef* gnode = AddNode(name);
if (ints_on_device && dtypes[i] == DataType::DT_INT32) {
gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp);
} else {
gnode->set_op(FunctionLibraryDefinition::kArgOp);
}
DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i];
AddAttr("T", dtype, gnode);
AddAttr("index", arg_index, gnode);
if (resource_arg_unique_id >= 0) {
AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode);
}
if (arg_attrs) {
for (const auto& arg_attr : arg_attrs->attr()) {
AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr());
}
}
result_.arg_types.push_back(dtypes[i]);
++arg_index;
}
return Status::OK();
} | CWE-617 | 51 |
UnicodeString DecimalQuantity::toScientificString() const {
U_ASSERT(!isApproximate);
UnicodeString result;
if (isNegative()) {
result.append(u'-');
}
if (precision == 0) {
result.append(u"0E+0", -1);
return result;
}
// NOTE: It is not safe to add to lOptPos (aka maxInt) or subtract from
// rOptPos (aka -maxFrac) due to overflow.
int32_t upperPos = std::min(precision + scale, lOptPos) - scale - 1;
int32_t lowerPos = std::max(scale, rOptPos) - scale;
int32_t p = upperPos;
result.append(u'0' + getDigitPos(p));
if ((--p) >= lowerPos) {
result.append(u'.');
for (; p >= lowerPos; p--) {
result.append(u'0' + getDigitPos(p));
}
}
result.append(u'E');
int32_t _scale = upperPos + scale;
if (_scale < 0) {
_scale *= -1;
result.append(u'-');
} else {
result.append(u'+');
}
if (_scale == 0) {
result.append(u'0');
}
int32_t insertIndex = result.length();
while (_scale > 0) {
std::div_t res = std::div(_scale, 10);
result.insert(insertIndex, u'0' + res.rem);
_scale = res.quot;
}
return result;
} | CWE-190 | 19 |
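The comment already flags `lOptPos`/`rOptPos` overflow, but the remaining 32-bit hazards are `upperPos + scale` and `_scale *= -1`, which is undefined behavior for INT32_MIN (the CWE-190). A sketch of the exponent handling widened to 64 bits:

```cpp
result.append(u'E');
int64_t exponent = static_cast<int64_t>(upperPos) + scale;  // no 32-bit wrap
if (exponent < 0) {
    exponent = -exponent;   // safe: int64_t can negate any widened int32 sum
    result.append(u'-');
} else {
    result.append(u'+');
}
// ... digit emission continues as in the original, using `exponent` ...
```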
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis = GetInput(context, node, kAxisTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(axis), 1);
TF_LITE_ENSURE(context, NumDimensions(input) >= NumElements(axis));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 &&
input->type != kTfLiteInt64 && input->type != kTfLiteBool) {
context->ReportError(context, "Type '%s' is not supported by reverse.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
if (axis->type != kTfLiteInt32) {
context->ReportError(context, "Axis Type '%s' is not supported by reverse.",
TfLiteTypeGetName(axis->type));
return kTfLiteError;
}
// TODO(renjieliu): support multi-axis case.
if (NumElements(axis) > 1) {
context->ReportError(context, "Current does not support more than 1 axis.");
}
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
return context->ResizeTensor(context, output, output_shape);
} | CWE-125 | 47 |
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
int index) {
if (index >= 0 && index < node->outputs->size) {
const int tensor_index = node->outputs->data[index];
if (tensor_index != kTfLiteOptionalTensor) {
if (context->tensors != nullptr) {
return &context->tensors[tensor_index];
} else {
return context->GetTensor(context, tensor_index);
}
}
}
return nullptr;
} | CWE-787 | 24 |
int PackLinuxElf64::canUnpack()
{
if (super::canUnpack()) {
return true;
}
if (Elf64_Ehdr::ET_DYN==get_te16(&ehdri.e_type)) {
PackLinuxElf64help1(fi);
Elf64_Phdr const *phdr = phdri, *last_LOAD = nullptr;
for (unsigned j = 0; j < e_phnum; ++phdr, ++j)
if (Elf64_Phdr::PT_LOAD==get_te32(&phdr->p_type)) {
last_LOAD = phdr;
}
if (!last_LOAD)
return false;
off_t offset = get_te64(&last_LOAD->p_offset);
unsigned filesz = get_te64(&last_LOAD->p_filesz);
fi->seek(filesz+offset, SEEK_SET);
MemBuffer buf(32 + sizeof(overlay_offset));
fi->readx(buf, buf.getSize());
return PackUnix::find_overlay_offset(buf);
}
return false;
} | CWE-476 | 46 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
OpContext op_context(context, node);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);
auto input_type = op_context.input->type;
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt8 || input_type == kTfLiteInt16 ||
input_type == kTfLiteInt32);
for (int i = 0; i < NumOutputs(node); ++i) {
GetOutput(context, node, i)->type = input_type;
}
// If we know the contents of the 'axis' tensor, resize all outputs.
// Otherwise, wait until Eval().
if (IsConstantTensor(op_context.axis)) {
return ResizeOutputTensors(context, node, op_context.axis, op_context.input,
op_context.params->num_splits);
} else {
return UseDynamicOutputTensors(context, node);
}
} | CWE-125 | 47 |
void AllocateDataSet(cmsIT8* it8)
{
TABLE* t = GetTable(it8);
if (t -> Data) return; // Already allocated
t-> nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS"));
t-> nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS"));
t-> Data = (char**)AllocChunk (it8, ((cmsUInt32Number) t->nSamples + 1) * ((cmsUInt32Number) t->nPatches + 1) *sizeof (char*));
if (t->Data == NULL) {
SynError(it8, "AllocateDataSet: Unable to allocate data array");
}
} | CWE-190 | 19 |
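`nSamples` and `nPatches` come straight from `atoi` on file-controlled properties, so the `(nSamples + 1) * (nPatches + 1)` product can wrap despite the casts (CWE-190). A sketch with range checks before the multiply; the 0x7ffe ceiling is an assumption about a reasonable table bound:

```cpp
void AllocateDataSet(cmsIT8* it8)
{
    TABLE* t = GetTable(it8);
    if (t->Data) return;  // Already allocated

    t->nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS"));
    t->nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS"));

    // Reject negative or absurdly large counts before multiplying.
    if (t->nSamples < 0 || t->nSamples > 0x7ffe ||
        t->nPatches < 0 || t->nPatches > 0x7ffe) {
        SynError(it8, "AllocateDataSet: too many rows or columns");
        return;
    }

    t->Data = (char**) AllocChunk(it8,
        ((cmsUInt32Number) t->nSamples + 1) *
        ((cmsUInt32Number) t->nPatches + 1) * sizeof(char*));
    if (t->Data == NULL) {
        SynError(it8, "AllocateDataSet: Unable to allocate data array");
    }
}
```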
bool IsConvolutionOpSupported(const TfLiteRegistration* registration,
const TfLiteNode* node, TfLiteContext* context) {
if (node->builtin_data == nullptr) return false;
TfLiteFusedActivation activation;
if (registration->builtin_code == kTfLiteBuiltinConv2d) {
const auto* conv_params =
reinterpret_cast<const TfLiteConvParams*>(node->builtin_data);
activation = conv_params->activation;
} else if (registration->builtin_code == kTfLiteBuiltinDepthwiseConv2d) {
const auto* depthwise_conv_params =
reinterpret_cast<const TfLiteDepthwiseConvParams*>(node->builtin_data);
activation = depthwise_conv_params->activation;
} else if (registration->builtin_code == kTfLiteBuiltinTransposeConv) {
activation = kTfLiteActNone;
} else {
TF_LITE_KERNEL_LOG(
context,
"Invalid op: op must be Conv2D, DepthwiseConv2D or TransposeConv.");
return false;
}
if (activation == kTfLiteActSignBit) {
return false;
}
const int kOutputShapeTensor = 0; // Only used for TransposeConv
const int kWeightTensor = 1;
const int kBiasTensor = 2; // Only used for non-TransposeConv
const TfLiteTensor* weights = GetInput(context, node, kWeightTensor);
const int max_kernel_size = 16384;
if (!IsConstantTensor(weights)) {
return false;
}
if (weights->dims->data[1] > max_kernel_size ||
weights->dims->data[2] > max_kernel_size) {
return false;
}
if (registration->builtin_code == kTfLiteBuiltinTransposeConv) {
if (!IsConstantTensor(GetInput(context, node, kOutputShapeTensor))) {
return false;
}
} else {
if (node->inputs->size >= kBiasTensor &&
!IsConstantTensor(GetInput(context, node, kBiasTensor))) {
return false;
}
}
return true;
} | CWE-787 | 24 |
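Here the CWE-787 tag matches the bias branch: node->inputs->size >= kBiasTensor still accepts a two-element input list, so GetInput(context, node, kBiasTensor) indexes one slot past it. A hedged correction is the strict comparison:
} else {
  // Strict '>' ensures index kBiasTensor (2) actually exists before reading it.
  if (node->inputs->size > kBiasTensor &&
      !IsConstantTensor(GetInput(context, node, kBiasTensor))) {
    return false;
  }
}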
int64_t TensorByteSize(const TensorProto& t) {
// num_elements returns -1 if shape is not fully defined.
int64_t num_elems = TensorShape(t.tensor_shape()).num_elements();
return num_elems < 0 ? -1 : num_elems * DataTypeSize(t.dtype());
} | CWE-617 | 51 |
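num_elems * DataTypeSize() can overflow int64_t for adversarial shape protos, which later trips a reachable CHECK (the CWE-617 tag). A minimal sketch using TensorFlow's overflow-checked helper from tensorflow/core/util/overflow.h; note that constructing a TensorShape from an unvalidated proto is a separate hazard this sketch does not address:
int64_t TensorByteSize(const TensorProto& t) {
  int64_t num_elems = TensorShape(t.tensor_shape()).num_elements();
  if (num_elems < 0) return -1;
  // MultiplyWithoutOverflow returns -1 when the product would overflow.
  int64_t bytes = MultiplyWithoutOverflow(num_elems, DataTypeSize(t.dtype()));
  return bytes < 0 ? -1 : bytes;
}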
LiteralString(std::string &&s, bool ignore_case)
    : lit_(std::move(s)), ignore_case_(ignore_case),
      is_word_(false) {} | CWE-476 | 46 |
LiteralString(const std::string &s, bool ignore_case)
: lit_(s), ignore_case_(ignore_case),
is_word_(false) {} | CWE-476 | 46 |
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) {
// Creates a temp index to iterate through input data.
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(3);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
scratch_tensor->type = kTfLiteInt32;
scratch_tensor->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
index_size->data[0] = NumDimensions(op_context->input);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, scratch_tensor, index_size));
// Creates a temp tensor to store resolved axis given input data.
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
resolved_axis->type = kTfLiteInt32;
// Creates a temp tensor to store temp sums when calculating mean.
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
switch (op_context->input->type) {
case kTfLiteFloat32:
temp_sum->type = kTfLiteFloat32;
break;
case kTfLiteInt32:
temp_sum->type = kTfLiteInt64;
break;
case kTfLiteInt64:
temp_sum->type = kTfLiteInt64;
break;
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt16:
temp_sum->type = kTfLiteInt32;
break;
case kTfLiteBool:
temp_sum->type = kTfLiteBool;
break;
default:
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | CWE-125 | 47 |
MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) {
return bson_append_string_base( b, name, value, len, BSON_STRING );
} | CWE-190 | 19 |
int HexInStream::pos() {
return offset + ptr - start;
} | CWE-787 | 24 |
void RegKey::getBinary(const TCHAR* valname, void** data, int* length, void* def, int deflen) const {
try {
getBinary(valname, data, length);
} catch(rdr::Exception&) {
if (deflen) {
*data = new char[deflen];
memcpy(*data, def, deflen);
} else
*data = 0;
*length = deflen;
}
} | CWE-787 | 24 |
static int lookup1_values(int entries, int dim)
{
int r = (int) floor(exp((float) log((float) entries) / dim));
if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
++r; // floor() to avoid _ftol() when non-CRT
assert(pow((float) r+1, dim) > entries);
assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
return r;
} | CWE-787 | 24 |
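Both asserts compile out in release builds, so float rounding on a hostile entries/dim pair can leave r inconsistent and push later codebook indexing out of bounds (the CWE-787 tag). A defensive sketch that validates at runtime instead; the -1 sentinel and caller-side rejection are assumptions, not stb_vorbis's literal patch:
static int lookup1_values(int entries, int dim)
{
   int r = (int) floor(exp((float) log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)
      ++r;
   // Runtime checks in place of assert(); callers must treat -1 as malformed.
   if (pow((float) r+1, dim) <= entries) return -1;
   if ((int) floor(pow((float) r, dim)) > entries) return -1;
   return r;
}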
bool IsPadOpSupported(const TfLiteRegistration* registration,
const TfLiteNode* node, TfLiteContext* context) {
// padding is d x 2 tensor, where d is the dimension of input.
const TfLiteTensor* padding = GetInput(context, node, 1);
if (!IsConstantTensor(padding)) {
TF_LITE_KERNEL_LOG(context,
"%s: Only constant padding is supported for PAD.",
padding->name);
return false;
}
if (padding->dims->data[0] != 4 || padding->dims->data[1] != 2) {
TF_LITE_KERNEL_LOG(context, "%s: Only 4D inputs are supported for PAD.",
padding->name);
return false;
}
const int32_t* padding_data = GetTensorData<int32_t>(padding);
if (!(padding_data[0] == 0 && padding_data[1] == 0)) {
TF_LITE_KERNEL_LOG(
context, "%s: Padding for batch dimension is not supported in PAD.",
padding->name);
return false;
}
if (!(padding_data[6] == 0 && padding_data[7] == 0)) {
TF_LITE_KERNEL_LOG(
context, "%s: Padding for channel dimension is not supported in PAD.",
padding->name);
return false;
}
return true;
} | CWE-787 | 24 |
R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut32 i = 0;
RBinJavaAnnotation *annotation = NULL;
RBinJavaElementValuePair *evps = NULL;
ut64 offset = 0;
annotation = R_NEW0 (RBinJavaAnnotation);
if (!annotation) {
return NULL;
}
// (ut16) read and set annotation_value.type_idx;
annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
// (ut16) read and set annotation_value.num_element_value_pairs;
annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
annotation->element_value_pairs = r_list_newf (r_bin_java_element_pair_free);
// read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs
for (i = 0; i < annotation->num_element_value_pairs; i++) {
if (offset > sz) {
break;
}
evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset);
if (evps) {
offset += evps->size;
r_list_append (annotation->element_value_pairs, (void *) evps);
}
}
annotation->size = offset;
return annotation;
} | CWE-788 | 87 |
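The loop guard offset > sz still admits offset == sz, and the two R_BIN_JAVA_USHORT reads at the top never confirm the buffer holds four bytes, so a truncated annotation reads past buffer (the CWE-788 tag). A hedged sketch of tighter guards:
if (!buffer || sz < 4) {
    free (annotation);
    return NULL;
}
/* ... unchanged USHORT reads of type_idx and num_element_value_pairs ... */
for (i = 0; i < annotation->num_element_value_pairs; i++) {
    // Require room for at least the 2-byte element_name_idx of the next pair.
    if (offset + 2 > sz) {
        break;
    }
    evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset);
    if (evps) {
        offset += evps->size;
        r_list_append (annotation->element_value_pairs, (void *) evps);
    }
}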
TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node,
bool is_string_allowed) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Don't support string.
if (!is_string_allowed) {
TF_LITE_ENSURE(context, input1->type != kTfLiteString);
}
// Currently only support tensors have the same type.
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = kTfLiteBool;
bool requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | CWE-787 | 24 |
void operator()(OpKernelContext* ctx, const Index num_segments,
const TensorShape& segment_ids_shape,
typename TTypes<Index>::ConstFlat segment_ids,
const Index data_size, const T* data,
typename TTypes<T, 2>::Tensor output) {
output.setConstant(InitialValueF()());
if (data_size == 0) {
return;
}
const int64 N = segment_ids.dimension(0);
ReductionF reduction;
auto data_flat = typename TTypes<T, 2>::ConstTensor(data, N, data_size / N);
for (int64 i = 0; i < N; ++i) {
Index j = internal::SubtleMustCopy(segment_ids(i));
if (j < 0) {
continue;
}
OP_REQUIRES(ctx, FastBoundsCheck(j, num_segments),
errors::InvalidArgument(
"segment_ids", SliceDebugString(segment_ids_shape, i),
" = ", j, " is out of range [0, ", num_segments, ")"));
reduction(data_flat.template chip<0>(i), output.template chip<0>(j));
}
} | CWE-681 | 59 |
char* HexOutStream::binToHexStr(const char* data, int length) {
char* buffer = new char[length*2+1];
for (int i=0; i<length; i++) {
buffer[i*2] = intToHex((data[i] >> 4) & 15);
buffer[i*2+1] = intToHex((data[i] & 15));
if (!buffer[i*2] || !buffer[i*2+1]) {
delete [] buffer;
return 0;
}
}
buffer[length*2] = 0;
return buffer;
} | CWE-787 | 24 |
void nodeRename(Proxy &node, const RegexMatchConfigs &rename_array, extra_settings &ext)
{
std::string &remark = node.Remark, original_remark = node.Remark, returned_remark, real_rule;
for(const RegexMatchConfig &x : rename_array)
{
if(!x.Script.empty())
{
script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx)
{
std::string script = x.Script;
if(startsWith(script, "path:"))
script = fileGet(script.substr(5), true);
try
{
ctx.eval(script);
auto rename = (std::function<std::string(const Proxy&)>) ctx.eval("rename");
returned_remark = rename(node);
if(!returned_remark.empty())
remark = returned_remark;
}
catch (qjs::exception)
{
script_print_stack(ctx);
}
}, global.scriptCleanContext);
continue;
}
if(applyMatcher(x.Match, real_rule, node) && real_rule.size())
remark = regReplace(remark, real_rule, x.Replace);
}
if(remark.empty())
remark = original_remark;
return;
} | CWE-434 | 5 |
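The path: prefix hands fileGet() a caller-influenced path with no confinement, which is what the CWE-434 tag flags. A heavily hedged mitigation sketch for the lambda body; the traversal and absolute-path checks are illustrative assumptions, not subconverter's actual patch:
if(startsWith(script, "path:"))
{
    std::string path = script.substr(5);
    // Illustrative guard: reject parent-directory escapes and absolute paths
    // so the lookup stays inside the working directory.
    if(path.find("..") != std::string::npos || startsWith(path, "/"))
        return;
    script = fileGet(path, true);
}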
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
auto data_type = output->type;
TF_LITE_ENSURE(context,
data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 ||
data_type == kTfLiteInt8 || data_type == kTfLiteInt32 ||
data_type == kTfLiteInt64);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const int block_size = params->block_size;
const int input_height = input->dims->data[1];
const int input_width = input->dims->data[2];
int output_height = input_height / block_size;
int output_width = input_width / block_size;
TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size);
TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = output_height;
output_size->data[2] = output_width;
output_size->data[3] = input->dims->data[3] * block_size * block_size;
return context->ResizeTensor(context, output, output_size);
} | CWE-787 | 24 |
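params->block_size comes straight from the flatbuffer and is never validated: a zero value triggers a division by zero and a negative one corrupts the computed shape (the CWE-787 tag). A minimal guard sketch, in line with the positivity check upstream TFLite added:
const int block_size = params->block_size;
TF_LITE_ENSURE(context, block_size > 0);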
int64_t MemFile::readImpl(char *buffer, int64_t length) {
assertx(m_len != -1);
assertx(length > 0);
int64_t remaining = m_len - m_cursor;
if (remaining < length) length = remaining;
if (length > 0) {
memcpy(buffer, (const void *)(m_data + m_cursor), length);
}
m_cursor += length;
return length;
} | CWE-125 | 47 |
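If m_cursor ever sits past m_len, remaining goes negative, length is set to that negative value, and the function returns a negative count that callers can misinterpret as a huge unsigned size, which plausibly drives the flagged out-of-bounds read (the CWE-125 tag). A hedged sketch that clamps at EOF:
int64_t MemFile::readImpl(char *buffer, int64_t length) {
  assertx(m_len != -1);
  assertx(length > 0);
  int64_t remaining = m_len - m_cursor;
  if (remaining <= 0) return 0;          // at or past EOF: nothing to copy
  if (remaining < length) length = remaining;
  memcpy(buffer, (const void *)(m_data + m_cursor), length);
  m_cursor += length;
  return length;
}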
void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
float activation_min, activation_max;
CalculateActivationRange(params->activation, &activation_min,
&activation_max);
#define TF_LITE_AVERAGE_POOL(type) \
tflite::PoolParams op_params; \
op_params.stride_height = params->stride_height; \
op_params.stride_width = params->stride_width; \
op_params.filter_height = params->filter_height; \
op_params.filter_width = params->filter_width; \
op_params.padding_values.height = data->padding.height; \
op_params.padding_values.width = data->padding.width; \
op_params.float_activation_min = activation_min; \
op_params.float_activation_max = activation_max; \
type::AveragePool(op_params, GetTensorShape(input), \
GetTensorData<float>(input), GetTensorShape(output), \
GetTensorData<float>(output))
if (kernel_type == kReference) {
TF_LITE_AVERAGE_POOL(reference_ops);
} else {
TF_LITE_AVERAGE_POOL(optimized_ops);
}
#undef TF_LITE_AVERAGE_POOL
} | CWE-835 | 42 |
bool handleBackslash(signed char& out) {
char ch = *p++;
switch (ch) {
case 0: return false;
case '"': out = ch; return true;
case '\\': out = ch; return true;
case '/': out = ch; return true;
case 'b': out = '\b'; return true;
case 'f': out = '\f'; return true;
case 'n': out = '\n'; return true;
case 'r': out = '\r'; return true;
case 't': out = '\t'; return true;
case 'u': {
if (UNLIKELY(is_tsimplejson)) {
auto const ch1 = *p++;
auto const ch2 = *p++;
auto const dch3 = dehexchar(*p++);
auto const dch4 = dehexchar(*p++);
if (UNLIKELY(ch1 != '0' || ch2 != '0' || dch3 < 0 || dch4 < 0)) {
return false;
}
out = (dch3 << 4) | dch4;
return true;
} else {
uint16_t u16cp = 0;
for (int i = 0; i < 4; i++) {
auto const hexv = dehexchar(*p++);
if (hexv < 0) return false; // includes check for end of string
u16cp <<= 4;
u16cp |= hexv;
}
if (u16cp > 0x7f) {
return false;
} else {
out = u16cp;
return true;
}
}
}
default: return false;
}
} | CWE-125 | 47 |
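In the is_tsimplejson branch, ch1 and ch2 advance p unconditionally, so input that ends inside the escape makes the following reads fall beyond the terminating NUL; dehexchar rejects NUL, but ch1 and ch2 are never checked before the pointer moves on (the CWE-125 tag). A hedged sketch of that branch:
if (UNLIKELY(is_tsimplejson)) {
  auto const ch1 = *p++;
  if (UNLIKELY(ch1 == 0)) return false;  // stop at end of input
  auto const ch2 = *p++;
  if (UNLIKELY(ch2 == 0)) return false;
  auto const dch3 = dehexchar(*p++);
  auto const dch4 = dehexchar(*p++);     // dehexchar(0) < 0 covers these two
  if (UNLIKELY(ch1 != '0' || ch2 != '0' || dch3 < 0 || dch4 < 0)) {
    return false;
  }
  out = (dch3 << 4) | dch4;
  return true;
}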
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values,
const FunctionDef::ArgAttrs* arg_attrs,
bool ints_on_device,
int64_t resource_arg_unique_id) {
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(
ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
if (dtypes.size() < size_t{1}) {
return errors::Internal("Expected a list of at least one dtype");
}
int arg_index = result_.nodes.size();
TF_RETURN_IF_ERROR(
AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes}));
// Creates dtypes.size() nodes in the graph.
for (size_t i = 0; i < dtypes.size(); ++i) {
TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i),
{true, arg_index, 0, false, {dtypes[i]}}));
DCHECK_EQ(arg_index, result_.nodes.size());
string name = arg_def.name();
if (dtypes.size() > 1) {
strings::StrAppend(&name, "_", i);
}
NodeDef* gnode = AddNode(name);
if (ints_on_device && dtypes[i] == DataType::DT_INT32) {
gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp);
} else {
gnode->set_op(FunctionLibraryDefinition::kArgOp);
}
DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i];
AddAttr("T", dtype, gnode);
AddAttr("index", arg_index, gnode);
if (resource_arg_unique_id >= 0) {
AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode);
}
if (arg_attrs) {
for (const auto& arg_attr : arg_attrs->attr()) {
AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr());
}
}
result_.arg_types.push_back(dtypes[i]);
++arg_index;
}
return Status::OK();
} | CWE-617 | 51 |
std::wstring CreateUniqueTempDirectory()
{
// We need to put downloaded updates into a directory of their own, because
// if we put it in $TMP, some DLLs could be there and interfere with the
// installer.
//
// This code creates a new randomized directory name and tries to create it;
// this process is repeated if the directory already exists.
wchar_t tmpdir[MAX_PATH+1];
if ( GetTempPath(MAX_PATH+1, tmpdir) == 0 )
throw Win32Exception("Cannot create temporary directory");
for ( ;; )
{
std::wstring dir(tmpdir);
dir += L"Update-";
UUID uuid;
UuidCreate(&uuid);
RPC_WSTR uuidStr;
RPC_STATUS status = UuidToString(&uuid, &uuidStr);
dir += reinterpret_cast<wchar_t*>(uuidStr);
RpcStringFree(&uuidStr);
if ( CreateDirectory(dir.c_str(), NULL) )
return dir;
else if ( GetLastError() != ERROR_ALREADY_EXISTS )
throw Win32Exception("Cannot create temporary directory");
}
} | CWE-426 | 70 |
void HexOutStream::writeBuffer() {
U8* pos = start;
while (pos != ptr) {
out_stream.check(2);
U8* optr = out_stream.getptr();
U8* oend = out_stream.getend();
int length = min(ptr-pos, (oend-optr)/2);
for (int i=0; i<length; i++) {
optr[i*2] = intToHex((pos[i] >> 4) & 0xf);
optr[i*2+1] = intToHex(pos[i] & 0xf);
}
out_stream.setptr(optr + length*2);
pos += length;
}
offset += ptr - start;
ptr = start;
} | CWE-787 | 24 |
MpdCantataMounterInterface * RemoteFsDevice::mounter()
{
if (!mounterIface) {
if (!QDBusConnection::systemBus().interface()->isServiceRegistered(MpdCantataMounterInterface::staticInterfaceName())) {
QDBusConnection::systemBus().interface()->startService(MpdCantataMounterInterface::staticInterfaceName());
}
mounterIface=new MpdCantataMounterInterface(MpdCantataMounterInterface::staticInterfaceName(),
"/Mounter", QDBusConnection::systemBus(), this);
connect(mounterIface, SIGNAL(mountStatus(const QString &, int, int)), SLOT(mountStatus(const QString &, int, int)));
connect(mounterIface, SIGNAL(umountStatus(const QString &, int, int)), SLOT(umountStatus(const QString &, int, int)));
}
return mounterIface;
} | CWE-22 | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32 ||
output->type == kTfLiteInt64) {
EvalSub<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
context->ReportError(
context,
"output type %d is not supported, requires float|uint8|int32 types.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | CWE-787 | 24 |
void RequestContext::StartBackendSpanAndSetTraceContext() {
backend_span_.reset(CreateSpan(cloud_trace_.get(), "Backend"));
// TODO: A better logic would be to send for GRPC backends the grpc-trace-bin
// header, and for http/https backends the X-Cloud-Trace-Context header.
std::string trace_context_header = cloud_trace()->ToTraceContextHeader(
backend_span_->trace_span()->span_id());
// Set trace context header to backend.
Status status = request()->AddHeaderToBackend(
cloud_trace()->header_type() == HeaderType::CLOUD_TRACE_CONTEXT
? kCloudTraceContextHeader
: kGRpcTraceContextHeader,
trace_context_header);
if (!status.ok()) {
service_context()->env()->LogError(
"Failed to set trace context header to backend.");
}
} | CWE-290 | 85 |
ssize_t enc_untrusted_recvfrom(int sockfd, void *buf, size_t len, int flags,
struct sockaddr *src_addr, socklen_t *addrlen) {
int klinux_flags = TokLinuxRecvSendFlag(flags);
if (klinux_flags == 0 && flags != 0) {
errno = EINVAL;
return -1;
}
MessageWriter input;
input.Push<int>(sockfd);
input.Push<uint64_t>(len);
input.Push<int>(klinux_flags);
MessageReader output;
const auto status = NonSystemCallDispatcher(
::asylo::host_call::kRecvFromHandler, &input, &output);
CheckStatusAndParamCount(status, output, "enc_untrusted_recvfrom", 4);
int result = output.next<int>();
int klinux_errno = output.next<int>();
// recvfrom() returns -1 on failure, with errno set to indicate the cause
// of the error.
if (result == -1) {
errno = FromkLinuxErrorNumber(klinux_errno);
return result;
}
auto buffer_received = output.next();
memcpy(buf, buffer_received.data(), len);
// If |src_addr| is not NULL, and the underlying protocol provides the source
// address, this source address is filled in. When |src_addr| is NULL, nothing
// is filled in; in this case, |addrlen| is not used, and should also be NULL.
if (src_addr != nullptr && addrlen != nullptr) {
auto klinux_sockaddr_buf = output.next();
const struct klinux_sockaddr *klinux_addr =
klinux_sockaddr_buf.As<struct klinux_sockaddr>();
FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), src_addr,
addrlen, TrustedPrimitives::BestEffortAbort);
}
return result;
} | CWE-120 | 44 |
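The memcpy copies len bytes even though only result bytes were received and only buffer_received.size() bytes actually crossed the enclave boundary, so a hostile host can overflow buf (the CWE-120 tag). A hedged sketch of a clamped copy (std::min over an initializer list needs <algorithm>; the extent's size() accessor is assumed to behave as it does for the sockaddr buffer below it):
auto buffer_received = output.next();
// Never copy more than the caller's buffer, the reported byte count, or the
// bytes the untrusted side actually returned.
size_t copy_len = std::min(
    {len, static_cast<size_t>(result), buffer_received.size()});
memcpy(buf, buffer_received.data(), copy_len);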
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
} | CWE-787 | 24 |