code (string, lengths 12–2.05k) | label_name (string, 5 classes) | label (int64, 0–4)
---|---|---|
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameNotMatched) {
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.MergeFrom(TestUtility::createRegexMatcher(".*.foo.com"));
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
subject_alt_name_matchers;
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers));
} | Base | 1 |
void RemoteDevicePropertiesWidget::checkSaveable()
{
RemoteFsDevice::Details det=details();
modified=det!=orig;
saveable=!det.isEmpty();
if (saveable && Type_SambaAvahi==type->itemData(type->currentIndex()).toInt()) {
saveable=!smbAvahiName->text().trimmed().isEmpty();
}
emit updated();
} | Class | 2 |
void Lua::setParamsTable(lua_State* vm, const char* table_name,
const char* query) const {
char outbuf[FILENAME_MAX];
char *where;
char *tok;
char *query_string = query ? strdup(query) : NULL;
lua_newtable(L);
if (query_string) {
// ntop->getTrace()->traceEvent(TRACE_WARNING, "[HTTP] %s", query_string);
tok = strtok_r(query_string, "&", &where);
while(tok != NULL) {
char *_equal;
if(strncmp(tok, "csrf", strlen("csrf")) /* Do not put csrf into the params table */
&& (_equal = strchr(tok, '='))) {
char *decoded_buf;
int len;
_equal[0] = '\0';
_equal = &_equal[1];
len = strlen(_equal);
purifyHTTPParameter(tok), purifyHTTPParameter(_equal);
// ntop->getTrace()->traceEvent(TRACE_WARNING, "%s = %s", tok, _equal);
if((decoded_buf = (char*)malloc(len+1)) != NULL) {
Utils::urlDecode(_equal, decoded_buf, len+1);
Utils::purifyHTTPparam(tok, true, false);
Utils::purifyHTTPparam(decoded_buf, false, false);
/* Now make sure that decoded_buf is not a file path */
FILE *fd;
if((decoded_buf[0] == '.')
&& ((fd = fopen(decoded_buf, "r"))
|| (fd = fopen(realpath(decoded_buf, outbuf), "r")))) {
ntop->getTrace()->traceEvent(TRACE_WARNING, "Discarded '%s'='%s' as argument is a valid file path",
tok, decoded_buf);
decoded_buf[0] = '\0';
fclose(fd);
}
/* ntop->getTrace()->traceEvent(TRACE_WARNING, "'%s'='%s'", tok, decoded_buf); */
/* put tok and the decoded buffer into the table */
lua_push_str_table_entry(vm, tok, decoded_buf);
free(decoded_buf);
} else
ntop->getTrace()->traceEvent(TRACE_WARNING, "Not enough memory");
}
tok = strtok_r(NULL, "&", &where);
} /* while */
}
if(query_string) free(query_string);
if(table_name)
lua_setglobal(L, table_name);
else
lua_setglobal(L, (char*)"_GET"); /* Default */
} | Base | 1 |
void * alloc_bottom(size_t size) {
byte * tmp = bottom;
bottom += size;
if (bottom > top) {new_chunk(); tmp = bottom; bottom += size;}
return tmp;
} | Base | 1 |
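The `alloc_bottom` row above retries exactly once after `new_chunk()` and never re-verifies that the fresh chunk can actually hold `size`. Assuming that unchecked retry is the labeled weakness (the row carries only an abstraction label, not a CWE), a minimal hardened sketch follows; `bottom`, `top`, and `new_chunk` are stand-ins for the row's globals, and the exception path is hypothetical.

```cpp
#include <cstddef>
#include <new>

using byte = unsigned char;
static byte *bottom = nullptr, *top = nullptr; // stand-ins for the row's globals
void new_chunk();                              // assumed: installs a fresh chunk

// Minimal hardened sketch: re-check capacity after every new chunk instead of
// assuming a single new_chunk() call is enough for any request size.
void *alloc_bottom_checked(std::size_t size) {
    byte *tmp = bottom;
    bottom += size;
    while (bottom > top) {
        new_chunk();
        if (size > static_cast<std::size_t>(top - bottom))
            throw std::bad_alloc();            // request can never fit one chunk
        tmp = bottom;
        bottom += size;
    }
    return tmp;
}
```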
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
OpContext op_context(context, node);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);
auto input_type = op_context.input->type;
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt16 || input_type == kTfLiteInt32 ||
input_type == kTfLiteInt64 || input_type == kTfLiteInt8);
for (int i = 0; i < NumOutputs(node); ++i) {
GetOutput(context, node, i)->type = input_type;
}
auto size_splits = op_context.size_splits;
TF_LITE_ENSURE_EQ(context, NumDimensions(size_splits), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), NumElements(size_splits));
// If we know the contents of the 'size_splits' tensor and the 'axis' tensor,
// resize all outputs. Otherwise, wait until Eval().
if (IsConstantTensor(op_context.size_splits) &&
IsConstantTensor(op_context.axis)) {
return ResizeOutputTensors(context, node, op_context.input,
op_context.size_splits, op_context.axis);
} else {
return UseDynamicOutputTensors(context, node);
}
} | Base | 1 |
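Several TFLite rows in this dump, including the `Prepare` above, dereference `GetInput`/`GetOutput` results without a null check. If that is the labeled weakness (an assumption), the pattern later TFLite releases adopted is the `GetInputSafe`/`GetOutputSafe` variant, sketched below; treat the exact helper availability as depending on the TFLite version.

```cpp
// Sketch of the null-checked tensor lookup pattern.
TfLiteStatus PrepareChecked(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  // Both pointers are now guaranteed non-null before any dereference.
  return kTfLiteOk;
}
```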
bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration,
const TfLiteNode* node,
TfLiteContext* context) {
if (node->builtin_data == nullptr) return false;
const auto* fc_params =
reinterpret_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);
const int kInput = 0;
const int kWeights = 1;
const int kBias = 2;
if (fc_params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) {
return false;
}
const TfLiteTensor* input = GetInput(context, node, kInput);
const TfLiteTensor* weights = GetInput(context, node, kWeights);
if (!IsFloatType(input->type)) {
return false;
}
if (!IsFloatType(weights->type) || !IsConstantTensor(weights)) {
return false;
}
// Core ML 2 only supports single-batch fully connected layer, thus dimensions
// except the last one should be 1.
if (input->dims->data[input->dims->size - 1] != NumElements(input)) {
return false;
}
if (node->inputs->size > 2) {
const TfLiteTensor* bias = GetInput(context, node, kBias);
if (!IsFloatType(bias->type) || !IsConstantTensor(bias)) {
return false;
}
}
TfLiteFusedActivation activation = fc_params->activation;
if (activation == kTfLiteActSignBit) {
return false;
}
return true;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* seq_lengths = GetInput(context, node, kSeqLengthsTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1);
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 &&
input->type != kTfLiteInt64) {
context->ReportError(context,
"Type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
if (seq_lengths->type != kTfLiteInt32 && seq_lengths->type != kTfLiteInt64) {
context->ReportError(
context, "Seq_lengths type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(seq_lengths->type));
return kTfLiteError;
}
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
return context->ResizeTensor(context, output, output_shape);
} | Base | 1 |
bool Unpack::ProcessDecoded(UnpackThreadData &D)
{
UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize;
while (Item<Border)
{
UnpPtr&=MaxWinMask;
if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr)
{
UnpWriteBuf();
if (WrittenFileSize>DestUnpSize)
return false;
}
if (Item->Type==UNPDT_LITERAL)
{
#if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED)
if (Item->Length==3 && UnpPtr<MaxWinSize-4)
{
*(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal;
UnpPtr+=4;
}
else
#endif
for (uint I=0;I<=Item->Length;I++)
Window[UnpPtr++ & MaxWinMask]=Item->Literal[I];
}
else
if (Item->Type==UNPDT_MATCH)
{
InsertOldDist(Item->Distance);
LastLength=Item->Length;
CopyString(Item->Length,Item->Distance);
}
else
if (Item->Type==UNPDT_REP)
{
uint Distance=OldDist[Item->Distance];
for (uint I=Item->Distance;I>0;I--)
OldDist[I]=OldDist[I-1];
OldDist[0]=Distance;
LastLength=Item->Length;
CopyString(Item->Length,Distance);
}
else
if (Item->Type==UNPDT_FULLREP)
{
if (LastLength!=0)
CopyString(LastLength,OldDist[0]);
}
else
if (Item->Type==UNPDT_FILTER)
{
UnpackFilter Filter;
Filter.Type=(byte)Item->Length;
Filter.BlockStart=Item->Distance;
Item++;
Filter.Channels=(byte)Item->Length;
Filter.BlockLength=Item->Distance;
AddFilter(Filter);
}
Item++;
}
return true;
} | Base | 1 |
ErrorCode HTTP2Codec::checkNewStream(uint32_t streamId, bool trailersAllowed) {
if (streamId == 0) {
goawayErrorMessage_ = folly::to<string>(
"GOAWAY error: received streamID=", streamId,
" as invalid new stream for lastStreamID_=", lastStreamID_);
VLOG(4) << goawayErrorMessage_;
return ErrorCode::PROTOCOL_ERROR;
}
parsingDownstreamTrailers_ = trailersAllowed && (streamId <= lastStreamID_);
if (parsingDownstreamTrailers_) {
VLOG(4) << "Parsing downstream trailers streamId=" << streamId;
}
if (sessionClosing_ != ClosingState::CLOSED) {
lastStreamID_ = streamId;
}
if (isInitiatedStream(streamId)) {
// this stream should be initiated by us, not by peer
goawayErrorMessage_ = folly::to<string>(
"GOAWAY error: invalid new stream received with streamID=", streamId);
VLOG(4) << goawayErrorMessage_;
return ErrorCode::PROTOCOL_ERROR;
} else {
return ErrorCode::NO_ERROR;
}
} | Class | 2 |
void writeStats(Array& /*ret*/) override {
fprintf(stderr, "writeStats start\n");
// RetSame: the return value is the same instance every time
// HasThis: call has a this argument
// AllSame: all returns were the same data even though args are different
// MemberCount: number of different arg sets (including this)
fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis "
"AllSame MemberCount\n");
for (auto& me : m_memos) {
if (me.second.m_ignore) continue;
if (me.second.m_count == 1) continue;
int min_ser_len = 999999999;
int max_ser_len = 0;
int count = 0;
int member_count = 0;
bool all_same = true;
if (me.second.m_has_this) {
bool any_multiple = false;
auto& fr = me.second.m_member_memos.begin()->second.m_return_value;
member_count = me.second.m_member_memos.size();
for (auto& mme : me.second.m_member_memos) {
if (mme.second.m_return_value != fr) all_same = false;
count += mme.second.m_count;
auto ser_len = mme.second.m_return_value.length();
min_ser_len = std::min(min_ser_len, ser_len);
max_ser_len = std::max(max_ser_len, ser_len);
if (mme.second.m_count > 1) any_multiple = true;
}
if (!any_multiple && !all_same) continue;
} else {
min_ser_len = max_ser_len = me.second.m_return_value.length();
count = me.second.m_count;
all_same = me.second.m_ret_tv_same;
}
fprintf(stderr, "%d %s %d %d %s %s %s %d\n",
count, me.first.data(),
min_ser_len, max_ser_len,
me.second.m_ret_tv_same ? " true" : "false",
me.second.m_has_this ? " true" : "false",
all_same ? " true" : "false",
member_count
);
}
fprintf(stderr, "writeStats end\n");
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
const double real_multiplier =
input1->params.scale / (input2->params.scale * output->params.scale);
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
int size() const {
return m_str ? m_str->size() : 0;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Just copy input to output.
const TfLiteTensor* input = GetInput(context, node, kInput);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* axis = GetInput(context, node, kAxis);
if (IsDynamicTensor(output)) {
int axis_value;
TF_LITE_ENSURE_OK(context,
GetAxisValueFromTensor(context, *axis, &axis_value));
TF_LITE_ENSURE_OK(context,
ExpandTensorDim(context, *input, axis_value, output));
}
if (output->type == kTfLiteString) {
TfLiteTensorRealloc(input->bytes, output);
}
memcpy(output->data.raw, input->data.raw, input->bytes);
return kTfLiteOk;
} | Base | 1 |
bool IsConvolutionOpSupported(const TfLiteRegistration* registration,
const TfLiteNode* node, TfLiteContext* context) {
if (node->builtin_data == nullptr) return false;
TfLiteFusedActivation activation;
if (registration->builtin_code == kTfLiteBuiltinConv2d) {
const auto* conv_params =
reinterpret_cast<const TfLiteConvParams*>(node->builtin_data);
activation = conv_params->activation;
} else if (registration->builtin_code == kTfLiteBuiltinDepthwiseConv2d) {
const auto* depthwise_conv_params =
reinterpret_cast<const TfLiteDepthwiseConvParams*>(node->builtin_data);
activation = depthwise_conv_params->activation;
} else if (registration->builtin_code == kTfLiteBuiltinTransposeConv) {
activation = kTfLiteActNone;
} else {
TF_LITE_KERNEL_LOG(
context,
"Invalid op: op must be Conv2D, DepthwiseConv2D or TransposeConv.");
return false;
}
if (activation == kTfLiteActSignBit) {
return false;
}
const int kOutputShapeTensor = 0; // Only used for TransposeConv
const int kWeightTensor = 1;
const int kBiasTensor = 2; // Only used for non-TransposeConv
const TfLiteTensor* weights = GetInput(context, node, kWeightTensor);
const int max_kernel_size = 16384;
if (!IsConstantTensor(weights)) {
return false;
}
if (weights->dims->data[1] > max_kernel_size ||
weights->dims->data[2] > max_kernel_size) {
return false;
}
if (registration->builtin_code == kTfLiteBuiltinTransposeConv) {
if (!IsConstantTensor(GetInput(context, node, kOutputShapeTensor))) {
return false;
}
} else {
if (node->inputs->size >= kBiasTensor &&
!IsConstantTensor(GetInput(context, node, kBiasTensor))) {
return false;
}
}
return true;
} | Base | 1 |
Http::FilterTrailersStatus Context::onResponseTrailers() {
if (!wasm_->onResponseTrailers_) {
return Http::FilterTrailersStatus::Continue;
}
if (wasm_->onResponseTrailers_(this, id_).u64_ == 0) {
return Http::FilterTrailersStatus::Continue;
}
return Http::FilterTrailersStatus::StopIteration;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int num_inputs = NumInputs(node);
TF_LITE_ENSURE(context, num_inputs >= 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = input1->type;
// Check that all input tensors have the same shape and type.
for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
const TfLiteTensor* input = GetInput(context, node, i);
TF_LITE_ENSURE(context, HaveSameShapes(input1, input));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type);
}
// Use the first input tensor's dimensions as the output tensor's dimensions.
TfLiteIntArray* input1_dims = input1->dims;
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims);
return context->ResizeTensor(context, output, output_dims);
} | Base | 1 |
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
void (*get)(struct x86_emulate_ctxt *ctxt,
struct desc_ptr *ptr))
{
struct desc_ptr desc_ptr;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
if (ctxt->op_bytes == 2) {
ctxt->op_bytes = 4;
desc_ptr.address &= 0x00ffffff;
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
} | Class | 2 |
R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
if (!sz) {
return NULL;
}
ut64 offset = 0;
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (!attr) {
return NULL;
}
attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR;
// if (buffer + offset > buffer + sz) return NULL;
attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
attr->size = offset;
// IFDBG r_bin_java_print_source_code_file_attr_summary(attr);
return attr;
} | Class | 2 |
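The row above reads a 2-byte `sourcefile_idx` at offset 6 while only rejecting `sz == 0`. Assuming that unchecked read is the labeled flaw, a bounds guard before the read might look like this; the cleanup helper name is hypothetical.

```cpp
// Sketch: verify the buffer holds the 6-byte attribute header plus the
// 2-byte sourcefile index before dereferencing.
if (sz < 8) {
    r_bin_java_attribute_free (attr);   // hypothetical cleanup helper
    return NULL;
}
attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset);
```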
int size() const {
return m_str ? m_str->size() : 0;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers);
// Only int32 and int64 multiplier types are supported.
if (multipliers->type != kTfLiteInt32 && multipliers->type != kTfLiteInt64) {
context->ReportError(context,
"Multipliers of type '%s' are not supported by tile.",
TfLiteTypeGetName(multipliers->type));
return kTfLiteError;
}
if (IsConstantTensor(multipliers)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
} | Base | 1 |
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err)
{
char *colon;
char *host = NULL;
#ifdef HAVE_IPV6
char *p;
if (*(str) == '[' && str_len > 1) {
/* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */
p = memchr(str + 1, ']', str_len - 2);
if (!p || *(p + 1) != ':') {
if (get_err) {
*err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str);
}
return NULL;
}
*portno = atoi(p + 2);
return estrndup(str + 1, p - str - 1);
}
#endif
if (str_len) {
colon = memchr(str, ':', str_len - 1);
} else {
colon = NULL;
}
if (colon) {
*portno = atoi(colon + 1);
host = estrndup(str, colon - str);
} else {
if (get_err) {
*err = strpprintf(0, "Failed to parse address \"%s\"", str);
}
return NULL;
}
return host;
} | Base | 1 |
void PropertiesWidget::loadTorrentInfos(BitTorrent::TorrentHandle *const torrent)
{
clear();
m_torrent = torrent;
downloaded_pieces->setTorrent(m_torrent);
pieces_availability->setTorrent(m_torrent);
if (!m_torrent) return;
// Save path
updateSavePath(m_torrent);
// Hash
hash_lbl->setText(m_torrent->hash());
PropListModel->model()->clear();
if (m_torrent->hasMetadata()) {
// Creation date
lbl_creationDate->setText(m_torrent->creationDate().toString(Qt::DefaultLocaleShortDate));
label_total_size_val->setText(Utils::Misc::friendlyUnit(m_torrent->totalSize()));
// Comment
comment_text->setText(Utils::Misc::parseHtmlLinks(m_torrent->comment()));
// URL seeds
loadUrlSeeds();
label_created_by_val->setText(m_torrent->creator());
// List files in torrent
PropListModel->model()->setupModelData(m_torrent->info());
filesList->setExpanded(PropListModel->index(0, 0), true);
// Load file priorities
PropListModel->model()->updateFilesPriorities(m_torrent->filePriorities());
}
// Load dynamic data
loadDynamicData();
} | Base | 1 |
R_API ut8 *r_bin_java_get_attr_buf(RBinJavaObj *bin, ut64 sz, const ut64 offset, const ut8 *buf, const ut64 len) {
ut8 *attr_buf = NULL;
int pending = len - offset;
const ut8 *a_buf = offset + buf;
attr_buf = (ut8 *) calloc (pending + 1, 1);
if (!attr_buf) {
eprintf ("Unable to allocate enough bytes (0x%04"PFMT64x
") to read in the attribute.\n", sz);
return attr_buf;
}
memcpy (attr_buf, a_buf, pending); // sz+1);
return attr_buf;
} | Base | 1 |
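In the row above, `pending` is a signed `int` computed as `len - offset` with no check that `offset <= len`, so it can go negative. Assuming that is the labeled weakness, a guarded variant could look like this.

```cpp
// Sketch: reject impossible offsets before sizing the copy, and keep the
// arithmetic unsigned so it cannot go negative.
R_API ut8 *r_bin_java_get_attr_buf_checked(RBinJavaObj *bin, ut64 sz,
		const ut64 offset, const ut8 *buf, const ut64 len) {
	if (offset > len) {
		return NULL;                     // `len - offset` would underflow
	}
	ut64 pending = len - offset;
	ut8 *attr_buf = (ut8 *) calloc (pending + 1, 1);
	if (attr_buf) {
		memcpy (attr_buf, buf + offset, pending);
	}
	return attr_buf;
}
```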
void jas_matrix_asl(jas_matrix_t *matrix, int n)
{
int i;
int j;
jas_seqent_t *rowstart;
int rowstep;
jas_seqent_t *data;
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
//*data <<= n;
*data = jas_seqent_asl(*data, n);
}
}
}
} | Class | 2 |
inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node,
std::function<T(T)> func,
TfLiteType expected_type) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type);
const int64_t num_elements = NumElements(input);
const T* in_data = GetTensorData<T>(input);
T* out_data = GetTensorData<T>(output);
for (int64_t i = 0; i < num_elements; ++i) {
out_data[i] = func(in_data[i]);
}
return kTfLiteOk;
} | Base | 1 |
void copyBytes(InStream* is, int length) {
while (length > 0) {
int n = check(1, length);
is->readBytes(ptr, n);
ptr += n;
length -= n;
}
} | Base | 1 |
USHORT CNB::QueryL4HeaderOffset(PVOID PacketData, ULONG IpHeaderOffset) const
{
USHORT Res;
auto ppr = ParaNdis_ReviewIPPacket(RtlOffsetToPointer(PacketData, IpHeaderOffset),
GetDataLength(), __FUNCTION__);
if (ppr.ipStatus != ppresNotIP)
{
Res = static_cast<USHORT>(IpHeaderOffset + ppr.ipHeaderSize);
}
else
{
DPrintf(0, ("[%s] ERROR: NOT an IP packet - expected troubles!\n", __FUNCTION__));
Res = 0;
}
return Res;
} | Class | 2 |
R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut64 offset = 0;
RBinJavaAttrInfo *attr = NULL;
attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (attr && sz >= offset) {
attr->type = R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR;
attr->info.annotation_default_attr.default_value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset);
if (attr->info.annotation_default_attr.default_value) {
offset += attr->info.annotation_default_attr.default_value->size;
}
}
r_bin_java_print_annotation_default_attr_summary (attr);
return attr;
} | Class | 2 |
TfLiteStatus EvalHashtable(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, node->user_data != nullptr);
const auto* params =
reinterpret_cast<const TfLiteHashtableParams*>(node->user_data);
// The resource id is generated based on the given table name.
const int resource_id = std::hash<std::string>{}(params->table_name);
TfLiteTensor* resource_handle_tensor =
GetOutput(context, node, kResourceHandleTensor);
auto* resource_handle_data =
GetTensorData<std::int32_t>(resource_handle_tensor);
resource_handle_data[0] = resource_id;
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
resource::CreateHashtableResourceIfNotAvailable(
&resources, resource_id, params->key_dtype, params->value_dtype);
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>(
input1, input2, output, requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms,
int clrspc)
{
jas_image_t *image;
uint_fast32_t rawsize;
uint_fast32_t inmem;
int cmptno;
jas_image_cmptparm_t *cmptparm;
if (!(image = jas_image_create0())) {
return 0;
}
image->clrspc_ = clrspc;
image->maxcmpts_ = numcmpts;
image->inmem_ = true;
/* Allocate memory for the per-component information. */
if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_,
sizeof(jas_image_cmpt_t *)))) {
jas_image_destroy(image);
return 0;
}
/* Initialize in case of failure. */
for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) {
image->cmpts_[cmptno] = 0;
}
/* Compute the approximate raw size of the image. */
rawsize = 0;
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
rawsize += cmptparm->width * cmptparm->height *
(cmptparm->prec + 7) / 8;
}
/* Decide whether to buffer the image data in memory, based on the
raw size of the image. */
inmem = (rawsize < JAS_IMAGE_INMEMTHRESH);
/* Create the individual image components. */
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx,
cmptparm->tly, cmptparm->hstep, cmptparm->vstep,
cmptparm->width, cmptparm->height, cmptparm->prec,
cmptparm->sgnd, inmem))) {
jas_image_destroy(image);
return 0;
}
++image->numcmpts_;
}
/* Determine the bounding box for all of the components on the
reference grid (i.e., the image area) */
jas_image_setbbox(image);
return image;
} | Base | 1 |
TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) {
// Multiplying a * b where a and b are size_t cannot result in overflow in a
// size_t accumulator if both numbers have no non-zero bits in their upper
// half.
constexpr size_t size_t_bits = 8 * sizeof(size_t);
constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2;
*product = a * b;
// If neither integer has non-zero bits past 32 bits, the product can't
// overflow. Otherwise check using slow division.
if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) {
if (a != 0 && *product / a != b) return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
TfLiteRegistration CancelOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
// Set output size to the input size in CancelOp::Prepare(). The body exists
// only to exercise the Prepare framework; the input and output tensors are not used.
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* in_tensor = GetInput(context, node, 0);
TfLiteTensor* out_tensor = GetOutput(context, node, 0);
TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
return context->ResizeTensor(context, out_tensor, new_size);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
cancellation_data_.is_cancelled = true;
return kTfLiteOk;
};
return reg;
} | Base | 1 |
void test_base64_decode(void)
{
char buffer[16];
int len = mutt_b64_decode(buffer, encoded);
if (!TEST_CHECK(len == sizeof(clear) - 1))
{
TEST_MSG("Expected: %zu", sizeof(clear) - 1);
TEST_MSG("Actual : %zu", len);
}
buffer[len] = '\0';
if (!TEST_CHECK(strcmp(buffer, clear) == 0))
{
TEST_MSG("Expected: %s", clear);
TEST_MSG("Actual : %s", buffer);
}
} | Base | 1 |
int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData,
UINT32* pDstSize, UINT32 flags)
{
int status = -1;
BYTE descriptor;
wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize);
if (!stream)
return -1;
if (Stream_GetRemainingLength(stream) < 1)
goto fail;
Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */
if (descriptor == ZGFX_SEGMENTED_SINGLE)
{
if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream)))
goto fail;
*ppDstData = NULL;
if (zgfx->OutputCount > 0)
*ppDstData = (BYTE*) malloc(zgfx->OutputCount);
if (!*ppDstData)
goto fail;
*pDstSize = zgfx->OutputCount;
CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount);
}
else if (descriptor == ZGFX_SEGMENTED_MULTIPART)
{
UINT32 segmentSize;
UINT16 segmentNumber;
UINT16 segmentCount;
UINT32 uncompressedSize;
BYTE* pConcatenated;
if (Stream_GetRemainingLength(stream) < 6)
goto fail;
Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */
Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */
if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32))
goto fail;
pConcatenated = (BYTE*) malloc(uncompressedSize);
if (!pConcatenated)
goto fail;
*ppDstData = pConcatenated;
*pDstSize = uncompressedSize;
for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++)
{
if (Stream_GetRemainingLength(stream) < sizeof(UINT32))
goto fail;
Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */
if (!zgfx_decompress_segment(zgfx, stream, segmentSize))
goto fail;
CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount);
pConcatenated += zgfx->OutputCount;
}
}
else
{
goto fail;
}
status = 1;
fail:
Stream_Free(stream, FALSE);
return status;
} | Base | 1 |
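The multipart branch above trusts `uncompressedSize` and copies each segment's `OutputCount` bytes without checking the running total against the allocation. Assuming that unchecked copy is the labeled flaw, a self-contained sketch of the missing per-segment bound follows (all names are hypothetical).

```cpp
#include <cstdint>
#include <cstring>

// Append one decompressed segment into a fixed-capacity concatenation buffer,
// refusing any segment that would run past `capacity`.
static bool append_segment(uint8_t* dst, uint32_t capacity, uint32_t& used,
                           const uint8_t* segment, uint32_t segment_len) {
    if (used > capacity || segment_len > capacity - used)
        return false;                    // would overflow the destination
    std::memcpy(dst + used, segment, segment_len);
    used += segment_len;
    return true;
}
```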
TypedValue HHVM_FUNCTION(substr_compare,
const String& main_str,
const String& str,
int offset,
int length /* = INT_MAX */,
bool case_insensitivity /* = false */) {
int s1_len = main_str.size();
int s2_len = str.size();
if (length <= 0) {
raise_warning("The length must be greater than zero");
return make_tv<KindOfBoolean>(false);
}
if (offset < 0) {
offset = s1_len + offset;
if (offset < 0) offset = 0;
}
if (offset >= s1_len) {
raise_warning("The start position cannot exceed initial string length");
return make_tv<KindOfBoolean>(false);
}
int cmp_len = s1_len - offset;
if (cmp_len < s2_len) cmp_len = s2_len;
if (cmp_len > length) cmp_len = length;
const char *s1 = main_str.data();
if (case_insensitivity) {
return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len));
}
return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len));
} | Base | 1 |
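`substr_compare` above raises `cmp_len` to `s2_len` even when fewer than `s2_len` bytes remain in `main_str` after `offset`, which can read past the buffer. Assuming that is the labeled flaw, one conservative clamp is sketched below; it deliberately errs toward safety and may not match the original API's edge-case semantics exactly.

```cpp
// Sketch: never compare more bytes than both buffers actually hold.
int cmp_len = s1_len - offset;           // bytes available in main_str
if (cmp_len > length) cmp_len = length;  // honor the caller's limit
if (cmp_len > s2_len) cmp_len = s2_len;  // never read past str either
```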
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
TfLiteIntArray* output_shape = GetOutputShape(context, node);
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
scoped_output_shape(output_shape, TfLiteIntArrayFree);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Tensorflow's Reshape allows one of the shape components to have the
// special -1 value, meaning it will be calculated automatically based on the
// input. Here we calculate what that dimension should be so that the number
of output elements is the same as the number of input elements.
int num_input_elements = NumElements(input);
int num_output_elements = 1;
int stretch_dim = -1;
for (int i = 0; i < output_shape->size; ++i) {
int value = output_shape->data[i];
if (value == -1) {
TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
stretch_dim = i;
} else {
num_output_elements *= value;
}
}
if (stretch_dim != -1) {
output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
num_output_elements *= output_shape->data[stretch_dim];
}
TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
return context->ResizeTensor(context, output, scoped_output_shape.release());
} | Base | 1 |
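If the output shape above combines a `-1` stretch dimension with a zero-sized explicit dimension, `num_output_elements` is 0 and the division faults. Assuming that is the labeled weakness, the guard is one extra check:

```cpp
// Sketch: reject a zero product before dividing to derive the stretch dim.
if (stretch_dim != -1) {
  TF_LITE_ENSURE(context, num_output_elements != 0);  // avoid div-by-zero
  output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
  num_output_elements *= output_shape->data[stretch_dim];
}
```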
static void nodeConstruct(struct SaveNode* node, tr_variant const* v, bool sort_dicts)
{
node->isVisited = false;
node->childIndex = 0;
if (sort_dicts && tr_variantIsDict(v))
{
/* make node->sorted a sorted version of this dictionary */
size_t const n = v->val.l.count;
struct KeyIndex* tmp = tr_new(struct KeyIndex, n);
for (size_t i = 0; i < n; i++)
{
tmp[i].val = v->val.l.vals + i;
tmp[i].keystr = tr_quark_get_string(tmp[i].val->key, NULL);
}
qsort(tmp, n, sizeof(struct KeyIndex), compareKeyIndex);
tr_variantInitDict(&node->sorted, n);
for (size_t i = 0; i < n; ++i)
{
node->sorted.val.l.vals[i] = *tmp[i].val;
}
node->sorted.val.l.count = n;
tr_free(tmp);
node->v = &node->sorted;
}
else
{
node->v = v;
}
} | Variant | 0 |
UrlQuery::UrlQuery(const std::string& encoded_str) {
if (!encoded_str.empty()) {
// Split into key value pairs separated by '&'.
for (std::size_t i = 0; i != std::string::npos;) {
std::size_t j = encoded_str.find_first_of('&', i);
std::string kv;
if (j == std::string::npos) {
kv = encoded_str.substr(i);
i = std::string::npos;
} else {
kv = encoded_str.substr(i, j - i);
i = j + 1;
}
string_view key;
string_view value;
if (SplitKV(kv, '=', false, &key, &value)) {
parameters_.push_back({ DecodeUnsafe(key), DecodeUnsafe(value) });
}
}
}
} | Base | 1 |
inline void StringData::setSize(int len) {
assertx(!isImmutable() && !hasMultipleRefs());
assertx(len >= 0 && len <= capacity());
mutableData()[len] = 0;
m_lenAndHash = len;
assertx(m_hash == 0);
assertx(checkSane());
} | Base | 1 |
void Init(void)
{
for(int i = 0;i < 18;i++) {
X[i].Init();
M[i].Init();
}
} | Class | 2 |
void Phase2() final {
Local<Context> context_handle = Deref(context);
Context::Scope context_scope{context_handle};
Local<Value> key_inner = key->CopyInto();
Local<Object> object = Local<Object>::Cast(Deref(reference));
// Delete key before transferring in, potentially freeing up some v8 heap
Unmaybe(object->Delete(context_handle, key_inner));
Local<Value> val_inner = val->TransferIn();
did_set = Unmaybe(object->Set(context_handle, key_inner, val_inner));
} | Class | 2 |
void CUser::SetClientEncoding(const CString& s) {
m_sClientEncoding = s;
for (CClient* pClient : GetAllClients()) {
pClient->SetEncoding(s);
}
} | Class | 2 |
R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut32 i = 0;
RBinJavaAnnotation *annotation = NULL;
RBinJavaElementValuePair *evps = NULL;
ut64 offset = 0;
annotation = R_NEW0 (RBinJavaAnnotation);
if (!annotation) {
return NULL;
}
// (ut16) read and set annotation_value.type_idx;
annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
// (ut16) read and set annotation_value.num_element_value_pairs;
annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
annotation->element_value_pairs = r_list_newf (r_bin_java_element_pair_free);
// read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs
for (i = 0; i < annotation->num_element_value_pairs; i++) {
if (offset > sz) {
break;
}
evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset);
if (evps) {
offset += evps->size;
r_list_append (annotation->element_value_pairs, (void *) evps);
}
}
annotation->size = offset;
return annotation;
} | Class | 2 |
otError Commissioner::AddJoiner(const Mac::ExtAddress *aEui64, const char *aPskd, uint32_t aTimeout)
{
otError error = OT_ERROR_NO_BUFS;
VerifyOrExit(mState == OT_COMMISSIONER_STATE_ACTIVE, error = OT_ERROR_INVALID_STATE);
VerifyOrExit(strlen(aPskd) <= Dtls::kPskMaxLength, error = OT_ERROR_INVALID_ARGS);
RemoveJoiner(aEui64, 0); // remove immediately
for (Joiner *joiner = &mJoiners[0]; joiner < OT_ARRAY_END(mJoiners); joiner++)
{
if (joiner->mValid)
{
continue;
}
if (aEui64 != NULL)
{
joiner->mEui64 = *aEui64;
joiner->mAny = false;
}
else
{
joiner->mAny = true;
}
(void)strlcpy(joiner->mPsk, aPskd, sizeof(joiner->mPsk));
joiner->mValid = true;
joiner->mExpirationTime = TimerMilli::GetNow() + Time::SecToMsec(aTimeout);
UpdateJoinerExpirationTimer();
SendCommissionerSet();
otLogInfoMeshCoP("Added Joiner (%s, %s)", (aEui64 != NULL) ? aEui64->ToString().AsCString() : "*", aPskd);
ExitNow(error = OT_ERROR_NONE);
}
exit:
return error;
} | Base | 1 |
int CommandData::IsProcessFile(FileHeader &FileHead,bool *ExactMatch,int MatchType,
wchar *MatchedArg,uint MatchedArgSize)
{
if (MatchedArg!=NULL && MatchedArgSize>0)
*MatchedArg=0;
if (wcslen(FileHead.FileName)>=NM)
return 0;
bool Dir=FileHead.Dir;
if (ExclCheck(FileHead.FileName,Dir,false,true))
return 0;
#ifndef SFX_MODULE
if (TimeCheck(FileHead.mtime))
return 0;
if ((FileHead.FileAttr & ExclFileAttr)!=0 || InclAttrSet && (FileHead.FileAttr & InclFileAttr)==0)
return 0;
if (!Dir && SizeCheck(FileHead.UnpSize))
return 0;
#endif
wchar *ArgName;
FileArgs.Rewind();
for (int StringCount=1;(ArgName=FileArgs.GetString())!=NULL;StringCount++)
if (CmpName(ArgName,FileHead.FileName,MatchType))
{
if (ExactMatch!=NULL)
*ExactMatch=wcsicompc(ArgName,FileHead.FileName)==0;
if (MatchedArg!=NULL)
wcsncpyz(MatchedArg,ArgName,MatchedArgSize);
return StringCount;
}
return 0;
} | Base | 1 |
void AOClient::pktEditEvidence(AreaData* area, int argc, QStringList argv, AOPacket packet)
{
if (!checkEvidenceAccess(area))
return;
bool is_int = false;
int idx = argv[0].toInt(&is_int);
AreaData::Evidence evi = {argv[1], argv[2], argv[3]};
if (is_int && idx <= area->evidence().size() && idx >= 0) {
area->replaceEvidence(idx, evi);
}
sendEvidenceList(area);
} | Variant | 0 |
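The bounds check above accepts `idx == area->evidence().size()`, one past the last element. Assuming the off-by-one is the labeled flaw, the strict form is:

```cpp
// Sketch: use a strict upper bound so idx always names an existing element.
if (is_int && idx >= 0 && idx < area->evidence().size()) {
    area->replaceEvidence(idx, evi);
}
```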
const FieldID& activeUnionMemberId(const void* object, ptrdiff_t offset) {
return *reinterpret_cast<const FieldID*>(
offset + static_cast<const char*>(object));
} | Base | 1 |
void RemoteDevicePropertiesWidget::checkSaveable()
{
RemoteFsDevice::Details det=details();
modified=det!=orig;
saveable=!det.isEmpty();
if (saveable && Type_SambaAvahi==type->itemData(type->currentIndex()).toInt()) {
saveable=!smbAvahiName->text().trimmed().isEmpty();
}
emit updated();
} | Class | 2 |
template <class T> void testFeatTable(const T & table, const char * testName)
{
FeatureMap testFeatureMap;
dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T));
gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering);
if (!face) throw std::runtime_error("failed to load font");
bool readStatus = testFeatureMap.readFeats(*face);
testAssert("readFeats", readStatus);
fprintf(stderr, testName, NULL);
testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat);
for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++)
{
const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId);
testAssert("test feat\n", ref);
testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings);
testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label);
size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader)
- (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting);
for (size_t j = 0; j < table.m_defs[i].m_numFeatSettings; j++)
{
testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j),
table.m_settings[settingsIndex+j].m_label);
}
}
gr_face_destroy(face);
} | Base | 1 |
bool DefaultCertValidator::matchSubjectAltName(
X509* cert,
const std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>&
subject_alt_name_matchers) {
bssl::UniquePtr<GENERAL_NAMES> san_names(
static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));
if (san_names == nullptr) {
return false;
}
for (const GENERAL_NAME* general_name : san_names.get()) {
const std::string san = Utility::generalNameAsString(general_name);
for (auto& config_san_matcher : subject_alt_name_matchers) {
// For DNS SAN, if the StringMatcher type is exact, we have to follow DNS matching semantics.
if (general_name->type == GEN_DNS &&
config_san_matcher.matcher().match_pattern_case() ==
envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact
? Utility::dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san))
: config_san_matcher.match(san)) {
return true;
}
}
}
return false;
} | Base | 1 |
jas_matrix_t *jas_seq2d_create(int xstart, int ystart, int xend, int yend)
{
jas_matrix_t *matrix;
assert(xstart <= xend && ystart <= yend);
if (!(matrix = jas_matrix_create(yend - ystart, xend - xstart))) {
return 0;
}
matrix->xstart_ = xstart;
matrix->ystart_ = ystart;
matrix->xend_ = xend;
matrix->yend_ = yend;
return matrix;
} | Base | 1 |
static INLINE SIZE_T ntlm_av_pair_get_len(const NTLM_AV_PAIR* pAvPair)
{
UINT16 AvLen;
Data_Read_UINT16(&pAvPair->AvLen, AvLen);
return AvLen;
} | Base | 1 |
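The accessor above reads `AvLen` from a pair that may extend past the containing buffer. Assuming that is the labeled weakness, callers need a guard of roughly this shape; `cbAvPairList`, the remaining byte count, is hypothetical.

```cpp
// Sketch: confirm both the AV_PAIR header and its declared payload fit in the
// bytes that remain in the list buffer.
static bool ntlm_av_pair_check(const NTLM_AV_PAIR* pAvPair, size_t cbAvPairList)
{
    if (!pAvPair || cbAvPairList < sizeof(NTLM_AV_PAIR))
        return false;                    // header itself out of bounds
    return ntlm_av_pair_get_len(pAvPair) <= cbAvPairList - sizeof(NTLM_AV_PAIR);
}
```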
int Bind(const Node& node, int max_retry) override {
receiver_ = zmq_socket(context_, ZMQ_ROUTER);
CHECK(receiver_ != NULL)
<< "create receiver socket failed: " << zmq_strerror(errno);
int local = GetEnv("DMLC_LOCAL", 0);
std::string addr = local ? "ipc:///tmp/" : "tcp://*:";
int port = node.port;
unsigned seed = static_cast<unsigned>(time(NULL)+port);
for (int i = 0; i < max_retry+1; ++i) {
auto address = addr + std::to_string(port);
if (zmq_bind(receiver_, address.c_str()) == 0) break;
if (i == max_retry) {
port = -1;
} else {
port = 10000 + rand_r(&seed) % 40000;
}
}
return port;
} | Class | 2 |
R_API RBinJavaVerificationObj *r_bin_java_verification_info_from_type(RBinJavaObj *bin, R_BIN_JAVA_STACKMAP_TYPE type, ut32 value) {
RBinJavaVerificationObj *se = R_NEW0 (RBinJavaVerificationObj);
if (!se) {
return NULL;
}
se->tag = type;
if (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) {
se->info.obj_val_cp_idx = (ut16) value;
} else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) {
/*if (bin->offset_sz == 4) {
se->info.uninit_offset = value;
} else {
se->info.uninit_offset = (ut16) value;
}*/
se->info.uninit_offset = (ut16) value;
}
return se;
} | Class | 2 |
void CZNC::ForceEncoding() {
m_uiForceEncoding++;
#ifdef HAVE_ICU
for (Csock* pSock : GetManager()) {
if (pSock->GetEncoding().empty()) {
pSock->SetEncoding("UTF-8");
}
}
#endif
} | Class | 2 |
otError Commissioner::GeneratePskc(const char * aPassPhrase,
const char * aNetworkName,
const Mac::ExtendedPanId &aExtPanId,
Pskc & aPskc)
{
otError error = OT_ERROR_NONE;
const char *saltPrefix = "Thread";
uint8_t salt[OT_PBKDF2_SALT_MAX_LEN];
uint16_t saltLen = 0;
VerifyOrExit((strlen(aPassPhrase) >= OT_COMMISSIONING_PASSPHRASE_MIN_SIZE) &&
(strlen(aPassPhrase) <= OT_COMMISSIONING_PASSPHRASE_MAX_SIZE),
error = OT_ERROR_INVALID_ARGS);
memset(salt, 0, sizeof(salt));
memcpy(salt, saltPrefix, strlen(saltPrefix));
saltLen += static_cast<uint16_t>(strlen(saltPrefix));
memcpy(salt + saltLen, aExtPanId.m8, sizeof(aExtPanId));
saltLen += OT_EXT_PAN_ID_SIZE;
memcpy(salt + saltLen, aNetworkName, strlen(aNetworkName));
saltLen += static_cast<uint16_t>(strlen(aNetworkName));
otPbkdf2Cmac(reinterpret_cast<const uint8_t *>(aPassPhrase), static_cast<uint16_t>(strlen(aPassPhrase)),
reinterpret_cast<const uint8_t *>(salt), saltLen, 16384, OT_PSKC_MAX_SIZE, aPskc.m8);
exit:
return error;
} | Base | 1 |
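`GeneratePskc` above bounds the passphrase but not `aNetworkName` before the `memcpy` chain into the fixed `salt[]`. Assuming that is the labeled flaw, a length guard plus a compile-time capacity check is the obvious sketch; the constant names assume OpenThread's public headers.

```cpp
// Sketch: bound the network name so prefix + ext-PAN-ID + name provably fit.
VerifyOrExit(strlen(aNetworkName) <= OT_NETWORK_NAME_MAX_SIZE,
             error = OT_ERROR_INVALID_ARGS);
static_assert(sizeof("Thread") - 1 + OT_EXT_PAN_ID_SIZE + OT_NETWORK_NAME_MAX_SIZE
                  <= OT_PBKDF2_SALT_MAX_LEN,
              "salt parts must fit the salt buffer");
```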
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
if (mirror_pad_) {
const TfLiteMirrorPaddingParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (tf_options->mode !=
TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect) {
return absl::InvalidArgumentError(
"Only Reflective padding is supported for Mirror Pad operation.");
}
}
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
/*runtime_inputs=*/1, /*outputs=*/1));
RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
auto pad_tensor = tflite::GetInput(context, tflite_node, 1);
if (pad_tensor->dims->size != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor dimension: expected 2 dim, got ",
pad_tensor->dims->size, " dim"));
}
bool supported =
pad_tensor->dims->data[0] == 3 || pad_tensor->dims->data[0] == 4;
if (!supported || pad_tensor->dims->data[1] != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor shape: expected 4x2 or 3x2, got ",
pad_tensor->dims->data[0], "x", pad_tensor->dims->data[1]));
}
return absl::OkStatus();
} | Base | 1 |
CFontFileBase(char *sFile, int nLen, bool bFreeFileData)
{
m_sFileData = m_sFile = (unsigned char *)sFile;
m_nLen = nLen;
m_bFreeFileData = bFreeFileData;
m_nPos = 0;
} | Base | 1 |
TPM_RC tpm_kdfa(TSS2_SYS_CONTEXT *sapi_context, TPMI_ALG_HASH hashAlg,
TPM2B *key, char *label, TPM2B *contextU, TPM2B *contextV, UINT16 bits,
TPM2B_MAX_BUFFER *resultKey )
{
TPM2B_DIGEST tmpResult;
TPM2B_DIGEST tpm2bLabel, tpm2bBits, tpm2b_i_2;
UINT8 *tpm2bBitsPtr = &tpm2bBits.t.buffer[0];
UINT8 *tpm2b_i_2Ptr = &tpm2b_i_2.t.buffer[0];
TPM2B_DIGEST *bufferList[8];
UINT32 bitsSwizzled, i_Swizzled;
TPM_RC rval;
int i, j;
UINT16 bytes = bits / 8;
resultKey->t .size = 0;
tpm2b_i_2.t.size = 4;
tpm2bBits.t.size = 4;
bitsSwizzled = string_bytes_endian_convert_32( bits );
*(UINT32 *)tpm2bBitsPtr = bitsSwizzled;
for(i = 0; label[i] != 0 ;i++ );
tpm2bLabel.t.size = i+1;
for( i = 0; i < tpm2bLabel.t.size; i++ )
{
tpm2bLabel.t.buffer[i] = label[i];
}
resultKey->t.size = 0;
i = 1;
while( resultKey->t.size < bytes )
{
// Inner loop
i_Swizzled = string_bytes_endian_convert_32( i );
*(UINT32 *)tpm2b_i_2Ptr = i_Swizzled;
j = 0;
bufferList[j++] = (TPM2B_DIGEST *)&(tpm2b_i_2.b);
bufferList[j++] = (TPM2B_DIGEST *)&(tpm2bLabel.b);
bufferList[j++] = (TPM2B_DIGEST *)contextU;
bufferList[j++] = (TPM2B_DIGEST *)contextV;
bufferList[j++] = (TPM2B_DIGEST *)&(tpm2bBits.b);
bufferList[j++] = (TPM2B_DIGEST *)0;
rval = tpm_hmac(sapi_context, hashAlg, key, (TPM2B **)&( bufferList[0] ), &tmpResult );
if( rval != TPM_RC_SUCCESS )
{
return( rval );
}
bool res = string_bytes_concat_buffer(resultKey, &(tmpResult.b));
if (!res) {
return TSS2_SYS_RC_BAD_VALUE;
}
}
// Truncate the result to the desired size.
resultKey->t.size = bytes;
return TPM_RC_SUCCESS;
} | Class | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const int num_elements = NumElements(input);
switch (input->type) {
case kTfLiteInt64:
memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t));
break;
case kTfLiteInt32:
memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t));
break;
case kTfLiteFloat32:
memset(GetTensorData<float>(output), 0, num_elements * sizeof(float));
break;
default:
context->ReportError(context,
"ZerosLike only currently supports int64, int32, "
"and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
ALWAYS_INLINE String serialize_impl(const Variant& value,
const SerializeOptions& opts) {
switch (value.getType()) {
case KindOfClass:
case KindOfLazyClass:
case KindOfPersistentString:
case KindOfString: {
auto const str =
isStringType(value.getType()) ? value.getStringData() :
isClassType(value.getType()) ? classToStringHelper(value.toClassVal()) :
lazyClassToStringHelper(value.toLazyClassVal());
auto const size = str->size();
if (size >= RuntimeOption::MaxSerializedStringSize) {
throw Exception("Size of serialized string (%d) exceeds max", size);
}
StringBuffer sb;
sb.append("s:");
sb.append(size);
sb.append(":\"");
sb.append(str->data(), size);
sb.append("\";");
return sb.detach();
}
case KindOfResource:
return s_Res;
case KindOfUninit:
case KindOfNull:
case KindOfBoolean:
case KindOfInt64:
case KindOfFunc:
case KindOfPersistentVec:
case KindOfVec:
case KindOfPersistentDict:
case KindOfDict:
case KindOfPersistentKeyset:
case KindOfKeyset:
case KindOfPersistentDArray:
case KindOfDArray:
case KindOfPersistentVArray:
case KindOfVArray:
case KindOfDouble:
case KindOfObject:
case KindOfClsMeth:
case KindOfRClsMeth:
case KindOfRFunc:
case KindOfRecord:
break;
}
VariableSerializer vs(VariableSerializer::Type::Serialize);
if (opts.keepDVArrays) vs.keepDVArrays();
if (opts.forcePHPArrays) vs.setForcePHPArrays();
if (opts.warnOnHackArrays) vs.setHackWarn();
if (opts.warnOnPHPArrays) vs.setPHPWarn();
if (opts.ignoreLateInit) vs.setIgnoreLateInit();
if (opts.serializeProvenanceAndLegacy) vs.setSerializeProvenanceAndLegacy();
// Keep the count so recursive calls to serialize() embed references properly.
return vs.serialize(value, true, true);
} | Base | 1 |
selReadStream(FILE *fp)
{
char *selname;
char linebuf[L_BUF_SIZE];
l_int32 sy, sx, cy, cx, i, j, version, ignore;
SEL *sel;
PROCNAME("selReadStream");
if (!fp)
return (SEL *)ERROR_PTR("stream not defined", procName, NULL);
if (fscanf(fp, " Sel Version %d\n", &version) != 1)
return (SEL *)ERROR_PTR("not a sel file", procName, NULL);
if (version != SEL_VERSION_NUMBER)
return (SEL *)ERROR_PTR("invalid sel version", procName, NULL);
if (fgets(linebuf, L_BUF_SIZE, fp) == NULL)
return (SEL *)ERROR_PTR("error reading into linebuf", procName, NULL);
selname = stringNew(linebuf);
sscanf(linebuf, " ------ %s ------", selname);
if (fscanf(fp, " sy = %d, sx = %d, cy = %d, cx = %d\n",
&sy, &sx, &cy, &cx) != 4) {
LEPT_FREE(selname);
return (SEL *)ERROR_PTR("dimensions not read", procName, NULL);
}
if ((sel = selCreate(sy, sx, selname)) == NULL) {
LEPT_FREE(selname);
return (SEL *)ERROR_PTR("sel not made", procName, NULL);
}
selSetOrigin(sel, cy, cx);
for (i = 0; i < sy; i++) {
ignore = fscanf(fp, " ");
for (j = 0; j < sx; j++)
ignore = fscanf(fp, "%1d", &sel->data[i][j]);
ignore = fscanf(fp, "\n");
}
ignore = fscanf(fp, "\n");
LEPT_FREE(selname);
return sel;
} | Base | 1 |
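`selReadStream` above feeds `sy`, `sx`, `cy`, `cx` straight from `fscanf` into `selCreate` and `selSetOrigin`. Assuming the missing validation is the labeled flaw, a sanity check after the parse is sketched here.

```cpp
// Sketch: reject non-positive dimensions and an origin outside the sel.
if (sy <= 0 || sx <= 0 || cy < 0 || cx < 0 || cy >= sy || cx >= sx) {
    LEPT_FREE(selname);
    return (SEL *)ERROR_PTR("invalid sel parameters", procName, NULL);
}
```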
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
TensorReference tensor_ref(*tensor); // This will call buf_->Ref()
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
dlm_tensor->dl_tensor.ctx = GetDlContext(h, status);
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status);
dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status);
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
shape_arr->resize(ndim);
stride_arr->resize(ndim, 1);
for (int i = 0; i < ndim; i++) {
(*shape_arr)[i] = tensor->dim_size(i);
}
for (int i = ndim - 2; i >= 0; --i) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
dlm_tensor->dl_tensor.shape = &(*shape_arr)[0];
// There are two ways to represent compact row-major data
// 1) nullptr indicates tensor is compact and row-majored.
// 2) fill in the strides array as the real case for compact row-major data.
// Here we choose option 2, since some frameworks didn't handle the strides
// argument properly.
dlm_tensor->dl_tensor.strides = &(*stride_arr)[0];
dlm_tensor->dl_tensor.byte_offset =
0; // TF doesn't handle the strides and byte_offsets here
return static_cast<void*>(dlm_tensor);
} | Class | 2 |
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs,
const OpDef& op_def) {
FullTypeDef ft;
ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < op_def.output_arg_size(); i++) {
auto* t = ft.add_args();
*t = op_def.output_arg(i).experimental_full_type();
// Resolve dependent types. The convention for op registrations is to use
// attributes as type variables.
// See https://www.tensorflow.org/guide/create_op#type_polymorphism.
// Once the op signature can be defined entirely in FullType, this
// convention can be deprecated.
//
// Note: While this code performs some basic verifications, it generally
// assumes consistent op defs and attributes. If more complete
// verifications are needed, they should be done by separately, and in a
// way that can be reused for type inference.
for (int j = 0; j < t->args_size(); j++) {
auto* arg = t->mutable_args(i);
if (arg->type_id() == TFT_VAR) {
const auto* attr = attrs.Find(arg->s());
if (attr == nullptr) {
return Status(
error::INVALID_ARGUMENT,
absl::StrCat("Could not find an attribute for key ", arg->s()));
}
if (attr->value_case() == AttrValue::kList) {
const auto& attr_list = attr->list();
arg->set_type_id(TFT_PRODUCT);
for (int i = 0; i < attr_list.type_size(); i++) {
map_dtype_to_tensor(attr_list.type(i), arg->add_args());
}
} else if (attr->value_case() == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), arg);
} else {
return Status(error::UNIMPLEMENTED,
absl::StrCat("unknown attribute type",
attrs.DebugString(), " key=", arg->s()));
}
arg->clear_s();
}
}
}
return ft;
} | Base | 1 |
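Inside the row above, the loop over `j` fetches `t->mutable_args(i)`, and the innermost loop reuses `i`, shadowing the outer index. Assuming the index mix-up is the labeled flaw, the consistent indexing is:

```cpp
// Sketch: index args by the loop variable that walks them, and use a fresh
// name for the innermost counter instead of shadowing i.
for (int j = 0; j < t->args_size(); j++) {
  auto* arg = t->mutable_args(j);                      // was mutable_args(i)
  // ... resolve TFT_VAR as before ...
  for (int k = 0; k < attr_list.type_size(); k++) {    // was `int i`, shadowing
    map_dtype_to_tensor(attr_list.type(k), arg->add_args());
  }
}
```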
void HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data) {
if (data.empty()) {
return;
}
if (!header.empty()) {
header.append(",", 1);
}
header.append(data.data(), data.size());
} | Class | 2 |
void FilterManager::maybeEndDecode(bool end_stream) {
ASSERT(!state_.remote_complete_);
state_.remote_complete_ = end_stream;
if (end_stream) {
stream_info_.downstreamTiming().onLastDownstreamRxByteReceived(dispatcher().timeSource());
ENVOY_STREAM_LOG(debug, "request end stream", *this);
}
} | Variant | 0 |
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
TensorReference tensor_ref(*tensor); // This will call buf_->Ref()
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
dlm_tensor->dl_tensor.ctx = GetDlContext(h, status);
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status);
dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status);
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
shape_arr->resize(ndim);
stride_arr->resize(ndim, 1);
for (int i = 0; i < ndim; i++) {
(*shape_arr)[i] = tensor->dim_size(i);
}
for (int i = ndim - 2; i >= 0; --i) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
dlm_tensor->dl_tensor.shape = &(*shape_arr)[0];
// There are two ways to represent compact row-major data
// 1) nullptr indicates tensor is compact and row-majored.
// 2) fill in the strides array as the real case for compact row-major data.
// Here we choose option 2, since some frameworks didn't handle the strides
// argument properly.
dlm_tensor->dl_tensor.strides = &(*stride_arr)[0];
dlm_tensor->dl_tensor.byte_offset =
0; // TF doesn't handle the strides and byte_offsets here
return static_cast<void*>(dlm_tensor);
} | Base | 1 |
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameURIMatched) {
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.MergeFrom(TestUtility::createRegexMatcher("spiffe://lyft.com/.*-team"));
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
subject_alt_name_matchers;
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers));
} | Base | 1 |
char *Hub::inflate(char *data, size_t &length) {
dynamicInflationBuffer.clear();
inflationStream.next_in = (Bytef *) data;
inflationStream.avail_in = length;
int err;
do {
inflationStream.next_out = (Bytef *) inflationBuffer;
inflationStream.avail_out = LARGE_BUFFER_SIZE;
err = ::inflate(&inflationStream, Z_FINISH);
if (!inflationStream.avail_in) {
break;
}
dynamicInflationBuffer.append(inflationBuffer, LARGE_BUFFER_SIZE - inflationStream.avail_out);
} while (err == Z_BUF_ERROR);
inflateReset(&inflationStream);
if (err != Z_BUF_ERROR && err != Z_OK) {
length = 0;
return nullptr;
}
if (dynamicInflationBuffer.length()) {
dynamicInflationBuffer.append(inflationBuffer, LARGE_BUFFER_SIZE - inflationStream.avail_out);
length = dynamicInflationBuffer.length();
return (char *) dynamicInflationBuffer.data();
}
length = LARGE_BUFFER_SIZE - inflationStream.avail_out;
return inflationBuffer;
} | Class | 2 |
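`Hub::inflate` above grows `dynamicInflationBuffer` without any ceiling, so a small compressed frame can expand without bound. Assuming that is the labeled weakness, a capped loop is sketched below; `maxPayload` is a hypothetical limit.

```cpp
// Sketch: abort decompression once the plaintext exceeds a fixed budget.
const size_t maxPayload = 16 * 1024 * 1024;   // hypothetical cap
do {
    inflationStream.next_out = (Bytef *) inflationBuffer;
    inflationStream.avail_out = LARGE_BUFFER_SIZE;
    err = ::inflate(&inflationStream, Z_FINISH);
    if (!inflationStream.avail_in) {
        break;
    }
    dynamicInflationBuffer.append(inflationBuffer,
                                  LARGE_BUFFER_SIZE - inflationStream.avail_out);
    if (dynamicInflationBuffer.length() > maxPayload) {
        inflateReset(&inflationStream);
        length = 0;
        return nullptr;                       // refuse decompression bombs
    }
} while (err == Z_BUF_ERROR);
```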
const String& setSize(int len) {
assertx(m_str);
m_str->setSize(len);
return *this;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue);
int resource_id = input_resource_id_tensor->data.i32[0];
auto& resources = subgraph->resources();
resource::CreateResourceVariableIfNotAvailable(&resources, resource_id);
auto* variable = resource::GetResourceVariable(&resources, resource_id);
TF_LITE_ENSURE(context, variable != nullptr);
variable->AssignFrom(input_value_tensor);
return kTfLiteOk;
} | Base | 1 |
int64_t LineBasedFrameDecoder::findEndOfLine(IOBufQueue& buf) {
Cursor c(buf.front());
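// NOTE: the extra read<char>() in the '\r' branch is not counted against
// 'i', so a later iteration can attempt a read past the end of the chain
// even while i < buf.chainLength().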
for (uint32_t i = 0; i < maxLength_ && i < buf.chainLength(); i++) {
auto b = c.read<char>();
if (b == '\n' && terminatorType_ != TerminatorType::CARRIAGENEWLINE) {
return i;
} else if (terminatorType_ != TerminatorType::NEWLINE &&
b == '\r' && !c.isAtEnd() && c.read<char>() == '\n') {
return i;
}
}
return -1;
} | Base | 1 |
Http::FilterMetadataStatus Context::onResponseMetadata() {
if (!wasm_->onResponseMetadata_) {
return Http::FilterMetadataStatus::Continue;
}
if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) {
return Http::FilterMetadataStatus::Continue;
}
return Http::FilterMetadataStatus::Continue; // This is currently the only return code.
} | Base | 1 |
std::string encodeBase64(const std::string& input) {
using namespace boost::archive::iterators;
using b64it = base64_from_binary<transform_width<const char*, 6, 8>>;
auto data = input.data();
std::string encoded(b64it(data), b64it(data + (input.length())));
encoded.append((3 - (input.length() % 3)) % 3, '=');
return encoded;
} | Base | 1 |
int CephxSessionHandler::_calc_signature(Message *m, uint64_t *psig)
{
const ceph_msg_header& header = m->get_header();
const ceph_msg_footer& footer = m->get_footer();
// optimized signature calculation
// - avoid temporarily-allocated buffers from encode_encrypt[_enc_bl]
// - skip the leading 4 byte wrapper from encode_encrypt
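// NOTE: the signature input consists of CRC32C checksums of the message
// rather than the message bytes themselves, so signature strength is
// bounded by the collision resistance of those CRCs.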
struct {
__u8 v;
__le64 magic;
__le32 len;
__le32 header_crc;
__le32 front_crc;
__le32 middle_crc;
__le32 data_crc;
} __attribute__ ((packed)) sigblock = {
1, mswab(AUTH_ENC_MAGIC), mswab<uint32_t>(4*4),
mswab<uint32_t>(header.crc), mswab<uint32_t>(footer.front_crc),
mswab<uint32_t>(footer.middle_crc), mswab<uint32_t>(footer.data_crc)
};
char exp_buf[CryptoKey::get_max_outbuf_size(sizeof(sigblock))];
try {
const CryptoKey::in_slice_t in {
sizeof(sigblock),
reinterpret_cast<const unsigned char*>(&sigblock)
};
const CryptoKey::out_slice_t out {
sizeof(exp_buf),
reinterpret_cast<unsigned char*>(&exp_buf)
};
key.encrypt(cct, in, out);
} catch (std::exception& e) {
lderr(cct) << __func__ << " failed to encrypt signature block" << dendl;
return -1;
}
*psig = *reinterpret_cast<__le64*>(exp_buf);
ldout(cct, 10) << __func__ << " seq " << m->get_seq()
<< " front_crc_ = " << footer.front_crc
<< " middle_crc = " << footer.middle_crc
<< " data_crc = " << footer.data_crc
<< " sig = " << *psig
<< dendl;
return 0;
} | Class | 2 |
MONGO_EXPORT void *bson_malloc( int size ) {
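    /* NOTE: 'size' is a signed int, so a negative or integer-overflowed
       value computed from untrusted document lengths reaches the allocator
       unchecked. */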
void *p;
p = bson_malloc_func( size );
bson_fatal_msg( !!p, "malloc() failed" );
return p;
} | Base | 1 |
void RemoteDevicePropertiesWidget::setType()
{
if (Type_SshFs==type->itemData(type->currentIndex()).toInt() && 0==sshPort->value()) {
sshPort->setValue(22);
}
if (Type_Samba==type->itemData(type->currentIndex()).toInt() && 0==smbPort->value()) {
smbPort->setValue(445);
}
} | Base | 1 |
void Filter::onUpstreamEvent(Network::ConnectionEvent event) {
// Update the connecting flag before processing the event because we may start a new connection
// attempt in initializeUpstreamConnection.
bool connecting = connecting_;
connecting_ = false;
if (event == Network::ConnectionEvent::RemoteClose ||
event == Network::ConnectionEvent::LocalClose) {
upstream_.reset();
disableIdleTimer();
if (connecting) {
if (event == Network::ConnectionEvent::RemoteClose) {
getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure);
read_callbacks_->upstreamHost()->outlierDetector().putResult(
Upstream::Outlier::Result::LocalOriginConnectFailed);
}
initializeUpstreamConnection();
} else {
if (read_callbacks_->connection().state() == Network::Connection::State::Open) {
read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);
}
}
}
} | Variant | 0 |
void ComparisonQuantized(const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output, bool requires_broadcast) {
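// NOTE: only kTfLiteUInt8/kTfLiteInt8 inputs are handled; any other type
// silently skips the computation and leaves 'output' unwritten.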
if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) {
auto input1_offset = -input1->params.zero_point;
auto input2_offset = -input2->params.zero_point;
const int left_shift = 8;
int32 input1_multiplier;
int input1_shift;
QuantizeMultiplierSmallerThanOneExp(input1->params.scale,
&input1_multiplier, &input1_shift);
int32 input2_multiplier;
int input2_shift;
QuantizeMultiplierSmallerThanOneExp(input2->params.scale,
&input2_multiplier, &input2_shift);
ComparisonParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = input2_shift;
if (requires_broadcast) {
reference_ops::BroadcastComparison4DSlowWithScaling<input_dtype, opname>(
op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1),
GetTensorShape(input2), GetTensorData<input_dtype>(input2),
GetTensorShape(output), GetTensorData<bool>(output));
} else {
reference_ops::ComparisonWithScaling<input_dtype, opname>(
op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1),
GetTensorShape(input2), GetTensorData<input_dtype>(input2),
GetTensorShape(output), GetTensorData<bool>(output));
}
}
} | Class | 2 |
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
const TfLiteNode* node, int index) {
const bool use_tensor = index < node->inputs->size &&
node->inputs->data[index] != kTfLiteOptionalTensor;
if (use_tensor) {
return GetMutableInput(context, node, index);
}
return nullptr;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
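// NOTE: the GetInput()/GetOutput() results are dereferenced without
// nullptr checks (the GetInputSafe/GetOutputSafe variants would report a
// malformed model instead of crashing).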
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
AP4_AvccAtom::AP4_AvccAtom(AP4_UI32 size, const AP4_UI08* payload) :
AP4_Atom(AP4_ATOM_TYPE_AVCC, size)
{
// make a copy of our configuration bytes
unsigned int payload_size = size-AP4_ATOM_HEADER_SIZE;
m_RawBytes.SetData(payload, payload_size);
// parse the payload
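// NOTE: 'cursor' is advanced using lengths read from the payload itself,
// with no check against payload_size, so a malformed atom can drive the
// reads below past the end of the buffer.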
m_ConfigurationVersion = payload[0];
m_Profile = payload[1];
m_ProfileCompatibility = payload[2];
m_Level = payload[3];
m_NaluLengthSize = 1+(payload[4]&3);
AP4_UI08 num_seq_params = payload[5]&31;
m_SequenceParameters.EnsureCapacity(num_seq_params);
unsigned int cursor = 6;
for (unsigned int i=0; i<num_seq_params; i++) {
m_SequenceParameters.Append(AP4_DataBuffer());
AP4_UI16 param_length = AP4_BytesToInt16BE(&payload[cursor]);
m_SequenceParameters[i].SetData(&payload[cursor]+2, param_length);
cursor += 2+param_length;
}
AP4_UI08 num_pic_params = payload[cursor++];
m_PictureParameters.EnsureCapacity(num_pic_params);
for (unsigned int i=0; i<num_pic_params; i++) {
m_PictureParameters.Append(AP4_DataBuffer());
AP4_UI16 param_length = AP4_BytesToInt16BE(&payload[cursor]);
m_PictureParameters[i].SetData(&payload[cursor]+2, param_length);
cursor += 2+param_length;
}
} | Base | 1 |
R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp) {
ut64 sz = 0;
if (evp == NULL) {
return sz;
}
// evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
// evp->value = r_bin_java_element_value_new (bin, offset+2);
if (evp->value) {
sz += r_bin_java_element_value_calc_size (evp->value);
}
return sz;
} | Class | 2 |
inline int StringData::size() const { return m_len; } | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
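// NOTE: 'input' and 'output' are used without nullptr checks on the
// GetInput()/GetOutput() results.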
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices,
int level, int prev_idx, int* src_data_ptr,
T* dest_data) {
if (level == indices.size()) {
int orig_rank = dense_shape_.size();
std::vector<int> orig_idx;
orig_idx.resize(orig_rank);
int i = 0;
for (; i < orig_idx.size(); i++) {
int orig_dim = traversal_order_[i];
orig_idx[orig_dim] = indices[i];
}
for (; i < indices.size(); i++) {
const int block_idx = traversal_order_[i] - orig_rank;
const int orig_dim = block_map_[block_idx];
orig_idx[orig_dim] =
orig_idx[orig_dim] * block_size_[block_idx] + indices[i];
}
dest_data[GetFlattenedIndex(orig_idx, dense_shape_)] =
src_data[*src_data_ptr];
*src_data_ptr = *src_data_ptr + 1;
return;
}
const int metadata_idx = 2 * level;
const int shape_of_level = dim_metadata_[metadata_idx][0];
if (format_[level] == kTfLiteDimDense) {
for (int i = 0; i < shape_of_level; i++) {
indices[level] = i;
Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i,
src_data_ptr, dest_data);
}
} else {
const auto& array_segments = dim_metadata_[metadata_idx];
const auto& array_indices = dim_metadata_[metadata_idx + 1];
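// NOTE: array_segments and array_indices come from model-supplied sparsity
// metadata and are used as loop bounds and indices without range
// validation, so corrupt metadata can index outside the buffers.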
for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1];
i++) {
indices[level] = array_indices[i];
Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data);
}
}
} | Base | 1 |
SetRunner(
ReferenceHandle& that,
Local<Value> key_handle,
Local<Value> val_handle,
MaybeLocal<Object> maybe_options
) :
key{ExternalCopy::CopyIfPrimitive(key_handle)},
val{TransferOut(val_handle, TransferOptions{maybe_options})},
context{that.context},
reference{that.reference} {
that.CheckDisposed();
if (!key) {
throw RuntimeTypeError("Invalid `key`");
}
} | Class | 2 |
CAMLprim value caml_blit_string(value s1, value ofs1, value s2, value ofs2,
value n)
{
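  /* NOTE: no bounds checking is performed here; ofs1, ofs2 and n are
     trusted to lie within s1 and s2, so bad values reach memmove directly. */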
memmove(&Byte(s2, Long_val(ofs2)), &Byte(s1, Long_val(ofs1)), Int_val(n));
return Val_unit;
} | Class | 2 |
void ServerSecurityFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
options->addOption("--server.harden",
"lock down REST APIs that reveal version information or server "
"internals for non-admin users",
new BooleanParameter(&_hardenedRestApi))
.setIntroducedIn(30500);
options->addOption("--foxx.api", "enables Foxx management REST APIs",
new BooleanParameter(&_enableFoxxApi),
arangodb::options::makeFlags(
arangodb::options::Flags::DefaultNoComponents,
arangodb::options::Flags::OnCoordinator,
arangodb::options::Flags::OnSingle))
.setIntroducedIn(30500);
options->addOption("--foxx.store", "enables Foxx store in web interface",
new BooleanParameter(&_enableFoxxStore),
arangodb::options::makeFlags(
arangodb::options::Flags::DefaultNoComponents,
arangodb::options::Flags::OnCoordinator,
arangodb::options::Flags::OnSingle))
.setIntroducedIn(30500);
} | Base | 1 |
int DummyOutStream::length()
{
flush();
return offset;
} | Base | 1 |
TfLiteStatus EvalHashtable(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, node->user_data != nullptr);
const auto* params =
reinterpret_cast<const TfLiteHashtableParams*>(node->user_data);
// The resource id is generated based on the given table name.
const int resource_id = std::hash<std::string>{}(params->table_name);
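// NOTE: std::hash of the table name is used directly as the resource id,
// so two distinct table names can collide onto the same resource entry.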
TfLiteTensor* resource_handle_tensor =
GetOutput(context, node, kResourceHandleTensor);
auto* resource_handle_data =
GetTensorData<std::int32_t>(resource_handle_tensor);
resource_handle_data[0] = resource_id;
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
resource::CreateHashtableResourceIfNotAvailable(
&resources, resource_id, params->key_dtype, params->value_dtype);
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
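// NOTE: the GetOutput()/GetInput() results are passed to FillDiagHelper
// without nullptr checks or shape validation.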
FillDiagHelper(input, diag, output);
return kTfLiteOk;
} | Base | 1 |
int LibRaw::subtract_black()
{
CHECK_ORDER_LOW(LIBRAW_PROGRESS_RAW2_IMAGE);
try {
if(!is_phaseone_compressed() && (C.cblack[0] || C.cblack[1] || C.cblack[2] || C.cblack[3]))
{
#define BAYERC(row,col,c) imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][c]
int cblk[4],i;
for(i=0;i<4;i++)
cblk[i] = C.cblack[i];
int size = S.iheight * S.iwidth;
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define CLIP(x) LIM(x,0,65535)
for(i=0; i< size*4; i++)
{
int val = imgdata.image[0][i];
val -= cblk[i & 3];
imgdata.image[0][i] = CLIP(val);
if(C.data_maximum < val) C.data_maximum = val;
}
#undef MIN
#undef MAX
#undef LIM
#undef CLIP
C.maximum -= C.black;
ZERO(C.cblack);
C.black = 0;
#undef BAYERC
}
else
{
// Nothing to do: the maximum is already calculated and the black level
// is 0, so only the per-channel maximum needs to be computed.
int idx;
ushort *p = (ushort*)imgdata.image;
C.data_maximum = 0;
for(idx=0;idx<S.iheight*S.iwidth*4;idx++)
if(C.data_maximum < p[idx]) C.data_maximum = p[idx];
}
return 0;
}
catch ( LibRaw_exceptions err) {
EXCEPTION_HANDLER(err);
}
} | Class | 2 |
friend bool operator==(const TensorKey& t1, const TensorKey& t2) {
if (t1.dtype() != t2.dtype() || t1.shape() != t2.shape()) {
return false;
}
if (DataTypeCanUseMemcpy(t1.dtype())) {
return t1.tensor_data() == t2.tensor_data();
}
if (t1.dtype() == DT_STRING) {
const auto s1 = t1.unaligned_flat<tstring>();
const auto s2 = t2.unaligned_flat<tstring>();
for (int64_t i = 0, n = t1.NumElements(); i < n; ++i) {
if (TF_PREDICT_FALSE(s1(i) != s2(i))) {
return false;
}
}
return true;
}
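// NOTE: dtypes that are neither memcpy-comparable nor DT_STRING fall
// through to this 'return false', so such keys never compare equal, even
// to themselves.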
return false;
} | Base | 1 |
void RemoteFsDevice::load()
{
if (RemoteFsDevice::constSambaAvahiProtocol==details.url.scheme()) {
// Start Avahi listener...
Avahi::self();
QUrlQuery q(details.url);
if (q.hasQueryItem(constServiceNameQuery)) {
details.serviceName=q.queryItemValue(constServiceNameQuery);
}
if (!details.serviceName.isEmpty()) {
AvahiService *srv=Avahi::self()->getService(details.serviceName);
if (!srv || srv->getHost().isEmpty()) {
sub=tr("Not Available");
} else {
sub=tr("Available");
}
}
connect(Avahi::self(), SIGNAL(serviceAdded(QString)), SLOT(serviceAdded(QString)));
connect(Avahi::self(), SIGNAL(serviceRemoved(QString)), SLOT(serviceRemoved(QString)));
}
if (isConnected()) {
setAudioFolder();
readOpts(settingsFileName(), opts, true);
rescan(false); // Read from cache if we have it!
}
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* cond_tensor =
GetInput(context, node, kInputConditionTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
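// NOTE: cond_tensor's type is never verified to be kTfLiteBool before the
// GetTensorData<bool>() call below, and the scalar-rank check happens only
// after the output has already been resized.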
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, cond_tensor, output));
}
TfLiteIntArray* dims = cond_tensor->dims;
if (dims->size == 0) {
// Scalar tensors are not supported.
TF_LITE_KERNEL_LOG(context, "Where op requires condition w/ rank > 0");
return kTfLiteError;
}
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<bool>(cond_tensor),
GetTensorData<int64_t>(output));
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
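// NOTE: the GetInput()/GetOutput() results are dereferenced without
// nullptr checks, and the quantized path divides by output->params.scale
// without guarding against a zero scale.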
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const bool requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
double real_multiplier =
input1->params.scale * input2->params.scale / output->params.scale;
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
const char *enc_untrusted_inet_ntop(int af, const void *src, char *dst,
socklen_t size) {
if (!src || !dst) {
errno = EFAULT;
return nullptr;
}
size_t src_size = 0;
if (af == AF_INET) {
src_size = sizeof(struct in_addr);
} else if (af == AF_INET6) {
src_size = sizeof(struct in6_addr);
} else {
errno = EAFNOSUPPORT;
return nullptr;
}
MessageWriter input;
input.Push<int>(TokLinuxAfFamily(af));
input.PushByReference(Extent{reinterpret_cast<const char *>(src), src_size});
input.Push(size);
MessageReader output;
const auto status = NonSystemCallDispatcher(
::asylo::host_call::kInetNtopHandler, &input, &output);
CheckStatusAndParamCount(status, output, "enc_untrusted_inet_ntop", 2);
auto result = output.next();
int klinux_errno = output.next<int>();
if (result.empty()) {
errno = FromkLinuxErrorNumber(klinux_errno);
return nullptr;
}
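// NOTE: the copy length below is min(size, INET6_ADDRSTRLEN) and ignores
// result.size(); if the host returned fewer bytes, memcpy reads past the
// end of the returned buffer.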
memcpy(dst, result.data(),
std::min(static_cast<size_t>(size),
static_cast<size_t>(INET6_ADDRSTRLEN)));
return dst;
} | Base | 1 |