Columns: code (string, lengths 12 to 2.05k), label_name (string, 5 classes), label (int64, 0 to 4)
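Each row pairs one flattened code sample (code) with a categorical label (label_name) and its integer encoding (label). As a minimal sketch of reading rows with this schema, assuming the dump is exposed as a Hugging Face dataset (the dataset path below is a hypothetical placeholder, not the actual source):

    # Hypothetical loader sketch; "user/code-label-dump" is a placeholder path.
    from datasets import load_dataset

    ds = load_dataset("user/code-label-dump", split="train")
    for row in ds.select(range(3)):
        snippet = row["code"]       # string, 12 to 2.05k characters
        name = row["label_name"]    # one of 5 classes, e.g. "Base", "Class", "Variant"
        label = row["label"]        # int64 in the range 0 to 4
        print(label, name, snippet[:60])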
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); /* Reinterpret the opaque data provided by the user. */ OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; switch (type) { case kTfLiteFloat32: case kTfLiteInt32: break; default: context->ReportError(context, "Type '%s' is not supported by floor_div.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
Base
1
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
Base
1
TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, bool (*func)(bool, bool)) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>( GetTensorShape(input1), GetTensorData<bool>(input1), GetTensorShape(input2), GetTensorData<bool>(input2), GetTensorShape(output), GetTensorData<bool>(output), func); } else { reference_ops::BinaryFunction<bool, bool, bool>( GetTensorShape(input1), GetTensorData<bool>(input1), GetTensorShape(input2), GetTensorData<bool>(input2), GetTensorShape(output), GetTensorData<bool>(output), func); } return kTfLiteOk; }
Base
1
gdImagePtr gdImageCreateTrueColor (int sx, int sy) { int i; gdImagePtr im; if (overflow2(sx, sy)) { return NULL; } if (overflow2(sizeof(unsigned char *), sy)) { return NULL; } if (overflow2(sizeof(int) + sizeof(unsigned char), sx * sy)) { return NULL; } /* Check for OOM before doing a potentially large allocation. */ auto allocsz = sizeof(gdImage) + sy * (sizeof(int *) + sizeof(unsigned char *)) + sx * sy * (sizeof(int) + sizeof(unsigned char)); if (UNLIKELY(precheckOOM(allocsz))) { /* Don't throw here because GD might need to do its own cleanup. */ return NULL; } im = (gdImage *) gdMalloc(sizeof(gdImage)); memset(im, 0, sizeof(gdImage)); im->tpixels = (int **) gdMalloc(sizeof(int *) * sy); im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy); im->polyInts = 0; im->polyAllocated = 0; im->brush = 0; im->tile = 0; im->style = 0; for (i = 0; i < sy; i++) { im->tpixels[i] = (int *) gdCalloc(sx, sizeof(int)); im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char)); } im->sx = sx; im->sy = sy; im->transparent = (-1); im->interlace = 0; im->trueColor = 1; /* 2.0.2: alpha blending is now on by default, and saving of alpha is * off by default. This allows font antialiasing to work as expected * on the first try in JPEGs -- quite important -- and also allows * for smaller PNGs when saving of alpha channel is not really * desired, which it usually isn't! */ im->saveAlphaFlag = 0; im->alphaBlendingFlag = 1; im->thick = 1; im->AA = 0; im->AA_polygon = 0; im->cx1 = 0; im->cy1 = 0; im->cx2 = im->sx - 1; im->cy2 = im->sy - 1; im->interpolation = NULL; im->interpolation_id = GD_BILINEAR_FIXED; return im; }
Base
1
CString CWebSock::GetSkinPath(const CString& sSkinName) { CString sRet = CZNC::Get().GetZNCPath() + "/webskins/" + sSkinName; if (!CFile::IsDir(sRet)) { sRet = CZNC::Get().GetCurPath() + "/webskins/" + sSkinName; if (!CFile::IsDir(sRet)) { sRet = CString(_SKINDIR_) + "/" + sSkinName; } } return sRet + "/"; }
Base
1
boost::optional<SaplingNotePlaintext> SaplingNotePlaintext::decrypt( const SaplingEncCiphertext &ciphertext, const uint256 &epk, const uint256 &esk, const uint256 &pk_d, const uint256 &cmu ) { auto pt = AttemptSaplingEncDecryption(ciphertext, epk, esk, pk_d); if (!pt) { return boost::none; } /* Deserialize from the plaintext */ CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << pt.get(); SaplingNotePlaintext ret; ss >> ret; uint256 cmu_expected; if (!librustzcash_sapling_compute_cm( ret.d.data(), pk_d.begin(), ret.value(), ret.rcm.begin(), cmu_expected.begin() )) { return boost::none; } if (cmu_expected != cmu) { return boost::none; } assert(ss.size() == 0); return ret; }
Class
2
void PCRECache::dump(const std::string& filename) { std::ofstream out(filename.c_str()); switch (m_kind) { case CacheKind::Static: for (auto& it : *m_staticCache) { out << it.first->data() << "\n"; } break; case CacheKind::Lru: case CacheKind::Scalable: { std::vector<LRUCacheKey> keys; if (m_kind == CacheKind::Lru) { m_lruCache->snapshotKeys(keys); } else { m_scalableCache->snapshotKeys(keys); } for (auto& key: keys) { out << key.c_str() << "\n"; } } break; } out.close(); }
Base
1
TEST_F(QuantizedConv2DTest, OddPaddingBatch) { const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 4; const int image_batch_count = 3; AddInputFromArray<quint8>( TensorShape({image_batch_count, image_height, image_width, depth}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); const int filter_size = 3; const int filter_count = 1; AddInputFromArray<quint8>( TensorShape({filter_size, filter_size, depth, filter_count}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width / stride; const int expected_height = (image_height * filter_count) / stride; Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<qint32>(&expected, {348, 252, 274, 175, 348, 252, 274, 175, 348, 252, 274, 175}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); }
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteUnpackParams* data = reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); switch (input->type) { case kTfLiteFloat32: { UnpackImpl<float>(context, node, input, data->num, data->axis); break; } case kTfLiteInt32: { UnpackImpl<int32_t>(context, node, input, data->num, data->axis); break; } case kTfLiteUInt8: { UnpackImpl<uint8_t>(context, node, input, data->num, data->axis); break; } case kTfLiteInt8: { UnpackImpl<int8_t>(context, node, input, data->num, data->axis); break; } case kTfLiteBool: { UnpackImpl<bool>(context, node, input, data->num, data->axis); break; } case kTfLiteInt16: { UnpackImpl<int16_t>(context, node, input, data->num, data->axis); break; } default: { context->ReportError(context, "Type '%s' is not supported by unpack.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } return kTfLiteOk; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16); output->type = kTfLiteInt32; if (data->out_float) { output->type = kTfLiteFloat32; } TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); int num_frames = 0; if (input->dims->data[0] >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step / data->frame_stride + 1; } output_size->data[0] = num_frames; output_size->data[1] = data->state->filterbank.num_channels * (1 + data->left_context + data->right_context); return context->ResizeTensor(context, output, output_size); }
Base
1
ObfuscatedPasswd::ObfuscatedPasswd(const PlainPasswd& plainPwd) : CharArray(8), length(8) { int l = strlen(plainPwd.buf), i; for (i=0; i<8; i++) buf[i] = i<l ? plainPwd.buf[i] : 0; deskey(d3desObfuscationKey, EN0); des((rdr::U8*)buf, (rdr::U8*)buf); }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); int32_t* out_buf = GetOutput(context, node, 0)->data.i32; const TfLiteTensor* hash = GetInput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 1); const TfLiteTensor* weight = NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2); switch (params->type) { case kTfLiteLshProjectionDense: DenseLshProjection(hash, input, weight, out_buf); break; case kTfLiteLshProjectionSparse: SparseLshProjection(hash, input, weight, out_buf); break; default: return kTfLiteError; } return kTfLiteOk; }
Base
1
R_API RBinJavaAttrInfo *r_bin_java_constant_value_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 6; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_CONST_VALUE_ATTR; attr->info.constant_value_attr.constantvalue_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; } /* IFDBG r_bin_java_print_constant_value_attr_summary(attr); */ return attr; }
Base
1
v8::Local<v8::Object> CreateNativeEvent( v8::Isolate* isolate, v8::Local<v8::Object> sender, content::RenderFrameHost* frame, electron::mojom::ElectronBrowser::MessageSyncCallback callback) { v8::Local<v8::Object> event; if (frame && callback) { gin::Handle<Event> native_event = Event::Create(isolate); native_event->SetCallback(std::move(callback)); event = v8::Local<v8::Object>::Cast(native_event.ToV8()); } else { /* No need to create native event if we do not need to send reply. */ event = CreateEvent(isolate); } Dictionary dict(isolate, event); dict.Set("sender", sender); /* Should always set frameId even when callback is null. */ if (frame) dict.Set("frameId", frame->GetRoutingID()); return event; }
Class
2
char *uwsgi_expand_path(char *dir, int dir_len, char *ptr) { char src[PATH_MAX + 1]; memcpy(src, dir, dir_len); src[dir_len] = 0; char *dst = ptr; if (!dst) dst = uwsgi_malloc(PATH_MAX + 1); if (!realpath(src, dst)) { uwsgi_error_realpath(src); if (!ptr) free(dst); return NULL; } return dst; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { case kTfLiteFloat32: return EvalImpl<kernel_type, kTfLiteFloat32>(context, node); case kTfLiteUInt8: return EvalImpl<kernel_type, kTfLiteUInt8>(context, node); case kTfLiteInt8: return EvalImpl<kernel_type, kTfLiteInt8>(context, node); case kTfLiteInt16: return EvalImpl<kernel_type, kTfLiteInt16>(context, node); default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
Base
1
static int lookup1_values(int entries, int dim) { int r = (int) floor(exp((float) log((float) entries) / dim)); if ((int) floor(pow((float) r+1, dim)) <= entries) /* (int) cast for MinGW warning */ ++r; /* floor() to avoid _ftol() when non-CRT */ assert(pow((float) r+1, dim) > entries); assert((int) floor(pow((float) r, dim)) <= entries); /* (int),floor() as above */ return r; }
Base
1
jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x) { jas_matrix_t *y; int i; int j; y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x), jas_seq2d_xend(x), jas_seq2d_yend(x)); assert(y); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; }
Base
1
void CalculateOutputIndexValueRowID( OpKernelContext* context, const RowPartitionTensor& value_rowids, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { return; } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); DCHECK_LT(current_value_rowid, parent_output_index.size()); INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { INDEX_TYPE next_value_rowid = value_rowids(i); if (next_value_rowid == current_value_rowid) { if (current_output_index >= 0) { ++current_output_column; if (current_output_column < output_size) { current_output_index += output_index_multiplier; } else { current_output_index = -1; } } } else { current_output_column = 0; current_value_rowid = next_value_rowid; DCHECK_LT(next_value_rowid, parent_output_index.size()); current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } OP_REQUIRES(context, result->size() == value_rowids.size(), errors::InvalidArgument("Invalid row ids.")); }
Base
1
bool ECDSA_Verification_Operation::verify(const uint8_t msg[], size_t msg_len, const uint8_t sig[], size_t sig_len) { if(sig_len != m_group.get_order_bytes() * 2) return false; const BigInt e(msg, msg_len, m_group.get_order_bits()); const BigInt r(sig, sig_len / 2); const BigInt s(sig + sig_len / 2, sig_len / 2); if(r <= 0 || r >= m_group.get_order() || s <= 0 || s >= m_group.get_order()) return false; const BigInt w = m_group.inverse_mod_order(s); const BigInt u1 = m_group.multiply_mod_order(e, w); const BigInt u2 = m_group.multiply_mod_order(r, w); const PointGFp R = m_gy_mul.multi_exp(u1, u2); if(R.is_zero()) return false; const BigInt v = m_group.mod_order(R.get_affine_x()); return (v == r); }
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; }
Base
1
int HexOutStream::length() { return offset + ptr - start; }
Base
1
TEST_P(SslSocketTest, Ipv6San) {
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem"
      match_subject_alt_names:
        exact: "::1"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem"
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
  testUtil(test_options);
}
Base
1
std::string controller::bookmark( const std::string& url, const std::string& title, const std::string& description, const std::string& feed_title) { std::string bookmark_cmd = cfg.get_configvalue("bookmark-cmd"); bool is_interactive = cfg.get_configvalue_as_bool("bookmark-interactive"); if (bookmark_cmd.length() > 0) { std::string cmdline = strprintf::fmt("%s '%s' %s %s %s", bookmark_cmd, utils::replace_all(url,"'", "%27"), quote_empty(stfl::quote(title)), quote_empty(stfl::quote(description)), quote_empty(stfl::quote(feed_title))); LOG(level::DEBUG, "controller::bookmark: cmd = %s", cmdline); if (is_interactive) { v->push_empty_formaction(); stfl::reset(); utils::run_interactively(cmdline, "controller::bookmark"); v->pop_current_formaction(); return ""; } else { char * my_argv[4]; my_argv[0] = const_cast<char *>("/bin/sh"); my_argv[1] = const_cast<char *>("-c"); my_argv[2] = const_cast<char *>(cmdline.c_str()); my_argv[3] = nullptr; return utils::run_program(my_argv, ""); } } else { return _("bookmarking support is not configured. Please set the configuration variable `bookmark-cmd' accordingly."); } }
Class
2
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
Base
1
TfLiteRegistration CopyOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { /* Set output size to input size */ const TfLiteTensor* tensor0 = GetInput(context, node, 0); TfLiteTensor* tensor1 = GetOutput(context, node, 0); TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims); return context->ResizeTensor(context, tensor1, newSize); }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { CallReporting* call_reporting = static_cast<CallReporting*>(node->builtin_data); /* Copy input data to output data. */ const TfLiteTensor* a0 = GetInput(context, node, 0); TfLiteTensor* a1 = GetOutput(context, node, 0); int num = a0->dims->data[0]; for (int i = 0; i < num; i++) { a1->data.f[i] = a0->data.f[i]; } call_reporting->Record(); return kTfLiteOk; }; return reg; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalDiv<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8) { TF_LITE_ENSURE_OK( context, EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { context->ReportError( context, "Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.", output->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); /* Calculate num_entries per path */ for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; /* Resize the decoded outputs. */ TfLiteTensor* indices = GetOutput(context, node, p); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values = GetOutput(context, node, p + top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; }
Base
1
TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithWrongCodec) {
  const std::string yaml = TestEnvironment::substitute(R"EOF(
address:
  socket_address:
    address: 127.0.0.1
    protocol: UDP
    port_value: 1234
filter_chains:
- filter_chain_match:
    transport_protocol: "quic"
  filters: []
  transport_socket:
    name: envoy.transport_sockets.quic
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport
      downstream_tls_context:
        common_tls_context:
          tls_certificates:
          - certificate_chain:
              filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"
            private_key:
              filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"
          validation_context:
            trusted_ca:
              filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
            match_subject_alt_names:
            - exact: localhost
            - exact: 127.0.0.1
udp_listener_config:
  quic_options: {}
)EOF",
                                                       Network::Address::IpVersion::v4);
  envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml);
#if defined(ENVOY_ENABLE_QUIC)
  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException,
                          "error building network filter chain for quic listener: requires exactly "
                          "one http_connection_manager filter.");
#else
  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException,
                          "QUIC is configured but not enabled in the build.");
#endif
}
Base
1
void Context::onDelete() { if (wasm_->onDelete_) { wasm_->onDelete_(this, id_); } }
Base
1
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; /* TODO(renjieliu): We may revisit the quantization calculation logic, the unbounded upper limit is actually hard to quantize. */ case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
Base
1
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
Base
1
bool IsPadOpSupported(const TfLiteRegistration* registration, const TfLiteNode* node, TfLiteContext* context) { /* padding is d x 2 tensor, where d is the dimension of input. */ const TfLiteTensor* padding = GetInput(context, node, 1); if (!IsConstantTensor(padding)) { TF_LITE_KERNEL_LOG(context, "%s: Only constant padding is supported for PAD.", padding->name); return false; } if (padding->dims->data[0] != 4 || padding->dims->data[1] != 2) { TF_LITE_KERNEL_LOG(context, "%s: Only 4D inputs are supported for PAD.", padding->name); return false; } const int32_t* padding_data = GetTensorData<int32_t>(padding); if (!(padding_data[0] == 0 && padding_data[1] == 0)) { TF_LITE_KERNEL_LOG( context, "%s: Padding for batch dimension is not supported in PAD.", padding->name); return false; } if (!(padding_data[6] == 0 && padding_data[7] == 0)) { TF_LITE_KERNEL_LOG( context, "%s: Padding for channel dimension is not supported in PAD.", padding->name); return false; } return true; }
Base
1
UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable() || srcLength == 0 || srcChars == NULL) { return *this; } /* Perform all remaining operations relative to srcChars + srcStart. From this point forward, do not use srcStart. */ srcChars += srcStart; if(srcLength < 0) { /* get the srcLength if necessary */ if((srcLength = u_strlen(srcChars)) == 0) { return *this; } } int32_t oldLength = length(); int32_t newLength = oldLength + srcLength; /* Check for append onto ourself */ const UChar* oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { /* Copy into a new UnicodeString and start over */ UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doAppend(copy.getArrayStart(), 0, srcLength); } /* optimize append() onto a large-enough, owned string */ if((newLength <= getCapacity() && isBufferWritable()) || cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) { UChar *newArray = getArrayStart(); /* Do not copy characters when UChar *buffer=str.getAppendBuffer(...); is followed by str.append(buffer, length); or str.appendString(buffer, length) or similar. */ if(srcChars != newArray + oldLength) { us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength); } setLength(newLength); } return *this; }
Base
1
TfLiteStatus EvalHashtableSize(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputResourceIdTensor); int resource_id = input_resource_id_tensor->data.i32[0]; TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); auto* output_data = GetTensorData<std::int64_t>(output_tensor); Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); auto& resources = subgraph->resources(); auto* lookup = resource::GetHashtableResource(&resources, resource_id); TF_LITE_ENSURE(context, lookup != nullptr); output_data[0] = lookup->Size(); return kTfLiteOk; }
Base
1
bool read(ReadonlyBytes buffer) { auto fields_size = sizeof(LocalFileHeader) - (sizeof(u8*) * 3); if (buffer.size() < fields_size) return false; if (memcmp(buffer.data(), local_file_header_signature, sizeof(local_file_header_signature)) != 0) return false; memcpy(reinterpret_cast<void*>(&minimum_version), buffer.data() + sizeof(local_file_header_signature), fields_size); name = buffer.data() + sizeof(local_file_header_signature) + fields_size; extra_data = name + name_length; compressed_data = extra_data + extra_data_length; return true; }
Base
1
int HttpFileImpl::save(const std::string &path) const { assert(!path.empty()); if (fileName_.empty()) return -1; filesystem::path fsPath(utils::toNativePath(path)); if (!fsPath.is_absolute() && (!fsPath.has_parent_path() || (fsPath.begin()->string() != "." && fsPath.begin()->string() != ".."))) { filesystem::path fsUploadPath(utils::toNativePath( HttpAppFrameworkImpl::instance().getUploadPath())); fsPath = fsUploadPath / fsPath; } filesystem::path fsFileName(utils::toNativePath(fileName_)); if (!filesystem::exists(fsPath)) { LOG_TRACE << "create path:" << fsPath; drogon::error_code err; filesystem::create_directories(fsPath, err); if (err) { LOG_SYSERR; return -1; } } return saveTo(fsPath / fsFileName); }
Base
1
static Array HHVM_METHOD(Memcache, getextendedstats, const String& /*type*/ /* = null_string */, int /*slabid*/ /* = 0 */, int /*limit*/ /* = 100 */) { auto data = Native::data<MemcacheData>(this_); memcached_return_t ret; memcached_stat_st *stats; stats = memcached_stat(&data->m_memcache, nullptr, &ret); if (ret != MEMCACHED_SUCCESS) { return Array(); } int server_count = memcached_server_count(&data->m_memcache); Array return_val; for (int server_id = 0; server_id < server_count; server_id++) { memcached_stat_st *stat; char stats_key[30] = {0}; size_t key_len; LMCD_SERVER_POSITION_INSTANCE_TYPE instance = memcached_server_instance_by_position(&data->m_memcache, server_id); const char *hostname = LMCD_SERVER_HOSTNAME(instance); in_port_t port = LMCD_SERVER_PORT(instance); stat = stats + server_id; Array server_stats = memcache_build_stats(&data->m_memcache, stat, &ret); if (ret != MEMCACHED_SUCCESS) { continue; } key_len = snprintf(stats_key, sizeof(stats_key), "%s:%d", hostname, port); return_val.set(String(stats_key, key_len, CopyString), server_stats); } free(stats); return return_val; }
Base
1
bool handleBackslash(signed char& out) { char ch = *p++; switch (ch) { case 0: return false; case '"': out = ch; return true; case '\\': out = ch; return true; case '/': out = ch; return true; case 'b': out = '\b'; return true; case 'f': out = '\f'; return true; case 'n': out = '\n'; return true; case 'r': out = '\r'; return true; case 't': out = '\t'; return true; case 'u': { if (UNLIKELY(is_tsimplejson)) { auto const ch1 = *p++; auto const ch2 = *p++; auto const dch3 = dehexchar(*p++); auto const dch4 = dehexchar(*p++); if (UNLIKELY(ch1 != '0' || ch2 != '0' || dch3 < 0 || dch4 < 0)) { return false; } out = (dch3 << 4) | dch4; return true; } else { uint16_t u16cp = 0; for (int i = 0; i < 4; i++) { auto const hexv = dehexchar(*p++); if (hexv < 0) return false; /* includes check for end of string */ u16cp <<= 4; u16cp |= hexv; } if (u16cp > 0x7f) { return false; } else { out = u16cp; return true; } } } default: return false; } }
Base
1
RestAuthHandler::RestAuthHandler(application_features::ApplicationServer& server, GeneralRequest* request, GeneralResponse* response) : RestVocbaseBaseHandler(server, request, response), _validFor(60 * 60 * 24 * 30) {}
Base
1
TEST(BasicFlatBufferModel, TestHandleMalformedModel) { const auto model_paths = { /* These models use the same tensor as both input and output of a node */ "tensorflow/lite/testdata/add_shared_tensors.bin", }; for (const auto& model_path : model_paths) { std::unique_ptr<tflite::FlatBufferModel> model = FlatBufferModel::BuildFromFile(model_path); ASSERT_NE(model, nullptr); tflite::ops::builtin::BuiltinOpResolver resolver; InterpreterBuilder builder(*model, resolver); std::unique_ptr<Interpreter> interpreter; ASSERT_EQ(builder(&interpreter), kTfLiteOk); ASSERT_NE(interpreter, nullptr); ASSERT_NE(interpreter->AllocateTensors(), kTfLiteOk); } }
Base
1
R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!sz) { return NULL; } ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR; /* if (buffer + offset > buffer + sz) return NULL; */ attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; /* IFDBG r_bin_java_print_source_code_file_attr_summary(attr); */ return attr; }
Base
1
TfLiteStatus PrepareHashtable(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 0); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE(context, node->user_data != nullptr); const auto* params = reinterpret_cast<const TfLiteHashtableParams*>(node->user_data); TF_LITE_ENSURE(context, !params->table_name.empty()); TF_LITE_ENSURE(context, (params->key_dtype == kTfLiteInt64 && params->value_dtype == kTfLiteString) || (params->key_dtype == kTfLiteString && params->value_dtype == kTfLiteInt64)); TfLiteTensor* resource_handle_tensor = GetOutput(context, node, kResourceHandleTensor); TF_LITE_ENSURE(context, resource_handle_tensor != nullptr); TF_LITE_ENSURE_EQ(context, resource_handle_tensor->type, kTfLiteInt32); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); outputSize->data[0] = 1; return context->ResizeTensor(context, resource_handle_tensor, outputSize); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = kTfLiteInt32; /* By design, the input shape is always known at the time of Prepare, even if the preceding op that generates |input| is dynamic. Thus, we can always compute the rank immediately, without waiting for Eval. */ SetTensorToPersistentRo(output); /* Rank produces a 0-D int32 Tensor representing the rank of input. */ TfLiteIntArray* output_size = TfLiteIntArrayCreate(0); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size)); TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0); /* Immediately propagate the known rank to the output tensor. This allows downstream ops that rely on the value to use it during prepare. */ if (output->type == kTfLiteInt32) { int32_t* output_data = GetTensorData<int32_t>(output); *output_data = NumDimensions(input); } else { return kTfLiteError; } return kTfLiteOk; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = input->dims->data[0]; output_size->data[1] = input->dims->data[1]; output_size->data[2] = input->dims->data[2]; output_size->data[3] = input->dims->data[3]; return context->ResizeTensor(context, output, output_size); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); }
Base
1
TypedValue HHVM_FUNCTION(substr_compare, const String& main_str, const String& str, int offset, int length /* = INT_MAX */, bool case_insensitivity /* = false */) { int s1_len = main_str.size(); int s2_len = str.size(); if (length <= 0) { raise_warning("The length must be greater than zero"); return make_tv<KindOfBoolean>(false); } if (offset < 0) { offset = s1_len + offset; if (offset < 0) offset = 0; } if (offset >= s1_len) { raise_warning("The start position cannot exceed initial string length"); return make_tv<KindOfBoolean>(false); } int cmp_len = s1_len - offset; if (cmp_len < s2_len) cmp_len = s2_len; if (cmp_len > length) cmp_len = length; const char *s1 = main_str.data(); if (case_insensitivity) { return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len)); } return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len)); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); /* Make sure all the inputs are scalars. */ TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0); /* Currently only supports int32 and float. TODO(b/117912892): Support quantization as well. */ const auto dtype = start->type; if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32) { context->ReportError(context, "Unknown index output data type: %s", TfLiteTypeGetName(dtype)); return kTfLiteError; } TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype); TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = dtype; if (IsConstantTensor(start) && IsConstantTensor(limit) && IsConstantTensor(delta)) { return ResizeOutput(context, start, limit, delta, output); } SetTensorToDynamic(output); return kTfLiteOk; }
Base
1
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!is_supported_type(input->type)) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, op_name); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
Base
1
CharString *Formattable::internalGetCharString(UErrorCode &status) { if(fDecimalStr == NULL) { if (fDecimalQuantity == NULL) { /* No decimal number for the formattable yet. Which means the value was set directly by the user as an int, int64 or double. If the value came from parsing, or from the user setting a decimal number, fDecimalNum would already be set. */ LocalPointer<DecimalQuantity> dq(new DecimalQuantity(), status); if (U_FAILURE(status)) { return nullptr; } populateDecimalQuantity(*dq, status); if (U_FAILURE(status)) { return nullptr; } fDecimalQuantity = dq.orphan(); } fDecimalStr = new CharString(); if (fDecimalStr == NULL) { status = U_MEMORY_ALLOCATION_ERROR; return NULL; } /* Older ICUs called uprv_decNumberToString here, which is not exactly the same as DecimalQuantity::toScientificString(). The biggest difference is that uprv_decNumberToString does not print scientific notation for magnitudes greater than -5 and smaller than some amount (+5?). */ if (fDecimalQuantity->isZero()) { fDecimalStr->append("0", -1, status); } else if (std::abs(fDecimalQuantity->getMagnitude()) < 5) { fDecimalStr->appendInvariantChars(fDecimalQuantity->toPlainString(), status); } else { fDecimalStr->appendInvariantChars(fDecimalQuantity->toScientificString(), status); } } return fDecimalStr; }
Base
1
void Context::onLog() { if (wasm_->onLog_) { wasm_->onLog_(this, id_); } }
Base
1
mptctl_eventquery (unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; MPT_ADAPTER *ioc; int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " "Unable to read in mpt_ioctl_eventquery struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; karg.eventTypes = ioc->eventTypes; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - " "Unable to write out mpt_ioctl_eventquery struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); return -EFAULT; } return 0; }
Class
2
TEST_P(SslSocketTest, FailedClientAuthSanVerification) {
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
      match_subject_alt_names:
        exact: "example.com"
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());
  testUtil(test_options.setExpectedServerStats("ssl.fail_verify_san"));
}
Base
1
int bmp_validate(jas_stream_t *in) { int n; int i; uchar buf[2]; assert(JAS_STREAM_MAXPUTBACK >= 2); /* Read the first two characters that constitute the signature. */ if ((n = jas_stream_read(in, (char *) buf, 2)) < 0) { return -1; } /* Put the characters read back onto the stream. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Did we read enough characters? */ if (n < 2) { return -1; } /* Is the signature correct for the BMP format? */ if (buf[0] == (BMP_MAGIC & 0xff) && buf[1] == (BMP_MAGIC >> 8)) { return 0; } return -1; }
Class
2
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
Base
1
bool DecodeResourceHandleList(std::unique_ptr<port::StringListDecoder> d, ResourceHandle* ps, int64_t n) { std::vector<uint32> sizes(n); if (!d->ReadSizes(&sizes)) return false; ResourceHandleProto proto; for (int i = 0; i < n; ++i) { if (!proto.ParseFromArray(d->Data(sizes[i]), sizes[i])) { return false; } ps[i].FromProto(proto); } return true; }
Base
1
QPDFObjectHandle::parse(PointerHolder<InputSource> input, std::string const& object_description, QPDFTokenizer& tokenizer, bool& empty, StringDecrypter* decrypter, QPDF* context) { return parseInternal(input, object_description, tokenizer, empty, decrypter, context, false, false, false); }
Class
2
Http::FilterMetadataStatus Context::onRequestMetadata() { if (!wasm_->onRequestMetadata_) { return Http::FilterMetadataStatus::Continue; } if (wasm_->onRequestMetadata_(this, id_).u64_ == 0) { return Http::FilterMetadataStatus::Continue; } return Http::FilterMetadataStatus::Continue; /* This is currently the only return code. */ }
Base
1
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); const TfLiteTensor* bias = (NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr; TFLITE_DCHECK_EQ(input_type, input->type); switch (input_type) { /* Already know in/out types are same. */ case kTfLiteFloat32: if (filter->type == kTfLiteFloat32) { return EvalFloat<kernel_type>(context, node, params, data, input, filter, bias, output); } else if (filter->type == kTfLiteInt8) { return EvalHybridPerChannel<kernel_type>(context, node, params, data, input, filter, bias, output); } else { TF_LITE_KERNEL_LOG( context, "Type %s with filter type %s not currently supported.", TfLiteTypeGetName(input->type), TfLiteTypeGetName(filter->type)); return kTfLiteError; } break; case kTfLiteUInt8: return EvalQuantized<kernel_type>(context, node, params, data, input, filter, bias, output); break; case kTfLiteInt8: return EvalQuantizedPerChannel<kernel_type>(context, node, params, data, input, filter, bias, output); break; case kTfLiteInt16: return EvalQuantizedPerChannel16x8(params, data, input, filter, bias, output); break; default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } }
Base
1
static void* OGRExpatRealloc( void *ptr, size_t size ) { if( CanAlloc(size) ) return realloc(ptr, size); free(ptr); return nullptr; }
Variant
0
int jas_memdump(FILE *out, void *data, size_t len) { size_t i; size_t j; uchar *dp; dp = data; for (i = 0; i < len; i += 16) { fprintf(out, "%04zx:", i); for (j = 0; j < 16; ++j) { if (i + j < len) { fprintf(out, " %02x", dp[i + j]); } } fprintf(out, "\n"); } return 0; }
Class
2
R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAnnotation *annotation = NULL; RBinJavaElementValuePair *evps = NULL; ut64 offset = 0; annotation = R_NEW0 (RBinJavaAnnotation); if (!annotation) { return NULL; } /* (ut16) read and set annotation_value.type_idx; */ annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; /* (ut16) read and set annotation_value.num_element_value_pairs; */ annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; annotation->element_value_pairs = r_list_newf (r_bin_java_element_pair_free); /* read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs */ for (i = 0; i < annotation->num_element_value_pairs; i++) { if (offset > sz) { break; } evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset); if (evps) { offset += evps->size; r_list_append (annotation->element_value_pairs, (void *) evps); } } annotation->size = offset; return annotation; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; if (type != kTfLiteInt32 && type != kTfLiteFloat32) { TF_LITE_KERNEL_LOG(context, "Unsupported data type %s.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
Base
1
void ComputeAsync(OpKernelContext* c, DoneCallback done) override { auto col_params = new CollectiveParams(); auto done_with_cleanup = [col_params, done = std::move(done)]() { done(); col_params->Unref(); }; OP_REQUIRES_OK_ASYNC(c, FillCollectiveParams(col_params, REDUCTION_COLLECTIVE, /*group_size*/ c->input(1), /*group_key*/ c->input(2), /*instance_key*/ c->input(3)), done); col_params->instance.shape = c->input(0).shape(); col_params->merge_op = merge_op_.get(); col_params->final_op = final_op_.get(); VLOG(1) << "CollectiveReduceV2 group_size " << col_params->group.group_size << " group_key " << col_params->group.group_key << " instance_key " << col_params->instance.instance_key; /* Allocate the output tensor, trying to reuse the input. */ Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC(c, c->forward_input_or_allocate_output( {0}, 0, col_params->instance.shape, &output), done_with_cleanup); Run(c, col_params, std::move(done_with_cleanup)); }
Variant
0
int GetU8 (int nPos, bool *pbSuccess) { /* *pbSuccess = true; */ if ( nPos < 0 || nPos >= m_nLen ) { *pbSuccess = false; return 0; } return m_sFile[ nPos ]; }
Base
1
void CIRCNetwork::SetEncoding(const CString& s) { m_sEncoding = s; if (GetIRCSock()) { GetIRCSock()->SetEncoding(s); } }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* dims = GetInput(context, node, kDimsTensor); const TfLiteTensor* value = GetInput(context, node, kValueTensor); /* Make sure the 1st input tensor is 1-D. */ TF_LITE_ENSURE_EQ(context, NumDimensions(dims), 1); /* Make sure the 1st input tensor is int32 or int64. */ const auto dtype = dims->type; TF_LITE_ENSURE(context, dtype == kTfLiteInt32 || dtype == kTfLiteInt64); /* Make sure the 2nd input tensor is a scalar. */ TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = value->type; if (IsConstantTensor(dims)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; }
Base
1
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const { auto* region = region_from_vaddr(process, vaddr); return region && region->is_readable(); }
Class
2
inline typename V::VariantType FBUnserializer<V>::unserializeThing() { size_t code = nextCode(); switch (code) { case FB_SERIALIZE_BYTE: case FB_SERIALIZE_I16: case FB_SERIALIZE_I32: case FB_SERIALIZE_I64: return V::fromInt64(unserializeInt64()); case FB_SERIALIZE_VARCHAR: case FB_SERIALIZE_STRING: return V::fromString(unserializeString()); case FB_SERIALIZE_STRUCT: return V::fromMap(unserializeMap()); case FB_SERIALIZE_NULL: ++p_; return V::createNull(); case FB_SERIALIZE_DOUBLE: return V::fromDouble(unserializeDouble()); case FB_SERIALIZE_BOOLEAN: return V::fromBool(unserializeBoolean()); case FB_SERIALIZE_VECTOR: return V::fromVector(unserializeVector()); case FB_SERIALIZE_LIST: return V::fromVector(unserializeList()); case FB_SERIALIZE_SET: return V::fromSet(unserializeSet()); default: throw UnserializeError("Invalid code: " + folly::to<std::string>(code) + " at location " + folly::to<std::string>(p_)); } }
Class
2
RectangleRequest &operator=(const struct RectangleRequest &req) { /* Not nice, but this is really faster and simpler */ memcpy(this,&req,sizeof(struct RectangleRequest)); /* Not linked in any way if this is new. */ rr_pNext = NULL; return *this; }
Class
2
void * alloc_top(size_t size, size_t align) {loop: top -= size; align_top(align); if (top < bottom) {new_chunk(); goto loop;} return top; }
Base
1
Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) { if (!wasm_->onUpstreamData_) { return Network::FilterStatus::Continue; } auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length), static_cast<uint32_t>(end_of_stream)); /* TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values. */ return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration; }
Base
1
RequestHandler::RequestHandler( std::shared_ptr<CheckWorkflow> check_workflow, std::shared_ptr<context::ServiceContext> service_context, std::unique_ptr<Request> request_data) : context_(new context::RequestContext(service_context, std::move(request_data))), check_workflow_(check_workflow) { /* Remove x-endpoint-api-userinfo from the downstream client. It should be set by the last Endpoint proxy to prevent users spoofing. */ std::string buffer; if (context_->request()->FindHeader( google::api_manager::auth::kEndpointApiUserInfo, &buffer)) { context_->request()->AddHeaderToBackend( google::api_manager::auth::kEndpointApiUserInfo, ""); } }
Base
1
friend bool operator==(const TensorKey& t1, const TensorKey& t2) { if (t1.dtype() != t2.dtype() || t1.shape() != t2.shape()) { return false; } if (DataTypeCanUseMemcpy(t1.dtype())) { return t1.tensor_data() == t2.tensor_data(); } if (t1.dtype() == DT_STRING) { const auto s1 = t1.unaligned_flat<tstring>(); const auto s2 = t2.unaligned_flat<tstring>(); for (int64_t i = 0, n = t1.NumElements(); i < n; ++i) { if (TF_PREDICT_FALSE(s1(i) != s2(i))) { return false; } } return true; } return false; }
Variant
0
void preprocessNodes(std::vector<Proxy> &nodes, extra_settings &ext) { std::for_each(nodes.begin(), nodes.end(), [&ext](Proxy &x) { if(ext.remove_emoji) x.Remark = trim(removeEmoji(x.Remark)); nodeRename(x, ext.rename_array, ext); if(ext.add_emoji) x.Remark = addEmoji(x, ext.emoji_array, ext); }); if(ext.sort_flag) { bool failed = true; if(ext.sort_script.size()) { std::string script = ext.sort_script; if(startsWith(script, "path:")) script = fileGet(script.substr(5), false); script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx) { try { ctx.eval(script); auto compare = (std::function<int(const Proxy&, const Proxy&)>) ctx.eval("compare"); auto comparer = [&](const Proxy &a, const Proxy &b) { if(a.Type == ProxyType::Unknow) return 1; if(b.Type == ProxyType::Unknow) return 0; return compare(a, b); }; std::stable_sort(nodes.begin(), nodes.end(), comparer); failed = false; } catch(qjs::exception) { script_print_stack(ctx); } }, global.scriptCleanContext); } if(failed) std::stable_sort(nodes.begin(), nodes.end(), [](const Proxy &a, const Proxy &b) { return a.Remark < b.Remark; }); } }
Base
1
void readStructEnd() { lastFieldId_ = nestedStructFieldIds_.back(); nestedStructFieldIds_.pop_back(); }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); TfLiteTensor* output_tensor = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), NumDimensions(input_tensor)); if (!IsConstantTensor(padding_matrix)) { SetTensorToDynamic(output_tensor); return kTfLiteOk; } // We have constant padding, so we can infer output size. auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } return context->ResizeTensor(context, output_tensor, output_size.release()); }
Base
1
static inline bool isValid(const RemoteFsDevice::Details &d) { return d.isLocalFile() || RemoteFsDevice::constSshfsProtocol==d.url.scheme() || RemoteFsDevice::constSambaProtocol==d.url.scheme() || RemoteFsDevice::constSambaAvahiProtocol==d.url.scheme(); }
Class
2
bool is_digit(char c) { return c >= '0' && c <= '9'; }
Class
2
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < NumOutputs(node); ++i) { SetTensorToDynamic(GetOutput(context, node, i)); } return kTfLiteOk; }
Base
1
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs, const OpDef& op_def) {
  FullTypeDef ft;
  ft.set_type_id(TFT_PRODUCT);

  for (int i = 0; i < op_def.output_arg_size(); i++) {
    auto* t = ft.add_args();

    *t = op_def.output_arg(i).experimental_full_type();

    // Resolve dependent types. The convention for op registrations is to use
    // attributes as type variables.
    // See https://www.tensorflow.org/guide/create_op#type_polymorphism.
    // Once the op signature can be defined entirely in FullType, this
    // convention can be deprecated.
    //
    // Note: While this code performs some basic verifications, it generally
    // assumes consistent op defs and attributes. If more complete
    // verifications are needed, they should be done separately, and in a
    // way that can be reused for type inference.
    for (int j = 0; j < t->args_size(); j++) {
      auto* arg = t->mutable_args(i);
      if (arg->type_id() == TFT_VAR) {
        const auto* attr = attrs.Find(arg->s());
        DCHECK(attr != nullptr);

        if (attr->value_case() == AttrValue::kList) {
          const auto& attr_list = attr->list();
          arg->set_type_id(TFT_PRODUCT);
          for (int i = 0; i < attr_list.type_size(); i++) {
            map_dtype_to_tensor(attr_list.type(i), arg->add_args());
          }

        } else if (attr->value_case() == AttrValue::kType) {
          map_dtype_to_tensor(attr->type(), arg);

        } else {
          return Status(error::UNIMPLEMENTED,
                        absl::StrCat("unknown attribute type",
                                     attrs.DebugString(), " key=", arg->s()));
        }

        arg->clear_s();
      }
    }
  }

  return ft;
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, 0); TfLiteTensor* hits = GetOutput(context, node, 1); const TfLiteTensor* lookup = GetInput(context, node, 0); const TfLiteTensor* key = GetInput(context, node, 1); const TfLiteTensor* value = GetInput(context, node, 2); const int num_rows = SizeOfDimension(value, 0); const int row_bytes = value->bytes / num_rows; void* pointer = nullptr; DynamicBuffer buf; for (int i = 0; i < SizeOfDimension(lookup, 0); i++) { int idx = -1; pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows, sizeof(int32_t), greater); if (pointer != nullptr) { idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) / sizeof(int32_t); } if (idx >= num_rows || idx < 0) { if (output->type == kTfLiteString) { buf.AddString(nullptr, 0); } else { memset(output->data.raw + i * row_bytes, 0, row_bytes); } hits->data.uint8[i] = 0; } else { if (output->type == kTfLiteString) { buf.AddString(GetString(value, idx)); } else { memcpy(output->data.raw + i * row_bytes, value->data.raw + idx * row_bytes, row_bytes); } hits->data.uint8[i] = 1; } } if (output->type == kTfLiteString) { buf.WriteToTensorAsVector(output); } return kTfLiteOk; }
Base
1
int PackLinuxElf32::canUnpack() { if (super::canUnpack()) { return true; } if (Elf32_Ehdr::ET_DYN==get_te16(&ehdri.e_type)) { PackLinuxElf32help1(fi); } return false; }
Base
1
TEST(DefaultCertValidatorTest, TestMultiLevelMatch) { // san_multiple_dns_cert matches *.example.com bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir " "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("foo.api.example.com"); std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>> subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); }
Base
1
bool RequestParser::OnHeadersEnd() { bool matched = view_matcher_(request_->method(), request_->url().path(), &stream_); if (!matched) { LOG_WARN("No view matches the request: %s %s", request_->method().c_str(), request_->url().path().c_str()); } return matched; }
Base
1
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); OpContext op_context(context, node); TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteInt16 || op_context.input->type == kTfLiteFloat16); TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32); op_data->max_diff = op_data->tolerance * op_context.input->params.scale; switch (op_context.input->type) { case kTfLiteUInt8: case kTfLiteInt8: op_data->max_diff *= (1 << 8); break; case kTfLiteInt16: op_data->max_diff *= (1 << 16); break; default: break; } // Allocate tensor to store the dequantized inputs. if (op_data->cache_tensor_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &op_data->cache_tensor_id)); } TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(1); node->temporaries->data[0] = op_data->cache_tensor_id; TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0); dequantized->type = op_context.ref->type; dequantized->allocation_type = kTfLiteDynamic; TF_LITE_ENSURE_OK(context, context->ResizeTensor( context, dequantized, TfLiteIntArrayCopy(op_context.input->dims))); return kTfLiteOk; }
Base
1
Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td, const String& key, const String& iv) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } int max_key_size = mcrypt_enc_get_key_size(pm->m_td); int iv_size = mcrypt_enc_get_iv_size(pm->m_td); if (key.empty()) { raise_warning("Key size is 0"); } unsigned char *key_s = (unsigned char *)malloc(key.size()); memset(key_s, 0, key.size()); unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1); memset(iv_s, 0, iv_size + 1); int key_size; if (key.size() > max_key_size) { raise_warning("Key size too large; supplied length: %d, max: %d", key.size(), max_key_size); key_size = max_key_size; } else { key_size = key.size(); } memcpy(key_s, key.data(), key.size()); if (iv.size() != iv_size) { raise_warning("Iv size incorrect; supplied length: %d, needed: %d", iv.size(), iv_size); } memcpy(iv_s, iv.data(), std::min(iv_size, iv.size())); mcrypt_generic_deinit(pm->m_td); int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s); /* If this function fails, close the mcrypt module to prevent crashes * when further functions want to access this resource */ if (result < 0) { pm->close(); switch (result) { case -3: raise_warning("Key length incorrect"); break; case -4: raise_warning("Memory allocation error"); break; case -1: default: raise_warning("Unknown error"); break; } } else { pm->m_init = true; } free(iv_s); free(key_s); return result; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav); const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3); TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input_wav->type, output->type); TF_LITE_ENSURE_TYPES_EQ(context, input_rate->type, kTfLiteInt32); TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input_wav->dims->data[0]; output_size->data[1] = input_wav->dims->data[1]; output_size->data[2] = params->dct_coefficient_count; return context->ResizeTensor(context, output, output_size); }
Base
1
void ComputeAsync(OpKernelContext* c, DoneCallback done) override { auto col_params = new CollectiveParams(); auto done_with_cleanup = [col_params, done = std::move(done)]() { done(); col_params->Unref(); }; core::RefCountPtr<CollectiveGroupResource> resource; OP_REQUIRES_OK_ASYNC(c, LookupResource(c, HandleFromInput(c, 1), &resource), done); Tensor group_assignment = c->input(2); OP_REQUIRES_OK_ASYNC( c, FillCollectiveParams(col_params, group_assignment, ALL_TO_ALL_COLLECTIVE, resource.get()), done); col_params->instance.shape = c->input(0).shape(); VLOG(1) << "CollectiveAllToAll group_size " << col_params->group.group_size << " group_key " << col_params->group.group_key << " instance_key " << col_params->instance.instance_key; // Allocate the output tensor, trying to reuse the input. Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC(c, c->forward_input_or_allocate_output( {0}, 0, col_params->instance.shape, &output), done_with_cleanup); Run(c, col_params, std::move(done_with_cleanup)); }
Variant
0
int pnm_validate(jas_stream_t *in) { uchar buf[2]; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= 2); /* Read the first two characters that constitute the signature. */ if ((n = jas_stream_read(in, buf, 2)) < 0) { return -1; } /* Put these characters back to the stream. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Did we read enough data? */ if (n < 2) { return -1; } /* Is this the correct signature for a PNM file? */ if (buf[0] == 'P' && isdigit(buf[1])) { return 0; } return -1; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteFloat32: { return PackImpl<float>(context, node, output, data->values_count, data->axis); } case kTfLiteUInt8: { return PackImpl<uint8_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt8: { return PackImpl<int8_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt16: { return PackImpl<int16_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt32: { return PackImpl<int32_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt64: { return PackImpl<int64_t>(context, node, output, data->values_count, data->axis); } default: { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } return kTfLiteOk; }
Base
1
IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( const Http::TestHeaderMapImpl& request_headers, uint32_t request_body_size, const Http::TestHeaderMapImpl& response_headers, uint32_t response_size, int upstream_index) { ASSERT(codec_client_ != nullptr); // Send the request to Envoy. IntegrationStreamDecoderPtr response; if (request_body_size) { response = codec_client_->makeRequestWithBody(request_headers, request_body_size); } else { response = codec_client_->makeHeaderOnlyRequest(request_headers); } waitForNextUpstreamRequest(upstream_index); // Send response headers, and end_stream if there is no response body. upstream_request_->encodeHeaders(response_headers, response_size == 0); // Send any response data, with end_stream true. if (response_size) { upstream_request_->encodeData(response_size, true); } // Wait for the response to be read by the codec client. response->waitForEndStream(); return response; }
Class
2
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape->dims->size == 1 && shape->type == kTfLiteInt32); }
Base
1
TEST_F(AllowMissingInAndOfOrListTest, GoodAndBadJwts) { EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); // Use the token with example.com issuer for x-other. auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, GoodToken}}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); verifier_->verify(context_); EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader)); EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader)); }
Base
1
static bool MR_primality_test(UnsignedBigInteger n, const Vector<UnsignedBigInteger, 256>& tests) { // Written using Wikipedia: // https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test#Miller%E2%80%93Rabin_test ASSERT(!(n < 4)); auto predecessor = n.minus({ 1 }); auto d = predecessor; size_t r = 0; { auto div_result = d.divided_by(2); while (div_result.remainder == 0) { d = div_result.quotient; div_result = d.divided_by(2); ++r; } } if (r == 0) { // n - 1 is odd, so n was even. But there is only one even prime: return n == 2; } for (auto a : tests) { // Technically: ASSERT(2 <= a && a <= n - 2) ASSERT(a < n); auto x = ModularPower(a, d, n); if (x == 1 || x == predecessor) continue; bool skip_this_witness = false; // r − 1 iterations. for (size_t i = 0; i < r - 1; ++i) { x = ModularPower(x, 2, n); if (x == predecessor) { skip_this_witness = true; break; } } if (skip_this_witness) continue; return false; // "composite" } return true; // "probably prime" }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: return EvalImpl<kernel_type, kTfLiteFloat32>(context, node); case kTfLiteUInt8: return EvalImpl<kernel_type, kTfLiteUInt8>(context, node); case kTfLiteInt8: return EvalImpl<kernel_type, kTfLiteInt8>(context, node); case kTfLiteInt16: return EvalImpl<kernel_type, kTfLiteInt16>(context, node); default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } }
Base
1
void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, int xstart, int ystart, int xend, int yend) { jas_matrix_bindsub(s, s1, ystart - s1->ystart_, xstart - s1->xstart_, yend - s1->ystart_ - 1, xend - s1->xstart_ - 1); }
Class
2