code: string, lengths 12 to 2.05k
label_name: string, 5 classes
label: int64, values 0 to 4
static inline ulong encode_twos_comp(long n, int prec) { ulong result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? if (n < 0) { result = -n; result = (result ^ 0xffffffffUL) + 1; result &= (1 << prec) - 1; } else { result = n; } return result; }
Base
1
bool MemFile::seek(int64_t offset, int whence /* = SEEK_SET */) { assertx(m_len != -1); if (whence == SEEK_CUR) { if (offset > 0 && offset < bufferedLen()) { setReadPosition(getReadPosition() + offset); setPosition(getPosition() + offset); return true; } offset += getPosition(); whence = SEEK_SET; } // invalidate the current buffer setWritePosition(0); setReadPosition(0); if (whence == SEEK_SET) { m_cursor = offset; } else { assertx(whence == SEEK_END); m_cursor = m_len + offset; } setPosition(m_cursor); return true; }
Base
1
jas_matrix_t *jas_seq2d_input(FILE *in) { jas_matrix_t *matrix; int i; int j; long x; int numrows; int numcols; int xoff; int yoff; if (fscanf(in, "%d %d", &xoff, &yoff) != 2) return 0; if (fscanf(in, "%d %d", &numcols, &numrows) != 2) return 0; if (!(matrix = jas_seq2d_create(xoff, yoff, xoff + numcols, yoff + numrows))) return 0; if (jas_matrix_numrows(matrix) != numrows || jas_matrix_numcols(matrix) != numcols) { abort(); } /* Get matrix data. */ for (i = 0; i < jas_matrix_numrows(matrix); i++) { for (j = 0; j < jas_matrix_numcols(matrix); j++) { if (fscanf(in, "%ld", &x) != 1) { jas_matrix_destroy(matrix); return 0; } jas_matrix_set(matrix, i, j, JAS_CAST(jas_seqent_t, x)); } } return matrix; }
Class
2
bool AES_GCM_DecryptContext::Decrypt( const void *pEncryptedDataAndTag, size_t cbEncryptedDataAndTag, const void *pIV, void *pPlaintextData, uint32 *pcbPlaintextData, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { unsigned long long pcbPlaintextData_longlong; const int nDecryptResult = crypto_aead_aes256gcm_decrypt_afternm( static_cast<unsigned char*>( pPlaintextData ), &pcbPlaintextData_longlong, nullptr, static_cast<const unsigned char*>( pEncryptedDataAndTag ), cbEncryptedDataAndTag, static_cast<const unsigned char*>( pAdditionalAuthenticationData ), cbAuthenticationData, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbPlaintextData = pcbPlaintextData_longlong; return nDecryptResult == 0; }
Base
1
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < NumOutputs(node); ++i) { SetTensorToDynamic(GetOutput(context, node, i)); } return kTfLiteOk; }
Base
1
ECDSA_Signature_Operation(const ECDSA_PrivateKey& ecdsa, const std::string& emsa) : PK_Ops::Signature_with_EMSA(emsa), m_group(ecdsa.domain()), m_x(ecdsa.private_value()) { #if defined(BOTAN_HAS_RFC6979_GENERATOR) m_rfc6979_hash = hash_for_emsa(emsa); #endif }
Class
2
Status OpLevelCostEstimator::PredictAvgPoolGrad(const OpContext& op_context, NodeCosts* node_costs) const { bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x's shape: op_info.inputs(0) // y_grad: op_info.inputs(1) // Extract x_shape from op_info.inputs(0).value() or op_info.outputs(0). bool shape_found = false; TensorShapeProto x_shape; if (op_info.inputs_size() >= 1 && op_info.inputs(0).has_value()) { const TensorProto& value = op_info.inputs(0).value(); shape_found = GetTensorShapeProtoFromTensorProto(value, &x_shape); } if (!shape_found && op_info.outputs_size() > 0) { x_shape = op_info.outputs(0).shape(); shape_found = true; } if (!shape_found) { // Set the minimum shape that's feasible. x_shape.Clear(); for (int i = 0; i < 4; ++i) { x_shape.add_dim()->set_size(1); } found_unknown_shapes = true; } ConvolutionDimensions dims = OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes); int64_t ops = 0; if (dims.kx <= dims.sx && dims.ky <= dims.sy) { // Non-overlapping window. ops = dims.batch * dims.iz * (dims.ix * dims.iy + dims.ox * dims.oy); } else { // Overlapping window. ops = dims.batch * dims.iz * (dims.ix * dims.iy + dims.ox * dims.oy * (dims.kx * dims.ky + 1)); } auto s = PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes, node_costs); node_costs->max_memory = node_costs->num_total_output_bytes(); return s; }
Base
1
static unsigned HuffmanTree_makeFromFrequencies(HuffmanTree* tree, const unsigned* frequencies, size_t mincodes, size_t numcodes, unsigned maxbitlen) { unsigned error = 0; while(!frequencies[numcodes - 1] && numcodes > mincodes) numcodes--; /*trim zeroes*/ tree->maxbitlen = maxbitlen; tree->numcodes = (unsigned)numcodes; /*number of symbols*/ tree->lengths = (unsigned*)realloc(tree->lengths, numcodes * sizeof(unsigned)); if(!tree->lengths) return 83; /*alloc fail*/ /*initialize all lengths to 0*/ memset(tree->lengths, 0, numcodes * sizeof(unsigned)); error = lodepng_huffman_code_lengths(tree->lengths, frequencies, numcodes, maxbitlen); if(!error) error = HuffmanTree_makeFromLengths2(tree); return error; }
Variant
0
TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* axis, const TfLiteTensor* input, int num_splits) { int axis_value = GetTensorData<int>(axis)[0]; if (axis_value < 0) { axis_value += NumDimensions(input); } TF_LITE_ENSURE(context, axis_value >= 0); TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); const int input_size = SizeOfDimension(input, axis_value); TF_LITE_ENSURE_MSG(context, input_size % num_splits == 0, "Not an even split"); const int slice_size = input_size / num_splits; for (int i = 0; i < NumOutputs(node); ++i) { TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims); output_dims->data[axis_value] = slice_size; TfLiteTensor* output = GetOutput(context, node, i); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_dims)); } return kTfLiteOk; }
Base
1
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const { if (!IsIdentity(node) && !IsIdentityN(node)) { return true; } if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) { return false; } if (!fetch_nodes_known_) { // The output values of this node may be needed. return false; } if (node.input_size() < 1) { // Node lacks input, is invalid return false; } const NodeDef* input = node_map_->GetNode(NodeName(node.input(0))); CHECK(input != nullptr) << "node = " << node.name() << " input = " << node.input(0); // Don't remove Identity nodes corresponding to Variable reads or following // Recv. if (IsVariable(*input) || IsRecv(*input)) { return false; } for (const auto& consumer : node_map_->GetOutputs(node.name())) { if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) { return false; } if (IsSwitch(*input)) { for (const string& consumer_input : consumer->input()) { if (consumer_input == AsControlDependency(node.name())) { return false; } } } } return true; }
Base
1
RawTile OpenJPEGImage::getRegion( int ha, int va, unsigned int res, int layers, int x, int y, unsigned int w, unsigned int h ){ // Scale up our output bit depth to the nearest factor of 8 unsigned int obpc = bpc; if( bpc <= 16 && bpc > 8 ) obpc = 16; else if( bpc <= 8 ) obpc = 8; #ifdef DEBUG Timer timer; timer.start(); #endif RawTile rawtile( 0, res, ha, va, w, h, channels, obpc ); if( obpc == 16 ) rawtile.data = new unsigned short[w * h * channels]; else if( obpc == 8 ) rawtile.data = new unsigned char[w * h * channels]; else throw file_error( "OpenJPEG :: Unsupported number of bits" ); rawtile.dataLength = w*h*channels*(obpc/8); rawtile.filename = getImagePath(); rawtile.timestamp = timestamp; process( res, layers, x, y, w, h, rawtile.data ); #ifdef DEBUG logfile << "OpenJPEG :: getRegion() :: " << timer.getTime() << " microseconds" << endl; #endif return rawtile; }
Base
1
bool load_face(Face & face, unsigned int options) { #ifdef GRAPHITE2_TELEMETRY telemetry::category _misc_cat(face.tele.misc); #endif Face::Table silf(face, Tag::Silf, 0x00050000); if (silf) options &= ~gr_face_dumbRendering; else if (!(options & gr_face_dumbRendering)) return false; if (!face.readGlyphs(options)) return false; if (silf) { if (!face.readFeatures() || !face.readGraphite(silf)) { #if !defined GRAPHITE2_NTRACING if (global_log) { *global_log << json::object << "type" << "fontload" << "failure" << face.error() << "context" << face.error_context() << json::close; } #endif return false; } else return true; } else return options & gr_face_dumbRendering; }
Base
1
void* sspi_SecureHandleGetUpperPointer(SecHandle* handle) { void* pointer; if (!handle) return NULL; pointer = (void*) ~((size_t) handle->dwUpper); return pointer; }
Base
1
req::ptr<XMLDocumentData> doc() const { return m_node->doc(); }
Base
1
MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } }
Base
1
jas_matrix_t *jas_matrix_copy(jas_matrix_t *x) { jas_matrix_t *y; int i; int j; y = jas_matrix_create(x->numrows_, x->numcols_); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; }
Base
1
bool CSecurityTLS::processMsg(CConnection* cc) { rdr::InStream* is = cc->getInStream(); rdr::OutStream* os = cc->getOutStream(); client = cc; initGlobal(); if (!session) { if (!is->checkNoWait(1)) return false; if (is->readU8() == 0) { rdr::U32 result = is->readU32(); CharArray reason; if (result == secResultFailed || result == secResultTooMany) reason.buf = is->readString(); else reason.buf = strDup("Authentication failure (protocol error)"); throw AuthFailureException(reason.buf); } if (gnutls_init(&session, GNUTLS_CLIENT) != GNUTLS_E_SUCCESS) throw AuthFailureException("gnutls_init failed"); if (gnutls_set_default_priority(session) != GNUTLS_E_SUCCESS) throw AuthFailureException("gnutls_set_default_priority failed"); setParam(); } rdr::TLSInStream *tlsis = new rdr::TLSInStream(is, session); rdr::TLSOutStream *tlsos = new rdr::TLSOutStream(os, session); int err; err = gnutls_handshake(session); if (err != GNUTLS_E_SUCCESS) { delete tlsis; delete tlsos; if (!gnutls_error_is_fatal(err)) return false; vlog.error("TLS Handshake failed: %s\n", gnutls_strerror (err)); shutdown(false); throw AuthFailureException("TLS Handshake failed"); } checkSession(); cc->setStreams(fis = tlsis, fos = tlsos); return true; }
Class
2
boost::int64_t lazy_entry::int_value() const { TORRENT_ASSERT(m_type == int_t); boost::int64_t val = 0; bool negative = false; if (*m_data.start == '-') negative = true; parse_int(negative?m_data.start+1:m_data.start, m_data.start + m_size, 'e', val); if (negative) val = -val; return val; }
Class
2
LiteralString(const std::string &s, bool ignore_case) : lit_(s), ignore_case_(ignore_case), is_word_(false) {}
Base
1
TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
optional<ARN> ARN::parse(const string& s, bool wildcards) { static const char str_wild[] = "arn:([^:]*):([^:]*):([^:]*):([^:]*):([^:]*)"; static const regex rx_wild(str_wild, sizeof(str_wild) - 1, ECMAScript | optimize); static const char str_no_wild[] = "arn:([^:*]*):([^:*]*):([^:*]*):([^:*]*):([^:*]*)"; static const regex rx_no_wild(str_no_wild, sizeof(str_no_wild) - 1, ECMAScript | optimize); smatch match; if ((s == "*") && wildcards) { return ARN(Partition::wildcard, Service::wildcard, "*", "*", "*"); } else if (regex_match(s, match, wildcards ? rx_wild : rx_no_wild)) { ceph_assert(match.size() == 6); ARN a; { auto p = to_partition(match[1], wildcards); if (!p) return none; a.partition = *p; } { auto s = to_service(match[2], wildcards); if (!s) { return none; } a.service = *s; } a.region = match[3]; a.account = match[4]; a.resource = match[5]; return a; } return none; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): Our current implementations rely on the inputs being 4D. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); // ResizeBilinear creates a float tensor even when the input is made of // integers. output->type = input->type; if (!IsConstantTensor(size)) { SetTensorToDynamic(output); return kTfLiteOk; } // Ensure params are valid. auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data); if (params->half_pixel_centers && params->align_corners) { context->ReportError( context, "If half_pixel_centers is True, align_corners must be False."); return kTfLiteError; } return ResizeOutputTensor(context, input, size, output); }
Base
1
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; }
Base
1
Pl_ASCIIHexDecoder::flush() { if (this->pos == 0) { QTC::TC("libtests", "Pl_ASCIIHexDecoder no-op flush"); return; } int b[2]; for (int i = 0; i < 2; ++i) { if (this->inbuf[i] >= 'A') { b[i] = this->inbuf[i] - 'A' + 10; } else { b[i] = this->inbuf[i] - '0'; } } unsigned char ch = static_cast<unsigned char>((b[0] << 4) + b[1]); QTC::TC("libtests", "Pl_ASCIIHexDecoder partial flush", (this->pos == 2) ? 0 : 1); getNext()->write(&ch, 1); this->pos = 0; this->inbuf[0] = '0'; this->inbuf[1] = '0'; this->inbuf[2] = '\0'; }
Base
1
TEST_P(SslSocketTest, GetUriWithUriSan) { const std::string client_ctx_yaml = R"EOF( common_tls_context: tls_certificates: certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" )EOF"; const std::string server_ctx_yaml = R"EOF( common_tls_context: tls_certificates: certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem" validation_context: trusted_ca: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" match_subject_alt_names: exact: "spiffe://lyft.com/test-team" )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); testUtil(test_options.setExpectedClientCertUri("spiffe://lyft.com/test-team") .setExpectedSerialNumber(TEST_SAN_URI_CERT_SERIAL)); }
Base
1
static void jsiDumpInstr(Jsi_Interp *interp, jsi_Pstate *ps, Jsi_Value *_this, jsi_TryList *trylist, jsi_OpCode *ip, Jsi_OpCodes *opcodes) { int i; char buf[200]; jsi_code_decode(interp, ip, ip - opcodes->codes, buf, sizeof(buf)); Jsi_Printf(interp, jsi_Stderr, "%p: %-30.200s : THIS=%s, STACK=[", ip, buf, jsi_evalprint(_this)); for (i = 0; i < interp->framePtr->Sp; ++i) { Jsi_Printf(interp, jsi_Stderr, "%s%s", (i>0?", ":""), jsi_evalprint(_jsi_STACKIDX(i))); } Jsi_Printf(interp, jsi_Stderr, "]"); if (ip->fname) { const char *fn = ip->fname, *cp = Jsi_Strrchr(fn, '/'); if (cp) fn = cp+1; Jsi_Printf(interp, jsi_Stderr, ", %s:%d", fn, ip->Line); } Jsi_Printf(interp, jsi_Stderr, "\n"); jsi_TryList *tlt = trylist; for (i = 0; tlt; tlt = tlt->next) i++; if (ps->last_exception) Jsi_Printf(interp, jsi_Stderr, "TL: %d, excpt: %s\n", i, jsi_evalprint(ps->last_exception)); }
Base
1
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight = GetInput(context, node, 2); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output = GetOutput(context, node, 0); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); }
Base
1
TfLiteRegistration CopyOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { // Set output size to input size const TfLiteTensor* tensor0 = GetInput(context, node, 0); TfLiteTensor* tensor1 = GetOutput(context, node, 0); TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims); return context->ResizeTensor(context, tensor1, newSize); }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { CallReporting* call_reporting = static_cast<CallReporting*>(node->builtin_data); // Copy input data to output data. const TfLiteTensor* a0 = GetInput(context, node, 0); TfLiteTensor* a1 = GetOutput(context, node, 0); int num = a0->dims->data[0]; for (int i = 0; i < num; i++) { a1->data.f[i] = a0->data.f[i]; } call_reporting->Record(); return kTfLiteOk; }; return reg; }
Base
1
void TestSocketLineReader::initTestCase() { m_server = new Server(this); QVERIFY2(m_server->listen(QHostAddress::LocalHost, 8694), "Failed to create local tcp server"); m_timer.setInterval(4000);//For second is more enough to send some data via local socket m_timer.setSingleShot(true); connect(&m_timer, &QTimer::timeout, &m_loop, &QEventLoop::quit); m_conn = new QSslSocket(this); m_conn->connectToHost(QHostAddress::LocalHost, 8694); connect(m_conn, &QAbstractSocket::connected, &m_loop, &QEventLoop::quit); m_timer.start(); m_loop.exec(); QVERIFY2(m_conn->isOpen(), "Could not connect to local tcp server"); }
Class
2
TfLiteStatus PrepareHashtableFind(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputResourceIdTensor); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1); const TfLiteTensor* default_value_tensor = GetInput(context, node, kDefaultValueTensor); const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor); TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, default_value_tensor->type, output_tensor->type); TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 && output_tensor->type == kTfLiteString) || (key_tensor->type == kTfLiteString && output_tensor->type == kTfLiteInt64)); return context->ResizeTensor(context, output_tensor, TfLiteIntArrayCopy(key_tensor->dims)); }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalAddN<float>(context, node); } else if (output->type == kTfLiteInt32) { EvalAddN<int32_t>(context, node); } else { context->ReportError(context, "AddN only supports FLOAT32|INT32 now, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
Base
1
R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp) { ut64 sz = 0; if (evp == NULL) { return sz; } // evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; // evp->value = r_bin_java_element_value_new (bin, offset+2); if (evp->value) { sz += r_bin_java_element_value_calc_size (evp->value); } return sz; }
Base
1
void jas_matrix_asr(jas_matrix_t *matrix, int n) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; assert(n >= 0); if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { //*data >>= n; *data = jas_seqent_asr(*data, n); } } } }
Base
1
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
Base
1
static unsigned short get_ushort(const unsigned char *data) { unsigned short val = *(const unsigned short *)data; #ifdef OPJ_BIG_ENDIAN val = ((val & 0xffU) << 8) | (val >> 8); #endif return val; }
Base
1
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_fw_xfer32 kfw32; struct mpt_fw_xfer kfw; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = kfw32.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n", iocp->name)); kfw.iocnum = iocnum; kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
Class
2
void ModifiablePixelBuffer::fillRect(const Rect& r, const void* pix) { int stride; U8 *buf; int w, h, b; w = r.width(); h = r.height(); b = format.bpp/8; if (h == 0) return; buf = getBufferRW(r, &stride); if (b == 1) { while (h--) { memset(buf, *(const U8*)pix, w); buf += stride * b; } } else { U8 *start; int w1; start = buf; w1 = w; while (w1--) { memcpy(buf, pix, b); buf += b; } buf += (stride - w) * b; h--; while (h--) { memcpy(buf, start, w * b); buf += stride * b; } } commitBufferRW(r); }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* axis = GetInput(context, node, kAxis); // Make sure the axis is only 1 dimension. TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); // Make sure the axis is only either int32 or int64. TF_LITE_ENSURE(context, axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data); switch (params->output_type) { case kTfLiteInt32: output->type = kTfLiteInt32; break; case kTfLiteInt64: output->type = kTfLiteInt64; break; default: context->ReportError(context, "Unknown index output data type: %d", params->output_type); return kTfLiteError; } // Check conditions for different types. switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt32: break; default: context->ReportError( context, "Unknown input type: %d, only float32 and int types are supported", input->type); return kTfLiteError; } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (IsConstantTensor(axis)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; }
Base
1
void Compute(OpKernelContext* context) override { // Get the input Tensors. OpInputList params_nested_splits_in; OP_REQUIRES_OK(context, context->input_list("params_nested_splits", &params_nested_splits_in)); const Tensor& params_dense_values_in = context->input(params_nested_splits_in.size()); const Tensor& indices_in = context->input(params_nested_splits_in.size() + 1); DCHECK_GT(params_nested_splits_in.size(), 0); // Enforced by REGISTER_OP. SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1; OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params)); OP_REQUIRES(context, params_dense_values_in.dims() > 0, errors::InvalidArgument("params.rank must be nonzero")); SPLITS_TYPE num_params_dense_values = params_dense_values_in.dim_size(0); // Calculate the `splits`, and store the value slices that we need to // copy in `value_slices`. std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>> value_slices; SPLITS_TYPE num_values = 0; std::vector<std::vector<SPLITS_TYPE>> out_splits; OP_REQUIRES_OK(context, MakeSplits(indices_in, params_nested_splits_in, num_params_dense_values, &out_splits, &value_slices, &num_values)); // Write the output tensors. OP_REQUIRES_OK(context, WriteSplits(out_splits, context)); OP_REQUIRES_OK(context, WriteValues(params_dense_values_in, value_slices, out_splits.size(), num_values, context)); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* lookup = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* key = GetInput(context, node, 1); TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1); TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32); const TfLiteTensor* value = GetInput(context, node, 2); TF_LITE_ENSURE(context, NumDimensions(value) >= 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0), SizeOfDimension(value, 0)); if (value->type == kTfLiteString) { TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1); } TfLiteTensor* hits = GetOutput(context, node, 1); TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8); TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1); hitSize->data[0] = SizeOfDimension(lookup, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, value->type, output->type); TfLiteStatus status = kTfLiteOk; if (output->type != kTfLiteString) { TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); for (int i = 1; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } status = context->ResizeTensor(context, output, outputSize); } if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) { status = kTfLiteError; } return status; }
Base
1
bool AES_GCM_EncryptContext::Encrypt( const void *pPlaintextData, size_t cbPlaintextData, const void *pIV, void *pEncryptedDataAndTag, uint32 *pcbEncryptedDataAndTag, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { unsigned long long pcbEncryptedDataAndTag_longlong = *pcbEncryptedDataAndTag; crypto_aead_aes256gcm_encrypt_afternm( static_cast<unsigned char*>( pEncryptedDataAndTag ), &pcbEncryptedDataAndTag_longlong, static_cast<const unsigned char*>( pPlaintextData ), cbPlaintextData, static_cast<const unsigned char*>(pAdditionalAuthenticationData), cbAuthenticationData, nullptr, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbEncryptedDataAndTag = pcbEncryptedDataAndTag_longlong; return true; }
Base
1
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
Base
1
QByteArray Cipher::blowfishECB(QByteArray cipherText, bool direction) { QCA::Initializer init; QByteArray temp = cipherText; //do padding ourselves if (direction) { while ((temp.length() % 8) != 0) temp.append('\0'); } else { temp = b64ToByte(temp); while ((temp.length() % 8) != 0) temp.append('\0'); } QCA::Direction dir = (direction) ? QCA::Encode : QCA::Decode; QCA::Cipher cipher(m_type, QCA::Cipher::ECB, QCA::Cipher::NoPadding, dir, m_key); QByteArray temp2 = cipher.update(QCA::MemoryRegion(temp)).toByteArray(); temp2 += cipher.final().toByteArray(); if (!cipher.ok()) return cipherText; if (direction) temp2 = byteToB64(temp2); return temp2; }
Base
1
void Context::onUpstreamConnectionClose(PeerType peer_type) { if (wasm_->onUpstreamConnectionClose_) { wasm_->onUpstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type)); } }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
Base
1
FastCGIServer::FastCGIServer(const std::string &address, int port, int workers, bool useFileSocket) : Server(address, port), m_worker(&m_eventBaseManager), m_dispatcher(workers, workers, RuntimeOption::ServerThreadDropCacheTimeoutSeconds, RuntimeOption::ServerThreadDropStack, this, RuntimeOption::ServerThreadJobLIFOSwitchThreshold, RuntimeOption::ServerThreadJobMaxQueuingMilliSeconds, RequestPriority::k_numPriorities) { folly::SocketAddress sock_addr; if (useFileSocket) { sock_addr.setFromPath(address); } else if (address.empty()) { sock_addr.setFromLocalPort(port); } else { sock_addr.setFromHostPort(address, port); } m_socketConfig.bindAddress = sock_addr; m_socketConfig.acceptBacklog = RuntimeOption::ServerBacklog; std::chrono::seconds timeout; if (RuntimeOption::ConnectionTimeoutSeconds >= 0) { timeout = std::chrono::seconds(RuntimeOption::ConnectionTimeoutSeconds); } else { // default to 2 minutes timeout = std::chrono::seconds(120); } m_socketConfig.connectionIdleTimeout = timeout; }
Class
2
mptctl_eventenable (unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; MPT_ADAPTER *ioc; int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " "Unable to read in mpt_ioctl_eventenable struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { /* Have not yet allocated memory - do so now. */ int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); ioc->events = kzalloc(sz, GFP_KERNEL); if (!ioc->events) { printk(MYIOC_s_ERR_FMT ": ERROR - Insufficient memory to add adapter!\n", ioc->name); return -ENOMEM; } ioc->alloc_total += sz; ioc->eventContext = 0; } /* Update the IOC event logging flag. */ ioc->eventTypes = karg.eventTypes; return 0; }
Class
2
static TfLiteRegistration DynamicCopyOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { // Output 0 is dynamic TfLiteTensor* output0 = GetOutput(context, node, 0); SetTensorToDynamic(output0); // Output 1 has the same shape as input. const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output1 = GetOutput(context, node, 1); TF_LITE_ENSURE_STATUS(context->ResizeTensor( context, output1, TfLiteIntArrayCopy(input->dims))); return kTfLiteOk; }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { // Not implemented since this isn't required in testing. return kTfLiteOk; }; return reg; }
Base
1
void CreateNgrams(const tstring* data, tstring* output, int num_ngrams, int ngram_width) const { for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) { int pad_width = get_pad_width(ngram_width); int left_padding = std::max(0, pad_width - ngram_index); int right_padding = std::max(0, pad_width - (num_ngrams - (ngram_index + 1))); int num_tokens = ngram_width - (left_padding + right_padding); int data_start_index = left_padding > 0 ? 0 : ngram_index - pad_width; // Calculate the total expected size of the ngram so we can reserve the // correct amount of space in the string. int ngram_size = 0; // Size of the left padding. ngram_size += left_padding * left_pad_.length(); // Size of the tokens. for (int n = 0; n < num_tokens; ++n) { ngram_size += data[data_start_index + n].length(); } // Size of the right padding. ngram_size += right_padding * right_pad_.length(); // Size of the separators. int num_separators = left_padding + right_padding + num_tokens - 1; ngram_size += num_separators * separator_.length(); // Build the ngram. tstring* ngram = &output[ngram_index]; ngram->reserve(ngram_size); for (int n = 0; n < left_padding; ++n) { ngram->append(left_pad_); ngram->append(separator_); } for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } ngram->append(data[data_start_index + num_tokens - 1]); for (int n = 0; n < right_padding; ++n) { ngram->append(separator_); ngram->append(right_pad_); } // In debug mode only: validate that we've reserved enough space for the // ngram. DCHECK_EQ(ngram_size, ngram->size()); } }
Base
1
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
Base
1
friend H AbslHashValue(H h, const TensorKey& k) { const uint8* d = static_cast<uint8*>(k.data()); size_t s = k.AllocatedBytes(); std::vector<uint8> vec; vec.reserve(s); for (int i = 0; i < s; i++) { vec.push_back(d[i]); } return H::combine(std::move(h), s); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = input->dims->data[0]; output_size->data[1] = input->dims->data[1]; output_size->data[2] = input->dims->data[2]; output_size->data[3] = input->dims->data[3]; return context->ResizeTensor(context, output, output_size); }
Base
1
void pcre_dump_cache(const std::string& filename) { s_pcreCache.dump(filename); }
Base
1
void writeStats(Array& /*ret*/) override { fprintf(stderr, "writeStats start\n"); // RetSame: the return value is the same instance every time // HasThis: call has a this argument // AllSame: all returns were the same data even though args are different // MemberCount: number of different arg sets (including this) fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis " "AllSame MemberCount\n"); for (auto& me : m_memos) { if (me.second.m_ignore) continue; if (me.second.m_count == 1) continue; int min_ser_len = 999999999; int max_ser_len = 0; int count = 0; int member_count = 0; bool all_same = true; if (me.second.m_has_this) { bool any_multiple = false; auto& fr = me.second.m_member_memos.begin()->second.m_return_value; member_count = me.second.m_member_memos.size(); for (auto& mme : me.second.m_member_memos) { if (mme.second.m_return_value != fr) all_same = false; count += mme.second.m_count; auto ser_len = mme.second.m_return_value.length(); min_ser_len = std::min(min_ser_len, ser_len); max_ser_len = std::max(max_ser_len, ser_len); if (mme.second.m_count > 1) any_multiple = true; } if (!any_multiple && !all_same) continue; } else { min_ser_len = max_ser_len = me.second.m_return_value.length(); count = me.second.m_count; all_same = me.second.m_ret_tv_same; } fprintf(stderr, "%d %s %d %d %s %s %s %d\n", count, me.first.data(), min_ser_len, max_ser_len, me.second.m_ret_tv_same ? " true" : "false", me.second.m_has_this ? " true" : "false", all_same ? " true" : "false", member_count ); } fprintf(stderr, "writeStats end\n"); }
Base
1
TcpOption *tcpGetOption(TcpHeader *segment, uint8_t kind) { size_t length; uint_t i; TcpOption *option; //Make sure the TCP header is valid if(segment->dataOffset < 5) return NULL; //Compute the length of the options field length = segment->dataOffset * 4 - sizeof(TcpHeader); //Point to the very first option i = 0; //Parse TCP options while(i < length) { //Point to the current option option = (TcpOption *) (segment->options + i); //NOP option detected? if(option->kind == TCP_OPTION_NOP) { i++; continue; } //END option detected? if(option->kind == TCP_OPTION_END) break; //Check option length if((i + 1) >= length || (i + option->length) > length) break; //Current option kind match the specified one? if(option->kind == kind) return option; //Jump to next the next option i += option->length; } //Specified option code not found return NULL; }
Class
2
friend H AbslHashValue(H h, const TensorKey& k) { const uint8* d = static_cast<uint8*>(k.data()); size_t s = k.AllocatedBytes(); std::vector<uint8> vec; vec.reserve(s); for (int i = 0; i < s; i++) { vec.push_back(d[i]); } return H::combine(std::move(h), s); }
Variant
0
static int lookup1_values(int entries, int dim) { int r = (int) floor(exp((float) log((float) entries) / dim)); if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning; ++r; // floor() to avoid _ftol() when non-CRT assert(pow((float) r+1, dim) > entries); assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above return r; }
Base
1
void RequestContext::StartBackendSpanAndSetTraceContext() { backend_span_.reset(CreateSpan(cloud_trace_.get(), "Backend")); // TODO: A better logic would be to send for GRPC backends the grpc-trace-bin // header, and for http/https backends the X-Cloud-Trace-Context header. std::string trace_context_header = cloud_trace()->ToTraceContextHeader( backend_span_->trace_span()->span_id()); // Set trace context header to backend. Status status = request()->AddHeaderToBackend( cloud_trace()->header_type() == HeaderType::CLOUD_TRACE_CONTEXT ? kCloudTraceContextHeader : kGRpcTraceContextHeader, trace_context_header); if (!status.ok()) { service_context()->env()->LogError( "Failed to set trace context header to backend."); } }
Base
1
TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); }
Base
1
bool IsPadOpSupported(const TfLiteRegistration* registration, const TfLiteNode* node, TfLiteContext* context) { // padding is d x 2 tensor, where d is the dimension of input. const TfLiteTensor* padding = GetInput(context, node, 1); if (!IsConstantTensor(padding)) { TF_LITE_KERNEL_LOG(context, "%s: Only constant padding is supported for PAD.", padding->name); return false; } if (padding->dims->data[0] != 4 || padding->dims->data[1] != 2) { TF_LITE_KERNEL_LOG(context, "%s: Only 4D inputs are supported for PAD.", padding->name); return false; } const int32_t* padding_data = GetTensorData<int32_t>(padding); if (!(padding_data[0] == 0 && padding_data[1] == 0)) { TF_LITE_KERNEL_LOG( context, "%s: Padding for batch dimension is not supported in PAD.", padding->name); return false; } if (!(padding_data[6] == 0 && padding_data[7] == 0)) { TF_LITE_KERNEL_LOG( context, "%s: Padding for channel dimension is not supported in PAD.", padding->name); return false; } return true; }
Base
1
bool ResourceHandle::ParseFromString(const string& s) { ResourceHandleProto proto; const bool status = proto.ParseFromString(s); if (status) FromProto(proto); return status; }
Base
1
AP4_AvccAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("Configuration Version", m_ConfigurationVersion); const char* profile_name = GetProfileName(m_Profile); if (profile_name) { inspector.AddField("Profile", profile_name); } else { inspector.AddField("Profile", m_Profile); } inspector.AddField("Profile Compatibility", m_ProfileCompatibility, AP4_AtomInspector::HINT_HEX); inspector.AddField("Level", m_Level); inspector.AddField("NALU Length Size", m_NaluLengthSize); for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) { inspector.AddField("Sequence Parameter", m_SequenceParameters[i].GetData(), m_SequenceParameters[i].GetDataSize()); } for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) { inspector.AddField("Picture Parameter", m_PictureParameters[i].GetData(), m_PictureParameters[i].GetDataSize()); } return AP4_SUCCESS; }
Base
1
Pl_AES_PDF::flush(bool strip_padding) { assert(this->offset == this->buf_size); if (first) { first = false; bool return_after_init = false; if (this->cbc_mode) { if (encrypt) { // Set cbc_block to the initialization vector, and if // not zero, write it to the output stream. initializeVector(); if (! (this->use_zero_iv || this->use_specified_iv)) { getNext()->write(this->cbc_block, this->buf_size); } } else if (this->use_zero_iv || this->use_specified_iv) { // Initialize vector with zeroes; zero vector was not // written to the beginning of the input file. initializeVector(); } else { // Take the first block of input as the initialization // vector. There's nothing to write at this time. memcpy(this->cbc_block, this->inbuf, this->buf_size); this->offset = 0; return_after_init = true; } } this->crypto->rijndael_init( encrypt, this->key.get(), key_bytes, this->cbc_mode, this->cbc_block); if (return_after_init) { return; } } if (this->encrypt) { this->crypto->rijndael_process(this->inbuf, this->outbuf); } else { this->crypto->rijndael_process(this->inbuf, this->outbuf); } unsigned int bytes = this->buf_size; if (strip_padding) { unsigned char last = this->outbuf[this->buf_size - 1]; if (last <= this->buf_size) { bool strip = true; for (unsigned int i = 1; i <= last; ++i) { if (this->outbuf[this->buf_size - i] != last) { strip = false; break; } } if (strip) { bytes -= last; } } } getNext()->write(this->outbuf, bytes); this->offset = 0; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); auto data_type = output->type; TF_LITE_ENSURE(context, data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 || data_type == kTfLiteInt8 || data_type == kTfLiteInt32 || data_type == kTfLiteInt64); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); const int block_size = params->block_size; const int input_height = input->dims->data[1]; const int input_width = input->dims->data[2]; int output_height = input_height / block_size; int output_width = input_width / block_size; TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size); TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size); TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = input->dims->data[0]; output_size->data[1] = output_height; output_size->data[2] = output_width; output_size->data[3] = input->dims->data[3] * block_size * block_size; return context->ResizeTensor(context, output, output_size); }
Base
1
bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) { // Parse type. string field_name; bool is_list = absl::ConsumePrefix(&type, "list("); if (absl::ConsumePrefix(&type, "string")) { field_name = "s"; } else if (absl::ConsumePrefix(&type, "int")) { field_name = "i"; } else if (absl::ConsumePrefix(&type, "float")) { field_name = "f"; } else if (absl::ConsumePrefix(&type, "bool")) { field_name = "b"; } else if (absl::ConsumePrefix(&type, "type")) { field_name = "type"; } else if (absl::ConsumePrefix(&type, "shape")) { field_name = "shape"; } else if (absl::ConsumePrefix(&type, "tensor")) { field_name = "tensor"; } else if (absl::ConsumePrefix(&type, "func")) { field_name = "func"; } else if (absl::ConsumePrefix(&type, "placeholder")) { field_name = "placeholder"; } else { return false; } if (is_list && !absl::ConsumePrefix(&type, ")")) { return false; } // Construct a valid text proto message to parse. string to_parse; if (is_list) { // TextFormat parser considers "i: 7" to be the same as "i: [7]", // but we only want to allow list values with []. StringPiece cleaned = text; str_util::RemoveLeadingWhitespace(&cleaned); str_util::RemoveTrailingWhitespace(&cleaned); if (cleaned.size() < 2 || cleaned[0] != '[' || cleaned[cleaned.size() - 1] != ']') { return false; } cleaned.remove_prefix(1); str_util::RemoveLeadingWhitespace(&cleaned); if (cleaned.size() == 1) { // User wrote "[]", so return empty list without invoking the TextFormat // parse which returns an error for "i: []". out->Clear(); out->mutable_list(); return true; } to_parse = strings::StrCat("list { ", field_name, ": ", text, " }"); } else { to_parse = strings::StrCat(field_name, ": ", text); } return ProtoParseFromString(to_parse, out); }
Class
2
int size() const { return m_str ? m_str->size() : 0; }
Base
1
void *UntrustedCacheMalloc::GetBuffer() { void **buffers = nullptr; void *buffer; bool is_pool_empty; { LockGuard spin_lock(&lock_); is_pool_empty = buffer_pool_.empty(); if (is_pool_empty) { buffers = primitives::AllocateUntrustedBuffers(kPoolIncrement, kPoolEntrySize); for (int i = 0; i < kPoolIncrement; i++) { if (!buffers[i] || !TrustedPrimitives::IsOutsideEnclave(buffers[i], kPoolEntrySize)) { abort(); } buffer_pool_.push(buffers[i]); } } buffer = buffer_pool_.top(); buffer_pool_.pop(); busy_buffers_.insert(buffer); } if (is_pool_empty) { // Free memory held by the array of buffer pointers returned by // AllocateUntrustedBuffers. Free(buffers); } return buffer; }
Class
2
TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
HexOutStream::writeBuffer() { U8* pos = start; while (pos != ptr) { out_stream.check(2); U8* optr = out_stream.getptr(); U8* oend = out_stream.getend(); int length = min(ptr-pos, (oend-optr)/2); for (int i=0; i<length; i++) { optr[i*2] = intToHex((pos[i] >> 4) & 0xf); optr[i*2+1] = intToHex(pos[i] & 0xf); } out_stream.setptr(optr + length*2); pos += length; } offset += ptr - start; ptr = start; }
Base
1
TfLiteStatus PrepareHashtable(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 0); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE(context, node->user_data != nullptr); const auto* params = reinterpret_cast<const TfLiteHashtableParams*>(node->user_data); TF_LITE_ENSURE(context, !params->table_name.empty()); TF_LITE_ENSURE(context, (params->key_dtype == kTfLiteInt64 && params->value_dtype == kTfLiteString) || (params->key_dtype == kTfLiteString && params->value_dtype == kTfLiteInt64)); TfLiteTensor* resource_handle_tensor = GetOutput(context, node, kResourceHandleTensor); TF_LITE_ENSURE(context, resource_handle_tensor != nullptr); TF_LITE_ENSURE_EQ(context, resource_handle_tensor->type, kTfLiteInt32); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); outputSize->data[0] = 1; return context->ResizeTensor(context, resource_handle_tensor, outputSize); }
Base
1
int ecall_restore(const char *input, uint64_t input_len, char **output, uint64_t *output_len) { if (!asylo::primitives::TrustedPrimitives::IsOutsideEnclave(input, input_len) || !asylo::primitives::TrustedPrimitives::IsOutsideEnclave( output_len, sizeof(uint64_t))) { asylo::primitives::TrustedPrimitives::BestEffortAbort( "ecall_restore: input/output found to not be in untrusted memory."); } int result = 0; size_t tmp_output_len; try { result = asylo::Restore(input, static_cast<size_t>(input_len), output, &tmp_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<uint64_t>(tmp_output_len); } return result; }
Base
1
bool IsReshapeOpSupported(const TfLiteRegistration* registration, const TfLiteNode* node, TfLiteContext* context, int coreml_version) { if (coreml_version >= 3) { return false; } if (node->inputs->size == 1) { const auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data); return params->num_dimensions == 3 || params->num_dimensions == 4; } const int kShapeTensor = 1; const auto* shape = GetInput(context, node, kShapeTensor); if (shape->allocation_type != kTfLiteMmapRo) { TF_LITE_KERNEL_LOG(context, "Reshape has non-const shape."); return false; } const bool is_shape_tensor = shape->dims->size == 1 && shape->type == kTfLiteInt32; return is_shape_tensor && (shape->dims->data[0] == 3 || shape->dims->data[0] == 4); }
Base
1
TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
int HexInStream::pos() { return offset + ptr - start; }
Base
1
void FormatConverter<T>::InitSparseToDenseConverter( std::vector<int> shape, std::vector<int> traversal_order, std::vector<TfLiteDimensionType> format, std::vector<int> dense_size, std::vector<std::vector<int>> segments, std::vector<std::vector<int>> indices, std::vector<int> block_map) { dense_shape_ = std::move(shape); traversal_order_ = std::move(traversal_order); block_map_ = std::move(block_map); format_ = std::move(format); dense_size_ = 1; for (int i = 0; i < dense_shape_.size(); i++) { dense_size_ *= dense_shape_[i]; } dim_metadata_.resize(2 * format_.size()); for (int i = 0; i < format_.size(); i++) { if (format_[i] == kTfLiteDimDense) { dim_metadata_[2 * i] = {dense_size[i]}; } else { dim_metadata_[2 * i] = std::move(segments[i]); dim_metadata_[2 * i + 1] = std::move(indices[i]); } } int original_rank = dense_shape_.size(); int block_dim = 0; blocked_shape_.resize(original_rank); block_size_.resize(block_map_.size()); for (int i = 0; i < original_rank; i++) { if (block_dim < block_map_.size() && block_map_[block_dim] == i) { int orig_dim = traversal_order_[original_rank + block_dim]; block_size_[block_dim] = dense_size[orig_dim]; blocked_shape_[i] = dense_shape_[i] / dense_size[orig_dim]; block_dim++; } else { blocked_shape_[i] = dense_shape_[i]; } } }
Base
1
void TileManager::crop( RawTile *ttt ){

  int tw = image->getTileWidth();
  int th = image->getTileHeight();

  if( loglevel >= 5 ){
    *logfile << "TileManager :: Edge tile: Base size: " << tw << "x" << th
             << ": This tile: " << ttt->width << "x" << ttt->height << endl;
  }

  // Create a new buffer, fill it with the old data, then copy
  // back the cropped part into the RawTile buffer
  int len = tw * th * ttt->channels * (ttt->bpc/8);
  unsigned char* buffer = (unsigned char*) malloc( len );
  unsigned char* src_ptr = (unsigned char*) memcpy( buffer, ttt->data, len );
  unsigned char* dst_ptr = (unsigned char*) ttt->data;

  // Copy one scanline at a time
  len = ttt->width * ttt->channels * (ttt->bpc/8);
  for( unsigned int i=0; i<ttt->height; i++ ){
    memcpy( dst_ptr, src_ptr, len );
    dst_ptr += len;
    src_ptr += tw * ttt->channels * (ttt->bpc/8);
  }

  free( buffer );

  // Reset the data length
  len = ttt->width * ttt->height * ttt->channels * (ttt->bpc/8);
  ttt->dataLength = len;
  ttt->padded = false;
}
Base
1
void AddBatchOffsets(Tensor* indices, const Tensor& params) {
  int64_t batch_size = 1;  // The size of all batch dimensions.
  for (int idx = 0; idx < batch_dims_; ++idx) {
    batch_size *= params.dim_size(idx);
  }

  auto indices_flat = indices->flat<Index>();
  int64_t const index_inner_size = indices->NumElements() / batch_size;
  int64_t const batch_offset = params.dim_size(batch_dims_);
  for (int64_t batch_idx = 0, dest_idx = 0; batch_idx < batch_size;
       ++batch_idx) {
    for (int64_t idx = 0; idx < index_inner_size; ++idx) {
      indices_flat(dest_idx++) += batch_offset * batch_idx;
    }
  }
}
Base
1
int64_t MemFile::readImpl(char *buffer, int64_t length) {
  assertx(m_len != -1);
  assertx(length > 0);
  int64_t remaining = m_len - m_cursor;
  if (remaining < length) length = remaining;
  if (length > 0) {
    memcpy(buffer, (const void *)(m_data + m_cursor), length);
  }
  m_cursor += length;
  return length;
}
Base
1
Status TensorSliceReader::GetTensor(
    const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor) const {
  DataType type;
  TensorShape shape;
  TensorSlice slice;
  {
    mutex_lock l(mu_);
    const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
    if (tss == nullptr) {
      return errors::NotFound(name, " not found in checkpoint file");
    }

    if (tss->Slices().size() > 1) {
      // TODO(sherrym): Support multi-slice checkpoints.
      return errors::Unimplemented("Sliced checkpoints are not supported");
    }

    type = tss->type();
    shape = tss->shape();
    slice = tss->Slices().begin()->second.slice;
  }

  std::unique_ptr<tensorflow::Tensor> t(new tensorflow::Tensor(type, shape));
  bool success = false;

#define READER_COPY(dt)                                                  \
  case dt:                                                               \
    success = CopySliceData(name, slice,                                 \
                            t->flat<EnumToDataType<dt>::Type>().data()); \
    break;

  switch (type) {
    READER_COPY(DT_FLOAT);
    READER_COPY(DT_DOUBLE);
    READER_COPY(DT_INT32);
    READER_COPY(DT_UINT8);
    READER_COPY(DT_INT16);
    READER_COPY(DT_INT8);
    READER_COPY(DT_INT64);
    READER_COPY(DT_STRING);
    default:
      return errors::Unimplemented("Data type not supported");
  }
#undef READER_COPY

  if (!success) {
    return errors::NotFound(name, " not found in checkpoint file");
  }
  std::swap(*out_tensor, t);

  return Status::OK();
}
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const int num_elements = NumElements(input);
  TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output));
  switch (input->type) {
    case kTfLiteInt64:
      return copyToTensor(context, input->data.i64, output, num_elements);
    case kTfLiteInt32:
      return copyToTensor(context, input->data.i32, output, num_elements);
    case kTfLiteUInt8:
      return copyToTensor(context, input->data.uint8, output, num_elements);
    case kTfLiteFloat32:
      return copyToTensor(context, GetTensorData<float>(input), output,
                          num_elements);
    case kTfLiteBool:
      return copyToTensor(context, input->data.b, output, num_elements);
    case kTfLiteComplex64:
      return copyToTensor(
          context, reinterpret_cast<std::complex<float>*>(input->data.c64),
          output, num_elements);
    default:
      // Unsupported type.
      TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast");
  }
  return kTfLiteOk;
}
Base
1
void LibRaw::exp_bef(float shift, float smooth)
{
  // params limits
  if(shift>8) shift = 8;
  if(shift<0.25) shift = 0.25;
  if(smooth < 0.0) smooth = 0.0;
  if(smooth > 1.0) smooth = 1.0;

  unsigned short *lut = (ushort*)malloc((TBLN+1)*sizeof(unsigned short));

  if(shift <=1.0)
  {
    for(int i=0;i<=TBLN;i++)
      lut[i] = (unsigned short)((float)i*shift);
  }
  else
  {
    float x1,x2,y1,y2;
    float cstops = log(shift)/log(2.0f);
    float room = cstops*2;
    float roomlin = powf(2.0f,room);
    x2 = (float)TBLN;
    x1 = (x2+1)/roomlin-1;
    y1 = x1*shift;
    y2 = x2*(1+(1-smooth)*(shift-1));
    float sq3x=powf(x1*x1*x2,1.0f/3.0f);
    float B = (y2-y1+shift*(3*x1-3.0f*sq3x)) / (x2+2.0f*x1-3.0f*sq3x);
    float A = (shift - B)*3.0f*powf(x1*x1,1.0f/3.0f);
    float CC = y2 - A*powf(x2,1.0f/3.0f)-B*x2;

    for(int i=0;i<=TBLN;i++)
    {
      float X = (float)i;
      float Y = A*powf(X,1.0f/3.0f)+B*X+CC;
      if(i<x1)
        lut[i] = (unsigned short)((float)i*shift);
      else
        lut[i] = Y<0?0:(Y>TBLN?TBLN:(unsigned short)(Y));
    }
  }
  for(int i=0; i< S.height*S.width; i++)
  {
    imgdata.image[i][0] = lut[imgdata.image[i][0]];
    imgdata.image[i][1] = lut[imgdata.image[i][1]];
    imgdata.image[i][2] = lut[imgdata.image[i][2]];
    imgdata.image[i][3] = lut[imgdata.image[i][3]];
  }
  C.data_maximum = lut[C.data_maximum];
  C.maximum = lut[C.maximum];
  // no need to adjust the minumum, black is already subtracted
  free(lut);
}
Class
2
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  if (context->tensors != nullptr) {
    return &context->tensors[node->inputs->data[index]];
  } else {
    return context->GetTensor(context, node->inputs->data[index]);
  }
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  int num_inputs = NumInputs(node);
  TF_LITE_ENSURE(context, num_inputs >= 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  output->type = input1->type;

  // Check that all input tensors have the same shape and type.
  for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
    const TfLiteTensor* input = GetInput(context, node, i);
    TF_LITE_ENSURE(context, HaveSameShapes(input1, input));
    TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type);
  }

  // Use the first input node's dimension to be the dimension of the output
  // node.
  TfLiteIntArray* input1_dims = input1->dims;
  TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims);
  return context->ResizeTensor(context, output, output_dims);
}
Base
1
AsyncSocket::WriteResult AsyncSSLSocket::interpretSSLError(int rc, int error) {
  if (error == SSL_ERROR_WANT_READ) {
    // Even though we are attempting to write data, SSL_write() may
    // need to read data if renegotiation is being performed. We currently
    // don't support this and just fail the write.
    LOG(ERROR) << "AsyncSSLSocket(fd=" << fd_ << ", state=" << int(state_)
               << ", sslState=" << sslState_ << ", events=" << eventFlags_
               << "): " << "unsupported SSL renegotiation during write";
    return WriteResult(
        WRITE_ERROR,
        std::make_unique<SSLException>(SSLError::INVALID_RENEGOTIATION));
  } else {
    if (zero_return(error, rc, errno)) {
      return WriteResult(0);
    }
    auto errError = ERR_get_error();
    VLOG(3) << "ERROR: AsyncSSLSocket(fd=" << fd_ << ", state=" << int(state_)
            << ", sslState=" << sslState_ << ", events=" << eventFlags_
            << "): " << "SSL error: " << error << ", errno: " << errno
            << ", func: " << ERR_func_error_string(errError)
            << ", reason: " << ERR_reason_error_string(errError);
    return WriteResult(
        WRITE_ERROR,
        std::make_unique<SSLException>(error, errError, rc, errno));
  }
}
Base
1
static BOOL ntlm_av_pair_add_copy(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList,
                                  NTLM_AV_PAIR* pAvPair, size_t cbAvPair)
{
  if (!ntlm_av_pair_check(pAvPair, cbAvPair))
    return FALSE;

  return ntlm_av_pair_add(pAvPairList, cbAvPairList, ntlm_av_pair_get_id(pAvPair),
                          ntlm_av_pair_get_value_pointer(pAvPair),
                          ntlm_av_pair_get_len(pAvPair));
}
Base
1
HeaderLookupTable_t::lookup (const char *buf, const std::size_t len) const {
  const HeaderTableRecord *r = HttpHeaderHashTable::lookup(buf, len);
  if (!r)
    return BadHdr;
  return *r;
}
Class
2
void * alloc_top(size_t size) {
  top -= size;
  if (top < bottom) {new_chunk(); top -= size;}
  return top;
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers);
  // Only int32 and int64 multipliers type is supported.
  if (multipliers->type != kTfLiteInt32 && multipliers->type != kTfLiteInt64) {
    context->ReportError(context,
                         "Multipliers of type '%s' are not supported by tile.",
                         TfLiteTypeGetName(multipliers->type));
    return kTfLiteError;
  }

  if (IsConstantTensor(multipliers)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  } else {
    SetTensorToDynamic(output);
  }
  return kTfLiteOk;
}
Base
1
TfLiteStatus EvalLogic(TfLiteContext* context, TfLiteNode* node,
                       OpContext* op_context, T init_value,
                       T reducer(const T current, const T in)) {
  int64_t num_axis = NumElements(op_context->axis);
  TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
  // Resize the output tensor if the output tensor is dynamic.
  if (IsDynamicTensor(op_context->output)) {
    TF_LITE_ENSURE_OK(context,
                      ResizeTempAxis(context, op_context, resolved_axis));
    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context));
  }
  if (op_context->input->type == kTfLiteUInt8 ||
      op_context->input->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, op_context->input->params.scale,
                      op_context->output->params.scale);
    TF_LITE_ENSURE_EQ(context, op_context->input->params.zero_point,
                      op_context->output->params.zero_point);
  }
  TF_LITE_ENSURE(
      context,
      reference_ops::ReduceGeneric<T>(
          GetTensorData<T>(op_context->input), op_context->input->dims->data,
          op_context->input->dims->size, GetTensorData<T>(op_context->output),
          op_context->output->dims->data, op_context->output->dims->size,
          GetTensorData<int>(op_context->axis), num_axis,
          op_context->params->keep_dims, GetTensorData<int>(temp_index),
          GetTensorData<int>(resolved_axis), init_value, reducer));
  return kTfLiteOk;
}
Base
1
static void nodeDestruct(struct SaveNode* node)
{
    if (node->v == &node->sorted)
    {
        tr_free(node->sorted.val.l.vals);
    }
}
Variant
0
int CLASS parse_jpeg(int offset)
{
  int len, save, hlen, mark;
  fseek(ifp, offset, SEEK_SET);
  if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8)
    return 0;

  while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda)
  {
    order = 0x4d4d;
    len = get2() - 2;
    save = ftell(ifp);
    if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9)
    {
      fgetc(ifp);
      raw_height = get2();
      raw_width = get2();
    }
    order = get2();
    hlen = get4();
    if (get4() == 0x48454150) /* "HEAP" */
    {
#ifdef LIBRAW_LIBRARY_BUILD
      imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
      parse_ciff(save + hlen, len - hlen, 0);
    }
    if (parse_tiff(save + 6))
      apply_tiff();
    fseek(ifp, save + len, SEEK_SET);
  }
  return 1;
}
Class
2
void CClient::EchoMessage(const CMessage& Message) {
    CMessage EchoedMessage = Message;
    for (CClient* pClient : GetClients()) {
        if (pClient->HasEchoMessage() ||
            (pClient != this && (m_pNetwork->IsChan(Message.GetParam(0)) ||
                                 pClient->HasSelfMessage()))) {
            EchoedMessage.SetNick(GetNickMask());
            pClient->PutClient(EchoedMessage);
        }
    }
}
Base
1
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                const TfLiteTensor* data,
                                const TfLiteTensor* segment_ids,
                                TfLiteTensor* output) {
  int max_index = -1;
  const int segment_id_size = segment_ids->dims->data[0];
  if (segment_id_size > 0) {
    max_index = segment_ids->data.i32[segment_id_size - 1];
  }
  const int data_rank = NumDimensions(data);
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
  output_shape->data[0] = max_index + 1;
  for (int i = 1; i < data_rank; ++i) {
    output_shape->data[i] = data->dims->data[i];
  }
  return context->ResizeTensor(context, output, output_shape);
}
Base
1
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // TODO(ahentz): Our current implementations rely on the inputs being 4D.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
  TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);

  TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
  // ResizeBilinear creates a float tensor even when the input is made of
  // integers.
  output->type = input->type;

  if (!IsConstantTensor(size)) {
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }

  // Ensure params are valid.
  auto* params =
      reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
  if (params->half_pixel_centers && params->align_corners) {
    context->ReportError(
        context, "If half_pixel_centers is True, align_corners must be False.");
    return kTfLiteError;
  }

  return ResizeOutputTensor(context, input, size, output);
}
Base
1
void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims) {
  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
                  filter_width, filter_height, output_activation_min,
                  output_activation_max, output_data, output_dims);
}
Base
1
GetRunner(
    const ReferenceHandle& that,
    Local<Value> key_handle,
    MaybeLocal<Object> maybe_options,
    bool inherit
) :
    context{that.context},
    reference{that.reference},
    options{maybe_options, inherit ? TransferOptions::Type::DeepReference : TransferOptions::Type::Reference},
    inherit{inherit} {
  that.CheckDisposed();
  key = ExternalCopy::CopyIfPrimitive(key_handle);
  if (!key) {
    throw RuntimeTypeError("Invalid `key`");
  }
}
Class
2
void SocketLineReader::dataReceived()
{
    while (m_socket->canReadLine()) {
        const QByteArray line = m_socket->readLine();
        if (line.length() > 1) { //we don't want a single \n
            m_packets.enqueue(line);
        }
    }

    //If we still have things to read from the socket, call dataReceived again
    //We do this manually because we do not trust readyRead to be emitted again
    //So we call this method again just in case.
    if (m_socket->bytesAvailable() > 0) {
        QMetaObject::invokeMethod(this, "dataReceived", Qt::QueuedConnection);
        return;
    }

    //If we have any packets, tell it to the world.
    if (!m_packets.isEmpty()) {
        Q_EMIT readyRead();
    }
}
Class
2
bool IsConvolutionOpSupported(const TfLiteRegistration* registration,
                              const TfLiteNode* node, TfLiteContext* context) {
  if (node->builtin_data == nullptr) return false;

  TfLiteFusedActivation activation;

  if (registration->builtin_code == kTfLiteBuiltinConv2d) {
    const auto* conv_params =
        reinterpret_cast<const TfLiteConvParams*>(node->builtin_data);
    activation = conv_params->activation;
  } else if (registration->builtin_code == kTfLiteBuiltinDepthwiseConv2d) {
    const auto* depthwise_conv_params =
        reinterpret_cast<const TfLiteDepthwiseConvParams*>(node->builtin_data);
    activation = depthwise_conv_params->activation;
  } else if (registration->builtin_code == kTfLiteBuiltinTransposeConv) {
    activation = kTfLiteActNone;
  } else {
    TF_LITE_KERNEL_LOG(
        context,
        "Invalid op: op must be Conv2D, DepthwiseConv2D or TransposeConv.");
    return false;
  }

  if (activation == kTfLiteActSignBit) {
    return false;
  }

  const int kOutputShapeTensor = 0;  // Only used for TransposeConv
  const int kWeightTensor = 1;
  const int kBiasTensor = 2;  // Only used for non-TransposeConv
  const TfLiteTensor* weights = GetInput(context, node, kWeightTensor);
  const int max_kernel_size = 16384;
  if (!IsConstantTensor(weights)) {
    return false;
  }
  if (weights->dims->data[1] > max_kernel_size ||
      weights->dims->data[2] > max_kernel_size) {
    return false;
  }
  if (registration->builtin_code == kTfLiteBuiltinTransposeConv) {
    if (!IsConstantTensor(GetInput(context, node, kOutputShapeTensor))) {
      return false;
    }
  } else {
    if (node->inputs->size >= kBiasTensor &&
        !IsConstantTensor(GetInput(context, node, kBiasTensor))) {
      return false;
    }
  }
  return true;
}
Base
1