Columns: code (string, length 12–2.05k), label_name (string, 5 classes), label (int64, 0–4)
unsigned int GetU32LE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 3 >= m_nLen ) { *pbSuccess = false; return 0; } unsigned int nRes = m_sFile[nPos + 3]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 0]; return nRes; }
Base
1
R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!sz) { return NULL; } ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR; // if (buffer + offset > buffer + sz) return NULL; attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); return attr; }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Resize the output tensor to the same size as the input tensor. output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // Check for supported activation types. auto* params = reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data); const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const bool is_quantized = ((filter->type == kTfLiteUInt8) || (filter->type == kTfLiteInt8)); const bool is_hybrid = is_quantized && (input->type == kTfLiteFloat32); const bool is_pie = kernel_type == kLegacyPie; // Pie and hybrid path supports all kinds of fused activations, otherwise only // clipping activations are supported. if (!is_pie && !is_hybrid) { TF_LITE_ENSURE(context, params->activation == kTfLiteActNone || params->activation == kTfLiteActRelu || params->activation == kTfLiteActReluN1To1 || params->activation == kTfLiteActRelu6); } return PrepareImpl(context, node); }
Base
1
void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); int64 id = ctx->session_state()->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); Tensor* handle = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle)); if (ctx->expected_output_dtype(0) == DT_RESOURCE) { ResourceHandle resource_handle = MakeResourceHandle<Tensor>( ctx, SessionState::kTensorHandleResourceTypeName, tk.GetHandle(name())); resource_handle.set_maybe_type_name( SessionState::kTensorHandleResourceTypeName); handle->scalar<ResourceHandle>()() = resource_handle; } else { // Legacy behavior in V1. handle->flat<tstring>().setConstant(tk.GetHandle(name())); } }
Base
1
void ConnPoolImplBase::checkForIdleAndCloseIdleConnsIfDraining() { if (is_draining_for_deletion_) { closeIdleConnectionsForDrainingPool(); } if (isIdleImpl()) { ENVOY_LOG(debug, "invoking idle callbacks - is_draining_for_deletion_={}", is_draining_for_deletion_); for (const Instance::IdleCb& cb : idle_callbacks_) { cb(); } } }
Class
2
static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. */ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; }
Class
2
SpawnPreparationInfo prepareSpawn(const Options &options) { TRACE_POINT(); SpawnPreparationInfo info; prepareChroot(info, options); info.userSwitching = prepareUserSwitching(options); prepareSwitchingWorkingDirectory(info, options); inferApplicationInfo(info); return info; }
Class
2
void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, int xstart, int ystart, int xend, int yend) { jas_matrix_bindsub(s, s1, ystart - s1->ystart_, xstart - s1->xstart_, yend - s1->ystart_ - 1, xend - s1->xstart_ - 1); }
Base
1
static const char* ConvertScalar(PyObject* v, Eigen::half* out) { // NOTE(nareshmodi): Is there a way to convert to C double without the // intermediate Python double? This will help with ConvertOneFloat as well. Safe_PyObjectPtr as_float = make_safe(PyNumber_Float(v)); double v_double = PyFloat_AS_DOUBLE(as_float.get()); *out = Eigen::half(v_double); return nullptr; }
Class
2
bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration, const TfLiteNode* node, TfLiteContext* context) { if (node->builtin_data == nullptr) return false; const auto* fc_params = reinterpret_cast<const TfLiteFullyConnectedParams*>(node->builtin_data); const int kInput = 0; const int kWeights = 1; const int kBias = 2; if (fc_params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) { return false; } const TfLiteTensor* input = GetInput(context, node, kInput); const TfLiteTensor* weights = GetInput(context, node, kWeights); if (!IsFloatType(input->type)) { return false; } if (!IsFloatType(weights->type) || !IsConstantTensor(weights)) { return false; } // Core ML 2 only supports single-batch fully connected layer, thus dimensions // except the last one should be 1. if (input->dims->data[input->dims->size - 1] != NumElements(input)) { return false; } if (node->inputs->size > 2) { const TfLiteTensor* bias = GetInput(context, node, kBias); if (!IsFloatType(bias->type) || !IsConstantTensor(bias)) { return false; } } TfLiteFusedActivation activation = fc_params->activation; if (activation == kTfLiteActSignBit) { return false; } return true; }
Base
1
boost::optional<SaplingOutgoingPlaintext> SaplingOutgoingPlaintext::decrypt( const SaplingOutCiphertext &ciphertext, const uint256& ovk, const uint256& cv, const uint256& cm, const uint256& epk ) { auto pt = AttemptSaplingOutDecryption(ciphertext, ovk, cv, cm, epk); if (!pt) { return boost::none; } // Deserialize from the plaintext CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << pt.get(); SaplingOutgoingPlaintext ret; ss >> ret; assert(ss.size() == 0); return ret; }
Class
2
static bool is_legal_file(const std::string &filename) { DBG_FS << "Looking for '" << filename << "'.\n"; if (filename.empty()) { LOG_FS << " invalid filename\n"; return false; } if (filename.find("..") != std::string::npos) { ERR_FS << "Illegal path '" << filename << "' (\"..\" not allowed).\n"; return false; } if (ends_with(filename, ".pbl")) { ERR_FS << "Illegal path '" << filename << "' (.pbl files are not allowed)." << std::endl; return false; } return true; }
Class
2
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n) { int dy = y1 - y0; int adx = x1 - x0; int ady = abs(dy); int base; int x=x0,y=y0; int err = 0; int sy; #ifdef STB_VORBIS_DIVIDE_TABLE if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) { if (dy < 0) { base = -integer_divide_table[ady][adx]; sy = base-1; } else { base = integer_divide_table[ady][adx]; sy = base+1; } } else { base = dy / adx; if (dy < 0) sy = base - 1; else sy = base+1; } #else base = dy / adx; if (dy < 0) sy = base - 1; else sy = base+1; #endif ady -= abs(base) * adx; if (x1 > n) x1 = n; if (x < x1) { LINE_OP(output[x], inverse_db_table[y]); for (++x; x < x1; ++x) { err += ady; if (err >= adx) { err -= adx; y += sy; } else y += base; LINE_OP(output[x], inverse_db_table[y]); } } }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* dims = GetInput(context, node, kDimsTensor); const TfLiteTensor* value = GetInput(context, node, kValueTensor); // Make sure the 1st input tensor is 1-D. TF_LITE_ENSURE_EQ(context, NumDimensions(dims), 1); // Make sure the 1st input tensor is int32 or int64. const auto dtype = dims->type; TF_LITE_ENSURE(context, dtype == kTfLiteInt32 || dtype == kTfLiteInt64); // Make sure the 2nd input tensor is a scalar. TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = value->type; if (IsConstantTensor(dims)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; }
Base
1
decode_rt_routing_info(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t route_target[8]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if (0 == plen) { snprintf(buf, buflen, "default route target"); return 1; } if (32 > plen) return -1; plen-=32; /* adjust prefix length */ if (64 < plen) return -1; memset(&route_target, 0, sizeof(route_target)); ND_TCHECK2(pptr[1], (plen + 7) / 8); memcpy(&route_target, &pptr[1], (plen + 7) / 8); if (plen % 8) { ((u_char *)&route_target)[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "origin AS: %s, route target %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+1)), bgp_vpn_rd_print(ndo, (u_char *)&route_target)); return 5 + (plen + 7) / 8; trunc: return -2; }
Base
1
static NTLM_AV_PAIR* ntlm_av_pair_next(NTLM_AV_PAIR* pAvPair, size_t* pcbAvPair) { size_t offset; if (!pcbAvPair) return NULL; if (!ntlm_av_pair_check(pAvPair, *pcbAvPair)) return NULL; offset = ntlm_av_pair_get_next_offset(pAvPair); *pcbAvPair -= offset; return (NTLM_AV_PAIR*)((PBYTE)pAvPair + offset); }
Base
1
bool Decode(string_view encoded, std::string* raw) { for (auto iter = encoded.begin(); iter != encoded.end(); ++iter) { if (*iter == '%') { if (++iter == encoded.end()) { // Invalid URI string, two hexadecimal digits must follow '%'. return false; } int h_decimal = 0; if (!HexToDecimal(*iter, &h_decimal)) { return false; } if (++iter == encoded.end()) { // Invalid URI string, two hexadecimal digits must follow '%'. return false; } int l_decimal = 0; if (!HexToDecimal(*iter, &l_decimal)) { return false; } raw->push_back(static_cast<char>((h_decimal << 4) + l_decimal)); } else if (*iter > 127 || *iter < 0) { // Invalid encoded URI string, must be entirely ASCII. return false; } else { raw->push_back(*iter); } } return true; }
Base
1
void * alloc_bottom(size_t size, size_t align) {loop: align_bottom(align); byte * tmp = bottom; bottom += size; if (bottom > top) {new_chunk(); goto loop;} return tmp; }
Base
1
UnicodeString DecimalQuantity::toScientificString() const { U_ASSERT(!isApproximate); UnicodeString result; if (isNegative()) { result.append(u'-'); } if (precision == 0) { result.append(u"0E+0", -1); return result; } // NOTE: It is not safe to add to lOptPos (aka maxInt) or subtract from // rOptPos (aka -maxFrac) due to overflow. int32_t upperPos = std::min(precision + scale, lOptPos) - scale - 1; int32_t lowerPos = std::max(scale, rOptPos) - scale; int32_t p = upperPos; result.append(u'0' + getDigitPos(p)); if ((--p) >= lowerPos) { result.append(u'.'); for (; p >= lowerPos; p--) { result.append(u'0' + getDigitPos(p)); } } result.append(u'E'); int32_t _scale = upperPos + scale; if (_scale < 0) { _scale *= -1; result.append(u'-'); } else { result.append(u'+'); } if (_scale == 0) { result.append(u'0'); } int32_t insertIndex = result.length(); while (_scale > 0) { std::div_t res = std::div(_scale, 10); result.insert(insertIndex, u'0' + res.rem); _scale = res.quot; } return result; }
Base
1
static inline long decode_twos_comp(ulong c, int prec) { long result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1))); return result; }
Class
2
TfLiteStatus PrepareHashtableFind(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputResourceIdTensor); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1); const TfLiteTensor* default_value_tensor = GetInput(context, node, kDefaultValueTensor); const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor); TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, default_value_tensor->type, output_tensor->type); TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 && output_tensor->type == kTfLiteString) || (key_tensor->type == kTfLiteString && output_tensor->type == kTfLiteInt64)); return context->ResizeTensor(context, output_tensor, TfLiteIntArrayCopy(key_tensor->dims)); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); output->type = input->type; TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_size); }
Base
1
jas_matrix_t *jas_seq2d_create(int xstart, int ystart, int xend, int yend) { jas_matrix_t *matrix; assert(xstart <= xend && ystart <= yend); if (!(matrix = jas_matrix_create(yend - ystart, xend - xstart))) { return 0; } matrix->xstart_ = xstart; matrix->ystart_ = ystart; matrix->xend_ = xend; matrix->yend_ = yend; return matrix; }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, node->inputs->size, 1); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); TfLiteTensor* output = GetOutput(context, node, kOutputValue); SetTensorToDynamic(output); return kTfLiteOk; }
Base
1
int size() const { return m_str ? m_str->size() : 0; }
Base
1
void do_change_user(int afdt_fd) { std::string uname; lwp_read(afdt_fd, uname); if (!uname.length()) return; auto buf = PasswdBuffer{}; struct passwd *pw; if (getpwnam_r(uname.c_str(), &buf.ent, buf.data.get(), buf.size, &pw)) { // TODO(alexeyt) should we log something and/or fail to start? return; } if (!pw) { // TODO(alexeyt) should we log something and/or fail to start? return; } if (pw->pw_gid) { initgroups(pw->pw_name, pw->pw_gid); setgid(pw->pw_gid); } if (pw->pw_uid) { setuid(pw->pw_uid); } }
Base
1
parse_cosine_hex_dump(FILE_T fh, struct wtap_pkthdr *phdr, int pkt_len, Buffer* buf, int *err, gchar **err_info) { guint8 *pd; gchar line[COSINE_LINE_LENGTH]; int i, hex_lines, n, caplen = 0; /* Make sure we have enough room for the packet */ ws_buffer_assure_space(buf, COSINE_MAX_PACKET_LEN); pd = ws_buffer_start_ptr(buf); /* Calculate the number of hex dump lines, each * containing 16 bytes of data */ hex_lines = pkt_len / 16 + ((pkt_len % 16) ? 1 : 0); for (i = 0; i < hex_lines; i++) { if (file_gets(line, COSINE_LINE_LENGTH, fh) == NULL) { *err = file_error(fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } if (empty_line(line)) { break; } if ((n = parse_single_hex_dump_line(line, pd, i*16)) == -1) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("cosine: hex dump line doesn't have 16 numbers"); return FALSE; } caplen += n; } phdr->caplen = caplen; return TRUE; }
Class
2
write(Protocol_* iprot, const StructInfo& structInfo, const void* object) { DCHECK(object); size_t written = iprot->writeStructBegin(structInfo.name); if (UNLIKELY(structInfo.unionExt != nullptr)) { const FieldInfo* end = structInfo.fieldInfos + structInfo.numFields; const auto& unionId = activeUnionMemberId(object, structInfo.unionExt->unionTypeOffset); const FieldInfo* found = std::lower_bound( structInfo.fieldInfos, end, unionId, [](const FieldInfo& lhs, FieldID rhs) { return lhs.id < rhs; }); if (found < end && found->id == unionId) { const OptionalThriftValue value = getValue(*found->typeInfo, object); if (value.hasValue()) { written += writeField(iprot, *found, value.value()); } else if (found->typeInfo->type == protocol::TType::T_STRUCT) { written += iprot->writeFieldBegin( found->name, found->typeInfo->type, found->id); written += iprot->writeStructBegin(found->name); written += iprot->writeStructEnd(); written += iprot->writeFieldStop(); written += iprot->writeFieldEnd(); } } } else { for (std::int16_t index = 0; index < structInfo.numFields; index++) { const auto& fieldInfo = structInfo.fieldInfos[index]; if (fieldInfo.isUnqualified || fieldInfo.issetOffset == 0 || fieldIsSet(object, fieldInfo.issetOffset)) { const OptionalThriftValue value = getValue(*fieldInfo.typeInfo, getMember(fieldInfo, object)); if (value.hasValue()) { written += writeField(iprot, fieldInfo, value.value()); } } } } written += iprot->writeFieldStop(); written += iprot->writeStructEnd(); return written; }
Base
1
ECDSA_PrivateKey::create_signature_op(RandomNumberGenerator& /*rng*/, const std::string& params, const std::string& provider) const { #if defined(BOTAN_HAS_BEARSSL) if(provider == "bearssl" || provider.empty()) { try { return make_bearssl_ecdsa_sig_op(*this, params); } catch(Lookup_Error& e) { if(provider == "bearssl") throw; } } #endif #if defined(BOTAN_HAS_OPENSSL) if(provider == "openssl" || provider.empty()) { try { return make_openssl_ecdsa_sig_op(*this, params); } catch(Lookup_Error& e) { if(provider == "openssl") throw; } } #endif if(provider == "base" || provider.empty()) return std::unique_ptr<PK_Ops::Signature>(new ECDSA_Signature_Operation(*this, params)); throw Provider_Not_Found(algo_name(), provider); }
Class
2
Status SetUnknownShape(const NodeDef* node, int output_port) { shape_inference::ShapeHandle shape = GetUnknownOutputShape(node, output_port); InferenceContext* ctx = GetContext(node); if (ctx == nullptr) { return errors::InvalidArgument("Missing context"); } ctx->set_output(output_port, shape); return Status::OK(); }
Base
1
void *jas_realloc(void *ptr, size_t size) { void *result; JAS_DBGLOG(101, ("jas_realloc called with %x,%zu\n", ptr, size)); result = realloc(ptr, size); JAS_DBGLOG(100, ("jas_realloc(%p, %zu) -> %p\n", ptr, size, result)); return result; }
Base
1
Integer InvertibleRWFunction::CalculateInverse(RandomNumberGenerator &rng, const Integer &x) const { DoQuickSanityCheck(); ModularArithmetic modn(m_n); Integer r, rInv; do { // do this in a loop for people using small numbers for testing r.Randomize(rng, Integer::One(), m_n - Integer::One()); rInv = modn.MultiplicativeInverse(r); } while (rInv.IsZero()); Integer re = modn.Square(r); re = modn.Multiply(re, x); // blind Integer cp=re%m_p, cq=re%m_q; if (Jacobi(cp, m_p) * Jacobi(cq, m_q) != 1) { cp = cp.IsOdd() ? (cp+m_p) >> 1 : cp >> 1; cq = cq.IsOdd() ? (cq+m_q) >> 1 : cq >> 1; } #pragma omp parallel #pragma omp sections { #pragma omp section cp = ModularSquareRoot(cp, m_p); #pragma omp section cq = ModularSquareRoot(cq, m_q); } Integer y = CRT(cq, m_q, cp, m_p, m_u); y = modn.Multiply(y, rInv); // unblind y = STDMIN(y, m_n-y); if (ApplyFunction(y) != x) // check throw Exception(Exception::OTHER_ERROR, "InvertibleRWFunction: computational error during private key operation"); return y; }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* data = GetInput(context, node, kInputDataTensor); const TfLiteTensor* segment_ids = GetInput(context, node, kInputSegmentIdsTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, data->type == kTfLiteInt32 || data->type == kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32); if (!IsConstantTensor(data) || !IsConstantTensor(segment_ids)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, data, segment_ids, output); }
Base
1
bool VariableUnserializer::matchString(folly::StringPiece str) { const char* p = m_buf; assertx(p <= m_end); int total = 0; if (*p == 'S') { total = 2 + 8 + 1; if (p + total > m_end) return false; p++; if (*p++ != ':') return false; auto const sd = *reinterpret_cast<StringData*const*>(p); assertx(sd->isStatic()); if (str.compare(sd->slice()) != 0) return false; p += size_t(8); } else { const auto ss = str.size(); if (ss >= 100) return false; int digits = ss >= 10 ? 2 : 1; total = 2 + digits + 2 + ss + 2; if (p + total > m_end) return false; if (*p++ != 's') return false; if (*p++ != ':') return false; if (digits == 2) { if (*p++ != '0' + ss/10) return false; if (*p++ != '0' + ss%10) return false; } else { if (*p++ != '0' + ss) return false; } if (*p++ != ':') return false; if (*p++ != '\"') return false; if (memcmp(p, str.data(), ss)) return false; p += ss; if (*p++ != '\"') return false; } if (*p++ != ';') return false; assertx(m_buf + total == p); m_buf = p; return true; }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); // Make sure all the inputs are scalars. TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0); // Currently only supports int32 and float. // TODO(b/117912892): Support quantization as well. const auto dtype = start->type; if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32) { context->ReportError(context, "Unknown index output data type: %s", TfLiteTypeGetName(dtype)); return kTfLiteError; } TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype); TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = dtype; if (IsConstantTensor(start) && IsConstantTensor(limit) && IsConstantTensor(delta)) { return ResizeOutput(context, start, limit, delta, output); } SetTensorToDynamic(output); return kTfLiteOk; }
Base
1
bool AdminRequestHandler::handleDumpStaticStringsRequest( const std::string& /*cmd*/, const std::string& filename) { auto const& list = lookupDefinedStaticStrings(); std::ofstream out(filename.c_str()); SCOPE_EXIT { out.close(); }; for (auto item : list) { out << formatStaticString(item); if (RuntimeOption::EvalPerfDataMap) { auto const len = std::min<size_t>(item->size(), 255); std::string str(item->data(), len); // Only print the first line (up to 255 characters). Since we want '\0' in // the list of characters to avoid, we need to use the version of // `find_first_of()' with explicit length. auto cutOffPos = str.find_first_of("\r\n", 0, 3); if (cutOffPos != std::string::npos) str.erase(cutOffPos); Debug::DebugInfo::recordDataMap(item->mutableData(), item->mutableData() + item->size(), "Str-" + str); } } return true; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } switch (output->type) { case kTfLiteFloat32: Tile<float>(*(input->dims), input, multipliers, output); break; case kTfLiteUInt8: Tile<uint8_t>(*(input->dims), input, multipliers, output); break; case kTfLiteInt32: Tile<int32_t>(*(input->dims), input, multipliers, output); break; case kTfLiteInt64: Tile<int64_t>(*(input->dims), input, multipliers, output); break; case kTfLiteString: { DynamicBuffer buffer; TileString(*(input->dims), input, multipliers, &buffer, output); buffer.WriteToTensor(output, /*new_shape=*/nullptr); break; } case kTfLiteBool: Tile<bool>(*(input->dims), input, multipliers, output); break; default: context->ReportError(context, "Type '%s' is not supported by tile.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
Base
1
void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { auto* entry = getExistingInline(key.get()); if (entry != nullptr) { char buf[32]; StringUtil::itoa(buf, sizeof(buf), value); appendToHeader(entry->value(), buf); return; } HeaderString new_key; new_key.setCopy(key.get().c_str(), key.get().size()); HeaderString new_value; new_value.setInteger(value); insertByKey(std::move(new_key), std::move(new_value)); ASSERT(new_key.empty()); // NOLINT(bugprone-use-after-move) ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move) }
Class
2
void RegKey::getBinary(const TCHAR* valname, void** data, int* length, void* def, int deflen) const { try { getBinary(valname, data, length); } catch(rdr::Exception&) { if (deflen) { *data = new char[deflen]; memcpy(*data, def, deflen); } else *data = 0; *length = deflen; } }
Base
1
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input, TfLiteNode* node) { // Map from value, to index in the unique elements vector. // Note that we prefer to use map than unordered_map as it showed less // increase in the binary size. std::map<T, int> unique_values; TfLiteTensor* output_indexes = GetOutput(context, node, 1); std::vector<T> output_values; I* indexes = GetTensorData<I>(output_indexes); const T* data = GetTensorData<T>(input); const int num_elements = NumElements(input); for (int i = 0; i < num_elements; ++i) { const auto element_it = unique_values.find(data[i]); if (element_it != unique_values.end()) { indexes[i] = element_it->second; } else { const int unique_index = unique_values.size(); unique_values[data[i]] = unique_index; indexes[i] = unique_index; output_values.push_back(data[i]); } } // Allocate output tensor. TfLiteTensor* unique_output = GetOutput(context, node, 0); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(NumDimensions(input)), TfLiteIntArrayFree); shape->data[0] = unique_values.size(); TF_LITE_ENSURE_STATUS( context->ResizeTensor(context, unique_output, shape.release())); // Set the values in the output tensor. T* output_unique_values = GetTensorData<T>(unique_output); for (int i = 0; i < output_values.size(); ++i) { output_unique_values[i] = output_values[i]; } return kTfLiteOk; }
Base
1
FdOutStream::FdOutStream(int fd_, bool blocking_, int timeoutms_, int bufSize_) : fd(fd_), blocking(blocking_), timeoutms(timeoutms_), bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0) { ptr = start = sentUpTo = new U8[bufSize]; end = start + bufSize; gettimeofday(&lastWrite, NULL); }
Base
1
void XfccIntegrationTest::initialize() { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { hcm.set_forward_client_cert_details(fcc_); hcm.mutable_set_current_client_cert_details()->CopyFrom(sccd_); }); config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto transport_socket = bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket(); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext context; auto* validation_context = context.mutable_common_tls_context()->mutable_validation_context(); validation_context->mutable_trusted_ca()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); validation_context->add_match_subject_alt_names()->set_suffix("lyft.com"); transport_socket->set_name("envoy.transport_sockets.tls"); transport_socket->mutable_typed_config()->PackFrom(context); }); if (tls_) { config_helper_.addSslConfig(); } context_manager_ = std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem()); client_tls_ssl_ctx_ = createClientSslContext(false); client_mtls_ssl_ctx_ = createClientSslContext(true); HttpIntegrationTest::initialize(); }
Base
1
void ntlm_print_av_pair_list(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList) { size_t cbAvPair = cbAvPairList; NTLM_AV_PAIR* pAvPair = pAvPairList; if (!ntlm_av_pair_check(pAvPair, cbAvPair)) return; WLog_INFO(TAG, "AV_PAIRs ="); while (pAvPair && ntlm_av_pair_get_id(pAvPair) != MsvAvEOL) { WLog_INFO(TAG, "\t%s AvId: %" PRIu16 " AvLen: %" PRIu16 "", AV_PAIR_STRINGS[ntlm_av_pair_get_id(pAvPair)], ntlm_av_pair_get_id(pAvPair), ntlm_av_pair_get_len(pAvPair)); winpr_HexDump(TAG, WLOG_INFO, ntlm_av_pair_get_value_pointer(pAvPair), ntlm_av_pair_get_len(pAvPair)); pAvPair = ntlm_av_pair_next(pAvPair, &cbAvPair); } }
Base
1
int jsi_PstateSetFile(jsi_Pstate *ps, Jsi_Channel fp, int skipbang) { jsi_Lexer *l = ps->lexer; jsi_PstateClear(ps); l->ltype = LT_FILE; l->d.fp = fp; Jsi_Rewind(ps->interp, fp); if (skipbang) { char buf[1000]; if (Jsi_Gets(ps->interp, fp, buf, 1000) && (buf[0] != '#' || buf[1] != '!')) { Jsi_Rewind(ps->interp, fp); } } return JSI_OK; }
Base
1
set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) { throw runtime_error("Tried to listen twice on the same path"); } sockaddr_un local; int fd = socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(fd); initServerSocket(fd); local.sun_family = AF_UNIX; /* local is declared before socket() ^ */ strcpy(local.sun_path, pipePath.c_str()); unlink(local.sun_path); FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un))); ::listen(fd, 5); #ifndef WIN32 FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR)); #endif pipeServerSockets[pipePath] = set<int>({fd}); return pipeServerSockets[pipePath]; }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); }
Base
1
RemoteFsDevice::RemoteFsDevice(MusicLibraryModel *m, const Details &d) : FsDevice(m, d.name, createUdi(d.name)) , mountToken(0) , currentMountStatus(false) , details(d) , proc(0) , mounterIface(0) , messageSent(false) { // details.path=Utils::fixPath(details.path); setup(); icn=MonoIcon::icon(details.isLocalFile() ? FontAwesome::foldero : constSshfsProtocol==details.url.scheme() ? FontAwesome::linux_os : FontAwesome::windows, Utils::monoIconColor()); }
Class
2
void HeaderMapImpl::addCopy(const LowerCaseString& key, const std::string& value) { auto* entry = getExistingInline(key.get()); if (entry != nullptr) { appendToHeader(entry->value(), value); return; } HeaderString new_key; new_key.setCopy(key.get().c_str(), key.get().size()); HeaderString new_value; new_value.setCopy(value.c_str(), value.size()); insertByKey(std::move(new_key), std::move(new_value)); ASSERT(new_key.empty()); // NOLINT(bugprone-use-after-move) ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move) }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
Base
1
TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, TfLiteNode* node, OpData* op_data) { // Get the input tensors const TfLiteTensor* input_box_encodings = GetInput(context, node, kInputTensorBoxEncodings); const TfLiteTensor* input_class_predictions = GetInput(context, node, kInputTensorClassPredictions); const int num_boxes = input_box_encodings->dims->data[1]; const int num_classes = op_data->num_classes; TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0], kBatchSize); TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes); const int num_classes_with_background = input_class_predictions->dims->data[2]; TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1)); TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes)); const TfLiteTensor* scores; switch (input_class_predictions->type) { case kTfLiteUInt8: { TfLiteTensor* temporary_scores = &context->tensors[op_data->scores_index]; DequantizeClassPredictions(input_class_predictions, num_boxes, num_classes_with_background, temporary_scores); scores = temporary_scores; } break; case kTfLiteFloat32: scores = input_class_predictions; break; default: // Unsupported type. return kTfLiteError; } if (op_data->use_regular_non_max_suppression) TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper( context, node, op_data, GetTensorData<float>(scores))); else TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassFastHelper( context, node, op_data, GetTensorData<float>(scores))); return kTfLiteOk; }
Base
1
TEST_F(SingleAllowMissingInOrListTest, MissingIssToken) { EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ES256WithoutIssToken}}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); verifier_->verify(context_); EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader)); }
Class
2
bool ConstantFolding::IsSimplifiableReshape( const NodeDef& node, const GraphProperties& properties) const { if (!IsReshape(node)) { return false; } CHECK_LE(2, node.input_size()); const NodeDef* new_shape = node_map_->GetNode(node.input(1)); if (!IsReallyConstant(*new_shape)) { return false; } TensorVector outputs; auto outputs_cleanup = gtl::MakeCleanup([&outputs] { for (const auto& output : outputs) { delete output.tensor; } }); Status s = EvaluateNode(*new_shape, TensorVector(), &outputs); if (!s.ok()) { return false; } CHECK_EQ(1, outputs.size()); const std::vector<OpInfo::TensorProperties>& props = properties.GetInputProperties(node.name()); if (props.empty()) { return false; } const OpInfo::TensorProperties& prop = props[0]; if (prop.dtype() == DT_INVALID) { return false; } const PartialTensorShape shape(prop.shape()); if (!shape.IsFullyDefined()) { return false; } PartialTensorShape new_dims; if (outputs[0]->dtype() == DT_INT32) { std::vector<int32> shp; for (int i = 0; i < outputs[0]->NumElements(); ++i) { int32_t dim = outputs[0]->flat<int32>()(i); shp.push_back(dim); } TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims)); } else { std::vector<int64_t> shp; for (int i = 0; i < outputs[0]->NumElements(); ++i) { int64_t dim = outputs[0]->flat<int64_t>()(i); shp.push_back(dim); } TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims)); } return shape.IsCompatibleWith(new_dims); }
Base
1
void initialize(const string &path, bool owner) { TRACE_POINT(); this->path = path; this->owner = owner; /* Create the server instance directory. We only need to write to this * directory for these reasons: * 1. Initial population of structure files (structure_version.txt, instance.pid). * 2. Creating/removing a generation directory. * 3. Removing the entire server instance directory (after all * generations are removed). * * 1 and 2 are done by the helper server during initialization and before lowering * privilege. 3 is done during helper server shutdown by a cleanup process that's * running as the same user the helper server was running as before privilege * lowering. * Therefore, we make the directory only writable by the user the helper server * was running as before privilege is lowered. Everybody else has read and execute * rights though, because we want admin tools to be able to list the available * generations no matter what user they're running as. */ makeDirTree(path, "u=rwx,g=rx,o=rx"); }
Base
1
jas_image_t *jas_image_create0() { jas_image_t *image; if (!(image = jas_malloc(sizeof(jas_image_t)))) { return 0; } image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; image->clrspc_ = JAS_CLRSPC_UNKNOWN; image->numcmpts_ = 0; image->maxcmpts_ = 0; image->cmpts_ = 0; image->inmem_ = true; image->cmprof_ = 0; return image; }
Class
2
TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
TEST(BasicInterpreter, AllocateTwice) { Interpreter interpreter; ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk); ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk); ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk); TfLiteQuantizationParams quantized; ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3}, quantized), kTfLiteOk); ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3}, quantized), kTfLiteOk); TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* tensor0 = GetInput(context, node, 0); TfLiteTensor* tensor1 = GetOutput(context, node, 0); TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims); return context->ResizeTensor(context, tensor1, newSize); }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* a0 = GetInput(context, node, 0); TfLiteTensor* a1 = GetOutput(context, node, 0); int num = a0->dims->data[0]; for (int i = 0; i < num; i++) { a1->data.f[i] = a0->data.f[i]; } return kTfLiteOk; }; ASSERT_EQ( interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg), kTfLiteOk); ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk); ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); ASSERT_EQ(interpreter.Invoke(), kTfLiteOk); char* old_tensor0_ptr = interpreter.tensor(0)->data.raw; char* old_tensor1_ptr = interpreter.tensor(1)->data.raw; ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); ASSERT_EQ(interpreter.Invoke(), kTfLiteOk); ASSERT_EQ(old_tensor0_ptr, interpreter.tensor(0)->data.raw); ASSERT_EQ(old_tensor1_ptr, interpreter.tensor(1)->data.raw); }
Base
1
void Compute(OpKernelContext* ctx) override { auto x = ctx->input(0); auto i = ctx->input(1); auto v = ctx->input(2); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(i.shape()), errors::InvalidArgument("i must be a vector. ", i.shape().DebugString())); OP_REQUIRES(ctx, x.dims() == v.dims(), errors::InvalidArgument( "x and v shape doesn't match (ranks differ): ", x.shape().DebugString(), " vs. ", v.shape().DebugString())); for (int i = 1; i < x.dims(); ++i) { OP_REQUIRES( ctx, x.dim_size(i) == v.dim_size(i), errors::InvalidArgument("x and v shape doesn't match at index ", i, " : ", x.shape().DebugString(), " vs. ", v.shape().DebugString())); } OP_REQUIRES(ctx, i.dim_size(0) == v.dim_size(0), errors::InvalidArgument( "i and x shape doesn't match at index 0: ", i.shape().DebugString(), " vs. ", v.shape().DebugString())); Tensor y = x; // This creates an alias intentionally. // Skip processing if tensors are empty. if (x.NumElements() > 0 || v.NumElements() > 0) { OP_REQUIRES_OK(ctx, DoCompute(ctx, i, v, &y)); } ctx->set_output(0, y); }
Base
1
std::string RestAuthHandler::generateJwt(std::string const& username, std::string const& password) { AuthenticationFeature* af = AuthenticationFeature::instance(); TRI_ASSERT(af != nullptr); return fuerte::jwt::generateUserToken(af->tokenCache().jwtSecret(), username, _validFor); }
Base
1
int overrun(int itemSize, int nItems, bool wait) { throw EndOfStream(); }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, start, limit, delta, output)); } switch (output->type) { case kTfLiteInt32: { EvalImpl<int32_t>(start, delta, output); break; } case kTfLiteFloat32: { EvalImpl<float>(start, delta, output); break; } default: { context->ReportError(context, "Unsupported data type: %d", output->type); return kTfLiteError; } } return kTfLiteOk; }
Base
1
void RemoteDevicePropertiesWidget::setType() { if (Type_SshFs==type->itemData(type->currentIndex()).toInt() && 0==sshPort->value()) { sshPort->setValue(22); } if (Type_Samba==type->itemData(type->currentIndex()).toInt() && 0==smbPort->value()) { smbPort->setValue(445); } }
Class
2
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight = GetInput(context, node, 2); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output = GetOutput(context, node, 0); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); }
Base
1
CString CZNC::FixupEncoding(const CString& sEncoding) const { if (sEncoding.empty() && m_uiForceEncoding) { return "UTF-8"; } return sEncoding; }
Class
2
int length() const { return m_str ? m_str->size() : 0; }
Base
1
TypedValue HHVM_FUNCTION(substr_compare, const String& main_str, const String& str, int offset, int length /* = INT_MAX */, bool case_insensitivity /* = false */) { int s1_len = main_str.size(); int s2_len = str.size(); if (length <= 0) { raise_warning("The length must be greater than zero"); return make_tv<KindOfBoolean>(false); } if (offset < 0) { offset = s1_len + offset; if (offset < 0) offset = 0; } if (offset >= s1_len) { raise_warning("The start position cannot exceed initial string length"); return make_tv<KindOfBoolean>(false); } int cmp_len = s1_len - offset; if (cmp_len < s2_len) cmp_len = s2_len; if (cmp_len > length) cmp_len = length; const char *s1 = main_str.data(); if (case_insensitivity) { return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len)); } return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len)); }
Base
1
TfLiteRegistration CancelOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; // Set output size to the input size in CancelOp::Prepare(). Code exists to // have a framework in Prepare. The input and output tensors are not used. reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* in_tensor = GetInput(context, node, 0); TfLiteTensor* out_tensor = GetOutput(context, node, 0); TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims); return context->ResizeTensor(context, out_tensor, new_size); }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { cancellation_data_.is_cancelled = true; return kTfLiteOk; }; return reg; }
Base
1
static TfLiteRegistration DelegateRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { // If tensors are resized, the runtime should propagate shapes // automatically if correct flag is set. Ensure values are correct. // Output 0 should be dynamic. TfLiteTensor* output0 = GetOutput(context, node, 0); TF_LITE_ENSURE(context, IsDynamicTensor(output0)); // Output 1 has the same shape as input. const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output1 = GetOutput(context, node, 1); TF_LITE_ENSURE(context, input->dims->size == output1->dims->size); TF_LITE_ENSURE(context, input->dims->data[0] == output1->dims->data[0]); return kTfLiteOk; }; return reg; }
Base
1
ObfuscatedPasswd::ObfuscatedPasswd(int len) : CharArray(len), length(len) { }
Base
1
int Archive::Read(void *Data,size_t Size) { size_t Result; if (QOpen.Read(Data,Size,Result)) return (int)Result; return File::Read(Data,Size); }
Base
1
bool logToUSDT(const Array& bt) { std::lock_guard<std::mutex> lock(usdt_mutex); memset(&bt_slab, 0, sizeof(bt_slab)); int i = 0; IterateVNoInc( bt.get(), [&](TypedValue tv) -> bool { if (i >= strobelight::kMaxStackframes) { return true; } assertx(isArrayLikeType(type(tv))); ArrayData* bt_frame = val(tv).parr; strobelight::backtrace_frame_t* frame = &bt_slab.frames[i]; auto const line = bt_frame->get(s_line.get()); if (line.is_init()) { assertx(isIntType(type(line))); frame->line = val(line).num; } auto const file_name = bt_frame->get(s_file.get()); if (file_name.is_init()) { assertx(isStringType(type(file_name))); strncpy(frame->file_name, val(file_name).pstr->data(), std::min(val(file_name).pstr->size(), strobelight::kFileNameMax)); frame->file_name[strobelight::kFileNameMax - 1] = '\0'; } auto const class_name = bt_frame->get(s_class.get()); if (class_name.is_init()) { assertx(isStringType(type(class_name))); strncpy(frame->class_name, val(class_name).pstr->data(), std::min(val(class_name).pstr->size(), strobelight::kClassNameMax)); frame->class_name[strobelight::kClassNameMax - 1] = '\0'; } auto const function_name = bt_frame->get(s_function.get()); if (function_name.is_init()) { assertx(isStringType(type(function_name))); strncpy(frame->function, val(function_name).pstr->data(), std::min(val(function_name).pstr->size(), strobelight::kFunctionMax)); frame->function[strobelight::kFunctionMax - 1] = '\0'; } i++; return false; } ); bt_slab.len = i; // Allow BPF to read the now-formatted stacktrace FOLLY_SDT_WITH_SEMAPHORE(hhvm, hhvm_stack, &bt_slab); return true; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); FillDiagHelper(input, output); return kTfLiteOk; }
Base
1
TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const TfLiteTensor* data, const TfLiteTensor* segment_ids, TfLiteTensor* output) { int max_index = -1; const int segment_id_size = segment_ids->dims->data[0]; if (segment_id_size > 0) { max_index = segment_ids->data.i32[segment_id_size - 1]; } const int data_rank = NumDimensions(data); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data)); output_shape->data[0] = max_index + 1; for (int i = 1; i < data_rank; ++i) { output_shape->data[i] = data->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); }
Base
1
void pcre_dump_cache(const std::string& filename) { s_pcreCache.dump(filename); }
Base
1
void pointZZ_pAdd(PointZZ_p * rop, const PointZZ_p * op1, const PointZZ_p * op2, const CurveZZ_p * curve) { mpz_t xdiff, ydiff, lambda; mpz_inits(xdiff, ydiff, lambda, NULL); // calculate lambda mpz_sub(ydiff, op2->y, op1->y); mpz_sub(xdiff, op2->x, op1->x); mpz_invert(xdiff, xdiff, curve->p); // TODO check status mpz_mul(lambda, ydiff, xdiff); mpz_mod(lambda, lambda, curve->p); // calculate resulting x coord mpz_mul(rop->x, lambda, lambda); mpz_sub(rop->x, rop->x, op1->x); mpz_sub(rop->x, rop->x, op2->x); mpz_mod(rop->x, rop->x, curve->p); //calculate resulting y coord mpz_sub(rop->y, op1->x, rop->x); mpz_mul(rop->y, lambda, rop->y); mpz_sub(rop->y, rop->y, op1->y); mpz_mod(rop->y, rop->y, curve->p); mpz_clears(xdiff, ydiff, lambda, NULL); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); output->type = input->type; TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_size); }
Base
1
static int em_fxsave(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; size_t size; int rc; rc = check_fxsr(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->get_fpu(ctxt); rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); ctxt->ops->put_fpu(ctxt); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR) size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]); else size = offsetof(struct fxregs_state, xmm_space[0]); return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size); }
Class
2
set_ssl_ciphers(SCHANNEL_CRED *schannel_cred, char *ciphers) { char *startCur = ciphers; int algCount = 0; static ALG_ID algIds[45]; /*There are 45 listed in the MS headers*/ while(startCur && (0 != *startCur) && (algCount < 45)) { long alg = strtol(startCur, 0, 0); if(!alg) alg = get_alg_id_by_name(startCur); if(alg) algIds[algCount++] = alg; else if(!strncmp(startCur, "USE_STRONG_CRYPTO", sizeof("USE_STRONG_CRYPTO") - 1) || !strncmp(startCur, "SCH_USE_STRONG_CRYPTO", sizeof("SCH_USE_STRONG_CRYPTO") - 1)) schannel_cred->dwFlags |= SCH_USE_STRONG_CRYPTO; else return CURLE_SSL_CIPHER; startCur = strchr(startCur, ':'); if(startCur) startCur++; } schannel_cred->palgSupportedAlgs = algIds; schannel_cred->cSupportedAlgs = algCount; return CURLE_OK; }
Class
2
ResponsePtr Server::ServeStatic(RequestPtr request) { assert(request->method() == methods::kGet); if (doc_root_.empty()) { LOG_INFO("The doc root was not specified"); return {}; } fs::path path = doc_root_ / request->url().path(); try { // NOTE: FileBody might throw Error::kFileError. auto body = std::make_shared<FileBody>(path, file_chunk_size_); auto response = std::make_shared<Response>(Status::kOK); std::string extension = path.extension().string(); response->SetContentType(media_types::FromExtension(extension), ""); // NOTE: Gzip compression is not supported. response->SetBody(body, true); return response; } catch (const Error& error) { LOG_ERRO("File error: %s", error.message().c_str()); return {}; } }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input1->type) { case kTfLiteInt32: { return EvalImpl<int32_t>(context, data->requires_broadcast, input1, input2, output); } case kTfLiteFloat32: { return EvalImpl<float>(context, data->requires_broadcast, input1, input2, output); } default: { context->ReportError(context, "Type '%s' is not supported by floor_div.", TfLiteTypeGetName(input1->type)); return kTfLiteError; } } }
Base
1
static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, X86_TRANSFER_RET, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); } return rc; }
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteUnpackParams* data = reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); switch (input->type) { case kTfLiteFloat32: { UnpackImpl<float>(context, node, input, data->num, data->axis); break; } case kTfLiteInt32: { UnpackImpl<int32_t>(context, node, input, data->num, data->axis); break; } case kTfLiteUInt8: { UnpackImpl<uint8_t>(context, node, input, data->num, data->axis); break; } case kTfLiteInt8: { UnpackImpl<int8_t>(context, node, input, data->num, data->axis); break; } case kTfLiteBool: { UnpackImpl<bool>(context, node, input, data->num, data->axis); break; } case kTfLiteInt16: { UnpackImpl<int16_t>(context, node, input, data->num, data->axis); break; } default: { context->ReportError(context, "Type '%s' is not supported by unpack.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } return kTfLiteOk; }
Base
1
void nodeRename(Proxy &node, const RegexMatchConfigs &rename_array, extra_settings &ext)
{
    std::string &remark = node.Remark, original_remark = node.Remark, returned_remark, real_rule;

    for(const RegexMatchConfig &x : rename_array)
    {
        if(!x.Script.empty())
        {
            script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx)
            {
                std::string script = x.Script;
                if(startsWith(script, "path:"))
                    script = fileGet(script.substr(5), true);
                try
                {
                    ctx.eval(script);
                    auto rename = (std::function<std::string(const Proxy&)>) ctx.eval("rename");
                    returned_remark = rename(node);
                    if(!returned_remark.empty())
                        remark = returned_remark;
                }
                catch (qjs::exception)
                {
                    script_print_stack(ctx);
                }
            }, global.scriptCleanContext);
            continue;
        }
        if(applyMatcher(x.Match, real_rule, node) && real_rule.size())
            remark = regReplace(remark, real_rule, x.Replace);
    }
    if(remark.empty())
        remark = original_remark;
    return;
}
Base
1
TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  OpContext op_context(context, node);
  TF_LITE_ENSURE_TYPES_EQ(context, op_context.axis->type, kTfLiteInt32);
  TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));

  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
  // Leaves work to Eval if axis is not constant; else resizes output.
  if (!IsConstantTensor(op_context.axis)) {
    SetTensorToDynamic(op_context.output);
    SetTensorToDynamic(resolved_axis);
    return kTfLiteOk;
  }
  resolved_axis->allocation_type = kTfLiteArenaRw;
  TF_LITE_ENSURE_OK(context,
                    ResizeTempAxis(context, &op_context, resolved_axis));
  TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
  return kTfLiteOk;
}
Base
1
Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td,
                      const String& key, const String& iv) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }

  int max_key_size = mcrypt_enc_get_key_size(pm->m_td);
  int iv_size = mcrypt_enc_get_iv_size(pm->m_td);

  if (key.empty()) {
    raise_warning("Key size is 0");
  }

  unsigned char *key_s = (unsigned char *)malloc(key.size());
  memset(key_s, 0, key.size());

  unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1);
  memset(iv_s, 0, iv_size + 1);

  int key_size;
  if (key.size() > max_key_size) {
    raise_warning("Key size too large; supplied length: %d, max: %d",
                  key.size(), max_key_size);
    key_size = max_key_size;
  } else {
    key_size = key.size();
  }
  memcpy(key_s, key.data(), key.size());

  if (iv.size() != iv_size) {
    raise_warning("Iv size incorrect; supplied length: %d, needed: %d",
                  iv.size(), iv_size);
  }
  memcpy(iv_s, iv.data(), std::min(iv_size, iv.size()));

  mcrypt_generic_deinit(pm->m_td);
  int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s);

  /* If this function fails, close the mcrypt module to prevent crashes
   * when further functions want to access this resource */
  if (result < 0) {
    pm->close();
    switch (result) {
      case -3:
        raise_warning("Key length incorrect");
        break;
      case -4:
        raise_warning("Memory allocation error");
        break;
      case -1:
      default:
        raise_warning("Unknown error");
        break;
    }
  } else {
    pm->m_init = true;
  }

  free(iv_s);
  free(key_s);
  return result;
}
Base
1
R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut32 i = 0;
	RBinJavaAnnotation *annotation = NULL;
	RBinJavaElementValuePair *evps = NULL;
	ut64 offset = 0;
	annotation = R_NEW0 (RBinJavaAnnotation);
	if (!annotation) {
		return NULL;
	}
	// (ut16) read and set annotation_value.type_idx;
	annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	// (ut16) read and set annotation_value.num_element_value_pairs;
	annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	annotation->element_value_pairs = r_list_newf (r_bin_java_element_pair_free);
	// read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs
	for (i = 0; i < annotation->num_element_value_pairs; i++) {
		if (offset > sz) {
			break;
		}
		evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset);
		if (evps) {
			offset += evps->size;
			r_list_append (annotation->element_value_pairs, (void *) evps);
		}
	}
	annotation->size = offset;
	return annotation;
}
Base
1
mptctl_fw_download(unsigned long arg)
{
	struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
	struct mpt_fw_xfer kfwdl;

	if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) {
		printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - "
			"Unable to copy mpt_fw_xfer struct @ %p\n",
			__FILE__, __LINE__, ufwdl);
		return -EFAULT;
	}

	return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
}
Class
2
AP4_AvccAtom::InspectFields(AP4_AtomInspector& inspector)
{
    inspector.AddField("Configuration Version", m_ConfigurationVersion);
    const char* profile_name = GetProfileName(m_Profile);
    if (profile_name) {
        inspector.AddField("Profile", profile_name);
    } else {
        inspector.AddField("Profile", m_Profile);
    }
    inspector.AddField("Profile Compatibility", m_ProfileCompatibility, AP4_AtomInspector::HINT_HEX);
    inspector.AddField("Level", m_Level);
    inspector.AddField("NALU Length Size", m_NaluLengthSize);
    for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) {
        inspector.AddField("Sequence Parameter", m_SequenceParameters[i].GetData(), m_SequenceParameters[i].GetDataSize());
    }
    for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) {
        inspector.AddField("Picture Parameter", m_PictureParameters[i].GetData(), m_PictureParameters[i].GetDataSize());
    }
    return AP4_SUCCESS;
}
Base
1
TEST_F(QuantizedConv2DTest, Small32Bit) {
  const int stride = 1;
  TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Attr("strides", {1, stride, stride, 1})
                   .Attr("padding", "SAME")
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({image_batch_count, image_height, image_width, depth}),
      {10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
  const int filter_size = 3;
  const int filter_count = 1;
  AddInputFromArray<quint8>(
      TensorShape({filter_size, filter_size, depth, filter_count}),
      {10, 40, 70, 20, 50, 80, 30, 60, 90});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});
  AddInputFromArray<float>(TensorShape({1}), {0});
  AddInputFromArray<float>(TensorShape({1}), {255.0f});

  TF_ASSERT_OK(RunOpKernel());
  const int expected_width = image_width;
  const int expected_height = image_height * filter_count;
  Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
                                          expected_width, filter_count}));
  test::FillValues<qint32>(
      &expected,
      {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800, 18700, 23400,
       26100, 12100});
  test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
Class
2
static inline int min(int a, int b) {return a<b ? a : b;}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const CTCBeamSearchDecoderParams* option =
      reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
  const int top_paths = option->top_paths;
  TF_LITE_ENSURE(context, option->beam_width >= top_paths);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  // The outputs should be top_paths * 3 + 1.
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);

  const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
  // TensorFlow only supports float.
  TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
  const int batch_size = SizeOfDimension(inputs, 1);

  const TfLiteTensor* sequence_length =
      GetInput(context, node, kSequenceLengthTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
  TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
  // TensorFlow only supports int32.
  TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32);

  // Resize decoded outputs.
  // Do not resize indices & values cause we don't know the values yet.
  for (int i = 0; i < top_paths; ++i) {
    TfLiteTensor* indices = GetOutput(context, node, i);
    SetTensorToDynamic(indices);
    TfLiteTensor* values = GetOutput(context, node, i + top_paths);
    SetTensorToDynamic(values);
    TfLiteTensor* output_shape = GetOutput(context, node, i + 2 * top_paths);
    SetTensorToDynamic(output_shape);
  }

  // Resize log probability outputs.
  TfLiteTensor* log_probability_output = GetOutput(context, node, top_paths * 3);
  TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
  log_probability_output_shape_array->data[0] = batch_size;
  log_probability_output_shape_array->data[1] = top_paths;
  return context->ResizeTensor(context, log_probability_output,
                               log_probability_output_shape_array);
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);
  int axis = GetTensorData<int32_t>(axis_tensor)[0];
  const int rank = NumDimensions(input);
  if (axis < 0) {
    axis += rank;
  }
  TF_LITE_ENSURE(context, axis >= 0 && axis < rank);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  switch (output->type) {
    case kTfLiteFloat32: {
      reference_ops::Reverse<float>(
          axis, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      break;
    }
    case kTfLiteUInt8: {
      reference_ops::Reverse<uint8_t>(
          axis, GetTensorShape(input), GetTensorData<uint8_t>(input),
          GetTensorShape(output), GetTensorData<uint8_t>(output));
      break;
    }
    case kTfLiteInt16: {
      reference_ops::Reverse<int16_t>(
          axis, GetTensorShape(input), GetTensorData<int16_t>(input),
          GetTensorShape(output), GetTensorData<int16_t>(output));
      break;
    }
    case kTfLiteInt32: {
      reference_ops::Reverse<int32_t>(
          axis, GetTensorShape(input), GetTensorData<int32_t>(input),
          GetTensorShape(output), GetTensorData<int32_t>(output));
      break;
    }
    case kTfLiteInt64: {
      reference_ops::Reverse<int64_t>(
          axis, GetTensorShape(input), GetTensorData<int64_t>(input),
          GetTensorShape(output), GetTensorData<int64_t>(output));
      break;
    }
    case kTfLiteBool: {
      reference_ops::Reverse<bool>(
          axis, GetTensorShape(input), GetTensorData<bool>(input),
          GetTensorShape(output), GetTensorData<bool>(output));
      break;
    }
    default: {
      context->ReportError(context, "Type '%s' is not supported by reverse.",
                           TfLiteTypeGetName(output->type));
      return kTfLiteError;
    }
  }

  return kTfLiteOk;
}
Base
1
void CLASS panasonic_load_raw()
{
  int row, col, i, j, sh = 0, pred[2], nonz[2];

  pana_bits(0);
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < raw_width; col++)
    {
      if ((i = col % 14) == 0)
        pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
      if (i % 3 == 2)
        sh = 4 >> (3 - pana_bits(2));
      if (nonz[i & 1])
      {
        if ((j = pana_bits(8)))
        {
          if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
            pred[i & 1] &= ~((~0u) << sh);
          pred[i & 1] += j << sh;
        }
      }
      else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
        pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
      if ((RAW(row, col) = pred[col & 1]) > 4098 && col < width)
        derror();
    }
  }
}
Class
2
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
  TfLiteIntArray* output_shape = GetOutputShape(context, node);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
      scoped_output_shape(output_shape, TfLiteIntArrayFree);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // Tensorflow's Reshape allows one of the shape components to have the
  // special -1 value, meaning it will be calculated automatically based on the
  // input. Here we calculate what that dimension should be so that the number
  // of output elements in the same as the number of input elements.
  int num_input_elements = NumElements(input);

  int num_output_elements = 1;
  int stretch_dim = -1;
  for (int i = 0; i < output_shape->size; ++i) {
    int value = output_shape->data[i];
    if (value == -1) {
      TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
      stretch_dim = i;
    } else {
      num_output_elements *= value;
    }
  }
  if (stretch_dim != -1) {
    output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
    num_output_elements *= output_shape->data[stretch_dim];
  }

  TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
  return context->ResizeTensor(context, output, scoped_output_shape.release());
}
Base
1
bool DNP3_Base::ParseAppLayer(Endpoint* endp)
	{
	bool orig = (endp == &orig_state);
	binpac::DNP3::DNP3_Flow* flow = orig ? interp->upflow() : interp->downflow();

	u_char* data = endp->buffer + PSEUDO_TRANSPORT_INDEX; // The transport layer byte counts as app-layer it seems.
	int len = endp->pkt_length - 5;

	// DNP3 Packet : DNP3 Pseudo Link Layer | DNP3 Pseudo Transport Layer | DNP3 Pseudo Application Layer
	// DNP3 Serial Transport Layer data is always 1 byte.
	// Get FIN FIR seq field in transport header.
	// FIR indicate whether the following DNP3 Serial Application Layer is first chunk of bytes or not.
	// FIN indicate whether the following DNP3 Serial Application Layer is last chunk of bytes or not.

	int is_first = (endp->tpflags & 0x40) >> 6; // Initial chunk of data in this packet.
	int is_last = (endp->tpflags & 0x80) >> 7; // Last chunk of data in this packet.

	int transport = PSEUDO_TRANSPORT_LEN;

	int i = 0;
	while ( len > 0 )
		{
		int n = min(len, 16);

		// Make sure chunk has a correct checksum.
		if ( ! CheckCRC(n, data, data + n, "app_chunk") )
			return false;

		// Pass on to BinPAC.
		assert(data + n < endp->buffer + endp->buffer_len);
		flow->flow_buffer()->BufferData(data + transport, data + n);
		transport = 0;

		data += n + 2;
		len -= n;
		}

	if ( is_first )
		endp->encountered_first_chunk = true;

	if ( ! is_first && ! endp->encountered_first_chunk )
		{
		// We lost the first chunk.
		analyzer->Weird("dnp3_first_application_layer_chunk_missing");
		return false;
		}

	if ( is_last )
		{
		flow->flow_buffer()->FinishBuffer();
		flow->FlowEOF();
		ClearEndpointState(orig);
		}

	return true;
	}
Class
2
static XMLSharedNodeList* find_impl(xmlXPathContext* ctxt, const string& xpath)
{
	xmlXPathObject* result = xmlXPathEval((const xmlChar*)xpath.c_str(), ctxt);

	if (!result) {
		xmlXPathFreeContext(ctxt);
		xmlFreeDoc(ctxt->doc);

		throw XMLException("Invalid XPath: " + xpath);
	}

	if (result->type != XPATH_NODESET) {
		xmlXPathFreeObject(result);
		xmlXPathFreeContext(ctxt);
		xmlFreeDoc(ctxt->doc);

		throw XMLException("Only nodeset result types are supported.");
	}

	xmlNodeSet* nodeset = result->nodesetval;

	XMLSharedNodeList* nodes = new XMLSharedNodeList();

	if (nodeset) {
		for (int i = 0; i < nodeset->nodeNr; ++i) {
			XMLNode* node = readnode(nodeset->nodeTab[i]);
			nodes->push_back(boost::shared_ptr<XMLNode>(node));
		}
	} else {
		// return empty set
	}

	xmlXPathFreeObject(result);

	return nodes;
}
Variant
0
jas_image_t *jas_image_create0()
{
	jas_image_t *image;

	if (!(image = jas_malloc(sizeof(jas_image_t)))) {
		return 0;
	}

	image->tlx_ = 0;
	image->tly_ = 0;
	image->brx_ = 0;
	image->bry_ = 0;
	image->clrspc_ = JAS_CLRSPC_UNKNOWN;
	image->numcmpts_ = 0;
	image->maxcmpts_ = 0;
	image->cmpts_ = 0;
	image->inmem_ = true;
	image->cmprof_ = 0;

	return image;
}
Base
1
void TightDecoder::FilterGradient(const rdr::U8* inbuf,
                                  const PixelFormat& pf,
                                  PIXEL_T* outbuf, int stride,
                                  const Rect& r)
{
  int x, y, c;
  static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3];
  static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3];
  rdr::U8 pix[3];
  int est[3];

  memset(prevRow, 0, sizeof(prevRow));

  // Set up shortcut variables
  int rectHeight = r.height();
  int rectWidth = r.width();

  for (y = 0; y < rectHeight; y++) {
    /* First pixel in a row */
    pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1);
    for (c = 0; c < 3; c++)
      pix[c] += prevRow[c];

    memcpy(thisRow, pix, sizeof(pix));

    pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);

    /* Remaining pixels of a row */
    for (x = 1; x < rectWidth; x++) {
      for (c = 0; c < 3; c++) {
        est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c];
        if (est[c] > 255) {
          est[c] = 255;
        } else if (est[c] < 0) {
          est[c] = 0;
        }
      }
      pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1);
      for (c = 0; c < 3; c++)
        pix[c] += est[c];

      memcpy(&thisRow[x*3], pix, sizeof(pix));

      pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1);
    }

    memcpy(prevRow, thisRow, sizeof(prevRow));
  }
}
Base
1
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  bool requires_broadcast = !HaveSameShapes(input1, input2);
  switch (input1->type) {
    case kTfLiteBool:
      Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output,
                                                  requires_broadcast);
      break;
    case kTfLiteFloat32:
      Comparison<float, reference_ops::NotEqualFn>(input1, input2, output,
                                                   requires_broadcast);
      break;
    case kTfLiteInt32:
      Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output,
                                                     requires_broadcast);
      break;
    case kTfLiteInt64:
      Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output,
                                                     requires_broadcast);
      break;
    case kTfLiteUInt8:
      ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt8:
      ComparisonQuantized<int8_t, reference_ops::NotEqualFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteString:
      ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2,
                       output, requires_broadcast);
      break;
    default:
      context->ReportError(
          context,
          "Does not support type %d, requires bool|float|int|uint8|string",
          input1->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
virtual size_t Read(void *buffer, size_t size, size_t count)
{
  if (!m_fp) return 0;
  return fread(buffer, size, count, m_fp);
}
Base
1