code: string (12 to 2.05k characters)
label_name: 5 classes
label: int64 (0 to 4)
QUInt8() {}
Base
1
void CxImage::Startup(uint32_t imagetype)
{
  //init pointers
  pDib = pSelection = pAlpha = NULL;
  ppLayers = ppFrames = NULL;
  //init structures
  memset(&head,0,sizeof(BITMAPINFOHEADER));
  memset(&info,0,sizeof(CXIMAGEINFO));
  //init default attributes
  info.dwType = imagetype;
  info.fQuality = 90.0f;
  info.nAlphaMax = 255;
  info.nBkgndIndex = -1;
  info.bEnabled = true;
  info.nJpegScale = 1;
  SetXDPI(CXIMAGE_DEFAULT_DPI);
  SetYDPI(CXIMAGE_DEFAULT_DPI);
  int16_t test = 1;
  info.bLittleEndianHost = (*((char *) &test) == 1);
}
Base
1
int jas_memdump(FILE *out, void *data, size_t len)
{
  size_t i;
  size_t j;
  uchar *dp;
  dp = data;
  for (i = 0; i < len; i += 16) {
    fprintf(out, "%04zx:", i);
    for (j = 0; j < 16; ++j) {
      if (i + j < len) {
        fprintf(out, " %02x", dp[i + j]);
      }
    }
    fprintf(out, "\n");
  }
  return 0;
}
Base
1
int GetS32BE (int nPos, bool *pbSuccess)
{
  //*pbSuccess = true;
  if ( nPos < 0 || nPos + 3 >= m_nLen )
  {
    *pbSuccess = false;
    return 0;
  }
  int nRes = m_sFile[ nPos ];
  nRes = (nRes << 8) + m_sFile[nPos + 1];
  nRes = (nRes << 8) + m_sFile[nPos + 2];
  nRes = (nRes << 8) + m_sFile[nPos + 3];
  if ( nRes & 0x80000000 )
    nRes |= ~0xffffffff;
  return nRes;
}
Base
1
void BlockCodec::runPull()
{
  AFframecount framesToRead = m_outChunk->frameCount;
  AFframecount framesRead = 0;
  assert(framesToRead % m_framesPerPacket == 0);
  int blockCount = framesToRead / m_framesPerPacket;
  // Read the compressed data.
  ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount);
  int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0;
  // Decompress into m_outChunk.
  for (int i=0; i<blocksRead; i++)
  {
    decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket,
                static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount);
    framesRead += m_framesPerPacket;
  }
  m_track->nextfframe += framesRead;
  assert(tell() == m_track->fpos_next_frame);
  if (framesRead < framesToRead)
    reportReadError(framesRead, framesToRead);
  m_outChunk->frameCount = framesRead;
}
Base
1
RemoteFsDevice::RemoteFsDevice(MusicLibraryModel *m, const Details &d)
  : FsDevice(m, d.name, createUdi(d.name))
  , mountToken(0)
  , currentMountStatus(false)
  , details(d)
  , proc(0)
  , mounterIface(0)
  , messageSent(false)
{
  // details.path=Utils::fixPath(details.path);
  setup();
  icn=MonoIcon::icon(details.isLocalFile()
                       ? FontAwesome::foldero
                       : constSshfsProtocol==details.url.scheme()
                         ? FontAwesome::linux_os
                         : FontAwesome::windows, Utils::monoIconColor());
}
Base
1
void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, float* output_data,
                 const Dims<4>& output_dims) {
  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
                  filter_width, filter_height, output_data, output_dims);
}
Base
1
static int16_t decodeSample(ms_adpcm_state &state, uint8_t code,
                            const int16_t *coefficient)
{
  int linearSample = (state.sample1 * coefficient[0] +
                      state.sample2 * coefficient[1]) >> 8;
  linearSample += ((code & 0x08) ? (code - 0x10) : code) * state.delta;
  linearSample = clamp(linearSample, MIN_INT16, MAX_INT16);
  int delta = (state.delta * adaptationTable[code]) >> 8;
  if (delta < 16)
    delta = 16;
  state.delta = delta;
  state.sample2 = state.sample1;
  state.sample1 = linearSample;
  return static_cast<int16_t>(linearSample);
}
Base
1
Variant HHVM_METHOD(XMLReader, expand, const Variant& basenode /* = null */) {
  auto* data = Native::data<XMLReader>(this_);
  req::ptr<XMLDocumentData> doc;
  xmlDocPtr docp = nullptr;
  SYNC_VM_REGS_SCOPED();
  if (!basenode.isNull()) {
    auto dombasenode = Native::data<DOMNode>(basenode.toObject());
    doc = dombasenode->doc();
    docp = doc->docp();
    if (docp == nullptr) {
      raise_warning("Invalid State Error");
      return false;
    }
  }
  if (data->m_ptr) {
    xmlNodePtr node = xmlTextReaderExpand(data->m_ptr);
    if (node == nullptr) {
      raise_warning("An Error Occurred while expanding");
      return false;
    } else {
      xmlNodePtr nodec = xmlDocCopyNode(node, docp, 1);
      if (nodec == nullptr) {
        raise_notice("Cannot expand this node type");
        return false;
      } else {
        return php_dom_create_object(nodec, doc);
      }
    }
  }
  raise_warning("Load Data before trying to read");
  return false;
}
Base
1
generatePreview (const char inFileName[],
                 float exposure,
                 int previewWidth,
                 int &previewHeight,
                 Array2D <PreviewRgba> &previewPixels)
{
    //
    // Read the input file
    //
    RgbaInputFile in (inFileName);
    Box2i dw = in.dataWindow();
    float a = in.pixelAspectRatio();
    int w = dw.max.x - dw.min.x + 1;
    int h = dw.max.y - dw.min.y + 1;
    Array2D <Rgba> pixels (h, w);
    in.setFrameBuffer (ComputeBasePointer (&pixels[0][0], dw), 1, w);
    in.readPixels (dw.min.y, dw.max.y);

    //
    // Make a preview image
    //
    previewHeight = max (int (h / (w * a) * previewWidth + .5f), 1);
    previewPixels.resizeErase (previewHeight, previewWidth);
    float fx = (previewWidth > 0)? (float (w - 1) / (previewWidth - 1)): 1;
    float fy = (previewHeight > 0)? (float (h - 1) / (previewHeight - 1)): 1;
    float m = Math<float>::pow (2.f, IMATH_NAMESPACE::clamp (exposure + 2.47393f, -20.f, 20.f));

    for (int y = 0; y < previewHeight; ++y)
    {
        for (int x = 0; x < previewWidth; ++x)
        {
            PreviewRgba &preview = previewPixels[y][x];
            const Rgba &pixel = pixels[int (y * fy + .5f)][int (x * fx + .5f)];
            preview.r = gamma (pixel.r, m);
            preview.g = gamma (pixel.g, m);
            preview.b = gamma (pixel.b, m);
            preview.a = int (IMATH_NAMESPACE::clamp (pixel.a * 255.f, 0.f, 255.f) + .5f);
        }
    }
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteType output_type = GetOutput(context, node, kOutputTensor)->type;
  switch (output_type) {  // Already know in/outtypes are same.
    case kTfLiteFloat32:
      EvalUnquantized<float>(context, node);
      break;
    case kTfLiteInt32:
      EvalUnquantized<int32_t>(context, node);
      break;
    case kTfLiteUInt8:
      EvalQuantizedUInt8(context, node);
      break;
    case kTfLiteInt8:
      EvalUnquantized<int8_t>(context, node);
      break;
    case kTfLiteInt64:
      EvalUnquantized<int64_t>(context, node);
      break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Op Concatenation does not currently support Type '%s'.",
          TfLiteTypeGetName(output_type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteType output_type = GetOutput(context, node, kOutputTensor)->type;
  switch (output_type) {  // Already know in/outtypes are same.
    case kTfLiteFloat32:
      EvalUnquantized<float>(context, node);
      break;
    case kTfLiteInt32:
      EvalUnquantized<int32_t>(context, node);
      break;
    case kTfLiteUInt8:
      EvalQuantizedUInt8(context, node);
      break;
    case kTfLiteInt8:
      EvalUnquantized<int8_t>(context, node);
      break;
    case kTfLiteInt64:
      EvalUnquantized<int64_t>(context, node);
      break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Op Concatenation does not currently support Type '%s'.",
          TfLiteTypeGetName(output_type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx,
  int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep,
  int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd,
  uint_fast32_t inmem)
{
  jas_image_cmpt_t *cmpt;
  size_t size;

  cmpt = 0;
  if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) {
    goto error;
  }
  if (!jas_safe_intfast32_add(tlx, width, 0) ||
    !jas_safe_intfast32_add(tly, height, 0)) {
    goto error;
  }

  if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) {
    goto error;
  }
  cmpt->type_ = JAS_IMAGE_CT_UNKNOWN;
  cmpt->tlx_ = tlx;
  cmpt->tly_ = tly;
  cmpt->hstep_ = hstep;
  cmpt->vstep_ = vstep;
  cmpt->width_ = width;
  cmpt->height_ = height;
  cmpt->prec_ = depth;
  cmpt->sgnd_ = sgnd;
  cmpt->stream_ = 0;
  cmpt->cps_ = (depth + 7) / 8;

  // Compute the number of samples in the image component, while protecting
  // against overflow.
  // size = cmpt->width_ * cmpt->height_ * cmpt->cps_;
  if (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) ||
    !jas_safe_size_mul(size, cmpt->cps_, &size)) {
    goto error;
  }
  cmpt->stream_ = (inmem) ? jas_stream_memopen2(0, size) : jas_stream_tmpfile();
  if (!cmpt->stream_) {
    goto error;
  }

  /* Zero the component data. This isn't necessary, but it is convenient
     for debugging purposes. */
  /* Note: conversion of size - 1 to long can overflow */
  if (size > 0) {
    if (size - 1 > LONG_MAX) {
      goto error;
    }
    if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 ||
      jas_stream_putc(cmpt->stream_, 0) == EOF ||
      jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) {
      goto error;
    }
  }

  return cmpt;

error:
  if (cmpt) {
    jas_image_cmpt_destroy(cmpt);
  }
  return 0;
}
Base
1
void writeStats(Array& /*ret*/) override {
  fprintf(stderr, "writeStats start\n");
  // RetSame: the return value is the same instance every time
  // HasThis: call has a this argument
  // AllSame: all returns were the same data even though args are different
  // MemberCount: number of different arg sets (including this)
  fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis "
          "AllSame MemberCount\n");
  for (auto& me : m_memos) {
    if (me.second.m_ignore) continue;
    if (me.second.m_count == 1) continue;
    int min_ser_len = 999999999;
    int max_ser_len = 0;
    int count = 0;
    int member_count = 0;
    bool all_same = true;
    if (me.second.m_has_this) {
      bool any_multiple = false;
      auto& fr = me.second.m_member_memos.begin()->second.m_return_value;
      member_count = me.second.m_member_memos.size();
      for (auto& mme : me.second.m_member_memos) {
        if (mme.second.m_return_value != fr) all_same = false;
        count += mme.second.m_count;
        auto ser_len = mme.second.m_return_value.length();
        min_ser_len = std::min(min_ser_len, ser_len);
        max_ser_len = std::max(max_ser_len, ser_len);
        if (mme.second.m_count > 1) any_multiple = true;
      }
      if (!any_multiple && !all_same) continue;
    } else {
      min_ser_len = max_ser_len = me.second.m_return_value.length();
      count = me.second.m_count;
      all_same = me.second.m_ret_tv_same;
    }
    fprintf(stderr, "%d %s %d %d %s %s %s %d\n",
            count, me.first.data(),
            min_ser_len, max_ser_len,
            me.second.m_ret_tv_same ? " true" : "false",
            me.second.m_has_this ? " true" : "false",
            all_same ? " true" : "false",
            member_count);
  }
  fprintf(stderr, "writeStats end\n");
}
Base
1
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  bool requires_broadcast = !HaveSameShapes(input1, input2);
  switch (input1->type) {
    case kTfLiteFloat32:
      Comparison<float, reference_ops::GreaterFn>(input1, input2, output,
                                                  requires_broadcast);
      break;
    case kTfLiteInt32:
      Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output,
                                                    requires_broadcast);
      break;
    case kTfLiteInt64:
      Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output,
                                                    requires_broadcast);
      break;
    case kTfLiteUInt8:
      ComparisonQuantized<uint8_t, reference_ops::GreaterFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt8:
      ComparisonQuantized<int8_t, reference_ops::GreaterFn>(
          input1, input2, output, requires_broadcast);
      break;
    default:
      context->ReportError(context,
                           "Does not support type %d, requires float|int|uint8",
                           input1->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
mptctl_readtest (unsigned long arg)
{
  struct mpt_ioctl_test __user *uarg = (void __user *) arg;
  struct mpt_ioctl_test karg;
  MPT_ADAPTER *ioc;
  int iocnum;

  if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
    printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
        "Unable to read in mpt_ioctl_test struct @ %p\n",
        __FILE__, __LINE__, uarg);
    return -EFAULT;
  }

  if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
      (ioc == NULL)) {
    printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
        __FILE__, __LINE__, iocnum);
    return -ENODEV;
  }

  dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
      ioc->name));
  /* Fill in the data and return the structure to the calling
   * program
   */
#ifdef MFCNT
  karg.chip_type = ioc->mfcnt;
#else
  karg.chip_type = ioc->pcidev->device;
#endif
  strncpy (karg.name, ioc->name, MPT_MAX_NAME);
  karg.name[MPT_MAX_NAME-1]='\0';
  strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
  karg.product[MPT_PRODUCT_LENGTH-1]='\0';

  /* Copy the data from kernel memory to user memory */
  if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
    printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_readtest - "
        "Unable to write out mpt_ioctl_test struct @ %p\n",
        ioc->name, __FILE__, __LINE__, uarg);
    return -EFAULT;
  }

  return 0;
}
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  FillDiagHelper(input, output);
  return kTfLiteOk;
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* cond_tensor =
      GetInput(context, node, kInputConditionTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context,
                      ResizeOutputTensor(context, cond_tensor, output));
  }
  TfLiteIntArray* dims = cond_tensor->dims;
  if (dims->size == 0) {
    // Scalar tensors are not supported.
    TF_LITE_KERNEL_LOG(context, "Where op requires condition w/ rank > 0");
    return kTfLiteError;
  }
  reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                  GetTensorData<bool>(cond_tensor),
                                  GetTensorData<int64_t>(output));
  return kTfLiteOk;
}
Base
1
bool AdminRequestHandler::handleDumpStaticStringsRequest(
    const std::string& /*cmd*/, const std::string& filename) {
  auto const& list = lookupDefinedStaticStrings();
  std::ofstream out(filename.c_str());
  SCOPE_EXIT { out.close(); };
  for (auto item : list) {
    out << formatStaticString(item);
    if (RuntimeOption::EvalPerfDataMap) {
      auto const len = std::min<size_t>(item->size(), 255);
      std::string str(item->data(), len);
      // Only print the first line (up to 255 characters). Since we want '\0' in
      // the list of characters to avoid, we need to use the version of
      // `find_first_of()' with explicit length.
      auto cutOffPos = str.find_first_of("\r\n", 0, 3);
      if (cutOffPos != std::string::npos) str.erase(cutOffPos);
      Debug::DebugInfo::recordDataMap(item->mutableData(),
                                      item->mutableData() + item->size(),
                                      "Str-" + str);
    }
  }
  return true;
}
Base
1
int GetU16BE (int nPos, bool *pbSuccess)
{
  //*pbSuccess = true;
  if ( nPos < 0 || nPos + 1 >= m_nLen)
  {
    *pbSuccess = false;
    return 0;
  }
  int nRes = m_sFile[ nPos ];
  nRes = (nRes << 8) + m_sFile[ nPos + 1 ];
  return nRes;
}
Base
1
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  bool requires_broadcast = !HaveSameShapes(input1, input2);
  switch (input1->type) {
    case kTfLiteFloat32:
      Comparison<float, reference_ops::GreaterFn>(input1, input2, output,
                                                  requires_broadcast);
      break;
    case kTfLiteInt32:
      Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output,
                                                    requires_broadcast);
      break;
    case kTfLiteInt64:
      Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output,
                                                    requires_broadcast);
      break;
    case kTfLiteUInt8:
      ComparisonQuantized<uint8_t, reference_ops::GreaterFn>(
          input1, input2, output, requires_broadcast);
      break;
    case kTfLiteInt8:
      ComparisonQuantized<int8_t, reference_ops::GreaterFn>(
          input1, input2, output, requires_broadcast);
      break;
    default:
      context->ReportError(context,
                           "Does not support type %d, requires float|int|uint8",
                           input1->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
static Variant HHVM_FUNCTION(simplexml_import_dom,
                             const Object& node,
                             const String& class_name /* = "SimpleXMLElement" */) {
  auto domnode = Native::data<DOMNode>(node);
  xmlNodePtr nodep = domnode->nodep();
  if (nodep) {
    if (nodep->doc == nullptr) {
      raise_warning("Imported Node must have associated Document");
      return init_null();
    }
    if (nodep->type == XML_DOCUMENT_NODE ||
        nodep->type == XML_HTML_DOCUMENT_NODE) {
      nodep = xmlDocGetRootElement((xmlDocPtr) nodep);
    }
  }
  if (nodep && nodep->type == XML_ELEMENT_NODE) {
    auto cls = class_from_name(class_name, "simplexml_import_dom");
    if (!cls) {
      return init_null();
    }
    Object obj = create_object(cls->nameStr(), Array(), false);
    auto sxe = Native::data<SimpleXMLElement>(obj.get());
    sxe->node = libxml_register_node(nodep);
    return obj;
  } else {
    raise_warning("Invalid Nodetype to import");
    return init_null();
  }
  return false;
}
Class
2
RemoteDevicePropertiesWidget::RemoteDevicePropertiesWidget(QWidget *parent)
  : QWidget(parent)
  , modified(false)
  , saveable(false)
{
  setupUi(this);
  if (qobject_cast<QTabWidget *>(parent)) {
    verticalLayout->setMargin(4);
  }
  type->addItem(tr("Samba Share"), (int)Type_Samba);
  type->addItem(tr("Samba Share (Auto-discover host and port)"), (int)Type_SambaAvahi);
  type->addItem(tr("Secure Shell (sshfs)"), (int)Type_SshFs);
  type->addItem(tr("Locally Mounted Folder"), (int)Type_File);
}
Class
2
void Context::onDone() { if (wasm_->onDone_) { wasm_->onDone_(this, id_); } }
Base
1
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) {
  // Now look in prototypes
  JsVar * child = jspeiFindChildFromStringInParents(object, name);

  /* Check for builtins via separate function
   * This way we save on RAM for built-ins because everything comes out of program code */
  if (!child) {
    child = jswFindBuiltInFunction(object, name);
  }

  /* We didn't get here if we found a child in the object itself, so
   * if we're here then we probably have the wrong name - so for example
   * with `a.b = c;` could end up setting `a.prototype.b` (bug #360)
   *
   * Also we might have got a built-in, which wouldn't have a name on it
   * anyway - so in both cases, strip the name if it is there, and create
   * a new name that references the object we actually requested the
   * member from..
   */
  if (child && returnName) {
    // Get rid of existing name
    if (jsvIsName(child)) {
      JsVar *t = jsvGetValueOfName(child);
      jsvUnLock(child);
      child = t;
    }
    // create a new name
    JsVar *nameVar = jsvNewFromString(name);
    JsVar *newChild = jsvCreateNewChild(object, nameVar, child);
    jsvUnLock2(nameVar, child);
    child = newChild;
  }

  // If not found and is the prototype, create it
  if (!child) {
    if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) {
      // prototype is supposed to be an object
      JsVar *proto = jsvNewObject();
      // make sure it has a 'constructor' variable that points to the object it was part of
      jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object);
      child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR);
      jspEnsureIsPrototype(object, child);
      jsvUnLock(proto);
    } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) {
      const char *objName = jswGetBasicObjectName(object);
      if (objName) {
        child = jspNewPrototype(objName);
      }
    }
  }
  return child;
}
Base
1
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  if (index >= 0 && index < node->inputs->size) {
    const int tensor_index = node->inputs->data[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      if (context->tensors != nullptr) {
        return &context->tensors[tensor_index];
      } else {
        return context->GetTensor(context, tensor_index);
      }
    }
  }
  return nullptr;
}
Base
1
SECURITY_STATUS SEC_ENTRY DeleteSecurityContext(PCtxtHandle phContext)
{
  char* Name;
  SECURITY_STATUS status;
  SecurityFunctionTableA* table;
  Name = (char*) sspi_SecureHandleGetUpperPointer(phContext);
  if (!Name)
    return SEC_E_SECPKG_NOT_FOUND;
  table = sspi_GetSecurityFunctionTableAByNameA(Name);
  if (!table)
    return SEC_E_SECPKG_NOT_FOUND;
  if (table->DeleteSecurityContext == NULL)
    return SEC_E_UNSUPPORTED_FUNCTION;
  status = table->DeleteSecurityContext(phContext);
  return status;
}
Base
1
inline typename V::MapType FBUnserializer<V>::unserializeMap() {
  p_ += CODE_SIZE;
  typename V::MapType ret = V::createMap();
  size_t code = nextCode();
  while (code != FB_SERIALIZE_STOP) {
    switch (code) {
      case FB_SERIALIZE_VARCHAR:
      case FB_SERIALIZE_STRING: {
        auto key = unserializeString();
        auto value = unserializeThing();
        V::mapSet(ret, std::move(key), std::move(value));
      } break;
      default: {
        auto key = unserializeInt64();
        auto value = unserializeThing();
        V::mapSet(ret, std::move(key), std::move(value));
      }
    }
    code = nextCode();
  }
  p_ += CODE_SIZE;
  return ret;
}
Class
2
QString Helper::temporaryMountDevice(const QString &device, const QString &name, bool readonly)
{
  QString mount_point = mountPoint(device);
  if (!mount_point.isEmpty())
    return mount_point;

  mount_point = "%1/.%2/mount/%3";
  const QStringList &tmp_paths = QStandardPaths::standardLocations(QStandardPaths::TempLocation);
  mount_point = mount_point.arg(tmp_paths.isEmpty() ? "/tmp" : tmp_paths.first()).arg(qApp->applicationName()).arg(name);

  if (!QDir::current().mkpath(mount_point)) {
    dCError("mkpath \"%s\" failed", qPrintable(mount_point));
    return QString();
  }

  if (!mountDevice(device, mount_point, readonly)) {
    dCError("Mount the device \"%s\" to \"%s\" failed", qPrintable(device), qPrintable(mount_point));
    return QString();
  }

  return mount_point;
}
Base
1
static int64_t HHVM_FUNCTION(bccomp, const String& left, const String& right,
                             int64_t scale /* = -1 */) {
  if (scale < 0) scale = BCG(bc_precision);
  bc_num first, second;
  bc_init_num(&first);
  bc_init_num(&second);
  bc_str2num(&first, (char*)left.data(), scale);
  bc_str2num(&second, (char*)right.data(), scale);
  int64_t ret = bc_compare(first, second);
  bc_free_num(&first);
  bc_free_num(&second);
  return ret;
}
Base
1
static const char *jsi_evalprint(Jsi_Value *v)
{
  static char buf[100];
  if (!v)
    return "nil";
  if (v->vt == JSI_VT_NUMBER) {
    snprintf(buf, 100, "NUM:%" JSI_NUMGFMT " ", v->d.num);
  } else if (v->vt == JSI_VT_BOOL) {
    snprintf(buf, 100, "BOO:%d", v->d.val);
  } else if (v->vt == JSI_VT_STRING) {
    snprintf(buf, 100, "STR:'%s'", v->d.s.str);
  } else if (v->vt == JSI_VT_VARIABLE) {
    snprintf(buf, 100, "VAR:%p", v->d.lval);
  } else if (v->vt == JSI_VT_NULL) {
    snprintf(buf, 100, "NULL");
  } else if (v->vt == JSI_VT_OBJECT) {
    snprintf(buf, 100, "OBJ:%p", v->d.obj);
  } else if (v->vt == JSI_VT_UNDEF) {
    snprintf(buf, 100, "UNDEFINED");
  }
  return buf;
}
Base
1
absl::Status IsSupported(const TfLiteContext* context,
                         const TfLiteNode* tflite_node,
                         const TfLiteRegistration* registration) final {
  if (mirror_pad_) {
    const TfLiteMirrorPaddingParams* tf_options;
    RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
    if (tf_options->mode !=
        TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect) {
      return absl::InvalidArgumentError(
          "Only Reflective padding is supported for Mirror Pad operation.");
    }
  }
  RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
  RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                     /*runtime_inputs=*/1, /*outputs=*/1));
  RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
  auto pad_tensor = tflite::GetInput(context, tflite_node, 1);
  if (pad_tensor->dims->size != 2) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Invalid paddings tensor dimension: expected 2 dim, got ",
        pad_tensor->dims->size, " dim"));
  }
  bool supported = pad_tensor->dims->data[0] == 3 ||
                   pad_tensor->dims->data[0] == 4;
  if (!supported || pad_tensor->dims->data[1] != 2) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Invalid paddings tensor shape: expected 4x2 or 3x2, got ",
        pad_tensor->dims->data[0], "x", pad_tensor->dims->data[1]));
  }
  return absl::OkStatus();
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* cond_tensor =
      GetInput(context, node, kInputConditionTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (cond_tensor->type != kTfLiteBool) {
    context->ReportError(context,
                         "Condition tensor must be of type bool, but saw '%s'.",
                         TfLiteTypeGetName(cond_tensor->type));
    return kTfLiteError;
  }
  // As output will be a 2D tensor of indices, use int64 to be consistent with
  // tensorflow.
  output->type = kTfLiteInt64;
  // Exit early if cond is a non-const tensor. Set output tensor to dynamic so
  // output size can be determined in Eval.
  if (!IsConstantTensor(cond_tensor)) {
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }
  return ResizeOutputTensor(context, cond_tensor, output);
}
Base
1
BinaryParameter::BinaryParameter(const char* name_, const char* desc_,
                                 const void* v, int l, ConfigurationObject co)
  : VoidParameter(name_, desc_, co), value(0), length(0),
    def_value((char*)v), def_length(l) {
  if (l) {
    value = new char[l];
    length = l;
    memcpy(value, v, l);
  }
}
Base
1
TfLiteStatus EvalHashtableSize(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_resource_id_tensor =
      GetInput(context, node, kInputResourceIdTensor);
  int resource_id = input_resource_id_tensor->data.i32[0];
  TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor);
  auto* output_data = GetTensorData<std::int64_t>(output_tensor);
  Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  auto& resources = subgraph->resources();
  auto* lookup = resource::GetHashtableResource(&resources, resource_id);
  TF_LITE_ENSURE(context, lookup != nullptr);
  output_data[0] = lookup->Size();
  return kTfLiteOk;
}
Base
1
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
                                   OpContext* op_context) {
  // Creates a temp index to iterate through input data.
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  TfLiteIntArrayFree(node->temporaries);
  node->temporaries = TfLiteIntArrayCreate(3);
  node->temporaries->data[0] = op_data->scratch_tensor_index;
  TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
  scratch_tensor->type = kTfLiteInt32;
  scratch_tensor->allocation_type = kTfLiteArenaRw;
  TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
  index_size->data[0] = NumDimensions(op_context->input);
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, scratch_tensor, index_size));

  // Creates a temp tensor to store resolved axis given input data.
  node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
  resolved_axis->type = kTfLiteInt32;
  // Creates a temp tensor to store temp sums when calculating mean.
  node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
  TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
  switch (op_context->input->type) {
    case kTfLiteFloat32:
      temp_sum->type = kTfLiteFloat32;
      break;
    case kTfLiteInt32:
      temp_sum->type = kTfLiteInt64;
      break;
    case kTfLiteInt64:
      temp_sum->type = kTfLiteInt64;
      break;
    case kTfLiteUInt8:
    case kTfLiteInt8:
    case kTfLiteInt16:
      temp_sum->type = kTfLiteInt32;
      break;
    case kTfLiteBool:
      temp_sum->type = kTfLiteBool;
      break;
    default:
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
static int bson_append_string_base( bson *b, const char *name,
                                    const char *value, int len, bson_type type ) {
  int sl = len + 1;
  if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR )
    return BSON_ERROR;
  if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) {
    return BSON_ERROR;
  }
  bson_append32( b , &sl );
  bson_append( b , value , sl - 1 );
  bson_append( b , "\0" , 1 );
  return BSON_OK;
}
Base
1
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  if (context->tensors != nullptr) {
    return &context->tensors[node->outputs->data[index]];
  } else {
    return context->GetTensor(context, node->outputs->data[index]);
  }
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  const TfLiteTensor* input_resource_id_tensor =
      GetInput(context, node, kInputVariableId);
  const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue);
  int resource_id = input_resource_id_tensor->data.i32[0];
  auto& resources = subgraph->resources();
  resource::CreateResourceVariableIfNotAvailable(&resources, resource_id);
  auto* variable = resource::GetResourceVariable(&resources, resource_id);
  TF_LITE_ENSURE(context, variable != nullptr);
  variable->AssignFrom(input_value_tensor);
  return kTfLiteOk;
}
Base
1
TfLiteRegistration OkOpRegistration() {
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  // Set output size to the input size in OkOp::Prepare(). Code exists to have
  // a framework in Prepare. The input and output tensors are not used.
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* in_tensor = GetInput(context, node, 0);
    TfLiteTensor* out_tensor = GetOutput(context, node, 0);
    TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
    return context->ResizeTensor(context, out_tensor, new_size);
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    return kTfLiteOk;
  };
  return reg;
}
Base
1
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
  const TfLiteTensor* bias =
      (NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr;
  TFLITE_DCHECK_EQ(input_type, input->type);
  switch (input_type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      if (filter->type == kTfLiteFloat32) {
        return EvalFloat<kernel_type>(context, node, params, data, input,
                                      filter, bias, output);
      } else if (filter->type == kTfLiteInt8) {
        return EvalHybridPerChannel<kernel_type>(context, node, params, data,
                                                 input, filter, bias, output);
      } else {
        TF_LITE_KERNEL_LOG(
            context, "Type %s with filter type %s not currently supported.",
            TfLiteTypeGetName(input->type), TfLiteTypeGetName(filter->type));
        return kTfLiteError;
      }
      break;
    case kTfLiteUInt8:
      return EvalQuantized<kernel_type>(context, node, params, data, input,
                                        filter, bias, output);
      break;
    case kTfLiteInt8:
      return EvalQuantizedPerChannel<kernel_type>(context, node, params, data,
                                                  input, filter, bias, output);
      break;
    case kTfLiteInt16:
      return EvalQuantizedPerChannel16x8(params, data, input, filter, bias,
                                         output);
      break;
    default:
      context->ReportError(context, "Type %d not currently supported.",
                           input->type);
      return kTfLiteError;
  }
}
Base
1
set_ssl_ciphers(SCHANNEL_CRED *schannel_cred, char *ciphers, int *algIds)
{
  char *startCur = ciphers;
  int algCount = 0;
  while(startCur && (0 != *startCur) && (algCount < NUMOF_CIPHERS)) {
    long alg = strtol(startCur, 0, 0);
    if(!alg)
      alg = get_alg_id_by_name(startCur);
    if(alg)
      algIds[algCount++] = alg;
    else if(!strncmp(startCur, "USE_STRONG_CRYPTO",
                     sizeof("USE_STRONG_CRYPTO") - 1) ||
            !strncmp(startCur, "SCH_USE_STRONG_CRYPTO",
                     sizeof("SCH_USE_STRONG_CRYPTO") - 1))
      schannel_cred->dwFlags |= SCH_USE_STRONG_CRYPTO;
    else
      return CURLE_SSL_CIPHER;
    startCur = strchr(startCur, ':');
    if(startCur)
      startCur++;
  }
  schannel_cred->palgSupportedAlgs = algIds;
  schannel_cred->cSupportedAlgs = algCount;
  return CURLE_OK;
}
Variant
0
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
  const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3);
  TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
  TF_LITE_ENSURE_TYPES_EQ(context, input_wav->type, output->type);
  TF_LITE_ENSURE_TYPES_EQ(context, input_rate->type, kTfLiteInt32);
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
  output_size->data[0] = input_wav->dims->data[0];
  output_size->data[1] = input_wav->dims->data[1];
  output_size->data[2] = params->dct_coefficient_count;
  return context->ResizeTensor(context, output, output_size);
}
Base
1
bool CheckRegion(int nPos, int nSize) { return (nPos >= 0 && nPos + nSize >= nPos && nPos + nSize <= m_nLen); }
Base
1
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
                                   OpContext* op_context) {
  // Creates a temp index to iterate through input data.
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  TfLiteIntArrayFree(node->temporaries);
  node->temporaries = TfLiteIntArrayCreate(3);
  node->temporaries->data[0] = op_data->scratch_tensor_index;
  TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
  scratch_tensor->type = kTfLiteInt32;
  scratch_tensor->allocation_type = kTfLiteArenaRw;
  TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
  index_size->data[0] = NumDimensions(op_context->input);
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, scratch_tensor, index_size));

  // Creates a temp tensor to store resolved axis given input data.
  node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
  resolved_axis->type = kTfLiteInt32;
  // Creates a temp tensor to store temp sums when calculating mean.
  node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
  TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
  switch (op_context->input->type) {
    case kTfLiteFloat32:
      temp_sum->type = kTfLiteFloat32;
      break;
    case kTfLiteInt32:
      temp_sum->type = kTfLiteInt64;
      break;
    case kTfLiteInt64:
      temp_sum->type = kTfLiteInt64;
      break;
    case kTfLiteUInt8:
    case kTfLiteInt8:
    case kTfLiteInt16:
      temp_sum->type = kTfLiteInt32;
      break;
    case kTfLiteBool:
      temp_sum->type = kTfLiteBool;
      break;
    default:
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, NumDimensions(input) <= 4);
  TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 ||
                              output->type == kTfLiteUInt8 ||
                              output->type == kTfLiteInt8);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, output->params.scale, (1. / 128.));
    if (output->type == kTfLiteUInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 128);
    }
    if (output->type == kTfLiteInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
    }
  }
  // TODO(ahentz): For some reason our implementations don't support
  // activations.
  TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone);
  TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
  return context->ResizeTensor(context, output, output_size);
}
Base
1
Status OpLevelCostEstimator::PredictAvgPool(const OpContext& op_context,
                                            NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  // x: op_info.inputs(0)
  ConvolutionDimensions dims = OpDimensionsFromInputs(
      op_info.inputs(0).shape(), op_info, &found_unknown_shapes);

  // kx * ky - 1 additions and 1 multiplication per output.
  int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * dims.kx * dims.ky;
  node_costs->num_compute_ops = ops;

  int64_t input_size;
  if (dims.ky >= dims.sy) {
    input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  } else {  // dims.ky < dims.sy
    // vertical stride is larger than vertical kernel; assuming row-major
    // format, skip unnecessary rows (or read every kx rows per sy rows, as the
    // others are not used for output).
    const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
    input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
  }
  node_costs->num_input_bytes_accessed = {input_size};
  const int64_t output_size =
      CalculateOutputSize(op_info, &found_unknown_shapes);
  node_costs->num_output_bytes_accessed = {output_size};
  node_costs->max_memory = output_size;
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return Status::OK();
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInput);
  const TfLiteTensor* axis = GetInput(context, node, kAxis);
  TfLiteTensor* output = GetOutput(context, node, 0);
  output->type = input->type;
  if (IsConstantTensor(axis)) {
    int axis_value;
    TF_LITE_ENSURE_OK(context,
                      GetAxisValueFromTensor(context, *axis, &axis_value));
    return ExpandTensorDim(context, *input, axis_value, output);
  }
  SetTensorToDynamic(output);
  return kTfLiteOk;
}
Base
1
inline bool loadModule(const char* filename, IR::Module& outModule)
{
  // Read the specified file into an array.
  std::vector<U8> fileBytes;
  if(!loadFile(filename, fileBytes)) { return false; }

  // If the file starts with the WASM binary magic number, load it as a binary irModule.
  if(*(U32*)fileBytes.data() == 0x6d736100)
  { return loadBinaryModule(fileBytes.data(), fileBytes.size(), outModule); }
  else
  {
    // Make sure the WAST file is null terminated.
    fileBytes.push_back(0);

    // Load it as a text irModule.
    std::vector<WAST::Error> parseErrors;
    if(!WAST::parseModule(
         (const char*)fileBytes.data(), fileBytes.size(), outModule, parseErrors))
    {
      Log::printf(Log::error, "Error parsing WebAssembly text file:\n");
      reportParseErrors(filename, parseErrors);
      return false;
    }
    return true;
  }
}
Base
1
static int lookup1_values(int entries, int dim)
{
   int r = (int) floor(exp((float) log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)   // (int) cast for MinGW warning;
      ++r;                                              // floor() to avoid _ftol() when non-CRT
   assert(pow((float) r+1, dim) > entries);
   assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
   return r;
}
Base
1
static float *get_window(vorb *f, int len)
{
   len <<= 1;
   if (len == f->blocksize_0) return f->window[0];
   if (len == f->blocksize_1) return f->window[1];
   assert(0);
   return NULL;
}
Base
1
static int decode_level3_header(LHAFileHeader **header, LHAInputStream *stream)
{
  unsigned int header_len;

  // The first field at the start of a level 3 header is supposed to
  // indicate word size, with the idea being that the header format
  // can be extended beyond 32-bit words in the future. In practise,
  // nothing supports anything other than 32-bit (4 bytes), and neither
  // do we.
  if (lha_decode_uint16(&RAW_DATA(header, 0)) != 4) {
    return 0;
  }

  // Read the full header.
  if (!extend_raw_data(header, stream,
                       LEVEL_3_HEADER_LEN - RAW_DATA_LEN(header))) {
    return 0;
  }

  // Read the header length field (including extended headers), and
  // extend to this full length. Because this is a 32-bit value,
  // we must place a sensible limit on the amount of data that will
  // be read, to avoid possibly allocating gigabytes of memory.
  header_len = lha_decode_uint32(&RAW_DATA(header, 24));
  if (header_len > LEVEL_3_MAX_HEADER_LEN) {
    return 0;
  }
  if (!extend_raw_data(header, stream, header_len - RAW_DATA_LEN(header))) {
    return 0;
  }

  // Compression method:
  memcpy((*header)->compress_method, &RAW_DATA(header, 2), 5);
  (*header)->compress_method[5] = '\0';

  // File lengths:
  (*header)->compressed_length = lha_decode_uint32(&RAW_DATA(header, 7));
  (*header)->length = lha_decode_uint32(&RAW_DATA(header, 11));

  // Unix-style timestamp.
  (*header)->timestamp = lha_decode_uint32(&RAW_DATA(header, 15));

  // CRC.
  (*header)->crc = lha_decode_uint16(&RAW_DATA(header, 21));

  // OS type:
  (*header)->os_type = RAW_DATA(header, 23);

  if (!decode_extended_headers(header, 28)) {
    return 0;
  }

  return 1;
}
Base
1
static String HHVM_FUNCTION(bcpow, const String& left, const String& right,
                            int64_t scale /* = -1 */) {
  if (scale < 0) scale = BCG(bc_precision);
  bc_num first, second, result;
  bc_init_num(&first);
  bc_init_num(&second);
  bc_init_num(&result);
  SCOPE_EXIT {
    bc_free_num(&first);
    bc_free_num(&second);
    bc_free_num(&result);
  };
  php_str2num(&first, (char*)left.data());
  php_str2num(&second, (char*)right.data());
  bc_raise(first, second, &result, scale);
  if (result->n_scale > scale) {
    result->n_scale = scale;
  }
  String ret(bc_num2str(result), AttachString);
  return ret;
}
Base
1
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs,
                                     const OpDef& op_def) {
  FullTypeDef ft;
  ft.set_type_id(TFT_PRODUCT);

  for (int i = 0; i < op_def.output_arg_size(); i++) {
    auto* t = ft.add_args();
    *t = op_def.output_arg(i).experimental_full_type();

    // Resolve dependent types. The convention for op registrations is to use
    // attributes as type variables.
    // See https://www.tensorflow.org/guide/create_op#type_polymorphism.
    // Once the op signature can be defined entirely in FullType, this
    // convention can be deprecated.
    //
    // Note: While this code performs some basic verifications, it generally
    // assumes consistent op defs and attributes. If more complete
    // verifications are needed, they should be done by separately, and in a
    // way that can be reused for type inference.
    for (int j = 0; j < t->args_size(); j++) {
      auto* arg = t->mutable_args(i);
      if (arg->type_id() == TFT_VAR) {
        const auto* attr = attrs.Find(arg->s());
        if (attr == nullptr) {
          return Status(
              error::INVALID_ARGUMENT,
              absl::StrCat("Could not find an attribute for key ", arg->s()));
        }
        if (attr->value_case() == AttrValue::kList) {
          const auto& attr_list = attr->list();
          arg->set_type_id(TFT_PRODUCT);
          for (int i = 0; i < attr_list.type_size(); i++) {
            map_dtype_to_tensor(attr_list.type(i), arg->add_args());
          }
        } else if (attr->value_case() == AttrValue::kType) {
          map_dtype_to_tensor(attr->type(), arg);
        } else {
          return Status(error::UNIMPLEMENTED,
                        absl::StrCat("unknown attribute type",
                                     attrs.DebugString(), " key=", arg->s()));
        }
        arg->clear_s();
      }
    }
  }
  return ft;
}
Base
1
const std::string& get_tenant() const { ceph_assert(t != Wildcard); return u.tenant; }
Base
1
static float *get_window(vorb *f, int len) { len <<= 1; if (len == f->blocksize_0) return f->window[0]; if (len == f->blocksize_1) return f->window[1]; assert(0); return NULL; }
Base
1
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
  struct fxregs_state fx_state;
  int rc;

  rc = check_fxsr(ctxt);
  if (rc != X86EMUL_CONTINUE)
    return rc;

  rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
  if (rc != X86EMUL_CONTINUE)
    return rc;

  if (fx_state.mxcsr >> 16)
    return emulate_gp(ctxt, 0);

  ctxt->ops->get_fpu(ctxt);

  if (ctxt->mode < X86EMUL_MODE_PROT64)
    rc = fxrstor_fixup(ctxt, &fx_state);

  if (rc == X86EMUL_CONTINUE)
    rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

  ctxt->ops->put_fpu(ctxt);

  return rc;
}
Class
2
set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) {
  lock_guard<std::recursive_mutex> guard(globalMutex);

  string pipePath = endpoint.name();
  if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) {
    throw runtime_error("Tried to listen twice on the same path");
  }

  sockaddr_un local;
  int fd = socket(AF_UNIX, SOCK_STREAM, 0);
  FATAL_FAIL(fd);
  initServerSocket(fd);
  local.sun_family = AF_UNIX; /* local is declared before socket() ^ */
  strcpy(local.sun_path, pipePath.c_str());
  unlink(local.sun_path);

  FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un)));
  ::listen(fd, 5);
#ifndef WIN32
  FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR));
#endif

  pipeServerSockets[pipePath] = set<int>({fd});
  return pipeServerSockets[pipePath];
}
Base
1
static int bson_append_estart( bson *b, int type, const char *name,
                               const int dataSize ) {
  const int len = strlen( name ) + 1;

  if ( b->finished ) {
    b->err |= BSON_ALREADY_FINISHED;
    return BSON_ERROR;
  }

  if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) {
    return BSON_ERROR;
  }

  if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) {
    bson_builder_error( b );
    return BSON_ERROR;
  }

  bson_append_byte( b, ( char )type );
  bson_append( b, name, len );
  return BSON_OK;
}
Base
1
int64_t OpLevelCostEstimator::CalculateOutputSize(const OpInfo& op_info,
                                                  bool* found_unknown_shapes) {
  int64_t total_output_size = 0;
  // Use float as default for calculations.
  for (const auto& output : op_info.outputs()) {
    DataType dt = output.dtype();
    const auto& original_output_shape = output.shape();
    int64_t output_size = DataTypeSize(BaseType(dt));
    int num_dims = std::max(1, original_output_shape.dim_size());
    auto output_shape = MaybeGetMinimumShape(original_output_shape, num_dims,
                                             found_unknown_shapes);
    for (const auto& dim : output_shape.dim()) {
      output_size *= dim.size();
    }
    total_output_size += output_size;
    VLOG(1) << "Output Size: " << output_size
            << " Total Output Size:" << total_output_size;
  }
  return total_output_size;
}
Base
1
std::string DecodeUnsafe(string_view encoded) {
  std::string raw;
  if (Decode(encoded, &raw)) {
    return raw;
  }
  return ToString(encoded);
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  static const int kOutputUniqueTensor = 0;
  static const int kOutputIndexTensor = 1;

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output_unique_tensor =
      GetOutput(context, node, kOutputUniqueTensor);
  TfLiteTensor* output_index_tensor =
      GetOutput(context, node, kOutputIndexTensor);

  // The op only supports 1D input.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
  TfLiteIntArray* output_index_shape = TfLiteIntArrayCopy(input->dims);
  // The unique values are determined during evaluation, so we don't know yet
  // the size of the output tensor.
  SetTensorToDynamic(output_unique_tensor);
  return context->ResizeTensor(context, output_index_tensor,
                               output_index_shape);
}
Base
1
void RemoteDevicePropertiesWidget::checkSaveable()
{
  RemoteFsDevice::Details det=details();
  modified=det!=orig;
  saveable=!det.isEmpty();
  if (saveable && Type_SambaAvahi==type->itemData(type->currentIndex()).toInt()) {
    saveable=!smbAvahiName->text().trimmed().isEmpty();
  }
  emit updated();
}
Base
1
bool DNP3_Base::AddToBuffer(Endpoint* endp, int target_len,
                            const u_char** data, int* len)
{
  if ( ! target_len )
    return true;

  int to_copy = min(*len, target_len - endp->buffer_len);
  memcpy(endp->buffer + endp->buffer_len, *data, to_copy);
  *data += to_copy;
  *len -= to_copy;
  endp->buffer_len += to_copy;

  return endp->buffer_len == target_len;
}
Class
2
bool PackLinuxElf32::calls_crt1(Elf32_Rel const *rel, int sz)
{
  if (!dynsym || !dynstr) {
    return false;
  }
  for (unsigned relnum= 0; 0 < sz; (sz -= sizeof(Elf32_Rel)), ++rel, ++relnum) {
    unsigned const symnum = get_te32(&rel->r_info) >> 8;
    char const *const symnam = get_dynsym_name(symnum, relnum);
    if (0==strcmp(symnam, "__libc_start_main")  // glibc
    ||  0==strcmp(symnam, "__libc_init")  // Android
    ||  0==strcmp(symnam, "__uClibc_main")
    ||  0==strcmp(symnam, "__uClibc_start_main"))
      return true;
  }
  return false;
}
Base
1
void Logger::addMessage(const QString &message, const Log::MsgType &type)
{
  QWriteLocker locker(&lock);

  Log::Msg temp = { msgCounter++, QDateTime::currentMSecsSinceEpoch(), type, message };
  m_messages.push_back(temp);

  if (m_messages.size() >= MAX_LOG_MESSAGES)
    m_messages.pop_front();

  emit newLogMessage(temp);
}
Base
1
R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
  ut64 offset = 0;
  RBinJavaAttrInfo *attr = NULL;
  attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
  offset += 6;
  if (attr && sz >= offset) {
    attr->type = R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR;
    attr->info.annotation_default_attr.default_value =
      r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset);
    if (attr->info.annotation_default_attr.default_value) {
      offset += attr->info.annotation_default_attr.default_value->size;
    }
  }
  r_bin_java_print_annotation_default_attr_summary (attr);
  return attr;
}
Base
1
Function *ESTreeIRGen::genGeneratorFunction(
    Identifier originalName,
    Variable *lazyClosureAlias,
    ESTree::FunctionLikeNode *functionNode) {
  assert(functionNode && "Function AST cannot be null");

  // Build the outer function which creates the generator.
  // Does not have an associated source range.
  auto *outerFn = Builder.createGeneratorFunction(
      originalName,
      Function::DefinitionKind::ES5Function,
      ESTree::isStrict(functionNode->strictness),
      /* insertBefore */ nullptr);

  auto *innerFn = genES5Function(
      genAnonymousLabelName(originalName.isValid() ? originalName.str() : ""),
      lazyClosureAlias,
      functionNode,
      true);

  {
    FunctionContext outerFnContext{this, outerFn, functionNode->getSemInfo()};
    emitFunctionPrologue(
        functionNode,
        Builder.createBasicBlock(outerFn),
        InitES5CaptureState::Yes,
        DoEmitParameters::No);

    // Create a generator function, which will store the arguments.
    auto *gen = Builder.createCreateGeneratorInst(innerFn);

    if (!hasSimpleParams(functionNode)) {
      // If there are non-simple params, step the inner function once to
      // initialize them.
      Value *next = Builder.createLoadPropertyInst(gen, "next");
      Builder.createCallInst(next, gen, {});
    }

    emitFunctionEpilogue(gen);
  }

  return outerFn;
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);

  auto data_type = output->type;
  TF_LITE_ENSURE(context,
                 data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 ||
                     data_type == kTfLiteInt8 || data_type == kTfLiteInt32 ||
                     data_type == kTfLiteInt64);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  const int block_size = params->block_size;
  const int input_height = input->dims->data[1];
  const int input_width = input->dims->data[2];
  const int input_channels = input->dims->data[3];
  int output_height = input_height * block_size;
  int output_width = input_width * block_size;
  int output_channels = input_channels / block_size / block_size;

  TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size);
  TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size);
  TF_LITE_ENSURE_EQ(context, input_channels,
                    output_channels * block_size * block_size);

  TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
  output_size->data[0] = input->dims->data[0];
  output_size->data[1] = output_height;
  output_size->data[2] = output_width;
  output_size->data[3] = output_channels;

  return context->ResizeTensor(context, output, output_size);
}
Base
1
static int java_switch_op(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *data, int len) {
  ut8 op_byte = data[0];
  ut64 offset = addr - java_get_method_start ();
  ut8 pos = (offset+1)%4 ? 1 + 4 - (offset+1)%4 : 1;
  if (op_byte == 0xaa) {
    // handle a table switch condition
    if (pos + 8 > len) {
      return op->size;
    }
    int min_val = (ut32)(UINT (data, pos + 4)),
        max_val = (ut32)(UINT (data, pos + 8));
    ut32 default_loc = (ut32) (UINT (data, pos)), cur_case = 0;
    op->switch_op = r_anal_switch_op_new (addr, min_val, default_loc);
    RAnalCaseOp *caseop = NULL;
    pos += 12;
    if (max_val > min_val && ((max_val - min_val)<(UT16_MAX/4))) {
      //caseop = r_anal_switch_op_add_case(op->switch_op, addr+default_loc, -1, addr+offset);
      for (cur_case = 0; cur_case <= max_val - min_val; pos += 4, cur_case++) {
        //ut32 value = (ut32)(UINT (data, pos));
        if (pos + 4 >= len) {
          // switch is too big cant read further
          break;
        }
        int offset = (int)(ut32)(R_BIN_JAVA_UINT (data, pos));
        caseop = r_anal_switch_op_add_case (op->switch_op, addr + pos, cur_case + min_val, addr + offset);
        if (caseop) {
          caseop->bb_ref_to = addr+offset;
          caseop->bb_ref_from = addr; // TODO figure this one out
        }
      }
    } else {
      eprintf ("Invalid switch boundaries at 0x%"PFMT64x"\n", addr);
    }
  }
  op->size = pos;
  return op->size;
}
Base
1
void HeaderTable::setCapacity(uint32_t capacity) {
  auto oldCapacity = capacity_;
  capacity_ = capacity;
  if (capacity_ <= oldCapacity) {
    evict(0);
  } else {
    auto oldTail = tail();
    auto oldLength = table_.size();
    uint32_t newLength = (capacity_ >> 5) + 1;
    table_.resize(newLength);
    if (size_ > 0 && oldTail > head_) {
      // the list wrapped around, need to move oldTail..oldLength to the end of
      // the now-larger table_
      std::copy(table_.begin() + oldTail, table_.begin() + oldLength,
                table_.begin() + newLength - (oldLength - oldTail));
      // Update the names indecies that pointed to the old range
      for (auto& names_it: names_) {
        for (auto& idx: names_it.second) {
          if (idx >= oldTail) {
            DCHECK_LT(idx + (table_.size() - oldLength), table_.size());
            idx += (table_.size() - oldLength);
          } else {
            // remaining indecies in the list were smaller than oldTail, so
            // should be indexed from 0
            break;
          }
        }
      }
    }
  }
}
Variant
0
int FdInStream::readWithTimeoutOrCallback(void* buf, int len, bool wait)
{
  struct timeval before, after;
  if (timing)
    gettimeofday(&before, 0);

  int n;
  while (true) {
    do {
      fd_set fds;
      struct timeval tv;
      struct timeval* tvp = &tv;

      if (!wait) {
        tv.tv_sec = tv.tv_usec = 0;
      } else if (timeoutms != -1) {
        tv.tv_sec = timeoutms / 1000;
        tv.tv_usec = (timeoutms % 1000) * 1000;
      } else {
        tvp = 0;
      }

      FD_ZERO(&fds);
      FD_SET(fd, &fds);
      n = select(fd+1, &fds, 0, 0, tvp);
    } while (n < 0 && errno == EINTR);

    if (n > 0) break;
    if (n < 0) throw SystemException("select",errno);
    if (!wait) return 0;
    if (!blockCallback) throw TimedOut();

    blockCallback->blockCallback();
  }

  do {
    n = ::recv(fd, (char*)buf, len, 0);
  } while (n < 0 && errno == EINTR);

  if (n < 0) throw SystemException("read",errno);
  if (n == 0) throw EndOfStream();

  if (timing) {
    gettimeofday(&after, 0);
    int newTimeWaited = ((after.tv_sec - before.tv_sec) * 10000 +
                         (after.tv_usec - before.tv_usec) / 100);
    int newKbits = n * 8 / 1000;

    // limit rate to between 10kbit/s and 40Mbit/s
    if (newTimeWaited > newKbits*1000) newTimeWaited = newKbits*1000;
    if (newTimeWaited < newKbits/4) newTimeWaited = newKbits/4;

    timeWaitedIn100us += newTimeWaited;
    timedKbits += newKbits;
  }

  return n;
}
Base
1
TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithWrongTransportSocket) {
  const std::string yaml = TestEnvironment::substitute(R"EOF(
address:
  socket_address:
    address: 127.0.0.1
    protocol: UDP
    port_value: 1234
filter_chains:
- filter_chain_match:
    transport_protocol: "quic"
  filters: []
  transport_socket:
    name: envoy.transport_sockets.quic
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
      common_tls_context:
        tls_certificates:
        - certificate_chain:
            filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"
          private_key:
            filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"
        validation_context:
          trusted_ca:
            filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
          match_subject_alt_names:
          - exact: localhost
          - exact: 127.0.0.1
udp_listener_config:
  quic_options: {}
  )EOF",
                                                       Network::Address::IpVersion::v4);

  envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml);

#if defined(ENVOY_ENABLE_QUIC)
  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException,
                          "wrong transport socket config specified for quic transport socket");
#else
  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException,
                          "QUIC is configured but not enabled in the build.");
#endif
}
Base
1
R_API RBinJavaAttrInfo *r_bin_java_read_next_attr_from_buffer(ut8 *buffer, st64 sz, st64 buf_offset) {
  RBinJavaAttrInfo *attr = NULL;
  char *name = NULL;
  ut64 offset = 0;
  ut16 name_idx;
  st64 nsz;
  RBinJavaAttrMetas *type_info = NULL;

  if (!buffer || ((int) sz) < 4 || buf_offset < 0) {
    eprintf ("r_bin_Java_read_next_attr_from_buffer: invalid buffer size %d\n", (int) sz);
    return NULL;
  }
  name_idx = R_BIN_JAVA_USHORT (buffer, offset);
  offset += 2;
  nsz = R_BIN_JAVA_UINT (buffer, offset);
  offset += 4;

  name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, name_idx);
  if (!name) {
    name = strdup ("unknown");
  }
  IFDBG eprintf("r_bin_java_read_next_attr: name_idx = %d is %s\n", name_idx, name);
  type_info = r_bin_java_get_attr_type_by_name (name);
  if (type_info) {
    IFDBG eprintf("Typeinfo: %s, was %s\n", type_info->name, name);
    // printf ("SZ %d %d %d\n", nsz, sz, buf_offset);
    if (nsz > sz) {
      free (name);
      return NULL;
    }
    if ((attr = type_info->allocs->new_obj (buffer, nsz, buf_offset))) {
      attr->metas->ord = (R_BIN_JAVA_GLOBAL_BIN->attr_idx++);
    }
  } else {
    eprintf ("r_bin_java_read_next_attr_from_buffer: Cannot find type_info for %s\n", name);
  }
  free (name);
  return attr;
}
Base
1
int FileInStream::overrun(int itemSize, int nItems, bool wait)
{
  if (itemSize > (int)sizeof(b))
    throw Exception("FileInStream overrun: max itemSize exceeded");

  if (end - ptr != 0)
    memmove(b, ptr, end - ptr);

  end -= ptr - b;
  ptr = b;

  while (end < b + itemSize) {
    size_t n = fread((U8 *)end, b + sizeof(b) - end, 1, file);
    if (n == 0) {
      if (ferror(file))
        throw SystemException("fread", errno);
      if (feof(file))
        throw EndOfStream();
      return 0;
    }
    end += b + sizeof(b) - end;
  }

  if (itemSize * nItems > end - ptr)
    nItems = (end - ptr) / itemSize;

  return nItems;
}
Base
1
static int lookup1_values(int entries, int dim)
{
   int r = (int) floor(exp((float) log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)   // (int) cast for MinGW warning;
      ++r;                                              // floor() to avoid _ftol() when non-CRT
   assert(pow((float) r+1, dim) > entries);
   assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
   return r;
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Reinterpret the opaque data provided by the user.
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

  const TfLiteType type = input1->type;
  switch (type) {
    case kTfLiteFloat32:
    case kTfLiteInt32:
      break;
    default:
      context->ReportError(context, "Type '%s' is not supported by floor_div.",
                           TfLiteTypeGetName(type));
      return kTfLiteError;
  }
  output->type = type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  return context->ResizeTensor(context, output, output_size);
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
  FillDiagHelper(input, diag, output);
  return kTfLiteOk;
}
Base
1
void CalculateOutputIndexRowSplit(
    const RowPartitionTensor& row_split,
    const vector<INDEX_TYPE>& parent_output_index,
    INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
    vector<INDEX_TYPE>* result) {
  INDEX_TYPE row_split_size = row_split.size();
  if (row_split_size > 0) {
    result->reserve(row_split(row_split_size - 1));
  }
  for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) {
    INDEX_TYPE row_length = row_split(i + 1) - row_split(i);
    INDEX_TYPE real_length = std::min(output_size, row_length);
    INDEX_TYPE parent_output_index_current = parent_output_index[i];

    if (parent_output_index_current == -1) {
      real_length = 0;
    }
    for (INDEX_TYPE j = 0; j < real_length; ++j) {
      result->push_back(parent_output_index_current);
      parent_output_index_current += output_index_multiplier;
    }
    for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) {
      result->push_back(-1);
    }
  }
  if (row_split_size > 0) {
    DCHECK_EQ(result->size(), row_split(row_split_size - 1));
  }
}
Base
1
jas_matrix_t *jas_matrix_create(int numrows, int numcols)
{
	jas_matrix_t *matrix;
	int i;
	size_t size;

	matrix = 0;

	if (numrows < 0 || numcols < 0) {
		goto error;
	}

	if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) {
		goto error;
	}
	matrix->flags_ = 0;
	matrix->numrows_ = numrows;
	matrix->numcols_ = numcols;
	matrix->rows_ = 0;
	matrix->maxrows_ = numrows;
	matrix->data_ = 0;
	matrix->datasize_ = 0;

	// matrix->datasize_ = numrows * numcols;
	if (!jas_safe_size_mul(numrows, numcols, &size)) {
		goto error;
	}
	matrix->datasize_ = size;

	if (matrix->maxrows_ > 0) {
		if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_,
		  sizeof(jas_seqent_t *)))) {
			goto error;
		}
	}

	if (matrix->datasize_ > 0) {
		if (!(matrix->data_ = jas_alloc2(matrix->datasize_,
		  sizeof(jas_seqent_t)))) {
			goto error;
		}
	}

	for (i = 0; i < numrows; ++i) {
		matrix->rows_[i] = &matrix->data_[i * matrix->numcols_];
	}

	for (i = 0; i < matrix->datasize_; ++i) {
		matrix->data_[i] = 0;
	}

	matrix->xstart_ = 0;
	matrix->ystart_ = 0;
	matrix->xend_ = matrix->numcols_;
	matrix->yend_ = matrix->numrows_;

	return matrix;

error:
	if (matrix) {
		jas_matrix_destroy(matrix);
	}
	return 0;
}
Base
1
Status CreateTempFile(Env* env, float value, uint64 size, string* filename) {
  const string dir = testing::TmpDir();
  *filename = io::JoinPath(dir, strings::StrCat("file_", value));
  std::unique_ptr<WritableFile> file;
  TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file));
  for (uint64 i = 0; i < size; ++i) {
    StringPiece sp(static_cast<char*>(static_cast<void*>(&value)),
                   sizeof(value));
    TF_RETURN_IF_ERROR(file->Append(sp));
  }
  TF_RETURN_IF_ERROR(file->Close());
  return Status::OK();
}
Base
1
Status OpLevelCostEstimator::PredictFusedBatchNormGrad(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  // y_backprop: op_info.inputs(0)
  // x: op_info.inputs(1)
  // scale: op_info.inputs(2)
  // mean: op_info.inputs(3)
  // variance or inverse of variance: op_info.inputs(4)
  ConvolutionDimensions dims = OpDimensionsFromInputs(
      op_info.inputs(1).shape(), op_info, &found_unknown_shapes);

  int64_t ops = 0;
  const auto rsqrt_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_rsqrt_op<float>>::Cost;
  ops = dims.iz * (dims.batch * dims.ix * dims.iy * 11 + 5 + rsqrt_cost);
  node_costs->num_compute_ops = ops;

  const int64_t size_nhwc =
      CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
  const int64_t size_c =
      CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
  // TODO(dyoon): fix missing memory cost for variance input (size_c) and
  // yet another read of y_backprop (size_nhwc) internally.
  node_costs->num_input_bytes_accessed = {size_nhwc, size_nhwc, size_c,
                                          size_c};
  node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c};
  // FusedBatchNormGrad has to read y_backprop internally.
  node_costs->internal_read_bytes = size_nhwc;
  node_costs->max_memory = node_costs->num_total_output_bytes();
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return Status::OK();
}
Base
1
QString Helper::temporaryMountDevice(const QString &device, const QString &name, bool readonly)
{
    QString mount_point = mountPoint(device);

    if (!mount_point.isEmpty())
        return mount_point;

    mount_point = "%1/.%2/mount/%3";

    const QStringList &tmp_paths = QStandardPaths::standardLocations(QStandardPaths::TempLocation);

    mount_point = mount_point.arg(tmp_paths.isEmpty() ? "/tmp" : tmp_paths.first()).arg(qApp->applicationName()).arg(name);

    if (!QDir::current().mkpath(mount_point)) {
        dCError("mkpath \"%s\" failed", qPrintable(mount_point));

        return QString();
    }

    if (!mountDevice(device, mount_point, readonly)) {
        dCError("Mount the device \"%s\" to \"%s\" failed", qPrintable(device), qPrintable(mount_point));

        return QString();
    }

    return mount_point;
}
Base
1
Json::Value SGXWalletServer::calculateAllBLSPublicKeysImpl(const Json::Value& publicShares, int t, int n) {
    spdlog::info("Entering {}", __FUNCTION__);
    INIT_RESULT(result)

    try {
        if (!check_n_t(t, n)) {
            throw SGXException(INVALID_DKG_PARAMS, "Invalid DKG parameters: n or t ");
        }

        if (!publicShares.isArray()) {
            throw SGXException(INVALID_DKG_PARAMS, "Invalid public shares format");
        }

        if (publicShares.size() != (uint64_t) n) {
            throw SGXException(INVALID_DKG_PARAMS, "Invalid length of public shares");
        }

        for (int i = 0; i < n; ++i) {
            if (!publicShares[i].isString()) {
                throw SGXException(INVALID_DKG_PARAMS, "Invalid public shares parts format");
            }

            if (publicShares[i].asString().length() != (uint64_t) 256 * t) {
                throw SGXException(INVALID_DKG_PARAMS, "Invalid length of public shares parts");
            }
        }

        vector<string> public_shares(n);
        for (int i = 0; i < n; ++i) {
            public_shares[i] = publicShares[i].asString();
        }

        vector<string> public_keys = calculateAllBlsPublicKeys(public_shares);

        if (public_keys.size() != n) {
            throw SGXException(UNKNOWN_ERROR, "");
        }

        for (int i = 0; i < n; ++i) {
            result["publicKeys"][i] = public_keys[i];
        }
    } HANDLE_SGX_EXCEPTION(result)

    RETURN_SUCCESS(result);
}
Base
1
uint64_t HeaderMapImpl::byteSize() const {
  uint64_t byte_size = 0;
  for (const HeaderEntryImpl& header : headers_) {
    byte_size += header.key().size();
    byte_size += header.value().size();
  }

  return byte_size;
}
Class
2
void CalculateOutputIndexValueRowID(
    const RowPartitionTensor& value_rowids,
    const vector<INDEX_TYPE>& parent_output_index,
    INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
    vector<INDEX_TYPE>* result) {
  const INDEX_TYPE index_size = value_rowids.size();
  result->reserve(index_size);
  if (index_size == 0) {
    return;
  }

  INDEX_TYPE current_output_column = 0;
  INDEX_TYPE current_value_rowid = value_rowids(0);
  DCHECK_LT(current_value_rowid, parent_output_index.size());
  INDEX_TYPE current_output_index = parent_output_index[current_value_rowid];
  result->push_back(current_output_index);
  for (INDEX_TYPE i = 1; i < index_size; ++i) {
    INDEX_TYPE next_value_rowid = value_rowids(i);
    if (next_value_rowid == current_value_rowid) {
      if (current_output_index >= 0) {
        ++current_output_column;
        if (current_output_column < output_size) {
          current_output_index += output_index_multiplier;
        } else {
          current_output_index = -1;
        }
      }
    } else {
      current_output_column = 0;
      current_value_rowid = next_value_rowid;
      DCHECK_LT(next_value_rowid, parent_output_index.size());
      current_output_index = parent_output_index[next_value_rowid];
    }
    result->push_back(current_output_index);
  }
  DCHECK_EQ(result->size(), value_rowids.size());
}
Base
1
int length() const { return m_str ? m_str->size() : 0; }
Base
1
TEST_F(RouterTest, MissingRequiredHeaders) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();

  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.removeMethod();

  EXPECT_CALL(encoder, encodeHeaders(_, _))
      .WillOnce(Invoke([](const Http::RequestHeaderMap& headers, bool) -> Http::Status {
        return Http::HeaderUtility::checkRequiredRequestHeaders(headers);
      }));
  EXPECT_CALL(
      callbacks_,
      sendLocalReply(Http::Code::ServiceUnavailable,
                     testing::Eq("missing required header: :method"), _, _,
                     "filter_removed_required_request_headers{missing_required_header:_:method}"))
      .WillOnce(testing::InvokeWithoutArgs([] {}));
  router_.decodeHeaders(headers, true);
  router_.onDestroy();
}
Class
2
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
   int dy = y1 - y0;
   int adx = x1 - x0;
   int ady = abs(dy);
   int base;
   int x=x0,y=y0;
   int err = 0;
   int sy;

#ifdef STB_VORBIS_DIVIDE_TABLE
   if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
      if (dy < 0) {
         base = -integer_divide_table[ady][adx];
         sy = base-1;
      } else {
         base =  integer_divide_table[ady][adx];
         sy = base+1;
      }
   } else {
      base = dy / adx;
      if (dy < 0)
         sy = base - 1;
      else
         sy = base+1;
   }
#else
   base = dy / adx;
   if (dy < 0)
      sy = base - 1;
   else
      sy = base+1;
#endif
   ady -= abs(base) * adx;
   if (x1 > n) x1 = n;
   if (x < x1) {
      LINE_OP(output[x], inverse_db_table[y]);
      for (++x; x < x1; ++x) {
         err += ady;
         if (err >= adx) {
            err -= adx;
            y += sy;
         } else
            y += base;
         LINE_OP(output[x], inverse_db_table[y]);
      }
   }
}
Base
1
void CLASS panasonic_load_raw()
{
  int row, col, i, j, sh = 0, pred[2], nonz[2];

  pana_bits(0);
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < raw_width; col++)
    {
      if ((i = col % 14) == 0)
        pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
      if (i % 3 == 2)
        sh = 4 >> (3 - pana_bits(2));
      if (nonz[i & 1])
      {
        if ((j = pana_bits(8)))
        {
          if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
            pred[i & 1] &= ~((~0u) << sh);
          pred[i & 1] += j << sh;
        }
      }
      else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
        pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
      if ((RAW(row, col) = pred[col & 1]) > 4098 && col < width)
        derror();
    }
  }
}
Class
2
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
  return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
}
Base
1
static String HHVM_FUNCTION(bcadd, const String& left, const String& right,
                            int64_t scale /* = -1 */) {
  if (scale < 0) scale = BCG(bc_precision);
  bc_num first, second, result;
  bc_init_num(&first);
  bc_init_num(&second);
  bc_init_num(&result);
  php_str2num(&first, (char*)left.data());
  php_str2num(&second, (char*)right.data());
  bc_add(first, second, &result, scale);
  if (result->n_scale > scale) {
    result->n_scale = scale;
  }
  String ret(bc_num2str(result), AttachString);
  bc_free_num(&first);
  bc_free_num(&second);
  bc_free_num(&result);
  return ret;
}
Base
1
unsigned int GetU32BE (int nPos, bool *pbSuccess)
{
    //*pbSuccess = true;

    if ( nPos < 0 || nPos + 3 >= m_nLen )
    {
        *pbSuccess = false;
        return 0;
    }

    unsigned int nRes = m_sFile[nPos];
    nRes = (nRes << 8) + m_sFile[nPos + 1];
    nRes = (nRes << 8) + m_sFile[nPos + 2];
    nRes = (nRes << 8) + m_sFile[nPos + 3];

    return nRes;
}
Base
1
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
Class
2
it "stores the list of seeds" do
  cluster.seeds.should eq ["127.0.0.1:27017", "127.0.0.1:27018"]
end
Class
2
it "changes the $orderby" do
  query.sort(a: 1)
  query.sort(a: 2)
  query.operation.selector.should eq(
    "$query" => selector,
    "$orderby" => { a: 2 }
  )
end
Class
2
def html_escape(s)
  s = s.to_s
  if s.html_safe?
    s
  else
    s.gsub(/[&"><]/) { |special| HTML_ESCAPE[special] }.html_safe
  end
end
Base
1
def config_backup(params, request, session)
  if params[:name]
    code, response = send_request_with_token(
      session, params[:name], 'config_backup', true
    )
  else
    if not allowed_for_local_cluster(session, Permissions::FULL)
      return 403, 'Permission denied'
    end
    $logger.info "Backup node configuration"
    stdout, stderr, retval = run_cmd(session, PCS, "config", "backup")
    if retval == 0
      $logger.info "Backup successful"
      return [200, stdout]
    end
    $logger.info "Error during backup: #{stderr.join(' ').strip()}"
    return [400, "Unable to backup node: #{stderr.join(' ')}"]
  end
end
Compound
4
def self.execute_action_move(mail_filter, email, val)
  mail_folder_id = val

  mail_folder = MailFolder.find_by_id(mail_folder_id)
  if !mail_folder.nil? and (mail_folder.user_id == email.user_id)
    email.update_attribute(:mail_folder_id, mail_folder_id)
  end
  return true
end
Base
1
it 'is possible to specify a custom formatter' do
  get '/'
  expect(last_response.body).to eq('{:custom_formatter=>"rain!"}')
end
Base
1