code
stringlengths 12
2.05k
| label_name
stringclasses 5
values | label
int64 0
4
|
---|---|---|
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
output->type = input->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
bool CxImage::Transfer(CxImage &from, bool bTransferFrames /*=true*/)
{
if (!Destroy())
return false;
memcpy(&head,&from.head,sizeof(BITMAPINFOHEADER));
memcpy(&info,&from.info,sizeof(CXIMAGEINFO));
pDib = from.pDib;
pSelection = from.pSelection;
pAlpha = from.pAlpha;
ppLayers = from.ppLayers;
memset(&from.head,0,sizeof(BITMAPINFOHEADER));
memset(&from.info,0,sizeof(CXIMAGEINFO));
from.pDib = from.pSelection = from.pAlpha = NULL;
from.ppLayers = NULL;
if (bTransferFrames){
DestroyFrames();
ppFrames = from.ppFrames;
from.ppFrames = NULL;
}
return true;
}
| Base | 1 |
inline typename V::VariantType FBUnserializer<V>::unserialize(
folly::StringPiece serialized) {
FBUnserializer<V> unserializer(serialized);
return unserializer.unserializeThing();
} | Class | 2 |
int64_t MemFile::readImpl(char *buffer, int64_t length) {
assertx(m_len != -1);
assertx(length > 0);
int64_t remaining = m_len - m_cursor;
if (remaining < length) length = remaining;
if (length > 0) {
memcpy(buffer, (const void *)(m_data + m_cursor), length);
}
m_cursor += length;
return length;
} | Base | 1 |
TEST_P(SslSPIFFECertValidatorIntegrationTest, ServerRsaSPIFFEValidatorSANNotMatch) {
auto typed_conf = new envoy::config::core::v3::TypedExtensionConfig();
TestUtility::loadFromYaml(TestEnvironment::substitute(R"EOF(
name: envoy.tls.cert_validator.spiffe
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig
trust_domains:
- name: lyft.com
trust_bundle:
filename: "{{ test_rundir }}/test/config/integration/certs/cacert.pem"
)EOF"),
*typed_conf);
custom_validator_config_ = typed_conf;
envoy::type::matcher::v3::StringMatcher matcher;
matcher.set_prefix("spiffe://example.com/");
// The cert has "DNS.1 = lyft.com" but SPIFFE validator must ignore SAN types other than URI.
matcher.set_prefix("www.lyft.com");
san_matchers_ = {matcher};
initialize();
auto conn = makeSslClientConnection({});
if (tls_version_ == envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2) {
auto codec = makeRawHttpConnection(std::move(conn), absl::nullopt);
EXPECT_FALSE(codec->connected());
} else {
auto codec = makeHttpConnection(std::move(conn));
ASSERT_TRUE(codec->waitForDisconnect());
codec->close();
}
checkVerifyErrorCouter(1);
} | Base | 1 |
static int putint(jas_stream_t *out, int sgnd, int prec, long val)
{
int n;
int c;
bool s;
ulong tmp;
assert((!sgnd && prec >= 1) || (sgnd && prec >= 2));
if (sgnd) {
val = encode_twos_comp(val, prec);
}
assert(val >= 0);
val &= (1 << prec) - 1;
n = (prec + 7) / 8;
while (--n >= 0) {
c = (val >> (n * 8)) & 0xff;
if (jas_stream_putc(out, c) != c)
return -1;
}
return 0;
} | Base | 1 |
void TLSOutStream::flush()
{
U8* sentUpTo = start;
while (sentUpTo < ptr) {
int n = writeTLS(sentUpTo, ptr - sentUpTo);
sentUpTo += n;
offset += n;
}
ptr = start;
out->flush();
} | Base | 1 |
TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
const int32_t* input_data = input->data.i32;
const TfLiteTensor* weight = tflite::GetInput(context, node, 1);
const uint8_t* weight_data = weight->data.uint8;
TfLiteTensor* output = GetOutput(context, node, 0);
int32_t* output_data = output->data.i32;
output_data[0] =
0; // Catch output tensor sharing memory with an input tensor
output_data[0] = input_data[0] + weight_data[0];
return kTfLiteOk;
} | Base | 1 |
void ZlibInStream::setUnderlying(InStream* is, int bytesIn_)
{
underlying = is;
bytesIn = bytesIn_;
ptr = end = start;
} | Base | 1 |
DSA_Signature_Operation(const DSA_PrivateKey& dsa, const std::string& emsa) :
PK_Ops::Signature_with_EMSA(emsa),
m_group(dsa.get_group()),
m_x(dsa.get_x()),
m_mod_q(dsa.group_q())
{
#if defined(BOTAN_HAS_RFC6979_GENERATOR)
m_rfc6979_hash = hash_for_emsa(emsa);
#endif
} | Class | 2 |
static port::StatusOr<CudnnRnnSequenceTensorDescriptor> Create(
GpuExecutor* parent, int max_seq_length, int batch_size, int data_size,
cudnnDataType_t data_type) {
CHECK_GT(max_seq_length, 0);
int dims[] = {batch_size, data_size, 1};
int strides[] = {dims[1] * dims[2], dims[2], 1};
TensorDescriptor tensor_desc = CreateTensorDescriptor();
RETURN_IF_CUDNN_ERROR(cudnnSetTensorNdDescriptor(
/*tensorDesc=*/tensor_desc.get(), /*dataType=*/data_type,
/*nbDims=*/sizeof(dims) / sizeof(dims[0]), /*dimA=*/dims,
/*strideA=*/strides));
return CudnnRnnSequenceTensorDescriptor(parent, max_seq_length, batch_size,
data_size, data_type,
nullptr,
std::move(tensor_desc));
} | Class | 2 |
TEST_CASE_METHOD(TestFixture, "ECDSA AES get public key", "[ecdsa-aes-get-pub-key]") {
int errStatus = 0;
vector<char> errMsg(BUF_LEN, 0);
vector <uint8_t> encPrivKey(BUF_LEN, 0);
vector<char> pubKeyX(BUF_LEN, 0);
vector<char> pubKeyY(BUF_LEN, 0);
uint32_t encLen = 0;
PRINT_SRC_LINE
auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encPrivKey.data(), &encLen, pubKeyX.data(),
pubKeyY.data());
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
vector<char> receivedPubKeyX(BUF_LEN, 0);
vector<char> receivedPubKeyY(BUF_LEN, 0);
PRINT_SRC_LINE
status = trustedGetPublicEcdsaKeyAES(eid, &errStatus, errMsg.data(), encPrivKey.data(), encLen,
receivedPubKeyX.data(),
receivedPubKeyY.data());
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
} | Base | 1 |
TfLiteRegistration AddOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.custom_name = "my_add";
reg.builtin_code = tflite::BuiltinOperator_CUSTOM;
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Set output size to input size
const TfLiteTensor* input1 = GetInput(context, node, 0);
const TfLiteTensor* input2 = GetInput(context, node, 1);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_EQ(context, input1->dims->size, input2->dims->size);
for (int i = 0; i < input1->dims->size; ++i) {
TF_LITE_ENSURE_EQ(context, input1->dims->data[i], input2->dims->data[i]);
}
TF_LITE_ENSURE_STATUS(context->ResizeTensor(
context, output, TfLiteIntArrayCopy(input1->dims)));
return kTfLiteOk;
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
// Copy input data to output data.
const TfLiteTensor* a0 = GetInput(context, node, 0);
TF_LITE_ENSURE(context, a0);
TF_LITE_ENSURE(context, a0->data.f);
const TfLiteTensor* a1 = GetInput(context, node, 1);
TF_LITE_ENSURE(context, a1);
TF_LITE_ENSURE(context, a1->data.f);
TfLiteTensor* out = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, out);
TF_LITE_ENSURE(context, out->data.f);
int num = a0->dims->data[0];
for (int i = 0; i < num; i++) {
out->data.f[i] = a0->data.f[i] + a1->data.f[i];
}
return kTfLiteOk;
};
return reg;
} | Base | 1 |
TEST_P(Http2CodecImplTest, TestLargeRequestHeadersAtLimitAccepted) {
uint32_t codec_limit_kb = 64;
max_request_headers_kb_ = codec_limit_kb;
initialize();
TestHeaderMapImpl request_headers;
HttpTestUtility::addDefaultHeaders(request_headers);
std::string key = "big";
uint32_t head_room = 77;
uint32_t long_string_length =
codec_limit_kb * 1024 - request_headers.byteSize() - key.length() - head_room;
std::string long_string = std::string(long_string_length, 'q');
request_headers.addCopy(key, long_string);
// The amount of data sent to the codec is not equivalent to the size of the
// request headers that Envoy computes, as the codec limits based on the
// entire http2 frame. The exact head room needed (76) was found through iteration.
ASSERT_EQ(request_headers.byteSize() + head_room, codec_limit_kb * 1024);
EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));
request_encoder_->encodeHeaders(request_headers, true);
} | Class | 2 |
void Compute(OpKernelContext* ctx) override {
const Tensor& handle = ctx->input(0);
const string& name = handle.scalar<tstring>()();
Tensor val;
OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val));
ctx->set_output(0, val);
} | Base | 1 |
int64_t MemFile::readImpl(char *buffer, int64_t length) {
assertx(m_len != -1);
assertx(length > 0);
int64_t remaining = m_len - m_cursor;
if (remaining < length) length = remaining;
if (length > 0) {
memcpy(buffer, (const void *)(m_data + m_cursor), length);
}
m_cursor += length;
return length;
} | Base | 1 |
void DCR_CLASS dcr_cam_xyz_coeff (DCRAW* p, double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < p->colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < p->colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
p->pre_mul[i] = 1 / (float)num;
}
dcr_pseudoinverse (cam_rgb, inverse, p->colors);
for (p->raw_color = i=0; i < 3; i++)
for (j=0; j < p->colors; j++)
p->rgb_cam[i][j] = (float)inverse[j][i];
}
| Base | 1 |
Jsi_RC jsi_evalcode(jsi_Pstate *ps, Jsi_Func *func, Jsi_OpCodes *opcodes,
jsi_ScopeChain *scope, Jsi_Value *fargs,
Jsi_Value *_this,
Jsi_Value **vret)
{
Jsi_Interp *interp = ps->interp;
if (interp->exited)
return JSI_ERROR;
Jsi_RC rc;
jsi_Frame frame = *interp->framePtr;
frame.parent = interp->framePtr;
interp->framePtr = &frame;
frame.parent->child = interp->framePtr = &frame;
frame.ps = ps;
frame.ingsc = scope;
frame.incsc = fargs;
frame.inthis = _this;
frame.opcodes = opcodes;
frame.fileName = ((func && func->script)?func->script:interp->curFile);
frame.funcName = interp->curFunction;
frame.dirName = interp->curDir;
if (frame.fileName && frame.fileName == frame.parent->fileName)
frame.logflag = frame.parent->logflag;
else
frame.logflag = 0;
frame.level = frame.parent->level+1;
frame.evalFuncPtr = func;
frame.arguments = NULL;
// if (func && func->strict)
// frame.strict = 1;
if (interp->curIp)
frame.parent->line = interp->curIp->Line;
frame.ip = interp->curIp;
interp->refCount++;
interp->level++;
Jsi_IncrRefCount(interp, fargs);
rc = jsi_evalcode_sub(ps, opcodes, scope, fargs, _this, *vret);
Jsi_DecrRefCount(interp, fargs);
if (interp->didReturn == 0 && !interp->exited) {
if ((interp->evalFlags&JSI_EVAL_RETURN)==0)
Jsi_ValueMakeUndef(interp, vret);
/*if (interp->framePtr->Sp != oldSp) //TODO: at some point after memory refs???
Jsi_LogBug("Stack not balance after execute script");*/
}
if (frame.arguments)
Jsi_DecrRefCount(interp, frame.arguments);
interp->didReturn = 0;
interp->refCount--;
interp->level--;
interp->framePtr = frame.parent;
interp->framePtr->child = NULL;
interp->curIp = frame.ip;
if (interp->exited)
rc = JSI_ERROR;
return rc;
} | Base | 1 |
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
auto col_params = new CollectiveParams();
auto done_with_cleanup = [col_params, done = std::move(done)]() {
done();
col_params->Unref();
};
core::RefCountPtr<CollectiveGroupResource> resource;
OP_REQUIRES_OK_ASYNC(c, LookupResource(c, HandleFromInput(c, 1), &resource),
done);
Tensor group_assignment = c->input(2);
OP_REQUIRES_OK_ASYNC(
c,
FillCollectiveParams(col_params, group_assignment, REDUCTION_COLLECTIVE,
resource.get()),
done);
col_params->instance.shape = c->input(0).shape();
col_params->merge_op = merge_op_.get();
col_params->final_op = final_op_.get();
VLOG(1) << "CollectiveReduceV3 group_size " << col_params->group.group_size
<< " group_key " << col_params->group.group_key << " instance_key "
<< col_params->instance.instance_key;
// Allocate the output tensor, trying to reuse the input.
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(c,
c->forward_input_or_allocate_output(
{0}, 0, col_params->instance.shape, &output),
done_with_cleanup);
Run(c, col_params, std::move(done_with_cleanup));
} | Variant | 0 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* lookup = GetInput(context, node, 0);
const TfLiteTensor* value = GetInput(context, node, 1);
TfLiteTensor* output = GetOutput(context, node, 0);
switch (value->type) {
case kTfLiteFloat32:
return EvalSimple(context, node, lookup, value, output);
case kTfLiteUInt8:
case kTfLiteInt8:
if (output->type == kTfLiteFloat32) {
return EvalHybrid(context, node, lookup, value, output);
} else {
return EvalSimple(context, node, lookup, value, output);
}
default:
context->ReportError(context, "Type not currently supported.");
return kTfLiteError;
}
} | Base | 1 |
MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) {
bson_iterator next = *elem;
int size;
bson_iterator_next( &next );
size = next.cur - elem->cur;
if ( name_or_null == NULL ) {
if( bson_ensure_space( b, size ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b, elem->cur, size );
}
else {
int data_size = size - 2 - strlen( bson_iterator_key( elem ) );
bson_append_estart( b, elem->cur[0], name_or_null, data_size );
bson_append( b, bson_iterator_value( elem ), data_size );
}
return BSON_OK;
} | Base | 1 |
TEST_F(HeaderTableTests, set_capacity) {
HPACKHeader accept("accept-encoding", "gzip");
uint32_t max = 10;
uint32_t capacity = accept.bytes() * max;
HeaderTable table(capacity);
// fill the table
for (size_t i = 0; i < max; i++) {
EXPECT_EQ(table.add(accept), true);
}
// change capacity
table.setCapacity(capacity / 2);
EXPECT_EQ(table.size(), max / 2);
EXPECT_EQ(table.bytes(), capacity / 2);
} | Variant | 0 |
MpdCantataMounterInterface * RemoteFsDevice::mounter()
{
if (!mounterIface) {
if (!QDBusConnection::systemBus().interface()->isServiceRegistered(MpdCantataMounterInterface::staticInterfaceName())) {
QDBusConnection::systemBus().interface()->startService(MpdCantataMounterInterface::staticInterfaceName());
}
mounterIface=new MpdCantataMounterInterface(MpdCantataMounterInterface::staticInterfaceName(),
"/Mounter", QDBusConnection::systemBus(), this);
connect(mounterIface, SIGNAL(mountStatus(const QString &, int, int)), SLOT(mountStatus(const QString &, int, int)));
connect(mounterIface, SIGNAL(umountStatus(const QString &, int, int)), SLOT(umountStatus(const QString &, int, int)));
}
return mounterIface;
} | Class | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* start = GetInput(context, node, kStartTensor);
const TfLiteTensor* limit = GetInput(context, node, kLimitTensor);
const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutput(context, start, limit, delta, output));
}
switch (output->type) {
case kTfLiteInt32: {
EvalImpl<int32_t>(start, delta, output);
break;
}
case kTfLiteFloat32: {
EvalImpl<float>(start, delta, output);
break;
}
default: {
context->ReportError(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} | Base | 1 |
static bool TryParse(const char* inp, int length,
TypedValue* buf, Variant& out,
JSONContainerType container_type, bool is_tsimplejson) {
SimpleParser parser(inp, length, buf, container_type, is_tsimplejson);
bool ok = parser.parseValue();
parser.skipSpace();
if (!ok || parser.p != inp + length) {
// Unsupported, malformed, or trailing garbage. Release entire stack.
tvDecRefRange(buf, parser.top);
return false;
}
out = Variant::attach(*--parser.top);
return true;
} | Base | 1 |
void FdInStream::readBytes(void* data, int length)
{
if (length < MIN_BULK_SIZE) {
InStream::readBytes(data, length);
return;
}
U8* dataPtr = (U8*)data;
int n = end - ptr;
if (n > length) n = length;
memcpy(dataPtr, ptr, n);
dataPtr += n;
length -= n;
ptr += n;
while (length > 0) {
n = readWithTimeoutOrCallback(dataPtr, length);
dataPtr += n;
length -= n;
offset += n;
}
} | Base | 1 |
int jas_seq2d_output(jas_matrix_t *matrix, FILE *out)
{
#define MAXLINELEN 80
int i;
int j;
jas_seqent_t x;
char buf[MAXLINELEN + 1];
char sbuf[MAXLINELEN + 1];
int n;
fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_seq2d_xstart(matrix),
jas_seq2d_ystart(matrix));
fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_matrix_numcols(matrix),
jas_matrix_numrows(matrix));
buf[0] = '\0';
for (i = 0; i < jas_matrix_numrows(matrix); ++i) {
for (j = 0; j < jas_matrix_numcols(matrix); ++j) {
x = jas_matrix_get(matrix, i, j);
sprintf(sbuf, "%s%4ld", (strlen(buf) > 0) ? " " : "",
JAS_CAST(long, x));
n = JAS_CAST(int, strlen(buf));
if (n + JAS_CAST(int, strlen(sbuf)) > MAXLINELEN) {
fputs(buf, out);
fputs("\n", out);
buf[0] = '\0';
}
strcat(buf, sbuf);
if (j == jas_matrix_numcols(matrix) - 1) {
fputs(buf, out);
fputs("\n", out);
buf[0] = '\0';
}
}
}
fputs(buf, out);
return 0;
} | Class | 2 |
TfLiteStatus PrepareHashtableImport(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1);
const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor);
const TfLiteTensor* value_tensor = GetInput(context, node, kValueTensor);
TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 &&
value_tensor->type == kTfLiteString) ||
(key_tensor->type == kTfLiteString &&
value_tensor->type == kTfLiteInt64));
// TODO(b/144731295): Tensorflow lookup ops support 1-D vector in storing
// values.
TF_LITE_ENSURE(context, HaveSameShapes(key_tensor, value_tensor));
return kTfLiteOk;
} | Base | 1 |
int64_t OutputFile::readImpl(char* /*buffer*/, int64_t /*length*/) {
raise_warning("cannot read from a php://output stream");
return -1;
} | Base | 1 |
void PngImg::InitStorage_() {
rowPtrs_.resize(info_.height, nullptr);
data_ = new png_byte[info_.height * info_.rowbytes];
for(size_t i = 0; i < info_.height; ++i) {
rowPtrs_[i] = data_ + i * info_.rowbytes;
}
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32);
op_data->max_diff = op_data->tolerance * op_context.input->params.scale;
switch (op_context.input->type) {
case kTfLiteUInt8:
case kTfLiteInt8:
op_data->max_diff *= (1 << 8);
break;
case kTfLiteInt16:
op_data->max_diff *= (1 << 16);
break;
default:
break;
}
// Allocate tensor to store the dequantized inputs.
if (op_data->cache_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &op_data->cache_tensor_id));
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->cache_tensor_id;
TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0);
dequantized->type = op_context.ref->type;
dequantized->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(
context, dequantized,
TfLiteIntArrayCopy(op_context.input->dims)));
return kTfLiteOk;
} | Base | 1 |
boost::optional<SaplingNotePlaintext> SaplingNotePlaintext::decrypt(
const SaplingEncCiphertext &ciphertext,
const uint256 &ivk,
const uint256 &epk,
const uint256 &cmu
)
{
auto pt = AttemptSaplingEncDecryption(ciphertext, ivk, epk);
if (!pt) {
return boost::none;
}
// Deserialize from the plaintext
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << pt.get();
SaplingNotePlaintext ret;
ss >> ret;
assert(ss.size() == 0);
uint256 pk_d;
if (!librustzcash_ivk_to_pkd(ivk.begin(), ret.d.data(), pk_d.begin())) {
return boost::none;
}
uint256 cmu_expected;
if (!librustzcash_sapling_compute_cm(
ret.d.data(),
pk_d.begin(),
ret.value(),
ret.rcm.begin(),
cmu_expected.begin()
))
{
return boost::none;
}
if (cmu_expected != cmu) {
return boost::none;
}
return ret;
} | Class | 2 |
static void exif_process_APP12(image_info_type *ImageInfo,
char *buffer, size_t length) {
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company",
TAG_NONE, TAG_FMT_STRING, l1, buffer+2);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1+1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info",
TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1);
}
}
} | Base | 1 |
MpdCantataMounterInterface * RemoteFsDevice::mounter()
{
if (!mounterIface) {
if (!QDBusConnection::systemBus().interface()->isServiceRegistered(MpdCantataMounterInterface::staticInterfaceName())) {
QDBusConnection::systemBus().interface()->startService(MpdCantataMounterInterface::staticInterfaceName());
}
mounterIface=new MpdCantataMounterInterface(MpdCantataMounterInterface::staticInterfaceName(),
"/Mounter", QDBusConnection::systemBus(), this);
connect(mounterIface, SIGNAL(mountStatus(const QString &, int, int)), SLOT(mountStatus(const QString &, int, int)));
connect(mounterIface, SIGNAL(umountStatus(const QString &, int, int)), SLOT(umountStatus(const QString &, int, int)));
}
return mounterIface;
} | Base | 1 |
BGD_DECLARE(void) gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
int x, y, pos;
Wbmp *wbmp;
/* create the WBMP */
if((wbmp = createwbmp(gdImageSX(image), gdImageSY(image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP\n");
return;
}
/* fill up the WBMP structure */
pos = 0;
for(y = 0; y < gdImageSY(image); y++) {
for(x = 0; x < gdImageSX(image); x++) {
if(gdImageGetPixel(image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
/* write the WBMP to a gd file descriptor */
if(writewbmp(wbmp, &gd_putout, out)) {
gd_error("Could not save WBMP\n");
}
/* des submitted this bugfix: gdFree the memory. */
freewbmp(wbmp);
} | Variant | 0 |
void Compute(OpKernelContext* ctx) override {
const Tensor& values_tensor = ctx->input(0);
const Tensor& value_range_tensor = ctx->input(1);
const Tensor& nbins_tensor = ctx->input(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(value_range_tensor.shape()),
errors::InvalidArgument("value_range should be a vector."));
OP_REQUIRES(ctx, (value_range_tensor.shape().num_elements() == 2),
errors::InvalidArgument(
"value_range should be a vector of 2 elements."));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(nbins_tensor.shape()),
errors::InvalidArgument("nbins should be a scalar."));
const auto values = values_tensor.flat<T>();
const auto value_range = value_range_tensor.flat<T>();
const auto nbins = nbins_tensor.scalar<int32>()();
OP_REQUIRES(
ctx, (value_range(0) < value_range(1)),
errors::InvalidArgument("value_range should satisfy value_range[0] < "
"value_range[1], but got '[",
value_range(0), ", ", value_range(1), "]'"));
OP_REQUIRES(
ctx, (nbins > 0),
errors::InvalidArgument("nbins should be a positive number, but got '",
nbins, "'"));
Tensor* out_tensor;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nbins}), &out_tensor));
auto out = out_tensor->flat<Tout>();
OP_REQUIRES_OK(
ctx, functor::HistogramFixedWidthFunctor<Device, T, Tout>::Compute(
ctx, values, value_range, nbins, out));
} | Class | 2 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* data = GetInput(context, node, kInputDataTensor);
const TfLiteTensor* segment_ids =
GetInput(context, node, kInputSegmentIdsTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context,
data->type == kTfLiteInt32 || data->type == kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32);
if (!IsConstantTensor(data) || !IsConstantTensor(segment_ids)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, data, segment_ids, output);
} | Base | 1 |
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
const auto* params =
reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
double alpha_multiplier =
input->params.scale * params->alpha / output->params.scale;
QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
&data->output_shift_alpha);
double identity_multiplier = input->params.scale / output->params.scale;
QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
&data->output_shift_identity);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputValue);
SetTensorToDynamic(output);
return kTfLiteOk;
} | Base | 1 |
inline void StringData::setSize(int len) {
assertx(!isImmutable() && !hasMultipleRefs());
assertx(len >= 0 && len <= capacity());
mutableData()[len] = 0;
m_lenAndHash = len;
assertx(m_hash == 0);
assertx(checkSane());
} | Base | 1 |
auto ReferenceHandle::Get(Local<Value> key_handle, MaybeLocal<Object> maybe_options) -> Local<Value> {
return ThreePhaseTask::Run<async, GetRunner>(*isolate, *this, key_handle, maybe_options, inherit);
} | Class | 2 |
static Jsi_RC StringSearchCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,
Jsi_Value **ret, Jsi_Func *funcPtr)
{
int sLen, bLen;
const char *source_str;
ChkString(_this, funcPtr, source_str, &sLen, &bLen);
char *v = _this->d.obj->d.s.str;
Jsi_Value *seq = Jsi_ValueArrayIndex(interp, args, skip);
if (Jsi_ValueIsString(interp, seq)) {
char *ce, *cp = Jsi_ValueString(interp, seq, NULL);
int n = -1;
if ((ce = Jsi_Strstr(source_str, cp))) {
n = (ce-source_str);
}
Jsi_ValueMakeNumber(interp, ret, n);
return JSI_OK;
}
if (!seq || seq->vt != JSI_VT_OBJECT || seq->d.obj->ot != JSI_OT_REGEXP) {
Jsi_ValueMakeNumber(interp, ret, -1);
return JSI_OK;
}
regex_t *reg = &seq->d.obj->d.robj->reg;
regmatch_t pos[MAX_SUBREGEX] = {};
int r;
if ((r = regexec(reg, v, MAX_SUBREGEX, pos, 0)) != 0) {
if (r == REG_NOMATCH) {
Jsi_ValueMakeNumber(interp, ret, -1.0);
return JSI_OK;
}
if (r >= REG_BADPAT) {
char buf[100];
regerror(r, reg, buf, sizeof(buf));
Jsi_LogError("error while matching pattern: %s", buf);
return JSI_ERROR;
}
}
Jsi_ValueMakeNumber(interp, ret, (Jsi_Number)pos[0].rm_so);
return JSI_OK;
} | Base | 1 |
TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersRejected) {
setup(false, "");
std::string response_code;
std::string response_body;
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {
StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_);
HeaderMapPtr headers{
new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
headers->addCopy(LowerCaseString("Foo"), std::string(60 * 1024, 'a'));
EXPECT_CALL(response_encoder_, encodeHeaders(_, true))
.WillOnce(Invoke([&response_code](const HeaderMap& headers, bool) -> void {
response_code = std::string(headers.Status()->value().getStringView());
}));
decoder->decodeHeaders(std::move(headers), true);
conn_manager_->newStream(response_encoder_);
}));
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false); // kick off request
EXPECT_EQ("431", response_code);
EXPECT_EQ("", response_body);
} | Class | 2 |
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
int FdOutStream::overrun(int itemSize, int nItems)
{
if (itemSize > bufSize)
throw Exception("FdOutStream overrun: max itemSize exceeded");
// First try to get rid of the data we have
flush();
// Still not enough space?
if (itemSize > end - ptr) {
// Can we shuffle things around?
// (don't do this if it gains us less than 25%)
if ((sentUpTo - start > bufSize / 4) &&
(itemSize < bufSize - (ptr - sentUpTo))) {
memmove(start, sentUpTo, ptr - sentUpTo);
ptr = start + (ptr - sentUpTo);
sentUpTo = start;
} else {
// Have to get rid of more data, so turn off non-blocking
// for a bit...
bool realBlocking;
realBlocking = blocking;
blocking = true;
flush();
blocking = realBlocking;
}
}
// Can we fit all the items asked for?
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | Base | 1 |
TfLiteStatus PrepareHashtableSize(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1);
TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output_tensor != nullptr);
TF_LITE_ENSURE_EQ(context, output_tensor->type, kTfLiteInt64);
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
outputSize->data[0] = 1;
return context->ResizeTensor(context, output_tensor, outputSize);
} | Base | 1 |
void CheckAuthTest::TestValidToken(const std::string &auth_token,
const std::string &user_info) {
EXPECT_CALL(*raw_request_, FindHeader("x-goog-iap-jwt-assertion", _))
.WillOnce(Invoke([](const std::string &, std::string *token) {
*token = "";
return false;
}));
EXPECT_CALL(*raw_request_, FindHeader(kAuthHeader, _))
.WillOnce(Invoke([auth_token](const std::string &, std::string *token) {
*token = std::string(kBearer) + auth_token;
return true;
}));
EXPECT_CALL(*raw_request_, SetAuthToken(auth_token)).Times(1);
EXPECT_CALL(*raw_env_, DoRunHTTPRequest(_))
.Times(2)
.WillOnce(Invoke([](HTTPRequest *req) {
EXPECT_EQ(req->url(), kIssuer1OpenIdUrl);
std::string body(kOpenIdContent);
std::map<std::string, std::string> empty;
req->OnComplete(Status::OK, std::move(empty), std::move(body));
}))
.WillOnce(Invoke([](HTTPRequest *req) {
EXPECT_EQ(req->url(), kIssuer1PubkeyUrl);
std::string body(kPubkey);
std::map<std::string, std::string> empty;
req->OnComplete(Status::OK, std::move(empty), std::move(body));
}));
std::cout << "need be replaced: " << user_info << std::endl;
EXPECT_CALL(*raw_request_,
AddHeaderToBackend(kEndpointApiUserInfo, user_info))
.WillOnce(Return(utils::Status::OK));
CheckAuth(context_, [](Status status) { ASSERT_TRUE(status.ok()); });
} | Base | 1 |
TEST_F(SQLiteUtilTests, test_column_type_determination) {
// Correct identification of text and ints
testTypesExpected("select path, inode from file where path like '%'",
TypeMap({{"path", TEXT_TYPE}, {"inode", INTEGER_TYPE}}));
// Correctly treating BLOBs as text
testTypesExpected("select CAST(seconds AS BLOB) as seconds FROM time",
TypeMap({{"seconds", TEXT_TYPE}}));
// Correctly treating ints cast as double as doubles
testTypesExpected("select CAST(seconds AS DOUBLE) as seconds FROM time",
TypeMap({{"seconds", DOUBLE_TYPE}}));
// Correctly treating bools as ints
testTypesExpected("select CAST(seconds AS BOOLEAN) as seconds FROM time",
TypeMap({{"seconds", INTEGER_TYPE}}));
// Correctly recognizing values from columns declared double as double, even
// if they happen to have integer value. And also test multi-statement
// queries.
testTypesExpected(
"CREATE TABLE test_types_table (username varchar(30) primary key, age "
"double);INSERT INTO test_types_table VALUES (\"mike\", 23); SELECT age "
"from test_types_table",
TypeMap({{"age", DOUBLE_TYPE}}));
} | Class | 2 |
error_t tcpCheckSeqNum(Socket *socket, TcpHeader *segment, size_t length)
{
//Acceptability test for an incoming segment
bool_t acceptable = FALSE;
//Case where both segment length and receive window are zero
if(!length && !socket->rcvWnd)
{
//Make sure that SEG.SEQ = RCV.NXT
if(segment->seqNum == socket->rcvNxt)
{
acceptable = TRUE;
}
}
//Case where segment length is zero and receive window is non zero
else if(!length && socket->rcvWnd)
{
//Make sure that RCV.NXT <= SEG.SEQ < RCV.NXT+RCV.WND
if(TCP_CMP_SEQ(segment->seqNum, socket->rcvNxt) >= 0 &&
TCP_CMP_SEQ(segment->seqNum, socket->rcvNxt + socket->rcvWnd) < 0)
{
acceptable = TRUE;
}
}
//Case where both segment length and receive window are non zero
else if(length && socket->rcvWnd)
{
//Check whether RCV.NXT <= SEG.SEQ < RCV.NXT+RCV.WND
if(TCP_CMP_SEQ(segment->seqNum, socket->rcvNxt) >= 0 &&
TCP_CMP_SEQ(segment->seqNum, socket->rcvNxt + socket->rcvWnd) < 0)
{
acceptable = TRUE;
}
//or RCV.NXT <= SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
else if(TCP_CMP_SEQ(segment->seqNum + length - 1, socket->rcvNxt) >= 0 &&
TCP_CMP_SEQ(segment->seqNum + length - 1, socket->rcvNxt + socket->rcvWnd) < 0)
{
acceptable = TRUE;
}
}
//Non acceptable sequence number?
if(!acceptable)
{
//Debug message
TRACE_WARNING("Sequence number is not acceptable!\r\n");
//If an incoming segment is not acceptable, an acknowledgment
//should be sent in reply (unless the RST bit is set)
if(!(segment->flags & TCP_FLAG_RST))
tcpSendSegment(socket, TCP_FLAG_ACK, socket->sndNxt, socket->rcvNxt, 0, FALSE);
//Return status code
return ERROR_FAILURE;
}
//Sequence number is acceptable
return NO_ERROR;
} | Class | 2 |
TypedValue HHVM_FUNCTION(substr_compare,
const String& main_str,
const String& str,
int offset,
int length /* = INT_MAX */,
bool case_insensitivity /* = false */) {
int s1_len = main_str.size();
int s2_len = str.size();
if (length <= 0) {
raise_warning("The length must be greater than zero");
return make_tv<KindOfBoolean>(false);
}
if (offset < 0) {
offset = s1_len + offset;
if (offset < 0) offset = 0;
}
if (offset >= s1_len) {
raise_warning("The start position cannot exceed initial string length");
return make_tv<KindOfBoolean>(false);
}
int cmp_len = s1_len - offset;
if (cmp_len < s2_len) cmp_len = s2_len;
if (cmp_len > length) cmp_len = length;
const char *s1 = main_str.data();
if (case_insensitivity) {
return tvReturn(bstrcasecmp(s1 + offset, cmp_len, str.data(), cmp_len));
}
return tvReturn(string_ncmp(s1 + offset, str.data(), cmp_len));
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis = GetInput(context, node, kAxis);
// Make sure the axis is only 1 dimension.
TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
// Make sure the axis is only either int32 or int64.
TF_LITE_ENSURE(context,
axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
switch (params->output_type) {
case kTfLiteInt32:
output->type = kTfLiteInt32;
break;
case kTfLiteInt64:
output->type = kTfLiteInt64;
break;
default:
context->ReportError(context, "Unknown index output data type: %d",
params->output_type);
return kTfLiteError;
}
// Check conditions for different types.
switch (input->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
break;
default:
context->ReportError(
context,
"Unknown input type: %d, only float32 and int types are supported",
input->type);
return kTfLiteError;
}
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
if (IsConstantTensor(axis)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
} | Base | 1 |
CdsIntegrationTest()
: HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(),
ConfigHelper::discoveredClustersBootstrap(
sotwOrDelta() == Grpc::SotwOrDelta::Sotw ||
sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw
? "GRPC"
: "DELTA_GRPC")) {
if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw ||
sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) {
config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true");
}
use_lds_ = false;
sotw_or_delta_ = sotwOrDelta();
} | Class | 2 |
void writeStats(Array& /*ret*/) override {
fprintf(stderr, "writeStats start\n");
// RetSame: the return value is the same instance every time
// HasThis: call has a this argument
// AllSame: all returns were the same data even though args are different
// MemberCount: number of different arg sets (including this)
fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis "
"AllSame MemberCount\n");
for (auto& me : m_memos) {
if (me.second.m_ignore) continue;
if (me.second.m_count == 1) continue;
int min_ser_len = 999999999;
int max_ser_len = 0;
int count = 0;
int member_count = 0;
bool all_same = true;
if (me.second.m_has_this) {
bool any_multiple = false;
auto& fr = me.second.m_member_memos.begin()->second.m_return_value;
member_count = me.second.m_member_memos.size();
for (auto& mme : me.second.m_member_memos) {
if (mme.second.m_return_value != fr) all_same = false;
count += mme.second.m_count;
auto ser_len = mme.second.m_return_value.length();
min_ser_len = std::min(min_ser_len, ser_len);
max_ser_len = std::max(max_ser_len, ser_len);
if (mme.second.m_count > 1) any_multiple = true;
}
if (!any_multiple && !all_same) continue;
} else {
min_ser_len = max_ser_len = me.second.m_return_value.length();
count = me.second.m_count;
all_same = me.second.m_ret_tv_same;
}
fprintf(stderr, "%d %s %d %d %s %s %s %d\n",
count, me.first.data(),
min_ser_len, max_ser_len,
me.second.m_ret_tv_same ? " true" : "false",
me.second.m_has_this ? " true" : "false",
all_same ? " true" : "false",
member_count
);
}
fprintf(stderr, "writeStats end\n");
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
context->ReportError(
context,
"Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
R_API RBinJavaVerificationObj *r_bin_java_verification_info_from_type(RBinJavaObj *bin, R_BIN_JAVA_STACKMAP_TYPE type, ut32 value) {
RBinJavaVerificationObj *se = R_NEW0 (RBinJavaVerificationObj);
if (!se) {
return NULL;
}
se->tag = type;
if (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) {
se->info.obj_val_cp_idx = (ut16) value;
} else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) {
/*if (bin->offset_sz == 4) {
se->info.uninit_offset = value;
} else {
se->info.uninit_offset = (ut16) value;
}*/
se->info.uninit_offset = (ut16) value;
}
return se;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Reinterprete the opaque data provided by user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteBool) {
context->ReportError(context, "Logical ops only support bool type.");
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
TEST(BasicInterpreter, AllocateTwice) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
TfLiteQuantizationParams quantized;
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quantized),
kTfLiteOk);
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* tensor0 = GetInput(context, node, 0);
TfLiteTensor* tensor1 = GetOutput(context, node, 0);
TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
return context->ResizeTensor(context, tensor1, newSize);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* a0 = GetInput(context, node, 0);
TfLiteTensor* a1 = GetOutput(context, node, 0);
int num = a0->dims->data[0];
for (int i = 0; i < num; i++) {
a1->data.f[i] = a0->data.f[i];
}
return kTfLiteOk;
};
ASSERT_EQ(
interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, ®),
kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
char* old_tensor0_ptr = interpreter.tensor(0)->data.raw;
char* old_tensor1_ptr = interpreter.tensor(1)->data.raw;
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(old_tensor0_ptr, interpreter.tensor(0)->data.raw);
ASSERT_EQ(old_tensor1_ptr, interpreter.tensor(1)->data.raw);
} | Base | 1 |
NTLM_AV_PAIR* ntlm_av_pair_get(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_ID AvId,
size_t* pcbAvPairListRemaining)
{
size_t cbAvPair = cbAvPairList;
NTLM_AV_PAIR* pAvPair = pAvPairList;
if (!ntlm_av_pair_check(pAvPair, cbAvPair))
pAvPair = NULL;
while (pAvPair)
{
UINT16 id = ntlm_av_pair_get_id(pAvPair);
if (id == AvId)
break;
if (id == MsvAvEOL)
{
pAvPair = NULL;
break;
}
pAvPair = ntlm_av_pair_next(pAvPair, &cbAvPair);
}
if (!pAvPair)
cbAvPair = 0;
if (pcbAvPairListRemaining)
*pcbAvPairListRemaining = cbAvPair;
return pAvPair;
} | Base | 1 |
TestCertificateValidationContextConfig(
envoy::config::core::v3::TypedExtensionConfig config, bool allow_expired_certificate = false,
std::vector<envoy::type::matcher::v3::StringMatcher> san_matchers = {})
: allow_expired_certificate_(allow_expired_certificate), api_(Api::createApiForTest()),
custom_validator_config_(config), san_matchers_(san_matchers){}; | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Reinterprete the opaque data provided by user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteBool) {
context->ReportError(context, "Logical ops only support bool type.");
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
bool ContentSettingsObserver::AllowScript(bool enabled_per_settings) {
if (!enabled_per_settings)
return false;
if (IsScriptDisabledForPreview(render_frame()))
return false;
if (is_interstitial_page_)
return true;
blink::WebLocalFrame* frame = render_frame()->GetWebFrame();
const auto it = cached_script_permissions_.find(frame);
if (it != cached_script_permissions_.end())
return it->second;
// Evaluate the content setting rules before
// IsWhitelistedForContentSettings(); if there is only the default rule
// allowing all scripts, it's quicker this way.
bool allow = true;
if (content_setting_rules_) {
ContentSetting setting = GetContentSettingFromRules(
content_setting_rules_->script_rules, frame,
url::Origin(frame->GetDocument().GetSecurityOrigin()).GetURL());
allow = setting != CONTENT_SETTING_BLOCK;
}
allow = allow || IsWhitelistedForContentSettings();
cached_script_permissions_[frame] = allow;
return allow;
} | Class | 2 |
TEST_P(WasmTest, DivByZero) {
Stats::IsolatedStoreImpl stats_store;
Api::ApiPtr api = Api::createApiForTest(stats_store);
Upstream::MockClusterManager cluster_manager;
Event::DispatcherPtr dispatcher(api->allocateDispatcher());
auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm."));
NiceMock<LocalInfo::MockLocalInfo> local_info;
auto name = "";
auto root_id = "";
auto vm_id = "";
auto vm_configuration = "";
auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(
name, root_id, vm_id, envoy::api::v2::core::TrafficDirection::UNSPECIFIED, local_info,
nullptr);
auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(
absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, plugin, scope,
cluster_manager, *dispatcher);
EXPECT_NE(wasm, nullptr);
const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/wasm/test_data/segv_cpp.wasm"));
EXPECT_FALSE(code.empty());
auto context = std::make_unique<TestContext>(wasm.get());
EXPECT_CALL(*context, scriptLog_(spdlog::level::err, Eq("before div by zero")));
EXPECT_TRUE(wasm->initialize(code, false));
wasm->setContext(context.get());
if (GetParam() == "v8") {
EXPECT_THROW_WITH_MESSAGE(
context->onLog(), Extensions::Common::Wasm::WasmException,
"Function: proxy_onLog failed: Uncaught RuntimeError: divide by zero");
} else if (GetParam() == "wavm") {
EXPECT_THROW_WITH_REGEX(context->onLog(), Extensions::Common::Wasm::WasmException,
"Function: proxy_onLog failed: wavm.integerDivideByZeroOrOverflow.*");
} else {
ASSERT_FALSE(true); // Neither of the above was matched.
}
} | Base | 1 |
bool WebContents::SendIPCMessageToFrame(bool internal,
int32_t frame_id,
const std::string& channel,
v8::Local<v8::Value> args) {
v8::Isolate* isolate = JavascriptEnvironment::GetIsolate();
blink::CloneableMessage message;
if (!gin::ConvertFromV8(isolate, args, &message)) {
isolate->ThrowException(v8::Exception::Error(
gin::StringToV8(isolate, "Failed to serialize arguments")));
return false;
}
auto frames = web_contents()->GetAllFrames();
auto iter = std::find_if(frames.begin(), frames.end(), [frame_id](auto* f) {
return f->GetRoutingID() == frame_id;
});
if (iter == frames.end())
return false;
if (!(*iter)->IsRenderFrameLive())
return false;
mojo::AssociatedRemote<mojom::ElectronRenderer> electron_renderer;
(*iter)->GetRemoteAssociatedInterfaces()->GetInterface(&electron_renderer);
electron_renderer->Message(internal, channel, std::move(message),
0 /* sender_id */);
return true;
} | Class | 2 |
bool MemFile::seek(int64_t offset, int whence /* = SEEK_SET */) {
assertx(m_len != -1);
if (whence == SEEK_CUR) {
if (offset > 0 && offset < bufferedLen()) {
setReadPosition(getReadPosition() + offset);
setPosition(getPosition() + offset);
return true;
}
offset += getPosition();
whence = SEEK_SET;
}
// invalidate the current buffer
setWritePosition(0);
setReadPosition(0);
if (whence == SEEK_SET) {
m_cursor = offset;
} else {
assertx(whence == SEEK_END);
m_cursor = m_len + offset;
}
setPosition(m_cursor);
return true;
} | Base | 1 |
std::string decodeBase64(
const std::string& encoded) {
if (encoded.size() == 0) {
// special case, to prevent an integer overflow down below.
return "";
}
using namespace boost::archive::iterators;
using b64it =
transform_width<binary_from_base64<std::string::const_iterator>, 8, 6>;
std::string decoded = std::string(b64it(std::begin(encoded)),
b64it(std::end(encoded)));
uint32_t numPadding = std::count(encoded.begin(), encoded.end(), '=');
decoded.erase(decoded.end() - numPadding, decoded.end());
return decoded;
} | Base | 1 |
char* HexOutStream::binToHexStr(const char* data, int length) {
char* buffer = new char[length*2+1];
for (int i=0; i<length; i++) {
buffer[i*2] = intToHex((data[i] >> 4) & 15);
buffer[i*2+1] = intToHex((data[i] & 15));
if (!buffer[i*2] || !buffer[i*2+1]) {
delete [] buffer;
return 0;
}
}
buffer[length*2] = 0;
return buffer;
} | Base | 1 |
ALWAYS_INLINE String serialize_impl(const Variant& value,
const SerializeOptions& opts) {
switch (value.getType()) {
case KindOfClass:
case KindOfLazyClass:
case KindOfPersistentString:
case KindOfString: {
auto const str =
isStringType(value.getType()) ? value.getStringData() :
isClassType(value.getType()) ? classToStringHelper(value.toClassVal()) :
lazyClassToStringHelper(value.toLazyClassVal());
auto const size = str->size();
if (size >= RuntimeOption::MaxSerializedStringSize) {
throw Exception("Size of serialized string (%d) exceeds max", size);
}
StringBuffer sb;
sb.append("s:");
sb.append(size);
sb.append(":\"");
sb.append(str->data(), size);
sb.append("\";");
return sb.detach();
}
case KindOfResource:
return s_Res;
case KindOfUninit:
case KindOfNull:
case KindOfBoolean:
case KindOfInt64:
case KindOfFunc:
case KindOfPersistentVec:
case KindOfVec:
case KindOfPersistentDict:
case KindOfDict:
case KindOfPersistentKeyset:
case KindOfKeyset:
case KindOfPersistentDArray:
case KindOfDArray:
case KindOfPersistentVArray:
case KindOfVArray:
case KindOfDouble:
case KindOfObject:
case KindOfClsMeth:
case KindOfRClsMeth:
case KindOfRFunc:
case KindOfRecord:
break;
}
VariableSerializer vs(VariableSerializer::Type::Serialize);
if (opts.keepDVArrays) vs.keepDVArrays();
if (opts.forcePHPArrays) vs.setForcePHPArrays();
if (opts.warnOnHackArrays) vs.setHackWarn();
if (opts.warnOnPHPArrays) vs.setPHPWarn();
if (opts.ignoreLateInit) vs.setIgnoreLateInit();
if (opts.serializeProvenanceAndLegacy) vs.setSerializeProvenanceAndLegacy();
// Keep the count so recursive calls to serialize() embed references properly.
return vs.serialize(value, true, true);
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
case kTfLiteFloat32:
reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
break;
default:
context->ReportError(
context,
"Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
Status CheckInputs(Tensor group_size_t, Tensor group_key_t) {
if (group_size_t.dims() > 0) {
return errors::Internal(
"Unexpected dimensions on input group_size. "
"It shoulbe a scalar, got tensor with shape ",
group_size_t.shape().DebugString());
}
if (group_key_t.dims() > 0) {
return errors::Internal("Unexpected dimensions on input group_key, got ",
group_key_t.shape().DebugString());
}
auto group_size = group_size_t.unaligned_flat<int32>()(0);
if (group_size <= 0) {
return errors::InvalidArgument(
"group_size must be positive integer but got ", group_size);
}
return Status::OK();
} | Variant | 0 |
int64_t BZ2File::readImpl(char * buf, int64_t length) {
if (length == 0) {
return 0;
}
assertx(m_bzFile);
int len = BZ2_bzread(m_bzFile, buf, length);
/* Sometimes libbz2 will return fewer bytes than requested, and set bzerror
* to BZ_STREAM_END, but it's not actually EOF, and you can keep reading from
* the file - so, only set EOF after a failed read. This matches PHP5.
*/
if (len <= 0) {
setEof(true);
if (len < 0) {
return -1;
}
}
return len;
} | Base | 1 |
void onComplete(const Status& status, ContextImpl& context) const override {
auto& completion_state = context.getCompletionState(this);
if (completion_state.is_completed_) {
return;
}
// If any of children is OK, return OK
if (Status::Ok == status) {
completion_state.is_completed_ = true;
completeWithStatus(status, context);
return;
}
// Then wait for all children to be done.
if (++completion_state.number_completed_children_ == verifiers_.size()) {
// Aggregate all children status into a final status.
// JwtMissing should be treated differently than other failure status
// since it simply means there is not Jwt token for the required provider.
// If there is a failure status other than JwtMissing in the children,
// it should be used as the final status.
Status final_status = Status::JwtMissed;
for (const auto& it : verifiers_) {
// If a Jwt is extracted from a location not specified by the required provider,
// the authenticator returns JwtUnknownIssuer. It should be treated the same as
// JwtMissed.
Status child_status = context.getCompletionState(it.get()).status_;
if (child_status != Status::JwtMissed && child_status != Status::JwtUnknownIssuer) {
final_status = child_status;
}
}
if (is_allow_missing_or_failed_) {
final_status = Status::Ok;
} else if (is_allow_missing_ && final_status == Status::JwtMissed) {
final_status = Status::Ok;
}
completion_state.is_completed_ = true;
completeWithStatus(final_status, context);
}
} | Class | 2 |
static String HHVM_FUNCTION(bcsub, const String& left, const String& right,
int64_t scale /* = -1 */) {
if (scale < 0) scale = BCG(bc_precision);
bc_num first, second, result;
bc_init_num(&first);
bc_init_num(&second);
bc_init_num(&result);
php_str2num(&first, (char*)left.data());
php_str2num(&second, (char*)right.data());
bc_sub(first, second, &result, scale);
if (result->n_scale > scale) {
result->n_scale = scale;
}
String ret(bc_num2str(result), AttachString);
bc_free_num(&first);
bc_free_num(&second);
bc_free_num(&result);
return ret;
} | Base | 1 |
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
int index) {
if (index >= 0 && index < node->outputs->size) {
const int tensor_index = node->outputs->data[index];
if (tensor_index != kTfLiteOptionalTensor) {
if (context->tensors != nullptr) {
return &context->tensors[tensor_index];
} else {
return context->GetTensor(context, tensor_index);
}
}
}
return nullptr;
} | Base | 1 |
QInt16() {} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
for (int i = 0; i < input_dims_size; i++) {
output_shape->data[i] = input_dims->data[i];
}
// Resize the output tensor to the same size as the input tensor.
output->type = input->type;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
return kTfLiteOk;
} | Base | 1 |
int ZlibInStream::overrun(int itemSize, int nItems, bool wait)
{
if (itemSize > bufSize)
throw Exception("ZlibInStream overrun: max itemSize exceeded");
if (!underlying)
throw Exception("ZlibInStream overrun: no underlying stream");
if (end - ptr != 0)
memmove(start, ptr, end - ptr);
offset += ptr - start;
end -= ptr - start;
ptr = start;
while (end - ptr < itemSize) {
if (!decompress(wait))
return 0;
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | Class | 2 |
static optional<Principal> parse_principal(CephContext* cct, TokenID t,
string&& s) {
// Wildcard!
if ((t == TokenID::AWS) && (s == "*")) {
return Principal::wildcard();
// Do nothing for now.
} else if (t == TokenID::CanonicalUser) {
// AWS ARNs
} else if (t == TokenID::AWS) {
auto a = ARN::parse(s);
if (!a) {
if (std::none_of(s.begin(), s.end(),
[](const char& c) {
return (c == ':') || (c == '/');
})) {
// Since tenants are simply prefixes, there's no really good
// way to see if one exists or not. So we return the thing and
// let them try to match against it.
return Principal::tenant(std::move(s));
}
}
if (a->resource == "root") {
return Principal::tenant(std::move(a->account));
}
static const char rx_str[] = "([^/]*)/(.*)";
static const regex rx(rx_str, sizeof(rx_str) - 1,
ECMAScript | optimize);
smatch match;
if (regex_match(a->resource, match, rx)) {
ceph_assert(match.size() == 3);
if (match[1] == "user") {
return Principal::user(std::move(a->account),
match[2]);
}
if (match[1] == "role") {
return Principal::role(std::move(a->account),
match[2]);
}
}
}
ldout(cct, 0) << "Supplied principal is discarded: " << s << dendl;
return boost::none;
} | Base | 1 |
void RemoteFsDevice::unmount()
{
if (details.isLocalFile()) {
return;
}
if (!isConnected() || proc) {
return;
}
if (messageSent) {
return;
}
if (constSambaProtocol==details.url.scheme() || constSambaAvahiProtocol==details.url.scheme()) {
mounter()->umount(mountPoint(details, false), getpid());
setStatusMessage(tr("Disconnecting..."));
messageSent=true;
return;
}
QString cmd;
QStringList args;
if (!details.isLocalFile()) {
QString mp=mountPoint(details, false);
if (!mp.isEmpty()) {
cmd=Utils::findExe("fusermount");
if (!cmd.isEmpty()) {
args << QLatin1String("-u") << QLatin1String("-z") << mp;
} else {
emit error(tr("\"fusermount\" is not installed!"));
}
}
}
if (!cmd.isEmpty()) {
setStatusMessage(tr("Disconnecting..."));
proc=new QProcess(this);
proc->setProperty("unmount", true);
connect(proc, SIGNAL(finished(int)), SLOT(procFinished(int)));
proc->start(cmd, args, QIODevice::ReadOnly);
}
} | Class | 2 |
void RemoteFsDevice::serviceRemoved(const QString &name)
{
if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) {
sub=tr("Not Available");
updateStatus();
}
} | Base | 1 |
MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) {
if ( type == BSON_BIN_BINARY_OLD ) {
int subtwolen = len + 4;
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &subtwolen );
bson_append_byte( b, type );
bson_append32( b, &len );
bson_append( b, str, len );
}
else {
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &len );
bson_append_byte( b, type );
bson_append( b, str, len );
}
return BSON_OK;
} | Base | 1 |
static String HHVM_FUNCTION(bcmul, const String& left, const String& right,
int64_t scale /* = -1 */) {
if (scale < 0) scale = BCG(bc_precision);
bc_num first, second, result;
bc_init_num(&first);
bc_init_num(&second);
bc_init_num(&result);
php_str2num(&first, (char*)left.data());
php_str2num(&second, (char*)right.data());
bc_multiply(first, second, &result, scale);
if (result->n_scale > scale) {
result->n_scale = scale;
}
String ret(bc_num2str(result), AttachString);
bc_free_num(&first);
bc_free_num(&second);
bc_free_num(&result);
return ret;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
auto data_type = output->type;
TF_LITE_ENSURE(context,
data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 ||
data_type == kTfLiteInt8 || data_type == kTfLiteInt32 ||
data_type == kTfLiteInt64);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const int block_size = params->block_size;
const int input_height = input->dims->data[1];
const int input_width = input->dims->data[2];
int output_height = input_height / block_size;
int output_width = input_width / block_size;
TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size);
TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = output_height;
output_size->data[2] = output_width;
output_size->data[3] = input->dims->data[3] * block_size * block_size;
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Reinterprete the opaque data provided by user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt32 && type != kTfLiteFloat32 && type != kTfLiteInt64) {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
output->type = input->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < NumOutputs(node); ++i) {
SetTensorToDynamic(GetOutput(context, node, i));
}
return kTfLiteOk;
} | Base | 1 |
TEST_P(SslSocketTest, FailedClientAuthSanVerificationNoClientCert) {
const std::string client_ctx_yaml = R"EOF(
common_tls_context:
)EOF";
const std::string server_ctx_yaml = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
private_key:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
validation_context:
trusted_ca:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
match_subject_alt_names:
exact: "example.com"
)EOF";
TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());
testUtil(test_options.setExpectedServerStats("ssl.fail_verify_no_cert"));
} | Base | 1 |
Status InferenceContext::Multiply(DimensionHandle first,
DimensionOrConstant second,
DimensionHandle* out) {
const int64_t first_value = Value(first);
const int64_t second_value = Value(second);
// Special cases.
if (first_value == 0) {
*out = first;
} else if (second_value == 0) {
*out = MakeDim(second);
} else if (first_value == 1) {
*out = MakeDim(second);
} else if (second_value == 1) {
*out = first;
} else if (first_value == kUnknownDim || second_value == kUnknownDim) {
*out = UnknownDim();
} else {
// Invariant: Both values are known and greater than 1.
const int64_t product = first_value * second_value;
if (product < 0) {
return errors::InvalidArgument(
"Negative dimension size caused by overflow when multiplying ",
first_value, " and ", second_value);
}
*out = MakeDim(product);
}
return Status::OK();
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
int32_t* out_buf = GetOutput(context, node, 0)->data.i32;
const TfLiteTensor* hash = GetInput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 1);
const TfLiteTensor* weight =
NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2);
switch (params->type) {
case kTfLiteLshProjectionDense:
DenseLshProjection(hash, input, weight, out_buf);
break;
case kTfLiteLshProjectionSparse:
SparseLshProjection(hash, input, weight, out_buf);
break;
default:
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
void *jas_malloc(size_t size)
{
void *result;
JAS_DBGLOG(101, ("jas_malloc called with %zu\n", size));
result = malloc(size);
JAS_DBGLOG(100, ("jas_malloc(%zu) -> %p\n", size, result));
return result;
} | Base | 1 |
Envoy::Ssl::ClientValidationStatus DefaultCertValidator::verifyCertificate(
X509* cert, const std::vector<std::string>& verify_san_list,
const std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>&
subject_alt_name_matchers) {
Envoy::Ssl::ClientValidationStatus validated = Envoy::Ssl::ClientValidationStatus::NotValidated;
if (!verify_san_list.empty()) {
if (!verifySubjectAltName(cert, verify_san_list)) {
stats_.fail_verify_san_.inc();
return Envoy::Ssl::ClientValidationStatus::Failed;
}
validated = Envoy::Ssl::ClientValidationStatus::Validated;
}
if (!subject_alt_name_matchers.empty()) {
if (!matchSubjectAltName(cert, subject_alt_name_matchers)) {
stats_.fail_verify_san_.inc();
return Envoy::Ssl::ClientValidationStatus::Failed;
}
validated = Envoy::Ssl::ClientValidationStatus::Validated;
}
if (!verify_certificate_hash_list_.empty() || !verify_certificate_spki_list_.empty()) {
const bool valid_certificate_hash =
!verify_certificate_hash_list_.empty() &&
verifyCertificateHashList(cert, verify_certificate_hash_list_);
const bool valid_certificate_spki =
!verify_certificate_spki_list_.empty() &&
verifyCertificateSpkiList(cert, verify_certificate_spki_list_);
if (!valid_certificate_hash && !valid_certificate_spki) {
stats_.fail_verify_cert_hash_.inc();
return Envoy::Ssl::ClientValidationStatus::Failed;
}
validated = Envoy::Ssl::ClientValidationStatus::Validated;
}
return validated;
} | Base | 1 |
bool CFontFileType1::RemovePfbMarkers()
{
bool bSuccess = true;
int nBlockType = 0;
int nBlockLen = 0;
int nChar = 0;
unsigned char *sBuffer = NULL;
int nBufLen = 0;
while ( nBlockType != PFB_DONE )
{
while ( 0 == nBlockLen )
{
nChar = ReadU8( &bSuccess );
if ( !bSuccess )
return false;
nBlockType = ReadU8( &bSuccess );
if ( !bSuccess || PFB_MARKER != nChar || ( PFB_ASCII != nBlockType && PFB_BINARY != nBlockType && PFB_DONE != nBlockType ) )
return false;
if ( PFB_DONE == nBlockType )
break;
nBlockLen = ReadU32LE( &bSuccess );
if ( !bSuccess )
return false;
}
// Читаем сам блок данных
if ( nBlockLen > 0 )
{
if ( !sBuffer )
{
sBuffer = (unsigned char*)MemUtilsMalloc( nBlockLen );
if ( !sBuffer )
return false;
}
else
sBuffer = (unsigned char*)MemUtilsRealloc( sBuffer, nBufLen + nBlockLen );
Read( sBuffer + nBufLen, nBlockLen );
nBufLen += nBlockLen;
}
nBlockLen = 0;
}
if ( m_bFreeFileData )
MemUtilsFree( m_sFile );
m_bFreeFileData = true;
m_sFile = (unsigned char*)sBuffer;
m_sFileData = m_sFile;
m_nLen = nBufLen;
m_nPos = 0;
return true;
} | Base | 1 |
TEST(DefaultCertValidatorTest, TestCertificateVerificationWithSANMatcher) {
Stats::TestUtil::TestStore test_store;
SslStats stats = generateSslStats(test_store);
// Create the default validator object.
auto default_validator =
std::make_unique<Extensions::TransportSockets::Tls::DefaultCertValidator>(
/*CertificateValidationContextConfig=*/nullptr, stats,
Event::GlobalTimeSystem().timeSystem());
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.MergeFrom(TestUtility::createRegexMatcher(".*.example.com"));
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>> san_matchers;
san_matchers.push_back(Matchers::StringMatcherImpl(matcher));
// Verify the certificate with correct SAN regex matcher.
EXPECT_EQ(default_validator->verifyCertificate(cert.get(), /*verify_san_list=*/{}, san_matchers),
Envoy::Ssl::ClientValidationStatus::Validated);
EXPECT_EQ(stats.fail_verify_san_.value(), 0);
matcher.MergeFrom(TestUtility::createExactMatcher("hello.example.com"));
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
invalid_san_matchers;
invalid_san_matchers.push_back(Matchers::StringMatcherImpl(matcher));
// Verify the certificate with incorrect SAN exact matcher.
EXPECT_EQ(default_validator->verifyCertificate(cert.get(), /*verify_san_list=*/{},
invalid_san_matchers),
Envoy::Ssl::ClientValidationStatus::Failed);
EXPECT_EQ(stats.fail_verify_san_.value(), 1);
} | Base | 1 |
static inline bool isMountable(const RemoteFsDevice::Details &d)
{
return RemoteFsDevice::constSshfsProtocol==d.url.scheme() ||
RemoteFsDevice::constSambaProtocol==d.url.scheme() || RemoteFsDevice::constSambaAvahiProtocol==d.url.scheme();
} | Class | 2 |
TfLiteStatus ComputeDepthMultiplier(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
int16* depth_multiplier) {
int num_filter_channels = SizeOfDimension(filter, 3);
int num_input_channels = SizeOfDimension(input, 3);
TF_LITE_ENSURE_EQ(context, num_filter_channels % num_input_channels, 0);
*depth_multiplier = num_filter_channels / num_input_channels;
return kTfLiteOk;
} | Base | 1 |
shared_ptr <vector<uint8_t>> check_and_set_SEK(const string &SEK) {
vector<char> decr_key(BUF_LEN, 0);
vector<char> errMsg(BUF_LEN, 0);
int err_status = 0;
auto encrypted_SEK = make_shared < vector < uint8_t >> (BUF_LEN, 0);
uint32_t l = 0;
sgx_status_t status = trustedSetSEK_backup(eid, &err_status, errMsg.data(), encrypted_SEK->data(), &l,
SEK.c_str());
encrypted_SEK->resize(l);
HANDLE_TRUSTED_FUNCTION_ERROR(status, err_status, errMsg.data());
validate_SEK();
return encrypted_SEK;
} | Base | 1 |
string encryptBLSKeyShare2Hex(int *errStatus, char *err_string, const char *_key) {
CHECK_STATE(errStatus);
CHECK_STATE(err_string);
CHECK_STATE(_key);
auto keyArray = make_shared<vector<char>>(BUF_LEN, 0);
auto encryptedKey = make_shared<vector<uint8_t>>(BUF_LEN, 0);
vector<char> errMsg(BUF_LEN, 0);
strncpy(keyArray->data(), _key, BUF_LEN);
*errStatus = 0;
unsigned int encryptedLen = 0;
sgx_status_t status = trustedEncryptKeyAES(eid, errStatus, errMsg.data(), keyArray->data(), encryptedKey->data(), &encryptedLen);
HANDLE_TRUSTED_FUNCTION_ERROR(status, *errStatus, errMsg.data());
SAFE_CHAR_BUF(resultBuf, 2 * BUF_LEN + 1);
carray2Hex(encryptedKey->data(), encryptedLen, resultBuf, 2 * BUF_LEN + 1);
return string(resultBuf);
} | Base | 1 |
static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
{
char * const s = b->ptr;
const int blen = (int)buffer_string_length(b);
const int used = qs < 0 ? blen : qs;
int j = i;
for (; i < used; ++i, ++j) {
s[j] = s[i];
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
s[j] = '/';
i+=2;
}
}
if (qs >= 0) {
memmove(s+j, s+qs, blen - qs);
j += blen - qs;
}
buffer_string_set_length(b, j);
return qs;
} | Base | 1 |
void writeBytes(const void* data, int length) {
const U8* dataPtr = (const U8*)data;
const U8* dataEnd = dataPtr + length;
while (dataPtr < dataEnd) {
int n = check(1, dataEnd - dataPtr);
memcpy(ptr, dataPtr, n);
ptr += n;
dataPtr += n;
}
} | Base | 1 |
TEST_CASE_METHOD(TestFixture, "DKG AES public shares test", "[dkg-aes-pub-shares]") {
vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0);
vector<char> errMsg(BUF_LEN, 0);
int errStatus = 0;
uint32_t encLen = 0;
unsigned t = 32, n = 32;
PRINT_SRC_LINE
auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, n);
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
vector<char> errMsg1(BUF_LEN, 0);
char colon = ':';
vector<char> pubShares(10000, 0);
PRINT_SRC_LINE
status = trustedGetPublicSharesAES(eid, &errStatus, errMsg1.data(),
encryptedDKGSecret.data(), encLen, pubShares.data(), t, n);
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
vector <string> g2Strings = splitString(pubShares.data(), ',');
vector <libff::alt_bn128_G2> pubSharesG2;
for (u_int64_t i = 0; i < g2Strings.size(); i++) {
vector <string> coeffStr = splitString(g2Strings.at(i).c_str(), ':');
pubSharesG2.push_back(TestUtils::vectStringToG2(coeffStr));
}
vector<char> secret(BUF_LEN, 0);
PRINT_SRC_LINE
status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen,
(uint8_t *) secret.data());
REQUIRE(status == SGX_SUCCESS);
REQUIRE(errStatus == SGX_SUCCESS);
signatures::Dkg dkgObj(t, n);
vector <libff::alt_bn128_Fr> poly = TestUtils::splitStringToFr(secret.data(), colon);
vector <libff::alt_bn128_G2> pubSharesDkg = dkgObj.VerificationVector(poly);
for (uint32_t i = 0; i < pubSharesDkg.size(); i++) {
libff::alt_bn128_G2 el = pubSharesDkg.at(i);
el.to_affine_coordinates();
}
REQUIRE(pubSharesG2 == pubSharesDkg);
} | Base | 1 |
static gboolean cosine_read(wtap *wth, int *err, gchar **err_info,
gint64 *data_offset)
{
gint64 offset;
int pkt_len;
char line[COSINE_LINE_LENGTH];
/* Find the next packet */
offset = cosine_seek_next_packet(wth, err, err_info, line);
if (offset < 0)
return FALSE;
*data_offset = offset;
/* Parse the header */
pkt_len = parse_cosine_rec_hdr(&wth->phdr, line, err, err_info);
if (pkt_len == -1)
return FALSE;
/* Convert the ASCII hex dump to binary data */
return parse_cosine_hex_dump(wth->fh, &wth->phdr, pkt_len,
wth->frame_buffer, err, err_info);
} | Class | 2 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.