code (string, 12–2.05k chars) | label_name (string, 5 classes) | label (int64, 0–4)
---|---|---|
void TestJlCompress::extractDir_data()
{
QTest::addColumn<QString>("zipName");
QTest::addColumn<QStringList>("fileNames");
QTest::newRow("simple") << "jlextdir.zip" << (
QStringList() << "test0.txt" << "testdir1/test1.txt"
<< "testdir2/test2.txt" << "testdir2/subdir/test2sub.txt");
QTest::newRow("separate dir") << "sepdir.zip" << (
QStringList() << "laj/" << "laj/lajfile.txt");
} | Base | 1 |
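A minimal sketch of the matching test slot that would consume these columns (assumed, following the standard QTest data-driven pattern; not part of the dataset row):

void TestJlCompress::extractDir()
{
    // QFETCH pulls the column values for the row currently being executed.
    QFETCH(QString, zipName);
    QFETCH(QStringList, fileNames);
    // ... create the archive named zipName, extract it, then verify that
    // every entry in fileNames exists in the extraction directory ...
}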
bool TemporaryFile::deleteTemporaryFile() const
{
// Have a few attempts at deleting the file before giving up..
for (int i = 5; --i >= 0;)
{
if (temporaryFile.deleteFile())
return true;
Thread::sleep (50);
}
return false;
}
| Base | 1 |
int64_t OpLevelCostEstimator::CalculateTensorSize(
const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
int64_t count = CalculateTensorElementCount(tensor, found_unknown_shapes);
int size = DataTypeSize(BaseType(tensor.dtype()));
VLOG(2) << "Count: " << count << " DataTypeSize: " << size;
return count * size;
} | Base | 1 |
void runTest() override
{
beginTest ("ZIP");
ZipFile::Builder builder;
StringArray entryNames { "first", "second", "third" };
HashMap<String, MemoryBlock> blocks;
for (auto& entryName : entryNames)
{
auto& block = blocks.getReference (entryName);
MemoryOutputStream mo (block, false);
mo << entryName;
mo.flush();
builder.addEntry (new MemoryInputStream (block, false), 9, entryName, Time::getCurrentTime());
}
MemoryBlock data;
MemoryOutputStream mo (data, false);
builder.writeToStream (mo, nullptr);
MemoryInputStream mi (data, false);
ZipFile zip (mi);
expectEquals (zip.getNumEntries(), entryNames.size());
for (auto& entryName : entryNames)
{
auto* entry = zip.getEntry (entryName);
std::unique_ptr<InputStream> input (zip.createStreamForEntry (*entry));
expectEquals (input->readEntireStreamAsString(), entryName);
}
}
| Base | 1 |
Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) {
if (!wasm_->onRequestBody_) {
return Http::FilterDataStatus::Continue;
}
switch (wasm_
->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
static_cast<uint32_t>(end_of_stream))
.u64_) {
case 0:
return Http::FilterDataStatus::Continue;
case 1:
return Http::FilterDataStatus::StopIterationAndBuffer;
case 2:
return Http::FilterDataStatus::StopIterationAndWatermark;
default:
return Http::FilterDataStatus::StopIterationNoBuffer;
}
} | Base | 1 |
static inline bool isValid(const RemoteFsDevice::Details &d)
{
return d.isLocalFile() || RemoteFsDevice::constSshfsProtocol==d.url.scheme() ||
RemoteFsDevice::constSambaProtocol==d.url.scheme() || RemoteFsDevice::constSambaAvahiProtocol==d.url.scheme();
} | Base | 1 |
jas_matrix_t *jas_matrix_copy(jas_matrix_t *x)
{
jas_matrix_t *y;
int i;
int j;
y = jas_matrix_create(x->numrows_, x->numcols_);
for (i = 0; i < x->numrows_; ++i) {
for (j = 0; j < x->numcols_; ++j) {
*jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j);
}
}
return y;
} | Class | 2 |
void RemoteFsDevice::serviceAdded(const QString &name)
{
if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) {
sub=tr("Available");
updateStatus();
}
} | Class | 2 |
int ZlibOutStream::length()
{
return offset + ptr - start;
} | Base | 1 |
R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut32 i = 0;
ut64 offset = 0;
if (buf_offset + 8 > sz) {
return NULL;
}
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (attr) {
attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR;
attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free);
for (i = 0; i < attr->info.annotation_array.num_annotations; i++) {
if (offset >= sz) {
break;
}
RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset);
if (annotation) {
offset += annotation->size;
r_list_append (attr->info.annotation_array.annotations, (void *) annotation);
}
}
attr->size = offset;
}
return attr;
} | Base | 1 |
int linenoiseHistorySave(const char* filename) {
FILE* fp = fopen(filename, "wt");
if (fp == NULL) {
return -1;
}
for (int j = 0; j < historyLen; ++j) {
if (history[j][0] != '\0') {
fprintf(fp, "%s\n", history[j]);
}
}
fclose(fp);
return 0;
} | Class | 2 |
AP4_HdlrAtom::AP4_HdlrAtom(AP4_UI32 size,
AP4_UI08 version,
AP4_UI32 flags,
AP4_ByteStream& stream) :
AP4_Atom(AP4_ATOM_TYPE_HDLR, size, version, flags)
{
AP4_UI32 predefined;
stream.ReadUI32(predefined);
stream.ReadUI32(m_HandlerType);
stream.ReadUI32(m_Reserved[0]);
stream.ReadUI32(m_Reserved[1]);
stream.ReadUI32(m_Reserved[2]);
// read the name unless it is empty
if (size < AP4_FULL_ATOM_HEADER_SIZE+20) return;
AP4_UI32 name_size = size-(AP4_FULL_ATOM_HEADER_SIZE+20);
char* name = new char[name_size+1];
if (name == NULL) return;
stream.Read(name, name_size);
name[name_size] = '\0'; // force a null termination
// handle a special case: the Quicktime files have a pascal
// string here, but ISO MP4 files have a C string.
// we try to detect a pascal encoding and correct it.
if (name[0] == name_size-1) {
m_HandlerName = name+1;
} else {
m_HandlerName = name;
}
delete[] name;
} | Base | 1 |
error_t ssiProcessExecCommand(HttpConnection *connection, const char_t *tag, size_t length)
{
char_t *separator;
char_t *attribute;
char_t *value;
//First, check whether CGI is supported by the server
if(connection->settings->cgiCallback == NULL)
return ERROR_INVALID_TAG;
//Discard invalid SSI directives
if(length < 4 || length >= HTTP_SERVER_BUFFER_SIZE)
return ERROR_INVALID_TAG;
//Skip the SSI exec command (4 bytes)
osMemcpy(connection->buffer, tag + 4, length - 4);
//Ensure the resulting string is NULL-terminated
connection->buffer[length - 4] = '\0';
//Check whether a separator is present
separator = strchr(connection->buffer, '=');
//Separator not found?
if(!separator)
return ERROR_INVALID_TAG;
//Split the tag
*separator = '\0';
//Get attribute name and value
attribute = strTrimWhitespace(connection->buffer);
value = strTrimWhitespace(separator + 1);
//Remove leading simple or double quote
if(value[0] == '\'' || value[0] == '\"')
value++;
//Get the length of the attribute value
length = osStrlen(value);
//Remove trailing simple or double quote
if(length > 0)
{
if(value[length - 1] == '\'' || value[length - 1] == '\"')
value[length - 1] = '\0';
}
//Enforce attribute name
if(osStrcasecmp(attribute, "cgi") && osStrcasecmp(attribute, "cmd") && osStrcasecmp(attribute, "cmd_argument"))
return ERROR_INVALID_TAG;
//Check the length of the CGI parameter
if(osStrlen(value) > HTTP_SERVER_CGI_PARAM_MAX_LEN)
return ERROR_INVALID_TAG;
//The scratch buffer may be altered by the user-defined callback.
//So the CGI parameter must be copied prior to function invocation
osStrcpy(connection->cgiParam, value);
//Invoke user-defined callback
return connection->settings->cgiCallback(connection, connection->cgiParam);
} | Class | 2 |
Status check_index_ordering(const Tensor& indices) {
auto findices = indices.flat<int>();
for (std::size_t i = 0; i < findices.dimension(0) - 1; ++i) {
if (findices(i) < findices(i + 1)) {
continue;
}
return Status(
errors::InvalidArgument("Indices are not strictly ordered"));
}
return Status::OK();
} | Base | 1 |
void CreateNgrams(const tstring* data, tstring* output, int num_ngrams,
int ngram_width) const {
for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) {
int pad_width = get_pad_width(ngram_width);
int left_padding = std::max(0, pad_width - ngram_index);
int right_padding =
std::max(0, pad_width - (num_ngrams - (ngram_index + 1)));
int num_tokens = ngram_width - (left_padding + right_padding);
int data_start_index = left_padding > 0 ? 0 : ngram_index - pad_width;
// Calculate the total expected size of the ngram so we can reserve the
// correct amount of space in the string.
int ngram_size = 0;
// Size of the left padding.
ngram_size += left_padding * left_pad_.length();
// Size of the tokens.
for (int n = 0; n < num_tokens; ++n) {
ngram_size += data[data_start_index + n].length();
}
// Size of the right padding.
ngram_size += right_padding * right_pad_.length();
// Size of the separators.
int num_separators = left_padding + right_padding + num_tokens - 1;
ngram_size += num_separators * separator_.length();
// Build the ngram.
tstring* ngram = &output[ngram_index];
ngram->reserve(ngram_size);
for (int n = 0; n < left_padding; ++n) {
ngram->append(left_pad_);
ngram->append(separator_);
}
for (int n = 0; n < num_tokens - 1; ++n) {
ngram->append(data[data_start_index + n]);
ngram->append(separator_);
}
ngram->append(data[data_start_index + num_tokens - 1]);
for (int n = 0; n < right_padding; ++n) {
ngram->append(separator_);
ngram->append(right_pad_);
}
// In debug mode only: validate that we've reserved enough space for the
// ngram.
DCHECK_EQ(ngram_size, ngram->size());
}
} | Base | 1 |
TfLiteStatus PrepareHashtableImport(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1);
const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor);
const TfLiteTensor* value_tensor = GetInput(context, node, kValueTensor);
TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 &&
value_tensor->type == kTfLiteString) ||
(key_tensor->type == kTfLiteString &&
value_tensor->type == kTfLiteInt64));
// TODO(b/144731295): Tensorflow lookup ops support 1-D vector in storing
// values.
TF_LITE_ENSURE(context, HaveSameShapes(key_tensor, value_tensor));
return kTfLiteOk;
} | Base | 1 |
int bson_ensure_space( bson *b, const int bytesNeeded ) {
int pos = b->cur - b->data;
char *orig = b->data;
int new_size;
if ( pos + bytesNeeded <= b->dataSize )
return BSON_OK;
new_size = 1.5 * ( b->dataSize + bytesNeeded );
if( new_size < b->dataSize ) {
if( ( b->dataSize + bytesNeeded ) < INT_MAX )
new_size = INT_MAX;
else {
b->err = BSON_SIZE_OVERFLOW;
return BSON_ERROR;
}
}
b->data = bson_realloc( b->data, new_size );
if ( !b->data )
bson_fatal_msg( !!b->data, "realloc() failed" );
b->dataSize = new_size;
b->cur += b->data - orig;
return BSON_OK;
} | Base | 1 |
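The growth computation above scales with floating-point arithmetic and only detects overflow after the fact. For comparison, a minimal sketch of an overflow-safe integer growth calculation (an illustration, not the library's code):

#include <climits>

/* Sketch: widen before scaling so the 1.5x growth cannot wrap around int. */
static int grow_capacity(int current, int needed) {
    if (needed > INT_MAX - current)           /* current + needed overflows */
        return -1;
    long long target = (long long)current + needed;
    long long scaled = target + target / 2;   /* 1.5x, computed in 64 bits */
    return scaled > INT_MAX ? INT_MAX : (int)scaled;
}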
PackLinuxElf32::elf_find_dynamic(unsigned int key) const
{
Elf32_Dyn const *dynp= dynseg;
if (dynp)
for (; (unsigned)((char const *)dynp - (char const *)dynseg) < sz_dynseg
&& Elf32_Dyn::DT_NULL!=dynp->d_tag; ++dynp) if (get_te32(&dynp->d_tag)==key) {
unsigned const t= elf_get_offset_from_address(get_te32(&dynp->d_val));
if (t) {
return t + file_image;
}
break;
}
return 0;
} | Base | 1 |
int ZlibInStream::overrun(int itemSize, int nItems, bool wait)
{
if (itemSize > bufSize)
throw Exception("ZlibInStream overrun: max itemSize exceeded");
if (end - ptr != 0)
memmove(start, ptr, end - ptr);
offset += ptr - start;
end -= ptr - start;
ptr = start;
while (end - ptr < itemSize) {
if (!decompress(wait))
return 0;
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | Base | 1 |
EntropyParser::EntropyParser(class Frame *frame,class Scan *scan)
: JKeeper(scan->EnvironOf()), m_pScan(scan), m_pFrame(frame)
{
m_ucCount = scan->ComponentsInScan();
// The residual scan uses all components here, not just for, but
// it does not require the component count either.
for(volatile UBYTE i = 0;i < m_ucCount && i < 4;i++) {
JPG_TRY {
m_pComponent[i] = scan->ComponentOf(i);
} JPG_CATCH {
m_pComponent[i] = NULL;
} JPG_ENDTRY;
}
m_ulRestartInterval = m_pFrame->TablesOf()->RestartIntervalOf();
m_usNextRestartMarker = 0xffd0;
m_ulMCUsToGo = m_ulRestartInterval;
m_bSegmentIsValid = true;
m_bScanForDNL = (m_pFrame->HeightOf() == 0)?true:false;
m_bDNLFound = false;
} | Base | 1 |
SilenceMessage(const std::string& mask, const std::string& flags)
: ClientProtocol::Message("SILENCE")
{
PushParam(mask);
PushParamRef(flags);
} | Variant | 0 |
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameWildcardDNSMatched) {
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir "
"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.set_exact("api.example.com");
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
subject_alt_name_matchers;
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers));
} | Base | 1 |
int FdInStream::overrun(int itemSize, int nItems, bool wait)
{
if (itemSize > bufSize)
throw Exception("FdInStream overrun: max itemSize exceeded");
if (end - ptr != 0)
memmove(start, ptr, end - ptr);
offset += ptr - start;
end -= ptr - start;
ptr = start;
int bytes_to_read;
while (end < start + itemSize) {
bytes_to_read = start + bufSize - end;
if (!timing) {
// When not timing, we must be careful not to read too much
// extra data into the buffer. Otherwise, the line speed
// estimation might stay at zero for a long time: All reads
// during timing=1 can be satisfied without calling
readWithTimeoutOrCallback. However, reading only 1 or 2 bytes
is inefficient.
bytes_to_read = vncmin(bytes_to_read, vncmax(itemSize*nItems, 8));
}
int n = readWithTimeoutOrCallback((U8*)end, bytes_to_read, wait);
if (n == 0) return 0;
end += n;
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | Base | 1 |
inline void skip(int bytes) {
while (bytes > 0) {
int n = check(1, bytes);
ptr += n;
bytes -= n;
}
} | Base | 1 |
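A sketch of how the same check()/ptr contract is typically consumed for bulk reads (assumed member of the same stream class, mirroring the loop above): check() guarantees at least one item is buffered, so the loop always makes progress.

inline void readBytes(U8* data, int length) {
    while (length > 0) {
        int n = check(1, length);  // at least 1 byte buffered on return
        memcpy(data, ptr, n);
        ptr += n;
        data += n;
        length -= n;
    }
}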
void SSecurityTLS::initGlobal()
{
static bool globalInitDone = false;
if (!globalInitDone) {
if (gnutls_global_init() != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_global_init failed");
globalInitDone = true;
}
} | Class | 2 |
int overrun(int itemSize, int nItems) {
int len = ptr - start + itemSize * nItems;
if (len < (end - start) * 2)
len = (end - start) * 2;
U8* newStart = new U8[len];
memcpy(newStart, start, ptr - start);
ptr = newStart + (ptr - start);
delete [] start;
start = newStart;
end = newStart + len;
return nItems;
} | Base | 1 |
void RunOneAveragePoolTest(const PoolParams& params,
const RuntimeShape& input_shape,
const int8* input_data,
const RuntimeShape& output_shape) {
const int buffer_size = output_shape.FlatSize();
std::vector<int8> optimized_averagePool_output(buffer_size);
std::vector<int8> reference_averagePool_output(buffer_size);
reference_integer_ops::AveragePool(params, input_shape, input_data,
output_shape,
reference_averagePool_output.data());
optimized_integer_ops::AveragePool(params, input_shape, input_data,
output_shape,
optimized_averagePool_output.data());
for (int i = 0; i < buffer_size; i++) {
EXPECT_TRUE(reference_averagePool_output[i] ==
optimized_averagePool_output[i]);
}
} | Base | 1 |
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
static const double kBeta = 1.0;
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
data->params.table = data->f_table;
optimized_ops::PopulateSoftmaxLookupTable(&data->params,
input->params.scale, kBeta);
data->params.zero_point = output->params.zero_point;
data->params.scale = output->params.scale;
}
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
static const int kScaledDiffIntegerBits = 5;
tflite::PreprocessLogSoftmaxScalingExp(
kBeta, input->params.scale, kScaledDiffIntegerBits,
&data->input_multiplier, &data->input_left_shift,
&data->reverse_scaling_divisor, &data->reverse_scaling_right_shift);
data->reverse_scaling_right_shift *= -1;
data->diff_min =
-1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
data->input_left_shift);
}
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
void create_test_key() {
int errStatus = 0;
vector<char> errMsg(1024, 0);
uint32_t enc_len;
SAFE_UINT8_BUF(encrypted_key, BUF_LEN);
string key = TEST_VALUE;
sgx_status_t status = trustedEncryptKeyAES(eid, &errStatus, errMsg.data(), key.c_str(), encrypted_key, &enc_len);
HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data());
vector<char> hexEncrKey(2 * enc_len + 1, 0);
carray2Hex(encrypted_key, enc_len, hexEncrKey.data(), 2 * enc_len + 1);
LevelDB::getLevelDb()->writeDataUnique("TEST_KEY", hexEncrKey.data());
} | Base | 1 |
void MainWindow::showUpgradePrompt()
{
if (Settings.checkUpgradeAutomatic()) {
showStatusMessage("Checking for upgrade...");
QNetworkRequest request(QUrl("https://check.shotcut.org/version.json"));
QSslConfiguration sslConfig = request.sslConfiguration();
sslConfig.setPeerVerifyMode(QSslSocket::VerifyNone);
request.setSslConfiguration(sslConfig);
m_network.get(request);
} else {
m_network.setStrictTransportSecurityEnabled(false);
QAction* action = new QAction(tr("Click here to check for a new version of Shotcut."), 0);
connect(action, SIGNAL(triggered(bool)), SLOT(on_actionUpgrade_triggered()));
showStatusMessage(action, 15 /* seconds */);
}
} | Base | 1 |
virtual ~CxFile() { };
| Base | 1 |
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
} break;
case kTfLiteInt8: {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
EvalUsingLookupTable(data, input, output);
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 and int8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | Base | 1 |
void DefaultEnv::Initialize()
{
sLog = new Log();
SetUpLog();
sEnv = new DefaultEnv();
sForkHandler = new ForkHandler();
sFileTimer = new FileTimer();
sPlugInManager = new PlugInManager();
sPlugInManager->ProcessEnvironmentSettings();
sForkHandler->RegisterFileTimer( sFileTimer );
//--------------------------------------------------------------------------
// MacOSX library loading is completely moronic. We cannot dlopen a library
// from a thread other than the main thread, so we pre-dlopen all the
// libraries that we may potentially want.
//--------------------------------------------------------------------------
#ifdef __APPLE__
char *errBuff = new char[1024];
const char *libs[] =
{
"libXrdSeckrb5.so",
"libXrdSecgsi.so",
"libXrdSecgsiAuthzVO.so",
"libXrdSecgsiGMAPDN.so",
"libXrdSecgsiGMAPLDAP.so",
"libXrdSecpwd.so",
"libXrdSecsss.so",
"libXrdSecunix.so",
0
};
for( int i = 0; libs[i]; ++i )
{
sLog->Debug( UtilityMsg, "Attempting to pre-load: %s", libs[i] );
bool ok = XrdOucPreload( libs[i], errBuff, 1024 );
if( !ok )
sLog->Error( UtilityMsg, "Unable to pre-load %s: %s", libs[i], errBuff );
}
delete [] errBuff;
#endif
} | Base | 1 |
TEST_F(CheckAuthTest, TestNoOpenId) {
EXPECT_CALL(*raw_request_, FindHeader("x-goog-iap-jwt-assertion", _))
.WillOnce(Invoke([](const std::string &, std::string *token) {
*token = "";
return false;
}));
EXPECT_CALL(*raw_request_, FindHeader(kAuthHeader, _))
.WillOnce(Invoke([](const std::string &, std::string *token) {
*token = std::string(kBearer) + std::string(kTokenIssuer2);
return true;
}));
EXPECT_CALL(*raw_request_, SetAuthToken(kTokenIssuer2)).Times(1);
EXPECT_CALL(*raw_env_, DoRunHTTPRequest(_))
.WillOnce(Invoke([](HTTPRequest *req) {
EXPECT_EQ(req->url(), kIssuer2PubkeyUrl);
std::string body(kPubkey);
std::map<std::string, std::string> empty;
req->OnComplete(Status::OK, std::move(empty), std::move(body));
}));
EXPECT_CALL(*raw_request_,
AddHeaderToBackend(kEndpointApiUserInfo, kUserInfo_kSub_kIss2))
.WillOnce(Return(utils::Status::OK));
CheckAuth(context_, [](Status status) { ASSERT_TRUE(status.ok()); });
} | Base | 1 |
void CConfig::Write(CFile& File, unsigned int iIndentation) {
CString sIndentation = CString(iIndentation, '\t');
for (const auto& it : m_ConfigEntries) {
for (const CString& sValue : it.second) {
File.Write(sIndentation + it.first + " = " + sValue + "\n");
}
}
for (const auto& it : m_SubConfigs) {
for (const auto& it2 : it.second) {
File.Write("\n");
File.Write(sIndentation + "<" + it.first + " " + it2.first + ">\n");
it2.second.m_pSubConfig->Write(File, iIndentation + 1);
File.Write(sIndentation + "</" + it.first + ">\n");
}
}
} | Class | 2 |
TEST_F(AllowMissingInAndOfOrListTest, GoodAndBadJwts) {
EXPECT_CALL(mock_cb_, onComplete(Status::Ok));
// Use the token with example.com issuer for x-other.
auto headers =
Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, GoodToken}};
context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);
verifier_->verify(context_);
EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));
EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));
} | Class | 2 |
void RegKey::setBinary(const TCHAR* valname, const void* value, int length) const {
LONG result = RegSetValueEx(key, valname, 0, REG_BINARY, (const BYTE*)value, length);
if (result != ERROR_SUCCESS) throw rdr::SystemException("setBinary", result);
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32 ||
output->type == kTfLiteInt64) {
EvalSub<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
context->ReportError(
context,
"output type %d is not supported, requires float|uint8|int32 types.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (output->type) {
case kTfLiteInt32: {
// TensorFlow does not support negative for int32.
TF_LITE_ENSURE_OK(context, CheckValue(context, input2));
PowImpl<int32_t>(input1, input2, output, data->requires_broadcast);
break;
}
case kTfLiteFloat32: {
PowImpl<float>(input1, input2, output, data->requires_broadcast);
break;
}
default: {
context->ReportError(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} | Base | 1 |
int MSADPCM::decodeBlock(const uint8_t *encoded, int16_t *decoded)
{
ms_adpcm_state decoderState[2];
ms_adpcm_state *state[2];
int channelCount = m_track->f.channelCount;
// Calculate the number of bytes needed for decoded data.
int outputLength = m_framesPerPacket * sizeof (int16_t) * channelCount;
state[0] = &decoderState[0];
if (channelCount == 2)
state[1] = &decoderState[1];
else
state[1] = &decoderState[0];
// Initialize block predictor.
for (int i=0; i<channelCount; i++)
{
state[i]->predictorIndex = *encoded++;
assert(state[i]->predictorIndex < m_numCoefficients);
}
// Initialize delta.
for (int i=0; i<channelCount; i++)
{
state[i]->delta = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
// Initialize first two samples.
for (int i=0; i<channelCount; i++)
{
state[i]->sample1 = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
for (int i=0; i<channelCount; i++)
{
state[i]->sample2 = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
const int16_t *coefficient[2] =
{
m_coefficients[state[0]->predictorIndex],
m_coefficients[state[1]->predictorIndex]
};
for (int i=0; i<channelCount; i++)
*decoded++ = state[i]->sample2;
for (int i=0; i<channelCount; i++)
*decoded++ = state[i]->sample1;
/*
The first two samples have already been 'decoded' in
the block header.
*/
int samplesRemaining = (m_framesPerPacket - 2) * m_track->f.channelCount;
while (samplesRemaining > 0)
{
uint8_t code;
int16_t newSample;
code = *encoded >> 4;
newSample = decodeSample(*state[0], code, coefficient[0]);
*decoded++ = newSample;
code = *encoded & 0x0f;
newSample = decodeSample(*state[1], code, coefficient[1]);
*decoded++ = newSample;
encoded++;
samplesRemaining -= 2;
}
return outputLength;
} | Base | 1 |
bool PamBackend::start(const QString &user) {
bool result;
QString service = QStringLiteral("sddm");
if (user == QStringLiteral("sddm") && m_greeter)
service = QStringLiteral("sddm-greeter");
else if (m_app->session()->path().isEmpty())
service = QStringLiteral("sddm-check");
else if (m_autologin)
service = QStringLiteral("sddm-autologin");
result = m_pam->start(service, user);
if (!result)
m_app->error(m_pam->errorString(), Auth::ERROR_INTERNAL);
return result;
} | Base | 1 |
std::string& attrf(int ncid, int varId, const char * attrName, std::string& alloc)
{
alloc = "";
size_t len = 0;
nc_inq_attlen(ncid, varId, attrName, &len);
if(len < 1)
{
return alloc;
}
char attr_vals[NC_MAX_NAME + 1];
memset(attr_vals, 0, NC_MAX_NAME + 1);
// Now look through this variable for the attribute
if(nc_get_att_text(ncid, varId, attrName, attr_vals) != NC_NOERR)
{
return alloc;
}
alloc = std::string(attr_vals);
return alloc;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
int resource_id = input_resource_id_tensor->data.i32[0];
auto& resources = subgraph->resources();
auto* variable = resource::GetResourceVariable(&resources, resource_id);
TF_LITE_ENSURE(context, variable != nullptr);
TfLiteTensor* variable_tensor = variable->GetTensor();
TfLiteTensor* output = GetOutput(context, node, kOutputValue);
TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(
context, output, TfLiteIntArrayCopy(variable_tensor->dims)));
memcpy(output->data.raw, variable_tensor->data.raw, output->bytes);
return kTfLiteOk;
} | Base | 1 |
MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) {
return bson_append_string_base( b, name, value, len, BSON_SYMBOL );
} | Base | 1 |
R_API RBinJavaAttrInfo *r_bin_java_synthetic_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut64 offset = 0;
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
if (!attr) {
return NULL;
}
offset += 6;
attr->type = R_BIN_JAVA_ATTR_TYPE_SYNTHETIC_ATTR;
attr->size = offset;
return attr;
} | Base | 1 |
void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
int32_t activation_min;
int32_t activation_max;
(void)CalculateActivationRangeQuantized(context, params->activation, output,
&activation_min, &activation_max);
#define TF_LITE_AVERAGE_POOL(type) \
tflite::PoolParams op_params; \
op_params.stride_height = params->stride_height; \
op_params.stride_width = params->stride_width; \
op_params.filter_height = params->filter_height; \
op_params.filter_width = params->filter_width; \
op_params.padding_values.height = data->padding.height; \
op_params.padding_values.width = data->padding.width; \
op_params.quantized_activation_min = activation_min; \
op_params.quantized_activation_max = activation_max; \
type::AveragePool(op_params, GetTensorShape(input), \
GetTensorData<int8_t>(input), GetTensorShape(output), \
GetTensorData<int8_t>(output))
if (kernel_type == kReference) {
TF_LITE_AVERAGE_POOL(reference_integer_ops);
} else {
TF_LITE_AVERAGE_POOL(optimized_integer_ops);
}
#undef TF_LITE_AVERAGE_POOL
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalAdd<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(context,
EvalAddQuantized<kernel_type>(context, node, params, data,
input1, input2, output));
} else {
TF_LITE_UNSUPPORTED_TYPE(context, output->type, "Add");
}
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
void RemoteFsDevice::serviceRemoved(const QString &name)
{
if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) {
sub=tr("Not Available");
updateStatus();
}
} | Class | 2 |
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>(
input1, input2, output, requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Always postpone sizing string tensors, even if we could in principle
// calculate their shapes now. String tensors don't benefit from having their
// shapes precalculated because the actual memory can only be allocated after
// we know all the content.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type != kTfLiteString) {
if (NumInputs(node) == 1 ||
IsConstantTensor(GetInput(context, node, kShapeTensor))) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
} else {
SetTensorToDynamic(output);
}
}
return kTfLiteOk;
} | Base | 1 |
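When the output is left dynamic above, the shape can only be resolved at evaluation time. A minimal sketch of the Eval-side counterpart of this pattern (assumed, following the usual TF Lite kernel structure):

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  // A dynamic output is resized here, once the shape tensor's
  // contents are actually available.
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  }
  // ... fill in the output payload ...
  return kTfLiteOk;
}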
inline void* aligned_malloc(size_t size, size_t alignment) {
return folly::detail::aligned_malloc(size, alignment);
} | Base | 1 |
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
int dy = y1 - y0;
int adx = x1 - x0;
int ady = abs(dy);
int base;
int x=x0,y=y0;
int err = 0;
int sy;
#ifdef STB_VORBIS_DIVIDE_TABLE
if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
if (dy < 0) {
base = -integer_divide_table[ady][adx];
sy = base-1;
} else {
base = integer_divide_table[ady][adx];
sy = base+1;
}
} else {
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
}
#else
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
#endif
ady -= abs(base) * adx;
if (x1 > n) x1 = n;
if (x < x1) {
LINE_OP(output[x], inverse_db_table[y]);
for (++x; x < x1; ++x) {
err += ady;
if (err >= adx) {
err -= adx;
y += sy;
} else
y += base;
LINE_OP(output[x], inverse_db_table[y]);
}
}
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
} | Base | 1 |
static TfLiteRegistration DynamicCopyOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Output 0 is dynamic
TfLiteTensor* output0 = GetOutput(context, node, 0);
SetTensorToDynamic(output0);
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(
context, output1, TfLiteIntArrayCopy(input->dims)));
return kTfLiteOk;
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
// Not implemented since this isn't required in testing.
return kTfLiteOk;
};
return reg;
} | Base | 1 |
folly::Optional<TLSMessage> EncryptedReadRecordLayer::read(
folly::IOBufQueue& buf) {
auto decryptedBuf = getDecryptedBuf(buf);
if (!decryptedBuf) {
return folly::none;
}
TLSMessage msg;
// Iterate over the buffers while trying to find
// the first non-zero octet. This is much faster than
// first iterating and then trimming.
auto currentBuf = decryptedBuf->get();
bool nonZeroFound = false;
do {
currentBuf = currentBuf->prev();
size_t i = currentBuf->length();
while (i > 0 && !nonZeroFound) {
nonZeroFound = (currentBuf->data()[i - 1] != 0);
i--;
}
if (nonZeroFound) {
msg.type = static_cast<ContentType>(currentBuf->data()[i]);
}
currentBuf->trimEnd(currentBuf->length() - i);
} while (!nonZeroFound && currentBuf != decryptedBuf->get());
if (!nonZeroFound) {
throw std::runtime_error("No content type found");
}
msg.fragment = std::move(*decryptedBuf);
switch (msg.type) {
case ContentType::handshake:
case ContentType::alert:
case ContentType::application_data:
break;
default:
throw std::runtime_error(folly::to<std::string>(
"received encrypted content type ",
static_cast<ContentTypeType>(msg.type)));
}
if (!msg.fragment) {
if (msg.type == ContentType::application_data) {
msg.fragment = folly::IOBuf::create(0);
} else {
throw std::runtime_error("received empty fragment");
}
}
return msg;
} | Base | 1 |
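The backwards scan above implements the TLSInnerPlaintext layout from RFC 8446: content, then the real content-type octet, then optional zero padding. A simplified sketch of the same unpadding on a flat buffer (illustration only; the code above does this over a chain of IOBufs without trimming first):

#include <cstdint>
#include <vector>

bool unpadInnerPlaintext(std::vector<uint8_t>& buf, uint8_t& type) {
  while (!buf.empty() && buf.back() == 0)
    buf.pop_back();                // drop the zero padding
  if (buf.empty())
    return false;                  // no content type found
  type = buf.back();               // last non-zero octet is the real type
  buf.pop_back();                  // what remains is the content
  return true;
}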
bool logToUSDT(const Array& bt) {
std::lock_guard<std::mutex> lock(usdt_mutex);
memset(&bt_slab, 0, sizeof(bt_slab));
int i = 0;
IterateVNoInc(
bt.get(),
[&](TypedValue tv) -> bool {
if (i >= strobelight::kMaxStackframes) {
return true;
}
assertx(isArrayLikeType(type(tv)));
ArrayData* bt_frame = val(tv).parr;
strobelight::backtrace_frame_t* frame = &bt_slab.frames[i];
auto const line = bt_frame->get(s_line.get());
if (line.is_init()) {
assertx(isIntType(type(line)));
frame->line = val(line).num;
}
auto const file_name = bt_frame->get(s_file.get());
if (file_name.is_init()) {
assertx(isStringType(type(file_name)));
strncpy(frame->file_name,
val(file_name).pstr->data(),
std::min(val(file_name).pstr->size(), strobelight::kFileNameMax));
frame->file_name[strobelight::kFileNameMax - 1] = '\0';
}
auto const class_name = bt_frame->get(s_class.get());
if (class_name.is_init()) {
assertx(isStringType(type(class_name)));
strncpy(frame->class_name,
val(class_name).pstr->data(),
std::min(val(class_name).pstr->size(), strobelight::kClassNameMax));
frame->class_name[strobelight::kClassNameMax - 1] = '\0';
}
auto const function_name = bt_frame->get(s_function.get());
if (function_name.is_init()) {
assertx(isStringType(type(function_name)));
strncpy(frame->function,
val(function_name).pstr->data(),
std::min(val(function_name).pstr->size(),
strobelight::kFunctionMax));
frame->function[strobelight::kFunctionMax - 1] = '\0';
}
i++;
return false;
}
);
bt_slab.len = i;
// Allow BPF to read the now-formatted stacktrace
FOLLY_SDT_WITH_SEMAPHORE(hhvm, hhvm_stack, &bt_slab);
return true;
} | Base | 1 |
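Each strncpy above is followed by an explicit terminating write because strncpy does not NUL-terminate when the source fills the destination. A minimal helper sketch capturing that idiom (an illustration, not part of the source):

#include <algorithm>
#include <cstring>

// Bounded copy that always NUL-terminates, mirroring the
// strncpy-then-terminate pattern used above.
static void copyBounded(char* dst, size_t dstSize, const char* src, size_t srcLen) {
  size_t n = std::min(srcLen, dstSize - 1);
  memcpy(dst, src, n);
  dst[n] = '\0';
}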
TfLiteStatus PrepareHashtableSize(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputResourceIdTensor);
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1);
TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output_tensor != nullptr);
TF_LITE_ENSURE_EQ(context, output_tensor->type, kTfLiteInt64);
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
outputSize->data[0] = 1;
return context->ResizeTensor(context, output_tensor, outputSize);
} | Base | 1 |
TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
bool requires_broadcast = !HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
Comparison<float, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt32:
Comparison<int32_t, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteInt64:
Comparison<int64_t, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
case kTfLiteUInt8:
ComparisonQuantized<uint8_t, reference_ops::LessFn>(
input1, input2, output, requires_broadcast);
break;
case kTfLiteInt8:
ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output,
requires_broadcast);
break;
default:
context->ReportError(context,
"Does not support type %d, requires float|int|uint8",
input1->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
void SSecurityTLS::shutdown()
{
if (session) {
if (gnutls_bye(session, GNUTLS_SHUT_RDWR) != GNUTLS_E_SUCCESS) {
/* FIXME: Treat as non-fatal error */
vlog.error("TLS session wasn't terminated gracefully");
}
}
if (dh_params) {
gnutls_dh_params_deinit(dh_params);
dh_params = 0;
}
if (anon_cred) {
gnutls_anon_free_server_credentials(anon_cred);
anon_cred = 0;
}
if (cert_cred) {
gnutls_certificate_free_credentials(cert_cred);
cert_cred = 0;
}
if (session) {
gnutls_deinit(session);
session = 0;
gnutls_global_deinit();
}
} | Class | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
return EvalImpl<kernel_type, kTfLiteFloat32>(context, node);
case kTfLiteUInt8:
return EvalImpl<kernel_type, kTfLiteUInt8>(context, node);
case kTfLiteInt8:
return EvalImpl<kernel_type, kTfLiteInt8>(context, node);
case kTfLiteInt16:
return EvalImpl<kernel_type, kTfLiteInt16>(context, node);
default:
context->ReportError(context, "Type %d not currently supported.",
input->type);
return kTfLiteError;
}
} | Base | 1 |
void RemoteFsDevice::serviceRemoved(const QString &name)
{
if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) {
sub=tr("Not Available");
updateStatus();
}
} | Class | 2 |
int Read(void* pDestBuffer, int nSize)
{
if ( m_nPos + nSize >= m_nLen )
nSize = m_nLen - m_nPos - 1;
memcpy( pDestBuffer, (m_sFile + m_nPos), nSize );
m_nPos += nSize;
return nSize;
} | Base | 1 |
void CharToWideMap(const char *Src,wchar *Dest,size_t DestSize,bool &Success)
{
// Map inconvertible characters to private use Unicode area 0xE000.
// Mark such string by placing special non-character code before
// first inconvertible character.
Success=false;
bool MarkAdded=false;
uint SrcPos=0,DestPos=0;
while (DestPos<DestSize)
{
if (Src[SrcPos]==0)
{
Success=true;
break;
}
mbstate_t ps;
memset(&ps,0,sizeof(ps));
if (mbrtowc(Dest+DestPos,Src+SrcPos,MB_CUR_MAX,&ps)==-1)
{
// For security reasons we do not want to map low ASCII characters,
// so we do not have additional .. and path separator codes.
if (byte(Src[SrcPos])>=0x80)
{
if (!MarkAdded)
{
Dest[DestPos++]=MappedStringMark;
MarkAdded=true;
if (DestPos>=DestSize)
break;
}
Dest[DestPos++]=byte(Src[SrcPos++])+MapAreaStart;
}
else
break;
}
else
{
memset(&ps,0,sizeof(ps));
int Length=mbrlen(Src+SrcPos,MB_CUR_MAX,&ps);
SrcPos+=Max(Length,1);
DestPos++;
}
}
Dest[Min(DestPos,DestSize-1)]=0;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
int resource_id = input_resource_id_tensor->data.i32[0];
auto& resources = subgraph->resources();
auto* variable = resource::GetResourceVariable(&resources, resource_id);
TF_LITE_ENSURE(context, variable != nullptr);
TfLiteTensor* variable_tensor = variable->GetTensor();
TfLiteTensor* output = GetOutput(context, node, kOutputValue);
TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(
context, output, TfLiteIntArrayCopy(variable_tensor->dims)));
memcpy(output->data.raw, variable_tensor->data.raw, output->bytes);
return kTfLiteOk;
} | Base | 1 |
void RestAuthHandler::shutdownExecute(bool isFinalized) noexcept {
try {
if (_isValid) {
events::LoggedIn(*_request, _username);
} else {
events::CredentialsBad(*_request, _username);
}
} catch (...) {
}
RestVocbaseBaseHandler::shutdownExecute(isFinalized);
} | Base | 1 |
Jsi_Value *jsi_ValueObjKeyAssign(Jsi_Interp *interp, Jsi_Value *target, Jsi_Value *keyval, Jsi_Value *value, int flag)
{
int arrayindex = -1;
if (keyval->vt == JSI_VT_NUMBER && Jsi_NumberIsInteger(keyval->d.num) && keyval->d.num >= 0) {
arrayindex = (int)keyval->d.num;
}
/* TODO: array["1"] should also extend the length of the array */
if (arrayindex >= 0 && arrayindex < MAX_ARRAY_LIST &&
target->vt == JSI_VT_OBJECT && target->d.obj->arr) {
return jsi_ObjArraySetDup(interp, target->d.obj, value, arrayindex);
}
const char *kstr = Jsi_ValueToString(interp, keyval, NULL);
#if (defined(JSI_HAS___PROTO__) && JSI_HAS___PROTO__==2)
if (Jsi_Strcmp(kstr, "__proto__")==0) {
Jsi_Obj *obj = target->d.obj;
obj->__proto__ = Jsi_ValueDup(interp, value);
//obj->clearProto = 1;
return obj->__proto__;
}
#endif
Jsi_Value *v = Jsi_ValueNew1(interp);
if (value)
Jsi_ValueCopy(interp, v, value);
jsi_ValueObjSet(interp, target, kstr, v, flag, (Jsi_ValueIsStringKey(interp, keyval)? JSI_OM_ISSTRKEY:0));
Jsi_DecrRefCount(interp, v);
return v;
} | Base | 1 |
CxFile(void) { };
| Base | 1 |
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err)
{
char *colon;
char *host = NULL;
#ifdef HAVE_IPV6
char *p;
if (*(str) == '[' && str_len > 1) {
/* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */
p = memchr(str + 1, ']', str_len - 2);
if (!p || *(p + 1) != ':') {
if (get_err) {
*err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str);
}
return NULL;
}
*portno = atoi(p + 2);
return estrndup(str + 1, p - str - 1);
}
#endif
if (str_len) {
colon = memchr(str, ':', str_len - 1);
} else {
colon = NULL;
}
if (colon) {
*portno = atoi(colon + 1);
host = estrndup(str, colon - str);
} else {
if (get_err) {
*err = strpprintf(0, "Failed to parse address \"%s\"", str);
}
return NULL;
}
return host;
} | Class | 2 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 5);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* ids = GetInput(context, node, 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(ids), 1);
TF_LITE_ENSURE_EQ(context, ids->type, kTfLiteInt32);
const TfLiteTensor* indices = GetInput(context, node, 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 2);
TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32);
const TfLiteTensor* shape = GetInput(context, node, 2);
TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1);
TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32);
const TfLiteTensor* weights = GetInput(context, node, 3);
TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1);
TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
SizeOfDimension(ids, 0));
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
SizeOfDimension(weights, 0));
const TfLiteTensor* value = GetInput(context, node, 4);
TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
// Mark the output as a dynamic tensor.
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
output->allocation_type = kTfLiteDynamic;
return kTfLiteOk;
} | Base | 1 |
TEST_F(RouterTest, RetryUpstreamResetResponseStarted) {
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Since the response is already started we don't retry.
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
EXPECT_CALL(callbacks_, encodeHeaders_(_, false));
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
response_decoder->decodeHeaders(std::move(response_headers), false);
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
// Normally, sendLocalReply will actually send the reply, but in this case the
// HCM will detect the headers have already been sent and not route through
// the encoder again.
EXPECT_CALL(callbacks_, sendLocalReply(_, _, _, _, _)).WillOnce(testing::InvokeWithoutArgs([] {
}));
encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
// For normal HTTP, once we have a 200 we consider this a success, even if a
// later reset occurs.
EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
} | Class | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* data = GetInput(context, node, kInputDataTensor);
const TfLiteTensor* segment_ids =
GetInput(context, node, kInputSegmentIdsTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, data, segment_ids, output));
}
#define TF_LITE_SEGMENT_SUM(dtype) \
reference_ops::SegmentSum<dtype>( \
GetTensorShape(data), GetTensorData<dtype>(data), \
GetTensorShape(segment_ids), GetTensorData<int32_t>(segment_ids), \
GetTensorShape(output), GetTensorData<dtype>(output));
switch (data->type) {
case kTfLiteInt32:
TF_LITE_SEGMENT_SUM(int32_t);
break;
case kTfLiteFloat32:
TF_LITE_SEGMENT_SUM(float);
break;
default:
context->ReportError(context,
"Currently SegmentSum doesn't support type: %s",
TfLiteTypeGetName(data->type));
return kTfLiteError;
}
#undef TF_LITE_SEGMENT_SUM
return kTfLiteOk;
} | Base | 1 |
jas_matrix_t *jas_matrix_create(int numrows, int numcols)
{
jas_matrix_t *matrix;
int i;
size_t size;
matrix = 0;
if (numrows < 0 || numcols < 0) {
goto error;
}
if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) {
goto error;
}
matrix->flags_ = 0;
matrix->numrows_ = numrows;
matrix->numcols_ = numcols;
matrix->rows_ = 0;
matrix->maxrows_ = numrows;
matrix->data_ = 0;
matrix->datasize_ = 0;
// matrix->datasize_ = numrows * numcols;
if (!jas_safe_size_mul(numrows, numcols, &size)) {
goto error;
}
matrix->datasize_ = size;
if (matrix->maxrows_ > 0) {
if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_,
sizeof(jas_seqent_t *)))) {
goto error;
}
}
if (matrix->datasize_ > 0) {
if (!(matrix->data_ = jas_alloc2(matrix->datasize_,
sizeof(jas_seqent_t)))) {
goto error;
}
}
for (i = 0; i < numrows; ++i) {
matrix->rows_[i] = &matrix->data_[i * matrix->numcols_];
}
for (i = 0; i < matrix->datasize_; ++i) {
matrix->data_[i] = 0;
}
matrix->xstart_ = 0;
matrix->ystart_ = 0;
matrix->xend_ = matrix->numcols_;
matrix->yend_ = matrix->numrows_;
return matrix;
error:
if (matrix) {
jas_matrix_destroy(matrix);
}
return 0;
} | Class | 2 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Check that the inputs and outputs have the right sizes and types.
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output_values->type);
const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32);
// Set output dynamic if the input is not const.
if (IsConstantTensor(top_k)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
} else {
TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
SetTensorToDynamic(output_indexes);
SetTensorToDynamic(output_values);
}
return kTfLiteOk;
} | Base | 1 |
void CSecurityTLS::initGlobal()
{
static bool globalInitDone = false;
if (!globalInitDone) {
gnutls_global_init();
globalInitDone = true;
}
} | Class | 2 |
TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* axis,
const TfLiteTensor* input, int num_splits) {
int axis_value = GetTensorData<int>(axis)[0];
if (axis_value < 0) {
axis_value += NumDimensions(input);
}
TF_LITE_ENSURE(context, axis_value >= 0);
TF_LITE_ENSURE(context, axis_value < NumDimensions(input));
const int input_size = SizeOfDimension(input, axis_value);
TF_LITE_ENSURE_MSG(context, input_size % num_splits == 0,
"Not an even split");
const int slice_size = input_size / num_splits;
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims);
output_dims->data[axis_value] = slice_size;
TfLiteTensor* output = GetOutput(context, node, i);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_dims));
}
return kTfLiteOk;
} | Base | 1 |
HexOutStream::HexOutStream(OutStream& os, int buflen)
: out_stream(os), offset(0), bufSize(buflen ? buflen : DEFAULT_BUF_LEN)
{
if (bufSize % 2)
bufSize--;
ptr = start = new U8[bufSize];
end = start + bufSize;
} | Base | 1 |
void CharCodeToUnicode::addMapping(CharCode code, char *uStr, int n,
int offset) {
CharCode oldLen, i;
Unicode u;
char uHex[5];
int j;
if (code >= mapLen) {
oldLen = mapLen;
mapLen = (code + 256) & ~255;
map = (Unicode *)greallocn(map, mapLen, sizeof(Unicode));
for (i = oldLen; i < mapLen; ++i) {
map[i] = 0;
}
}
if (n <= 4) {
if (sscanf(uStr, "%x", &u) != 1) {
error(-1, "Illegal entry in ToUnicode CMap");
return;
}
map[code] = u + offset;
} else {
if (sMapLen >= sMapSize) {
sMapSize = sMapSize + 16;
sMap = (CharCodeToUnicodeString *)
greallocn(sMap, sMapSize, sizeof(CharCodeToUnicodeString));
}
map[code] = 0;
sMap[sMapLen].c = code;
sMap[sMapLen].len = n / 4;
for (j = 0; j < sMap[sMapLen].len && j < maxUnicodeString; ++j) {
strncpy(uHex, uStr + j*4, 4);
uHex[4] = '\0';
if (sscanf(uHex, "%x", &sMap[sMapLen].u[j]) != 1) {
error(-1, "Illegal entry in ToUnicode CMap");
}
}
sMap[sMapLen].u[sMap[sMapLen].len - 1] += offset;
++sMapLen;
}
} | Base | 1 |
bool WindowsServiceControl::install( const QString& filePath, const QString& displayName )
{
m_serviceHandle = CreateService(
m_serviceManager, // SCManager database
WindowsCoreFunctions::toConstWCharArray( m_name ), // name of service
WindowsCoreFunctions::toConstWCharArray( displayName ),// name to display
SERVICE_ALL_ACCESS, // desired access
SERVICE_WIN32_OWN_PROCESS, // service type
SERVICE_AUTO_START, // start type
SERVICE_ERROR_NORMAL, // error control type
WindowsCoreFunctions::toConstWCharArray( filePath ), // service's binary
nullptr, // no load ordering group
nullptr, // no tag identifier
L"Tcpip\0RpcSs\0\0", // dependencies
nullptr, // LocalSystem account
nullptr ); // no password
if( m_serviceHandle == nullptr )
{
const auto error = GetLastError();
if( error == ERROR_SERVICE_EXISTS )
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" is already installed." ).arg( m_name ) );
}
else
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" could not be installed." ).arg( m_name ) );
}
return false;
}
SC_ACTION serviceActions;
serviceActions.Delay = 10000;
serviceActions.Type = SC_ACTION_RESTART;
SERVICE_FAILURE_ACTIONS serviceFailureActions;
serviceFailureActions.dwResetPeriod = 0;
serviceFailureActions.lpRebootMsg = nullptr;
serviceFailureActions.lpCommand = nullptr;
serviceFailureActions.lpsaActions = &serviceActions;
serviceFailureActions.cActions = 1;
ChangeServiceConfig2( m_serviceHandle, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailureActions );
// Everything went fine
vInfo() << qUtf8Printable( tr( "The service \"%1\" has been installed successfully." ).arg( m_name ) );
return true;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalMul<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
context->ReportError(context,
"Mul only supports FLOAT32, INT32 and quantized UINT8,"
" INT8 and INT16 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
int length() const {
return m_str ? m_str->size() : 0;
} | Base | 1 |
void ftoa_bounded_extra(JsVarFloat val,char *str, size_t len, int radix, int fractionalDigits) {
const JsVarFloat stopAtError = 0.0000001;
if (isnan(val)) strncpy(str,"NaN",len);
else if (!isfinite(val)) {
if (val<0) strncpy(str,"-Infinity",len);
else strncpy(str,"Infinity",len);
} else {
if (val<0) {
if (--len <= 0) { *str=0; return; } // bounds check
*(str++) = '-';
val = -val;
}
// what if we're really close to an integer? Just use that...
if (((JsVarInt)(val+stopAtError)) == (1+(JsVarInt)val))
val = (JsVarFloat)(1+(JsVarInt)val);
JsVarFloat d = 1;
while (d*radix <= val) d*=radix;
while (d >= 1) {
int v = (int)(val / d);
val -= v*d;
if (--len <= 0) { *str=0; return; } // bounds check
*(str++) = itoch(v);
d /= radix;
}
#ifndef USE_NO_FLOATS
if (((fractionalDigits<0) && val>0) || fractionalDigits>0) {
bool hasPt = false;
val*=radix;
while (((fractionalDigits<0) && (fractionalDigits>-12) && (val > stopAtError)) || (fractionalDigits > 0)) {
int v = (int)(val+((fractionalDigits==1) ? 0.4 : 0.00000001) );
val = (val-v)*radix;
if (v==radix) v=radix-1;
if (!hasPt) {
hasPt = true;
if (--len <= 0) { *str=0; return; } // bounds check
*(str++)='.';
}
if (--len <= 0) { *str=0; return; } // bounds check
*(str++)=itoch(v);
fractionalDigits--;
}
}
#endif
*(str++)=0;
}
} | Class | 2 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// TODO(ahentz): these two checks would make the new implementation
// incompatible with some existing models, where params is not specified. It
// is OK not to have them because toco would have set input and output types
// to match the parameters.
// auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data);
// TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type);
// TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type);
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
TEST_F(SingleAllowMissingInOrListTest, MissingIssToken) {
EXPECT_CALL(mock_cb_, onComplete(Status::Ok));
auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ES256WithoutIssToken}};
context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);
verifier_->verify(context_);
EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));
} | Base | 1 |
TEST_F(TestSPIFFEValidator, TestGetTrustBundleStore) {
initialize();
// No SAN
auto cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/extensions_cert.pem"));
EXPECT_FALSE(validator().getTrustBundleStore(cert.get()));
// Non-SPIFFE SAN
cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/non_spiffe_san_cert.pem"));
EXPECT_FALSE(validator().getTrustBundleStore(cert.get()));
// SPIFFE SAN
cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/spiffe_san_cert.pem"));
// Trust bundle not provided.
EXPECT_FALSE(validator().getTrustBundleStore(cert.get()));
// Trust bundle provided.
validator().trustBundleStores().emplace("example.com", X509StorePtr(X509_STORE_new()));
EXPECT_TRUE(validator().getTrustBundleStore(cert.get()));
} | Base | 1 |
void operator = (const IniSection &s)
{
if (&s == this)
{
return;
}
IniBase::operator = (s);
ip = s.ip;
    end_comment = s.end_comment;
    rewrite_by = s.rewrite_by;
container = s.container;
reindex ();
} | Class | 2 |
void* sspi_SecureHandleGetLowerPointer(SecHandle* handle)
{
void* pointer;
if (!handle)
return NULL;
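	/* The lower half of the handle stores the pointer bitwise-inverted;
	 * invert it again to recover the original pointer. */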
pointer = (void*) ~((size_t) handle->dwLower);
return pointer;
} | Base | 1 |
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_STATUS(GenericPrepare(context, node));
TfLiteTensor* output = GetOutput(context, node, 0);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
HardSwishParams* params = &data->params;
const TfLiteTensor* input = GetInput(context, node, 0);
params->input_zero_point = input->params.zero_point;
params->output_zero_point = output->params.zero_point;
const float input_scale = input->params.scale;
const float hires_input_scale = (1.0f / 128.0f) * input_scale;
const float reluish_scale = 3.0f / 32768.0f;
const float output_scale = output->params.scale;
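    // Fold the high-resolution input scale into fixed-point multipliers for
    // the output and for the "reluish" intermediate below.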
const float output_multiplier = hires_input_scale / output_scale;
int32_t output_multiplier_fixedpoint_int32;
QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
¶ms->output_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
output_multiplier_fixedpoint_int32,
¶ms->output_multiplier_fixedpoint_int16);
TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
const float reluish_multiplier = hires_input_scale / reluish_scale;
int32_t reluish_multiplier_fixedpoint_int32;
QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
¶ms->reluish_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
reluish_multiplier_fixedpoint_int32,
¶ms->reluish_multiplier_fixedpoint_int16);
}
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
if (IsDynamicTensor(output_values)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
const int32 k = top_k->data.i32[0];
  // The tensor can have more than 2 dimensions or even be a vector; either
  // way, the code treats the innermost dimension as the row.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const int32 row_size = input->dims->data[input->dims->size - 1];
int32 num_rows = 1;
for (int i = 0; i < input->dims->size - 1; ++i) {
num_rows *= input->dims->data[i];
}
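  // Dispatch on the value type; the output indexes are always int32.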
switch (output_values->type) {
case kTfLiteFloat32:
TopK(row_size, num_rows, GetTensorData<float>(input), k,
output_indexes->data.i32, GetTensorData<float>(output_values));
break;
case kTfLiteUInt8:
TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32,
output_values->data.uint8);
break;
case kTfLiteInt8:
TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32,
output_values->data.int8);
break;
case kTfLiteInt32:
TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32,
output_values->data.i32);
break;
case kTfLiteInt64:
TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32,
output_values->data.i64);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.",
TfLiteTypeGetName(output_values->type));
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
const auto* params =
reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
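    // Precompute fixed-point multipliers for the alpha (x < 0) and the
    // identity (x >= 0) branches.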
double alpha_multiplier =
input->params.scale * params->alpha / output->params.scale;
QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
&data->output_shift_alpha);
double identity_multiplier = input->params.scale / output->params.scale;
QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
&data->output_shift_identity);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | Base | 1 |
bool Scanner::fill(size_t need)
{
if (eof) return false;
pop_finished_files();
DASSERT(bot <= tok && tok <= lim);
size_t free = static_cast<size_t>(tok - bot);
size_t copy = static_cast<size_t>(lim - tok);
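    // Either slide the kept bytes down into the free prefix, or grow the
    // buffer (at least doubling BSIZE) and copy the kept bytes across.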
if (free >= need) {
memmove(bot, tok, copy);
shift_ptrs_and_fpos(-static_cast<ptrdiff_t>(free));
}
else {
BSIZE += std::max(BSIZE, need);
char * buf = new char[BSIZE + YYMAXFILL];
if (!buf) fatal("out of memory");
memmove(buf, tok, copy);
shift_ptrs_and_fpos(buf - bot);
delete [] bot;
bot = buf;
free = BSIZE - copy;
}
if (!read(free)) {
eof = lim;
memset(lim, 0, YYMAXFILL);
lim += YYMAXFILL;
}
return true;
} | Base | 1 |
QUInt16() {} | Base | 1 |
size_t jsuGetFreeStack() {
#ifdef ARM
void *frame = __builtin_frame_address(0);
size_t stackPos = (size_t)((char*)frame);
size_t stackEnd = (size_t)((char*)&LINKER_END_VAR);
if (stackPos < stackEnd) return 0; // should never happen, but just in case of overflow!
return stackPos - stackEnd;
#elif defined(LINUX)
  // On Linux, we set STACK_BASE from `main`.
char ptr; // this is on the stack
extern void *STACK_BASE;
uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr);
return 1000000 - count; // give it 1 megabyte of stack
#else
// stack depth seems pretty platform-specific :( Default to a value that disables it
return 1000000; // no stack depth check on this platform
#endif
} | Base | 1 |
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameDNSMatched) {
bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
envoy::type::matcher::v3::StringMatcher matcher;
matcher.MergeFrom(TestUtility::createRegexMatcher(".*.example.com"));
std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>>
subject_alt_name_matchers;
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));
EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers));
} | Base | 1 |
static INLINE UINT16 ntlm_av_pair_get_id(const NTLM_AV_PAIR* pAvPair)
{
UINT16 AvId;
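	/* Read AvId through the accessor macro instead of dereferencing the
	 * field directly. */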
Data_Read_UINT16(&pAvPair->AvId, AvId);
return AvId;
} | Base | 1 |
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
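  // Dispatch on the input type; the quantized paths clamp through
  // QuantizedReluX with an unbounded upper limit.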
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} break;
// TODO(renjieliu): We may revisit the quantization calculation logic,
// the unbounded upper limit is actually hard to quantize.
case kTfLiteUInt8: {
QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
case kTfLiteInt8: {
QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 & int8/uint8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
mptctl_replace_fw (unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
MPT_ADAPTER *ioc;
int iocnum;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_replace_fw - "
"Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
(ioc == NULL)) {
printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
__FILE__, __LINE__, iocnum);
return -ENODEV;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
	/* If caching FW, free the old FW image
	 */
if (ioc->cached_fw == NULL)
return 0;
mpt_free_fw_memory(ioc);
/* Allocate memory for the new FW image
*/
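	/* ALIGN rounds the caller-supplied image size up to a 4-byte boundary. */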
newFwSize = ALIGN(karg.newImageSize, 4);
mpt_alloc_fw_memory(ioc, newFwSize);
if (ioc->cached_fw == NULL)
return -ENOMEM;
/* Copy the data from user memory to kernel space
*/
if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - "
"Unable to read in mpt_ioctl_replace_fw image "
"@ %p\n", ioc->name, __FILE__, __LINE__, uarg);
mpt_free_fw_memory(ioc);
return -EFAULT;
}
/* Update IOCFactsReply
*/
ioc->facts.FWImageSize = newFwSize;
return 0;
} | Class | 2 |
QInt8() {} | Base | 1 |
void CommandData::ParseArg(wchar *Arg)
{
if (IsSwitch(*Arg) && !NoMoreSwitches)
if (Arg[1]=='-' && Arg[2]==0)
NoMoreSwitches=true;
else
ProcessSwitch(Arg+1);
else
if (*Command==0)
{
wcsncpy(Command,Arg,ASIZE(Command));
*Command=toupperw(*Command);
// 'I' and 'S' commands can contain case sensitive strings after
// the first character, so we must not modify their case.
// 'S' can contain SFX name, which case is important in Unix.
if (*Command!='I' && *Command!='S')
wcsupper(Command);
}
else
if (*ArcName==0)
wcsncpyz(ArcName,Arg,ASIZE(ArcName));
else
{
// Check if last character is the path separator.
size_t Length=wcslen(Arg);
wchar EndChar=Length==0 ? 0:Arg[Length-1];
bool EndSeparator=IsDriveDiv(EndChar) || IsPathDiv(EndChar);
wchar CmdChar=toupperw(*Command);
bool Add=wcschr(L"AFUM",CmdChar)!=NULL;
bool Extract=CmdChar=='X' || CmdChar=='E';
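      // An argument ending in a path separator (for non-add commands) is
      // treated as the extraction path rather than a file mask.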
if (EndSeparator && !Add)
wcsncpyz(ExtrPath,Arg,ASIZE(ExtrPath));
else
if ((Add || CmdChar=='T') && (*Arg!='@' || ListMode==RCLM_REJECT_LISTS))
FileArgs.AddString(Arg);
else
{
FindData FileData;
bool Found=FindFile::FastFind(Arg,&FileData);
if ((!Found || ListMode==RCLM_ACCEPT_LISTS) &&
ListMode!=RCLM_REJECT_LISTS && *Arg=='@' && !IsWildcard(Arg))
{
FileLists=true;
ReadTextFile(Arg+1,&FileArgs,false,true,FilelistCharset,true,true,true);
}
else
if (Found && FileData.IsDir && Extract && *ExtrPath==0)
{
wcsncpyz(ExtrPath,Arg,ASIZE(ExtrPath));
AddEndSlash(ExtrPath,ASIZE(ExtrPath));
}
else
FileArgs.AddString(Arg);
}
}
} | Base | 1 |
static unsigned HuffmanTree_makeFromFrequencies(HuffmanTree* tree, const unsigned* frequencies,
size_t mincodes, size_t numcodes, unsigned maxbitlen)
{
unsigned error = 0;
while(!frequencies[numcodes - 1] && numcodes > mincodes) numcodes--; /*trim zeroes*/
tree->maxbitlen = maxbitlen;
tree->numcodes = (unsigned)numcodes; /*number of symbols*/
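  /*(re)allocate the code-length array for the trimmed number of symbols*/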
tree->lengths = (unsigned*)realloc(tree->lengths, numcodes * sizeof(unsigned));
if(!tree->lengths) return 83; /*alloc fail*/
/*initialize all lengths to 0*/
memset(tree->lengths, 0, numcodes * sizeof(unsigned));
error = lodepng_huffman_code_lengths(tree->lengths, frequencies, numcodes, maxbitlen);
if(!error) error = HuffmanTree_makeFromLengths2(tree);
return error;
} | Base | 1 |