	// register element, XML declaration, CDATA and character-data callbacks
	p.setElementHandlerStringRef(StartDescriptionElementStringRef<std::string>, EndDescriptionElementStringRef);
	p.setXmlDeclHandler(DeclHandler);
	p.setCDataHandler(StartCData, EndCData);
	p.setCharacterDataHandler(CharacterHandler);
	p.setUserData(&userdata);

	// abort and release the partially built document if parsing fails
	if (!p.Parse((unsigned char*)data, size))
	{
		delete mXML;
		return 0;
	}

	// the first node pushed during parsing becomes the document root
	if (userdata.mNodeHierarchy.size())
		mXML->setRoot(userdata.mNodeHierarchy[0]);

	userdata.mNodeHierarchy.clear();
	return (XML*)mXML;
}
XMLBase* XMLReaderFile::ProtectedReadFileStringRef(const SP<CoreRawBuffer> buffer, char* encoding)
{
	MinimalXML p;
	MyUserData userdata;
	userdata.mReader = this;
	mXML = new XMLStringRef(buffer);

	// string_view based handlers reference the original buffer instead of copying strings
	p.setElementHandlerStringRef(StartDescriptionElementStringRef<std::string_view>, EndDescriptionElementStringRef);
	p.setXmlDeclHandler(DeclHandler);
	p.setCDataHandler(StartCData, EndCDataStringRef<std::string_view>);
	p.setCharacterDataHandler(CharacterHandlerStringRef<std::string_view>);
	p.setUserData(&userdata);

	u64 size = buffer->length();
	char* Buff = (char*)buffer->buffer();
	if (!p.Parse((unsigned char*)Buff, size))
	{
		delete mXML;
		return 0;
	}

	if (userdata.mNodeHierarchy.size())
		mXML->setRoot(userdata.mNodeHierarchy[0]);

	userdata.mNodeHierarchy.clear();
	return (XMLStringRef*)mXML;
}
XMLBase* XMLReaderFile::ProtectedReadFile(FileHandle* file)
{
	u64 size;
	auto rawbuffer = ModuleFileManager::LoadFileAsCharString(file, size, 1);
	if (rawbuffer)
	{
		XMLBase* result = ProtectedReadFile(rawbuffer.get());
		return result;
	}
	return 0;
}

/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
/**
 * Performs voice-activity detection with a CTC model.
 * For each input sample in the dataset, outputs the following:
 * - Chunk-level probabilities of non-speech based on the probability of a
 *   blank label assigned as per the acoustic model trained with CTC. These are
 *   assigned for each chunk of output. For a stride-1 model these correspond
 *   to each frame (10 ms); for a model with stride 8 they cover 80 ms
 *   intervals (output in the .vad file for each sample).
 * - The perplexity of the predicted sequence based on a specified input
 *   language model (first output in the .sts file for each sample).
 * - The percentage of the audio containing speech based on the passed
 *   --vad_threshold flag (second output in the .sts file for each sample).
 * - The most likely token-level transcription of the given audio based on the
 *   acoustic model output only (output in the .tsc file for each sample).
 * - Frame-wise token emissions based on the most likely token emitted for each
 *   chunk (output in the .fwt file for each sample).
 */
#include <stdlib.h>

#include <cmath>
#include <fstream>
#include <iomanip>
#include <string>
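The header comment above explains that the speech percentage written to the .sts file is derived from the chunk-level blank (non-speech) probabilities and the --vad_threshold flag. Below is a minimal, self-contained sketch of that thresholding step, not the tool's actual implementation; the function name and signature are illustrative assumptions:

#include <cstddef>
#include <vector>

// Illustrative sketch (not part of the original tool): count a chunk as
// speech when the CTC blank probability for that chunk falls below the
// given threshold, and return the fraction of speech chunks.
double speechFraction(const std::vector<double>& blankProbs, double vadThreshold) {
  if (blankProbs.empty()) {
    return 0.0;
  }
  std::size_t speechChunks = 0;
  for (double pBlank : blankProbs) {
    if (pBlank < vadThreshold) {
      ++speechChunks;
    }
  }
  return static_cast<double>(speechChunks) / static_cast<double>(blankProbs.size());
}

For a stride-1 model each entry corresponds to a 10 ms frame, so multiplying the returned fraction by the utterance duration gives a rough estimate of total speech time.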