Dataset columns (one git commit per row):
  diff — string, length 41 to 2.03M
  msg  — string, length 1 to 1.5k
  repo — string, length 5 to 40
  sha  — string, length 40 to 40
  time — string, length 20 to 20
mmm a / tensorflow / contrib / BUILD <nl> ppp b / tensorflow / contrib / BUILD <nl> py_library ( <nl> " / / tensorflow / contrib / bayesflow : bayesflow_py " , <nl> " / / tensorflow / contrib / boosted_trees : init_py " , <nl> " / / tensorflow / contrib / cloud : cloud_py " , <nl> + " / / tensorflow / contrib / cluster_resolver : cluster_resolver_pip " , <nl> " / / tensorflow / contrib / cluster_resolver : cluster_resolver_py " , <nl> " / / tensorflow / contrib / coder : coder_ops_py " , <nl> " / / tensorflow / contrib / compiler : compiler_py " , <nl> mmm a / tensorflow / contrib / lite / tools / BUILD <nl> ppp b / tensorflow / contrib / lite / tools / BUILD <nl> cc_library ( <nl> srcs = [ " verifier . cc " ] , <nl> hdrs = [ " verifier . h " ] , <nl> deps = [ <nl> + " / / tensorflow / contrib / lite : framework " , <nl> " / / tensorflow / contrib / lite : schema_fbs_version " , <nl> + " / / tensorflow / contrib / lite : string_util " , <nl> " / / tensorflow / contrib / lite / schema : schema_fbs " , <nl> + " @ com_google_absl / / absl / base : core_headers " , <nl> ] , <nl> ) <nl> <nl> cc_test ( <nl> " : verifier " , <nl> " / / tensorflow / contrib / lite : framework " , <nl> " / / tensorflow / contrib / lite : schema_fbs_version " , <nl> + " / / tensorflow / contrib / lite : string_util " , <nl> " / / tensorflow / contrib / lite / schema : schema_fbs " , <nl> " / / tensorflow / contrib / lite / testing : util " , <nl> " @ com_google_googletest / / : gtest " , <nl> mmm a / tensorflow / contrib / lite / tools / verifier . cc <nl> ppp b / tensorflow / contrib / lite / tools / verifier . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / contrib / lite / tools / verifier . h " <nl> + # include < climits > <nl> # include " tensorflow / contrib / lite / schema / schema_generated . h " <nl> + # include " tensorflow / contrib / lite / string_util . h " <nl> # include " tensorflow / contrib / lite / version . h " <nl> <nl> namespace tflite { <nl> <nl> namespace { <nl> <nl> + / / Reports error message when the reporter is set . <nl> + void ReportError ( ErrorReporter * error_reporter , const char * format , . . . ) { <nl> + if ( error_reporter ) { <nl> + va_list args ; <nl> + va_start ( args , format ) ; <nl> + error_reporter - > Report ( format , args ) ; <nl> + va_end ( args ) ; <nl> + } <nl> + } <nl> + <nl> + / / Returns the int32_t value pointed by ptr . <nl> + const uint32_t * GetIntPtr ( const char * ptr ) { <nl> + return reinterpret_cast < const uint32_t * > ( ptr ) ; <nl> + } <nl> + <nl> + / / Verifies flatbuffer format of the model contents and returns the in - memory <nl> + / / model . <nl> const Model * VerifyFlatbufferAndGetModel ( const void * buf , size_t len ) { <nl> : : flatbuffers : : Verifier verifier ( static_cast < const uint8_t * > ( buf ) , len ) ; <nl> if ( VerifyModelBuffer ( verifier ) ) { <nl> const Model * VerifyFlatbufferAndGetModel ( const void * buf , size_t len ) { <nl> } <nl> } <nl> <nl> + const uint32_t kMaxNumString = UINT_MAX / sizeof ( int32_t ) - 2 ; <nl> + <nl> + / / Verifies string tensor has legit buffer contents that follow the schema <nl> + / / defined in lite / string_util . h <nl> + bool VerifyStringTensorBuffer ( const Buffer & buffer , <nl> + ErrorReporter * error_reporter ) { <nl> + uint32_t buffer_size = buffer . 
data ( ) - > size ( ) ; <nl> + const char * buffer_ptr = reinterpret_cast < const char * > ( buffer . data ( ) - > data ( ) ) ; <nl> + <nl> + uint32_t num_strings = * GetIntPtr ( buffer_ptr ) ; <nl> + if ( num_strings > kMaxNumString ) { <nl> + ReportError ( error_reporter , <nl> + " String tensor has invalid num of string set : % d " , num_strings ) ; <nl> + return false ; <nl> + } <nl> + uint32_t header_offsets = <nl> + static_cast < uint32_t > ( num_strings + 2 ) * sizeof ( int32_t ) ; <nl> + <nl> + if ( buffer_size < header_offsets ) { <nl> + ReportError ( error_reporter , <nl> + " String tensor buffer requires at least % d bytes , but is " <nl> + " allocated with % d bytes " , <nl> + header_offsets , buffer_size ) ; <nl> + return false ; <nl> + } <nl> + <nl> + uint32_t prev_ptr = header_offsets ; <nl> + uint32_t offset = sizeof ( int32_t ) ; <nl> + <nl> + if ( * GetIntPtr ( buffer_ptr + offset ) ! = header_offsets ) { <nl> + ReportError ( error_reporter , <nl> + " String tensor buffer initial offset must be : % d " , <nl> + header_offsets ) ; <nl> + return false ; <nl> + } <nl> + offset + = sizeof ( int32_t ) ; <nl> + for ( int i = 1 ; i < = num_strings ; i + + , offset + = sizeof ( int32_t ) ) { <nl> + int string_offset = * GetIntPtr ( buffer_ptr + offset ) ; <nl> + if ( string_offset < prev_ptr | | string_offset > buffer_size ) { <nl> + ReportError ( error_reporter , " String tensor buffer is invalid : index % d " , <nl> + i ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + if ( * GetIntPtr ( buffer_ptr + offset - sizeof ( int32_t ) ) ! = buffer_size ) { <nl> + ReportError ( error_reporter , " String tensor buffer last offset must be % d " , <nl> + buffer_size ) ; <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + / / Verifies numeric tensor has legit buffer . <nl> + bool VerifyNumericTensorBuffer ( const Tensor & tensor , const Buffer & buffer , <nl> + ErrorReporter * error_reporter ) { <nl> + uint64_t bytes_required = 1 ; <nl> + for ( int dim : * tensor . shape ( ) ) { <nl> + bytes_required * = dim ; <nl> + if ( bytes_required > UINT_MAX ) { <nl> + ReportError ( error_reporter , " Tensor dimension overflow " ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + switch ( tensor . type ( ) ) { <nl> + case TensorType_FLOAT32 : <nl> + bytes_required * = sizeof ( float ) ; <nl> + break ; <nl> + case TensorType_INT32 : <nl> + bytes_required * = sizeof ( int32_t ) ; <nl> + break ; <nl> + case TensorType_UINT8 : <nl> + bytes_required * = sizeof ( uint8_t ) ; <nl> + break ; <nl> + case TensorType_INT64 : <nl> + bytes_required * = sizeof ( int64_t ) ; <nl> + break ; <nl> + case TensorType_FLOAT16 : <nl> + / / FALLTHROUGH_INTENDED ; <nl> + default : <nl> + ReportError ( error_reporter , " Invalid tensor type : % d " , tensor . type ( ) ) ; <nl> + return false ; <nl> + } <nl> + if ( bytes_required > UINT_MAX ) { <nl> + ReportError ( error_reporter , " Tensor dimension overflow " ) ; <nl> + return false ; <nl> + } <nl> + <nl> + if ( bytes_required ! = buffer . data ( ) - > size ( ) ) { <nl> + ReportError ( <nl> + error_reporter , <nl> + " Tensor requires % d bytes , but is allocated with % d bytes buffer " , <nl> + bytes_required , buffer . data ( ) - > size ( ) ) ; <nl> + return false ; <nl> + } <nl> + return true ; <nl> + <nl> + / / TODO ( yichengfan ) : verify quantized tensors . <nl> + } <nl> + <nl> + / / Verifies tensors have valid properties and legit buffer if set . <nl> + bool VerifyTensors ( const Model & model , ErrorReporter * error_reporter ) { <nl> + if ( ! 
model . subgraphs ( ) ) { <nl> + return true ; <nl> + } <nl> + for ( const auto & subgraph : * model . subgraphs ( ) ) { <nl> + if ( ! subgraph - > tensors ( ) ) { <nl> + return true ; <nl> + } <nl> + for ( const auto & tensor : * subgraph - > tensors ( ) ) { <nl> + if ( ! tensor - > buffer ( ) ) { <nl> + return true ; <nl> + } <nl> + if ( tensor - > buffer ( ) > = model . buffers ( ) - > size ( ) ) { <nl> + ReportError ( error_reporter , " Invalid tensor buffer index : % d " , <nl> + tensor - > buffer ( ) ) ; <nl> + return false ; <nl> + } <nl> + auto * buffer = model . buffers ( ) - > Get ( tensor - > buffer ( ) ) ; <nl> + if ( ! buffer | | ! buffer - > data ( ) ) { <nl> + ReportError ( error_reporter , " Tensor buffer % d not set " , <nl> + tensor - > buffer ( ) ) ; <nl> + return false ; <nl> + } <nl> + <nl> + if ( tensor - > type ( ) = = TensorType_STRING ) { <nl> + if ( ! VerifyStringTensorBuffer ( * buffer , error_reporter ) ) { <nl> + return false ; <nl> + } <nl> + } else { <nl> + if ( ! VerifyNumericTensorBuffer ( * tensor , * buffer , error_reporter ) ) { <nl> + return false ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> } / / namespace <nl> <nl> - bool Verify ( const void * buf , size_t len ) { <nl> + bool Verify ( const void * buf , size_t len , ErrorReporter * error_reporter ) { <nl> const Model * model = VerifyFlatbufferAndGetModel ( buf , len ) ; <nl> if ( model = = nullptr ) { <nl> + ReportError ( error_reporter , " Invalid flatbuffer format " ) ; <nl> return false ; <nl> } <nl> - <nl> - return model - > version ( ) = = TFLITE_SCHEMA_VERSION ; <nl> + if ( model - > version ( ) ! = TFLITE_SCHEMA_VERSION ) { <nl> + ReportError ( error_reporter , " Invalid model version % d " , model - > version ( ) ) ; <nl> + return false ; <nl> + } <nl> + if ( ! VerifyTensors ( * model , error_reporter ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> } <nl> } / / namespace tflite <nl> mmm a / tensorflow / contrib / lite / tools / verifier . h <nl> ppp b / tensorflow / contrib / lite / tools / verifier . h <nl> limitations under the License . <nl> <nl> # include < stdio . h > <nl> <nl> + # include " tensorflow / contrib / lite / error_reporter . h " <nl> + <nl> namespace tflite { <nl> <nl> / / Verifies the integrity of a Tensorflow Lite flatbuffer model file . <nl> / / Currently , it verifies : <nl> / / * The file is following a legit flatbuffer schema . <nl> / / * The model is in supported version . <nl> - bool Verify ( const void * buf , size_t len ) ; <nl> + bool Verify ( const void * buf , size_t len , ErrorReporter * error_reporter ) ; <nl> <nl> } / / namespace tflite <nl> <nl> mmm a / tensorflow / contrib / lite / tools / verifier_test . cc <nl> ppp b / tensorflow / contrib / lite / tools / verifier_test . cc <nl> using flatbuffers : : FlatBufferBuilder ; <nl> using flatbuffers : : Offset ; <nl> using flatbuffers : : Vector ; <nl> <nl> - / / Class that abstracts the list of buffers at the end of the TF Lite structure <nl> - class DeferredBufferWriter { <nl> + / / Build single subgraph model . <nl> + class TfLiteFlatbufferModelBuilder { <nl> public : <nl> - DeferredBufferWriter ( ) { <nl> - data_ . push_back ( { } ) ; / / sentinel empty buffer . <nl> + TfLiteFlatbufferModelBuilder ( ) { <nl> + buffers_ . push_back ( <nl> + CreateBuffer ( builder_ , builder_ . 
CreateVector ( std : : vector < uint8_t > { } ) ) ) ; <nl> } <nl> <nl> - Offset < Vector < Offset < Buffer > > > BuildBuffers ( FlatBufferBuilder * builder ) { <nl> - std : : vector < Offset < Buffer > > buffer_vector ; <nl> - for ( const auto & vec : data_ ) { <nl> - auto data_buffer = builder - > CreateVector ( vec . data ( ) , vec . size ( ) ) ; <nl> - buffer_vector . push_back ( tflite : : CreateBuffer ( * builder , data_buffer ) ) ; <nl> + void AddTensor ( const std : : vector < int > & shape , tflite : : TensorType type , <nl> + const std : : vector < uint8_t > & buffer , const char * name ) { <nl> + int buffer_index = 0 ; <nl> + if ( ! buffer . empty ( ) ) { <nl> + buffer_index = buffers_ . size ( ) ; <nl> + buffers_ . push_back ( CreateBuffer ( builder_ , builder_ . CreateVector ( buffer ) ) ) ; <nl> } <nl> - return builder - > CreateVector ( buffer_vector ) ; <nl> + tensors_ . push_back ( CreateTensorDirect ( builder_ , & shape , type , buffer_index , <nl> + name , / * quantization = * / 0 ) ) ; <nl> } <nl> <nl> - / / Registers a buffer index and takes ownership of the data to write to it . <nl> - int Record ( std : : vector < uint8_t > data ) { <nl> - int buffer_index = data_ . size ( ) ; <nl> - data_ . emplace_back ( std : : move ( data ) ) ; <nl> - return buffer_index ; <nl> + void AddOperator ( const std : : vector < int32_t > & inputs , <nl> + const std : : vector < int32_t > & outputs , <nl> + tflite : : BuiltinOperator builtin_op , const char * custom_op ) { <nl> + operator_codes_ . push_back ( <nl> + CreateOperatorCodeDirect ( builder_ , builtin_op , custom_op ) ) ; <nl> + operators_ . push_back ( CreateOperator ( <nl> + builder_ , operator_codes_ . size ( ) - 1 , builder_ . CreateVector ( inputs ) , <nl> + builder_ . CreateVector ( outputs ) , BuiltinOptions_NONE , <nl> + / * builtin_options = * / 0 , <nl> + / * custom_options = * / 0 , tflite : : CustomOptionsFormat_FLEXBUFFERS ) ) ; <nl> + } <nl> + <nl> + void FinishModel ( const std : : vector < int32_t > & inputs , <nl> + const std : : vector < int32_t > & outputs ) { <nl> + auto subgraph = std : : vector < Offset < SubGraph > > ( { CreateSubGraph ( <nl> + builder_ , builder_ . CreateVector ( tensors_ ) , <nl> + builder_ . CreateVector ( inputs ) , builder_ . CreateVector ( outputs ) , <nl> + builder_ . CreateVector ( operators_ ) , <nl> + builder_ . CreateString ( " test_subgraph " ) ) } ) ; <nl> + auto result = CreateModel ( <nl> + builder_ , TFLITE_SCHEMA_VERSION , builder_ . CreateVector ( operator_codes_ ) , <nl> + builder_ . CreateVector ( subgraph ) , builder_ . CreateString ( " test_model " ) , <nl> + builder_ . CreateVector ( buffers_ ) ) ; <nl> + tflite : : FinishModelBuffer ( builder_ , result ) ; <nl> + } <nl> + <nl> + bool Verify ( ) { <nl> + return tflite : : Verify ( builder_ . GetBufferPointer ( ) , builder_ . GetSize ( ) , <nl> + DefaultErrorReporter ( ) ) ; <nl> } <nl> <nl> private : <nl> - std : : vector < std : : vector < unsigned char > > data_ ; <nl> + FlatBufferBuilder builder_ ; <nl> + std : : vector < Offset < Operator > > operators_ ; <nl> + std : : vector < Offset < OperatorCode > > operator_codes_ ; <nl> + std : : vector < Offset < Tensor > > tensors_ ; <nl> + std : : vector < Offset < Buffer > > buffers_ ; <nl> } ; <nl> <nl> TEST ( VerifyModel , TestEmptyModel ) { <nl> TEST ( VerifyModel , TestEmptyModel ) { <nl> / * description = * / 0 , / * buffers = * / 0 ) ; <nl> : : tflite : : FinishModelBuffer ( builder , model ) ; <nl> <nl> - ASSERT_TRUE ( Verify ( builder . 
GetBufferPointer ( ) , builder . GetSize ( ) ) ) ; <nl> + ASSERT_TRUE ( Verify ( builder . GetBufferPointer ( ) , builder . GetSize ( ) , <nl> + DefaultErrorReporter ( ) ) ) ; <nl> } <nl> <nl> TEST ( VerifyModel , TestSimpleModel ) { <nl> - FlatBufferBuilder builder ; <nl> - auto inputs = builder . CreateVector < int32_t > ( { 0 } ) ; <nl> - auto outputs = builder . CreateVector < int32_t > ( { 1 } ) ; <nl> - auto operator_codes = builder . CreateVector ( std : : vector < Offset < OperatorCode > > { <nl> - CreateOperatorCodeDirect ( builder , BuiltinOperator_CUSTOM , " test " ) } ) ; <nl> - auto operators = <nl> - builder . CreateVector ( std : : vector < Offset < Operator > > { CreateOperator ( <nl> - builder , / * opcode_index = * / 0 , <nl> - / * inputs = * / builder . CreateVector < int32_t > ( { 0 } ) , <nl> - / * outputs = * / builder . CreateVector < int32_t > ( { 1 } ) , BuiltinOptions_NONE , <nl> - / * builtin_options = * / 0 , <nl> - / * custom_options = * / 0 , : : tflite : : CustomOptionsFormat_FLEXBUFFERS ) } ) ; <nl> - std : : vector < int > shape ; <nl> - auto tensors = builder . CreateVector ( std : : vector < Offset < Tensor > > { <nl> - CreateTensorDirect ( builder , & shape , TensorType_INT32 , / * buffer = * / 0 , <nl> - " input " , / * quantization = * / 0 ) , <nl> - CreateTensorDirect ( builder , & shape , TensorType_INT32 , / * buffer = * / 0 , <nl> - " output " , / * quantization = * / 0 ) } ) ; <nl> - auto subgraph = std : : vector < Offset < SubGraph > > ( <nl> - { CreateSubGraph ( builder , tensors , inputs , outputs , operators , <nl> - builder . CreateString ( " Main " ) ) } ) ; <nl> - <nl> - auto model = CreateModel ( builder , TFLITE_SCHEMA_VERSION , operator_codes , <nl> - builder . CreateVector ( subgraph ) , <nl> - builder . CreateString ( " SmartReply " ) , / * buffers = * / 0 ) ; <nl> - <nl> - : : tflite : : FinishModelBuffer ( builder , model ) ; <nl> - ASSERT_TRUE ( Verify ( builder . GetBufferPointer ( ) , builder . GetSize ( ) ) ) ; <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddOperator ( { 0 , 1 } , { 2 } , BuiltinOperator_CUSTOM , " test " ) ; <nl> + builder . AddTensor ( { 2 , 3 } , TensorType_UINT8 , { 1 , 2 , 3 , 4 , 5 , 6 } , " input " ) ; <nl> + builder . AddTensor ( <nl> + { 2 } , TensorType_STRING , <nl> + { 2 , 0 , 0 , 0 , 16 , 0 , 0 , 0 , 17 , 0 , 0 , 0 , 19 , 0 , 0 , 0 , ' A ' , ' B ' , ' C ' } , <nl> + " data " ) ; <nl> + builder . AddTensor ( { 2 , 3 } , TensorType_INT32 , { } , " output " ) ; <nl> + builder . FinishModel ( { 0 , 1 } , { 2 } ) ; <nl> + ASSERT_TRUE ( builder . Verify ( ) ) ; <nl> } <nl> <nl> TEST ( VerifyModel , TestCorruptedData ) { <nl> string model = " 123 " ; <nl> - ASSERT_FALSE ( Verify ( model . data ( ) , model . size ( ) ) ) ; <nl> + ASSERT_FALSE ( Verify ( model . data ( ) , model . size ( ) , / * error_reporter = * / nullptr ) ) ; <nl> } <nl> <nl> TEST ( VerifyModel , TestUnsupportedVersion ) { <nl> TEST ( VerifyModel , TestUnsupportedVersion ) { <nl> auto model = CreateModel ( builder , / * version = * / 1 , / * operator_codes = * / 0 , <nl> / * subgraphs = * / 0 , / * description = * / 0 , / * buffers = * / 0 ) ; <nl> : : tflite : : FinishModelBuffer ( builder , model ) ; <nl> - ASSERT_FALSE ( Verify ( builder . GetBufferPointer ( ) , builder . GetSize ( ) ) ) ; <nl> + ASSERT_FALSE ( Verify ( builder . GetBufferPointer ( ) , builder . 
GetSize ( ) , <nl> + DefaultErrorReporter ( ) ) ) ; <nl> } <nl> <nl> TEST ( VerifyModel , TestRandomModificationIsNotAllowed ) { <nl> TEST ( VerifyModel , TestRandomModificationIsNotAllowed ) { <nl> / * subgraphs = * / 0 , / * description = * / 0 , / * buffers = * / 0 ) ; <nl> : : tflite : : FinishModelBuffer ( builder , model ) ; <nl> <nl> - string model_content ( reinterpret_cast < char * > ( builder . GetBufferPointer ( ) ) , <nl> + string model_content ( reinterpret_cast < char * > ( builder . GetBufferPointer ( ) ) , <nl> builder . GetSize ( ) ) ; <nl> for ( int i = 0 ; i < model_content . size ( ) ; i + + ) { <nl> model_content [ i ] = ( model_content [ i ] + 137 ) % 255 ; <nl> - EXPECT_FALSE ( Verify ( model_content . data ( ) , model_content . size ( ) ) ) <nl> + EXPECT_FALSE ( Verify ( model_content . data ( ) , model_content . size ( ) , <nl> + DefaultErrorReporter ( ) ) ) <nl> < < " Fail at position : " < < i ; <nl> } <nl> } <nl> <nl> + TEST ( VerifyModel , TestIntTensorShapeIsGreaterThanBuffer ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( { 2 , 3 } , TensorType_UINT8 , { 1 , 2 , 3 , 4 } , " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . Verify ( ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , TestIntTensorShapeIsSmallerThanBuffer ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( { 2 , 1 } , TensorType_UINT8 , { 1 , 2 , 3 , 4 } , " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . Verify ( ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , TestIntTensorShapeOverflow ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( { 1024 , 2048 , 4096 } , TensorType_UINT8 , { 1 , 2 , 3 , 4 } , <nl> + " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . Verify ( ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , TensorBufferIsNotValid ) { <nl> + FlatBufferBuilder builder ; <nl> + std : : vector < int > shape = { 2 , 3 } ; <nl> + auto tensors = builder . CreateVector ( std : : vector < Offset < Tensor > > { <nl> + CreateTensorDirect ( builder , & shape , TensorType_INT32 , / * buffer = * / 2 , <nl> + " input " , / * quantization = * / 0 ) } ) ; <nl> + auto subgraph = std : : vector < Offset < SubGraph > > ( <nl> + { CreateSubGraph ( builder , tensors , / * inputs = * / 0 , / * outputs = * / 0 , <nl> + / * operators = * / 0 , builder . CreateString ( " Main " ) ) } ) ; <nl> + <nl> + auto buffers = builder . CreateVector ( std : : vector < Offset < Buffer > > { <nl> + CreateBuffer ( builder , <nl> + builder . CreateVector ( std : : vector < uint8 > { 1 , 2 , 3 , 4 , 5 , 6 } ) ) , <nl> + } ) ; <nl> + <nl> + auto model = CreateModel ( builder , TFLITE_SCHEMA_VERSION , / * operator_codes = * / 0 , <nl> + builder . CreateVector ( subgraph ) , <nl> + builder . CreateString ( " SmartReply " ) , buffers ) ; <nl> + <nl> + : : tflite : : FinishModelBuffer ( builder , model ) ; <nl> + ASSERT_FALSE ( Verify ( builder . GetBufferPointer ( ) , builder . GetSize ( ) , <nl> + DefaultErrorReporter ( ) ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , StringTensorHasInvalidNumString ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( <nl> + { 2 } , TensorType_STRING , <nl> + { 0x00 , 0x00 , 0x00 , 0x20 , 16 , 0 , 0 , 0 , 17 , 0 , 0 , 0 , 18 , 0 , 0 , 0 , ' A ' , ' B ' } , <nl> + " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . 
Verify ( ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , StringTensorOffsetTooSmall ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( <nl> + { 2 } , TensorType_STRING , <nl> + { 2 , 0 , 0 , 0 , 12 , 0 , 0 , 0 , 17 , 0 , 0 , 0 , 18 , 0 , 0 , 0 , ' A ' , ' B ' } , " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . Verify ( ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , StringTensorOffsetOutOfRange ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( <nl> + { 2 } , TensorType_STRING , <nl> + { 2 , 0 , 0 , 0 , 16 , 0 , 0 , 0 , 17 , 0 , 0 , 0 , 22 , 0 , 0 , 0 , ' A ' , ' B ' } , " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . Verify ( ) ) ; <nl> + } <nl> + <nl> + TEST ( VerifyModel , StringTensorIsLargerThanRequired ) { <nl> + TfLiteFlatbufferModelBuilder builder ; <nl> + builder . AddTensor ( <nl> + { 2 } , TensorType_STRING , <nl> + { 2 , 0 , 0 , 0 , 16 , 0 , 0 , 0 , 17 , 0 , 0 , 0 , 18 , 0 , 0 , 0 , ' A ' , ' B ' , ' C ' } , <nl> + " input " ) ; <nl> + builder . FinishModel ( { } , { } ) ; <nl> + ASSERT_FALSE ( builder . Verify ( ) ) ; <nl> + } <nl> + <nl> / / TODO ( yichengfan ) : make up malicious files to test with . <nl> <nl> } / / namespace tflite <nl> <nl> - int main ( int argc , char * * argv ) { <nl> + int main ( int argc , char * * argv ) { <nl> : : tflite : : LogToStderr ( ) ; <nl> : : testing : : InitGoogleTest ( & argc , argv ) ; <nl> return RUN_ALL_TESTS ( ) ; <nl>
msg: Verify contents of tensors
repo: tensorflow/tensorflow
sha: 76989a191815bdd96390626db154676ac42b890d
time: 2018-02-01T01:06:07Z
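For context, the commit above extends tflite::Verify() with an ErrorReporter parameter and adds tensor-buffer checks. A minimal usage sketch, assuming the contrib-era include paths and the DefaultErrorReporter() helper shown in the diff; the CheckModel wrapper is hypothetical:

#include <cstdint>
#include <vector>

#include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/tools/verifier.h"

// Returns true when the buffer holds a structurally valid TFLite model;
// verification errors are reported through the supplied ErrorReporter.
bool CheckModel(const std::vector<uint8_t>& model_buffer) {
  // Passing nullptr instead of a reporter silently suppresses error messages.
  return tflite::Verify(model_buffer.data(), model_buffer.size(),
                        tflite::DefaultErrorReporter());
}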
mmm a / tensorflow / contrib / image / kernels / image_ops . cc <nl> ppp b / tensorflow / contrib / image / kernels / image_ops . cc <nl> class ImageProjectiveTransform : public OpKernel { <nl> void Compute ( OpKernelContext * ctx ) override { <nl> const Tensor & images_t = ctx - > input ( 0 ) ; <nl> const Tensor & transform_t = ctx - > input ( 1 ) ; <nl> + const Tensor & output_dim = ctx - > input ( 2 ) ; <nl> OP_REQUIRES ( ctx , images_t . shape ( ) . dims ( ) = = 4 , <nl> errors : : InvalidArgument ( " Input images must have rank 4 " ) ) ; <nl> OP_REQUIRES ( ctx , <nl> class ImageProjectiveTransform : public OpKernel { <nl> auto images = images_t . tensor < T , 4 > ( ) ; <nl> auto transform = transform_t . matrix < float > ( ) ; <nl> Tensor * output_t ; <nl> - OP_REQUIRES_OK ( ctx , ctx - > allocate_output ( 0 , images_t . shape ( ) , & output_t ) ) ; <nl> + / / Image is NHWC format . <nl> + auto output_shape = images_t . shape ( ) ; <nl> + output_shape . set_dim ( 1 , output_dim . vec < int > ( ) ( 0 ) ) ; <nl> + output_shape . set_dim ( 2 , output_dim . vec < int > ( ) ( 1 ) ) ; <nl> + OP_REQUIRES_OK ( ctx , ctx - > allocate_output ( 0 , output_shape , & output_t ) ) ; <nl> auto output = output_t - > tensor < T , 4 > ( ) ; <nl> ( FillProjectiveTransform < Device , T > ( interpolation_ ) ) ( <nl> ctx - > eigen_device < Device > ( ) , & output , images , transform ) ; <nl> mmm a / tensorflow / contrib / image / kernels / image_ops . h <nl> ppp b / tensorflow / contrib / image / kernels / image_ops . h <nl> struct FillProjectiveTransform { <nl> void operator ( ) ( const Device & device , OutputType * output , <nl> const InputType & images , <nl> const TransformsType & transform ) const { <nl> - output - > device ( device ) = images . generate ( <nl> + output - > device ( device ) = output - > generate ( <nl> ProjectiveGenerator < Device , T > ( images , transform , interpolation_ ) ) ; <nl> } <nl> } ; <nl> mmm a / tensorflow / contrib / image / ops / image_ops . cc <nl> ppp b / tensorflow / contrib / image / ops / image_ops . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> + using shape_inference : : DimensionHandle ; <nl> using shape_inference : : InferenceContext ; <nl> using shape_inference : : ShapeHandle ; <nl> <nl> + namespace { <nl> + <nl> + / / Sets output [ 0 ] to shape [ batch_dim , height , width , channel_dim ] , where <nl> + / / height and width come from the size_tensor . <nl> + Status SetOutputToSizedImage ( InferenceContext * c , DimensionHandle batch_dim , <nl> + int size_input_idx , DimensionHandle channel_dim ) { <nl> + / / Verify shape of size input . <nl> + ShapeHandle size ; <nl> + TF_RETURN_IF_ERROR ( c - > WithRank ( c - > input ( size_input_idx ) , 1 , & size ) ) ; <nl> + DimensionHandle unused ; <nl> + TF_RETURN_IF_ERROR ( c - > WithValue ( c - > Dim ( size , 0 ) , 2 , & unused ) ) ; <nl> + <nl> + / / Get size values from the size tensor . <nl> + const Tensor * size_tensor = c - > input_tensor ( size_input_idx ) ; <nl> + DimensionHandle width ; <nl> + DimensionHandle height ; <nl> + if ( size_tensor = = nullptr ) { <nl> + width = c - > UnknownDim ( ) ; <nl> + height = c - > UnknownDim ( ) ; <nl> + } else { <nl> + / / TODO ( petewarden ) - Remove once we have constant evaluation in C + + only . <nl> + if ( size_tensor - > dtype ( ) ! 
= DT_INT32 ) { <nl> + return errors : : InvalidArgument ( <nl> + " Bad size input type for SetOutputToSizedImage : Expected DT_INT32 " <nl> + " but got " , <nl> + DataTypeString ( size_tensor - > dtype ( ) ) , " for input # " , size_input_idx , <nl> + " in " , c - > DebugString ( ) ) ; <nl> + } <nl> + auto vec = size_tensor - > vec < int32 > ( ) ; <nl> + height = c - > MakeDim ( vec ( 0 ) ) ; <nl> + width = c - > MakeDim ( vec ( 1 ) ) ; <nl> + } <nl> + c - > set_output ( 0 , c - > MakeShape ( { batch_dim , height , width , channel_dim } ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status ResizeShapeFn ( InferenceContext * c ) { <nl> + ShapeHandle input ; <nl> + TF_RETURN_IF_ERROR ( c - > WithRank ( c - > input ( 0 ) , 4 , & input ) ) ; <nl> + return SetOutputToSizedImage ( c , c - > Dim ( input , 0 ) , 2 / * size_input_idx * / , <nl> + c - > Dim ( input , 3 ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> / / TODO ( ringwalt ) : Add a " fill_mode " argument with " constant " , " mirror " , etc . <nl> / / TODO ( ringwalt ) : Add a " fill_constant " argument for constant mode ( default 0 ) . <nl> / / TODO ( ringwalt ) : Add an " output_shape " argument . This is sufficient to <nl> using shape_inference : : ShapeHandle ; <nl> REGISTER_OP ( " ImageProjectiveTransform " ) <nl> . Input ( " images : dtype " ) <nl> . Input ( " transforms : float32 " ) <nl> + . Input ( " output_shape : int32 " ) <nl> . Attr ( " dtype : { uint8 , int32 , int64 , float32 , float64 } " ) <nl> . Attr ( " interpolation : string " ) <nl> . Output ( " transformed_images : dtype " ) <nl> - . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> - c - > set_output ( 0 , c - > input ( 0 ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) <nl> + . SetShapeFn ( ResizeShapeFn ) <nl> . Doc ( R " doc ( <nl> Applies the given transform to each of the images . <nl> <nl> If one row of ` transforms ` is ` [ a0 , a1 , a2 , b0 , b1 , b2 , c0 , c1 ] ` , then it maps <nl> the * output * point ` ( x , y ) ` to a transformed * input * point <nl> ` ( x ' , y ' ) = ( ( a0 x + a1 y + a2 ) / k , ( b0 x + b1 y + b2 ) / k ) ` , where <nl> ` k = c0 x + c1 y + 1 ` . If the transformed point lays outside of the input <nl> - image , the output pixel is set to 0 . The output is the same size as the input , <nl> + image , the output pixel is set to 0 . <nl> <nl> images : 4D ` Tensor ` , input image ( s ) in NHWC format . <nl> transforms : 2D ` Tensor ` , projective transform ( s ) to apply to the image ( s ) . <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / image_ops_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / image_ops_test . py <nl> def _test_grad ( self , shape_to_test ) : <nl> x_init_value = test_image ) <nl> self . assertLess ( left_err , 1e - 10 ) <nl> <nl> + def _test_grad_different_shape ( self , input_shape , output_shape ) : <nl> + with self . test_session ( ) : <nl> + test_image_shape = input_shape <nl> + test_image = np . random . randn ( * test_image_shape ) <nl> + test_image_tensor = constant_op . constant ( <nl> + test_image , shape = test_image_shape ) <nl> + test_transform = image_ops . angles_to_projective_transforms ( <nl> + np . pi / 2 , 4 , 4 ) <nl> + <nl> + if len ( output_shape ) = = 2 : <nl> + resize_shape = output_shape <nl> + elif len ( output_shape ) = = 3 : <nl> + resize_shape = output_shape [ 0 : 2 ] <nl> + elif len ( output_shape ) = = 4 : <nl> + resize_shape = output_shape [ 1 : 3 ] <nl> + output = image_ops . 
transform ( <nl> + images = test_image_tensor , <nl> + transforms = test_transform , <nl> + output_shape = resize_shape ) <nl> + left_err = gradient_checker . compute_gradient_error ( <nl> + test_image_tensor , <nl> + test_image_shape , <nl> + output , <nl> + output_shape , <nl> + x_init_value = test_image ) <nl> + self . assertLess ( left_err , 1e - 10 ) <nl> + <nl> def test_grad ( self ) : <nl> self . _test_grad ( [ 16 , 16 ] ) <nl> self . _test_grad ( [ 4 , 12 , 12 ] ) <nl> self . _test_grad ( [ 3 , 4 , 12 , 12 ] ) <nl> + self . _test_grad_different_shape ( [ 16 , 16 ] , [ 8 , 8 ] ) <nl> + self . _test_grad_different_shape ( [ 4 , 12 , 3 ] , [ 8 , 24 , 3 ] ) <nl> + self . _test_grad_different_shape ( [ 3 , 4 , 12 , 3 ] , [ 3 , 8 , 24 , 3 ] ) <nl> <nl> <nl> class BipartiteMatchTest ( test_util . TensorFlowTestCase ) : <nl> mmm a / tensorflow / contrib / image / python / ops / image_ops . py <nl> ppp b / tensorflow / contrib / image / python / ops / image_ops . py <nl> def translations_to_projective_transforms ( translations , name = None ) : <nl> axis = 1 ) <nl> <nl> <nl> - def transform ( images , transforms , interpolation = " NEAREST " , name = None ) : <nl> + def transform ( images , <nl> + transforms , <nl> + interpolation = " NEAREST " , <nl> + output_shape = None , <nl> + name = None ) : <nl> " " " Applies the given transform ( s ) to the image ( s ) . <nl> <nl> Args : <nl> def transform ( images , transforms , interpolation = " NEAREST " , name = None ) : <nl> the transform mapping input points to output points . Note that gradients <nl> are not backpropagated into transformation parameters . <nl> interpolation : Interpolation mode . Supported values : " NEAREST " , " BILINEAR " . <nl> + output_shape : Output dimesion after the transform , [ height , width ] . <nl> + If None , output is the same size as input image . <nl> + <nl> + name : The name of the op . <nl> <nl> Returns : <nl> Image ( s ) with the same type and shape as ` images ` , with the given <nl> def transform ( images , transforms , interpolation = " NEAREST " , name = None ) : <nl> else : <nl> raise TypeError ( " Images should have rank between 2 and 4 . " ) <nl> <nl> + if output_shape is None : <nl> + output_shape = array_ops . shape ( images ) [ 1 : 3 ] <nl> + elif len ( output_shape ) ! = 2 : <nl> + raise TypeError ( <nl> + " output_shape must either be None or a vector of 2 elements . % s " % <nl> + str ( output_shape ) ) <nl> + <nl> if len ( transform_or_transforms . get_shape ( ) ) = = 1 : <nl> transforms = transform_or_transforms [ None ] <nl> elif transform_or_transforms . get_shape ( ) . ndims is None : <nl> def transform ( images , transforms , interpolation = " NEAREST " , name = None ) : <nl> else : <nl> raise TypeError ( " Transforms should have rank 1 or 2 . " ) <nl> output = gen_image_ops . image_projective_transform ( <nl> - images , transforms , interpolation = interpolation . upper ( ) ) <nl> + images , transforms , output_shape , interpolation = interpolation . upper ( ) ) <nl> if len ( image_or_images . get_shape ( ) ) = = 2 : <nl> return output [ 0 , : , : , 0 ] <nl> elif len ( image_or_images . get_shape ( ) ) = = 3 : <nl> def _image_projective_transform_grad ( op , grad ) : <nl> <nl> if image_or_images . dtype . base_dtype not in _IMAGE_DTYPES : <nl> raise TypeError ( " Invalid dtype % s . " % image_or_images . dtype ) <nl> - if len ( image_or_images . get_shape ( ) ) = = 2 : <nl> - images = image_or_images [ None , : , : , None ] <nl> - elif len ( image_or_images . 
get_shape ( ) ) = = 3 : <nl> - images = image_or_images [ None , : , : , : ] <nl> - elif len ( image_or_images . get_shape ( ) ) = = 4 : <nl> - images = image_or_images <nl> - else : <nl> - raise TypeError ( " Images should have rank between 2 and 4 " ) <nl> if len ( transform_or_transforms . get_shape ( ) ) = = 1 : <nl> transforms = transform_or_transforms [ None ] <nl> elif len ( transform_or_transforms . get_shape ( ) ) = = 2 : <nl> def _image_projective_transform_grad ( op , grad ) : <nl> inverse = linalg_ops . matrix_inverse ( transforms ) <nl> transforms = matrices_to_flat_transforms ( inverse ) <nl> output = gen_image_ops . image_projective_transform ( <nl> - grad , transforms , interpolation = interpolation ) <nl> - if len ( image_or_images . get_shape ( ) ) = = 2 : <nl> - return [ output [ 0 , : , : , 0 ] , None ] <nl> - elif len ( image_or_images . get_shape ( ) ) = = 3 : <nl> - return [ output [ 0 , : , : , : ] , None ] <nl> - else : <nl> - return [ output , None ] <nl> + images = grad , <nl> + transforms = transforms , <nl> + output_shape = array_ops . shape ( image_or_images ) [ 1 : 3 ] , <nl> + interpolation = interpolation ) <nl> + return [ output , None , None ] <nl> <nl> <nl> def bipartite_match ( distance_mat , <nl>
msg: Allow the output to have a different shape from the input in image.transform().
repo: tensorflow/tensorflow
sha: 28b8a3c74f93f9238fa626ec7d32fbddcb56b0a8
time: 2018-04-21T18:04:59Z
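The commit above threads a new output_shape argument through ImageProjectiveTransform and its Python wrapper. A minimal sketch of the API after this change, assuming the contrib-era tf.contrib.image wrappers around the ops touched in the diff:

import numpy as np
import tensorflow as tf

# A 1x16x16x3 input batch (NHWC), rotated 90 degrees and emitted at 8x8.
images = tf.constant(np.random.randn(1, 16, 16, 3), dtype=tf.float32)
transforms = tf.contrib.image.angles_to_projective_transforms(
    np.pi / 2, image_height=16, image_width=16)
# output_shape is [height, width]; leaving it as None keeps the input size.
rotated = tf.contrib.image.transform(
    images, transforms, interpolation="NEAREST", output_shape=[8, 8])

with tf.Session() as sess:
  print(sess.run(rotated).shape)  # (1, 8, 8, 3)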
mmm a / arangod / Cache / Cache . cpp <nl> ppp b / arangod / Cache / Cache . cpp <nl> void Cache : : requestMigrate ( uint32_t requestedLogSize ) { <nl> } <nl> <nl> void Cache : : freeValue ( CachedValue * value ) { <nl> - while ( value - > refCount . load ( ) > 0 ) { <nl> + while ( ! value - > isFreeable ( ) ) { <nl> std : : this_thread : : yield ( ) ; <nl> } <nl> <nl> mmm a / arangod / Cache / CachedValue . cpp <nl> ppp b / arangod / Cache / CachedValue . cpp <nl> <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - uint8_t const * CachedValue : : key ( ) const { <nl> - uint8_t const * buf = reinterpret_cast < uint8_t const * > ( this ) ; <nl> - return ( buf + sizeof ( CachedValue ) ) ; <nl> - } <nl> - <nl> - uint8_t const * CachedValue : : value ( ) const { <nl> - if ( valueSize = = 0 ) { <nl> - return nullptr ; <nl> - } <nl> - <nl> - uint8_t const * buf = reinterpret_cast < uint8_t const * > ( this ) ; <nl> - return ( buf + sizeof ( CachedValue ) + keySize ) ; <nl> - } <nl> - <nl> - uint64_t CachedValue : : size ( ) const { <nl> - uint64_t size = sizeof ( CachedValue ) ; <nl> - size + = keySize ; <nl> - size + = valueSize ; <nl> - return size ; <nl> - } <nl> - <nl> - bool CachedValue : : sameKey ( void const * k , uint32_t kSize ) const { <nl> - if ( keySize ! = kSize ) { <nl> - return false ; <nl> - } <nl> - <nl> - return ( 0 = = memcmp ( key ( ) , k , keySize ) ) ; <nl> - } <nl> - <nl> - void CachedValue : : lease ( ) { + + refCount ; } <nl> - <nl> - void CachedValue : : release ( ) { <nl> - if ( - - refCount = = UINT32_MAX ) { <nl> - TRI_ASSERT ( false ) ; <nl> - } <nl> - } <nl> - <nl> - bool CachedValue : : isFreeable ( ) { return ( refCount . load ( ) = = 0 ) ; } <nl> + const size_t CachedValue : : _headerAllocSize = sizeof ( CachedValue ) + <nl> + CachedValue : : _padding ; <nl> <nl> CachedValue * CachedValue : : copy ( ) const { <nl> uint8_t * buf = new uint8_t [ size ( ) ] ; <nl> - memcpy ( buf , this , size ( ) ) ; <nl> - CachedValue * value = reinterpret_cast < CachedValue * > ( buf ) ; <nl> - value - > refCount = 0 ; <nl> + CachedValue * value = new ( buf + offset ( ) ) CachedValue ( * this ) ; <nl> return value ; <nl> } <nl> <nl> - CachedValue * CachedValue : : construct ( void const * k , uint32_t kSize , <nl> - void const * v , uint64_t vSize ) { <nl> - if ( kSize = = 0 | | k = = nullptr | | ( vSize > 0 & & v = = nullptr ) ) { <nl> + CachedValue * CachedValue : : construct ( void const * k , size_t kSize , <nl> + void const * v , size_t vSize ) { <nl> + if ( kSize = = 0 | | k = = nullptr | | ( vSize > 0 & & v = = nullptr ) | | <nl> + kSize > maxKeySize | | vSize > maxValueSize ) { <nl> return nullptr ; <nl> } <nl> <nl> - uint8_t * buf = new uint8_t [ sizeof ( CachedValue ) + kSize + vSize ] ; <nl> - CachedValue * cv = reinterpret_cast < CachedValue * > ( buf ) ; <nl> - <nl> - cv - > refCount = 0 ; <nl> - cv - > keySize = kSize ; <nl> - cv - > valueSize = vSize ; <nl> - std : : memcpy ( const_cast < uint8_t * > ( cv - > key ( ) ) , k , kSize ) ; <nl> - if ( vSize > 0 ) { <nl> - std : : memcpy ( const_cast < uint8_t * > ( cv - > value ( ) ) , v , vSize ) ; <nl> - } <nl> + uint8_t * buf = new uint8_t [ _headerAllocSize + kSize + vSize ] ; <nl> + uint8_t * aligned = reinterpret_cast < uint8_t * > ( <nl> + ( reinterpret_cast < size_t > ( buf ) + _headerAllocOffset ) & <nl> + _headerAllocMask ) ; <nl> + size_t offset = buf - aligned ; <nl> + CachedValue * cv = new ( aligned ) CachedValue ( offset , k , kSize , v , vSize ) ; <nl> <nl> return cv ; <nl> } <nl> <nl> void 
CachedValue : : operator delete ( void * ptr ) { <nl> - delete [ ] reinterpret_cast < uint8_t * > ( ptr ) ; <nl> + CachedValue * cv = reinterpret_cast < CachedValue * > ( ptr ) ; <nl> + size_t offset = cv - > offset ( ) ; <nl> + cv - > ~ CachedValue ( ) ; <nl> + delete [ ] ( reinterpret_cast < uint8_t * > ( ptr ) - offset ) ; <nl> + } <nl> + <nl> + CachedValue : : CachedValue ( size_t off , void const * k , size_t kSize , <nl> + void const * v , size_t vSize ) <nl> + : _refCount ( 0 ) , <nl> + _keySize ( kSize + ( off < < _offsetShift ) ) , <nl> + _valueSize ( vSize ) { <nl> + std : : memcpy ( const_cast < uint8_t * > ( key ( ) ) , k , kSize ) ; <nl> + if ( vSize > 0 ) { <nl> + std : : memcpy ( const_cast < uint8_t * > ( value ( ) ) , v , vSize ) ; <nl> + } <nl> + } <nl> + <nl> + CachedValue : : CachedValue ( CachedValue const & other ) <nl> + : _refCount ( 0 ) , <nl> + _keySize ( other . _keySize ) , <nl> + _valueSize ( other . _valueSize ) { <nl> + std : : memcpy ( const_cast < uint8_t * > ( key ( ) ) , other . key ( ) , <nl> + keySize ( ) + valueSize ( ) ) ; <nl> } <nl> mmm a / arangod / Cache / CachedValue . h <nl> ppp b / arangod / Cache / CachedValue . h <nl> namespace cache { <nl> / / / to clients . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> struct CachedValue { <nl> + / / key size must fit in 3 bytes <nl> + static constexpr size_t maxKeySize = 0x00FFFFFFULL ; <nl> + / / value size must fit in 4 bytes <nl> + static constexpr size_t maxValueSize = 0xFFFFFFFFULL ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Reference count ( to avoid premature deletion ) <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - std : : atomic < uint32_t > refCount ; <nl> + inline uint32_t refCount ( ) const { return _refCount . 
load ( ) ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Size of the key in bytes <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint32_t keySize ; <nl> + inline size_t keySize ( ) const { <nl> + return static_cast < size_t > ( _keySize & _keyMask ) ; <nl> + } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Size of the value in bytes <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint64_t valueSize ; <nl> + inline size_t valueSize ( ) const { return static_cast < size_t > ( _valueSize ) ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Returns a pointer offset to the key <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint8_t const * key ( ) const ; <nl> + inline uint8_t const * key ( ) const { <nl> + return ( reinterpret_cast < uint8_t const * > ( this ) + sizeof ( CachedValue ) ) ; <nl> + } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Returns a pointer offset to the value <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint8_t const * value ( ) const ; <nl> + inline uint8_t const * value ( ) const { <nl> + return ( _valueSize = = 0 ) <nl> + ? 
nullptr <nl> + : reinterpret_cast < uint8_t const * > ( this ) + sizeof ( CachedValue ) + <nl> + keySize ( ) ; <nl> + } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Returns the allocated size of bytes including the key and value <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint64_t size ( ) const ; <nl> + inline size_t size ( ) const { <nl> + return _headerAllocSize + keySize ( ) + valueSize ( ) ; <nl> + } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Utility method to compare underlying key to external key <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - bool sameKey ( void const * k , uint32_t kSize ) const ; <nl> + inline bool sameKey ( void const * k , size_t kSize ) const { <nl> + return ( keySize ( ) = = kSize ) & & <nl> + ( 0 = = memcmp ( key ( ) , k , kSize ) ) ; <nl> + } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Increase reference count <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void lease ( ) ; <nl> + inline void lease ( ) { + + _refCount ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Decrease reference count <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void release ( ) ; <nl> + inline void release ( ) { - - _refCount ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Checks whether value can be freed ( i . e . no references to it ) <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - bool isFreeable ( ) ; <nl> + inline bool isFreeable ( ) const { return _refCount . 
load ( ) = = 0 ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Create a copy of this CachedValue object <nl> struct CachedValue { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Construct a CachedValue object from a given key and value <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - static CachedValue * construct ( void const * k , uint32_t kSize , void const * v , <nl> - uint64_t vSize ) ; <nl> + static CachedValue * construct ( void const * k , size_t kSize , void const * v , <nl> + size_t vSize ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Custom deleter to handle casting issues <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static void operator delete ( void * ptr ) ; <nl> - } ; <nl> <nl> - / / ensure that header size is what we expect <nl> - static_assert ( sizeof ( CachedValue ) = = 16 , " Expected sizeof ( CachedValue ) = = 16 . " ) ; <nl> + private : <nl> + static constexpr size_t _padding = alignof ( std : : atomic < uint32_t > ) - 1 ; <nl> + static const size_t _headerAllocSize ; <nl> + static constexpr size_t _headerAllocMask = ~ _padding ; <nl> + static constexpr size_t _headerAllocOffset = _padding ; <nl> + static constexpr uint32_t _keyMask = 0x00FFFFFF ; <nl> + static constexpr uint32_t _offsetMask = 0xFF000000 ; <nl> + static constexpr size_t _offsetShift = 24 ; <nl> + <nl> + std : : atomic < uint32_t > _refCount ; <nl> + uint32_t _keySize ; <nl> + uint32_t _valueSize ; <nl> + <nl> + private : <nl> + CachedValue ( size_t off , void const * k , size_t kSize , <nl> + void const * v , size_t vSize ) ; <nl> + CachedValue ( CachedValue const & other ) ; <nl> + <nl> + inline size_t offset ( ) const { <nl> + return ( ( _keySize & _offsetMask ) > > _offsetShift ) ; <nl> + } <nl> + } ; <nl> <nl> } ; / / end namespace cache <nl> } ; / / end namespace arangodb <nl> mmm a / arangod / Cache / PlainCache . cpp <nl> ppp b / arangod / Cache / PlainCache . cpp <nl> Finding PlainCache : : find ( void const * key , uint32_t keySize ) { <nl> <nl> Result PlainCache : : insert ( CachedValue * value ) { <nl> TRI_ASSERT ( value ! 
= nullptr ) ; <nl> - uint32_t hash = hashKey ( value - > key ( ) , value - > keySize ) ; <nl> + uint32_t hash = hashKey ( value - > key ( ) , value - > keySize ( ) ) ; <nl> <nl> Result status ; <nl> PlainBucket * bucket ; <nl> Result PlainCache : : insert ( CachedValue * value ) { <nl> bool allowed = true ; <nl> bool maybeMigrate = false ; <nl> int64_t change = static_cast < int64_t > ( value - > size ( ) ) ; <nl> - CachedValue * candidate = bucket - > find ( hash , value - > key ( ) , value - > keySize ) ; <nl> + CachedValue * candidate = bucket - > find ( hash , value - > key ( ) , value - > keySize ( ) ) ; <nl> <nl> if ( candidate = = nullptr & & bucket - > isFull ( ) ) { <nl> candidate = bucket - > evictionCandidate ( ) ; <nl> Result PlainCache : : insert ( CachedValue * value ) { <nl> bool eviction = false ; <nl> if ( candidate ! = nullptr ) { <nl> bucket - > evict ( candidate , true ) ; <nl> - if ( ! candidate - > sameKey ( value - > key ( ) , value - > keySize ) ) { <nl> + if ( ! candidate - > sameKey ( value - > key ( ) , value - > keySize ( ) ) ) { <nl> eviction = true ; <nl> } <nl> freeValue ( candidate ) ; <nl> mmm a / arangod / Cache / TransactionalCache . cpp <nl> ppp b / arangod / Cache / TransactionalCache . cpp <nl> Finding TransactionalCache : : find ( void const * key , uint32_t keySize ) { <nl> <nl> Result TransactionalCache : : insert ( CachedValue * value ) { <nl> TRI_ASSERT ( value ! = nullptr ) ; <nl> - uint32_t hash = hashKey ( value - > key ( ) , value - > keySize ) ; <nl> + uint32_t hash = hashKey ( value - > key ( ) , value - > keySize ( ) ) ; <nl> <nl> Result status ; <nl> TransactionalBucket * bucket ; <nl> Result TransactionalCache : : insert ( CachedValue * value ) { <nl> bool allowed = ! bucket - > isBlacklisted ( hash ) ; <nl> if ( allowed ) { <nl> int64_t change = value - > size ( ) ; <nl> - CachedValue * candidate = bucket - > find ( hash , value - > key ( ) , value - > keySize ) ; <nl> + CachedValue * candidate = bucket - > find ( hash , value - > key ( ) , value - > keySize ( ) ) ; <nl> <nl> if ( candidate = = nullptr & & bucket - > isFull ( ) ) { <nl> candidate = bucket - > evictionCandidate ( ) ; <nl> Result TransactionalCache : : insert ( CachedValue * value ) { <nl> bool eviction = false ; <nl> if ( candidate ! = nullptr ) { <nl> bucket - > evict ( candidate , true ) ; <nl> - if ( ! candidate - > sameKey ( value - > key ( ) , value - > keySize ) ) { <nl> + if ( ! candidate - > sameKey ( value - > key ( ) , value - > keySize ( ) ) ) { <nl> eviction = true ; <nl> } <nl> freeValue ( candidate ) ; <nl> mmm a / arangod / RocksDBEngine / RocksDBCollection . cpp <nl> ppp b / arangod / RocksDBEngine / RocksDBCollection . cpp <nl> arangodb : : Result RocksDBCollection : : lookupRevisionVPack ( <nl> if ( f . found ( ) ) { <nl> std : : string * value = mdr . prepareStringUsage ( ) ; <nl> value - > append ( reinterpret_cast < char const * > ( f . value ( ) - > value ( ) ) , <nl> - static_cast < size_t > ( f . value ( ) - > valueSize ) ) ; <nl> + f . value ( ) - > valueSize ( ) ) ; <nl> mdr . setManagedAfterStringUsage ( revisionId ) ; <nl> return Result ( ) ; <nl> } <nl> arangodb : : Result RocksDBCollection : : lookupRevisionVPack ( <nl> auto entry = cache : : CachedValue : : construct ( <nl> key . string ( ) . data ( ) , static_cast < uint32_t > ( key . string ( ) . size ( ) ) , <nl> value - > data ( ) , static_cast < uint64_t > ( value - > size ( ) ) ) ; <nl> - auto status = _cache - > insert ( entry ) ; <nl> - if ( status . 
fail ( ) ) { <nl> - delete entry ; <nl> + if ( entry ) { <nl> + auto status = _cache - > insert ( entry ) ; <nl> + if ( status . fail ( ) ) { <nl> + delete entry ; <nl> + } <nl> } <nl> } <nl> <nl> arangodb : : Result RocksDBCollection : : lookupRevisionVPack ( <nl> auto entry = cache : : CachedValue : : construct ( <nl> key . string ( ) . data ( ) , static_cast < uint32_t > ( key . string ( ) . size ( ) ) , <nl> value . data ( ) , static_cast < uint64_t > ( value . size ( ) ) ) ; <nl> - auto status = _cache - > insert ( entry ) ; <nl> - if ( status . fail ( ) ) { <nl> - delete entry ; <nl> + if ( entry ) { <nl> + auto status = _cache - > insert ( entry ) ; <nl> + if ( status . fail ( ) ) { <nl> + delete entry ; <nl> + } <nl> } <nl> } <nl> <nl> mmm a / arangod / RocksDBEngine / RocksDBEdgeIndex . cpp <nl> ppp b / arangod / RocksDBEngine / RocksDBEdgeIndex . cpp <nl> void RocksDBEdgeIndexIterator : : lookupInRocksDB ( StringRef fromTo ) { <nl> fromTo . data ( ) , static_cast < uint32_t > ( fromTo . size ( ) ) , <nl> _builder . slice ( ) . start ( ) , <nl> static_cast < uint64_t > ( _builder . slice ( ) . byteSize ( ) ) ) ; <nl> - bool inserted = false ; <nl> - for ( size_t attempts = 0 ; attempts < 10 ; attempts + + ) { <nl> - auto status = cc - > insert ( entry ) ; <nl> - if ( status . ok ( ) ) { <nl> - inserted = true ; <nl> - break ; <nl> + if ( entry ) { <nl> + bool inserted = false ; <nl> + for ( size_t attempts = 0 ; attempts < 10 ; attempts + + ) { <nl> + auto status = cc - > insert ( entry ) ; <nl> + if ( status . ok ( ) ) { <nl> + inserted = true ; <nl> + break ; <nl> + } <nl> + if ( status . errorNumber ( ) ! = TRI_ERROR_LOCK_TIMEOUT ) { <nl> + break ; <nl> + } <nl> } <nl> - if ( status . errorNumber ( ) ! = TRI_ERROR_LOCK_TIMEOUT ) { <nl> - break ; <nl> + if ( ! inserted ) { <nl> + LOG_TOPIC ( DEBUG , arangodb : : Logger : : CACHE ) < < " Failed to cache : " <nl> + < < fromTo . toString ( ) ; <nl> + delete entry ; <nl> } <nl> } <nl> - if ( ! inserted ) { <nl> - LOG_TOPIC ( DEBUG , arangodb : : Logger : : CACHE ) < < " Failed to cache : " <nl> - < < fromTo . toString ( ) ; <nl> - delete entry ; <nl> - } <nl> } <nl> TRI_ASSERT ( _builder . slice ( ) . isArray ( ) ) ; <nl> _builderIterator = VPackArrayIterator ( _builder . slice ( ) ) ; <nl> void RocksDBEdgeIndex : : warmupInternal ( transaction : : Methods * trx , <nl> previous . data ( ) , static_cast < uint32_t > ( previous . size ( ) ) , <nl> builder . slice ( ) . start ( ) , <nl> static_cast < uint64_t > ( builder . slice ( ) . byteSize ( ) ) ) ; <nl> - bool inserted = false ; <nl> - for ( size_t attempts = 0 ; attempts < 10 ; attempts + + ) { <nl> - auto status = cc - > insert ( entry ) ; <nl> - if ( status . ok ( ) ) { <nl> - inserted = true ; <nl> - break ; <nl> + if ( entry ) { <nl> + bool inserted = false ; <nl> + for ( size_t attempts = 0 ; attempts < 10 ; attempts + + ) { <nl> + auto status = cc - > insert ( entry ) ; <nl> + if ( status . ok ( ) ) { <nl> + inserted = true ; <nl> + break ; <nl> + } <nl> + if ( status . errorNumber ( ) ! = TRI_ERROR_LOCK_TIMEOUT ) { <nl> + break ; <nl> + } <nl> } <nl> - if ( status . errorNumber ( ) ! = TRI_ERROR_LOCK_TIMEOUT ) { <nl> - break ; <nl> + if ( ! inserted ) { <nl> + delete entry ; <nl> } <nl> } <nl> - if ( ! inserted ) { <nl> - delete entry ; <nl> - } <nl> builder . clear ( ) ; <nl> } <nl> / / Need to store <nl> void RocksDBEdgeIndex : : warmupInternal ( transaction : : Methods * trx , <nl> previous . data ( ) , static_cast < uint32_t > ( previous . size ( ) ) , <nl> builder . 
slice ( ) . start ( ) , <nl> static_cast < uint64_t > ( builder . slice ( ) . byteSize ( ) ) ) ; <nl> - bool inserted = false ; <nl> - for ( size_t attempts = 0 ; attempts < 10 ; attempts + + ) { <nl> - auto status = cc - > insert ( entry ) ; <nl> - if ( status . ok ( ) ) { <nl> - inserted = true ; <nl> - break ; <nl> + if ( entry ) { <nl> + bool inserted = false ; <nl> + for ( size_t attempts = 0 ; attempts < 10 ; attempts + + ) { <nl> + auto status = cc - > insert ( entry ) ; <nl> + if ( status . ok ( ) ) { <nl> + inserted = true ; <nl> + break ; <nl> + } <nl> + if ( status . errorNumber ( ) ! = TRI_ERROR_LOCK_TIMEOUT ) { <nl> + break ; <nl> + } <nl> } <nl> - if ( status . errorNumber ( ) ! = TRI_ERROR_LOCK_TIMEOUT ) { <nl> - break ; <nl> + if ( ! inserted ) { <nl> + delete entry ; <nl> } <nl> } <nl> - if ( ! inserted ) { <nl> - delete entry ; <nl> - } <nl> } <nl> LOG_TOPIC ( DEBUG , Logger : : FIXME ) < < " loaded n : " < < n ; <nl> } <nl> mmm a / arangod / RocksDBEngine / RocksDBPrimaryIndex . cpp <nl> ppp b / arangod / RocksDBEngine / RocksDBPrimaryIndex . cpp <nl> RocksDBToken RocksDBPrimaryIndex : : lookupKey ( transaction : : Methods * trx , <nl> static_cast < uint32_t > ( key . string ( ) . size ( ) ) ) ; <nl> if ( f . found ( ) ) { <nl> rocksdb : : Slice s ( reinterpret_cast < char const * > ( f . value ( ) - > value ( ) ) , <nl> - static_cast < size_t > ( f . value ( ) - > valueSize ) ) ; <nl> + f . value ( ) - > valueSize ( ) ) ; <nl> return RocksDBToken ( RocksDBValue : : revisionId ( s ) ) ; <nl> } <nl> } <nl> RocksDBToken RocksDBPrimaryIndex : : lookupKey ( transaction : : Methods * trx , <nl> auto entry = cache : : CachedValue : : construct ( <nl> key . string ( ) . data ( ) , static_cast < uint32_t > ( key . string ( ) . size ( ) ) , <nl> value . buffer ( ) - > data ( ) , static_cast < uint64_t > ( value . buffer ( ) - > size ( ) ) ) ; <nl> - auto status = _cache - > insert ( entry ) ; <nl> - if ( status . fail ( ) ) { <nl> - delete entry ; <nl> + if ( entry ) { <nl> + auto status = _cache - > insert ( entry ) ; <nl> + if ( status . fail ( ) ) { <nl> + delete entry ; <nl> + } <nl> } <nl> } <nl> <nl> mmm a / tests / Cache / CachedValue . cpp <nl> ppp b / tests / Cache / CachedValue . cpp <nl> <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> + const size_t padding = alignof ( std : : atomic < uint32_t > ) - 1 ; <nl> + <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> SECTION ( " test constructor with valid input " ) { <nl> uint64_t k = 1 ; <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> / / fixed key , variable value <nl> cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> REQUIRE ( nullptr ! = cv ) ; <nl> - REQUIRE ( sizeof ( uint64_t ) = = cv - > keySize ) ; <nl> - REQUIRE ( v . size ( ) = = cv - > valueSize ) ; <nl> - REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = cv - > size ( ) ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = cv - > keySize ( ) ) ; <nl> + REQUIRE ( v . size ( ) = = cv - > valueSize ( ) ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + padding + sizeof ( uint64_t ) + v . size ( ) = = cv - > size ( ) ) ; <nl> REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( cv - > key ( ) ) ) ; <nl> REQUIRE ( 0 = = memcmp ( v . data ( ) , cv - > value ( ) , v . size ( ) ) ) ; <nl> delete cv ; <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> cv = CachedValue : : construct ( v . data ( ) , static_cast < uint32_t > ( v . 
size ( ) ) , & k , <nl> sizeof ( uint64_t ) ) ; <nl> REQUIRE ( nullptr ! = cv ) ; <nl> - REQUIRE ( v . size ( ) = = cv - > keySize ) ; <nl> - REQUIRE ( sizeof ( uint64_t ) = = cv - > valueSize ) ; <nl> - REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = cv - > size ( ) ) ; <nl> + REQUIRE ( v . size ( ) = = cv - > keySize ( ) ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = cv - > valueSize ( ) ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + padding + sizeof ( uint64_t ) + v . size ( ) = = cv - > size ( ) ) ; <nl> REQUIRE ( 0 = = memcmp ( v . data ( ) , cv - > key ( ) , v . size ( ) ) ) ; <nl> REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( cv - > value ( ) ) ) ; <nl> delete cv ; <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> / / fixed key , zero length value <nl> cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , nullptr , 0 ) ; <nl> REQUIRE ( nullptr ! = cv ) ; <nl> - REQUIRE ( sizeof ( uint64_t ) = = cv - > keySize ) ; <nl> - REQUIRE ( 0ULL = = cv - > valueSize ) ; <nl> - REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) = = cv - > size ( ) ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = cv - > keySize ( ) ) ; <nl> + REQUIRE ( 0ULL = = cv - > valueSize ( ) ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + padding + sizeof ( uint64_t ) = = cv - > size ( ) ) ; <nl> REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( cv - > key ( ) ) ) ; <nl> REQUIRE ( nullptr = = cv - > value ( ) ) ; <nl> delete cv ; <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> / / nullptr value , non - zero length <nl> cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , nullptr , v . size ( ) ) ; <nl> REQUIRE ( nullptr = = cv ) ; <nl> + <nl> + / / too large key size <nl> + cv = CachedValue : : construct ( & k , 0x1000000 , v . data ( ) , v . size ( ) ) ; <nl> + REQUIRE ( nullptr = = cv ) ; <nl> + <nl> + / / too large value size <nl> + cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , 0x100000000ULL ) ; <nl> + REQUIRE ( nullptr = = cv ) ; <nl> } <nl> <nl> SECTION ( " copy ( ) should produce a correct copy " ) { <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> / / fixed key , variable value <nl> auto original = <nl> CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> + REQUIRE ( nullptr ! = original ) ; <nl> auto copy = original - > copy ( ) ; <nl> REQUIRE ( nullptr ! = copy ) ; <nl> REQUIRE ( copy ! = original ) ; <nl> - REQUIRE ( sizeof ( uint64_t ) = = copy - > keySize ) ; <nl> - REQUIRE ( v . size ( ) = = copy - > valueSize ) ; <nl> - REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = copy - > size ( ) ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = copy - > keySize ( ) ) ; <nl> + REQUIRE ( v . size ( ) = = copy - > valueSize ( ) ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + padding + sizeof ( uint64_t ) + v . size ( ) = = copy - > size ( ) ) ; <nl> REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( copy - > key ( ) ) ) ; <nl> REQUIRE ( 0 = = memcmp ( v . data ( ) , copy - > value ( ) , v . size ( ) ) ) ; <nl> delete original ; <nl> TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> <nl> auto cv = CachedValue : : construct ( <nl> k1 . data ( ) , static_cast < uint32_t > ( k1 . size ( ) ) , & v , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( nullptr ! = cv ) ; <nl> <nl> / / same key <nl> REQUIRE ( cv - > sameKey ( k1 . data ( ) , static_cast < uint32_t > ( k1 . 
size ( ) ) ) ) ; <nl> mmm a / tests / Cache / Manager . cpp <nl> ppp b / tests / Cache / Manager . cpp <nl> TEST_CASE ( " cache : : Manager " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : Manager " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> - auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> + auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> } <nl> mmm a / tests / Cache / PlainBucket . cpp <nl> ppp b / tests / Cache / PlainBucket . cpp <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 11 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! = nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> } <nl> for ( size_t i = 0 ; i < 10 ; i + + ) { <nl> CachedValue * res = <nl> - bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> / / check that insert is ignored if full <nl> bucket - > insert ( hashes [ 10 ] , ptrs [ 10 ] ) ; <nl> - CachedValue * res = bucket - > find ( hashes [ 10 ] , ptrs [ 10 ] - > key ( ) , ptrs [ 10 ] - > keySize ) ; <nl> + CachedValue * res = bucket - > find ( hashes [ 10 ] , ptrs [ 10 ] - > key ( ) , ptrs [ 10 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> <nl> bucket - > unlock ( ) ; <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! 
= nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> } <nl> for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> CachedValue * res = <nl> - bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> CachedValue * res ; <nl> - res = bucket - > remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 1 ] ) ; <nl> - res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> - res = bucket - > remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + res = bucket - > remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 0 ] ) ; <nl> - res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> - res = bucket - > remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + res = bucket - > remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 2 ] ) ; <nl> - res = bucket - > find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> <nl> bucket - > unlock ( ) ; <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 11 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! = nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> } <nl> for ( size_t i = 0 ; i < 10 ; i + + ) { <nl> CachedValue * res = <nl> - bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> CachedValue * candidate = bucket - > evictionCandidate ( ) ; <nl> REQUIRE ( candidate = = ptrs [ 0 ] ) ; <nl> bucket - > evict ( candidate , false ) ; <nl> - CachedValue * res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + CachedValue * res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> REQUIRE ( ! 
bucket - > isFull ( ) ) ; <nl> <nl> TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> candidate = bucket - > evictionCandidate ( ) ; <nl> REQUIRE ( candidate = = ptrs [ 1 ] ) ; <nl> bucket - > evict ( candidate , true ) ; <nl> - res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> REQUIRE ( ! bucket - > isFull ( ) ) ; <nl> <nl> / / check that we can insert now after eviction optimized for insertion <nl> bucket - > insert ( hashes [ 10 ] , ptrs [ 10 ] ) ; <nl> - res = bucket - > find ( hashes [ 10 ] , ptrs [ 10 ] - > key ( ) , ptrs [ 10 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 10 ] , ptrs [ 10 ] - > key ( ) , ptrs [ 10 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 10 ] ) ; <nl> <nl> bucket - > unlock ( ) ; <nl> mmm a / tests / Cache / PlainCache . cpp <nl> ppp b / tests / Cache / PlainCache . cpp <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> uint64_t j = 2 * i ; <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & j , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 1024 ; i < 256 * 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 4 * 1024 * 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> uint64_t item = lower + i ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . 
fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> uint64_t item = + + validUpper ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cacheHit - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> <nl> value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> status = cacheMiss - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> <nl> value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> status = cacheMixed - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> mmm a / tests / Cache / Rebalancer . cpp <nl> ppp b / tests / Cache / Rebalancer . cpp <nl> TEST_CASE ( " cache : : Rebalancer " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : Rebalancer " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : Rebalancer " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : Rebalancer " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = caches [ cacheIndex ] - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> mmm a / tests / Cache / TransactionalBucket . cpp <nl> ppp b / tests / Cache / TransactionalBucket . 
cpp <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 9 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! = nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> } <nl> for ( size_t i = 0 ; i < 7 ; i + + ) { <nl> CachedValue * res = <nl> - bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> / / check that insert is ignored if full <nl> bucket - > insert ( hashes [ 8 ] , ptrs [ 8 ] ) ; <nl> - CachedValue * res = bucket - > find ( hashes [ 8 ] , ptrs [ 8 ] - > key ( ) , ptrs [ 8 ] - > keySize ) ; <nl> + CachedValue * res = bucket - > find ( hashes [ 8 ] , ptrs [ 8 ] - > key ( ) , ptrs [ 8 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> <nl> bucket - > unlock ( ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! = nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> } <nl> for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> CachedValue * res = <nl> - bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> CachedValue * res ; <nl> - res = bucket - > remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 1 ] ) ; <nl> - res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> - res = bucket - > remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + res = bucket - > remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 0 ] ) ; <nl> - res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> - res = bucket - > remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + res = bucket - > remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 2 ] ) ; <nl> - res = bucket - > find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> <nl> bucket - > unlock ( ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 9 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , 
sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! = nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> } <nl> for ( size_t i = 0 ; i < 8 ; i + + ) { <nl> CachedValue * res = <nl> - bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> CachedValue * candidate = bucket - > evictionCandidate ( ) ; <nl> REQUIRE ( candidate = = ptrs [ 0 ] ) ; <nl> bucket - > evict ( candidate , false ) ; <nl> - CachedValue * res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + CachedValue * res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> REQUIRE ( ! bucket - > isFull ( ) ) ; <nl> <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> candidate = bucket - > evictionCandidate ( ) ; <nl> REQUIRE ( candidate = = ptrs [ 1 ] ) ; <nl> bucket - > evict ( candidate , true ) ; <nl> - res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> REQUIRE ( ! bucket - > isFull ( ) ) ; <nl> <nl> / / check that we can insert now after eviction optimized for insertion <nl> bucket - > insert ( hashes [ 8 ] , ptrs [ 8 ] ) ; <nl> - res = bucket - > find ( hashes [ 8 ] , ptrs [ 8 ] - > key ( ) , ptrs [ 8 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 8 ] , ptrs [ 8 ] - > key ( ) , ptrs [ 8 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 8 ] ) ; <nl> <nl> bucket - > unlock ( ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> for ( size_t i = 0 ; i < 8 ; i + + ) { <nl> ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( ptrs [ i ] ! = nullptr ) ; <nl> } <nl> <nl> success = bucket - > lock ( - 1LL ) ; <nl> TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> } <nl> } <nl> for ( size_t i = 0 ; i < 8 ; i + + ) { <nl> - res = bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> <nl> / / blacklist 1 - 5 to fill blacklist <nl> for ( size_t i = 1 ; i < 6 ; i + + ) { <nl> - bucket - > blacklist ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + bucket - > blacklist ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> } <nl> for ( size_t i = 1 ; i < 6 ; i + + ) { <nl> REQUIRE ( bucket - > isBlacklisted ( hashes [ i ] ) ) ; <nl> - res = bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> } <nl> / / verify actually not fully blacklisted <nl> REQUIRE ( ! bucket - > isFullyBlacklisted ( ) ) ; <nl> REQUIRE ( ! 
bucket - > isBlacklisted ( hashes [ 6 ] ) ) ; <nl> / / verify it didn ' t remove matching hash with non - matching key <nl> - res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( res = = ptrs [ 0 ] ) ; <nl> <nl> / / verify we can ' t insert a key with a blacklisted hash <nl> bucket - > insert ( hashes [ 1 ] , ptrs [ 1 ] ) ; <nl> - res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> <nl> / / proceed to fully blacklist <nl> - bucket - > blacklist ( hashes [ 6 ] , ptrs [ 6 ] - > key ( ) , ptrs [ 6 ] - > keySize ) ; <nl> + bucket - > blacklist ( hashes [ 6 ] , ptrs [ 6 ] - > key ( ) , ptrs [ 6 ] - > keySize ( ) ) ; <nl> REQUIRE ( bucket - > isBlacklisted ( hashes [ 6 ] ) ) ; <nl> - res = bucket - > find ( hashes [ 6 ] , ptrs [ 6 ] - > key ( ) , ptrs [ 6 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 6 ] , ptrs [ 6 ] - > key ( ) , ptrs [ 6 ] - > keySize ( ) ) ; <nl> REQUIRE ( nullptr = = res ) ; <nl> / / make sure it still didn ' t remove non - matching key <nl> - res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + res = bucket - > find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ( ) ) ; <nl> REQUIRE ( ptrs [ 0 ] = = res ) ; <nl> / / make sure it ' s fully blacklisted <nl> REQUIRE ( bucket - > isFullyBlacklisted ( ) ) ; <nl> mmm a / tests / Cache / TransactionalCache . cpp <nl> ppp b / tests / Cache / TransactionalCache . cpp <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> uint64_t j = 2 * i ; <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & j , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 1024 ; i < 256 * 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . 
ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> REQUIRE ( status . fail ( ) ) ; <nl> delete value ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . ok ( ) ) { <nl> reinserted + + ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> for ( uint64_t i = 0 ; i < 4 * 1024 * 1024 ; i + + ) { <nl> CachedValue * value = <nl> CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> uint64_t item = lower + i ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> } <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> + TRI_ASSERT ( value ! = nullptr ) ; <nl> auto status = cache - > insert ( value ) ; <nl> if ( status . fail ( ) ) { <nl> delete value ; <nl> mmm a / tests / Cache / TransactionalStore . cpp <nl> ppp b / tests / Cache / TransactionalStore . cpp <nl> TransactionalStore : : Document TransactionalStore : : lookup ( <nl> memcpy ( & result , buffer . data ( ) , sizeof ( Document ) ) ; <nl> CachedValue * value = CachedValue : : construct ( & key , sizeof ( uint64_t ) , <nl> & result , sizeof ( Document ) ) ; <nl> - auto status = _cache - > insert ( value ) ; <nl> - if ( status . fail ( ) ) { <nl> - delete value ; <nl> + if ( value ) { <nl> + auto status = _cache - > insert ( value ) ; <nl> + if ( status . fail ( ) ) { <nl> + delete value ; <nl> + } <nl> } <nl> } <nl> } <nl>
Improvements to CachedValue internals and handling . ( )
arangodb/arangodb
18e8c19271096835fe7060ec08ce6463353cbcf7
2017-08-28T13:07:16Z
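The ArangoDB diff above repeats one guard pattern at every call site: check the pointer returned by CachedValue::construct before handing it to the cache, retry a bounded number of times on lock timeouts in the edge-index paths, and delete the entry whenever the cache never accepted ownership. The sketch below illustrates only that pattern; the Cache, Status, and CachedValue types here are simplified stand-ins, not ArangoDB's real classes.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-ins for the cache types touched in the diff above; the
// real ArangoDB classes differ, this only illustrates the ownership pattern.
struct CachedValue {
  std::vector<std::uint8_t> key, value;
  // Mirrors the assumption in the diff: construct() may return nullptr
  // (oversized key/value, null value with non-zero size), so callers check it.
  static CachedValue* construct(const void* k, std::uint32_t keySize,
                                const void* v, std::uint64_t valueSize) {
    if (keySize == 0 || keySize > 0xFFFFFFu || valueSize > 0xFFFFFFFFu ||
        (v == nullptr && valueSize != 0)) {
      return nullptr;
    }
    auto* cv = new CachedValue();
    auto* kb = static_cast<const std::uint8_t*>(k);
    cv->key.assign(kb, kb + keySize);
    if (valueSize != 0) {
      auto* vb = static_cast<const std::uint8_t*>(v);
      cv->value.assign(vb, vb + valueSize);
    }
    return cv;
  }
};

struct Status {
  int code = 0;
  bool ok() const { return code == 0; }
  int errorNumber() const { return code; }
};

constexpr int kLockTimeout = 1;  // stand-in for TRI_ERROR_LOCK_TIMEOUT

struct Cache {
  std::vector<CachedValue*> slots;
  Status insert(CachedValue* entry) {  // takes ownership only on success
    slots.push_back(entry);
    return Status{};
  }
};

// The guarded-insert pattern from the diff: tolerate a nullptr from
// construct(), retry a bounded number of times on lock timeouts, and delete
// the entry if the cache never accepted ownership.
void cacheInsertGuarded(Cache& cache, const void* key, std::uint32_t keySize,
                        const void* value, std::uint64_t valueSize) {
  CachedValue* entry = CachedValue::construct(key, keySize, value, valueSize);
  if (entry == nullptr) {
    return;  // invalid input sizes: simply skip caching
  }
  bool inserted = false;
  for (std::size_t attempts = 0; attempts < 10; ++attempts) {
    Status status = cache.insert(entry);
    if (status.ok()) {
      inserted = true;
      break;
    }
    if (status.errorNumber() != kLockTimeout) {
      break;  // only lock timeouts are worth retrying
    }
  }
  if (!inserted) {
    delete entry;  // the cache never took ownership
  }
}

int main() {
  Cache cache;
  std::uint64_t k = 1, v = 2;
  cacheInsertGuarded(cache, &k, sizeof(k), &v, sizeof(v));
  for (CachedValue* cv : cache.slots) delete cv;
  return 0;
}
```

The key point is ownership: insert() takes the entry only on success, so every failure path (nullptr from construct, exhausted retries, a non-retryable error) must either skip the insert or delete the entry itself.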
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> install : <nl> # Linux dependencies <nl> # <nl> - if [ [ " $ TRAVIS_OS_NAME " = = " linux " & & " $ BUILD " = = " Kodi " ] ] ; then <nl> - sudo apt - get install - qq automake autopoint build - essential cmake curl dcadec - dev default - jre gawk gdc <nl> + sudo apt - get install - qq automake autopoint build - essential cmake curl dcadec - dev default - jre gawk gdb gdc <nl> gettext git - core gperf libasound2 - dev libass - dev libbz2 - dev libcap - dev libcdio - dev libcrossguid - dev libcurl3 <nl> libcurl4 - openssl - dev libdbus - 1 - dev libfontconfig - dev libegl1 - mesa - dev libfreetype6 - dev libfribidi - dev libgif - dev <nl> libiso9660 - dev libjpeg - dev libltdl - dev liblzo2 - dev libmicrohttpd - dev libmodplug - dev libmysqlclient - dev libnfs - dev <nl> before_script : <nl> # Linux <nl> # <nl> - if [ [ " $ TRAVIS_OS_NAME " = = " linux " & & " $ BUILD " = = " Kodi " ] ] ; then <nl> + ulimit - c unlimited - S ; <nl> if [ [ " $ TOOLS " = = " Autotools " ] ] ; then <nl> cd $ TRAVIS_BUILD_DIR & & <nl> . / bootstrap ; <nl> script : <nl> make - j3 ; <nl> fi <nl> <nl> + after_failure : <nl> + - COREFILE = $ ( find . - maxdepth 1 - name " core * " | head - n 1 ) <nl> + - if [ [ - f " $ COREFILE " ] ] ; then <nl> + gdb - c " $ COREFILE " kodi - test - ex " thread apply all bt " - ex " set pagination 0 " - batch ; <nl> + fi <nl> + <nl> # Disable annoying emails <nl> # <nl> notifications : <nl>
[ travis ] Print coredumps for failed tests
xbmc/xbmc
65aa269ae3cdef45185db748d348745796ff1b1c
2016-02-20T08:04:32Z
mmm a / src / rss / rssarticle . h <nl> ppp b / src / rss / rssarticle . h <nl> class RssArticle { <nl> bool m_read ; <nl> } ; <nl> <nl> + RssArticle hashToRssArticle ( RssFeed * parent , const QVariantHash & hash ) ; <nl> + <nl> # endif / / RSSARTICLE_H <nl>
Fix compilation error
qbittorrent/qBittorrent
adf615d0cc0add2e66880a18c81ccefcb7a43395
2011-01-25T18:51:14Z
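The qBittorrent fix above is a one-line build repair: the free function hashToRssArticle is declared in rssarticle.h so the translation units that call it can see its prototype. A minimal stand-alone illustration of the same declaration/definition split follows; the names are invented, and the two halves are collapsed into one file here purely so the snippet compiles on its own.

```cpp
#include <iostream>
#include <map>
#include <string>

// In a real project this block is the header (e.g. article.h): the plain
// declaration is what lets other .cpp files compile calls to the function.
struct Article {
  std::string title;
};
Article hashToArticle(const std::map<std::string, std::string>& hash);

// ...and this block is the matching definition in article.cpp.
Article hashToArticle(const std::map<std::string, std::string>& hash) {
  Article a;
  auto it = hash.find("title");
  a.title = (it != hash.end()) ? it->second : "";
  return a;
}

int main() {
  Article a = hashToArticle({{"title", "hello"}});
  std::cout << a.title << '\n';  // prints "hello"
  return 0;
}
```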
mmm a / src / mongo / tools / oplog . cpp <nl> ppp b / src / mongo / tools / oplog . cpp <nl> class OplogTool : public Tool { <nl> <nl> log ( ) < < " going to connect " < < endl ; <nl> <nl> - OplogReader r ( false ) ; <nl> + OplogReader r ; <nl> r . setTailingQueryOptions ( QueryOption_SlaveOk | QueryOption_AwaitData ) ; <nl> r . connect ( getParam ( " from " ) ) ; <nl> <nl>
fix buildbot
mongodb/mongo
6b1cb2cd32ca32c23e00618eae3fa3fe73a0e938
2013-08-05T19:06:56Z
mmm a / configure . in <nl> ppp b / configure . in <nl> else <nl> fi <nl> <nl> # check for library basenames <nl> - if test " $ host_vendor " ! = " apple " ; then <nl> - # libcurl <nl> - LIBCURL_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libcurl . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ - z " $ LIBCURL_BASENAME " ] ; then <nl> - echo " Unable to determine basename of libcurl library " <nl> - fi <nl> - <nl> - # libFLAC <nl> - LIBFLAC_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libFLAC . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBFLAC_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libFLAC library ] ) <nl> - fi <nl> - <nl> - # libvorbisfile <nl> - LIBVORBISFILE_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libvorbisfile . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBVORBISFILE_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libvorbisfile library ] ) <nl> - fi <nl> - <nl> - # libmodplug <nl> - LIBMODPLUG_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libmodplug . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBMODPLUG_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libmodplug library ] ) <nl> - fi <nl> - <nl> - # libfaad <nl> - LIBFAAD_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libfaad . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBFAAD_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libfaad library ] ) <nl> - fi <nl> - <nl> - # libmad <nl> - LIBMAD_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libmad . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBMAD_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libmad library ] ) <nl> - fi <nl> - <nl> - # libogg <nl> - LIBOGG_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libogg . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBOGG_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libogg library ] ) <nl> - fi <nl> - <nl> - # libvorbisenc <nl> - LIBVORBISENC_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libvorbisenc . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBVORBISENC_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libvorbisenc library ] ) <nl> + AC_DEFUN ( [ XB_FIND_SONAME ] , <nl> + [ <nl> + AC_MSG_CHECKING ( [ for lib $ 2 soname ] ) <nl> + $ 1_SONAME = $ ( gcc - print - file - name = lib $ 2 . 
so | \ <nl> + while read output ; do objdump - p $ output | \ <nl> + grep " SONAME " | \ <nl> + sed - e ' s / \ + SONAME \ + / / ' ; done 2 > / dev / null ) <nl> + if [ [ - z " $ $ 1_SONAME " ] ] ; then <nl> + AC_MSG_RESULT ( [ no ] ) <nl> + AC_MSG_ERROR ( [ Unable to determine soname of lib $ 2 library ] ) <nl> + else <nl> + AC_MSG_RESULT ( [ $ $ 1_SONAME ] ) <nl> + AC_SUBST ( $ 1_SONAME ) <nl> fi <nl> + ] ) <nl> <nl> - # libvorbis <nl> - LIBVORBIS_BASENAME = $ ( gcc $ CFLAGS - print - file - name = libvorbis . so | \ <nl> - while read output ; do objdump - p $ output | \ <nl> - grep SONAME | \ <nl> - awk ' BEGIN { FS = " " } ; { print $ 2 } ' ; done ) <nl> - if [ [ - z " $ LIBVORBIS_BASENAME " ] ] ; then <nl> - AC_MSG_ERROR ( [ Error occurred determining basename of libvorbis library ] ) <nl> - fi <nl> + if test " $ host_vendor " ! = " apple " ; then <nl> + XB_FIND_SONAME ( [ CURL ] , [ curl ] ) <nl> + XB_FIND_SONAME ( [ FLAC ] , [ FLAC ] ) <nl> + XB_FIND_SONAME ( [ VORBISFILE ] , [ vorbisfile ] ) <nl> + XB_FIND_SONAME ( [ MODPLUG ] , [ modplug ] ) <nl> + XB_FIND_SONAME ( [ FAAD ] , [ faad ] ) <nl> + XB_FIND_SONAME ( [ MAD ] , [ mad ] ) <nl> + XB_FIND_SONAME ( [ OGG ] , [ ogg ] ) <nl> + XB_FIND_SONAME ( [ VORBISENC ] , [ vorbisenc ] ) <nl> + XB_FIND_SONAME ( [ VORBIS ] , [ vorbis ] ) <nl> fi <nl> <nl> # PulseAudio <nl> AC_SUBST ( LIBVORBIS_BASENAME ) <nl> # running the script , anything else if not . <nl> AC_DEFUN ( [ XB_CONFIG_MODULE ] , [ <nl> AC_CONFIG_COMMANDS_POST ( [ <nl> + if false ; then <nl> if [ [ $ 3 ! = " 1 " ] ] ; then <nl> if [ [ - d $ 1 ] ] ; then <nl> pushd $ 1 <nl> if [ [ $ 3 ! = " 1 " ] ] ; then <nl> else <nl> AC_MSG_NOTICE ( [ [ Skipping configuration of submodule $ 1 . ] ] ) <nl> fi <nl> + else <nl> + true <nl> + fi <nl> ] ) <nl> ] ) <nl> <nl> mmm a / xbmc / DllPaths_generated . h . in <nl> ppp b / xbmc / DllPaths_generated . h . in <nl> <nl> # ifdef __APPLE__ <nl> # define DLL_PATH_LIBCURL " special : / / xbmc / system / libcurl - @ ARCH @ . so " <nl> # else <nl> - # define DLL_PATH_LIBCURL " @ LIBCURL_BASENAME @ " <nl> + # define DLL_PATH_LIBCURL " @ CURL_SONAME @ " <nl> # endif <nl> # endif <nl> <nl> <nl> # define DLL_PATH_YM_CODEC " special : / / xbmc / system / players / paplayer / stsoundlibrary - @ ARCH @ . so " <nl> # define DLL_PATH_SHN_CODEC " special : / / xbmc / system / players / paplayer / libshnplay - @ ARCH @ . so " <nl> # if defined ( _LINUX ) & & ! defined ( __APPLE__ ) <nl> - # define DLL_PATH_FLAC_CODEC " @ LIBFLAC_BASENAME @ " <nl> - # define DLL_PATH_OGG_CODEC " @ LIBVORBISFILE_BASENAME @ " <nl> - # define DLL_PATH_MODPLUG_CODEC " @ LIBMODPLUG_BASENAME @ " <nl> + # define DLL_PATH_FLAC_CODEC " @ FLAC_SONAME @ " <nl> + # define DLL_PATH_OGG_CODEC " @ VORBISFILE_SONAME @ " <nl> + # define DLL_PATH_MODPLUG_CODEC " @ MODPLUG_SONAME @ " <nl> # else <nl> # define DLL_PATH_FLAC_CODEC " special : / / xbmc / system / players / paplayer / libFLAC - @ ARCH @ . so " <nl> # define DLL_PATH_OGG_CODEC " special : / / xbmc / system / players / paplayer / vorbisfile - @ ARCH @ . so " <nl> <nl> # define DLL_PATH_LIBMPEG2 " special : / / xbmc / system / players / dvdplayer / libmpeg2 - @ ARCH @ . so " <nl> # define DLL_PATH_LIBDVDNAV " special : / / xbmc / system / players / dvdplayer / libdvdnav - @ ARCH @ . so " <nl> # if defined ( _LINUX ) & & ! 
defined ( __APPLE__ ) <nl> - # define DLL_PATH_LIBFAAD " @ LIBFAAD_BASENAME @ " <nl> - # define DLL_PATH_LIBMAD " @ LIBMAD_BASENAME @ " <nl> + # define DLL_PATH_LIBFAAD " @ FAAD_SONAME @ " <nl> + # define DLL_PATH_LIBMAD " @ MAD_SONAME @ " <nl> # else <nl> # define DLL_PATH_LIBFAAD " special : / / xbmc / system / players / dvdplayer / libfaad - @ ARCH @ . so " <nl> # define DLL_PATH_LIBMAD " special : / / xbmc / system / players / dvdplayer / libmad - @ ARCH @ . so " <nl> <nl> / * cdrip * / <nl> # if defined ( _LINUX ) & & ! defined ( __APPLE__ ) <nl> # define DLL_PATH_LAME_ENC " libmp3lame . so . 0 " <nl> - # define DLL_PATH_OGG " @ LIBOGG_BASENAME @ " <nl> - # define DLL_PATH_VORBIS_ENC " @ LIBVORBISENC_BASENAME @ " <nl> - # define DLL_PATH_VORBIS " @ LIBVORBIS_BASENAME @ " <nl> + # define DLL_PATH_OGG " @ OGG_SONAME @ " <nl> + # define DLL_PATH_VORBIS_ENC " @ VORBISENC_SONAME @ " <nl> + # define DLL_PATH_VORBIS " @ VORBIS_SONAME @ " <nl> # else <nl> # define DLL_PATH_LAME_ENC " special : / / xbmc / system / cdrip / lame_enc - @ ARCH @ . so " <nl> # define DLL_PATH_OGG " special : / / xbmc / system / cdrip / ogg - @ ARCH @ . so " <nl>
Added : macro to resolve sonames in configure ( reduce duplication )
xbmc/xbmc
fd0db9dff5aaa93db848bb3a552425536fcb0fb1
2010-05-02T22:09:44Z
mmm a / code / sorting / shaker_sort / ShakerSort . java <nl> ppp b / code / sorting / shaker_sort / ShakerSort . java <nl> <nl> public class ShakerSort { <nl> <nl> static void sort ( int [ ] arr ) { <nl> - boolean swapped = true ; <nl> + boolean swapped ; <nl> int llim = 0 ; <nl> int rlim = arr . length - 1 ; <nl> int curr = llim ; <nl> int tmp ; <nl> <nl> while ( llim < = rlim ) { <nl> + swapped = false ; <nl> while ( curr + 1 < = rlim ) { <nl> if ( arr [ curr ] > arr [ curr + 1 ] ) { <nl> tmp = arr [ curr ] ; <nl> arr [ curr ] = arr [ curr + 1 ] ; <nl> arr [ curr + 1 ] = tmp ; <nl> + swapped = true ; <nl> } <nl> curr + = 1 ; <nl> } <nl> + if ( ! swapped ) { <nl> + return ; <nl> + } <nl> rlim - = 1 ; <nl> curr = rlim ; <nl> + swapped = false ; <nl> while ( curr - 1 > = llim ) { <nl> if ( arr [ curr ] < arr [ curr - 1 ] ) { <nl> tmp = arr [ curr ] ; <nl> arr [ curr ] = arr [ curr - 1 ] ; <nl> arr [ curr - 1 ] = tmp ; <nl> + swapped = true ; <nl> } <nl> curr - = 1 ; <nl> } <nl> + if ( ! swapped ) { <nl> + return ; <nl> + } <nl> llim + = 1 ; <nl> curr = llim ; <nl> } <nl> public static void main ( String [ ] args ) { <nl> } <nl> } <nl> <nl> + <nl>
check swapped
OpenGenus/cosmos
889dc8e1eb2ff7c5b1db4bb31bbb3f3a8172d468
2017-10-09T20:22:11Z
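The cosmos commit above ("check swapped") adds early termination to a bidirectional bubble (shaker) sort: if a full forward or backward pass performs no swap, the array is already sorted and the remaining passes can be skipped. Below is a compact C++ rendering of the same idea; it is an illustrative rewrite, not the repository's Java file.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Shaker (cocktail) sort with the early-exit optimization from the commit
// above: if a forward or backward pass performs no swap, the range is sorted.
void shakerSort(std::vector<int>& a) {
  if (a.empty()) return;
  std::size_t lo = 0, hi = a.size() - 1;
  while (lo <= hi) {
    bool swapped = false;
    for (std::size_t i = lo; i < hi; ++i) {      // forward pass
      if (a[i] > a[i + 1]) {
        std::swap(a[i], a[i + 1]);
        swapped = true;
      }
    }
    if (!swapped) return;                        // already sorted
    --hi;
    swapped = false;
    for (std::size_t i = hi; i > lo; --i) {      // backward pass
      if (a[i] < a[i - 1]) {
        std::swap(a[i], a[i - 1]);
        swapped = true;
      }
    }
    if (!swapped) return;                        // already sorted
    ++lo;
  }
}

int main() {
  std::vector<int> v{5, 1, 4, 2, 8, 0, 2};
  shakerSort(v);
  for (int x : v) std::cout << x << ' ';  // prints: 0 1 2 2 4 5 8
  std::cout << '\n';
  return 0;
}
```

On an input that is already sorted (or becomes sorted early), the swapped flag turns the quadratic worst case into a single linear pass.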
mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> # This value changes every day with an automatic CL . It can be modified in code <nl> # via ` forward_compatibility_horizon ( ) ` or with the environment variable <nl> # TF_FORWARD_COMPATIBILITY_DELTA_DAYS , which is added to the compatibility date . <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 2 , 20 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 2 , 21 ) <nl> _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = " TF_FORWARD_COMPATIBILITY_DELTA_DAYS " <nl> _FORWARD_COMPATIBILITY_DATE_NUMBER = None <nl> <nl>
compat : Update forward compatibility horizon to 2020 - 02 - 21
tensorflow/tensorflow
aeddd93b419d88d7134cab5f59fdca58b6b31cc3
2020-02-21T09:05:26Z
mmm a / hphp / hhvm / process - init . cpp <nl> ppp b / hphp / hhvm / process - init . cpp <nl> void ProcessInit ( ) { <nl> ClassInfo : : SetHook ( & vm_class_info_hook ) ; <nl> <nl> / / Create the global tx64 object <nl> - g_translator = tx64 = new TranslatorX64 ( ) ; <nl> - tx64 - > initUniqueStubs ( ) ; <nl> + JIT : : g_translator = JIT : : tx64 = new JIT : : TranslatorX64 ( ) ; <nl> + JIT : : tx64 - > initUniqueStubs ( ) ; <nl> <nl> / / Save the current options , and set things up so that <nl> / / systemlib . php can be read from and stored in the <nl> mmm a / hphp / runtime / base / hardware - counter . cpp <nl> ppp b / hphp / runtime / base / hardware - counter . cpp <nl> bool HardwareCounter : : setPerfEvents ( const String & events ) { <nl> while ( s ) { <nl> int len = strlen ( s ) ; <nl> char * event = url_decode ( s , len ) ; <nl> - bool isPseudoEvent = TranslatorX64 : : isPseudoEvent ( event ) ; <nl> + bool isPseudoEvent = JIT : : TranslatorX64 : : isPseudoEvent ( event ) ; <nl> m_pseudoEvents = m_pseudoEvents | | isPseudoEvent ; <nl> if ( ! isPseudoEvent & & ! eventExists ( event ) & & ! addPerfEvent ( event ) ) { <nl> return false ; <nl> void HardwareCounter : : getPerfEvents ( Array & ret ) { <nl> ret . set ( m_counters [ i ] - > m_desc , m_counters [ i ] - > read ( ) ) ; <nl> } <nl> if ( m_pseudoEvents ) { <nl> - tx64 - > getPerfCounters ( ret ) ; <nl> + JIT : : tx64 - > getPerfCounters ( ret ) ; <nl> } <nl> } <nl> <nl> mmm a / hphp / runtime / vm / bytecode . cpp <nl> ppp b / hphp / runtime / vm / bytecode . cpp <nl> using std : : string ; <nl> using JIT : : VMRegAnchor ; <nl> using JIT : : EagerVMRegAnchor ; <nl> using JIT : : tx64 ; <nl> + using JIT : : tl_regState ; <nl> + using JIT : : VMRegState ; <nl> <nl> # if DEBUG <nl> # define OPTBLD_INLINE <nl> OPTBLD_INLINE void VMExecutionContext : : iopIdx ( IOP_ARGS ) { <nl> TypedValue * key = m_stack . indTV ( 1 ) ; <nl> TypedValue * arr = m_stack . indTV ( 2 ) ; <nl> <nl> - TypedValue result = genericIdx ( * arr , * key , * def ) ; <nl> + TypedValue result = JIT : : genericIdx ( * arr , * key , * def ) ; <nl> m_stack . popTV ( ) ; <nl> m_stack . popTV ( ) ; <nl> tvRefcountedDecRef ( arr ) ; <nl> mmm a / hphp / runtime / vm / debug / dwarf . h <nl> ppp b / hphp / runtime / vm / debug / dwarf . h <nl> <nl> # include < dwarf . h > <nl> # include < vector > <nl> <nl> - using namespace HPHP : : JIT ; <nl> - <nl> namespace HPHP { <nl> namespace Debug { <nl> <nl> + using JIT : : TCA ; <nl> + <nl> typedef enum { <nl> RAX , <nl> RDX , <nl> typedef std : : map < TCA , FunctionInfo * > FuncDB ; <nl> typedef vector < FunctionInfo * > FuncPtrDB ; <nl> <nl> struct DwarfInfo { <nl> - typedef std : : map < TCA , TransRec > TransDB ; <nl> + typedef std : : map < TCA , JIT : : TransRec > TransDB ; <nl> <nl> vector < DwarfChunk * > m_dwarfChunks ; <nl> / * Array of chunks indexed by lg ( # functions in chunk ) + 1 . <nl> mmm a / hphp / runtime / vm / debug / elfwriter . h <nl> ppp b / hphp / runtime / vm / debug / elfwriter . h <nl> <nl> # include < vector > <nl> # include < string > <nl> <nl> - using namespace HPHP : : JIT ; <nl> - <nl> namespace HPHP { <nl> namespace Debug { <nl> <nl> mmm a / hphp / runtime / vm / debugger - hook . cpp <nl> ppp b / hphp / runtime / vm / debugger - hook . 
cpp <nl> namespace HPHP { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> TRACE_SET_MOD ( debuggerflow ) ; <nl> + using JIT : : tx64 ; <nl> <nl> / / Hook called from the bytecode interpreter before every opcode executed while <nl> / / a debugger is attached . The debugger may choose to hold the thread below <nl> mmm a / hphp / runtime / vm / func . cpp <nl> ppp b / hphp / runtime / vm / func . cpp <nl> <nl> namespace HPHP { <nl> <nl> TRACE_SET_MOD ( hhbc ) ; <nl> + using JIT : : tx64 ; <nl> + <nl> const StringData * Func : : s___call = makeStaticString ( " __call " ) ; <nl> const StringData * Func : : s___callStatic = <nl> makeStaticString ( " __callStatic " ) ; <nl> mmm a / hphp / runtime / vm / jit / translator . cpp <nl> ppp b / hphp / runtime / vm / jit / translator . cpp <nl> <nl> # define KindOfUnknown DontUseKindOfUnknownInThisFile <nl> # define KindOfInvalid DontUseKindOfInvalidInThisFile <nl> <nl> + namespace { <nl> + TRACE_SET_MOD ( trans ) ; <nl> + } <nl> + <nl> namespace HPHP { <nl> namespace JIT { <nl> <nl> using namespace HPHP ; <nl> using HPHP : : JIT : : Type ; <nl> using HPHP : : JIT : : HhbcTranslator ; <nl> <nl> - TRACE_SET_MOD ( trans ) <nl> - <nl> static __thread BiasedCoin * dbgTranslateCoin ; <nl> Translator * g_translator ; <nl> Lease Translator : : s_writeLease ; <nl> std : : string traceletShape ( const Tracelet & trace ) { <nl> <nl> void invalidatePath ( const std : : string & path ) { <nl> TRACE ( 1 , " invalidatePath : abspath % s \ n " , path . c_str ( ) ) ; <nl> - PendQ : : defer ( new DeferredPathInvalidate ( path ) ) ; <nl> + PendQ : : defer ( new JIT : : DeferredPathInvalidate ( path ) ) ; <nl> } <nl> <nl> } / / HPHP <nl> mmm a / hphp / runtime / vm / unit . cpp <nl> ppp b / hphp / runtime / vm / unit . cpp <nl> Class * Unit : : loadClass ( const NamedEntity * ne , <nl> if ( LIKELY ( ( cls = ne - > getCachedClass ( ) ) ! = nullptr ) ) { <nl> return cls ; <nl> } <nl> - VMRegAnchor _ ; <nl> + JIT : : VMRegAnchor _ ; <nl> AutoloadHandler : : s_instance - > invokeHandler ( <nl> StrNR ( const_cast < StringData * > ( name ) ) ) ; <nl> return Unit : : lookupClass ( ne ) ; <nl>
Remove a couple ' using namespace ' declarations from headers
facebook/hhvm
55d0bc9c5003e19d4b2821c9cc145fc038cb9f41
2013-12-17T17:37:07Z
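The HHVM commit above removes 'using namespace HPHP::JIT' from headers such as dwarf.h and elfwriter.h, replacing it with targeted using-declarations (using JIT::TCA;) and fully qualified names at the call sites. The single-file sketch below illustrates the general header-hygiene rule behind that change; the jit and debug namespaces and types are invented for the example, and in real code the first part would live in a header.

```cpp
#include <cstdint>
#include <map>

// --- what would be the header (e.g. debug_info.h) ---------------------------
// Avoid `using namespace jit;` at namespace scope in a header: it would dump
// every jit:: name into every translation unit that includes the header and
// can silently change overload resolution there.
namespace jit {
using TCA = unsigned char*;            // "translation cache address"
struct TransRec { std::uint64_t id; };
}  // namespace jit

namespace debug {

using jit::TCA;  // targeted using-declaration: import only the name we need

struct DwarfInfo {
  // ...and fully qualify the remaining names at the point of use.
  using TransDB = std::map<TCA, jit::TransRec>;
  TransDB transDB;
};

}  // namespace debug

// --- what would be a .cpp that includes the header --------------------------
int main() {
  debug::DwarfInfo info;
  unsigned char buffer[1];
  info.transDB[buffer] = jit::TransRec{42};
  return static_cast<int>(info.transDB.size()) - 1;  // returns 0
}
```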
mmm a / hphp / hack / src / hackfmt / hack_format . ml <nl> ppp b / hphp / hack / src / hackfmt / hack_format . ml <nl> <nl> * <nl> * ) <nl> <nl> - module SyntaxKind = Full_fidelity_syntax_kind <nl> + module Env = Format_env <nl> + module SourceText = Full_fidelity_source_text <nl> module Syntax = Full_fidelity_editable_syntax <nl> - module TriviaKind = Full_fidelity_trivia_kind <nl> + module SyntaxKind = Full_fidelity_syntax_kind <nl> + module Token = Full_fidelity_editable_token <nl> + module TokenKind = Full_fidelity_token_kind <nl> module Trivia = Full_fidelity_editable_trivia <nl> + module TriviaKind = Full_fidelity_trivia_kind <nl> module Rewriter = Full_fidelity_rewriter . WithSyntax ( Syntax ) <nl> - module Env = Format_env <nl> - module SourceText = Full_fidelity_source_text <nl> <nl> open Hh_core <nl> - open Syntax <nl> open Doc <nl> <nl> let make_list = Syntax . make_list SourceText . empty 0 <nl> let make_missing ( ) = Syntax . make_missing SourceText . empty 0 <nl> * <nl> * Exported via the ` transform ` alias below . * ) <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> - match syntax node with <nl> - | Missing - > <nl> + match Syntax . syntax node with <nl> + | Syntax . Missing - > <nl> Nothing <nl> - | Token x - > <nl> - let open EditableToken in <nl> + | Syntax . Token x - > <nl> + let open Token in <nl> let token_kind = kind x in <nl> Concat [ <nl> begin <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> end ; <nl> transform_trailing_trivia ( trailing x ) ; <nl> ] <nl> - | SyntaxList _ - > <nl> + | Syntax . SyntaxList _ - > <nl> failwith ( Printf . sprintf <nl> " Error : SyntaxList should never be handled directly ; <nl> offending text is ' % s ' . " ( Syntax . text node ) ) ; <nl> - | EndOfFile x - > <nl> + | Syntax . EndOfFile x - > <nl> t env x . end_of_file_token <nl> - | Script x - > <nl> - begin match x . script_declarations . syntax with <nl> - | SyntaxList ( header : : declarations ) when is_markup_section header - > <nl> + | Syntax . Script x - > <nl> + begin match Syntax . syntax x . script_declarations with <nl> + | Syntax . SyntaxList ( header : : declarations ) <nl> + when Syntax . is_markup_section header - > <nl> Concat [ <nl> t env header ; <nl> Newline ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_list env x . script_declarations ; <nl> ] <nl> end <nl> - | LiteralExpression { literal_expression } - > <nl> + | Syntax . LiteralExpression { literal_expression } - > <nl> ( * Double quoted string literals can create a list * ) <nl> - let open EditableToken in <nl> + let open Token in <nl> let wrap_with_literal_type token transformed = <nl> let open TokenKind in <nl> match kind token with <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> | FloatingLiteral - > NumericLiteral transformed <nl> | _ - > transformed <nl> in <nl> - begin match syntax literal_expression with <nl> - | Token tok - > wrap_with_literal_type tok ( t env literal_expression ) <nl> - | SyntaxList l - > <nl> - let last = trailing_token literal_expression in <nl> + begin match Syntax . syntax literal_expression with <nl> + | Syntax . Token tok - > <nl> + wrap_with_literal_type tok ( t env literal_expression ) <nl> + | Syntax . SyntaxList l - > <nl> + let last = Syntax . trailing_token literal_expression in <nl> begin match last with <nl> | Some tok - > wrap_with_literal_type tok ( Concat ( List . 
map l ( t env ) ) ) <nl> | _ - > failwith " Expected Token " <nl> end <nl> | _ - > failwith " Expected Token or SyntaxList " <nl> end <nl> - | MarkupSection { <nl> + | Syntax . MarkupSection { <nl> markup_prefix = prefix ; <nl> markup_text = text ; <nl> markup_suffix = suffix ; <nl> _ } - > <nl> - if is_missing prefix <nl> + if Syntax . is_missing prefix <nl> then <nl> ( * leading markup section <nl> for hh files - strip leading whitespaces \ newlines - they are not <nl> emitted and having them in Hack file is illegal anyways * ) <nl> - let is_hh_script = match suffix . syntax with <nl> - | MarkupSuffix { markup_suffix_name = { <nl> + let is_hh_script = match Syntax . syntax suffix with <nl> + | Syntax . MarkupSuffix { markup_suffix_name = Syntax . { <nl> syntax = Token t ; _ <nl> } ; _ } - > <nl> - ( EditableToken . text t ) = " hh " <nl> + ( Token . text t ) = " hh " <nl> | _ - > false <nl> in <nl> let rec all_whitespaces s i = <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> | ' ' | ' \ t ' | ' \ r ' | ' \ n ' - > all_whitespaces s ( i + 1 ) <nl> | _ - > false ) <nl> in <nl> - let text_contains_only_whitespaces = match text . syntax with <nl> - | Token t - > all_whitespaces ( EditableToken . text t ) 0 <nl> + let text_contains_only_whitespaces = match Syntax . syntax text with <nl> + | Syntax . Token t - > all_whitespaces ( Token . text t ) 0 <nl> | _ - > false <nl> in <nl> if is_hh_script & & text_contains_only_whitespaces <nl> then t env suffix <nl> else transform_simple env node <nl> else transform_simple env node <nl> - | MarkupSuffix _ <nl> - | SimpleTypeSpecifier _ <nl> - | VariableExpression _ <nl> - | PipeVariableExpression _ <nl> - | PropertyDeclarator _ <nl> - | ConstantDeclarator _ <nl> - | StaticDeclarator _ <nl> - | ScopeResolutionExpression _ <nl> - | EmbeddedMemberSelectionExpression _ <nl> - | EmbeddedSubscriptExpression _ <nl> - | PostfixUnaryExpression _ <nl> - | XHPRequired _ <nl> - | XHPSimpleClassAttribute _ <nl> - | XHPClose _ <nl> - | TypeConstant _ <nl> - | GenericTypeSpecifier _ <nl> - | NullableTypeSpecifier _ <nl> - | SoftTypeSpecifier _ <nl> - | ListItem _ - > <nl> + | Syntax . MarkupSuffix _ <nl> + | Syntax . SimpleTypeSpecifier _ <nl> + | Syntax . VariableExpression _ <nl> + | Syntax . PipeVariableExpression _ <nl> + | Syntax . PropertyDeclarator _ <nl> + | Syntax . ConstantDeclarator _ <nl> + | Syntax . StaticDeclarator _ <nl> + | Syntax . ScopeResolutionExpression _ <nl> + | Syntax . EmbeddedMemberSelectionExpression _ <nl> + | Syntax . EmbeddedSubscriptExpression _ <nl> + | Syntax . PostfixUnaryExpression _ <nl> + | Syntax . XHPRequired _ <nl> + | Syntax . XHPSimpleClassAttribute _ <nl> + | Syntax . XHPClose _ <nl> + | Syntax . TypeConstant _ <nl> + | Syntax . GenericTypeSpecifier _ <nl> + | Syntax . NullableTypeSpecifier _ <nl> + | Syntax . SoftTypeSpecifier _ <nl> + | Syntax . ListItem _ - > <nl> transform_simple env node <nl> - | QualifiedName { qualified_name_parts ; } - > <nl> + | Syntax . QualifiedName { qualified_name_parts ; } - > <nl> handle_possible_list env qualified_name_parts <nl> - | ExpressionStatement _ - > <nl> + | Syntax . ExpressionStatement _ - > <nl> transform_simple_statement env node <nl> - | EnumDeclaration { <nl> + | Syntax . EnumDeclaration { <nl> enum_attribute_spec = attr ; <nl> enum_keyword = kw ; <nl> enum_name = name ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ; <nl> Newline ; <nl> ] <nl> - | Enumerator { <nl> + | Syntax . 
Enumerator { <nl> enumerator_name = name ; <nl> enumerator_equal = eq_kw ; <nl> enumerator_value = value ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | AliasDeclaration { <nl> + | Syntax . AliasDeclaration { <nl> alias_attribute_spec = attr ; <nl> alias_keyword = kw ; <nl> alias_name = name ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | PropertyDeclaration { <nl> + | Syntax . PropertyDeclaration { <nl> property_modifiers = modifiers ; <nl> property_type = prop_type ; <nl> property_declarators = declarators ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | NamespaceDeclaration { <nl> + | Syntax . NamespaceDeclaration { <nl> namespace_keyword = kw ; <nl> namespace_name = name ; <nl> namespace_body = body } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env body ; <nl> Newline ; <nl> ] <nl> - | NamespaceBody { <nl> + | Syntax . NamespaceBody { <nl> namespace_left_brace = left_b ; <nl> namespace_declarations = decls ; <nl> namespace_right_brace = right_b } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> braced_block_nest env left_b right_b [ handle_possible_list env decls ] ; <nl> ] <nl> - | NamespaceEmptyBody { <nl> + | Syntax . NamespaceEmptyBody { <nl> namespace_semicolon = semi } - > <nl> Concat [ <nl> t env semi ; <nl> ] <nl> - | NamespaceUseDeclaration { <nl> + | Syntax . NamespaceUseDeclaration { <nl> namespace_use_keyword = kw ; <nl> namespace_use_kind = use_kind ; <nl> namespace_use_clauses = clauses ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | NamespaceGroupUseDeclaration { <nl> + | Syntax . NamespaceGroupUseDeclaration { <nl> namespace_group_use_keyword = kw ; <nl> namespace_group_use_kind = use_kind ; <nl> namespace_group_use_prefix = prefix ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | NamespaceUseClause { <nl> + | Syntax . NamespaceUseClause { <nl> namespace_use_clause_kind = use_kind ; <nl> namespace_use_name = name ; <nl> namespace_use_as = as_kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> when_present alias space ; <nl> t env alias ; <nl> ] <nl> - | FunctionDeclaration { <nl> + | Syntax . FunctionDeclaration { <nl> function_attribute_spec = attr ; <nl> function_declaration_header = header ; <nl> function_body = body } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_compound_statement env ~ allow_collapse : true body ; <nl> Newline ; <nl> ] <nl> - | FunctionDeclarationHeader { <nl> + | Syntax . FunctionDeclarationHeader { <nl> function_modifiers = modifiers ; <nl> function_keyword = kw ; <nl> function_ampersand = amp ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> transform_fn_decl_name env modifiers kw amp name type_params leftp ) ; <nl> transform_fn_decl_args env params rightp colon ret_type where ; <nl> ] <nl> - | WhereClause { <nl> + | Syntax . WhereClause { <nl> where_clause_keyword = where ; <nl> where_clause_constraints = constraints } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . 
t = <nl> Space ; <nl> handle_possible_list env constraints ~ after_each : ( fun _ - > Space ) ; <nl> ] <nl> - | WhereConstraint { <nl> + | Syntax . WhereConstraint { <nl> where_constraint_left_type = left ; <nl> where_constraint_operator = op ; <nl> where_constraint_right_type = right } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> t env right ; <nl> ] <nl> - | MethodishDeclaration { <nl> + | Syntax . MethodishDeclaration { <nl> methodish_attribute = attr ; <nl> methodish_function_decl_header = func_decl ; <nl> methodish_function_body = body ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env attr ; <nl> when_present attr newline ; <nl> ( <nl> - let fn_name , args_and_where = match syntax func_decl with <nl> - | FunctionDeclarationHeader { <nl> + let fn_name , args_and_where = match Syntax . syntax func_decl with <nl> + | Syntax . FunctionDeclarationHeader { <nl> function_modifiers = modifiers ; <nl> function_keyword = kw ; <nl> function_ampersand = amp ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | ClassishDeclaration { <nl> + | Syntax . ClassishDeclaration { <nl> classish_attribute = attr ; <nl> classish_modifiers = modifiers ; <nl> classish_keyword = kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ) ; <nl> t env body ; <nl> ] <nl> - | ClassishBody { <nl> + | Syntax . ClassishBody { <nl> classish_body_left_brace = left_b ; <nl> classish_body_elements = body ; <nl> classish_body_right_brace = right_b } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ; <nl> Newline ; <nl> ] <nl> - | TraitUsePrecedenceItem { <nl> + | Syntax . TraitUsePrecedenceItem { <nl> trait_use_precedence_item_name = name ; <nl> trait_use_precedence_item_keyword = kw ; <nl> trait_use_precedence_item_removed_names = removed_names } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env removed_names ; <nl> Newline ; <nl> ] <nl> - | TraitUseAliasItem { <nl> + | Syntax . TraitUseAliasItem { <nl> trait_use_alias_item_aliasing_name = aliasing_name ; <nl> trait_use_alias_item_keyword = kw ; <nl> trait_use_alias_item_modifiers = visibility ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env aliased_name ; <nl> Newline ; <nl> ] <nl> - | TraitUseConflictResolution { <nl> + | Syntax . TraitUseConflictResolution { <nl> trait_use_conflict_resolution_keyword = kw ; <nl> trait_use_conflict_resolution_names = elements ; <nl> trait_use_conflict_resolution_left_brace = lb ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Newline ; <nl> t env rb ; <nl> ] <nl> - | TraitUse { <nl> + | Syntax . TraitUse { <nl> trait_use_keyword = kw ; <nl> trait_use_names = elements ; <nl> trait_use_semicolon = semi } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | RequireClause { <nl> + | Syntax . RequireClause { <nl> require_keyword = kw ; <nl> require_kind = kind ; <nl> require_name = name ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | ConstDeclaration { <nl> + | Syntax . ConstDeclaration { <nl> const_abstract = abstr ; <nl> const_keyword = kw ; <nl> const_type_specifier = const_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . 
t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | TypeConstDeclaration { <nl> + | Syntax . TypeConstDeclaration { <nl> type_const_abstract = abs ; <nl> type_const_keyword = kw ; <nl> type_const_type_keyword = type_kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | ParameterDeclaration { <nl> + | Syntax . ParameterDeclaration { <nl> parameter_attribute = attr ; <nl> parameter_visibility = visibility ; <nl> parameter_call_convention = callconv ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env callconv ; <nl> when_present callconv space ; <nl> t env param_type ; <nl> - if is_missing visibility & & is_missing callconv & & is_missing param_type <nl> + if Syntax . is_missing visibility <nl> + & & Syntax . is_missing callconv <nl> + & & Syntax . is_missing param_type <nl> then t env name <nl> else Concat [ <nl> Space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ; <nl> t env default ; <nl> ] <nl> - | VariadicParameter { <nl> + | Syntax . VariadicParameter { <nl> variadic_parameter_call_convention = callconv ; <nl> variadic_parameter_type = type_var ; <nl> variadic_parameter_ellipsis = ellipsis } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env type_var ; <nl> t env ellipsis ; <nl> ] <nl> - | AttributeSpecification { <nl> + | Syntax . AttributeSpecification { <nl> attribute_specification_left_double_angle = left_da ; <nl> attribute_specification_attributes = attrs ; <nl> attribute_specification_right_double_angle = right_da ; } - > <nl> transform_argish env ~ allow_trailing : false left_da attrs right_da <nl> - | Attribute { <nl> + | Syntax . Attribute { <nl> attribute_name = name ; <nl> attribute_left_paren = left_p ; <nl> attribute_values = values ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env name ; <nl> transform_argish env left_p values right_p ; <nl> ] <nl> - | InclusionExpression { <nl> + | Syntax . InclusionExpression { <nl> inclusion_require = kw ; <nl> inclusion_filename = expr ; } - > <nl> Concat [ <nl> t env kw ; <nl> - ( match syntax expr with <nl> - | ParenthesizedExpression _ - > Nothing <nl> + ( match Syntax . syntax expr with <nl> + | Syntax . ParenthesizedExpression _ - > Nothing <nl> | _ - > Space <nl> ) ; <nl> SplitWith Cost . Base ; <nl> t env expr ; <nl> ] <nl> - | InclusionDirective { <nl> + | Syntax . InclusionDirective { <nl> inclusion_expression = expr ; <nl> inclusion_semicolon = semi ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | CompoundStatement { <nl> + | Syntax . CompoundStatement { <nl> compound_left_brace ; <nl> compound_statements ; <nl> compound_right_brace ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> compound_right_brace ; <nl> Newline ; <nl> ] <nl> - | AlternateLoopStatement { <nl> + | Syntax . AlternateLoopStatement { <nl> alternate_loop_opening_colon ; <nl> alternate_loop_statements ; <nl> alternate_loop_closing_keyword ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> alternate_loop_closing_semicolon ; <nl> Newline ; <nl> ] <nl> - | UnsetStatement { <nl> + | Syntax . UnsetStatement { <nl> unset_keyword = kw ; <nl> unset_left_paren = left_p ; <nl> unset_variables = args ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . 
t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | WhileStatement x - > <nl> + | Syntax . WhileStatement x - > <nl> Concat [ <nl> t env x . while_keyword ; <nl> Space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_compound_statement env x . while_body ; <nl> Newline ; <nl> ] <nl> - | DeclareDirectiveStatement x - > <nl> + | Syntax . DeclareDirectiveStatement x - > <nl> Concat [ <nl> t env x . declare_directive_keyword ; <nl> Space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env x . declare_directive_semicolon ; <nl> Newline ; <nl> ] <nl> - | DeclareBlockStatement x - > <nl> + | Syntax . DeclareBlockStatement x - > <nl> Concat [ <nl> t env x . declare_block_keyword ; <nl> Space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_compound_statement env x . declare_block_body ; <nl> Newline ; <nl> ] <nl> - | UsingStatementBlockScoped x - > <nl> + | Syntax . UsingStatementBlockScoped x - > <nl> Concat [ <nl> t env x . using_block_await_keyword ; <nl> when_present x . using_block_await_keyword space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_compound_statement env x . using_block_body ; <nl> Newline ; <nl> ] <nl> - | UsingStatementFunctionScoped x - > <nl> + | Syntax . UsingStatementFunctionScoped x - > <nl> Concat [ <nl> t env x . using_function_await_keyword ; <nl> when_present x . using_function_await_keyword space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env x . using_function_semicolon ; <nl> Newline ; <nl> ] <nl> - | IfStatement { <nl> + | Syntax . IfStatement { <nl> if_keyword = kw ; <nl> if_left_paren = left_p ; <nl> if_condition = condition ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env else_clause ; <nl> Newline ; <nl> ] <nl> - | ElseifClause { <nl> + | Syntax . ElseifClause { <nl> elseif_keyword = kw ; <nl> elseif_left_paren = left_p ; <nl> elseif_condition = condition ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> transform_condition env left_p condition right_p ; <nl> handle_possible_compound_statement env body ; <nl> ] <nl> - | ElseClause x - > <nl> + | Syntax . ElseClause x - > <nl> Concat [ <nl> t env x . else_keyword ; <nl> - match syntax x . else_statement with <nl> - | IfStatement _ - > Concat [ <nl> + match Syntax . syntax x . else_statement with <nl> + | Syntax . IfStatement _ - > Concat [ <nl> Space ; <nl> t env x . else_statement ; <nl> Space ; <nl> ] <nl> | _ - > handle_possible_compound_statement env x . else_statement <nl> ] <nl> - | IfEndIfStatement { <nl> + | Syntax . IfEndIfStatement { <nl> if_endif_keyword = kw ; <nl> if_endif_left_paren = left_p ; <nl> if_endif_condition = condition ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semicolon ; <nl> Newline ; <nl> ] <nl> - | ElseifColonClause { <nl> + | Syntax . ElseifColonClause { <nl> elseif_colon_keyword = kw ; <nl> elseif_colon_left_paren = left_p ; <nl> elseif_colon_condition = condition ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env colon ; <nl> handle_possible_compound_statement env body ; <nl> ] <nl> - | ElseColonClause x - > <nl> + | Syntax . ElseColonClause x - > <nl> Concat [ <nl> t env x . else_colon_keyword ; <nl> - match syntax x . else_colon_statement with <nl> - | IfStatement _ - > Concat [ <nl> + match Syntax . syntax x . else_colon_statement with <nl> + | Syntax . 
IfStatement _ - > Concat [ <nl> Space ; <nl> t env x . else_colon_statement ; <nl> Space ; <nl> ] <nl> | _ - > handle_possible_compound_statement env x . else_colon_statement <nl> ] <nl> - | TryStatement { <nl> + | Syntax . TryStatement { <nl> try_keyword = kw ; <nl> try_compound_statement = body ; <nl> try_catch_clauses = catch_clauses ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env finally_clause ; <nl> Newline ; <nl> ] <nl> - | CatchClause { <nl> + | Syntax . CatchClause { <nl> catch_keyword = kw ; <nl> catch_left_paren = left_p ; <nl> catch_type = ex_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ; <nl> handle_possible_compound_statement env body ; <nl> ] <nl> - | FinallyClause { <nl> + | Syntax . FinallyClause { <nl> finally_keyword = kw ; <nl> finally_body = body ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> handle_possible_compound_statement env body ; <nl> ] <nl> - | DoStatement { <nl> + | Syntax . DoStatement { <nl> do_keyword = do_kw ; <nl> do_body = body ; <nl> do_while_keyword = while_kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | ForStatement { <nl> + | Syntax . ForStatement { <nl> for_keyword = kw ; <nl> for_left_paren = left_p ; <nl> for_initializer = init ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_compound_statement env body ; <nl> Newline ; <nl> ] <nl> - | ForeachStatement { <nl> + | Syntax . ForeachStatement { <nl> foreach_keyword = kw ; <nl> foreach_left_paren = left_p ; <nl> foreach_collection = collection ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_compound_statement env body ; <nl> Newline ; <nl> ] <nl> - | SwitchStatement { <nl> + | Syntax . SwitchStatement { <nl> switch_keyword = kw ; <nl> switch_left_paren = left_p ; <nl> switch_expression = expr ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> switch_left_brace = left_b ; <nl> switch_sections = sections ; <nl> switch_right_brace = right_b ; } - > <nl> - let sections = syntax_node_to_list sections in <nl> + let sections = Syntax . syntax_node_to_list sections in <nl> Concat [ <nl> t env kw ; <nl> Space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> braced_block_nest env left_b right_b ( List . map sections ( t env ) ) ; <nl> Newline ; <nl> ] <nl> - | SwitchSection { <nl> + | Syntax . SwitchSection { <nl> switch_section_labels = labels ; <nl> switch_section_statements = statements ; <nl> switch_section_fallthrough = fallthrough ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> in <nl> let upto_fallthrough = List . rev upto_fallthrough in <nl> let after_fallthrough = List . rev after_fallthrough in <nl> - let labels = syntax_node_to_list labels in <nl> - let statements = syntax_node_to_list statements in <nl> + let labels = Syntax . syntax_node_to_list labels in <nl> + let statements = Syntax . syntax_node_to_list statements in <nl> ( * When the statements in the SwitchSection are wrapped in a single <nl> * CompoundStatement , special - case the opening curly brace to appear on <nl> * the same line as the case label . * ) <nl> let is_scoped_section = <nl> match statements with <nl> - | [ { syntax = CompoundStatement _ ; _ } ] - > true <nl> + | [ Syntax . 
{ syntax = CompoundStatement _ ; _ } ] - > true <nl> | _ - > false <nl> in <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> else BlockNest [ handle_list env statements ] ; <nl> t env fallthrough ; <nl> ] <nl> - | CaseLabel { <nl> + | Syntax . CaseLabel { <nl> case_keyword = kw ; <nl> case_expression = expr ; <nl> case_colon = colon ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env expr ; <nl> t env colon ; <nl> ] <nl> - | DefaultLabel { <nl> + | Syntax . DefaultLabel { <nl> default_keyword = kw ; <nl> default_colon = colon ; } - > <nl> Concat [ <nl> t env kw ; <nl> t env colon ; <nl> ] <nl> - | SwitchFallthrough { <nl> + | Syntax . SwitchFallthrough { <nl> fallthrough_keyword = kw ; <nl> fallthrough_semicolon = semi ; } - > <nl> Concat [ <nl> t env kw ; <nl> t env semi ; <nl> ] <nl> - | ReturnStatement { <nl> + | Syntax . ReturnStatement { <nl> return_keyword = kw ; <nl> return_expression = expr ; <nl> return_semicolon = semi ; } - > <nl> transform_keyword_expression_statement env kw expr semi <nl> - | GotoLabel { goto_label_name ; goto_label_colon } - > <nl> + | Syntax . GotoLabel { goto_label_name ; goto_label_colon } - > <nl> Concat [ <nl> t env goto_label_name ; <nl> t env goto_label_colon ; <nl> Newline ; <nl> ] <nl> - | GotoStatement { <nl> + | Syntax . GotoStatement { <nl> goto_statement_keyword ; <nl> goto_statement_label_name ; <nl> goto_statement_semicolon ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env goto_statement_semicolon ; <nl> Newline ; <nl> ] <nl> - | ThrowStatement { <nl> + | Syntax . ThrowStatement { <nl> throw_keyword = kw ; <nl> throw_expression = expr ; <nl> throw_semicolon = semi ; } - > <nl> transform_keyword_expression_statement env kw expr semi <nl> - | BreakStatement { <nl> + | Syntax . BreakStatement { <nl> break_keyword = kw ; <nl> break_level = expr ; <nl> break_semicolon = semi ; } - > <nl> transform_keyword_expression_statement env kw expr semi <nl> - | ContinueStatement { <nl> + | Syntax . ContinueStatement { <nl> continue_keyword = kw ; <nl> continue_level = level ; <nl> continue_semicolon = semi ; } - > <nl> transform_keyword_expression_statement env kw level semi <nl> - | FunctionStaticStatement { <nl> + | Syntax . FunctionStaticStatement { <nl> static_static_keyword = static_kw ; <nl> static_declarations = declarators ; <nl> static_semicolon = semi ; } - > <nl> transform_keyword_expr_list_statement env static_kw declarators semi <nl> - | EchoStatement { <nl> + | Syntax . EchoStatement { <nl> echo_keyword = kw ; <nl> echo_expressions = expr_list ; <nl> echo_semicolon = semi ; } - > <nl> - ( match syntax expr_list with <nl> - | SyntaxList [ { syntax = ListItem { list_item = expr ; _ } ; _ } ] <nl> - when kind expr = SyntaxKind . ParenthesizedExpression - > <nl> + ( match Syntax . syntax expr_list with <nl> + | Syntax . SyntaxList [ <nl> + Syntax . { syntax = ListItem { list_item = expr ; _ } ; _ } ] <nl> + when Syntax . kind expr = SyntaxKind . ParenthesizedExpression - > <nl> Concat [ <nl> t env kw ; <nl> t env expr ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> | _ - > <nl> transform_keyword_expr_list_statement env kw expr_list semi <nl> ) <nl> - | GlobalStatement { <nl> + | Syntax . GlobalStatement { <nl> global_keyword = kw ; <nl> global_variables = var_list ; <nl> global_semicolon = semi ; } - > <nl> transform_keyword_expr_list_statement env kw var_list semi <nl> - | SimpleInitializer { <nl> + | Syntax . 
SimpleInitializer { <nl> simple_initializer_equal = eq_kw ; <nl> simple_initializer_value = value ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> SplitWith Cost . Base ; <nl> Nest [ t env value ] ; <nl> ] <nl> - | AnonymousFunction { <nl> + | Syntax . AnonymousFunction { <nl> anonymous_static_keyword = static_kw ; <nl> anonymous_async_keyword = async_kw ; <nl> anonymous_coroutine_keyword = coroutine_kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ~ allow_collapse : true <nl> body ; <nl> ] <nl> - | Php7AnonymousFunction { <nl> + | Syntax . Php7AnonymousFunction { <nl> php7_anonymous_static_keyword = static_kw ; <nl> php7_anonymous_async_keyword = async_kw ; <nl> php7_anonymous_coroutine_keyword = coroutine_kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ~ allow_collapse : true <nl> body ; <nl> ] <nl> - | AnonymousFunctionUseClause { <nl> + | Syntax . AnonymousFunctionUseClause { <nl> anonymous_use_keyword = kw ; <nl> anonymous_use_left_paren = left_p ; <nl> anonymous_use_variables = vars ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> transform_argish env left_p vars right_p ; <nl> ] <nl> - | LambdaExpression { <nl> + | Syntax . LambdaExpression { <nl> lambda_async = async ; <nl> lambda_coroutine = coroutine ; <nl> lambda_signature = signature ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env arrow ; <nl> handle_lambda_body env body ; <nl> ] <nl> - | LambdaSignature { <nl> + | Syntax . LambdaSignature { <nl> lambda_left_paren = lp ; <nl> lambda_parameters = params ; <nl> lambda_right_paren = rp ; <nl> lambda_colon = colon ; <nl> lambda_type = ret_type ; } - > <nl> transform_argish_with_return_type env lp params rp colon ret_type <nl> - | CastExpression _ - > <nl> - Span ( List . map ( children node ) ( t env ) ) <nl> - | MemberSelectionExpression { <nl> + | Syntax . CastExpression _ - > <nl> + Span ( List . map ( Syntax . children node ) ( t env ) ) <nl> + | Syntax . MemberSelectionExpression { <nl> member_object ; <nl> member_operator ; <nl> member_name ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> member_name <nl> ) <nl> None <nl> - | SafeMemberSelectionExpression { <nl> + | Syntax . SafeMemberSelectionExpression { <nl> safe_member_object ; <nl> safe_member_operator ; <nl> safe_member_name ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> safe_member_name <nl> ) <nl> None <nl> - | YieldExpression { <nl> + | Syntax . YieldExpression { <nl> yield_keyword = kw ; <nl> yield_operand = operand ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> SplitWith Cost . Base ; <nl> Nest [ t env operand ] ; <nl> ] <nl> - | YieldFromExpression { <nl> + | Syntax . YieldFromExpression { <nl> yield_from_yield_keyword = yield_kw ; <nl> yield_from_from_keyword = from_kw ; <nl> yield_from_operand = operand ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> SplitWith Cost . Base ; <nl> Nest [ t env operand ] ; <nl> ] <nl> - | PrefixUnaryExpression { <nl> + | Syntax . PrefixUnaryExpression { <nl> prefix_unary_operator = operator ; <nl> prefix_unary_operand = operand ; } - > <nl> Concat [ <nl> t env operator ; <nl> - ( match syntax operator with <nl> - | Token x - > <nl> + ( match Syntax . syntax operator with <nl> + | Syntax . 
Token x - > <nl> let is_parenthesized = <nl> - match syntax operand with <nl> - | ParenthesizedExpression _ - > true <nl> + match Syntax . syntax operand with <nl> + | Syntax . ParenthesizedExpression _ - > true <nl> | _ - > false <nl> in <nl> - let open EditableToken . TokenKind in <nl> - ( match EditableToken . kind x with <nl> + let open TokenKind in <nl> + ( match Token . kind x with <nl> | Await | Clone | Suspend - > Space <nl> | Print - > if is_parenthesized then Nothing else Space <nl> | _ - > Nothing <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ) ; <nl> t env operand ; <nl> ] <nl> - | BinaryExpression { <nl> + | Syntax . BinaryExpression { <nl> binary_left_operand ; <nl> binary_operator ; <nl> binary_right_operand ; } - > <nl> transform_binary_expression env ~ is_nested : false <nl> ( binary_left_operand , binary_operator , binary_right_operand ) <nl> - | InstanceofExpression { <nl> + | Syntax . InstanceofExpression { <nl> instanceof_left_operand = left ; <nl> instanceof_operator = kw ; <nl> instanceof_right_operand = right ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> SplitWith Cost . Base ; <nl> Nest [ t env right ] ; <nl> ] <nl> - | IsExpression { <nl> + | Syntax . IsExpression { <nl> is_left_operand = left ; <nl> is_operator = kw ; <nl> is_right_operand = right ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> SplitWith Cost . Base ; <nl> Nest [ t env right ] ; <nl> ] <nl> - | ConditionalExpression { <nl> + | Syntax . ConditionalExpression { <nl> conditional_test = test_expr ; <nl> conditional_question = q_kw ; <nl> conditional_consequence = true_expr ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ) ; <nl> t env c_kw ; <nl> Space ; <nl> - if not ( is_missing true_expr ) & & env . Env . indent_width = 2 <nl> + if not ( Syntax . is_missing true_expr ) & & env . Env . indent_width = 2 <nl> then Nest [ t env false_expr ] <nl> else t env false_expr ; <nl> ] ) <nl> - | FunctionCallExpression { <nl> + | Syntax . FunctionCallExpression { <nl> function_call_receiver ; <nl> function_call_left_paren ; <nl> function_call_argument_list ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> function_call_left_paren <nl> function_call_argument_list <nl> function_call_right_paren <nl> - | FunctionCallWithTypeArgumentsExpression { <nl> + | Syntax . FunctionCallWithTypeArgumentsExpression { <nl> function_call_with_type_arguments_receiver ; <nl> function_call_with_type_arguments_type_args ; <nl> function_call_with_type_arguments_left_paren ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> function_call_with_type_arguments_left_paren <nl> function_call_with_type_arguments_argument_list <nl> function_call_with_type_arguments_right_paren <nl> - | EvalExpression { <nl> + | Syntax . EvalExpression { <nl> eval_keyword = kw ; <nl> eval_left_paren = left_p ; <nl> eval_argument = arg ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_braced_item env left_p arg right_p ; <nl> ] <nl> - | EmptyExpression { <nl> + | Syntax . EmptyExpression { <nl> empty_keyword = kw ; <nl> empty_left_paren = left_p ; <nl> empty_argument = arg ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_braced_item env left_p arg right_p ; <nl> ] <nl> - | IssetExpression { <nl> + | Syntax . 
IssetExpression { <nl> isset_keyword = kw ; <nl> isset_left_paren = left_p ; <nl> isset_argument_list = args ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env ~ allow_trailing : false left_p args right_p ; <nl> ] <nl> - | DefineExpression { <nl> + | Syntax . DefineExpression { <nl> define_keyword = kw ; <nl> define_left_paren = left_p ; <nl> define_argument_list = args ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env left_p args right_p ; <nl> ] <nl> - | HaltCompilerExpression { <nl> + | Syntax . HaltCompilerExpression { <nl> halt_compiler_keyword = kw ; <nl> halt_compiler_left_paren = left_p ; <nl> halt_compiler_argument_list = args ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env left_p args right_p ; <nl> ] <nl> - | ParenthesizedExpression { <nl> + | Syntax . ParenthesizedExpression { <nl> parenthesized_expression_left_paren = left_p ; <nl> parenthesized_expression_expression = expr ; <nl> parenthesized_expression_right_paren = right_p ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env right_p <nl> ] ) ; <nl> ] <nl> - | BracedExpression { <nl> + | Syntax . BracedExpression { <nl> braced_expression_left_brace = left_b ; <nl> braced_expression_expression = expr ; <nl> braced_expression_right_brace = right_b ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env left_b ; <nl> Split ; <nl> let rule = <nl> - if List . is_empty ( trailing_trivia left_b ) <nl> - & & List . is_empty ( trailing_trivia expr ) <nl> + if List . is_empty ( Syntax . trailing_trivia left_b ) <nl> + & & List . is_empty ( Syntax . trailing_trivia expr ) <nl> then Rule . Simple Cost . Base <nl> else Rule . Parental <nl> in <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env right_b <nl> ] ) <nl> ] <nl> - | EmbeddedBracedExpression { <nl> + | Syntax . EmbeddedBracedExpression { <nl> embedded_braced_expression_left_brace = left_b ; <nl> embedded_braced_expression_expression = expr ; <nl> embedded_braced_expression_right_brace = right_b ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Nest [ t env expr ] ; <nl> t env right_b ; <nl> ] <nl> - | ListExpression { <nl> + | Syntax . ListExpression { <nl> list_keyword = kw ; <nl> list_left_paren = lp ; <nl> list_members = members ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env lp members rp ; <nl> ] <nl> - | CollectionLiteralExpression { <nl> + | Syntax . CollectionLiteralExpression { <nl> collection_literal_name = name ; <nl> collection_literal_left_brace = left_b ; <nl> collection_literal_initializers = initializers ; <nl> collection_literal_right_brace = right_b ; } - > <nl> transform_container_literal <nl> env ~ spaces : true name left_b initializers right_b <nl> - | ObjectCreationExpression { <nl> + | Syntax . ObjectCreationExpression { <nl> object_creation_new_keyword = newkw ; <nl> object_creation_object = what ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> t env what ; <nl> ] <nl> - | ConstructorCall { <nl> + | Syntax . ConstructorCall { <nl> constructor_call_type = obj_type ; <nl> constructor_call_left_paren = left_p ; <nl> constructor_call_argument_list = arg_list ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . 
t = <nl> t env obj_type ; <nl> transform_argish env left_p arg_list right_p ; <nl> ] <nl> - | AnonymousClass { <nl> + | Syntax . AnonymousClass { <nl> anonymous_class_class_keyword = classkw ; <nl> anonymous_class_left_paren = left_p ; <nl> anonymous_class_argument_list = arg_list ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ) ; <nl> t env body ; <nl> ] <nl> - | ArrayCreationExpression { <nl> + | Syntax . ArrayCreationExpression { <nl> array_creation_left_bracket = left_b ; <nl> array_creation_members = members ; <nl> array_creation_right_bracket = right_b ; } - > <nl> transform_argish env left_b members right_b <nl> - | ArrayIntrinsicExpression { <nl> + | Syntax . ArrayIntrinsicExpression { <nl> array_intrinsic_keyword = kw ; <nl> array_intrinsic_left_paren = left_p ; <nl> array_intrinsic_members = members ; <nl> array_intrinsic_right_paren = right_p ; } - > <nl> transform_container_literal env kw left_p members right_p <nl> - | DarrayIntrinsicExpression { <nl> + | Syntax . DarrayIntrinsicExpression { <nl> darray_intrinsic_keyword = kw ; <nl> darray_intrinsic_left_bracket = left_p ; <nl> darray_intrinsic_members = members ; <nl> darray_intrinsic_right_bracket = right_p ; } - > <nl> transform_container_literal env kw left_p members right_p <nl> - | DictionaryIntrinsicExpression { <nl> + | Syntax . DictionaryIntrinsicExpression { <nl> dictionary_intrinsic_keyword = kw ; <nl> dictionary_intrinsic_left_bracket = left_p ; <nl> dictionary_intrinsic_members = members ; <nl> dictionary_intrinsic_right_bracket = right_p ; } - > <nl> transform_container_literal env kw left_p members right_p <nl> - | KeysetIntrinsicExpression { <nl> + | Syntax . KeysetIntrinsicExpression { <nl> keyset_intrinsic_keyword = kw ; <nl> keyset_intrinsic_left_bracket = left_p ; <nl> keyset_intrinsic_members = members ; <nl> keyset_intrinsic_right_bracket = right_p ; } - > <nl> transform_container_literal env kw left_p members right_p <nl> - | VarrayIntrinsicExpression { <nl> + | Syntax . VarrayIntrinsicExpression { <nl> varray_intrinsic_keyword = kw ; <nl> varray_intrinsic_left_bracket = left_p ; <nl> varray_intrinsic_members = members ; <nl> varray_intrinsic_right_bracket = right_p ; } - > <nl> transform_container_literal env kw left_p members right_p <nl> - | VectorIntrinsicExpression { <nl> + | Syntax . VectorIntrinsicExpression { <nl> vector_intrinsic_keyword = kw ; <nl> vector_intrinsic_left_bracket = left_p ; <nl> vector_intrinsic_members = members ; <nl> vector_intrinsic_right_bracket = right_p ; } - > <nl> transform_container_literal env kw left_p members right_p <nl> - | ElementInitializer { <nl> + | Syntax . ElementInitializer { <nl> element_key = key ; <nl> element_arrow = arrow ; <nl> element_value = value ; } - > <nl> transform_mapish_entry env key arrow value <nl> - | SubscriptExpression { <nl> + | Syntax . SubscriptExpression { <nl> subscript_receiver = receiver ; <nl> subscript_left_bracket = lb ; <nl> subscript_index = expr ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env receiver ; <nl> transform_braced_item env lb expr rb ; <nl> ] <nl> - | AwaitableCreationExpression { <nl> + | Syntax . AwaitableCreationExpression { <nl> awaitable_async = async_kw ; <nl> awaitable_coroutine = coroutine_kw ; <nl> awaitable_compound_statement = body ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . 
t = <nl> ( * TODO : correctly handle spacing after the closing brace * ) <nl> handle_possible_compound_statement env ~ space : false body ; <nl> ] <nl> - | XHPChildrenDeclaration { <nl> + | Syntax . XHPChildrenDeclaration { <nl> xhp_children_keyword = kw ; <nl> xhp_children_expression = expr ; <nl> xhp_children_semicolon = semi ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | XHPChildrenParenthesizedList { <nl> + | Syntax . XHPChildrenParenthesizedList { <nl> xhp_children_list_left_paren = left_p ; <nl> xhp_children_list_xhp_children = expressions ; <nl> xhp_children_list_right_paren = right_p ; } - > <nl> Concat [ <nl> transform_argish env ~ allow_trailing : false left_p expressions right_p ; <nl> ] <nl> - | XHPCategoryDeclaration { <nl> + | Syntax . XHPCategoryDeclaration { <nl> xhp_category_keyword = kw ; <nl> xhp_category_categories = categories ; <nl> xhp_category_semicolon = semi ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | XHPEnumType { <nl> + | Syntax . XHPEnumType { <nl> xhp_enum_optional = opt ; <nl> xhp_enum_keyword = kw ; <nl> xhp_enum_left_brace = left_b ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> transform_argish env left_b values right_b ; <nl> ] <nl> - | XHPClassAttributeDeclaration { <nl> + | Syntax . XHPClassAttributeDeclaration { <nl> xhp_attribute_keyword = kw ; <nl> xhp_attribute_attributes = xhp_attributes ; <nl> xhp_attribute_semicolon = semi ; } - > <nl> Concat [ <nl> t env kw ; <nl> - ( match syntax xhp_attributes with <nl> - | Missing - > Nothing <nl> - | SyntaxList [ attr ] - > <nl> + ( match Syntax . syntax xhp_attributes with <nl> + | Syntax . Missing - > Nothing <nl> + | Syntax . SyntaxList [ attr ] - > <nl> WithRule ( Rule . Parental , Nest [ Space ; Split ; t env attr ] ) <nl> - | SyntaxList attrs - > <nl> + | Syntax . SyntaxList attrs - > <nl> Nest [ handle_list env ~ before_each : newline attrs ] <nl> | _ - > failwith " Expected SyntaxList " <nl> ) ; <nl> t env semi ; <nl> Newline ; <nl> ] <nl> - | XHPClassAttribute { <nl> + | Syntax . XHPClassAttribute { <nl> xhp_attribute_decl_type = attr_type ; <nl> xhp_attribute_decl_name = name ; <nl> xhp_attribute_decl_initializer = init ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> when_present req space ; <nl> t env req ; <nl> ] <nl> - | XHPSimpleAttribute { <nl> + | Syntax . XHPSimpleAttribute { <nl> xhp_simple_attribute_name = name ; <nl> xhp_simple_attribute_equal = eq ; <nl> xhp_simple_attribute_expression = expr ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> SplitWith Cost . Base ; <nl> Nest [ t env expr ] ; <nl> ] <nl> - | XHPSpreadAttribute { <nl> + | Syntax . XHPSpreadAttribute { <nl> xhp_spread_attribute_left_brace = l_brace ; <nl> xhp_spread_attribute_spread_operator = spread ; <nl> xhp_spread_attribute_expression = expr ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Nest [ t env expr ] ; <nl> t env r_brace ; <nl> ] <nl> - | XHPOpen { <nl> + | Syntax . XHPOpen { <nl> xhp_open_left_angle = left_a ; <nl> xhp_open_name = name ; <nl> xhp_open_attributes = attrs ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Concat [ <nl> t env left_a ; <nl> t env name ; <nl> - match syntax attrs with <nl> - | Missing - > handle_xhp_open_right_angle_token env attrs right_a <nl> + match Syntax . 
syntax attrs with <nl> + | Syntax . Missing - > handle_xhp_open_right_angle_token env attrs right_a <nl> | _ - > <nl> Concat [ <nl> Space ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] ) <nl> ] <nl> ] <nl> - | XHPExpression { <nl> + | Syntax . XHPExpression { <nl> xhp_open = xhp_open ; <nl> xhp_body = body ; <nl> xhp_close = close ; } - > <nl> let handle_xhp_body body = <nl> - match syntax body with <nl> - | Missing - > Nothing , true <nl> - | SyntaxList xs - > <nl> + match Syntax . syntax body with <nl> + | Syntax . Missing - > Nothing , true <nl> + | Syntax . SyntaxList xs - > <nl> ( * XHP breaks the normal rules of trivia . All trailing trivia ( except <nl> * on XHPBody tokens ) is lexed as leading trivia for the next token . <nl> * <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> then transform_leading_trivia leading <nl> else begin <nl> let v = <nl> - match syntax node with <nl> - | Token _ - > if has_invisibles leading then Split else Nothing <nl> + match Syntax . syntax node with <nl> + | Syntax . Token _ - > <nl> + if has_invisibles leading then Split else Nothing <nl> | _ - > Split in <nl> Concat [ v ; transform_xhp_leading_trivia leading ] <nl> end ; <nl> t env node ; <nl> ] in <nl> - let open EditableToken in <nl> + let open Token in <nl> prev_token_was_xhpbody : = begin <nl> - match syntax node with <nl> - | Token t - > kind t = TokenKind . XHPBody <nl> + match Syntax . syntax node with <nl> + | Syntax . Token t - > kind t = TokenKind . XHPBody <nl> | _ - > false <nl> end ; <nl> ( * Here , we preserve newlines after XHPBody tokens and don ' t add <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> * solving too expensive . * ) <nl> let trailing = Syntax . trailing_trivia node in <nl> let trailing_whitespace = <nl> - match syntax node with <nl> - | Token _ when has_newline trailing - > Newline <nl> + match Syntax . syntax node with <nl> + | Syntax . Token _ when has_newline trailing - > Newline <nl> | _ when has_whitespace trailing - > Space <nl> | _ - > Nothing <nl> in <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> | Some token - > token <nl> in <nl> let can_split_before_first_token = <nl> - let open EditableToken in <nl> + let open Token in <nl> kind leading_token < > TokenKind . XHPBody | | <nl> has_invisibles ( leading leading_token ) <nl> in <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> ] <nl> end ; <nl> ] ) <nl> - | VarrayTypeSpecifier { <nl> + | Syntax . VarrayTypeSpecifier { <nl> varray_keyword = kw ; <nl> varray_left_angle = left_a ; <nl> varray_type = varray_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> transform_braced_item_with_trailer env <nl> left_a varray_type trailing_comma right_a ; <nl> ] <nl> - | VectorArrayTypeSpecifier { <nl> + | Syntax . VectorArrayTypeSpecifier { <nl> vector_array_keyword = kw ; <nl> vector_array_left_angle = left_a ; <nl> vector_array_type = vec_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_braced_item env left_a vec_type right_a ; <nl> ] <nl> - | VectorTypeSpecifier { <nl> + | Syntax . VectorTypeSpecifier { <nl> vector_type_keyword = kw ; <nl> vector_type_left_angle = left_a ; <nl> vector_type_type = vec_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> transform_braced_item_with_trailer env <nl> left_a vec_type trailing_comma right_a ; <nl> ] <nl> - | KeysetTypeSpecifier { <nl> + | Syntax . 
KeysetTypeSpecifier { <nl> keyset_type_keyword = kw ; <nl> keyset_type_left_angle = left_a ; <nl> keyset_type_type = ks_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> transform_braced_item_with_trailer env <nl> left_a ks_type trailing_comma right_a ; <nl> ] <nl> - | TypeParameter { <nl> + | Syntax . TypeParameter { <nl> type_variance = variance ; <nl> type_name = name ; <nl> type_constraints = constraints ; } - > <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> handle_possible_list env constraints <nl> ~ after_each : ( fun is_last - > if is_last then Nothing else Space ) ; <nl> ] <nl> - | TypeConstraint { <nl> + | Syntax . TypeConstraint { <nl> constraint_keyword = kw ; <nl> constraint_type = constraint_type ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> Space ; <nl> t env constraint_type ; <nl> ] <nl> - | DarrayTypeSpecifier { <nl> + | Syntax . DarrayTypeSpecifier { <nl> darray_keyword = kw ; <nl> darray_left_angle = left_a ; <nl> darray_key = key ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> darray_value = value ; <nl> darray_trailing_comma = trailing_comma ; <nl> darray_right_angle = right_a ; } - > <nl> - let key_list_item = make_list_item key comma_kw in <nl> - let val_list_item = make_list_item value trailing_comma in <nl> + let key_list_item = Syntax . make_list_item key comma_kw in <nl> + let val_list_item = Syntax . make_list_item value trailing_comma in <nl> let args = make_list [ key_list_item ; val_list_item ] in <nl> Concat [ <nl> t env kw ; <nl> transform_argish env ~ allow_trailing : true left_a args right_a ; <nl> ] <nl> - | MapArrayTypeSpecifier { <nl> + | Syntax . MapArrayTypeSpecifier { <nl> map_array_keyword = kw ; <nl> map_array_left_angle = left_a ; <nl> map_array_key = key ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> map_array_right_angle = right_a ; } - > <nl> Concat [ <nl> t env kw ; <nl> - let key_list_item = make_list_item key comma_kw in <nl> - let val_list_item = make_list_item value ( make_missing ( ) ) in <nl> + let key_list_item = Syntax . make_list_item key comma_kw in <nl> + let val_list_item = Syntax . make_list_item value ( make_missing ( ) ) in <nl> let args = make_list [ key_list_item ; val_list_item ] in <nl> transform_argish env ~ allow_trailing : false left_a args right_a ; <nl> ] <nl> - | DictionaryTypeSpecifier { <nl> + | Syntax . DictionaryTypeSpecifier { <nl> dictionary_type_keyword = kw ; <nl> dictionary_type_left_angle = left_a ; <nl> dictionary_type_members = members ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env left_a members right_a ; <nl> ] <nl> - | ClosureTypeSpecifier { <nl> + | Syntax . ClosureTypeSpecifier { <nl> closure_outer_left_paren = outer_left_p ; <nl> closure_coroutine = coroutine ; <nl> closure_function_keyword = kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> inner_left_p param_list inner_right_p colon ret_type ; <nl> t env outer_right_p ; <nl> ] <nl> - | ClosureParameterTypeSpecifier { <nl> + | Syntax . ClosureParameterTypeSpecifier { <nl> closure_parameter_call_convention = callconv ; <nl> closure_parameter_type = cp_type ; } - > <nl> Concat [ <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> when_present callconv space ; <nl> t env cp_type ; <nl> ] <nl> - | ClassnameTypeSpecifier { <nl> + | Syntax . 
ClassnameTypeSpecifier { <nl> classname_keyword = kw ; <nl> classname_left_angle = left_a ; <nl> classname_type = class_type ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> transform_braced_item_with_trailer env <nl> left_a class_type trailing_comma right_a ; <nl> ] <nl> - | FieldSpecifier { <nl> + | Syntax . FieldSpecifier { <nl> field_question = question ; <nl> field_name = name ; <nl> field_arrow = arrow_kw ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env question ; <nl> transform_mapish_entry env name arrow_kw field_type ; <nl> ] <nl> - | FieldInitializer { <nl> + | Syntax . FieldInitializer { <nl> field_initializer_name = name ; <nl> field_initializer_arrow = arrow_kw ; <nl> field_initializer_value = value ; } - > <nl> transform_mapish_entry env name arrow_kw value <nl> - | ShapeTypeSpecifier { <nl> + | Syntax . ShapeTypeSpecifier { <nl> shape_type_keyword = shape_kw ; <nl> shape_type_left_paren = left_p ; <nl> shape_type_fields = type_fields ; <nl> shape_type_ellipsis = ellipsis ; <nl> shape_type_right_paren = right_p ; } - > <nl> - let fields = if is_missing ellipsis <nl> + let fields = if Syntax . is_missing ellipsis <nl> then type_fields <nl> else <nl> let missing_separator = make_missing ( ) in <nl> - let ellipsis_list = [ make_list_item ellipsis missing_separator ] in <nl> - make_list ( children type_fields @ ellipsis_list ) in <nl> - transform_container_literal env <nl> - ~ allow_trailing : ( is_missing ellipsis ) shape_kw left_p fields right_p <nl> - | ShapeExpression { <nl> + let ellipsis_list = <nl> + [ Syntax . make_list_item ellipsis missing_separator ] in <nl> + make_list ( Syntax . children type_fields @ ellipsis_list ) in <nl> + transform_container_literal env shape_kw left_p fields right_p <nl> + ~ allow_trailing : ( Syntax . is_missing ellipsis ) <nl> + | Syntax . ShapeExpression { <nl> shape_expression_keyword = shape_kw ; <nl> shape_expression_left_paren = left_p ; <nl> shape_expression_fields = fields ; <nl> shape_expression_right_paren = right_p ; } - > <nl> transform_container_literal env shape_kw left_p fields right_p <nl> - | TupleExpression { <nl> + | Syntax . TupleExpression { <nl> tuple_expression_keyword = kw ; <nl> tuple_expression_left_paren = left_p ; <nl> tuple_expression_items = items ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env left_p items right_p ; <nl> ] <nl> - | TypeArguments { <nl> + | Syntax . TypeArguments { <nl> type_arguments_left_angle = left_a ; <nl> type_arguments_types = type_list ; <nl> type_arguments_right_angle = right_a ; } - > <nl> transform_argish env left_a type_list right_a <nl> - | TypeParameters { <nl> + | Syntax . TypeParameters { <nl> type_parameters_left_angle = left_a ; <nl> type_parameters_parameters = param_list ; <nl> type_parameters_right_angle = right_a ; } - > <nl> transform_argish env left_a param_list right_a <nl> - | TupleTypeSpecifier { <nl> + | Syntax . TupleTypeSpecifier { <nl> tuple_left_paren = left_p ; <nl> tuple_types = types ; <nl> tuple_right_paren = right_p ; } - > <nl> transform_argish env left_p types right_p <nl> - | TupleTypeExplicitSpecifier { <nl> + | Syntax . TupleTypeExplicitSpecifier { <nl> tuple_type_keyword = kw ; <nl> tuple_type_left_angle = left_a ; <nl> tuple_type_types = types ; <nl> let rec t ( env : Env . t ) ( node : Syntax . t ) : Doc . t = <nl> t env kw ; <nl> transform_argish env left_a types right_a <nl> ] <nl> - | DecoratedExpression { <nl> + | Syntax . 
DecoratedExpression { <nl> decorated_expression_decorator = op ; <nl> decorated_expression_expression = expr ; } - > <nl> Concat [ <nl> t env op ; <nl> begin <nl> - let open EditableToken in <nl> - match syntax op with <nl> - | Token t when kind t = TokenKind . Inout - > Space <nl> + match Syntax . syntax op with <nl> + | Syntax . Token t when Token . kind t = TokenKind . Inout - > Space <nl> | _ - > Nothing <nl> end ; <nl> t env expr ; <nl> ] <nl> - | ErrorSyntax _ - > <nl> + | Syntax . ErrorSyntax _ - > <nl> raise Hackfmt_error . InvalidSyntax <nl> <nl> and when_present node f = <nl> - match syntax node with <nl> - | Missing - > Nothing <nl> + match Syntax . syntax node with <nl> + | Syntax . Missing - > Nothing <nl> | _ - > f ( ) <nl> <nl> and transform_simple env node = <nl> - Concat ( List . map ( children node ) ( t env ) ) <nl> + Concat ( List . map ( Syntax . children node ) ( t env ) ) <nl> <nl> and transform_simple_statement env node = <nl> - Concat ( ( List . map ( children node ) ( t env ) ) @ [ Newline ] ) <nl> + Concat ( ( List . map ( Syntax . children node ) ( t env ) ) @ [ Newline ] ) <nl> <nl> and braced_block_nest env ? ( allow_collapse = true ) open_b close_b nodes = <nl> let nodes = Concat nodes in <nl> - match allow_collapse , has_printable_content nodes , syntax open_b with <nl> - | true , false , Token ob <nl> - when List . for_all ( EditableToken . trailing ob ) <nl> + match allow_collapse , has_printable_content nodes , Syntax . syntax open_b with <nl> + | true , false , Syntax . Token ob <nl> + when List . for_all ( Token . trailing ob ) <nl> ( fun t - > Trivia . kind t < > TriviaKind . EndOfLine ) - > <nl> Concat [ <nl> t env open_b ; <nl> and after_each_argument is_last = <nl> else space_split ( ) <nl> <nl> and handle_lambda_body env node = <nl> - match syntax node with <nl> - | CompoundStatement { <nl> + match Syntax . syntax node with <nl> + | Syntax . CompoundStatement { <nl> compound_left_brace ; <nl> compound_statements ; <nl> compound_right_brace ; } - > <nl> handle_compound_statement env ~ allow_collapse : true <nl> compound_left_brace compound_statements compound_right_brace ; <nl> - | XHPExpression _ - > <nl> + | Syntax . XHPExpression _ - > <nl> WithRule ( Rule . Parental , Concat [ <nl> Space ; <nl> Split ; <nl> and handle_possible_compound_statement env <nl> ? ( allow_collapse = false ) <nl> node <nl> = <nl> - match syntax node with <nl> - | CompoundStatement { <nl> + match Syntax . syntax node with <nl> + | Syntax . CompoundStatement { <nl> compound_left_brace ; <nl> compound_statements ; <nl> compound_right_brace ; } - > <nl> and handle_possible_compound_statement env <nl> compound_right_brace ; <nl> if space then Space else Nothing ; <nl> ] <nl> - | AlternateLoopStatement { <nl> + | Syntax . AlternateLoopStatement { <nl> alternate_loop_opening_colon ; <nl> alternate_loop_statements ; <nl> alternate_loop_closing_keyword ; <nl> and handle_alternate_loop_statement env <nl> * its children break . <nl> * ) <nl> and handle_declarator_list env declarators = <nl> - match syntax declarators with <nl> - | Missing - > Nothing <nl> - | SyntaxList [ declarator ] - > <nl> + match Syntax . syntax declarators with <nl> + | Syntax . Missing - > Nothing <nl> + | Syntax . SyntaxList [ declarator ] - > <nl> Nest [ <nl> Space ; <nl> ( * Use an independent split , so we don ' t break just because a line break <nl> and handle_declarator_list env declarators = <nl> SplitWith Cost . Base ; <nl> t env declarator ; <nl> ] ; <nl> - | SyntaxList xs - > <nl> + | Syntax . 
SyntaxList xs - > <nl> ( * Use Rule . Parental to break each declarator onto its own line if any <nl> * line break occurs in a declarator , or if they can ' t all fit onto one <nl> * line . * ) <nl> and handle_possible_list env <nl> ? ( after_each = ( fun _is_last - > Nothing ) ) <nl> ? ( handle_last = t env ) <nl> node = <nl> - match syntax node with <nl> - | Missing - > Nothing <nl> - | SyntaxList x - > handle_list env x ~ before_each ~ after_each ~ handle_last <nl> + match Syntax . syntax node with <nl> + | Syntax . Missing - > Nothing <nl> + | Syntax . SyntaxList x - > <nl> + handle_list env x ~ before_each ~ after_each ~ handle_last <nl> | _ - > handle_list env [ node ] ~ before_each ~ after_each ~ handle_last <nl> <nl> and handle_xhp_open_right_angle_token env attrs node = <nl> - match syntax node with <nl> - | Token token - > <nl> + match Syntax . syntax node with <nl> + | Syntax . Token token - > <nl> Concat [ <nl> - if EditableToken . text token = " / > " <nl> + if Token . text token = " / > " <nl> then Concat [ Space ; when_present attrs split ] <nl> else Nothing ; <nl> t env node <nl> and handle_function_call_expression env <nl> args <nl> rp <nl> = <nl> - match syntax receiver with <nl> - | MemberSelectionExpression { <nl> + match Syntax . syntax receiver with <nl> + | Syntax . MemberSelectionExpression { <nl> member_object ; <nl> member_operator ; <nl> member_name ; } - > <nl> handle_possible_chaining env <nl> ( member_object , member_operator , member_name ) <nl> ( Some ( lp , args , rp ) ) <nl> - | SafeMemberSelectionExpression { <nl> + | Syntax . SafeMemberSelectionExpression { <nl> safe_member_object ; <nl> safe_member_operator ; <nl> safe_member_name ; } - > <nl> and handle_function_call_with_type_arguments_expression env <nl> args <nl> rp <nl> = <nl> - match syntax receiever with <nl> - | MemberSelectionExpression { <nl> + match Syntax . syntax receiever with <nl> + | Syntax . MemberSelectionExpression { <nl> member_object ; <nl> member_operator ; <nl> member_name ; } - > <nl> handle_possible_chaining env <nl> ( member_object , member_operator , member_name ) <nl> ( Some ( lp , args , rp ) ) <nl> - | SafeMemberSelectionExpression { <nl> + | Syntax . SafeMemberSelectionExpression { <nl> safe_member_object ; <nl> safe_member_operator ; <nl> safe_member_name ; } - > <nl> and handle_possible_chaining env ( obj , arrow1 , member1 ) argish = <nl> let ( obj , l ) = handle_chaining obj in <nl> obj , l @ [ ( arrow , member , fun_paren_args ) ] <nl> in <nl> - match syntax obj with <nl> - | FunctionCallExpression { <nl> + match Syntax . syntax obj with <nl> + | Syntax . FunctionCallExpression { <nl> function_call_receiver = receiver ; <nl> function_call_left_paren = lp ; <nl> function_call_argument_list = args ; <nl> function_call_right_paren = rp ; } - > <nl> - ( match syntax receiver with <nl> - | MemberSelectionExpression { <nl> + ( match Syntax . syntax receiver with <nl> + | Syntax . MemberSelectionExpression { <nl> member_object ; <nl> member_operator ; <nl> member_name ; } - > <nl> handle_mse_or_smse <nl> ( member_object , member_operator , member_name ) <nl> ( Some ( lp , args , rp ) ) <nl> - | SafeMemberSelectionExpression { <nl> + | Syntax . SafeMemberSelectionExpression { <nl> safe_member_object ; <nl> safe_member_operator ; <nl> safe_member_name ; } - > <nl> and handle_possible_chaining env ( obj , arrow1 , member1 ) argish = <nl> ( Some ( lp , args , rp ) ) <nl> | _ - > obj , [ ] <nl> ) <nl> - | MemberSelectionExpression { <nl> + | Syntax . 
MemberSelectionExpression { <nl> member_object ; <nl> member_operator ; <nl> member_name ; } - > <nl> handle_mse_or_smse <nl> ( member_object , member_operator , member_name ) <nl> None <nl> - | SafeMemberSelectionExpression { <nl> + | Syntax . SafeMemberSelectionExpression { <nl> safe_member_object ; <nl> safe_member_operator ; <nl> safe_member_name ; } - > <nl> and transform_fn_decl_args env params rightp colon ret_type where = <nl> ( * It is a syntax error to follow a variadic parameter with a trailing <nl> * comma , so suppress trailing commas in that case . * ) <nl> let allow_trailing = <nl> - match syntax params with <nl> - | SyntaxList params - > <nl> - let open EditableToken in <nl> - let open EditableToken . TokenKind in <nl> + match Syntax . syntax params with <nl> + | Syntax . SyntaxList params - > <nl> + let open Token in <nl> + let open TokenKind in <nl> let last_param = <nl> - match syntax ( List . last_exn params ) with <nl> - | ListItem { list_item ; _ } - > list_item <nl> + match Syntax . syntax ( List . last_exn params ) with <nl> + | Syntax . ListItem { list_item ; _ } - > list_item <nl> | _ - > failwith " Expected ListItem " <nl> in <nl> begin <nl> - match syntax last_param with <nl> - | VariadicParameter _ <nl> - | ParameterDeclaration { <nl> + match Syntax . syntax last_param with <nl> + | Syntax . VariadicParameter _ <nl> + | Syntax . ( ParameterDeclaration { <nl> parameter_name = { syntax = DecoratedExpression { <nl> decorated_expression_decorator = { <nl> syntax = Token { kind = DotDotDot ; _ } ; _ <nl> } ; _ <nl> } ; _ } ; _ <nl> - } - > <nl> + } ) - > <nl> false <nl> | _ - > true <nl> end <nl> and transform_argish env <nl> * line breaks within the argument ( normally these would force the splits <nl> * around the argument to break ) . * ) <nl> let split_when_children_split = <nl> - match spaces , syntax arg_list with <nl> - | false , SyntaxList [ x ] - > <nl> + match spaces , Syntax . syntax arg_list with <nl> + | false , Syntax . SyntaxList [ x ] - > <nl> let has_surrounding_whitespace = <nl> not ( <nl> - List . is_empty ( trailing_trivia left_p ) & & <nl> - List . is_empty ( trailing_trivia x ) <nl> + List . is_empty ( Syntax . trailing_trivia left_p ) & & <nl> + List . is_empty ( Syntax . trailing_trivia x ) <nl> ) <nl> in <nl> let item = <nl> - match syntax x with <nl> - | ListItem x - > x . list_item <nl> + match Syntax . syntax x with <nl> + | Syntax . ListItem { list_item ; _ } - > list_item <nl> | _ - > failwith " Expected ListItem " <nl> in <nl> ( * Blacklist constructs which look ugly when we try to preserve the <nl> * lack - of - whitespace style . * ) <nl> - ( match syntax item with <nl> - | LambdaExpression <nl> - { lambda_body = { syntax = CompoundStatement _ ; _ } ; _ } - > <nl> + ( match Syntax . syntax item with <nl> + | Syntax . ( LambdaExpression <nl> + { lambda_body = { syntax = CompoundStatement _ ; _ } ; _ } ) - > <nl> has_surrounding_whitespace <nl> - | FunctionCallExpression { function_call_receiver ; _ } - > <nl> + | Syntax . FunctionCallExpression { function_call_receiver ; _ } - > <nl> Syntax . is_member_selection_expression function_call_receiver | | <nl> has_surrounding_whitespace <nl> - | ConditionalExpression _ <nl> - | BinaryExpression _ <nl> - | MemberSelectionExpression _ <nl> - | FieldSpecifier _ <nl> - | FieldInitializer _ <nl> - | ElementInitializer _ <nl> - | LambdaExpression _ <nl> + | Syntax . ConditionalExpression _ <nl> + | Syntax . BinaryExpression _ <nl> + | Syntax . MemberSelectionExpression _ <nl> + | Syntax . 
FieldSpecifier _ <nl> + | Syntax . FieldInitializer _ <nl> + | Syntax . ElementInitializer _ <nl> + | Syntax . LambdaExpression _ <nl> - > true <nl> | _ - > has_surrounding_whitespace <nl> ) <nl> and transform_braced_item env left_p item right_p = <nl> delimited_nest env left_p right_p [ t env item ] <nl> <nl> and transform_trailing_comma env ~ allow_trailing item comma = <nl> - let open EditableToken in <nl> + let open Token in <nl> ( * PHP does not permit trailing commas in function calls . Rather than try to <nl> * account for where PHP ' s parser permits trailing commas , we just never add <nl> * them in PHP files . * ) <nl> let allow_trailing = allow_trailing & & env . Env . add_trailing_commas in <nl> - match syntax comma with <nl> - | Token tok - > <nl> + match Syntax . syntax comma with <nl> + | Syntax . Token tok - > <nl> Concat [ <nl> t env item ; <nl> transform_leading_trivia ( leading tok ) ; <nl> and transform_trailing_comma env ~ allow_trailing item comma = <nl> Ignore ( text tok , width tok ) ; <nl> transform_trailing_trivia ( trailing tok ) ; <nl> ] <nl> - | Missing - > <nl> + | Syntax . Missing - > <nl> let item , item_trailing = remove_trailing_trivia item in <nl> Concat [ <nl> t env item ; <nl> and remove_leading_trivia node = <nl> | None - > [ ] , node <nl> | Some leading_token - > <nl> let rewritten_node = Rewriter . rewrite_pre ( fun rewrite_node - > <nl> - match syntax rewrite_node with <nl> - | Token t when t = = leading_token - > <nl> + match Syntax . syntax rewrite_node with <nl> + | Syntax . Token t when t = = leading_token - > <nl> Rewriter . Replace <nl> - ( Syntax . make_token { t with EditableToken . leading = [ ] } ) <nl> + ( Syntax . make_token { t with Token . leading = [ ] } ) <nl> | _ - > Rewriter . Keep <nl> ) node in <nl> - EditableToken . leading leading_token , rewritten_node <nl> + Token . leading leading_token , rewritten_node <nl> <nl> and remove_trailing_trivia node = <nl> match Syntax . trailing_token node with <nl> | None - > node , [ ] <nl> | Some trailing_token - > <nl> let rewritten_node = Rewriter . rewrite_pre ( fun rewrite_node - > <nl> - match syntax rewrite_node with <nl> - | Token t when t = = trailing_token - > <nl> + match Syntax . syntax rewrite_node with <nl> + | Syntax . Token t when t = = trailing_token - > <nl> Rewriter . Replace <nl> - ( Syntax . make_token { t with EditableToken . trailing = [ ] } ) <nl> + ( Syntax . make_token { t with Token . trailing = [ ] } ) <nl> | _ - > Rewriter . Keep <nl> ) node in <nl> - rewritten_node , EditableToken . trailing trailing_token <nl> + rewritten_node , Token . trailing trailing_token <nl> <nl> and transform_last_arg env ~ allow_trailing node = <nl> - match syntax node with <nl> - | ListItem { <nl> + match Syntax . syntax node with <nl> + | Syntax . ListItem { <nl> list_item = item ; <nl> list_separator = separator ; } - > <nl> transform_trailing_comma env ~ allow_trailing item separator <nl> and transform_condition env left_p condition right_p = <nl> <nl> and transform_binary_expression env ~ is_nested ( left , operator , right ) = <nl> let get_operator_type op = <nl> - match syntax op with <nl> - | Token t - > Full_fidelity_operator . trailing_from_token <nl> - ( EditableToken . kind t ) <nl> + match Syntax . syntax op with <nl> + | Syntax . Token t - > Full_fidelity_operator . trailing_from_token <nl> + ( Token . 
kind t ) <nl> | _ - > failwith " Operator should always be a token " <nl> in <nl> let is_concat op = <nl> and transform_binary_expression env ~ is_nested ( left , operator , right ) = <nl> let precedence = Full_fidelity_operator . precedence operator_t in <nl> <nl> let rec flatten_expression expr = <nl> - match syntax expr with <nl> - | BinaryExpression { <nl> + match Syntax . syntax expr with <nl> + | Syntax . BinaryExpression { <nl> binary_left_operand = left ; <nl> binary_operator = operator ; <nl> binary_right_operand = right ; } - > <nl> and transform_binary_expression env ~ is_nested ( left , operator , right ) = <nl> in <nl> <nl> let transform_operand operand = <nl> - match syntax operand with <nl> - | BinaryExpression { <nl> + match Syntax . syntax operand with <nl> + | Syntax . BinaryExpression { <nl> binary_left_operand ; <nl> binary_operator ; <nl> binary_right_operand ; } - > <nl> and transform_binary_expression env ~ is_nested ( left , operator , right ) = <nl> in <nl> <nl> let binary_expression_syntax_list = <nl> - flatten_expression ( make_binary_expression left operator right ) in <nl> + flatten_expression ( Syntax . make_binary_expression left operator right ) <nl> + in <nl> match binary_expression_syntax_list with <nl> | hd : : tl - > <nl> WithLazyRule ( Rule . Parental , <nl>
Don't open Syntax in Hack_format
facebook/hhvm
3b8d9dda5b46dd06123df03c430a0fa0cbbdc1eb
2018-02-13T21:22:26Z
mmm a / tools / run_tests / run_xds_tests . py <nl> ppp b / tools / run_tests / run_xds_tests . py <nl> def parse_port_range ( port_arg ) : <nl> argp . add_argument ( ' - - qps ' , default = 10 , type = int , help = ' Client QPS ' ) <nl> argp . add_argument ( <nl> ' - - wait_for_backend_sec ' , <nl> - default = 600 , <nl> + default = 1200 , <nl> type = int , <nl> help = ' Time limit for waiting for created backend services to report ' <nl> ' healthy when launching or updated GCP resources ' ) <nl>
Merge pull request from ericgribkoff/increase_default_wait
grpc/grpc
544b2925e951c3fbb9767168a44f92b4268812b9
2020-03-22T00:14:59Z
mmm a / vendor / brightray <nl> ppp b / vendor / brightray <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit c4eac9a798f71ef2d09fbdb76bbf7cfe6aeb4846 <nl> + Subproject commit 08525d16f467a1e3d130ba52892dd544c6c2c17a <nl>
Update brightray for
electron/electron
c41183302b3d6e848fd7e9f54c277a3648740939
2016-08-12T08:37:38Z
mmm a / test / extensions / access_loggers / grpc / tcp_grpc_access_log_integration_test . cc <nl> ppp b / test / extensions / access_loggers / grpc / tcp_grpc_access_log_integration_test . cc <nl> class TcpGrpcAccessLogIntegrationTest : public Grpc : : VersionedGrpcClientIntegrat <nl> public : <nl> TcpGrpcAccessLogIntegrationTest ( ) <nl> : BaseIntegrationTest ( ipVersion ( ) , ConfigHelper : : tcpProxyConfig ( ) ) { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> } <nl> <nl> void createUpstreams ( ) override { <nl> mmm a / test / extensions / filters / http / oauth2 / oauth_integration_test . cc <nl> ppp b / test / extensions / filters / http / oauth2 / oauth_integration_test . cc <nl> class OauthIntegrationTest : public testing : : Test , public HttpIntegrationTest { <nl> public : <nl> OauthIntegrationTest ( ) <nl> : HttpIntegrationTest ( Http : : CodecClient : : Type : : HTTP2 , Network : : Address : : IpVersion : : v4 ) { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> } <nl> <nl> envoy : : service : : discovery : : v3 : : DiscoveryResponse genericSecretResponse ( absl : : string_view name , <nl> mmm a / test / integration / base_integration_test . cc <nl> ppp b / test / integration / base_integration_test . cc <nl> Network : : ClientConnectionPtr BaseIntegrationTest : : makeClientConnectionWithOption <nl> fmt : : format ( " tcp : / / { } : { } " , Network : : Test : : getLoopbackAddressUrlString ( version_ ) , port ) ) , <nl> Network : : Address : : InstanceConstSharedPtr ( ) , Network : : Test : : createRawBufferSocket ( ) , options ) ) ; <nl> <nl> - connection - > enableHalfClose ( enable_half_close_ ) ; <nl> + connection - > enableHalfClose ( enableHalfClose ( ) ) ; <nl> return connection ; <nl> } <nl> <nl> BaseIntegrationTest : : makeTcpConnection ( uint32_t port , <nl> const Network : : ConnectionSocket : : OptionsSharedPtr & options , <nl> Network : : Address : : InstanceConstSharedPtr source_address ) { <nl> return std : : make_unique < IntegrationTcpClient > ( * dispatcher_ , * mock_buffer_factory_ , port , version_ , <nl> - enable_half_close_ , options , source_address ) ; <nl> + enableHalfClose ( ) , options , source_address ) ; <nl> } <nl> <nl> void BaseIntegrationTest : : registerPort ( const std : : string & key , uint32_t port ) { <nl> mmm a / test / integration / base_integration_test . h <nl> ppp b / test / integration / base_integration_test . h <nl> class BaseIntegrationTest : protected Logger : : Loggable < Logger : : Id : : testing > { <nl> protected : <nl> void setUdpFakeUpstream ( bool value ) { upstream_config_ . udp_fake_upstream_ = value ; } <nl> bool initialized ( ) const { return initialized_ ; } <nl> - const FakeUpstreamConfig & upstreamConfig ( ) { <nl> - / / TODO ( alyssawilk ) make enable_half_close_ private and remove this . <nl> - upstream_config_ . enable_half_close_ = enable_half_close_ ; <nl> - return upstream_config_ ; <nl> - } <nl> + <nl> + / / Right now half - close is set globally , not separately for upstream and <nl> + / / downstream . <nl> + void enableHalfClose ( bool value ) { upstream_config_ . enable_half_close_ = value ; } <nl> + <nl> + bool enableHalfClose ( ) { return upstream_config_ . 
enable_half_close_ ; } <nl> + <nl> + const FakeUpstreamConfig & upstreamConfig ( ) { return upstream_config_ ; } <nl> <nl> std : : unique_ptr < Stats : : Scope > upstream_stats_store_ ; <nl> <nl> class BaseIntegrationTest : protected Logger : : Loggable < Logger : : Id : : testing > { <nl> / / This does nothing if autonomous_upstream_ is false <nl> bool autonomous_allow_incomplete_streams_ { false } ; <nl> <nl> - bool enable_half_close_ { false } ; <nl> - <nl> / / True if test will use a fixed RNG value . <nl> bool deterministic_ { } ; <nl> <nl> mmm a / test / integration / cluster_filter_integration_test . cc <nl> ppp b / test / integration / cluster_filter_integration_test . cc <nl> class ClusterFilterIntegrationTest : public testing : : TestWithParam < Network : : Addr <nl> : BaseIntegrationTest ( GetParam ( ) , ConfigHelper : : tcpProxyConfig ( ) ) , registration_ ( factory_ ) { } <nl> <nl> void initialize ( ) override { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> config_helper_ . addConfigModifier ( [ ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) { <nl> auto * cluster_0 = bootstrap . mutable_static_resources ( ) - > mutable_clusters ( 0 ) ; <nl> auto * filter = cluster_0 - > add_filters ( ) ; <nl> mmm a / test / integration / filter_manager_integration_test . cc <nl> ppp b / test / integration / filter_manager_integration_test . cc <nl> INSTANTIATE_TEST_SUITE_P ( <nl> InjectDataToFilterChainIntegrationTest : : testParamsToString ) ; <nl> <nl> TEST_P ( InjectDataWithTcpProxyFilterIntegrationTest , UsageOfInjectDataMethodsShouldBeUnnoticeable ) { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> initialize ( ) ; <nl> <nl> auto tcp_client = makeTcpConnection ( lookupPort ( " listener_0 " ) ) ; <nl> mmm a / test / integration / tcp_proxy_integration_test . cc <nl> ppp b / test / integration / tcp_proxy_integration_test . cc <nl> TEST_P ( TcpProxyIntegrationTest , NoUpstream ) { <nl> lb_endpoint - > mutable_endpoint ( ) - > mutable_address ( ) - > mutable_socket_address ( ) - > set_port_value ( 1 ) ; <nl> } ) ; <nl> config_helper_ . skipPortUsageValidation ( ) ; <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> initialize ( ) ; <nl> <nl> IntegrationTcpClientPtr tcp_client = makeTcpConnection ( lookupPort ( " tcp_proxy " ) ) ; <nl> TEST_P ( TcpProxyIntegrationTest , ShutdownWithOpenConnections ) { <nl> TEST_P ( TcpProxyIntegrationTest , TestIdletimeoutWithNoData ) { <nl> autonomous_upstream_ = true ; <nl> <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> config_helper_ . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> auto * filter_chain = listener - > mutable_filter_chains ( 0 ) ; <nl> TEST_P ( TcpProxyIntegrationTest , TestIdletimeoutWithNoData ) { <nl> <nl> TEST_P ( TcpProxyIntegrationTest , TestIdletimeoutWithLargeOutstandingData ) { <nl> config_helper_ . setBufferLimits ( 1024 , 1024 ) ; <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> config_helper_ . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> auto * listener = bootstrap . 
mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> auto * filter_chain = listener - > mutable_filter_chains ( 0 ) ; <nl> TEST_P ( TcpProxyIntegrationTest , TestIdletimeoutWithLargeOutstandingData ) { <nl> TEST_P ( TcpProxyIntegrationTest , TestMaxDownstreamConnectionDurationWithNoData ) { <nl> autonomous_upstream_ = true ; <nl> <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> config_helper_ . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> auto * filter_chain = listener - > mutable_filter_chains ( 0 ) ; <nl> TEST_P ( TcpProxyIntegrationTest , TestMaxDownstreamConnectionDurationWithNoData ) { <nl> <nl> TEST_P ( TcpProxyIntegrationTest , TestMaxDownstreamConnectionDurationWithLargeOutstandingData ) { <nl> config_helper_ . setBufferLimits ( 1024 , 1024 ) ; <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> config_helper_ . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> auto * filter_chain = listener - > mutable_filter_chains ( 0 ) ; <nl> mmm a / test / integration / tcp_proxy_integration_test . h <nl> ppp b / test / integration / tcp_proxy_integration_test . h <nl> class TcpProxyIntegrationTest : public testing : : TestWithParam < TcpProxyIntegratio <nl> public : <nl> TcpProxyIntegrationTest ( ) <nl> : BaseIntegrationTest ( GetParam ( ) . version , ConfigHelper : : tcpProxyConfig ( ) ) { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> } <nl> <nl> void initialize ( ) override ; <nl> mmm a / test / integration / tcp_tunneling_integration_test . cc <nl> ppp b / test / integration / tcp_tunneling_integration_test . cc <nl> class ConnectTerminationIntegrationTest <nl> public : <nl> ConnectTerminationIntegrationTest ( ) <nl> : HttpIntegrationTest ( Http : : CodecClient : : Type : : HTTP2 , GetParam ( ) ) { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> } <nl> <nl> void initialize ( ) override { <nl> class TcpTunnelingIntegrationTest : public testing : : TestWithParam < Params > , <nl> } <nl> <nl> void SetUp ( ) override { <nl> - enable_half_close_ = true ; <nl> + enableHalfClose ( true ) ; <nl> setDownstreamProtocol ( Http : : CodecClient : : Type : : HTTP2 ) ; <nl> setUpstreamProtocol ( std : : get < 1 > ( GetParam ( ) ) ) ; <nl> <nl> TEST_P ( TcpTunnelingIntegrationTest , ResetStreamTest ) { <nl> if ( upstreamProtocol ( ) = = FakeHttpConnection : : Type : : HTTP1 ) { <nl> return ; <nl> } <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> initialize ( ) ; <nl> <nl> / / Establish a connection . <nl> TEST_P ( TcpTunnelingIntegrationTest , ResetStreamTest ) { <nl> } <nl> <nl> TEST_P ( TcpTunnelingIntegrationTest , TestIdletimeoutWithLargeOutstandingData ) { <nl> - enable_half_close_ = false ; <nl> + enableHalfClose ( false ) ; <nl> config_helper_ . setBufferLimits ( 1024 , 1024 ) ; <nl> config_helper_ . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 1 ) ; <nl> mmm a / test / integration / uds_integration_test . cc <nl> ppp b / test / integration / uds_integration_test . 
cc <nl> HttpIntegrationTest : : ConnectionCreationFunction UdsListenerIntegrationTest : : crea <nl> Network : : Utility : : resolveUrl ( fmt : : format ( " unix : / / { } " , getListenerSocketName ( ) ) ) , <nl> Network : : Address : : InstanceConstSharedPtr ( ) , Network : : Test : : createRawBufferSocket ( ) , <nl> nullptr ) ) ; <nl> - conn - > enableHalfClose ( enable_half_close_ ) ; <nl> + conn - > enableHalfClose ( enableHalfClose ( ) ) ; <nl> return conn ; <nl> } ; <nl> } <nl>
test: TODO fixup making enable_half_close private
envoyproxy/envoy
d3c4b9f2e2e2f2063df4031eeb2b0ab62c667f7b
2020-12-09T14:39:47Z
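The envoy diff above replaces the public `enable_half_close_` flag with `enableHalfClose(bool)` / `enableHalfClose()` accessors that forward to `upstream_config_`, giving the half-close setting a single source of truth. Below is a rough sketch of the same pattern, written in Python purely for illustration; the class and field names are stand-ins, not the Envoy test API.

```python
# Sketch of the accessor-forwarding refactor from the envoy diff above.
# FakeUpstreamConfig / BaseIntegrationTest are simplified stand-ins.
class FakeUpstreamConfig:
    def __init__(self):
        self.enable_half_close = False

class BaseIntegrationTest:
    def __init__(self):
        self._upstream_config = FakeUpstreamConfig()

    def enable_half_close(self, value=None):
        # Setter when given a value, getter otherwise -- mirroring the C++ overloads.
        if value is None:
            return self._upstream_config.enable_half_close
        self._upstream_config.enable_half_close = value

    def upstream_config(self):
        # No need to copy the flag over anymore; the config already holds it.
        return self._upstream_config

test = BaseIntegrationTest()
test.enable_half_close(True)
assert test.enable_half_close() is True
```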
mmm a / src / net . cpp <nl> ppp b / src / net . cpp <nl> void CNode : : CloseSocketDisconnect ( ) <nl> pnodeSync = NULL ; <nl> } <nl> <nl> - void CNode : : Cleanup ( ) <nl> - { <nl> - } <nl> - <nl> void CNode : : PushVersion ( ) <nl> { <nl> int nBestHeight = g_signals . GetHeight ( ) . get_value_or ( 0 ) ; <nl> void ThreadSocketHandler ( ) <nl> <nl> / / close socket and cleanup <nl> pnode - > CloseSocketDisconnect ( ) ; <nl> - pnode - > Cleanup ( ) ; <nl> <nl> / / hold in disconnected pool until all refs are released <nl> if ( pnode - > fNetworkNode | | pnode - > fInbound ) <nl> mmm a / src / net . h <nl> ppp b / src / net . h <nl> class CNode <nl> void Subscribe ( unsigned int nChannel , unsigned int nHops = 0 ) ; <nl> void CancelSubscribe ( unsigned int nChannel ) ; <nl> void CloseSocketDisconnect ( ) ; <nl> - void Cleanup ( ) ; <nl> - <nl> <nl> / / Denial - of - service detection / prevention <nl> / / The idea is to detect peers that are behaving <nl>
remove unused CNode::Cleanup()
bitcoin/bitcoin
2831a03b798e0eea724250a6ba15cb637800354d
2014-06-22T12:51:38Z
mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / CrystalHD . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / CrystalHD . cpp <nl> class DllLibCrystalHDInterface <nl> virtual BCM : : BC_STATUS DtsStartDecoder ( void * hDevice ) = 0 ; <nl> virtual BCM : : BC_STATUS DtsSetVideoParams ( void * hDevice , uint32_t videoAlg , int FGTEnable , int MetaDataEnable , int Progressive , uint32_t OptFlags ) = 0 ; <nl> virtual BCM : : BC_STATUS DtsStartCapture ( void * hDevice ) = 0 ; <nl> - virtual BCM : : BC_STATUS DtsStartCaptureImmidiate ( void * hDevice , uint32_t Reserved ) = 0 ; <nl> virtual BCM : : BC_STATUS DtsFlushRxCapture ( void * hDevice , int bDiscardOnly ) = 0 ; <nl> virtual BCM : : BC_STATUS DtsSetFFRate ( void * hDevice , uint32_t rate ) = 0 ; <nl> virtual BCM : : BC_STATUS DtsGetDriverStatus ( void * hDevice , BCM : : BC_DTS_STATUS * pStatus ) = 0 ; <nl> class DllLibCrystalHD : public DllDynamic , DllLibCrystalHDInterface <nl> DEFINE_METHOD1 ( BCM : : BC_STATUS , DtsStopDecoder , ( void * p1 ) ) <nl> DEFINE_METHOD6 ( BCM : : BC_STATUS , DtsSetVideoParams , ( void * p1 , uint32_t p2 , int p3 , int p4 , int p5 , uint32_t p6 ) ) <nl> DEFINE_METHOD1 ( BCM : : BC_STATUS , DtsStartCapture , ( void * p1 ) ) <nl> - DEFINE_METHOD2 ( BCM : : BC_STATUS , DtsStartCaptureImmidiate , ( void * p1 , uint32_t p2 ) ) <nl> DEFINE_METHOD2 ( BCM : : BC_STATUS , DtsFlushRxCapture , ( void * p1 , int p2 ) ) <nl> DEFINE_METHOD2 ( BCM : : BC_STATUS , DtsSetFFRate , ( void * p1 , uint32_t p2 ) ) <nl> DEFINE_METHOD2 ( BCM : : BC_STATUS , DtsGetDriverStatus , ( void * p1 , BCM : : BC_DTS_STATUS * p2 ) ) <nl> class DllLibCrystalHD : public DllDynamic , DllLibCrystalHDInterface <nl> RESOLVE_METHOD_RENAME ( DtsStopDecoder , DtsStopDecoder ) <nl> RESOLVE_METHOD_RENAME ( DtsSetVideoParams , DtsSetVideoParams ) <nl> RESOLVE_METHOD_RENAME ( DtsStartCapture , DtsStartCapture ) <nl> - RESOLVE_METHOD_RENAME ( DtsStartCaptureImmidiate , DtsStartCaptureImmidiate ) <nl> RESOLVE_METHOD_RENAME ( DtsFlushRxCapture , DtsFlushRxCapture ) <nl> RESOLVE_METHOD_RENAME ( DtsSetFFRate , DtsSetFFRate ) <nl> RESOLVE_METHOD_RENAME ( DtsGetDriverStatus , DtsGetDriverStatus ) <nl>
[chd] remove DtsStartCaptureImmidiate routine
xbmc/xbmc
a2d572e00d51e38f8243732f945ea504c9629058
2010-07-03T18:27:45Z
mmm a / examples / billiards . py <nl> ppp b / examples / billiards . py <nl> def circle ( x , y , color ) : <nl> def clear ( ) : <nl> for t in range ( 0 , max_steps ) : <nl> for i in range ( 0 , n_balls ) : <nl> - x . grad [ t , i ] = ti . Vector ( [ 0 . 0 , 0 . 0 ] ) <nl> - v . grad [ t , i ] = ti . Vector ( [ 0 . 0 , 0 . 0 ] ) <nl> impulse [ t , i ] = ti . Vector ( [ 0 . 0 , 0 . 0 ] ) <nl> - impulse . grad [ t , i ] = ti . Vector ( [ 0 . 0 , 0 . 0 ] ) <nl> <nl> <nl> def main ( ) : <nl> def main ( ) : <nl> <nl> for iter in range ( 200 ) : <nl> clear ( ) <nl> - init_x . grad [ None ] = [ 0 . 0 , 0 . 0 ] <nl> - init_v . grad [ None ] = [ 0 . 0 , 0 . 0 ] <nl> <nl> with ti . Tape ( loss ) : <nl> forward ( ) <nl> mmm a / examples / rigid_body . py <nl> ppp b / examples / rigid_body . py <nl> def clear_states ( ) : <nl> omega_inc [ t , i ] = 0 . 0 <nl> <nl> <nl> - def clear ( ) : <nl> - ti . clear_all_gradients ( ) <nl> - clear_states ( ) <nl> - <nl> - <nl> def setup_robot ( objects , springs ) : <nl> global n_objects , n_springs <nl> n_objects = len ( objects ) <nl> def main ( ) : <nl> <nl> forward ( ' initial ' ) <nl> for iter in range ( 50 ) : <nl> - clear ( ) <nl> + clear_states ( ) <nl> <nl> with ti . Tape ( loss ) : <nl> forward ( ) <nl> def main ( ) : <nl> weights [ i , j ] - = scale * weights . grad [ i , j ] <nl> bias [ i ] - = scale * bias . grad [ i ] <nl> <nl> - clear ( ) <nl> + clear_states ( ) <nl> forward ( ' final ' ) <nl> <nl> <nl> mmm a / examples / wave . py <nl> ppp b / examples / wave . py <nl> def forward ( output = None ) : <nl> loss [ None ] = 0 <nl> compute_loss ( steps - 1 ) <nl> <nl> - @ ti . kernel <nl> - def clear_p_grad ( ) : <nl> - for t , i , j in p : <nl> - p . grad [ t , i , j ] = 0 <nl> - <nl> - @ ti . kernel <nl> - def clear_initial_grad ( ) : <nl> - for i , j in initial : <nl> - initial . grad [ i , j ] = 0 <nl> - <nl> def main ( ) : <nl> # initialization <nl> target_img = cv2 . imread ( ' iclr2020 . png ' ) [ : , : , 0 ] / 255 . 0 <nl> def main ( ) : <nl> # initial [ n_grid / / 2 , n_grid / / 2 ] = 1 <nl> <nl> for opt in range ( 200 ) : <nl> - clear_p_grad ( ) <nl> - clear_initial_grad ( ) <nl> - <nl> with ti . Tape ( loss ) : <nl> forward ( ) <nl> <nl> mmm a / python / taichi_lang / __init__ . py <nl> ppp b / python / taichi_lang / __init__ . py <nl> <nl> determinant = Matrix . determinant <nl> set_default_fp = pytaichi . set_default_fp <nl> <nl> - def Tape ( loss ) : <nl> + def Tape ( loss , clear_gradients = True ) : <nl> + if clear_gradients : <nl> + clear_all_gradients ( ) <nl> loss [ None ] = 0 <nl> loss . grad [ None ] = 1 <nl> return runtime . get_tape ( loss ) <nl>
applied clear_all_gradients everywhere
taichi-dev/taichi
449cb21c8f77c3aa3b42d5242dce7997c1fc1c08
2019-09-18T00:24:34Z
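The taichi patch above makes `ti.Tape(loss, clear_gradients=True)` clear all gradients itself, which is why the per-example `clear()` kernels and manual `*.grad[...] = 0` writes are deleted. Below is a minimal, hypothetical re-creation of that pattern in plain Python; it is not the real taichi API, and `Field` is a stand-in for a taichi tensor.

```python
# Hypothetical sketch of a Tape that zeroes gradients up front, mirroring the
# clear_gradients=True behaviour added in the taichi commit above.
class Field:
    def __init__(self):
        self.value = 0.0
        self.grad = 0.0

class Tape:
    def __init__(self, loss, fields, clear_gradients=True):
        self.loss = loss
        self.fields = fields
        self.clear_gradients = clear_gradients

    def __enter__(self):
        if self.clear_gradients:      # callers no longer clear gradients by hand
            for f in self.fields:
                f.grad = 0.0
        self.loss.value = 0.0         # mirrors: loss[None] = 0
        self.loss.grad = 1.0          # mirrors: loss.grad[None] = 1
        return self

    def __exit__(self, exc_type, exc, tb):
        return False

loss, x = Field(), Field()
with Tape(loss, fields=[loss, x]):
    loss.value = 2.0 * x.value        # forward pass would go here
```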
mmm a / docs / Filament . md . html <nl> ppp b / docs / Filament . md . html <nl> <nl> <nl> We can improve this implementation by using half precision floats . This optimization requires changes to the original equation as there are two problems when computing $ 1 - ( \ NoH ) ^ 2 $ in half - floats . First , this computation suffers from floating point cancellation when $ ( \ NoH ) ^ 2 $ is close to 1 ( highlights ) . Secondly $ \ NoH $ does not have enough precision around 1 . <nl> <nl> - The solution involves Lagrange ' s identiy : <nl> + The solution involves Lagrange ' s identity : <nl> <nl> $ $ \ begin { equation } <nl> | a \ times b | ^ 2 = | a | ^ 2 | b | ^ 2 - ( a \ cdot b ) ^ 2 <nl> <nl> Where : <nl> <nl> $ $ \ begin { equation } <nl> - V ( v , l , \ alpha ) = \ frac { G ( v , l , \ alpha ) } { 4 ( \ NoV ) ( \ NoL ) } = V_1 ( l ) V_1 ( v ) <nl> + V ( v , l , \ alpha ) = \ frac { G ( v , l , \ alpha ) } { 4 ( \ NoV ) ( \ NoL ) } = V_1 ( l , \ alpha ) V_1 ( v , \ alpha ) <nl> \ end { equation } $ $ <nl> <nl> And : <nl> <nl> <nl> # # # Fresnel ( specular F ) <nl> <nl> - The Fresnel effect plays an important role in the apperance of physically based materials . This effect models the fact that the amount of light the viewer sees reflected from a surface depends on the viewing angle . Large bodies of water are a perfect way to experience this phenomenon , as shown in figure [ fresnelLake ] . When looking at the water straight down ( at normal incidence ) you can see through the water . However , when looking further out in the distance ( at grazing angle , where perceived light rays are getting parallel to the surface ) , you will see the specular reflections on the water become more intense . <nl> + The Fresnel effect plays an important role in the appearance of physically based materials . This effect models the fact that the amount of light the viewer sees reflected from a surface depends on the viewing angle . Large bodies of water are a perfect way to experience this phenomenon , as shown in figure [ fresnelLake ] . When looking at the water straight down ( at normal incidence ) you can see through the water . However , when looking further out in the distance ( at grazing angle , where perceived light rays are getting parallel to the surface ) , you will see the specular reflections on the water become more intense . <nl> <nl> The amount of light reflected depends not only on the viewing angle , but also on the index of refraction ( IOR ) of the material . At normal incidence ( perpendicular to the surface , or 0 degree angle ) , the amount of light reflected back is noted $ \ fNormal $ and can be derived from the IOR as we will see in section [ Reflectance remapping ] . The amount of light reflected back at grazing angle is noted $ \ fGrazing $ and approaches 100 % for smooth materials . <nl> <nl> <nl> <nl> # # # Energy gain in diffuse reflectance <nl> <nl> - The Lambert diffuse BRDF does not account for the light that reflects at the surface and that is therefore not able to participe in the diffuse scattering event . <nl> + The Lambert diffuse BRDF does not account for the light that reflects at the surface and that is therefore not able to participate in the diffuse scattering event . <nl> <nl> [ TODO : talk about the issue with fr + fd ] <nl> <nl> <nl> <nl> ! [ Figure [ singleVsMultiBounce ] : Single scattering ( left ) vs multiscattering ] ( images / diagram_single_vs_multi_scatter . 
png ) <nl> <nl> - Based on this simple explanation , we can intuitively deduce that the rougher a surface is , the higher the chances are that energy gets lost because of the failure to account for multiple scaterring events . This loss of energy appears to darken rough materials . Metallic surfaces are particularly affected because all of their reflectance is specular . This darkening effect is illustrated in figure [ metallicRoughEnergyLoss ] . With multiscattering energy conservation can be achieved , as shown in figure [ metallicRoughEnergyConservation ] . <nl> + Based on this simple explanation , we can intuitively deduce that the rougher a surface is , the higher the chances are that energy gets lost because of the failure to account for multiple scattering events . This loss of energy appears to darken rough materials . Metallic surfaces are particularly affected because all of their reflectance is specular . This darkening effect is illustrated in figure [ metallicRoughEnergyLoss ] . With multiscattering energy conservation can be achieved , as shown in figure [ metallicRoughEnergyConservation ] . <nl> <nl> ! [ Figure [ metallicRoughEnergyLoss ] : Darkening increases with roughness due to single scattering ] ( images / material_metallic_energy_loss . png ) <nl> <nl> <nl> All materials <nl> : * * Base color * * should be devoid of lighting information , except for micro - occlusion . <nl> <nl> - * * Metallic * * is almost a binary value . Pure conductors have a metallic value of 1 and pure dielectrics have a metallic value of 0 . You should try to use values close at or close to 0 and 1 . Interemdiate values are meant for transitions between surface types ( metal to rust for instance ) . <nl> + * * Metallic * * is almost a binary value . Pure conductors have a metallic value of 1 and pure dielectrics have a metallic value of 0 . You should try to use values close at or close to 0 and 1 . Intermediate values are meant for transitions between surface types ( metal to rust for instance ) . <nl> <nl> Non - metallic materials <nl> : * * Base color * * represents the reflected color and should be an sRGB value in the range 50 - 240 ( strict range ) or 30 - 240 ( tolerant range ) . <nl> <nl> <nl> The standard material model described previously can only describe isotropic surfaces , that is , surfaces whose properties are identical in all directions . Many real - world materials , such as brushed metal , can , however , only be replicated using an anisotropic model . <nl> <nl> - ! [ Figure [ anisotropic ] : Comparison of isotropic material ( left ) and anistropic material ( right ) ] ( images / material_anisotropic . png ) <nl> + ! [ Figure [ anisotropic ] : Comparison of isotropic material ( left ) and anisotropic material ( right ) ] ( images / material_anisotropic . png ) <nl> <nl> # # # Anisotropic specular BRDF <nl> <nl> <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> [ Listing [ clothBRDF ] : Implementation of Ashikhmin ' s velvet NDF in GLSL ] <nl> <nl> - In [ # Estevez17 ] Estevez and Kulla propose a different NDF ( called the " Charlie " sheen ) that is based on an exponentiated sinusoidal instead of an inverted gaussian . 
This NDF is appealing for several reasons : its parameterization feels more natural and intuitive , it provides a softer appearance and , as shown in equation $ \ ref { charlieNDF } $ , its implementation is simpler : <nl> + In [ # Estevez17 ] Estevez and Kulla propose a different NDF ( called the " Charlie " sheen ) that is based on an exponentiated sinusoidal instead of an inverted Gaussian . This NDF is appealing for several reasons : its parameterization feels more natural and intuitive , it provides a softer appearance and , as shown in equation $ \ ref { charlieNDF } $ , its implementation is simpler : <nl> <nl> $ $ \ begin { equation } \ label { charlieNDF } <nl> D ( m ) = \ frac { ( 2 + \ frac { 1 } { \ alpha } ) sin ( \ theta ) ^ { \ frac { 1 } { \ alpha } } } { 2 \ pi } <nl> <nl> $ Sun_ { \ bot } $ | 100 , 000 | 105 , 000 | 81 , 000 <nl> [ Table [ sunSkyIlluminance ] : Illuminance values in $ lx $ ( a full moon has an illuminance of 1 $ lx $ ) ] <nl> <nl> - Dynamic directional lights are particulary cheap to evaluate at runtime , as shown in listing [ glslDirectionalLight ] . <nl> + Dynamic directional lights are particularly cheap to evaluate at runtime , as shown in listing [ glslDirectionalLight ] . <nl> <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> vec3 l = normalize ( - lightDirection ) ; <nl> <nl> I = \ frac { \ Phi } { 4 \ pi } <nl> \ end { equation } $ $ <nl> <nl> - By simple subsitution of $ I $ in $ \ ref { punctualLightEquation } $ and $ E $ in $ \ ref { luminanceEquation } $ we can formulate the luminance equation of a point light as a function of the luminous power ( see $ \ ref { pointLightLuminanceEquation } $ ) . <nl> + By simple substitution of $ I $ in $ \ ref { punctualLightEquation } $ and $ E $ in $ \ ref { luminanceEquation } $ we can formulate the luminance equation of a point light as a function of the luminous power ( see $ \ ref { pointLightLuminanceEquation } $ ) . <nl> <nl> $ $ \ begin { equation } \ label { pointLightLuminanceEquation } <nl> L_ { out } = f ( v , l ) \ frac { \ Phi } { 4 \ pi d ^ 2 } \ left < \ NoL \ right > <nl> <nl> <nl> ! [ Figure [ spotLightTestFocused ] : Comparison of spot light outer angles , 55 degrees ( left ) and 15 degrees ( right ) ] ( images / screenshot_spot_light_focused . png ) <nl> <nl> - The coupling of illumination and the outer cone means that an artist cannot tweak the influence cone of a spot light without also changing the perceived illumination . It therefore makes sense to provide artists with a parameter to disable this coupling . Equations $ \ ref { spotLightLuminousPowerB } $ shows how to fomulate the luminous power for that purpose . <nl> + The coupling of illumination and the outer cone means that an artist cannot tweak the influence cone of a spot light without also changing the perceived illumination . It therefore makes sense to provide artists with a parameter to disable this coupling . Equations $ \ ref { spotLightLuminousPowerB } $ shows how to formulate the luminous power for that purpose . <nl> <nl> $ $ \ begin { equation } \ label { spotLightLuminousPowerB } <nl> \ Phi = \ pi I \ \ <nl> <nl> L_ { out } = f ( v , l ) \ frac { I } { d ^ 2 } \ left < \ NoL \ right > \ Psi ( l ) <nl> \ end { equation } $ $ <nl> <nl> - The term $ \ Psi ( l ) $ is the photometric attenuation function . It depends on the light evector , but also on the direction of the light . 
Spot lights already possess a direction vector but we need to introduce one for photometric point lights as well . <nl> + The term $ \ Psi ( l ) $ is the photometric attenuation function . It depends on the light vector , but also on the direction of the light . Spot lights already possess a direction vector but we need to introduce one for photometric point lights as well . <nl> <nl> The photometric attenuation function can be easily implemented in GLSL by adding a new attenuation factor to the implementation of punctual lights ( listing [ glslPunctualLight ] ) . The modified implementation is show in listing [ glslPhotometricPunctualLight ] . <nl> <nl> <nl> mmmmmmmmmmmmmmmmmmmmmmmm - - : | : mmmmmmmmmmmmmmmmmmmmm <nl> * * Type * * | Directional , point , spot or area <nl> * * Direction * * | Used for directional lights , spot lights , photometric point lights , and linear and tubular area lights ( orientation ) <nl> - * * Color * * | The color of emitted light , as a linear RGB color . Can be specified as an sRGB color or a color tempetature in the tools <nl> + * * Color * * | The color of emitted light , as a linear RGB color . Can be specified as an sRGB color or a color temperature in the tools <nl> * * Intensity * * | The light ' s brightness . The unit depends on the type of light <nl> * * Falloff radius * * | Maximum distance of influence <nl> * * Inner angle * * | Angle of the inner cone for spot lights , in degrees <nl> <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> [ Listing [ preexposedLighting ] : The output of the lighting pass is pre - exposed to fit in half - float buffers ] <nl> <nl> - This solution solves the storage problem but requires intermediate computations to be performed with single precision floats . We would instead prefer to perform all ( or ar least most ) of the lighting work using half precision floats instead . Doing so can greatly improve performance and power usage , particularly on mobile devices . Half precision floats are however ill - suited for this kind of work as common illuminance and luminance values ( for the sun for instance ) can exceed their range . The solution is to simply pre - expose the lights themselves instead of the result of the lighting pass . This can be done efficiently on the CPU if updating a light ' s constant buffer is cheap . This can also be done on the GPU , as shown in listing [ preexposedLights ] . <nl> + This solution solves the storage problem but requires intermediate computations to be performed with single precision floats . We would instead prefer to perform all ( or at least most ) of the lighting work using half precision floats instead . Doing so can greatly improve performance and power usage , particularly on mobile devices . Half precision floats are however ill - suited for this kind of work as common illuminance and luminance values ( for the sun for instance ) can exceed their range . The solution is to simply pre - expose the lights themselves instead of the result of the lighting pass . This can be done efficiently on the CPU if updating a light ' s constant buffer is cheap . This can also be done on the GPU , as shown in listing [ preexposedLights ] . 
<nl> <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> / / The inputs must be highp / single precision , <nl> <nl> <nl> # # # # Diffuse BRDF integration # # # # <nl> <nl> - Using the lambertian BRDF [ ^ iblDiffuse1 ] , we get the radiance : <nl> + Using the Lambertian BRDF [ ^ iblDiffuse1 ] , we get the radiance : <nl> <nl> $ $ <nl> \ begin { align * } <nl> <nl> <nl> ! [ Figure [ iblOriginal ] : Image - based environment ] ( images / ibl / ibl_river_roughness_m0 . png style = " max - width : 100 % ; " ) <nl> <nl> - ! [ Figure [ iblIrradiance ] : Image - based irradiance map using the lambertian BRDF ] ( images / ibl / ibl_irradiance . png style = " max - width : 100 % ; " ) <nl> + ! [ Figure [ iblIrradiance ] : Image - based irradiance map using the Lambertian BRDF ] ( images / ibl / ibl_irradiance . png style = " max - width : 100 % ; " ) <nl> <nl> However , the irradiance can also be approximated very closely by a decomposition into Spherical Harmonics ( SH , described in more details in the Spherical Harmonics section ) and calculated at runtime cheaply . It is usually best to avoid texture fetches on mobile and free - up a texture unit . Even if it is stored into a cubemap , it is orders of magnitude faster to pre - compute the integral using SH decomposition followed by a rendering . <nl> <nl> <nl> <nl> This expression is exact when the irradiance is constant . In fact , it is * * exact for the D . C . component of the irradiance * * . It is also exact when $ \ vec v = \ vec n $ . <nl> <nl> - $ \ color { blue } { \ frac { 1 } { K ( \ alpha ) } LD ( n , \ alpha ) } $ can easily be precomputed into a mip - mapped cubemap where each mipmap level contains the radiance for a different value of $ \ alpha $ . Also note that $ f_0 $ being a constant , it disapears entirely from $ LD ( ) $ and $ K ( \ alpha ) $ . <nl> + $ \ color { blue } { \ frac { 1 } { K ( \ alpha ) } LD ( n , \ alpha ) } $ can easily be precomputed into a mip - mapped cubemap where each mipmap level contains the radiance for a different value of $ \ alpha $ . Also note that $ f_0 $ being a constant , it disappears entirely from $ LD ( ) $ and $ K ( \ alpha ) $ . <nl> <nl> $ $ <nl> \ Lout ^ { simplified } ( n , \ alpha ) = \ color { blue } { \ frac { 1 } { K ( \ alpha ) } LD ( n , \ alpha ) } <nl> <nl> <nl> # # # Pre - integration for multiscattering # # # <nl> <nl> - In section [ Energy loss in specular reflectance ] we discussed how to use a second scaled specular lobe to compensate for the energy loss due to only acounting for a single scattering event in our BRDF . This energy compensation lobe is scaled by a term that depends on $ r $ defined in the following way : <nl> + In section [ Energy loss in specular reflectance ] we discussed how to use a second scaled specular lobe to compensate for the energy loss due to only accounting for a single scattering event in our BRDF . This energy compensation lobe is scaled by a term that depends on $ r $ defined in the following way : <nl> <nl> $ $ \ begin { equation } <nl> r = \ int_ { \ Omega } D ( l , v ) V ( l , v ) \ left < \ NoL \ right > dl <nl> <nl> <nl> Colin Barré - Brisebois and Stephen Hill propose in [ # Hill12 ] a mathematically sound solution called * Reoriented Normal Mapping * , which consists in rotating the basis of the detail map onto the normal from the base map . This technique relies on the shortest arc quaternion to apply the rotation , which greatly simplifies thanks to the properties of the tangent space . 
<nl> <nl> - Following the simplificationss described in [ # Hill12 ] , we can produce the GLSL implementation shown in listing [ reorientedNormalMapping ] . <nl> + Following the simplifications described in [ # Hill12 ] , we can produce the GLSL implementation shown in listing [ reorientedNormalMapping ] . <nl> <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> vec3 t = texture ( baseMap , uv ) . xyz * vec3 ( 2 . 0 , 2 . 0 , 2 . 0 ) + vec3 ( - 1 . 0 , - 1 . 0 , 0 . 0 ) ; <nl> <nl> <nl> Because we use photometric units throughout the lighting pipeline , the light reaching the camera is an energy expressed in luminance $ L $ , in $ cd . m ^ { - 2 } $ . Light incident to the camera sensor can cover a large range of values , from $ 10 ^ { - 5 } cd . m ^ { - 2 } $ for starlight to $ 10 ^ { 9 } cd . m ^ { - 2 } $ for the sun . Since we obviously cannot manipulate and even less record such a large range of values , we need to remap them . <nl> <nl> - This range remapping is done in a camera by exposing the sensor for a certain time . To maximize the use of the limited range of the sensor , the scene ' s light range is centered around the " middle grey " , a value halfway between black and white . The exposition is therefore achieved by manipulating , either manually or automatically , 3 settings : <nl> + This range remapping is done in a camera by exposing the sensor for a certain time . To maximize the use of the limited range of the sensor , the scene ' s light range is centered around the " middle gray " , a value halfway between black and white . The exposition is therefore achieved by manipulating , either manually or automatically , 3 settings : <nl> <nl> - Aperture <nl> - Shutter speed <nl> <nl> <nl> That constant $ K $ is the reflected - light meter constant , which varies between manufacturers . We could find two common values for this constant : 12 . 5 , used by Canon , Nikon and Sekonic , and 14 , used by Pentax and Minolta . Given the wide availability of Canon and Nikon cameras , as well as our own usage of Sekonic light meters , we will choose to use $ K = 12 . 5 $ . <nl> <nl> - Since we want to work with $ EV_ { 100 } $ , we can subsitute $ K $ and $ S $ in equation $ \ ref { evK } $ to obtain equation $ \ ref { ev100L } $ . <nl> + Since we want to work with $ EV_ { 100 } $ , we can substitute $ K $ and $ S $ in equation $ \ ref { evK } $ to obtain equation $ \ ref { ev100L } $ . <nl> <nl> $ $ \ begin { equation } \ label { ev100L } <nl> EV = log_2 ( L \ frac { 100 } { 12 . 5 } ) <nl> <nl> <nl> The constant $ C $ is the incident - light meter constant , which varies between manufacturers and / or types of sensors . There are two common types of sensors : flat and hemispherical . For flat sensors , a common value is 250 . With hemispherical sensors , we could find two common values : 320 , used by Minolta , and 340 , used by Sekonic . <nl> <nl> - Since we want to work with $ EV_ { 100 } $ , we can subsitute $ S $ $ \ ref { evC } $ to obtain equation $ \ ref { ev100C } $ . <nl> + Since we want to work with $ EV_ { 100 } $ , we can substitute $ S $ $ \ ref { evC } $ to obtain equation $ \ ref { ev100C } $ . <nl> <nl> $ $ \ begin { equation } \ label { ev100C } <nl> EV = log_2 ( E \ frac { 100 } { C } ) <nl> <nl> <nl> # # # # Exposure compensation <nl> <nl> - Even though an exposure value actually indicates combinations of camera settings , it is often used by photographers to describe light intensity . 
This is why cameras let photographers apply an exposure compensation to over or under - expose an image . This setting can be used for artistic control but also to achieve proper exposure ( snow for instance will be exposed for as 18 % middle - grey ) . <nl> + Even though an exposure value actually indicates combinations of camera settings , it is often used by photographers to describe light intensity . This is why cameras let photographers apply an exposure compensation to over or under - expose an image . This setting can be used for artistic control but also to achieve proper exposure ( snow for instance will be exposed for as 18 % middle - gray ) . <nl> <nl> Applying an exposure compensation $ EC $ is a simple as adding an offset to the exposure value , as shown in equation $ \ ref { ec } $ . <nl> <nl> <nl> <nl> # # # Bloom <nl> <nl> - Because the EV scale is almost perceptually linear , the exposure value is also often used as a light unit . This means we could let artists specify the intensity of lights or emissive surfaces using exposure compensation as a unit . The intensity of emitted light would therefore be relative to the exposure settings . Using exposure compensation as a light unit should be avoided whenever possible but can be useful to force ( or cancel ) a bloom effect around emissive surfaces independently of the camera settings ( for instance , a light saber in a game should always bloom ) . <nl> + Because the EV scale is almost perceptually linear , the exposure value is also often used as a light unit . This means we could let artists specify the intensity of lights or emissive surfaces using exposure compensation as a unit . The intensity of emitted light would therefore be relative to the exposure settings . Using exposure compensation as a light unit should be avoided whenever possible but can be useful to force ( or cancel ) a bloom effect around emissive surfaces independently of the camera settings ( for instance , a lightsaber in a game should always bloom ) . <nl> <nl> ! [ Figure [ bloom ] : Saturated photosites on a sensor create a blooming effect in the bright parts of the scene ] ( images / screenshot_bloom . jpg ) <nl> <nl> <nl> <nl> To validate our implementation against reference renderings , we will use a commercial - grade Open Source physically based offline path tracer called Mitsuba . Mitsuba offers many different integrators , samplers and material models , which should allow us to provide fair comparisons with our real - time renderer . This path tracer also relies on a simple XML scene description format that should be easy to automatically generate from our own scene descriptions . <nl> <nl> - Figure [ mistubaReference ] and figure [ filamentReference ] show a simple scene , a perfectly smooth dielectric sphere , rendered respectively with Mitsuba and Filament . <nl> + Figure [ mitsubaReference ] and figure [ filamentReference ] show a simple scene , a perfectly smooth dielectric sphere , rendered respectively with Mitsuba and Filament . <nl> <nl> - ! [ Figure [ mistubaReference ] : Rendered in 2048x1440 in 1 minute and 42 seconds on a 12 core 2013 MacPro ] ( images / screenshot_ref_mitsuba . jpg ) <nl> + ! [ Figure [ mitsubaReference ] : Rendered in 2048x1440 in 1 minute and 42 seconds on a 12 core 2013 MacPro ] ( images / screenshot_ref_mitsuba . jpg ) <nl> <nl> ! [ Figure [ filamentReference ] : Rendered in 2048x1440 with MSAA 4x at 60 fps on a Nexus 9 device ( Tegra K1 GPU ) ] ( images / screenshot_ref_filament . jpg ) <nl> <nl> <nl> <nl> ! 
[ Figure [ coordinates ] : Red + X , green + Y , blue + Z ( rendered in Marmoset Toolbag ) . ] ( images / screenshot_coordinates . jpg ) <nl> <nl> - # # # Cubemaps cooordinates system <nl> + # # # Cubemaps coordinates system <nl> <nl> All the cubemaps used in Filament ( environment background , reflection probes , etc . ) will follow the OpenGL convention for faces alignment show in figure [ cubemapCoordinates ] . <nl> <nl> <nl> \ Lout ( n , v , \ Theta ) \ equiv \ frac { 1 } { N } \ sum_ { i } ^ { N } \ frac { f ( l_ { i } , v , \ Theta ) } { p ( l_i , v , \ Theta ) } L_ { \ perp } ( l_i ) \ left < n \ cdot l_i \ right > <nl> \ end { equation } $ $ <nl> <nl> - In equation $ \ ref { iblImportanceSampling } $ , $ p $ is the probaility density function ( PDF ) of the BRDF $ f $ , and $ l_i $ represents the _important direction samples_ with that BRDF . These samples depend on $ v $ and $ \ alpha $ . The definition of the PDF and its Jacobian ( the transform from $ h $ to $ l $ ) is shown in equation $ \ ref { iblPDF } $ . <nl> + In equation $ \ ref { iblImportanceSampling } $ , $ p $ is the probability density function ( PDF ) of the BRDF $ f $ , and $ l_i $ represents the _important direction samples_ with that BRDF . These samples depend on $ v $ and $ \ alpha $ . The definition of the PDF and its Jacobian ( the transform from $ h $ to $ l $ ) is shown in equation $ \ ref { iblPDF } $ . <nl> <nl> $ $ \ begin { equation } \ label { iblPDF } <nl> p ( l , v , \ Theta ) = D ( h , \ alpha ) \ left < \ NoH \ right > J ( h ) \ \ <nl> <nl> l = \ { cos \ phi sin \ theta , sin \ phi sin \ theta , cos \ theta \ } <nl> \ end { equation } $ $ <nl> <nl> - Typically , $ ( \ zeta_ { \ phi } , \ zeta_ { \ theta } ) $ are chosen usign the Hammersely uniform distribution algorightm described in section [ Hammersley sequence ] . <nl> + Typically , $ ( \ zeta_ { \ phi } , \ zeta_ { \ theta } ) $ are chosen using the Hammersley uniform distribution algorithm described in section [ Hammersley sequence ] . <nl> <nl> # # # Pre - filtered importance sampling <nl> <nl> - Importance sampling considers only the PDF to generate important directions ; in particular its oblivious to the actual content of the IBL . If the later contains high frequencies in areas without a lot of samples , the integration won ’ t be accurate . This can be somewhat mitigated by using a technique called _pre - filtered importance sampling_ , in addition this allows the integral to converge with much less samples . <nl> + Importance sampling considers only the PDF to generate important directions ; in particular , it is oblivious to the actual content of the IBL . If the latter contains high frequencies in areas without a lot of samples , the integration won ’ t be accurate . This can be somewhat mitigated by using a technique called _pre - filtered importance sampling_ , in addition this allows the integral to converge with many fewer samples . <nl> <nl> Pre - filtered importance sampling uses several images of the environment increasingly low - pass filtered . This is typically implemented very efficiently with mipmaps and a box filter . The LOD is selected based on the sample importance , that is , low probability samples use a higher LOD index ( more filtered ) . 
<nl> <nl> <nl> \ int_ { \ phi = 0 } ^ { 2 \ pi } \ int_ { \ theta = 0 } ^ { \ frac { \ pi } { 2 } } D ( \ theta , \ phi ) cos \ theta sin \ theta d \ theta d \ phi = 1 \ \ <nl> \ end { equation } $ $ <nl> <nl> - The PDF of the BRDF can therefore be expressed as in equation $ \ ref { importantPDF } $ : <nl> + The PDF of the BRDF can therefore be expressed as in equation $ \ ref { importantPDF } $ : <nl> <nl> - $ $ \ begin { equation } <nl> + $ $ \ begin { equation } \ label { importantPDF } <nl> p ( \ theta , \ phi ) = \ frac { \ alpha ^ 2 } { \ pi ( cos ^ 2 \ theta ( \ alpha ^ 2 - 1 ) + 1 ) cos \ theta sin \ theta } <nl> \ end { equation } $ $ <nl> <nl> <nl> } <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl> - # # Sample validation scene for Mistuba <nl> + # # Sample validation scene for Mitsuba <nl> <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> & lt ; scene version = " 0 . 5 . 0 " & gt ; <nl>
Documentation grammar and typo fixes
google/filament
f78a680b9c269ad3e3c5fa4328e87c83d988c347
2018-08-28T18:35:53Z
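The exposure sections quoted in the filament diff define the exposure value at ISO 100 as EV = log2(L · 100 / K) for a reflected-light meter (K = 12.5) and EV = log2(E · 100 / C) for an incident-light meter (C = 250 for flat sensors). A small numeric sketch of those two formulas follows, in Python purely for illustration; Filament itself would evaluate this in shader or C++ code.

```python
import math

def ev100_from_luminance(luminance_cd_m2, k=12.5):
    # EV = log2(L * 100 / K); K = 12.5 is the Canon/Nikon/Sekonic meter constant.
    return math.log2(luminance_cd_m2 * 100.0 / k)

def ev100_from_illuminance(illuminance_lx, c=250.0):
    # EV = log2(E * 100 / C); C = 250 is a common flat-sensor constant.
    return math.log2(illuminance_lx * 100.0 / c)

# A sunlit scene around 4000 cd/m^2 lands near EV 15, as expected for daylight.
print(round(ev100_from_luminance(4000.0), 2))
print(round(ev100_from_illuminance(100000.0), 2))  # roughly noon-sun illuminance
```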
mmm a / modules / tools / rosbag / profile_planning . py <nl> ppp b / modules / tools / rosbag / profile_planning . py <nl> def stat_planning ( planning_msg ) : <nl> return stats <nl> stats [ " total_time " ] = planning_msg . latency_stats . total_time_ms <nl> stats [ " init_time " ] = planning_msg . latency_stats . init_frame_time_ms <nl> + used_time = stats [ " init_time " ] <nl> stats [ " obstacles " ] = len ( planning_msg . decision . object_decision . decision ) <nl> for task in planning_msg . latency_stats . task_stats : <nl> stats [ task . name ] = task . time_ms <nl> + used_time + = task . time_ms <nl> + stats [ " other " ] = stats [ " total_time " ] - used_time <nl> return stats <nl> <nl> <nl> def print_stat ( msg , fhandle ) : <nl> keywords = [ <nl> ' obstacles ' , ' total_time ' , ' init_time ' , u ' TrafficDecider ' , <nl> u ' DpPolyPathOptimizer ' , u ' PathDecider ' , u ' DpStSpeedOptimizer ' , <nl> - u ' SpeedDecider ' , u ' QpSplinePathOptimizer ' , u ' QpSplineStSpeedOptimizer ' <nl> + u ' SpeedDecider ' , u ' QpSplinePathOptimizer ' , u ' QpSplineStSpeedOptimizer ' , <nl> + u ' other ' <nl> ] <nl> <nl> if g_first_time : <nl>
tool: add remaining other time stats in planning.
ApolloAuto/apollo
bf942f8f45280c2065fd4856ad2c981b5e5c54d1
2017-11-22T18:06:12Z
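The apollo patch above sums the per-task latencies plus the init time and reports whatever is left of `total_time` as an `other` bucket. A stripped-down sketch of that bookkeeping with plain values instead of the planning protobuf (field names follow the diff):

```python
def planning_latency_stats(total_time_ms, init_frame_time_ms, task_stats):
    """task_stats: iterable of (task_name, time_ms) pairs."""
    stats = {"total_time": total_time_ms, "init_time": init_frame_time_ms}
    used_time = init_frame_time_ms
    for name, time_ms in task_stats:
        stats[name] = time_ms
        used_time += time_ms
    # Anything not attributed to init or a named task shows up as "other".
    stats["other"] = total_time_ms - used_time
    return stats

# 2 ms of this 12 ms frame is unaccounted for by the named tasks.
print(planning_latency_stats(12.0, 1.0, [("TrafficDecider", 4.0), ("PathDecider", 5.0)]))
```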
mmm a / . gitignore <nl> ppp b / . gitignore <nl> DerivedData <nl> * . pbobjc . * <nl> * . pbrpc . * <nl> src / objective - c / * * / Build <nl> + src / objective - c / boringssl_prefix_headers <nl> <nl> # Cocoapods artifacts <nl> Pods / <nl> mmm a / templates / src / objective - c / BoringSSL - GRPC . podspec . template <nl> ppp b / templates / src / objective - c / BoringSSL - GRPC . podspec . template <nl> <nl> EOF <nl> <nl> # Grab prefix header from Github repo <nl> - curl - o include / openssl / boringssl_prefix_symbols . h - L https : / / raw . githubusercontent . com / grpc / grpc / master / src / objective - c / boringssl_prefix_headers / boringssl_prefix_symbols - $ { boringssl_commit } . h <nl> + base64 - D | gunzip > include / openssl / boringssl_prefix_symbols . h < < EOF <nl> + % for line in open ( " src / objective - c / boringssl_prefix_headers / boringssl_prefix_symbols . h . gz . base64 " , " r " ) . readlines ( ) : <nl> + $ { line } \ <nl> + % endfor <nl> + EOF <nl> <nl> # We are renaming openssl to openssl_grpc so that there is no conflict with openssl if it exists <nl> find . - type f \ \ ( - path ' * . h ' - or - path ' * . cc ' - or - path ' * . c ' \ \ ) - print0 | xargs - 0 - L1 sed - E - i ' . grpc_back ' ' s ; # include < openssl / ; # include < openssl_grpc / ; g ' <nl> deleted file mode 100644 <nl> index 94c07657d4d . . 00000000000 <nl> mmm a / templates / src / objective - c / BoringSSL - GRPC . podspec . template - e <nl> ppp / dev / null <nl> <nl> - % YAML 1 . 2 <nl> mmm - | <nl> - < % ! <nl> - def expand_symbol_list ( symbol_list ) : <nl> - return ' , \ n ' . join ( " ' # define % s GRPC_SHADOW_ % s ' " % ( symbol , symbol ) for symbol in symbol_list ) <nl> - import subprocess <nl> - boringssl_commit = subprocess . check_output ( [ ' git ' , ' rev - parse ' , ' HEAD ' ] , cwd = ' third_party / boringssl ' ) . decode ( ) . strip ( ) <nl> - % > <nl> - <nl> - # This file has been automatically generated from a template file . <nl> - # Please make modifications to <nl> - # ` templates / src / objective - c / BoringSSL - GRPC . podspec . template ` instead . This <nl> - # file can be regenerated from the template by running <nl> - # ` tools / buildgen / generate_projects . sh ` . Because of some limitations of this <nl> - # template , you might actually need to run the same script twice in a row . <nl> - # ( see err_data . c section ) <nl> - <nl> - # BoringSSL CocoaPods podspec <nl> - <nl> - # Copyright 2015 , Google Inc . <nl> - # All rights reserved . <nl> - # <nl> - # Redistribution and use in source and binary forms , with or without <nl> - # modification , are permitted provided that the following conditions are <nl> - # met : <nl> - # <nl> - # * Redistributions of source code must retain the above copyright <nl> - # notice , this list of conditions and the following disclaimer . <nl> - # * Redistributions in binary form must reproduce the above <nl> - # copyright notice , this list of conditions and the following disclaimer <nl> - # in the documentation and / or other materials provided with the <nl> - # distribution . <nl> - # * Neither the name of Google Inc . nor the names of its <nl> - # contributors may be used to endorse or promote products derived from <nl> - # this software without specific prior written permission . 
<nl> - # <nl> - # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - <nl> - Pod : : Spec . new do | s | <nl> - s . name = ' BoringSSL - GRPC ' <nl> - version = ' 0 . 0 . 5 ' <nl> - s . version = version <nl> - s . summary = ' BoringSSL is a fork of OpenSSL that is designed to meet Google \ ' s needs . ' <nl> - # Adapted from the homepage : <nl> - s . description = < < - DESC <nl> - BoringSSL is a fork of OpenSSL that is designed to meet Google ' s needs . <nl> - <nl> - Although BoringSSL is an open source project , it is not intended for general use , as OpenSSL is . <nl> - We don ' t recommend that third parties depend upon it . Doing so is likely to be frustrating <nl> - because there are no guarantees of API stability . Only the latest version of this pod is <nl> - supported , and every new version is a new major version . <nl> - <nl> - We update Google libraries and programs that use BoringSSL as needed when deciding to make API <nl> - changes . This allows us to mostly avoid compromises in the name of compatibility . It works for <nl> - us , but it may not work for you . <nl> - <nl> - As a Cocoapods pod , it has the advantage over OpenSSL ' s pods that the library doesn ' t need to <nl> - be precompiled . This eliminates the 10 - 20 minutes of wait the first time a user does " pod <nl> - install " , lets it be used as a dynamic framework ( pending solution of Cocoapods ' issue # 4605 ) , <nl> - and works with bitcode automatically . It ' s also thought to be smaller than OpenSSL ( which takes <nl> - 1MB - 2MB per ARM architecture ) , but we don ' t have specific numbers yet . <nl> - <nl> - BoringSSL arose because Google used OpenSSL for many years in various ways and , over time , built <nl> - up a large number of patches that were maintained while tracking upstream OpenSSL . As Google ' s <nl> - product portfolio became more complex , more copies of OpenSSL sprung up and the effort involved <nl> - in maintaining all these patches in multiple places was growing steadily . <nl> - <nl> - Currently BoringSSL is the SSL library in Chrome / Chromium , Android ( but it ' s not part of the <nl> - NDK ) and a number of other apps / programs . <nl> - DESC <nl> - s . homepage = ' https : / / github . com / google / boringssl ' <nl> - s . license = { : type = > ' Mixed ' , : file = > ' LICENSE ' } <nl> - # " The name and email addresses of the library maintainers , not the Podspec maintainer . " <nl> - s . authors = ' Adam Langley ' , ' David Benjamin ' , ' Matt Braithwaite ' <nl> - <nl> - s . source = { <nl> - : git = > ' https : / / github . com / google / boringssl . git ' , <nl> - : commit = > " $ { boringssl_commit } " , <nl> - } <nl> - <nl> - s . ios . deployment_target = ' 7 . 
0 ' <nl> - s . osx . deployment_target = ' 10 . 7 ' <nl> - s . tvos . deployment_target = ' 10 . 0 ' <nl> - s . watchos . deployment_target = ' 4 . 0 ' <nl> - <nl> - name = ' openssl_grpc ' <nl> - <nl> - # When creating a dynamic framework , name it openssl . framework instead of BoringSSL . framework . <nl> - # This lets users write their includes like ` # include < openssl / ssl . h > ` as opposed to ` # include <nl> - # < BoringSSL / ssl . h > ` . <nl> - s . module_name = name <nl> - <nl> - # When creating a dynamic framework , copy the headers under ` include / openssl / ` into the root of <nl> - # the ` Headers / ` directory of the framework ( i . e . , not under ` Headers / include / openssl ` ) . <nl> - # <nl> - # TODO ( jcanizales ) : Debug why this doesn ' t work on macOS . <nl> - s . header_mappings_dir = ' include / openssl ' <nl> - <nl> - # The above has an undesired effect when creating a static library : It forces users to write <nl> - # includes like ` # include < BoringSSL / ssl . h > ` . ` s . header_dir ` adds a path prefix to that , and <nl> - # because Cocoapods lets omit the pod name when including headers of static libraries , the <nl> - # following lets users write ` # include < openssl / ssl . h > ` . <nl> - s . header_dir = name <nl> - <nl> - # The module map and umbrella header created automatically by Cocoapods don ' t work for C libraries <nl> - # like this one . The following file , and a correct umbrella header , are created on the fly by the <nl> - # ` prepare_command ` of this pod . <nl> - s . module_map = ' include / openssl / BoringSSL . modulemap ' <nl> - <nl> - # We don ' t need to inhibit all warnings ; only - Wno - shorten - 64 - to - 32 . But Cocoapods ' linter doesn ' t <nl> - # want that for some reason . <nl> - s . compiler_flags = ' - DOPENSSL_NO_ASM ' , ' - GCC_WARN_INHIBIT_ALL_WARNINGS ' , ' - w ' <nl> - s . requires_arc = false <nl> - <nl> - # Like many other C libraries , BoringSSL has its public headers under ` include / < libname > / ` and its <nl> - # sources and private headers in other directories outside ` include / ` . Cocoapods ' linter doesn ' t <nl> - # allow any header to be listed outside the ` header_mappings_dir ` ( even though doing so works in <nl> - # practice ) . Because we need our ` header_mappings_dir ` to be ` include / openssl / ` for the reason <nl> - # mentioned above , we work around the linter limitation by dividing the pod into two subspecs , one <nl> - # for public headers and the other for implementation . Each gets its own ` header_mappings_dir ` , <nl> - # making the linter happy . <nl> - s . subspec ' Interface ' do | ss | <nl> - ss . header_mappings_dir = ' include / openssl ' <nl> - ss . source_files = ' include / openssl / * . h ' <nl> - end <nl> - s . subspec ' Implementation ' do | ss | <nl> - ss . header_mappings_dir = ' . ' <nl> - ss . source_files = ' ssl / * . { h , c , cc } ' , <nl> - ' ssl / * * / * . { h , c , cc } ' , <nl> - ' crypto / * . { h , c , cc } ' , <nl> - ' crypto / * * / * . { h , c , cc } ' , <nl> - # We have to include fiat because spake25519 depends on it <nl> - ' third_party / fiat / * . { h , c , cc } ' , <nl> - # Include the err_data . c generated in prepare_command below <nl> - ' err_data . c ' <nl> - <nl> - ss . private_header_files = ' ssl / * . h ' , <nl> - ' ssl / * * / * . h ' , <nl> - ' crypto / * . h ' , <nl> - ' crypto / * * / * . h ' , <nl> - ' third_party / fiat / * . h ' <nl> - # bcm . c includes other source files , creating duplicated symbols . 
Since it is not used , we <nl> - # explicitly exclude it from the pod . <nl> - # TODO ( mxyan ) : Work with BoringSSL team to remove this hack . <nl> - ss . exclude_files = ' crypto / fipsmodule / bcm . c ' , <nl> - ' * * / * _test . * ' , <nl> - ' * * / test_ * . * ' , <nl> - ' * * / test / * . * ' <nl> - <nl> - ss . dependency " # { s . name } / Interface " , version <nl> - end <nl> - <nl> - s . prepare_command = < < - END_OF_COMMAND <nl> - # Add a module map and an umbrella header <nl> - cat > include / openssl / umbrella . h < < EOF <nl> - # include " ssl . h " <nl> - # include " crypto . h " <nl> - # include " aes . h " <nl> - / * The following macros are defined by base . h . The latter is the first file included by the <nl> - other headers . * / <nl> - # if defined ( OPENSSL_ARM ) | | defined ( OPENSSL_AARCH64 ) <nl> - # include " arm_arch . h " <nl> - # endif <nl> - # include " asn1 . h " <nl> - # include " asn1_mac . h " <nl> - # include " asn1t . h " <nl> - # include " blowfish . h " <nl> - # include " cast . h " <nl> - # include " chacha . h " <nl> - # include " cmac . h " <nl> - # include " conf . h " <nl> - # include " cpu . h " <nl> - # include " curve25519 . h " <nl> - # include " des . h " <nl> - # include " dtls1 . h " <nl> - # include " hkdf . h " <nl> - # include " md4 . h " <nl> - # include " md5 . h " <nl> - # include " obj_mac . h " <nl> - # include " objects . h " <nl> - # include " opensslv . h " <nl> - # include " ossl_typ . h " <nl> - # include " pkcs12 . h " <nl> - # include " pkcs7 . h " <nl> - # include " pkcs8 . h " <nl> - # include " poly1305 . h " <nl> - # include " rand . h " <nl> - # include " rc4 . h " <nl> - # include " ripemd . h " <nl> - # include " safestack . h " <nl> - # include " srtp . h " <nl> - # include " x509 . h " <nl> - # include " x509v3 . h " <nl> - EOF <nl> - cat > include / openssl / BoringSSL . modulemap < < EOF <nl> - framework module openssl { <nl> - umbrella header " umbrella . h " <nl> - textual header " arm_arch . h " <nl> - export * <nl> - module * { export * } <nl> - } <nl> - EOF <nl> - <nl> - # To build boringssl , we need the generated file err_data . c , which is normally generated <nl> - # by boringssl ' s err_data_generate . go , but we already have a copy of err_data . c checked into the <nl> - # grpc / grpc repository that gets regenerated whenever we update the third_party / boringssl submodule . <nl> - # To make the podspec independent of the grpc repository , the . podspec . template just copies <nl> - # the contents of err_data . c directly into the . podspec . <nl> - # TODO ( jtattermusch ) : avoid needing to run tools / buildgen / generate_projects . sh twice on update <nl> - # TODO ( jtattermusch ) : another pre - generated copy of err_data . c is under third_party / boringssl - with - bazel <nl> - # investigate if we could use it . <nl> - cat > err_data . c < < EOF <nl> - % for err_data in open ( " src / boringssl / err_data . c " , " r " ) . readlines ( ) : <nl> - $ { err_data . replace ( ' \ \ 0 ' , ' \ \ \ \ 0 ' ) } \ <nl> - % endfor <nl> - EOF <nl> - <nl> - # The symbol prefixing mechanism is performed by redefining BoringSSL symbols with " # define <nl> - # SOME_BORINGSSL_SYMBOL GRPC_SHADOW_SOME_BORINGSSL_SYMBOL " . 
Unfortunately , some symbols are <nl> - # already redefined as macros in BoringSSL headers in the form " # define SOME_BORINGSSL_SYMBOL <nl> - # SOME_BORINGSSL_SYMBOL " Such type of redefinition will cause " SOME_BORINGSSL_SYMBOL redefined " <nl> - # error when using together with our prefix header . So the workaround in the below lines removes <nl> - # all such type of # define directives . <nl> - sed - i ' . back ' ' / ^ # define \ \ ( [ A - Za - z0 - 9_ ] * \ \ ) \ \ 1 / d ' include / openssl / * . h <nl> - # Remove lines of the format below for the same reason above <nl> - # # define SOME_BORINGSSL_SYMBOL $ { " \ \ " } <nl> - # SOME_BORINGSSL_SYMBOL <nl> - sed - i ' . back ' ' / ^ # define . * \ \ \ \ $ / { N ; / ^ # define \ \ ( [ A - Za - z0 - 9_ ] * \ \ ) * \ \ \ \ \ \ n * \ \ 1 / d ; } ' include / openssl / * . h <nl> - <nl> - # We are renaming openssl to openssl_grpc so that there is no conflict with openssl if it exists <nl> - find . - type f \ \ ( - path ' * . h ' - or - path ' * . cc ' - or - path ' * . c ' \ \ ) - print0 | xargs - 0 - L1 sed - E - i ' . grpc_back ' ' s ; # include < openssl / ; # include < openssl_grpc / ; g ' <nl> - END_OF_COMMAND <nl> - <nl> - # Redefine symbols to avoid conflict when the same app also depends on OpenSSL . The list of <nl> - # symbols are src / objective - c / grpc_shadow_boringssl_symbol_list . <nl> - # This is the last part of this file . <nl> - s . prefix_header_contents = <nl> - $ { expand_symbol_list ( settings . grpc_shadow_boringssl_symbols ) } <nl> - end <nl> mmm a / tools / distrib / upgrade_boringssl_objc . sh <nl> ppp b / tools / distrib / upgrade_boringssl_objc . sh <nl> cd . . / . . <nl> docker build tools / dockerfile / grpc_objc / generate_boringssl_prefix_header - t grpc / boringssl_prefix_header <nl> mkdir - p $ BORINGSSL_PREFIX_HEADERS_DIR <nl> docker run - it - - rm - v $ ( pwd ) / $ BORINGSSL_PREFIX_HEADERS_DIR : / output grpc / boringssl_prefix_header $ BORINGSSL_COMMIT <nl> - git add $ BORINGSSL_PREFIX_HEADERS_DIR / boringssl_prefix_symbols - $ BORINGSSL_COMMIT . h <nl> <nl> # Increase the minor version by 1 <nl> POD_VER = $ ( cat templates / src / objective - c / BoringSSL - GRPC . podspec . template | grep ' version = ' | perl - pe ' ( $ _ ) = / ( [ 0 - 9 ] + ( [ . ] [ 0 - 9 ] + ) + ) / ' ) <nl> mmm a / tools / dockerfile / grpc_objc / generate_boringssl_prefix_header / generate_boringssl_prefix_header . sh <nl> ppp b / tools / dockerfile / grpc_objc / generate_boringssl_prefix_header / generate_boringssl_prefix_header . sh <nl> make boringssl_prefix_symbols <nl> <nl> [ - f symbol_prefix_include / boringssl_prefix_symbols . h ] | | { echo " Failed to build boringssl_prefix_symbols . sh " ; exit 1 ; } <nl> <nl> - cp symbol_prefix_include / boringssl_prefix_symbols . h / output / boringssl_prefix_symbols - $ 1 . h <nl> + gzip - c symbol_prefix_include / boringssl_prefix_symbols . h | base64 > / output / boringssl_prefix_symbols . h . gz . base64 <nl> <nl> exit 0 <nl>
Use compressed ( gzip + base64 ) header file for the BoringSSL prefix symbols instead of copying the raw header
grpc/grpc
87687dec01d67355ef4738f20a1bcbf2015e5863
2019-11-22T00:20:43Z
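For context on the change above: the generate script now emits the prefix header as a gzip-compressed, base64-encoded file (`gzip -c ... | base64`) rather than copying it verbatim. The following is a minimal Python sketch of the same encode/decode round trip under stated assumptions; the file names are illustrative, and the real pipeline performs this with shell tools, not Python.

```python
import base64
import gzip

# Encode: mirrors `gzip -c header.h | base64 > header.h.gz.base64` (file names are illustrative).
with open("boringssl_prefix_symbols.h", "rb") as f:
    raw = f.read()
encoded = base64.b64encode(gzip.compress(raw))
with open("boringssl_prefix_symbols.h.gz.base64", "wb") as f:
    f.write(encoded)

# Decode: what a consumer (e.g. a podspec prepare step) would do to recover the header.
with open("boringssl_prefix_symbols.h.gz.base64", "rb") as f:
    restored = gzip.decompress(base64.b64decode(f.read()))
assert restored == raw
```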
mmm a / include / swift / AST / PrettyStackTrace . h <nl> ppp b / include / swift / AST / PrettyStackTrace . h <nl> namespace swift { <nl> class Expr ; <nl> class Pattern ; <nl> class Stmt ; <nl> + class TypeRepr ; <nl> <nl> / / / PrettyStackTraceLocation - Observe that we are doing some <nl> / / / processing starting at a fixed location . <nl> class PrettyStackTraceType : public llvm : : PrettyStackTraceEntry { <nl> virtual void print ( llvm : : raw_ostream & OS ) const ; <nl> } ; <nl> <nl> + / / / Observe that we are processing a specific type representation . <nl> + class PrettyStackTraceTypeRepr : public llvm : : PrettyStackTraceEntry { <nl> + ASTContext & Context ; <nl> + TypeRepr * TheType ; <nl> + const char * Action ; <nl> + public : <nl> + PrettyStackTraceTypeRepr ( ASTContext & C , const char * action , TypeRepr * type ) <nl> + : Context ( C ) , TheType ( type ) , Action ( action ) { } <nl> + virtual void print ( llvm : : raw_ostream & OS ) const ; <nl> + } ; <nl> + <nl> } / / end namespace swift <nl> <nl> # endif <nl> mmm a / lib / AST / PrettyStackTrace . cpp <nl> ppp b / lib / AST / PrettyStackTrace . cpp <nl> void swift : : printTypeDescription ( llvm : : raw_ostream & out , Type type , <nl> out < < ' \ n ' ; <nl> } <nl> <nl> + void PrettyStackTraceTypeRepr : : print ( llvm : : raw_ostream & out ) const { <nl> + out < < " While " < < Action < < " type " ; <nl> + TheType - > print ( out ) ; <nl> + if ( TheType & & TheType - > getSourceRange ( ) . isValid ( ) ) { <nl> + out < < " at " ; <nl> + TheType - > getSourceRange ( ) . print ( out , Context . SourceMgr ) ; <nl> + } <nl> + out < < ' \ n ' ; <nl> + } <nl> + <nl> void PrettyStackTraceLocation : : print ( llvm : : raw_ostream & out ) const { <nl> out < < " While " < < Action < < " starting at " ; <nl> Loc . print ( out , Context . SourceMgr ) ; <nl> mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> <nl> # include " swift / AST / ASTWalker . h " <nl> # include " swift / AST / ExprHandle . h " <nl> # include " swift / AST / NameLookup . h " <nl> + # include " swift / AST / PrettyStackTrace . h " <nl> # include " swift / AST / TypeLoc . h " <nl> # include " llvm / ADT / SmallString . h " <nl> # include " llvm / ADT / Twine . h " <nl> void TypeChecker : : validateTypeDecl ( TypeDecl * D ) { <nl> } <nl> <nl> Type TypeChecker : : resolveType ( TypeRepr * TyR , bool allowUnboundGenerics ) { <nl> + PrettyStackTraceTypeRepr stackTrace ( Context , " resolving " , TyR ) ; <nl> + <nl> assert ( TyR & & " Cannot validate null TypeReprs ! " ) ; <nl> switch ( TyR - > getKind ( ) ) { <nl> case TypeReprKind : : Attributed : { <nl>
Introduce stack trace information for resolving type representations .
apple/swift
a85ef1e7c4a283deaf0ef6d9a20f8c9ae659760f
2013-08-12T14:59:13Z
mmm a / tensorflow / g3doc / api_docs / python / contrib . learn . md <nl> ppp b / tensorflow / g3doc / api_docs / python / contrib . learn . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . 
<nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . 
<nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . 
If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . 
If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> Evaluate a model loaded from a checkpoint . <nl> <nl> Given ` graph ` , a directory to write summaries to ( ` output_dir ` ) , a checkpoint <nl> to restore variables from , and a ` dict ` of ` Tensor ` s to evaluate , run an eval <nl> - loop for ` max_steps ` steps . <nl> + loop for ` max_steps ` steps , or until an exception ( generally , an <nl> + end - of - input signal from a reader operation ) is raised from running <nl> + ` eval_dict ` . <nl> <nl> In each step of evaluation , all tensors in the ` eval_dict ` are evaluated , and <nl> every ` log_every_steps ` steps , they are logged . At the very end of evaluation , <nl> and written to ` output_dir ` . <nl> Can be ` None ` if the graph doesn ' t require loading any variables . <nl> * < b > ` eval_dict ` < / b > : A ` dict ` mapping string names to tensors to evaluate . It is <nl> evaluated in every logging step . The result of the final evaluation is <nl> - returned . If update_op is None , then it ' s evaluated in every step . <nl> + returned . If ` update_op ` is None , then it ' s evaluated in every step . 
If <nl> + ` max_steps ` is ` None ` , this should depend on a reader that will raise an <nl> + end - of - inupt exception when the inputs are exhausted . <nl> * < b > ` update_op ` < / b > : A ` Tensor ` which is run in every step . <nl> * < b > ` global_step_tensor ` < / b > : A ` Variable ` containing the global step . If ` None ` , <nl> one is extracted from the graph using the same logic as in ` Supervisor ` . <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . learn . LinearRegressor . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . learn . LinearRegressor . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . LinearClassifier . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . LinearClassifier . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . 
, <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . BaseEstimator . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . BaseEstimator . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . TensorFlowDNNRegressor . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . TensorFlowDNNRegressor . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . learn . Estimator . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . learn . 
Estimator . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . DNNClassifier . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . DNNClassifier . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . TensorFlowLinearClassifier . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . TensorFlowLinearClassifier . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . 
If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . evaluate . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . evaluate . md <nl> Evaluate a model loaded from a checkpoint . <nl> <nl> Given ` graph ` , a directory to write summaries to ( ` output_dir ` ) , a checkpoint <nl> to restore variables from , and a ` dict ` of ` Tensor ` s to evaluate , run an eval <nl> - loop for ` max_steps ` steps . <nl> + loop for ` max_steps ` steps , or until an exception ( generally , an <nl> + end - of - input signal from a reader operation ) is raised from running <nl> + ` eval_dict ` . <nl> <nl> In each step of evaluation , all tensors in the ` eval_dict ` are evaluated , and <nl> every ` log_every_steps ` steps , they are logged . At the very end of evaluation , <nl> and written to ` output_dir ` . <nl> Can be ` None ` if the graph doesn ' t require loading any variables . <nl> * < b > ` eval_dict ` < / b > : A ` dict ` mapping string names to tensors to evaluate . It is <nl> evaluated in every logging step . The result of the final evaluation is <nl> - returned . If update_op is None , then it ' s evaluated in every step . <nl> + returned . If ` update_op ` is None , then it ' s evaluated in every step . If <nl> + ` max_steps ` is ` None ` , this should depend on a reader that will raise an <nl> + end - of - inupt exception when the inputs are exhausted . <nl> * < b > ` update_op ` < / b > : A ` Tensor ` which is run in every step . <nl> * < b > ` global_step_tensor ` < / b > : A ` Variable ` containing the global step . If ` None ` , <nl> one is extracted from the graph using the same logic as in ` Supervisor ` . <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . TensorFlowDNNClassifier . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . TensorFlowDNNClassifier . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . 
If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . learn . TensorFlowClassifier . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . learn . TensorFlowClassifier . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . learn . TensorFlowRegressor . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . learn . TensorFlowRegressor . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . 
<nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . DNNRegressor . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . DNNRegressor . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . <nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . TensorFlowLinearRegressor . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . TensorFlowLinearRegressor . md <nl> Evaluates given model with provided evaluation data . <nl> * < b > ` x ` < / b > : features . <nl> * < b > ` y ` < / b > : targets . <nl> * < b > ` input_fn ` < / b > : Input function . If set , ` x ` , ` y ` , and ` batch_size ` must be <nl> - ` None ` . <nl> + ` None ` . If ` steps ` is ` None ` , the tensors returned by this should <nl> + generally raise an end - of - input exception when all eval records have <nl> + been returned ( typically , 1 epoch over eval data ) . <nl> * < b > ` feed_fn ` < / b > : Function creating a feed dict every time it is called . Called <nl> once per iteration . <nl> * < b > ` batch_size ` < / b > : minibatch size to use on the input , defaults to first <nl> - dimension of ` x ` . Must be ` None ` if ` input_fn ` is provided . <nl> + dimension of ` x ` , if specified . Must be ` None ` if ` input_fn ` is <nl> + provided . 
<nl> * < b > ` steps ` < / b > : Number of steps for which to evaluate model . If ` None ` , evaluate <nl> - forever . <nl> + until running tensors generated by ` metrics ` raises an exception . <nl> * < b > ` metrics ` < / b > : Dict of metric ops to run . If None , the default metric functions <nl> are used ; if { } , no metrics are used . If model has one output ( i . e . , <nl> returning single predction ) , keys are ` str ` , e . g . ` ' accuracy ' ` - just a <nl>
Update generated Python Op docs .
tensorflow/tensorflow
c97d95ba1b1c666cc21907a5b5e26494f3aee52d
2016-06-13T19:34:12Z
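The evaluation docs updated above repeatedly note that when `steps` is `None`, the eval input should raise an end-of-input exception once the data is exhausted. Below is a minimal sketch of one way to arrange that with the TF 1.x-era API these docs describe, using `tf.train.limit_epochs`; the feature values, shapes, and the estimator in the usage comment are illustrative assumptions, not taken from the diff.

```python
import tensorflow as tf  # TF 1.x-era API, matching the contrib.learn docs above


def eval_input_fn():
    """Eval input that stops after one epoch by raising OutOfRangeError."""
    # limit_epochs keeps a local counter variable, so the eval loop must run
    # local-variable initializers (the contrib.learn eval loop does this).
    features = tf.train.limit_epochs(
        tf.constant([[1.0], [2.0], [3.0]]), num_epochs=1)
    labels = tf.train.limit_epochs(
        tf.constant([[2.0], [4.0], [6.0]]), num_epochs=1)
    return {"x": features}, labels


# Hypothetical usage with an already-constructed estimator `est`:
#   est.evaluate(input_fn=eval_input_fn, steps=None)
```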
mmm a / bin / import_fisher . py <nl> ppp b / bin / import_fisher . py <nl> <nl> import pandas <nl> import subprocess <nl> import unicodedata <nl> - import wave <nl> - import audioop <nl> + import librosa <nl> + import resampy <nl> <nl> from util . text import validate_label <nl> <nl> def _split_wav_and_sentences ( data_dir , trans_data , original_data , converted_data <nl> <nl> print ( " splitting { } according to { } " . format ( wav_files , trans_file ) ) <nl> <nl> - origAudios = [ wave . open ( wav_file , " r " ) for wav_file in wav_files ] <nl> + origAudios = [ librosa . load ( wav_file , sr = None , mono = False ) for wav_file in wav_files ] <nl> <nl> # Loop over segments and split wav_file for each segment <nl> for segment in segments : <nl> def _split_wav_and_sentences ( data_dir , trans_data , original_data , converted_data <nl> if transcript ! = None : <nl> files . append ( ( os . path . abspath ( new_wav_file ) , new_wav_filesize , transcript ) ) <nl> <nl> - # Close origAudios <nl> - for origAudio in origAudios : <nl> - origAudio . close ( ) <nl> - <nl> return pandas . DataFrame ( data = files , columns = [ " wav_filename " , " wav_filesize " , " transcript " ] ) <nl> <nl> + def _split_audio ( origAudio , start_time , stop_time ) : <nl> + audioData = origAudio [ 0 ] <nl> + frameRate = origAudio [ 1 ] <nl> + nChannels = len ( audioData . shape ) <nl> + startIndex = int ( start_time * frameRate ) <nl> + stopIndex = int ( stop_time * frameRate ) <nl> + return audioData [ startIndex : stopIndex ] if 1 = = nChannels else audioData [ : , startIndex : stopIndex ] <nl> + <nl> def _split_and_resample_wav ( origAudio , start_time , stop_time , new_wav_file ) : <nl> - nChannels = origAudio . getnchannels ( ) <nl> - sampleWidth = origAudio . getsampwidth ( ) <nl> - frameRate = origAudio . getframerate ( ) <nl> - origAudio . setpos ( int ( start_time * frameRate ) ) <nl> - chunkData = origAudio . readframes ( int ( ( stop_time - start_time ) * frameRate ) ) <nl> - # by doubling the frame - rate we effectively go from 8 kHz to 16 kHz <nl> - chunkData , _ = audioop . ratecv ( chunkData , sampleWidth , nChannels , frameRate , 2 * frameRate , None ) <nl> - chunkAudio = wave . open ( new_wav_file , " w " ) <nl> - chunkAudio . setnchannels ( nChannels ) <nl> - chunkAudio . setsampwidth ( sampleWidth ) <nl> - chunkAudio . setframerate ( 2 * frameRate ) <nl> - chunkAudio . writeframes ( chunkData ) <nl> - chunkAudio . close ( ) <nl> + frameRate = origAudio [ 1 ] <nl> + chunkData = _split_audio ( origAudio , start_time , stop_time ) <nl> + chunkData = resampy . resample ( chunkData , frameRate , 16000 ) <nl> + librosa . output . write_wav ( new_wav_file , chunkData , 16000 ) <nl> <nl> def _split_sets ( filelist ) : <nl> # We initially split the entire set into 80 % train and 20 % test , then <nl> mmm a / requirements . txt <nl> ppp b / requirements . txt <nl> requests <nl> tables <nl> attrdict <nl> setuptools <nl> + librosa <nl> + resampy <nl>
Fixed imported 8kHz training audio being compromised by unfiltered upsampling ( resample with librosa / resampy instead of audioop . ratecv )
mozilla/DeepSpeech
9aa23ed387439257579ac5bd9869b43ecd432248
2018-11-17T18:26:19Z
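The DeepSpeech change above swaps the `wave` / `audioop` pipeline, which doubled the frame rate without filtering, for `librosa` loading and `resampy` resampling. A small sketch of that load / slice / resample-to-16 kHz / write flow, under stated assumptions: the file name and segment times are made up, and `librosa.output.write_wav` is the pre-0.8 librosa API the patch relies on.

```python
import librosa
import resampy

# Load without resampling or downmixing, as in the import script above.
audio, rate = librosa.load("conversation.wav", sr=None, mono=False)  # file name is illustrative

start_time, stop_time = 1.5, 3.0  # hypothetical segment boundaries in seconds
start, stop = int(start_time * rate), int(stop_time * rate)
# Mono audio is 1-D; multi-channel audio has shape (channels, samples).
segment = audio[start:stop] if audio.ndim == 1 else audio[:, start:stop]

# Upsample (e.g. 8 kHz telephone audio) to the 16 kHz rate used for training.
segment_16k = resampy.resample(segment, rate, 16000)
librosa.output.write_wav("segment.wav", segment_16k, 16000)  # pre-0.8 librosa API
```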
mmm a / jstests / libs / chunk_manipulation_util . js <nl> ppp b / jstests / libs / chunk_manipulation_util . js <nl> function waitForMoveChunkStep ( shardConnection , stepNumber ) { <nl> let op = in_progress . next ( ) ; <nl> inProgressStr + = tojson ( op ) ; <nl> <nl> - / / TODO ( SERVER - 45993 ) : Remove the 4 . 2 and prior branch once 4 . 4 becomes last - lts . <nl> - if ( ( op . desc & & op . desc = = = " MoveChunk " ) | | <nl> - ( op . command & & op . command . moveChunk / * required for v4 . 2 and prior * / ) ) { <nl> + if ( op . desc & & op . desc = = = " MoveChunk " ) { <nl> / / Note : moveChunk in join mode will not have the " step " message . So keep on <nl> / / looking if searchString is not found . <nl> if ( op . msg & & op . msg . startsWith ( searchString ) ) { <nl> function killRunningMoveChunk ( admin ) { <nl> if ( op . desc & & op . desc = = = " MoveChunk " ) { <nl> opIdsToKill [ " MoveChunk " ] = op . opid ; <nl> } <nl> - / / TODO ( SERVER - 45993 ) : Remove this branch once 4 . 4 becomes last - lts . <nl> - / / For 4 . 2 binaries and prior . <nl> - if ( op . command & & op . command . moveChunk ) { <nl> - opIdsToKill [ " moveChunkCommand " ] = op . opid ; <nl> - } <nl> } <nl> <nl> if ( opIdsToKill . MoveChunk ) { <nl> admin . killOp ( opIdsToKill . MoveChunk ) ; <nl> abortedMigration = true ; <nl> - } else if ( opIdsToKill . moveChunkCommand ) { <nl> - / / TODO ( SERVER - 45993 ) : Remove this branch once 4 . 4 becomes last - lts . <nl> - admin . killOp ( opIdsToKill . moveChunkCommand ) ; <nl> - abortedMigration = true ; <nl> } <nl> <nl> assert . eq ( <nl>
SERVER - 51355 Complete TODO listed in SERVER - 45993
mongodb/mongo
4a2fbeebf482f4be1148479d12283819e55d933c
2020-10-14T19:30:58Z
mmm a / js / server / modules / org / arangodb / foxx / queues . js <nl> ppp b / js / server / modules / org / arangodb / foxx / queues . js <nl> queues = { <nl> _jobTypes : Object . create ( null ) , <nl> get : function ( key ) { <nl> ' use strict ' ; <nl> - if ( ! db . _queues . exists ( key ) ) { <nl> - throw new Error ( ' Queue does not exist : ' + key ) ; <nl> - } <nl> + <nl> if ( ! queueMap [ key ] ) { <nl> + if ( ! db . _queues . exists ( key ) ) { <nl> + throw new Error ( ' Queue does not exist : ' + key ) ; <nl> + } <nl> queueMap [ key ] = new Queue ( key ) ; <nl> } <nl> return queueMap [ key ] ; <nl> _ . extend ( Queue . prototype , { <nl> } , <nl> delete : function ( id ) { <nl> ' use strict ' ; <nl> - var result = false ; <nl> - db . _executeTransaction ( { <nl> + return db . _executeTransaction ( { <nl> collections : { <nl> read : [ ' _jobs ' ] , <nl> write : [ ' _jobs ' ] <nl> } , <nl> action : function ( ) { <nl> - if ( db . _jobs . exists ( id ) ) { <nl> + try { <nl> db . _jobs . remove ( id ) ; <nl> - result = true ; <nl> + return true ; <nl> + } catch ( err ) { <nl> + return false ; <nl> } <nl> } <nl> } ) ; <nl> - return result ; <nl> } , <nl> pending : function ( jobType ) { <nl> ' use strict ' ; <nl>
Applied performance optimizations to Foxx queues : skip the existence check for already cached queues and replace the exists ( ) - then - remove ( ) lookup in delete with a try / catch around remove ( ) .
arangodb/arangodb
a97417408a2419f11e2e2948b727b3b323fd3ebb
2014-08-15T15:43:49Z
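The queue changes above trade an explicit existence check for a try/catch around the operation itself, so the common path does one lookup instead of two. A language-neutral illustration of that pattern in Python; the collection object, method, and exception type here are stand-ins, not the ArangoDB API.

```python
def delete_job(jobs, job_id):
    # One round trip: attempt the removal and treat "not found" as False,
    # instead of calling an exists() check first and remove() second.
    try:
        jobs.remove(job_id)
        return True
    except KeyError:  # stand-in for the datastore's "document not found" error
        return False
```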
mmm a / cmake / modules / SwiftSource . cmake <nl> ppp b / cmake / modules / SwiftSource . cmake <nl> function ( _compile_swift_files <nl> module_dependency_target <nl> COMMAND <nl> " $ { CMAKE_COMMAND } " " - E " " remove " " - f " " $ { module_file } " <nl> + COMMAND <nl> + " $ { CMAKE_COMMAND } " " - E " " remove " " - f " " $ { module_doc_file } " <nl> COMMAND <nl> " $ { PYTHON_EXECUTABLE } " " $ { line_directive_tool } " " @ $ { file_path } " - - <nl> " $ { swift_compiler_tool } " " - emit - module " " - o " " $ { module_file } " $ { swift_flags } <nl>
Remove old . swiftdoc file before creating new . swiftdoc file
apple/swift
1d1d070f1371b509de4d838efc2147f567ea8042
2017-03-25T07:47:19Z
mmm a / tensorflow / contrib / data / python / kernel_tests / BUILD <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / BUILD <nl> py_test ( <nl> name = " reader_dataset_ops_test " , <nl> size = " medium " , <nl> srcs = [ " reader_dataset_ops_test . py " ] , <nl> + shard_count = 4 , <nl> srcs_version = " PY2AND3 " , <nl> tags = [ " no_pip " ] , <nl> deps = [ <nl> py_test ( <nl> " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : lib " , <nl> " / / tensorflow / python : parsing_ops " , <nl> + " / / tensorflow / python : string_ops " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python / data / ops : iterator_ops " , <nl> " / / third_party / py / numpy " , <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> <nl> from tensorflow . python . lib . io import python_io <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import parsing_ops <nl> + from tensorflow . python . ops import string_ops <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . util import compat <nl> <nl> def testTFRecordWithCompressionCore ( self ) : <nl> lambda : self . _build_iterator_graph ( num_epochs * 2 ) , num_outputs ) <nl> <nl> <nl> + def _interleave ( iterators , cycle_length ) : <nl> + pending_iterators = iterators <nl> + open_iterators = [ ] <nl> + num_open = 0 <nl> + for i in range ( cycle_length ) : <nl> + if pending_iterators : <nl> + open_iterators . append ( pending_iterators . pop ( 0 ) ) <nl> + num_open + = 1 <nl> + <nl> + while num_open : <nl> + for i in range ( min ( cycle_length , len ( open_iterators ) ) ) : <nl> + if open_iterators [ i ] is None : <nl> + continue <nl> + try : <nl> + yield next ( open_iterators [ i ] ) <nl> + except StopIteration : <nl> + if pending_iterators : <nl> + open_iterators [ i ] = pending_iterators . pop ( 0 ) <nl> + else : <nl> + open_iterators [ i ] = None <nl> + num_open - = 1 <nl> + <nl> + <nl> class ReadBatchFeaturesTest ( test . TestCase ) : <nl> <nl> def setUp ( self ) : <nl> def _next_record ( file_indices ) : <nl> yield j , i <nl> <nl> def _next_record_interleaved ( file_indices , cycle_length ) : <nl> - return self . _interleave ( [ _next_record ( [ i ] ) for i in file_indices ] , <nl> - cycle_length ) <nl> + return _interleave ( [ _next_record ( [ i ] ) for i in file_indices ] , <nl> + cycle_length ) <nl> <nl> file_batch = [ ] <nl> keywords_batch_indices = [ ] <nl> def _next_record_interleaved ( file_indices , cycle_length ) : <nl> [ len ( file_batch ) , keywords_batch_max_len ] , record_batch <nl> ] <nl> <nl> - def _interleave ( self , iterators , cycle_length ) : <nl> - pending_iterators = iterators <nl> - open_iterators = [ ] <nl> - num_open = 0 <nl> - for i in range ( cycle_length ) : <nl> - if pending_iterators : <nl> - open_iterators . append ( pending_iterators . pop ( 0 ) ) <nl> - num_open + = 1 <nl> - <nl> - while num_open : <nl> - for i in range ( min ( cycle_length , len ( open_iterators ) ) ) : <nl> - if open_iterators [ i ] is None : <nl> - continue <nl> - try : <nl> - yield next ( open_iterators [ i ] ) <nl> - except StopIteration : <nl> - if pending_iterators : <nl> - open_iterators [ i ] = pending_iterators . 
pop ( 0 ) <nl> - else : <nl> - open_iterators [ i ] = None <nl> - num_open - = 1 <nl> - <nl> def _verify_records ( self , <nl> sess , <nl> batch_size , <nl> def testMakeCSVDataset_withShuffle ( self ) : <nl> self . assertFalse ( all_equal ) <nl> <nl> <nl> + class MakeTFRecordDatasetTest ( TFRecordDatasetTestBase ) : <nl> + <nl> + def _next_expected_batch ( self , <nl> + file_indices , <nl> + batch_size , <nl> + num_epochs , <nl> + cycle_length , <nl> + drop_final_batch , <nl> + use_parser_fn ) : <nl> + <nl> + def _next_record ( file_indices ) : <nl> + for j in file_indices : <nl> + for i in range ( self . _num_records ) : <nl> + yield j , i <nl> + <nl> + def _next_record_interleaved ( file_indices , cycle_length ) : <nl> + return _interleave ( [ _next_record ( [ i ] ) for i in file_indices ] , <nl> + cycle_length ) <nl> + <nl> + record_batch = [ ] <nl> + batch_index = 0 <nl> + for _ in range ( num_epochs ) : <nl> + if cycle_length = = 1 : <nl> + next_records = _next_record ( file_indices ) <nl> + else : <nl> + next_records = _next_record_interleaved ( file_indices , cycle_length ) <nl> + for f , r in next_records : <nl> + record = self . _record ( f , r ) <nl> + if use_parser_fn : <nl> + record = record [ 1 : ] <nl> + record_batch . append ( record ) <nl> + batch_index + = 1 <nl> + if len ( record_batch ) = = batch_size : <nl> + yield record_batch <nl> + record_batch = [ ] <nl> + batch_index = 0 <nl> + if record_batch and not drop_final_batch : <nl> + yield record_batch <nl> + <nl> + def _verify_records ( self , <nl> + sess , <nl> + outputs , <nl> + batch_size , <nl> + file_index , <nl> + num_epochs , <nl> + interleave_cycle_length , <nl> + drop_final_batch , <nl> + use_parser_fn ) : <nl> + if file_index is not None : <nl> + file_indices = [ file_index ] <nl> + else : <nl> + file_indices = range ( self . _num_files ) <nl> + <nl> + for expected_batch in self . _next_expected_batch ( <nl> + file_indices , batch_size , num_epochs , interleave_cycle_length , <nl> + drop_final_batch , use_parser_fn ) : <nl> + actual_batch = sess . run ( outputs ) <nl> + self . assertAllEqual ( expected_batch , actual_batch ) <nl> + <nl> + def _read_test ( self , batch_size , num_epochs , file_index = None , <nl> + num_parallel_reads = 1 , drop_final_batch = False , parser_fn = False ) : <nl> + if file_index is None : <nl> + file_pattern = self . test_filenames <nl> + else : <nl> + file_pattern = self . test_filenames [ file_index ] <nl> + <nl> + if parser_fn : <nl> + fn = lambda x : string_ops . substr ( x , 1 , 999 ) <nl> + else : <nl> + fn = None <nl> + <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + outputs = readers . make_tf_record_dataset ( <nl> + file_pattern = file_pattern , <nl> + num_epochs = num_epochs , <nl> + batch_size = batch_size , <nl> + parser_fn = fn , <nl> + num_parallel_reads = num_parallel_reads , <nl> + drop_final_batch = drop_final_batch , <nl> + shuffle = False ) . make_one_shot_iterator ( ) . get_next ( ) <nl> + self . _verify_records ( <nl> + sess , outputs , batch_size , file_index , num_epochs = num_epochs , <nl> + interleave_cycle_length = num_parallel_reads , <nl> + drop_final_batch = drop_final_batch , use_parser_fn = parser_fn ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( outputs ) <nl> + <nl> + def testRead ( self ) : <nl> + for batch_size in [ 1 , 2 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + # Basic test : read from file 0 . <nl> + self . 
_read_test ( batch_size , num_epochs , 0 ) <nl> + <nl> + # Basic test : read from file 1 . <nl> + self . _read_test ( batch_size , num_epochs , 1 ) <nl> + <nl> + # Basic test : read from both files . <nl> + self . _read_test ( batch_size , num_epochs ) <nl> + <nl> + # Basic test : read from both files , with parallel reads . <nl> + self . _read_test ( batch_size , num_epochs , num_parallel_reads = 8 ) <nl> + <nl> + def testDropFinalBatch ( self ) : <nl> + for batch_size in [ 1 , 2 , 10 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + # Read from file 0 . <nl> + self . _read_test ( batch_size , num_epochs , 0 , drop_final_batch = True ) <nl> + <nl> + # Read from both files . <nl> + self . _read_test ( batch_size , num_epochs , drop_final_batch = True ) <nl> + <nl> + # Read from both files , with parallel reads . <nl> + self . _read_test ( batch_size , num_epochs , num_parallel_reads = 8 , <nl> + drop_final_batch = True ) <nl> + <nl> + def testParserFn ( self ) : <nl> + for batch_size in [ 1 , 2 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + for drop_final_batch in [ False , True ] : <nl> + self . _read_test ( batch_size , num_epochs , parser_fn = True , <nl> + drop_final_batch = drop_final_batch ) <nl> + self . _read_test ( batch_size , num_epochs , num_parallel_reads = 8 , <nl> + parser_fn = True , drop_final_batch = drop_final_batch ) <nl> + <nl> + def _shuffle_test ( self , batch_size , num_epochs , num_parallel_reads = 1 , <nl> + seed = None ) : <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + dataset = readers . make_tf_record_dataset ( <nl> + file_pattern = self . test_filenames , <nl> + num_epochs = num_epochs , <nl> + batch_size = batch_size , <nl> + num_parallel_reads = num_parallel_reads , <nl> + shuffle = True , <nl> + shuffle_seed = seed ) <nl> + iterator = dataset . make_initializable_iterator ( ) <nl> + next_element = iterator . get_next ( ) <nl> + <nl> + sess . run ( iterator . initializer ) <nl> + first_batches = [ ] <nl> + try : <nl> + while True : <nl> + first_batches . append ( sess . run ( next_element ) ) <nl> + except errors . OutOfRangeError : <nl> + pass <nl> + <nl> + sess . run ( iterator . initializer ) <nl> + second_batches = [ ] <nl> + try : <nl> + while True : <nl> + second_batches . append ( sess . run ( next_element ) ) <nl> + except errors . OutOfRangeError : <nl> + pass <nl> + <nl> + self . assertEqual ( len ( first_batches ) , len ( second_batches ) ) <nl> + if seed is not None : <nl> + # if you set a seed , should get the same results <nl> + for i in range ( len ( first_batches ) ) : <nl> + self . assertAllEqual ( first_batches [ i ] , second_batches [ i ] ) <nl> + <nl> + expected = [ ] <nl> + for f in range ( self . _num_files ) : <nl> + for r in range ( self . _num_records ) : <nl> + expected . extend ( [ self . _record ( f , r ) ] * num_epochs ) <nl> + <nl> + for batches in ( first_batches , second_batches ) : <nl> + actual = [ ] <nl> + for b in batches : <nl> + actual . extend ( b ) <nl> + self . assertAllEqual ( sorted ( expected ) , sorted ( actual ) ) <nl> + <nl> + def testShuffle ( self ) : <nl> + for batch_size in [ 1 , 2 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + for num_parallel_reads in [ 1 , 2 ] : <nl> + # Test that all expected elements are produced <nl> + self . _shuffle_test ( batch_size , num_epochs , num_parallel_reads ) <nl> + # Test that elements are produced in a consistent order if <nl> + # you specify a seed . <nl> + self . 
_shuffle_test ( batch_size , num_epochs , num_parallel_reads , <nl> + seed = 21345 ) <nl> + <nl> + <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl> mmm a / tensorflow / contrib / data / python / ops / readers . py <nl> ppp b / tensorflow / contrib / data / python / ops / readers . py <nl> def _get_sorted_col_indices ( select_columns , column_names ) : <nl> return result <nl> <nl> <nl> + def _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) : <nl> + " " " Optionally shuffle and repeat dataset , as requested . " " " <nl> + if num_epochs ! = 1 and shuffle : <nl> + # Use shuffle_and_repeat for perf <nl> + return dataset . apply ( <nl> + shuffle_ops . shuffle_and_repeat ( shuffle_buffer_size , num_epochs , <nl> + shuffle_seed ) ) <nl> + elif shuffle : <nl> + return dataset . shuffle ( shuffle_buffer_size , shuffle_seed ) <nl> + elif num_epochs ! = 1 : <nl> + return dataset . repeat ( num_epochs ) <nl> + return dataset <nl> + <nl> + <nl> + def make_tf_record_dataset ( <nl> + file_pattern , <nl> + batch_size , <nl> + parser_fn = None , <nl> + num_epochs = None , <nl> + shuffle = True , <nl> + shuffle_buffer_size = None , <nl> + shuffle_seed = None , <nl> + prefetch_buffer_size = None , <nl> + num_parallel_reads = None , <nl> + num_parallel_parser_calls = None , <nl> + drop_final_batch = False ) : <nl> + " " " Reads and optionally parses TFRecord files into a dataset . <nl> + <nl> + Provides common functionality such as batching , optional parsing , shuffling , <nl> + and performant defaults . <nl> + <nl> + Args : <nl> + file_pattern : List of files or patterns of TFRecord file paths . <nl> + See @ { tf . gfile . Glob } for pattern rules . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> + parser_fn : ( Optional . ) A function accepting string input to parse <nl> + and process the record contents . This function must map records <nl> + to components of a fixed shape , so they may be batched . By <nl> + default , uses the record contents unmodified . <nl> + num_epochs : ( Optional . ) An int specifying the number of times this <nl> + dataset is repeated . If None ( the default ) , cycles through the <nl> + dataset forever . <nl> + shuffle : ( Optional . ) A bool that indicates whether the input <nl> + should be shuffled . Defaults to ` True ` . <nl> + shuffle_buffer_size : ( Optional . ) Buffer size to use for <nl> + shuffling . A large buffer size ensures better shuffling , but <nl> + increases memory usage and startup time . <nl> + shuffle_seed : ( Optional . ) Randomization seed to use for shuffling . <nl> + prefetch_buffer_size : ( Optional . ) An int specifying the number of <nl> + feature batches to prefetch for performance improvement . <nl> + Defaults to auto - tune . Set to 0 to disable prefetching . <nl> + num_parallel_reads : ( Optional . ) Number of threads used to read <nl> + records from files . By default or if set to a value > 1 , the <nl> + results will be interleaved . <nl> + num_parallel_parser_calls : ( Optional . ) Number of parallel <nl> + records to parse in parallel . Defaults to an automatic selection . <nl> + drop_final_batch : ( Optional . ) Whether the last batch should be <nl> + dropped in case its size is smaller than ` batch_size ` ; the <nl> + default behavior is not to drop the smaller batch . 
<nl> + <nl> + Returns : <nl> + A dataset , where each element matches the output of ` parser_fn ` <nl> + except it will have an additional leading ` batch - size ` dimension , <nl> + or a ` batch_size ` - length 1 - D tensor of strings if ` parser_fn ` is <nl> + unspecified . <nl> + " " " <nl> + files = dataset_ops . Dataset . list_files ( <nl> + file_pattern , shuffle = shuffle , seed = shuffle_seed ) <nl> + <nl> + if num_parallel_reads is None : <nl> + # Note : We considered auto - tuning this value , but there is a concern <nl> + # that this affects the mixing of records from different files , which <nl> + # could affect training convergence / accuracy , so we are defaulting to <nl> + # a constant for now . <nl> + num_parallel_reads = 24 <nl> + dataset = core_readers . TFRecordDataset ( <nl> + files , num_parallel_reads = num_parallel_reads ) <nl> + <nl> + if shuffle_buffer_size is None : <nl> + # TODO ( josh11b ) : Auto - tune this value when not specified <nl> + shuffle_buffer_size = 10000 <nl> + dataset = _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) <nl> + <nl> + if parser_fn is None : <nl> + if drop_final_batch : <nl> + dataset = dataset . apply ( batching . batch_and_drop_remainder ( batch_size ) ) <nl> + else : <nl> + dataset = dataset . batch ( batch_size ) <nl> + else : <nl> + # TODO ( josh11b ) : if num_parallel_parser_calls is None , use some function <nl> + # of num cores instead of map_and_batch ' s default behavior of one batch . <nl> + dataset = dataset . apply ( batching . map_and_batch ( <nl> + parser_fn , batch_size , num_parallel_calls = num_parallel_parser_calls , <nl> + drop_remainder = drop_final_batch ) ) <nl> + <nl> + if prefetch_buffer_size is None : <nl> + prefetch_buffer_size = - 1 # tf . config . data . AUTOTUNE <nl> + if prefetch_buffer_size = = 0 : <nl> + return dataset <nl> + else : <nl> + return dataset . prefetch ( buffer_size = prefetch_buffer_size ) <nl> + <nl> + <nl> def make_csv_dataset ( <nl> file_pattern , <nl> batch_size , <nl> def make_csv_dataset ( <nl> Args : <nl> file_pattern : List of files or patterns of file paths containing CSV <nl> records . See @ { tf . gfile . Glob } for pattern rules . <nl> - batch_size : An int representing the number of consecutive elements of this <nl> - dataset to combine in a single batch . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> column_names : An optional list of strings that corresponds to the CSV <nl> columns , in order . One per column of the input record . If this is not <nl> provided , infers the column names from the first row of the records . <nl> def make_csv_dataset ( <nl> If None , cycles through the dataset forever . <nl> shuffle : A bool that indicates whether the input should be shuffled . <nl> shuffle_buffer_size : Buffer size to use for shuffling . A large buffer size <nl> - ensures better shuffling , but would increase memory usage and startup <nl> - time . <nl> + ensures better shuffling , but increases memory usage and startup time . <nl> shuffle_seed : Randomization seed to use for shuffling . <nl> prefetch_buffer_size : An int specifying the number of feature batches to <nl> prefetch for performance improvement . Recommended value is the number of <nl> def decode_csv ( line ) : <nl> interleave_ops . parallel_interleave ( <nl> filename_to_dataset , cycle_length = num_parallel_reads , sloppy = sloppy ) ) <nl> <nl> - if num_epochs ! 
= 1 and shuffle : <nl> - # Use shuffle_and_repeat for perf <nl> - dataset = dataset . apply ( <nl> - shuffle_ops . shuffle_and_repeat ( shuffle_buffer_size , num_epochs , <nl> - shuffle_seed ) ) <nl> - elif shuffle : <nl> - dataset = dataset . shuffle ( shuffle_buffer_size , shuffle_seed ) <nl> - elif num_epochs ! = 1 : <nl> - dataset = dataset . repeat ( num_epochs ) <nl> + dataset = _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) <nl> <nl> # Use map_and_batch for perf <nl> # TODO ( b / 76425672 ) : use num_parallel_calls for better performance tuning when <nl> def make_batched_features_dataset ( file_pattern , <nl> Args : <nl> file_pattern : List of files or patterns of file paths containing <nl> ` Example ` records . See ` tf . gfile . Glob ` for pattern rules . <nl> - batch_size : An int representing the number of consecutive elements of this <nl> - dataset to combine in a single batch . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> features : A ` dict ` mapping feature keys to ` FixedLenFeature ` or <nl> ` VarLenFeature ` values . See ` tf . parse_example ` . <nl> reader : A function or class that can be <nl> def make_batched_features_dataset ( file_pattern , <nl> dataset = dataset . map ( lambda _ , v : v ) <nl> <nl> # Apply dataset repeat and shuffle transformations . <nl> - repeat_dataset = ( num_epochs ! = 1 ) <nl> - if repeat_dataset and shuffle : <nl> - # Used fused shuffle_and_repeat operation for better performance <nl> - dataset = dataset . apply ( <nl> - shuffle_ops . shuffle_and_repeat ( shuffle_buffer_size , num_epochs , <nl> - shuffle_seed ) ) <nl> - elif repeat_dataset : <nl> - dataset = dataset . repeat ( num_epochs ) <nl> - elif shuffle : <nl> - dataset = dataset . shuffle ( shuffle_buffer_size , shuffle_seed ) <nl> + dataset = _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) <nl> <nl> if drop_final_batch : <nl> dataset = dataset . apply ( batching . batch_and_drop_remainder ( batch_size ) ) <nl> def read_batch_features ( file_pattern , <nl> Args : <nl> file_pattern : List of files or patterns of file paths containing <nl> ` Example ` records . See ` tf . gfile . Glob ` for pattern rules . <nl> - batch_size : An int representing the number of consecutive elements of this <nl> - dataset to combine in a single batch . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> features : A ` dict ` mapping feature keys to ` FixedLenFeature ` or <nl> ` VarLenFeature ` values . See ` tf . parse_example ` . <nl> reader : A function or class that can be <nl>
Add tf . contrib . data . make_tf_record_dataset ( ) like make_csv_dataset ( ) and
tensorflow/tensorflow
f48c4115438f764a5d08e155275fa21f581ff55e
2018-05-16T17:05:29Z
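The new make_tf_record_dataset() above composes file listing, parallel TFRecord reads, optional shuffle_and_repeat, an optional parser_fn applied via map_and_batch, and prefetching behind a single call. A minimal usage sketch, assuming a TensorFlow 1.x build where tf.contrib.data is available; the file pattern is hypothetical and the parser mirrors the substr parser exercised by MakeTFRecordDatasetTest in the diff:

import tensorflow as tf

# Hypothetical shard pattern; the parser strips the first byte of each record,
# like the parser_fn used in the tests above.
dataset = tf.contrib.data.make_tf_record_dataset(
    file_pattern="data/train-*.tfrecord",
    batch_size=32,
    parser_fn=lambda record: tf.substr(record, 1, 999),
    num_epochs=3,
    shuffle=False,
    num_parallel_reads=8,
    drop_final_batch=True)

next_batch = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    try:
        while True:
            sess.run(next_batch)  # each element is a batch of parsed records
    except tf.errors.OutOfRangeError:
        pass  # num_epochs=3 is finite, so the iterator eventually ends

With num_epochs left as None the dataset repeats indefinitely, and with parser_fn=None each element is simply a 1-D string tensor of raw serialized records, as the docstring in the diff describes.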
mmm a / swoole_coroutine_util . c <nl> ppp b / swoole_coroutine_util . c <nl> <nl> # include " php_swoole . h " <nl> <nl> # ifdef SW_COROUTINE <nl> + # include " async . h " <nl> # include " swoole_coroutine . h " <nl> <nl> ZEND_BEGIN_ARG_INFO_EX ( arginfo_swoole_coroutine_void , 0 , 0 , 0 ) <nl> ZEND_BEGIN_ARG_INFO_EX ( arginfo_swoole_coroutine_gethostbyname , 0 , 0 , 1 ) <nl> ZEND_ARG_INFO ( 0 , family ) <nl> ZEND_END_ARG_INFO ( ) <nl> <nl> + ZEND_BEGIN_ARG_INFO_EX ( arginfo_swoole_coroutine_getaddrinfo , 0 , 0 , 1 ) <nl> + ZEND_ARG_INFO ( 0 , hostname ) <nl> + ZEND_ARG_INFO ( 0 , family ) <nl> + ZEND_ARG_INFO ( 0 , socktype ) <nl> + ZEND_ARG_INFO ( 0 , protocol ) <nl> + ZEND_ARG_INFO ( 0 , service ) <nl> + ZEND_END_ARG_INFO ( ) <nl> + <nl> static PHP_METHOD ( swoole_coroutine_util , create ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , suspend ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , cli_wait ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , sleep ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , fread ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , fwrite ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , gethostbyname ) ; <nl> + static PHP_METHOD ( swoole_coroutine_util , getaddrinfo ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , call_user_func ) ; <nl> static PHP_METHOD ( swoole_coroutine_util , call_user_func_array ) ; <nl> <nl> static const zend_function_entry swoole_coroutine_util_methods [ ] = <nl> PHP_ME ( swoole_coroutine_util , fread , arginfo_swoole_coroutine_fread , ZEND_ACC_PUBLIC | ZEND_ACC_STATIC ) <nl> PHP_ME ( swoole_coroutine_util , fwrite , arginfo_swoole_coroutine_fwrite , ZEND_ACC_PUBLIC | ZEND_ACC_STATIC ) <nl> PHP_ME ( swoole_coroutine_util , gethostbyname , arginfo_swoole_coroutine_gethostbyname , ZEND_ACC_PUBLIC | ZEND_ACC_STATIC ) <nl> + PHP_ME ( swoole_coroutine_util , getaddrinfo , arginfo_swoole_coroutine_getaddrinfo , ZEND_ACC_PUBLIC | ZEND_ACC_STATIC ) <nl> PHP_ME ( swoole_coroutine_util , call_user_func , arginfo_swoole_coroutine_call_user_func , ZEND_ACC_PUBLIC | ZEND_ACC_STATIC ) <nl> PHP_ME ( swoole_coroutine_util , call_user_func_array , arginfo_swoole_coroutine_call_user_func_array , ZEND_ACC_PUBLIC | ZEND_ACC_STATIC ) <nl> PHP_FE_END <nl> static void coro_dns_onResolveCompleted ( swAio_event * event ) <nl> efree ( context ) ; <nl> } <nl> <nl> + static void coro_dns_onGetaddrinfoCompleted ( swAio_event * event ) <nl> + { <nl> + php_context * context = event - > object ; <nl> + <nl> + zval * retval = NULL ; <nl> + zval * result = NULL ; <nl> + <nl> + SW_MAKE_STD_ZVAL ( result ) ; <nl> + <nl> + struct sockaddr_in * addr_v4 ; <nl> + struct sockaddr_in6 * addr_v6 ; <nl> + <nl> + swRequest_getaddrinfo * req = event - > req ; <nl> + <nl> + if ( event - > error = = 0 ) <nl> + { <nl> + array_init ( result ) ; <nl> + int i ; <nl> + char tmp [ INET6_ADDRSTRLEN ] ; <nl> + const char * r ; <nl> + <nl> + for ( i = 0 ; i < req - > count ; i + + ) <nl> + { <nl> + if ( req - > family = = AF_INET ) <nl> + { <nl> + addr_v4 = req - > result + ( i * sizeof ( struct sockaddr_in ) ) ; <nl> + r = inet_ntop ( AF_INET , ( const void * ) & addr_v4 - > sin_addr , tmp , sizeof ( tmp ) ) ; <nl> + } <nl> + else <nl> + { <nl> + addr_v6 = req - > result + ( i * sizeof ( struct sockaddr_in6 ) ) ; <nl> + r = inet_ntop ( AF_INET6 , ( const void * ) & addr_v6 - > sin6_addr , tmp , sizeof ( tmp ) ) ; <nl> + } <nl> + if ( r ) <nl> + { <nl> + add_next_index_string ( result , tmp ) ; <nl> + } <nl> + } <nl> + } <nl> + else <nl> + { <nl> + ZVAL_BOOL ( 
result , 0 ) ; <nl> + } <nl> + <nl> + int ret = coro_resume ( context , result , & retval ) ; <nl> + if ( ret = = CORO_END & & retval ) <nl> + { <nl> + sw_zval_ptr_dtor ( & retval ) ; <nl> + } <nl> + sw_zval_ptr_dtor ( & result ) ; <nl> + efree ( req - > hostname ) ; <nl> + efree ( req - > result ) ; <nl> + if ( req - > service ) <nl> + { <nl> + efree ( req - > service ) ; <nl> + } <nl> + efree ( req ) ; <nl> + efree ( context ) ; <nl> + } <nl> + <nl> static PHP_METHOD ( swoole_coroutine_util , gethostbyname ) <nl> { <nl> char * domain_name ; <nl> static PHP_METHOD ( swoole_coroutine_util , gethostbyname ) <nl> coro_yield ( ) ; <nl> } <nl> <nl> + static PHP_METHOD ( swoole_coroutine_util , getaddrinfo ) <nl> + { <nl> + char * hostname ; <nl> + zend_size_t l_hostname ; <nl> + long family = AF_INET ; <nl> + long socktype = SOCK_STREAM ; <nl> + long protocol = IPPROTO_TCP ; <nl> + char * service = NULL ; <nl> + zend_size_t l_service = 0 ; <nl> + <nl> + if ( zend_parse_parameters ( ZEND_NUM_ARGS ( ) TSRMLS_CC , " s | llls " , & hostname , & l_hostname , & family , socktype , & protocol , <nl> + & hostname , & l_hostname ) = = FAILURE ) <nl> + { <nl> + RETURN_FALSE ; <nl> + } <nl> + <nl> + if ( l_hostname < = 0 ) <nl> + { <nl> + swoole_php_fatal_error ( E_WARNING , " hostname is empty . " ) ; <nl> + RETURN_FALSE ; <nl> + } <nl> + <nl> + if ( family ! = AF_INET & & family ! = AF_INET6 ) <nl> + { <nl> + swoole_php_fatal_error ( E_WARNING , " unknown protocol family , must be AF_INET or AF_INET6 . " ) ; <nl> + RETURN_FALSE ; <nl> + } <nl> + <nl> + swAio_event ev ; <nl> + bzero ( & ev , sizeof ( swAio_event ) ) ; <nl> + <nl> + swRequest_getaddrinfo * req = emalloc ( sizeof ( swRequest_getaddrinfo ) ) ; <nl> + bzero ( req , sizeof ( swRequest_getaddrinfo ) ) ; <nl> + <nl> + php_context * sw_current_context = emalloc ( sizeof ( php_context ) ) ; <nl> + <nl> + ev . type = SW_AIO_GETADDRINFO ; <nl> + ev . object = sw_current_context ; <nl> + ev . callback = coro_dns_onGetaddrinfoCompleted ; <nl> + ev . req = req ; <nl> + <nl> + req - > hostname = estrndup ( hostname , l_hostname ) ; <nl> + req - > family = family ; <nl> + req - > socktype = socktype ; <nl> + req - > protocol = protocol ; <nl> + <nl> + if ( service ) <nl> + { <nl> + req - > service = estrndup ( service , l_service ) ; <nl> + } <nl> + <nl> + if ( family = = AF_INET ) <nl> + { <nl> + req - > result = ecalloc ( SW_DNS_HOST_BUFFER_SIZE , sizeof ( struct sockaddr_in ) ) ; <nl> + } <nl> + else <nl> + { <nl> + req - > result = ecalloc ( SW_DNS_HOST_BUFFER_SIZE , sizeof ( struct sockaddr_in6 ) ) ; <nl> + } <nl> + <nl> + if ( SwooleAIO . mode = = SW_AIO_LINUX ) <nl> + { <nl> + SwooleAIO . mode = SW_AIO_BASE ; <nl> + SwooleAIO . init = 0 ; <nl> + } <nl> + php_swoole_check_aio ( ) ; <nl> + <nl> + if ( swAio_dispatch ( & ev ) < 0 ) <nl> + { <nl> + efree ( ev . buf ) ; <nl> + RETURN_FALSE ; <nl> + } <nl> + <nl> + coro_save ( sw_current_context ) ; <nl> + coro_yield ( ) ; <nl> + } <nl> + <nl> # endif <nl> new file mode 100644 <nl> index 0000000000 . . b01875f25c <nl> mmm / dev / null <nl> ppp b / tests / swoole_coroutine / getaddrinfo . phpt <nl> <nl> + - - TEST - - <nl> + swoole_coroutine : gethostbyname <nl> + - - SKIPIF - - <nl> + < ? php require __DIR__ . " / . . / include / skipif . inc " ; ? > <nl> + - - FILE - - <nl> + < ? php <nl> + require_once __DIR__ . " / . . / include / swoole . inc " ; <nl> + <nl> + use Swoole \ Coroutine as co ; <nl> + <nl> + co : : create ( function ( ) { <nl> + $ ip = co : : getaddrinfo ( ' www . baidu . 
com ' ) ; <nl> + var_dump ( $ ip ) ; <nl> + } ) ; <nl> + <nl> + ? > <nl> + - - EXPECT - - <nl> \ No newline at end of file <nl>
fixed
swoole/swoole-src
6f26633bb059d819bfcfdf86e851dd65809949fa
2018-01-19T11:51:58Z
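The callback coro_dns_onGetaddrinfoCompleted above resumes the yielded coroutine with an array of printable addresses (or false on error), built by running inet_ntop over each sockaddr returned for the requested family. As a rough model of that result shape only, here is a short Python sketch using the standard socket module; it is not Swoole code, and the hostname is simply the one from the new test:

import socket

def resolve_all(hostname, family=socket.AF_INET):
    # Illustration of the flat list of IP strings that Coroutine::getaddrinfo()
    # hands back for one address family; the real implementation performs the
    # lookup on Swoole's AIO thread pool and resumes the PHP coroutine with it.
    infos = socket.getaddrinfo(hostname, None, family, socket.SOCK_STREAM)
    ips = []
    for _family, _type, _proto, _canonname, sockaddr in infos:
        ip = sockaddr[0]
        if ip not in ips:  # keep order, drop duplicates
            ips.append(ip)
    return ips

# resolve_all("www.baidu.com") returns a list of IPv4 strings, comparable to
# the array var_dump'ed in tests/swoole_coroutine/getaddrinfo.phpt.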
mmm a / trunk / src / core / srs_core . hpp <nl> ppp b / trunk / src / core / srs_core . hpp <nl> <nl> ( void ) 0 <nl> <nl> / / Checking for st ( state - threads ) , only support the following cpus : i386 / amd64 / x86_64 / arm <nl> - / / @ reamrk to patch ST for arm , read https : / / github . com / ossrs / state - threads / issues / 1 <nl> + / / @ reamrk To patch ST for arm , read https : / / github . com / ossrs / state - threads / issues / 1 <nl> # if ! defined ( __amd64__ ) & & ! defined ( __x86_64__ ) & & ! defined ( __i386__ ) & & ! defined ( __arm__ ) <nl> # error " only support i386 / amd64 / x86_64 / arm cpu " <nl> # endif <nl> mmm a / trunk / src / kernel / srs_kernel_aac . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_aac . hpp <nl> <nl> class SrsBuffer ; <nl> class ISrsStreamWriter ; <nl> <nl> - / * * <nl> - * Transmux the RTMP packets to AAC stream . <nl> - * / <nl> + / / Transmux the RTMP packets to AAC stream . <nl> class SrsAacTransmuxer <nl> { <nl> private : <nl> class SrsAacTransmuxer <nl> SrsAacTransmuxer ( ) ; <nl> virtual ~ SrsAacTransmuxer ( ) ; <nl> public : <nl> - / * * <nl> - * initialize the underlayer file stream . <nl> - * @ remark user can initialize multiple times to encode multiple aac files . <nl> - * @ remark , user must free the fs , aac encoder never close / free it . <nl> - * / <nl> + / / Initialize the underlayer file stream . <nl> + / / @ remark User can initialize multiple times to encode multiple aac files . <nl> + / / @ remark User must free the fs , aac encoder never close / free it . <nl> virtual srs_error_t initialize ( ISrsStreamWriter * fs ) ; <nl> public : <nl> - / * * <nl> - * write audio / video packet . <nl> - * @ remark assert data is not NULL . <nl> - * / <nl> + / / Write audio / video packet . <nl> + / / @ remark The assert data should not be NULL . <nl> virtual srs_error_t write_audio ( int64_t timestamp , char * data , int size ) ; <nl> } ; <nl> <nl> mmm a / trunk / src / kernel / srs_kernel_consts . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_consts . hpp <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / RTMP consts values <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / default vhost of rtmp <nl> + / / Default vhost of rtmp <nl> # define SRS_CONSTS_RTMP_DEFAULT_VHOST " __defaultVhost__ " <nl> # define SRS_CONSTS_RTMP_DEFAULT_APP " __defaultApp__ " <nl> - / / default port of rtmp <nl> + / / Default port of rtmp <nl> # define SRS_CONSTS_RTMP_DEFAULT_PORT 1935 <nl> <nl> - / / the default chunk size for system . <nl> + / / The default chunk size for system . <nl> # define SRS_CONSTS_RTMP_SRS_CHUNK_SIZE 60000 <nl> / / 6 . Chunking , RTMP protocol default chunk size . <nl> # define SRS_CONSTS_RTMP_PROTOCOL_CHUNK_SIZE 128 <nl> <nl> - / * * <nl> - * 6 . Chunking <nl> - * The chunk size is configurable . It can be set using a control <nl> - * message ( Set Chunk Size ) as described in section 7 . 1 . The maximum <nl> - * chunk size can be 65536 bytes and minimum 128 bytes . Larger values <nl> - * reduce CPU usage , but also commit to larger writes that can delay <nl> - * other content on lower bandwidth connections . Smaller chunks are not <nl> - * good for high - bit rate streaming . Chunk size is maintained <nl> - * independently for each direction . <nl> - * / <nl> + / / 6 . Chunking <nl> + / / The chunk size is configurable . 
It can be set using a control <nl> + / / message ( Set Chunk Size ) as described in section 7 . 1 . The maximum <nl> + / / chunk size can be 65536 bytes and minimum 128 bytes . Larger values <nl> + / / reduce CPU usage , but also commit to larger writes that can delay <nl> + / / other content on lower bandwidth connections . Smaller chunks are not <nl> + / / good for high - bit rate streaming . Chunk size is maintained <nl> + / / independently for each direction . <nl> # define SRS_CONSTS_RTMP_MIN_CHUNK_SIZE 128 <nl> # define SRS_CONSTS_RTMP_MAX_CHUNK_SIZE 65536 <nl> <nl> <nl> - / / the following is the timeout for rtmp protocol , <nl> + / / The following is the timeout for rtmp protocol , <nl> / / to avoid death connection . <nl> <nl> - / / the common io timeout , for connect , recv or send . <nl> + / / The common io timeout , for connect , recv or send . <nl> / / TODO : FIXME : Maybe change to smaller value , such as 3s ? <nl> # define SRS_CONSTS_RTMP_TIMEOUT ( 30 * SRS_UTIME_SECONDS ) <nl> <nl> - / / the timeout to wait for client control message , <nl> + / / The timeout to wait for client control message , <nl> / / if timeout , we generally ignore and send the data to client , <nl> / / generally , it ' s the pulse time for data seding . <nl> / / @ remark , recomment to 500ms . <nl> # define SRS_CONSTS_RTMP_PULSE ( 500 * SRS_UTIME_MILLISECONDS ) <nl> <nl> - / * * <nl> - * max rtmp header size : <nl> - * 1bytes basic header , <nl> - * 11bytes message header , <nl> - * 4bytes timestamp header , <nl> - * that is , 1 + 11 + 4 = 16bytes . <nl> - * / <nl> + / / The max rtmp header size : <nl> + / / 1bytes basic header , <nl> + / / 11bytes message header , <nl> + / / 4bytes timestamp header , <nl> + / / that is , 1 + 11 + 4 = 16bytes . <nl> # define SRS_CONSTS_RTMP_MAX_FMT0_HEADER_SIZE 16 <nl> - / * * <nl> - * max rtmp header size : <nl> - * 1bytes basic header , <nl> - * 4bytes timestamp header , <nl> - * that is , 1 + 4 = 5bytes . <nl> - * / <nl> + / / The max rtmp header size : <nl> + / / 1bytes basic header , <nl> + / / 4bytes timestamp header , <nl> + / / that is , 1 + 4 = 5bytes . <nl> / / always use fmt0 as cache . <nl> # define SRS_CONSTS_RTMP_MAX_FMT3_HEADER_SIZE 5 <nl> <nl> - / * * <nl> - * for performance issue , <nl> - * the iovs cache , @ see https : / / github . com / ossrs / srs / issues / 194 <nl> - * iovs cache for multiple messages for each connections . <nl> - * suppose the chunk size is 64k , each message send in a chunk which needs only 2 iovec , <nl> - * so the iovs max should be ( SRS_PERF_MW_MSGS * 2 ) <nl> - * <nl> - * @ remark , SRS will realloc when the iovs not enough . <nl> - * / <nl> + / / For performance issue , <nl> + / / the iovs cache , @ see https : / / github . com / ossrs / srs / issues / 194 <nl> + / / iovs cache for multiple messages for each connections . <nl> + / / suppose the chunk size is 64k , each message send in a chunk which needs only 2 iovec , <nl> + / / so the iovs max should be ( SRS_PERF_MW_MSGS * 2 ) <nl> + / / <nl> + / / @ remark , SRS will realloc when the iovs not enough . <nl> # define SRS_CONSTS_IOVS_MAX ( SRS_PERF_MW_MSGS * 2 ) <nl> - / * * <nl> - * for performance issue , <nl> - * the c0c3 cache , @ see https : / / github . com / ossrs / srs / issues / 194 <nl> - * c0c3 cache for multiple messages for each connections . 
<nl> - * each c0 < = 16byes , suppose the chunk size is 64k , <nl> - * each message send in a chunk which needs only a c0 header , <nl> - * so the c0c3 cache should be ( SRS_PERF_MW_MSGS * 16 ) <nl> - * <nl> - * @ remark , SRS will try another loop when c0c3 cache dry , for we cannot realloc it . <nl> - * so we use larger c0c3 cache , that is ( SRS_PERF_MW_MSGS * 32 ) <nl> - * / <nl> + / / For performance issue , <nl> + / / the c0c3 cache , @ see https : / / github . com / ossrs / srs / issues / 194 <nl> + / / c0c3 cache for multiple messages for each connections . <nl> + / / each c0 < = 16byes , suppose the chunk size is 64k , <nl> + / / each message send in a chunk which needs only a c0 header , <nl> + / / so the c0c3 cache should be ( SRS_PERF_MW_MSGS * 16 ) <nl> + / / <nl> + / / @ remark , SRS will try another loop when c0c3 cache dry , for we cannot realloc it . <nl> + / / so we use larger c0c3 cache , that is ( SRS_PERF_MW_MSGS * 32 ) <nl> # define SRS_CONSTS_C0C3_HEADERS_MAX ( SRS_PERF_MW_MSGS * 32 ) <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # define SRS_CONSTS_NULL_FILE " / dev / null " <nl> # define SRS_CONSTS_LOCALHOST " 127 . 0 . 0 . 1 " <nl> <nl> - / / signal defines . <nl> - / / reload the config file and apply new config . <nl> + / / The signal defines . <nl> + / / To reload the config file and apply new config . <nl> # define SRS_SIGNAL_RELOAD SIGHUP <nl> - / / reopen the log file . <nl> + / / Reopen the log file . <nl> # define SRS_SIGNAL_REOPEN_LOG SIGUSR1 <nl> - / / srs should gracefully quit , do dispose then exit . <nl> + / / The signal for srs to gracefully quit , do dispose then exit . <nl> # define SRS_SIGNAL_GRACEFULLY_QUIT SIGTERM <nl> <nl> - / / application level signals . <nl> - / / persistence the config in memory to config file . <nl> + / / The application level signals . <nl> + / / Persistence the config in memory to config file . <nl> / / @ see https : / / github . com / ossrs / srs / issues / 319 # issuecomment - 134993922 <nl> / / @ remark we actually don ' t handle the signal for it ' s not a valid os signal . <nl> # define SRS_SIGNAL_PERSISTENCE_CONFIG 1000 <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / log consts values <nl> + / / The log consts values <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / downloading speed - up , play to edge , ingest from origin <nl> + / / Downloading speed - up , play to edge , ingest from origin <nl> # define SRS_CONSTS_LOG_EDGE_PLAY " EIG " <nl> - / / uploading speed - up , publish to edge , foward to origin <nl> + / / Uploading speed - up , publish to edge , foward to origin <nl> # define SRS_CONSTS_LOG_EDGE_PUBLISH " EFW " <nl> - / / edge / origin forwarder . <nl> + / / The edge / origin forwarder . <nl> # define SRS_CONSTS_LOG_FOWARDER " FWR " <nl> - / / play stream on edge / origin . <nl> + / / Play stream on edge / origin . 
<nl> # define SRS_CONSTS_LOG_PLAY " PLA " <nl> - / / client publish to edge / origin <nl> + / / Client publish to edge / origin <nl> # define SRS_CONSTS_LOG_CLIENT_PUBLISH " CPB " <nl> - / / web / flash publish to edge / origin <nl> + / / The web / flash publish to edge / origin <nl> # define SRS_CONSTS_LOG_WEB_PUBLISH " WPB " <nl> - / / ingester for edge ( play ) / origin <nl> + / / Ingester for edge ( play ) / origin <nl> # define SRS_CONSTS_LOG_INGESTER " IGS " <nl> - / / hls log id . <nl> + / / The hls log id . <nl> # define SRS_CONSTS_LOG_HLS " HLS " <nl> - / / encoder log id . <nl> + / / The encoder log id . <nl> # define SRS_CONSTS_LOG_ENCODER " ENC " <nl> - / / http stream log id . <nl> + / / The http stream log id . <nl> # define SRS_CONSTS_LOG_HTTP_STREAM " HTS " <nl> - / / http stream cache log id . <nl> + / / The http stream cache log id . <nl> # define SRS_CONSTS_LOG_HTTP_STREAM_CACHE " HTC " <nl> - / / stream caster log id . <nl> + / / The stream caster log id . <nl> # define SRS_CONSTS_LOG_STREAM_CASTER " SCS " <nl> - / / the nginx exec log id . <nl> + / / The nginx exec log id . <nl> # define SRS_CONSTS_LOG_EXEC " EXE " <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / HTTP consts values <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / the default http port . <nl> + / / The default http port . <nl> # define SRS_CONSTS_HTTP_DEFAULT_PORT 80 <nl> - / / linux path seprator <nl> + / / The linux path seprator <nl> # define SRS_CONSTS_HTTP_PATH_SEP ' / ' <nl> - / / query string seprator <nl> + / / Query string seprator <nl> # define SRS_CONSTS_HTTP_QUERY_SEP ' ? ' <nl> <nl> - / / the default recv timeout . <nl> + / / The default recv timeout . <nl> # define SRS_HTTP_RECV_TIMEOUT ( 60 * SRS_UTIME_SECONDS ) <nl> <nl> / / 6 . 1 . 1 Status Code and Reason Phrase <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> # define SRS_CONSTS_KAFKA_DEFAULT_PORT 9092 <nl> <nl> - / / the common io timeout , for both recv and send . <nl> + / / The common io timeout , for both recv and send . <nl> # define SRS_CONSTS_KAFKA_TIMEOUT ( 30 * SRS_UTIME_MILLISECONDS ) <nl> <nl> # endif <nl> mmm a / trunk / src / kernel / srs_kernel_error . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_error . hpp <nl> <nl> <nl> # include < string > <nl> <nl> - / / for srs - librtmp , @ see https : / / github . com / ossrs / srs / issues / 213 <nl> + / / For srs - librtmp , @ see https : / / github . com / ossrs / srs / issues / 213 <nl> # ifndef _WIN32 <nl> # define ERROR_SUCCESS 0 <nl> # endif <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / system error . <nl> + / / The system error . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> # define ERROR_SOCKET_CREATE 1000 <nl> # define ERROR_SOCKET_SETREUSE 1001 <nl> <nl> # define ERROR_RTMP_STREAM_NAME_EMPTY 2051 <nl> # define ERROR_HTTP_HIJACK 2052 <nl> / / <nl> - / / system control message , <nl> - / / not an error , but special control logic . <nl> + / / The system control message , <nl> + / / It ' s not an error , but special control logic . 
<nl> / / <nl> - / / connection is redirect to another server . <nl> + / / When connection is redirect to another server . <nl> # define ERROR_CONTROL_REDIRECT 2997 <nl> - / / sys ctl : rtmp close stream , support replay . <nl> + / / For sys ctl : rtmp close stream , support replay . <nl> # define ERROR_CONTROL_RTMP_CLOSE 2998 <nl> - / / FMLE stop publish and republish . <nl> + / / When FMLE stop publish and republish . <nl> # define ERROR_CONTROL_REPUBLISH 2999 <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / application level <nl> + / / The application level errors . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> # define ERROR_HLS_METADATA 3000 <nl> # define ERROR_HLS_DECODE_ERROR 3001 <nl> <nl> / / # define ERROR_API_METHOD_NOT_ALLOWD <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / user - define error . <nl> + / / For user - define error . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> # define ERROR_USER_START 9000 <nl> / / # define ERROR_USER_DISCONNECT 9001 <nl> # define ERROR_SOURCE_NOT_FOUND 9002 <nl> # define ERROR_USER_END 9999 <nl> <nl> - / * * <nl> - * whether the error code is an system control error . <nl> - * / <nl> + / / Whether the error code is an system control error . <nl> / / TODO : FIXME : Remove it from underlayer for confused with error and logger . <nl> extern bool srs_is_system_control_error ( int error_code ) ; <nl> extern bool srs_is_system_control_error ( srs_error_t err ) ; <nl> extern bool srs_is_client_gracefully_close ( int error_code ) ; <nl> extern bool srs_is_client_gracefully_close ( srs_error_t err ) ; <nl> <nl> - / / Use complex errors , @ read https : / / github . com / ossrs / srs / issues / 913 <nl> + / / The complex error carries code , message , callstack and instant variables , <nl> + / / which is more strong and easy to locate problem by log , <nl> + / / please @ read https : / / github . com / ossrs / srs / issues / 913 <nl> class SrsCplxError <nl> { <nl> private : <nl> mmm a / trunk / src / kernel / srs_kernel_flv . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_flv . hpp <nl> <nl> <nl> # include < string > <nl> <nl> - / / for srs - librtmp , @ see https : / / github . com / ossrs / srs / issues / 213 <nl> + / / For srs - librtmp , @ see https : / / github . com / ossrs / srs / issues / 213 <nl> # ifndef _WIN32 <nl> # include < sys / uio . h > <nl> # endif <nl> class SrsFileReader ; <nl> <nl> # define SRS_FLV_TAG_HEADER_SIZE 11 <nl> # define SRS_FLV_PREVIOUS_TAG_SIZE 4 <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - / * * <nl> - 5 . Protocol Control Messages <nl> - RTMP reserves message type IDs 1 - 7 for protocol control messages . <nl> - These messages contain information needed by the RTM Chunk Stream <nl> - protocol or RTMP itself . 
Protocol messages with IDs 1 & 2 are <nl> - reserved for usage with RTM Chunk Stream protocol . Protocol messages <nl> - with IDs 3 - 6 are reserved for usage of RTMP . Protocol message with ID <nl> - 7 is used between edge server and origin server . <nl> - * / <nl> + / / 5 . Protocol Control Messages <nl> + / / RTMP reserves message type IDs 1 - 7 for protocol control messages . <nl> + / / These messages contain information needed by the RTM Chunk Stream <nl> + / / protocol or RTMP itself . Protocol messages with IDs 1 & 2 are <nl> + / / reserved for usage with RTM Chunk Stream protocol . Protocol messages <nl> + / / with IDs 3 - 6 are reserved for usage of RTMP . Protocol message with ID <nl> + / / 7 is used between edge server and origin server . <nl> # define RTMP_MSG_SetChunkSize 0x01 <nl> # define RTMP_MSG_AbortMessage 0x02 <nl> # define RTMP_MSG_Acknowledgement 0x03 <nl> class SrsFileReader ; <nl> # define RTMP_MSG_WindowAcknowledgementSize 0x05 <nl> # define RTMP_MSG_SetPeerBandwidth 0x06 <nl> # define RTMP_MSG_EdgeAndOriginServerCommand 0x07 <nl> - / * * <nl> - 3 . Types of messages <nl> - The server and the client send messages over the network to <nl> - communicate with each other . The messages can be of any type which <nl> - includes audio messages , video messages , command messages , shared <nl> - object messages , data messages , and user control messages . <nl> - 3 . 1 . Command message <nl> - Command messages carry the AMF - encoded commands between the client <nl> - and the server . These messages have been assigned message type value <nl> - of 20 for AMF0 encoding and message type value of 17 for AMF3 <nl> - encoding . These messages are sent to perform some operations like <nl> - connect , createStream , publish , play , pause on the peer . Command <nl> - messages like onstatus , result etc . are used to inform the sender <nl> - about the status of the requested commands . A command message <nl> - consists of command name , transaction ID , and command object that <nl> - contains related parameters . A client or a server can request Remote <nl> - Procedure Calls ( RPC ) over streams that are communicated using the <nl> - command messages to the peer . <nl> - * / <nl> + / / 3 . Types of messages <nl> + / / The server and the client send messages over the network to <nl> + / / communicate with each other . The messages can be of any type which <nl> + / / includes audio messages , video messages , command messages , shared <nl> + / / object messages , data messages , and user control messages . <nl> + / / 3 . 1 . Command message <nl> + / / Command messages carry the AMF - encoded commands between the client <nl> + / / and the server . These messages have been assigned message type value <nl> + / / of 20 for AMF0 encoding and message type value of 17 for AMF3 <nl> + / / encoding . These messages are sent to perform some operations like <nl> + / / connect , createStream , publish , play , pause on the peer . Command <nl> + / / messages like onstatus , result etc . are used to inform the sender <nl> + / / about the status of the requested commands . A command message <nl> + / / consists of command name , transaction ID , and command object that <nl> + / / contains related parameters . A client or a server can request Remote <nl> + / / Procedure Calls ( RPC ) over streams that are communicated using the <nl> + / / command messages to the peer . <nl> # define RTMP_MSG_AMF3CommandMessage 17 / / 0x11 <nl> # define RTMP_MSG_AMF0CommandMessage 20 / / 0x14 <nl> - / * * <nl> - 3 . 
2 . Data message <nl> - The client or the server sends this message to send Metadata or any <nl> - user data to the peer . Metadata includes details about the <nl> - data ( audio , video etc . ) like creation time , duration , theme and so <nl> - on . These messages have been assigned message type value of 18 for <nl> - AMF0 and message type value of 15 for AMF3 . <nl> - * / <nl> + / / 3 . 2 . Data message <nl> + / / The client or the server sends this message to send Metadata or any <nl> + / / user data to the peer . Metadata includes details about the <nl> + / / data ( audio , video etc . ) like creation time , duration , theme and so <nl> + / / on . These messages have been assigned message type value of 18 for <nl> + / / AMF0 and message type value of 15 for AMF3 . <nl> # define RTMP_MSG_AMF0DataMessage 18 / / 0x12 <nl> # define RTMP_MSG_AMF3DataMessage 15 / / 0x0F <nl> - / * * <nl> - 3 . 3 . Shared object message <nl> - A shared object is a Flash object ( a collection of name value pairs ) <nl> - that are in synchronization across multiple clients , instances , and <nl> - so on . The message types kMsgContainer = 19 for AMF0 and <nl> - kMsgContainerEx = 16 for AMF3 are reserved for shared object events . <nl> - Each message can contain multiple events . <nl> - * / <nl> + / / 3 . 3 . Shared object message <nl> + / / A shared object is a Flash object ( a collection of name value pairs ) <nl> + / / that are in synchronization across multiple clients , instances , and <nl> + / / so on . The message types kMsgContainer = 19 for AMF0 and <nl> + / / kMsgContainerEx = 16 for AMF3 are reserved for shared object events . <nl> + / / Each message can contain multiple events . <nl> # define RTMP_MSG_AMF3SharedObject 16 / / 0x10 <nl> # define RTMP_MSG_AMF0SharedObject 19 / / 0x13 <nl> - / * * <nl> - 3 . 4 . Audio message <nl> - The client or the server sends this message to send audio data to the <nl> - peer . The message type value of 8 is reserved for audio messages . <nl> - * / <nl> + / / 3 . 4 . Audio message <nl> + / / The client or the server sends this message to send audio data to the <nl> + / / peer . The message type value of 8 is reserved for audio messages . <nl> # define RTMP_MSG_AudioMessage 8 / / 0x08 <nl> - / * * <nl> - 3 . 5 . Video message <nl> - The client or the server sends this message to send video data to the <nl> - peer . The message type value of 9 is reserved for video messages . <nl> - These messages are large and can delay the sending of other type of <nl> - messages . To avoid such a situation , the video message is assigned <nl> - the lowest priority . <nl> - * / <nl> + / / 3 . 5 . Video message <nl> + / / The client or the server sends this message to send video data to the <nl> + / / peer . The message type value of 9 is reserved for video messages . <nl> + / / These messages are large and can delay the sending of other type of <nl> + / / messages . To avoid such a situation , the video message is assigned <nl> + / / The lowest priority . <nl> # define RTMP_MSG_VideoMessage 9 / / 0x09 <nl> - / * * <nl> - 3 . 6 . Aggregate message <nl> - An aggregate message is a single message that contains a list of submessages . <nl> - The message type value of 22 is reserved for aggregate <nl> - messages . <nl> - * / <nl> + / / 3 . 6 . Aggregate message <nl> + / / An aggregate message is a single message that contains a list of submessages . <nl> + / / The message type value of 22 is reserved for aggregate <nl> + / / messages . 
<nl> # define RTMP_MSG_AggregateMessage 22 / / 0x16 <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - / * * <nl> - * the chunk stream id used for some under - layer message , <nl> - * for example , the PC ( protocol control ) message . <nl> - * / <nl> + / / The chunk stream id used for some under - layer message , <nl> + / / For example , the PC ( protocol control ) message . <nl> # define RTMP_CID_ProtocolControl 0x02 <nl> - / * * <nl> - * the AMF0 / AMF3 command message , invoke method and return the result , over NetConnection . <nl> - * generally use 0x03 . <nl> - * / <nl> + / / The AMF0 / AMF3 command message , invoke method and return the result , over NetConnection . <nl> + / / generally use 0x03 . <nl> # define RTMP_CID_OverConnection 0x03 <nl> - / * * <nl> - * the AMF0 / AMF3 command message , invoke method and return the result , over NetConnection , <nl> - * the midst state ( we guess ) . <nl> - * rarely used , e . g . onStatus ( NetStream . Play . Reset ) . <nl> - * / <nl> + / / The AMF0 / AMF3 command message , invoke method and return the result , over NetConnection , <nl> + / / The midst state ( we guess ) . <nl> + / / rarely used , e . g . onStatus ( NetStream . Play . Reset ) . <nl> # define RTMP_CID_OverConnection2 0x04 <nl> - / * * <nl> - * the stream message ( amf0 / amf3 ) , over NetStream . <nl> - * generally use 0x05 . <nl> - * / <nl> + / / The stream message ( amf0 / amf3 ) , over NetStream . <nl> + / / generally use 0x05 . <nl> # define RTMP_CID_OverStream 0x05 <nl> - / * * <nl> - * the stream message ( amf0 / amf3 ) , over NetStream , the midst state ( we guess ) . <nl> - * rarely used , e . g . play ( " mp4 : mystram . f4v " ) <nl> - * / <nl> + / / The stream message ( amf0 / amf3 ) , over NetStream , the midst state ( we guess ) . <nl> + / / rarely used , e . g . play ( " mp4 : mystram . f4v " ) <nl> # define RTMP_CID_OverStream2 0x08 <nl> - / * * <nl> - * the stream message ( video ) , over NetStream <nl> - * generally use 0x06 . <nl> - * / <nl> + / / The stream message ( video ) , over NetStream <nl> + / / generally use 0x06 . <nl> # define RTMP_CID_Video 0x06 <nl> - / * * <nl> - * the stream message ( audio ) , over NetStream . <nl> - * generally use 0x07 . <nl> - * / <nl> + / / The stream message ( audio ) , over NetStream . <nl> + / / generally use 0x07 . <nl> # define RTMP_CID_Audio 0x07 <nl> <nl> - / * * <nl> - * 6 . 1 . Chunk Format <nl> - * Extended timestamp : 0 or 4 bytes <nl> - * This field MUST be sent when the normal timsestamp is set to <nl> - * 0xffffff , it MUST NOT be sent if the normal timestamp is set to <nl> - * anything else . So for values less than 0xffffff the normal <nl> - * timestamp field SHOULD be used in which case the extended timestamp <nl> - * MUST NOT be present . For values greater than or equal to 0xffffff <nl> - * the normal timestamp field MUST NOT be used and MUST be set to <nl> - * 0xffffff and the extended timestamp MUST be sent . <nl> - * / <nl> + / / 6 . 1 . 
Chunk Format <nl> + / / Extended timestamp : 0 or 4 bytes <nl> + / / This field MUST be sent when the normal timsestamp is set to <nl> + / / 0xffffff , it MUST NOT be sent if the normal timestamp is set to <nl> + / / anything else . So for values less than 0xffffff the normal <nl> + / / timestamp field SHOULD be used in which case the extended timestamp <nl> + / / MUST NOT be present . For values greater than or equal to 0xffffff <nl> + / / The normal timestamp field MUST NOT be used and MUST be set to <nl> + / / 0xffffff and the extended timestamp MUST be sent . <nl> # define RTMP_EXTENDED_TIMESTAMP 0xFFFFFF <nl> <nl> - / * * <nl> - * 4 . 1 . Message Header <nl> - * / <nl> + / / 4 . 1 . Message Header <nl> class SrsMessageHeader <nl> { <nl> public : <nl> - / * * <nl> - * 3bytes . <nl> - * Three - byte field that contains a timestamp delta of the message . <nl> - * @ remark , only used for decoding message from chunk stream . <nl> - * / <nl> + / / 3bytes . <nl> + / / Three - byte field that contains a timestamp delta of the message . <nl> + / / @ remark , only used for decoding message from chunk stream . <nl> int32_t timestamp_delta ; <nl> - / * * <nl> - * 3bytes . <nl> - * Three - byte field that represents the size of the payload in bytes . <nl> - * It is set in big - endian format . <nl> - * / <nl> + / / 3bytes . <nl> + / / Three - byte field that represents the size of the payload in bytes . <nl> + / / It is set in big - endian format . <nl> int32_t payload_length ; <nl> - / * * <nl> - * 1byte . <nl> - * One byte field to represent the message type . A range of type IDs <nl> - * ( 1 - 7 ) are reserved for protocol control messages . <nl> - * / <nl> + / / 1byte . <nl> + / / One byte field to represent the message type . A range of type IDs <nl> + / / ( 1 - 7 ) are reserved for protocol control messages . <nl> int8_t message_type ; <nl> - / * * <nl> - * 4bytes . <nl> - * Four - byte field that identifies the stream of the message . These <nl> - * bytes are set in little - endian format . <nl> - * / <nl> + / / 4bytes . <nl> + / / Four - byte field that identifies the stream of the message . These <nl> + / / bytes are set in little - endian format . <nl> int32_t stream_id ; <nl> <nl> - / * * <nl> - * Four - byte field that contains a timestamp of the message . <nl> - * The 4 bytes are packed in the big - endian order . <nl> - * @ remark , used as calc timestamp when decode and encode time . <nl> - * @ remark , we use 64bits for large time for jitter detect and hls . <nl> - * / <nl> + / / Four - byte field that contains a timestamp of the message . <nl> + / / The 4 bytes are packed in the big - endian order . <nl> + / / @ remark , used as calc timestamp when decode and encode time . <nl> + / / @ remark , we use 64bits for large time for jitter detect and hls . <nl> int64_t timestamp ; <nl> public : <nl> - / * * <nl> - * get the perfered cid ( chunk stream id ) which sendout over . <nl> - * set at decoding , and canbe used for directly send message , <nl> - * for example , dispatch to all connections . <nl> - * / <nl> + / / Get the perfered cid ( chunk stream id ) which sendout over . <nl> + / / set at decoding , and canbe used for directly send message , <nl> + / / For example , dispatch to all connections . <nl> int perfer_cid ; <nl> public : <nl> SrsMessageHeader ( ) ; <nl> class SrsMessageHeader <nl> bool is_set_peer_bandwidth ( ) ; <nl> bool is_aggregate ( ) ; <nl> public : <nl> - / * * <nl> - * create a amf0 script header , set the size and stream_id . 
<nl> - * / <nl> + / / Create a amf0 script header , set the size and stream_id . <nl> void initialize_amf0_script ( int size , int stream ) ; <nl> - / * * <nl> - * create a audio header , set the size , timestamp and stream_id . <nl> - * / <nl> + / / Create a audio header , set the size , timestamp and stream_id . <nl> void initialize_audio ( int size , uint32_t time , int stream ) ; <nl> - / * * <nl> - * create a video header , set the size , timestamp and stream_id . <nl> - * / <nl> + / / Create a video header , set the size , timestamp and stream_id . <nl> void initialize_video ( int size , uint32_t time , int stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * message is raw data RTMP message , bytes oriented , <nl> - * protcol always recv RTMP message , and can send RTMP message or RTMP packet . <nl> - * the common message is read from underlay protocol sdk . <nl> - * while the shared ptr message used to copy and send . <nl> - * / <nl> + / / The message is raw data RTMP message , bytes oriented , <nl> + / / protcol always recv RTMP message , and can send RTMP message or RTMP packet . <nl> + / / The common message is read from underlay protocol sdk . <nl> + / / while the shared ptr message used to copy and send . <nl> class SrsCommonMessage <nl> { <nl> - / / 4 . 1 . Message Header <nl> + / / 4 . 1 . Message Header <nl> public : <nl> SrsMessageHeader header ; <nl> - / / 4 . 2 . Message Payload <nl> + / / 4 . 2 . Message Payload <nl> public : <nl> - / * * <nl> - * current message parsed size , <nl> - * size < = header . payload_length <nl> - * for the payload maybe sent in multiple chunks . <nl> - * / <nl> + / / The current message parsed size , <nl> + / / size < = header . payload_length <nl> + / / For the payload maybe sent in multiple chunks . <nl> int size ; <nl> - / * * <nl> - * the payload of message , the SrsCommonMessage never know about the detail of payload , <nl> - * user must use SrsProtocol . decode_message to get concrete packet . <nl> - * @ remark , not all message payload can be decoded to packet . for example , <nl> - * video / audio packet use raw bytes , no video / audio packet . <nl> - * / <nl> + / / The payload of message , the SrsCommonMessage never know about the detail of payload , <nl> + / / user must use SrsProtocol . decode_message to get concrete packet . <nl> + / / @ remark , not all message payload can be decoded to packet . for example , <nl> + / / video / audio packet use raw bytes , no video / audio packet . <nl> char * payload ; <nl> public : <nl> SrsCommonMessage ( ) ; <nl> virtual ~ SrsCommonMessage ( ) ; <nl> public : <nl> - / * * <nl> - * alloc the payload to specified size of bytes . <nl> - * / <nl> + / / Alloc the payload to specified size of bytes . <nl> virtual void create_payload ( int size ) ; <nl> public : <nl> - / * * <nl> - * create common message , <nl> - * from the header and body . <nl> - * @ remark user should never free the body . <nl> - * @ param pheader , the header to copy to the message . NULL to ignore . <nl> - * / <nl> + / / Create common message , <nl> + / / from the header and body . <nl> + / / @ remark user should never free the body . <nl> + / / @ param pheader , the header to copy to the message . NULL to ignore . <nl> virtual srs_error_t create ( SrsMessageHeader * pheader , char * body , int size ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the message header for shared ptr message . <nl> - * only the message for all msgs are same . <nl> - * / <nl> + / / The message header for shared ptr message . 
<nl> + / / only the message for all msgs are same . <nl> struct SrsSharedMessageHeader <nl> { <nl> - / * * <nl> - * 3bytes . <nl> - * Three - byte field that represents the size of the payload in bytes . <nl> - * It is set in big - endian format . <nl> - * / <nl> + / / 3bytes . <nl> + / / Three - byte field that represents the size of the payload in bytes . <nl> + / / It is set in big - endian format . <nl> int32_t payload_length ; <nl> - / * * <nl> - * 1byte . <nl> - * One byte field to represent the message type . A range of type IDs <nl> - * ( 1 - 7 ) are reserved for protocol control messages . <nl> - * / <nl> + / / 1byte . <nl> + / / One byte field to represent the message type . A range of type IDs <nl> + / / ( 1 - 7 ) are reserved for protocol control messages . <nl> int8_t message_type ; <nl> - / * * <nl> - * get the perfered cid ( chunk stream id ) which sendout over . <nl> - * set at decoding , and canbe used for directly send message , <nl> - * for example , dispatch to all connections . <nl> - * / <nl> + / / Get the perfered cid ( chunk stream id ) which sendout over . <nl> + / / set at decoding , and canbe used for directly send message , <nl> + / / For example , dispatch to all connections . <nl> int perfer_cid ; <nl> <nl> SrsSharedMessageHeader ( ) ; <nl> virtual ~ SrsSharedMessageHeader ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * shared ptr message . <nl> - * for audio / video / data message that need less memory copy . <nl> - * and only for output . <nl> - * <nl> - * create first object by constructor and create ( ) , <nl> - * use copy if need reference count message . <nl> - * <nl> - * / <nl> + / / The shared ptr message . <nl> + / / For audio / video / data message that need less memory copy . <nl> + / / and only for output . <nl> + / / <nl> + / / Create first object by constructor and create ( ) , <nl> + / / use copy if need reference count message . <nl> class SrsSharedPtrMessage <nl> { <nl> - / / 4 . 1 . Message Header <nl> + / / 4 . 1 . Message Header <nl> public : <nl> - / / the header can shared , only set the timestamp and stream id . <nl> + / / The header can shared , only set the timestamp and stream id . <nl> / / @ see https : / / github . com / ossrs / srs / issues / 251 <nl> / / SrsSharedMessageHeader header ; <nl> - / * * <nl> - * Four - byte field that contains a timestamp of the message . <nl> - * The 4 bytes are packed in the big - endian order . <nl> - * @ remark , used as calc timestamp when decode and encode time . <nl> - * @ remark , we use 64bits for large time for jitter detect and hls . <nl> - * / <nl> + / / Four - byte field that contains a timestamp of the message . <nl> + / / The 4 bytes are packed in the big - endian order . <nl> + / / @ remark , used as calc timestamp when decode and encode time . <nl> + / / @ remark , we use 64bits for large time for jitter detect and hls . <nl> int64_t timestamp ; <nl> - / * * <nl> - * 4bytes . <nl> - * Four - byte field that identifies the stream of the message . These <nl> - * bytes are set in big - endian format . <nl> - * / <nl> + / / 4bytes . <nl> + / / Four - byte field that identifies the stream of the message . These <nl> + / / bytes are set in big - endian format . <nl> int32_t stream_id ; <nl> / / 4 . 2 . Message Payload <nl> public : <nl> - / * * <nl> - * current message parsed size , <nl> - * size < = header . payload_length <nl> - * for the payload maybe sent in multiple chunks . <nl> - * / <nl> + / / The current message parsed size , <nl> + / / size < = header . 
payload_length <nl> + / / For the payload maybe sent in multiple chunks . <nl> int size ; <nl> - / * * <nl> - * the payload of message , the SrsCommonMessage never know about the detail of payload , <nl> - * user must use SrsProtocol . decode_message to get concrete packet . <nl> - * @ remark , not all message payload can be decoded to packet . for example , <nl> - * video / audio packet use raw bytes , no video / audio packet . <nl> - * / <nl> + / / The payload of message , the SrsCommonMessage never know about the detail of payload , <nl> + / / user must use SrsProtocol . decode_message to get concrete packet . <nl> + / / @ remark , not all message payload can be decoded to packet . for example , <nl> + / / video / audio packet use raw bytes , no video / audio packet . <nl> char * payload ; <nl> private : <nl> class SrsSharedPtrPayload <nl> { <nl> public : <nl> - / / shared message header . <nl> + / / The shared message header . <nl> / / @ see https : / / github . com / ossrs / srs / issues / 251 <nl> SrsSharedMessageHeader header ; <nl> - / / actual shared payload . <nl> + / / The actual shared payload . <nl> char * payload ; <nl> - / / size of payload . <nl> + / / The size of payload . <nl> int size ; <nl> - / / the reference count <nl> + / / The reference count <nl> int shared_count ; <nl> public : <nl> SrsSharedPtrPayload ( ) ; <nl> class SrsSharedPtrMessage <nl> SrsSharedPtrMessage ( ) ; <nl> virtual ~ SrsSharedPtrMessage ( ) ; <nl> public : <nl> - / * * <nl> - * create shared ptr message , <nl> - * copy header , manage the payload of msg , <nl> - * set the payload to NULL to prevent double free . <nl> - * @ remark payload of msg set to NULL if success . <nl> - * / <nl> + / / Create shared ptr message , <nl> + / / copy header , manage the payload of msg , <nl> + / / set the payload to NULL to prevent double free . <nl> + / / @ remark payload of msg set to NULL if success . <nl> virtual srs_error_t create ( SrsCommonMessage * msg ) ; <nl> - / * * <nl> - * create shared ptr message , <nl> - * from the header and payload . <nl> - * @ remark user should never free the payload . <nl> - * @ param pheader , the header to copy to the message . NULL to ignore . <nl> - * / <nl> + / / Create shared ptr message , <nl> + / / from the header and payload . <nl> + / / @ remark user should never free the payload . <nl> + / / @ param pheader , the header to copy to the message . NULL to ignore . <nl> virtual srs_error_t create ( SrsMessageHeader * pheader , char * payload , int size ) ; <nl> - / * * <nl> - * get current reference count . <nl> - * when this object created , count set to 0 . <nl> - * if copy ( ) this object , count increase 1 . <nl> - * if this or copy deleted , free payload when count is 0 , or count - - . <nl> - * @ remark , assert object is created . <nl> - * / <nl> + / / Get current reference count . <nl> + / / when this object created , count set to 0 . <nl> + / / if copy ( ) this object , count increase 1 . <nl> + / / if this or copy deleted , free payload when count is 0 , or count - - . <nl> + / / @ remark , assert object is created . <nl> virtual int count ( ) ; <nl> - / * * <nl> - * check perfer cid and stream id . <nl> - * @ return whether stream id already set . <nl> - * / <nl> + / / check perfer cid and stream id . <nl> + / / @ return whether stream id already set . 
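An aside on the reference counting described above: the count starts at 0 for the creator, copy() adds an owner, and the payload is freed only when the last owner is destroyed. The standalone C++ sketch below restates that contract with plain structs; it is illustrative only, does not reuse the SRS classes, and the SharedPayload/SharedMsg names are hypothetical.

// Illustrative sketch of the copy-on-share contract described above.
// Not the SRS implementation; SharedPayload/SharedMsg are hypothetical names.
#include <cassert>
#include <cstring>

struct SharedPayload {
    char* data;
    int size;
    int shared_count; // 0 while only the creator owns it, like SrsSharedPtrPayload.
};

struct SharedMsg {
    SharedPayload* ptr;

    static SharedMsg* create(const char* body, int size) {
        SharedMsg* msg = new SharedMsg();
        msg->ptr = new SharedPayload();
        msg->ptr->data = new char[size];
        memcpy(msg->ptr->data, body, size);
        msg->ptr->size = size;
        msg->ptr->shared_count = 0;
        return msg;
    }
    SharedMsg* copy() {
        SharedMsg* cp = new SharedMsg();
        cp->ptr = ptr;
        ptr->shared_count++; // one more owner of the same payload.
        return cp;
    }
    ~SharedMsg() {
        if (ptr->shared_count == 0) { // last owner frees the payload.
            delete[] ptr->data;
            delete ptr;
        } else {
            ptr->shared_count--;
        }
    }
};

int main() {
    SharedMsg* msg = SharedMsg::create("flv tag", 7);
    SharedMsg* forwarded = msg->copy(); // e.g. dispatch to another connection.
    assert(msg->ptr->shared_count == 1);
    delete msg;       // payload survives, forwarded still owns it.
    delete forwarded; // payload freed here, count reached 0.
    return 0;
}

The point of the design is that audio/video payloads can be queued for many consumers without copying the bytes; only the small message object is duplicated.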
<nl> virtual bool check ( int stream_id ) ; <nl> public : <nl> virtual bool is_av ( ) ; <nl> virtual bool is_audio ( ) ; <nl> virtual bool is_video ( ) ; <nl> public : <nl> - / * * <nl> - * generate the chunk header to cache . <nl> - * @ return the size of header . <nl> - * / <nl> + / / generate the chunk header to cache . <nl> + / / @ return the size of header . <nl> virtual int chunk_header ( char * cache , int nb_cache , bool c0 ) ; <nl> public : <nl> - / * * <nl> - * copy current shared ptr message , use ref - count . <nl> - * @ remark , assert object is created . <nl> - * / <nl> + / / copy current shared ptr message , use ref - count . <nl> + / / @ remark , assert object is created . <nl> virtual SrsSharedPtrMessage * copy ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * Transmux RTMP packets to FLV stream . <nl> - * / <nl> + / / Transmux RTMP packets to FLV stream . <nl> class SrsFlvTransmuxer <nl> { <nl> private : <nl> class SrsFlvTransmuxer <nl> SrsFlvTransmuxer ( ) ; <nl> virtual ~ SrsFlvTransmuxer ( ) ; <nl> public : <nl> - / * * <nl> - * initialize the underlayer file stream . <nl> - * @ remark user can initialize multiple times to encode multiple flv files . <nl> - * @ remark , user must free the @ param fw , flv encoder never close / free it . <nl> - * / <nl> + / / Initialize the underlayer file stream . <nl> + / / @ remark user can initialize multiple times to encode multiple flv files . <nl> + / / @ remark , user must free the @ param fw , flv encoder never close / free it . <nl> virtual srs_error_t initialize ( ISrsWriter * fw ) ; <nl> public : <nl> - / * * <nl> - * write flv header . <nl> - * write following : <nl> - * 1 . E . 2 The FLV header <nl> - * 2 . PreviousTagSize0 UI32 Always 0 <nl> - * that is , 9 + 4 = 13bytes . <nl> - * / <nl> + / / Write flv header . <nl> + / / Write following : <nl> + / / 1 . E . 2 The FLV header <nl> + / / 2 . PreviousTagSize0 UI32 Always 0 <nl> + / / that is , 9 + 4 = 13bytes . <nl> virtual srs_error_t write_header ( ) ; <nl> virtual srs_error_t write_header ( char flv_header [ 9 ] ) ; <nl> - / * * <nl> - * write flv metadata . <nl> - * @ param type , the type of data , or other message type . <nl> - * @ see SrsFrameType <nl> - * @ param data , the amf0 metadata which serialize from : <nl> - * AMF0 string : onMetaData , <nl> - * AMF0 object : the metadata object . <nl> - * @ remark assert data is not NULL . <nl> - * / <nl> + / / Write flv metadata . <nl> + / / @ param type , the type of data , or other message type . <nl> + / / @ see SrsFrameType <nl> + / / @ param data , the amf0 metadata which serialize from : <nl> + / / AMF0 string : onMetaData , <nl> + / / AMF0 object : the metadata object . <nl> + / / @ remark assert data is not NULL . <nl> virtual srs_error_t write_metadata ( char type , char * data , int size ) ; <nl> - / * * <nl> - * write audio / video packet . <nl> - * @ remark assert data is not NULL . <nl> - * / <nl> + / / Write audio / video packet . <nl> + / / @ remark assert data is not NULL . <nl> virtual srs_error_t write_audio ( int64_t timestamp , char * data , int size ) ; <nl> virtual srs_error_t write_video ( int64_t timestamp , char * data , int size ) ; <nl> public : <nl> - / * * <nl> - * get the tag size , <nl> - * including the tag header , body , and 4bytes previous tag size . <nl> - * @ remark assert data_size is not negative . <nl> - * / <nl> + / / Get the tag size , <nl> + / / including the tag header , body , and 4bytes previous tag size . <nl> + / / @ remark assert data_size is not negative . 
<nl> static int size_tag ( int data_size ) ; <nl> # ifdef SRS_PERF_FAST_FLV_ENCODER <nl> private : <nl> - / / cache tag header . <nl> + / / The cache tag header . <nl> int nb_tag_headers ; <nl> char * tag_headers ; <nl> - / / cache pps ( previous tag size ) <nl> + / / The cache pps ( previous tag size ) <nl> int nb_ppts ; <nl> char * ppts ; <nl> - / / cache iovss . <nl> + / / The cache iovss . <nl> int nb_iovss_cache ; <nl> iovec * iovss_cache ; <nl> public : <nl> - / * * <nl> - * write the tags in a time . <nl> - * / <nl> + / / Write the tags in a time . <nl> virtual srs_error_t write_tags ( SrsSharedPtrMessage * * msgs , int count ) ; <nl> # endif <nl> private : <nl> class SrsFlvTransmuxer <nl> virtual srs_error_t write_tag ( char * header , int header_size , char * tag , int tag_size ) ; <nl> } ; <nl> <nl> - / * * <nl> - * decode flv file . <nl> - * / <nl> + / / Decode flv file . <nl> class SrsFlvDecoder <nl> { <nl> private : <nl> class SrsFlvDecoder <nl> SrsFlvDecoder ( ) ; <nl> virtual ~ SrsFlvDecoder ( ) ; <nl> public : <nl> - / * * <nl> - * initialize the underlayer file stream <nl> - * @ remark user can initialize multiple times to decode multiple flv files . <nl> - * @ remark user must free the @ param fr , flv decoder never close / free it <nl> - * / <nl> + / / Initialize the underlayer file stream <nl> + / / @ remark user can initialize multiple times to decode multiple flv files . <nl> + / / @ remark user must free the @ param fr , flv decoder never close / free it <nl> virtual srs_error_t initialize ( ISrsReader * fr ) ; <nl> public : <nl> - / * * <nl> - * read the flv header , donot including the 4bytes previous tag size . <nl> - * @ remark assert header not NULL . <nl> - * / <nl> + / / Read the flv header , donot including the 4bytes previous tag size . <nl> + / / @ remark assert header not NULL . <nl> virtual srs_error_t read_header ( char header [ 9 ] ) ; <nl> - / * * <nl> - * read the tag header infos . <nl> - * @ remark assert ptype / pdata_size / ptime not NULL . <nl> - * / <nl> + / / Read the tag header infos . <nl> + / / @ remark assert ptype / pdata_size / ptime not NULL . <nl> virtual srs_error_t read_tag_header ( char * ptype , int32_t * pdata_size , uint32_t * ptime ) ; <nl> - / * * <nl> - * read the tag data . <nl> - * @ remark assert data not NULL . <nl> - * / <nl> + / / Read the tag data . <nl> + / / @ remark assert data not NULL . <nl> virtual srs_error_t read_tag_data ( char * data , int32_t size ) ; <nl> - / * * <nl> - * read the 4bytes previous tag size . <nl> - * @ remark assert previous_tag_size not NULL . <nl> - * / <nl> + / / Read the 4bytes previous tag size . <nl> + / / @ remark assert previous_tag_size not NULL . <nl> virtual srs_error_t read_previous_tag_size ( char previous_tag_size [ 4 ] ) ; <nl> } ; <nl> <nl> - / * * <nl> - * decode flv fast by only decoding the header and tag . <nl> - * used for vod flv stream to read the header and sequence header , <nl> - * then seek to specified offset . <nl> - * / <nl> + / / Decode flv fast by only decoding the header and tag . <nl> + / / used for vod flv stream to read the header and sequence header , <nl> + / / then seek to specified offset . <nl> class SrsFlvVodStreamDecoder <nl> { <nl> private : <nl> class SrsFlvVodStreamDecoder <nl> SrsFlvVodStreamDecoder ( ) ; <nl> virtual ~ SrsFlvVodStreamDecoder ( ) ; <nl> public : <nl> - / * * <nl> - * initialize the underlayer file stream <nl> - * @ remark user can initialize multiple times to decode multiple flv files . 
<nl> - * @ remark user must free the @ param fr , flv decoder never close / free it . <nl> - * / <nl> + / / Initialize the underlayer file stream <nl> + / / @ remark user can initialize multiple times to decode multiple flv files . <nl> + / / @ remark user must free the @ param fr , flv decoder never close / free it . <nl> virtual srs_error_t initialize ( ISrsReader * fr ) ; <nl> public : <nl> - / * * <nl> - * read the flv header and its size . <nl> - * @ param header , fill it 13bytes ( 9bytes header , 4bytes previous tag size ) . <nl> - * @ remark assert header not NULL . <nl> - * / <nl> + / / Read the flv header and its size . <nl> + / / @ param header , fill it 13bytes ( 9bytes header , 4bytes previous tag size ) . <nl> + / / @ remark assert header not NULL . <nl> virtual srs_error_t read_header_ext ( char header [ 13 ] ) ; <nl> - / * * <nl> - * read the sequence header tags offset and its size . <nl> - * @ param pstart , the start offset of sequence header . <nl> - * @ param psize , output the size , ( tag header ) + ( tag body ) + ( 4bytes previous tag size ) . <nl> - * @ remark we think the first audio / video is sequence header . <nl> - * @ remark assert pstart / psize not NULL . <nl> - * / <nl> + / / Read the sequence header tags offset and its size . <nl> + / / @ param pstart , the start offset of sequence header . <nl> + / / @ param psize , output the size , ( tag header ) + ( tag body ) + ( 4bytes previous tag size ) . <nl> + / / @ remark we think the first audio / video is sequence header . <nl> + / / @ remark assert pstart / psize not NULL . <nl> virtual srs_error_t read_sequence_header_summary ( int64_t * pstart , int * psize ) ; <nl> public : <nl> - / * * <nl> - * for start offset , seed to this position and response flv stream . <nl> - * / <nl> + / / For start offset , seed to this position and response flv stream . <nl> virtual srs_error_t seek2 ( int64_t offset ) ; <nl> } ; <nl> <nl> mmm a / trunk / src / kernel / srs_kernel_log . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_log . hpp <nl> <nl> <nl> # include < srs_kernel_consts . hpp > <nl> <nl> - / * * <nl> - * the log level , for example : <nl> - * if specified Debug level , all level messages will be logged . <nl> - * if specified Warn level , only Warn / Error / Fatal level messages will be logged . <nl> - * / <nl> + / / The log level , for example : <nl> + / / if specified Debug level , all level messages will be logged . <nl> + / / if specified Warn level , only Warn / Error / Fatal level messages will be logged . <nl> enum SrsLogLevel <nl> { <nl> SrsLogLevelForbidden = 0x00 , <nl> - / / only used for very verbose debug , generally , <nl> + / / Only used for very verbose debug , generally , <nl> / / we compile without this level for high performance . <nl> SrsLogLevelVerbose = 0x01 , <nl> SrsLogLevelInfo = 0x02 , <nl> enum SrsLogLevel <nl> SrsLogLevelDisabled = 0x20 , <nl> } ; <nl> <nl> - / * * <nl> - * the log interface provides method to write log . <nl> - * but we provides some macro , which enable us to disable the log when compile . <nl> - * @ see also SmtDebug / SmtTrace / SmtWarn / SmtError which is corresponding to Debug / Trace / Warn / Fatal . <nl> - * / <nl> + / / The log interface provides method to write log . <nl> + / / but we provides some macro , which enable us to disable the log when compile . <nl> + / / @ see also SmtDebug / SmtTrace / SmtWarn / SmtError which is corresponding to Debug / Trace / Warn / Fatal . 
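The SrsLogLevel comment above describes the configured level as a floor: configure Warn and only Warn/Error/Fatal messages are written. A minimal sketch of that filter is shown below; only the Forbidden/Verbose/Info/Disabled values are visible in this diff, so the Trace/Warn/Error constants here are assumed placeholders, not the real SRS values.

// Minimal sketch of level gating: messages below the configured level are dropped.
// The Trace/Warn/Error numeric values are assumed for illustration only.
#include <cstdio>

enum LogLevel {
    LogLevelVerbose  = 0x01,
    LogLevelInfo     = 0x02,
    LogLevelTrace    = 0x04, // assumed
    LogLevelWarn     = 0x08, // assumed
    LogLevelError    = 0x10, // assumed
    LogLevelDisabled = 0x20,
};

static LogLevel configured_level = LogLevelWarn;

void write_log(LogLevel level, const char* msg) {
    // Drop anything below the configured floor, or everything when disabled.
    if (configured_level == LogLevelDisabled || level < configured_level) {
        return;
    }
    printf("%s\n", msg);
}

int main() {
    write_log(LogLevelInfo, "dropped: Info is below the Warn floor");
    write_log(LogLevelError, "written: Error passes the Warn floor");
    return 0;
}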
<nl> class ISrsLog <nl> { <nl> public : <nl> ISrsLog ( ) ; <nl> virtual ~ ISrsLog ( ) ; <nl> public : <nl> - / * * <nl> - * initialize log utilities . <nl> - * / <nl> + / / Initialize log utilities . <nl> virtual srs_error_t initialize ( ) ; <nl> - / * * <nl> - * reopen the log file for log rotate . <nl> - * / <nl> + / / Reopen the log file for log rotate . <nl> virtual void reopen ( ) ; <nl> public : <nl> - / * * <nl> - * log for verbose , very verbose information . <nl> - * / <nl> + / / The log for verbose , very verbose information . <nl> virtual void verbose ( const char * tag , int context_id , const char * fmt , . . . ) ; <nl> - / * * <nl> - * log for debug , detail information . <nl> - * / <nl> + / / The log for debug , detail information . <nl> virtual void info ( const char * tag , int context_id , const char * fmt , . . . ) ; <nl> - / * * <nl> - * log for trace , important information . <nl> - * / <nl> + / / The log for trace , important information . <nl> virtual void trace ( const char * tag , int context_id , const char * fmt , . . . ) ; <nl> - / * * <nl> - * log for warn , warn is something should take attention , but not a error . <nl> - * / <nl> + / / The log for warn , warn is something should take attention , but not a error . <nl> virtual void warn ( const char * tag , int context_id , const char * fmt , . . . ) ; <nl> - / * * <nl> - * log for error , something error occur , do something about the error , ie . close the connection , <nl> - * but we will donot abort the program . <nl> - * / <nl> + / / The log for error , something error occur , do something about the error , ie . close the connection , <nl> + / / but we will donot abort the program . <nl> virtual void error ( const char * tag , int context_id , const char * fmt , . . . ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the context id manager to identify context , for instance , the green - thread . <nl> - * usage : <nl> - * _srs_context - > generate_id ( ) ; / / when thread start . <nl> - * _srs_context - > get_id ( ) ; / / get current generated id . <nl> - * int old_id = _srs_context - > set_id ( 1000 ) ; / / set context id if need to merge thread context . <nl> - * / <nl> - / / the context for multiple clients . <nl> + / / The context id manager to identify context , for instance , the green - thread . <nl> + / / Usage : <nl> + / / _srs_context - > generate_id ( ) ; / / when thread start . <nl> + / / _srs_context - > get_id ( ) ; / / get current generated id . <nl> + / / int old_id = _srs_context - > set_id ( 1000 ) ; / / set context id if need to merge thread context . <nl> + / / The context for multiple clients . <nl> class ISrsThreadContext <nl> { <nl> public : <nl> ISrsThreadContext ( ) ; <nl> virtual ~ ISrsThreadContext ( ) ; <nl> public : <nl> - / * * <nl> - * generate the id for current context . <nl> - * / <nl> + / / Generate the id for current context . <nl> virtual int generate_id ( ) ; <nl> - / * * <nl> - * get the generated id of current context . <nl> - * / <nl> + / / Get the generated id of current context . <nl> virtual int get_id ( ) ; <nl> - / * * <nl> - * set the id of current context . <nl> - * @ return the previous id value ; 0 if no context . <nl> - * / <nl> + / / Set the id of current context . <nl> + / / @ return the previous id value ; 0 if no context . 
<nl> virtual int set_id ( int v ) ; <nl> } ; <nl> <nl> - / / @ global user must provides a log object <nl> + / / @ global User must provides a log object <nl> extern ISrsLog * _srs_log ; <nl> <nl> - / / @ global user must implements the LogContext and define a global instance . <nl> + / / @ global User must implements the LogContext and define a global instance . <nl> extern ISrsThreadContext * _srs_context ; <nl> <nl> - / / donot print method <nl> + / / Log style . <nl> + / / Use __FUNCTION__ to print c method <nl> + / / Use __PRETTY_FUNCTION__ to print c + + class : method <nl> # if 1 <nl> # define srs_verbose ( msg , . . . ) _srs_log - > verbose ( NULL , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # define srs_info ( msg , . . . ) _srs_log - > info ( NULL , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> extern ISrsThreadContext * _srs_context ; <nl> # define srs_warn ( msg , . . . ) _srs_log - > warn ( NULL , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # define srs_error ( msg , . . . ) _srs_log - > error ( NULL , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # endif <nl> - / / use __FUNCTION__ to print c method <nl> # if 0 <nl> # define srs_verbose ( msg , . . . ) _srs_log - > verbose ( __FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # define srs_info ( msg , . . . ) _srs_log - > info ( __FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> extern ISrsThreadContext * _srs_context ; <nl> # define srs_warn ( msg , . . . ) _srs_log - > warn ( __FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # define srs_error ( msg , . . . ) _srs_log - > error ( __FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # endif <nl> - / / use __PRETTY_FUNCTION__ to print c + + class : method <nl> # if 0 <nl> # define srs_verbose ( msg , . . . ) _srs_log - > verbose ( __PRETTY_FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # define srs_info ( msg , . . . ) _srs_log - > info ( __PRETTY_FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> extern ISrsThreadContext * _srs_context ; <nl> # define srs_error ( msg , . . . ) _srs_log - > error ( __PRETTY_FUNCTION__ , _srs_context - > get_id ( ) , msg , # # __VA_ARGS__ ) <nl> # endif <nl> <nl> - / / TODO : FIXME : add more verbose and info logs . <nl> + / / TODO : FIXME : Add more verbose and info logs . <nl> # ifndef SRS_AUTO_VERBOSE <nl> # undef srs_verbose <nl> # define srs_verbose ( msg , . . . ) ( void ) 0 <nl> mmm a / trunk / src / kernel / srs_kernel_mp4 . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_mp4 . hpp <nl> class SrsMp4TrackFragmentHeaderBox ; <nl> class SrsMp4TrackFragmentDecodeTimeBox ; <nl> class SrsMp4TrackFragmentRunBox ; <nl> <nl> - / * * <nl> - * 4 . 2 Object Structure <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 16 <nl> - * / <nl> + / / 4 . 2 Object Structure <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 16 <nl> enum SrsMp4BoxType <nl> { <nl> SrsMp4BoxTypeForbidden = 0x00 , <nl> enum SrsMp4BoxType <nl> SrsMp4BoxTypeTRUN = 0x7472756e , / / ' trun ' <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 3 . 3 Semantics <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 37 <nl> - * / <nl> + / / 8 . 4 . 3 . 3 Semantics <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . 
pdf , page 37 <nl> enum SrsMp4HandlerType <nl> { <nl> SrsMp4HandlerTypeForbidden = 0x00 , <nl> enum SrsMp4HandlerType <nl> SrsMp4HandlerTypeSOUN = 0x736f756e , / / ' soun ' <nl> } ; <nl> <nl> - / * * <nl> - * File format brands <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 166 <nl> - * / <nl> + / / File format brands <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 166 <nl> enum SrsMp4BoxBrand <nl> { <nl> SrsMp4BoxBrandForbidden = 0x00 , <nl> enum SrsMp4BoxBrand <nl> SrsMp4BoxBrandMSDH = 0x6d736468 , / / ' msdh ' <nl> } ; <nl> <nl> - / * * <nl> - * The context to dump . <nl> - * / <nl> + / / The context to dump . <nl> struct SrsMp4DumpContext <nl> { <nl> int level ; <nl> struct SrsMp4DumpContext <nl> SrsMp4DumpContext indent ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 4 . 2 Object Structure <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 16 <nl> - * / <nl> + / / 4 . 2 Object Structure <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 16 <nl> class SrsMp4Box : public ISrsCodec <nl> { <nl> private : <nl> / / The size is the entire size of the box , including the size and type header , fields , <nl> - / / and all contained boxes . This facilitates general parsing of the file . <nl> + / / And all contained boxes . This facilitates general parsing of the file . <nl> / / <nl> / / if size is 1 then the actual size is in the field largesize ; <nl> / / if size is 0 , then this box is the last one in the file , and its contents <nl> class SrsMp4Box : public ISrsCodec <nl> public : <nl> / / identifies the box type ; standard boxes use a compact type , which is normally four printable <nl> / / characters , to permit ease of identification , and is shown so in the boxes below . User extensions use <nl> - / / an extended type ; in this case , the type field is set to ‘ uuid ’ . <nl> + / / An extended type ; in this case , the type field is set to ‘ uuid ’ . <nl> SrsMp4BoxType type ; <nl> / / For box ' uuid ' . <nl> std : : vector < char > usertype ; <nl> class SrsMp4Box : public ISrsCodec <nl> virtual int remove ( SrsMp4BoxType bt ) ; <nl> / / Dumps the box and all contained boxes . <nl> virtual std : : stringstream & dumps ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> - / * * <nl> - * Discovery the box from buffer . <nl> - * @ param ppbox Output the discoveried box , which user must free it . <nl> - * / <nl> + / / Discovery the box from buffer . <nl> + / / @ param ppbox Output the discoveried box , which user must free it . <nl> static srs_error_t discovery ( SrsBuffer * buf , SrsMp4Box * * ppbox ) ; <nl> / / Interface ISrsCodec <nl> public : <nl> class SrsMp4Box : public ISrsCodec <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 4 . 2 Object Structure <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 17 <nl> - * / <nl> + / / 4 . 2 Object Structure <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 17 <nl> class SrsMp4FullBox : public SrsMp4Box <nl> { <nl> public : <nl> - / / an integer that specifies the version of this format of the box . <nl> + / / An integer that specifies the version of this format of the box . 
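The size rule in the SrsMp4Box comment above (size covers the whole box including its header; size == 1 means a 64-bit largesize follows the type; size == 0 means the box runs to the end of the file) is easiest to see as a tiny header reader. The sketch below is standalone and illustrative; read_box_size is a hypothetical helper, not the SRS parser.

// Illustrative reader for the box header rule described above; not the SRS code.
#include <cstdint>
#include <cstddef>

static uint32_t be32(const uint8_t* p) {
    return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) | (uint32_t(p[2]) << 8) | uint32_t(p[3]);
}
static uint64_t be64(const uint8_t* p) {
    return (uint64_t(be32(p)) << 32) | uint64_t(be32(p + 4));
}

// Returns the total box size; 0 means the box extends to the end of the file.
// *header_bytes becomes 8 for a compact header or 16 when largesize is present.
static uint64_t read_box_size(const uint8_t* buf, size_t len, size_t* header_bytes, uint32_t* type) {
    if (len < 8) return 0;
    uint32_t size = be32(buf);
    *type = be32(buf + 4); // e.g. 0x66747970 is 'ftyp'.
    if (size == 1 && len >= 16) {
        *header_bytes = 16;
        return be64(buf + 8); // actual size lives in the 64-bit largesize.
    }
    *header_bytes = 8;
    return size; // size == 0: last box in the file, read until EOF.
}

int main() {
    const uint8_t ftyp[8] = {0x00, 0x00, 0x00, 0x18, 'f', 't', 'y', 'p'};
    size_t hdr = 0;
    uint32_t type = 0;
    uint64_t total = read_box_size(ftyp, sizeof(ftyp), &hdr, &type);
    return (total == 24 && hdr == 8 && type == 0x66747970) ? 0 : 1;
}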
<nl> uint8_t version ; <nl> - / / a map of flags <nl> + / / A map of flags <nl> uint32_t flags ; <nl> public : <nl> SrsMp4FullBox ( ) ; <nl> class SrsMp4FullBox : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 4 . 3 File Type Box ( ftyp ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 17 <nl> - * Files written to this version of this specification must contain a file - type box . For compatibility with an earlier <nl> - * version of this specification , files may be conformant to this specification and not contain a file - type box . Files <nl> - * with no file - type box should be read as if they contained an FTYP box with Major_brand = ' mp41 ' , minor_version = 0 , and <nl> - * the single compatible brand ' mp41 ' . <nl> - * / <nl> + / / 4 . 3 File Type Box ( ftyp ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 17 <nl> + / / Files written to this version of this specification must contain a file - type box . For compatibility with an earlier <nl> + / / version of this specification , files may be conformant to this specification and not contain a file - type box . Files <nl> + / / with no file - type box should be read as if they contained an FTYP box with Major_brand = ' mp41 ' , minor_version = 0 , and <nl> + / / The single compatible brand ' mp41 ' . <nl> class SrsMp4FileTypeBox : public SrsMp4Box <nl> { <nl> public : <nl> - / / a brand identifier <nl> + / / A brand identifier <nl> SrsMp4BoxBrand major_brand ; <nl> - / / an informative integer for the minor version of the major brand <nl> + / / An informative integer for the minor version of the major brand <nl> uint32_t minor_version ; <nl> private : <nl> - / / a list , to the end of the box , of brands <nl> + / / A list , to the end of the box , of brands <nl> std : : vector < SrsMp4BoxBrand > compatible_brands ; <nl> public : <nl> SrsMp4FileTypeBox ( ) ; <nl> class SrsMp4FileTypeBox : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 16 . 2 Segment Type Box ( styp ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 105 <nl> - * If segments are stored in separate files ( e . g . on a standard HTTP server ) it is recommended that these <nl> - * ' segment files ' contain a segment - type box , which must be first if present , to enable identification of those files , <nl> - * and declaration of the specifications with which they are compliant . <nl> - * / <nl> + / / 8 . 16 . 2 Segment Type Box ( styp ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 105 <nl> + / / If segments are stored in separate files ( e . g . on a standard HTTP server ) it is recommended that these <nl> + / / ' segment files ' contain a segment - type box , which must be first if present , to enable identification of those files , <nl> + / / And declaration of the specifications with which they are compliant . <nl> class SrsMp4SegmentTypeBox : public SrsMp4FileTypeBox <nl> { <nl> public : <nl> class SrsMp4SegmentTypeBox : public SrsMp4FileTypeBox <nl> virtual ~ SrsMp4SegmentTypeBox ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 4 Movie Fragment Box ( moof ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 66 <nl> - * The movie fragments extend the presentation in time . They provide the information that would previously have <nl> - * been in the Movie Box . 
The actual samples are in Media Data Boxes , as usual , if they are in the same file . <nl> - * The data reference index is in the sample description , so it is possible to build incremental presentations <nl> - * where the media data is in files other than the file containing the Movie Box . <nl> - * / <nl> + / / 8 . 8 . 4 Movie Fragment Box ( moof ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 66 <nl> + / / The movie fragments extend the presentation in time . They provide the information that would previously have <nl> + / / been in the Movie Box . The actual samples are in Media Data Boxes , as usual , if they are in the same file . <nl> + / / The data reference index is in the sample description , so it is possible to build incremental presentations <nl> + / / where the media data is in files other than the file containing the Movie Box . <nl> class SrsMp4MovieFragmentBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4MovieFragmentBox : public SrsMp4Box <nl> virtual void set_traf ( SrsMp4TrackFragmentBox * v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 5 Movie Fragment Header Box ( mfhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 67 <nl> - * The movie fragment header contains a sequence number , as a safety check . The sequence number usually <nl> - * starts at 1 and must increase for each movie fragment in the file , in the order in which they occur . This allows <nl> - * readers to verify integrity of the sequence ; it is an error to construct a file where the fragments are out of <nl> - * sequence . <nl> - * / <nl> + / / 8 . 8 . 5 Movie Fragment Header Box ( mfhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 67 <nl> + / / The movie fragment header contains a sequence number , as a safety check . The sequence number usually <nl> + / / starts at 1 and must increase for each movie fragment in the file , in the order in which they occur . This allows <nl> + / / readers to verify integrity of the sequence ; it is an error to construct a file where the fragments are out of <nl> + / / sequence . <nl> class SrsMp4MovieFragmentHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / the ordinal number of this fragment , in increasing order <nl> + / / The ordinal number of this fragment , in increasing order <nl> uint32_t sequence_number ; <nl> public : <nl> SrsMp4MovieFragmentHeaderBox ( ) ; <nl> class SrsMp4MovieFragmentHeaderBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 6 Track Fragment Box ( traf ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 67 <nl> - * Within the movie fragment there is a set of track fragments , zero or more per track . The track fragments in <nl> - * turn contain zero or more track runs , each of which document a contiguous run of samples for that track . <nl> - * Within these structures , many fields are optional and can be defaulted . <nl> - * / <nl> + / / 8 . 8 . 6 Track Fragment Box ( traf ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 67 <nl> + / / Within the movie fragment there is a set of track fragments , zero or more per track . The track fragments in <nl> + / / turn contain zero or more track runs , each of which document a contiguous run of samples for that track . <nl> + / / Within these structures , many fields are optional and can be defaulted . 
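One practical consequence of the mfhd rule above (the fragment sequence_number must increase in file order) is that a reader can sanity-check fragment order while scanning moof boxes. A small illustrative check, not part of the SRS reader:

// Sketch of the integrity check implied by the mfhd comment above: each movie
// fragment's sequence_number must be larger than the previous one.
#include <cstdint>
#include <vector>

bool fragments_in_order(const std::vector<uint32_t>& sequence_numbers) {
    uint32_t last = 0; // sequence numbers usually start at 1.
    for (uint32_t sn : sequence_numbers) {
        if (sn <= last) {
            return false; // out-of-sequence fragment: the file is malformed.
        }
        last = sn;
    }
    return true;
}

int main() {
    return fragments_in_order({1, 2, 3, 5}) ? 0 : 1; // strictly increasing is enough.
}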
<nl> class SrsMp4TrackFragmentBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4TrackFragmentBox : public SrsMp4Box <nl> virtual void set_trun ( SrsMp4TrackFragmentRunBox * v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * The tf_flags of tfhd . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 68 <nl> - * / <nl> + / / The tf_flags of tfhd . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 68 <nl> enum SrsMp4TfhdFlags <nl> { <nl> - / * * <nl> - * indicates the presence of the base - data - offset field . This provides <nl> - * an explicit anchor for the data offsets in each track run ( see below ) . If not provided , the base - data - <nl> - * offset for the first track in the movie fragment is the position of the first byte of the enclosing Movie <nl> - * Fragment Box , and for second and subsequent track fragments , the default is the end of the data <nl> - * defined by the preceding fragment . Fragments ' inheriting ' their offset in this way must all use <nl> - * the same data - reference ( i . e . , the data for these tracks must be in the same file ) . <nl> - * / <nl> + / / indicates the presence of the base - data - offset field . This provides <nl> + / / An explicit anchor for the data offsets in each track run ( see below ) . If not provided , the base - data - <nl> + / / offset for the first track in the movie fragment is the position of the first byte of the enclosing Movie <nl> + / / Fragment Box , and for second and subsequent track fragments , the default is the end of the data <nl> + / / defined by the preceding fragment . Fragments ' inheriting ' their offset in this way must all use <nl> + / / The same data - reference ( i . e . , the data for these tracks must be in the same file ) . <nl> SrsMp4TfhdFlagsBaseDataOffset = 0x000001 , <nl> - / * * <nl> - * indicates the presence of this field , which over - rides , in this <nl> - * fragment , the default set up in the Track Extends Box . <nl> - * / <nl> + / / indicates the presence of this field , which over - rides , in this <nl> + / / fragment , the default set up in the Track Extends Box . <nl> SrsMp4TfhdFlagsSampleDescriptionIndex = 0x000002 , <nl> SrsMp4TfhdFlagsDefaultSampleDuration = 0x000008 , <nl> SrsMp4TfhdFlagsDefautlSampleSize = 0x000010 , <nl> SrsMp4TfhdFlagsDefaultSampleFlags = 0x000020 , <nl> - / * * <nl> - * this indicates that the duration provided in either default - sample - duration , <nl> - * or by the default - duration in the Track Extends Box , is empty , i . e . that there are no samples for this <nl> - * time interval . It is an error to make a presentation that has both edit lists in the Movie Box , and empty - <nl> - * duration fragments . <nl> - * / <nl> + / / this indicates that the duration provided in either default - sample - duration , <nl> + / / or by the default - duration in the Track Extends Box , is empty , i . e . that there are no samples for this <nl> + / / time interval . It is an error to make a presentation that has both edit lists in the Movie Box , and empty - <nl> + / / duration fragments . <nl> SrsMp4TfhdFlagsDurationIsEmpty = 0x010000 , <nl> - / * * <nl> - * if base - data - offset - present is zero , this indicates that the base - data - <nl> - * offset for this track fragment is the position of the first byte of the enclosing Movie Fragment Box . <nl> - * Support for the default - base - is - moof flag is required under the ‘ iso5 ’ brand , and it shall not be used in <nl> - * brands or compatible brands earlier than iso5 . 
<nl> - * / <nl> + / / if base - data - offset - present is zero , this indicates that the base - data - <nl> + / / offset for this track fragment is the position of the first byte of the enclosing Movie Fragment Box . <nl> + / / Support for the default - base - is - moof flag is required under the ‘ iso5 ’ brand , and it shall not be used in <nl> + / / brands or compatible brands earlier than iso5 . <nl> SrsMp4TfhdFlagsDefaultBaseIsMoof = 0x020000 , <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 7 Track Fragment Header Box ( tfhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 68 <nl> - * Each movie fragment can add zero or more fragments to each track ; and a track fragment can add zero or <nl> - * more contiguous runs of samples . The track fragment header sets up information and defaults used for those <nl> - * runs of samples . <nl> - * / <nl> + / / 8 . 8 . 7 Track Fragment Header Box ( tfhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 68 <nl> + / / Each movie fragment can add zero or more fragments to each track ; and a track fragment can add zero or <nl> + / / more contiguous runs of samples . The track fragment header sets up information and defaults used for those <nl> + / / runs of samples . <nl> class SrsMp4TrackFragmentHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> uint32_t track_id ; <nl> / / all the following are optional fields <nl> public : <nl> - / / the base offset to use when calculating data offsets <nl> + / / The base offset to use when calculating data offsets <nl> uint64_t base_data_offset ; <nl> uint32_t sample_description_index ; <nl> uint32_t default_sample_duration ; <nl> class SrsMp4TrackFragmentHeaderBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 12 Track fragment decode time ( tfdt ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 72 <nl> - * The Track Fragment Base Media Decode Time Box provides the absolute decode time , measured on <nl> - * the media timeline , of the first sample in decode order in the track fragment . This can be useful , for example , <nl> - * when performing random access in a file ; it is not necessary to sum the sample durations of all preceding <nl> - * samples in previous fragments to find this value ( where the sample durations are the deltas in the Decoding <nl> - * Time to Sample Box and the sample_durations in the preceding track runs ) . <nl> - * / <nl> + / / 8 . 8 . 12 Track fragment decode time ( tfdt ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 72 <nl> + / / The Track Fragment Base Media Decode Time Box provides the absolute decode time , measured on <nl> + / / The media timeline , of the first sample in decode order in the track fragment . This can be useful , for example , <nl> + / / when performing random access in a file ; it is not necessary to sum the sample durations of all preceding <nl> + / / samples in previous fragments to find this value ( where the sample durations are the deltas in the Decoding <nl> + / / Time to Sample Box and the sample_durations in the preceding track runs ) . 
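The tf_flags listed above determine which optional tfhd fields actually follow track_ID, so the body size varies per fragment. The sketch below computes that optional size from the flag constants shown in this header; it is an illustration of the layout, not the SRS decoder.

// Which optional tfhd fields are present for a given tf_flags value.
// Illustrative only; field widths follow ISO_IEC_14496-12.
#include <cstdint>
#include <cstdio>

enum {
    kTfhdBaseDataOffset         = 0x000001,
    kTfhdSampleDescriptionIndex = 0x000002,
    kTfhdDefaultSampleDuration  = 0x000008,
    kTfhdDefaultSampleSize      = 0x000010,
    kTfhdDefaultSampleFlags     = 0x000020,
    kTfhdDurationIsEmpty        = 0x010000,
    kTfhdDefaultBaseIsMoof      = 0x020000,
};

// Size in bytes of the optional part of the tfhd body (after version/flags
// and track_ID), which depends entirely on tf_flags.
int tfhd_optional_bytes(uint32_t flags) {
    int n = 0;
    if (flags & kTfhdBaseDataOffset)         n += 8; // 64-bit explicit anchor.
    if (flags & kTfhdSampleDescriptionIndex) n += 4;
    if (flags & kTfhdDefaultSampleDuration)  n += 4;
    if (flags & kTfhdDefaultSampleSize)      n += 4;
    if (flags & kTfhdDefaultSampleFlags)     n += 4;
    // duration-is-empty and default-base-is-moof carry no extra bytes; the
    // latter only changes how data offsets are anchored.
    return n;
}

int main() {
    // A typical fMP4 segment: defaults for duration/size/flags, base is the moof.
    uint32_t flags = kTfhdDefaultSampleDuration | kTfhdDefaultSampleSize |
                     kTfhdDefaultSampleFlags | kTfhdDefaultBaseIsMoof;
    printf("optional tfhd bytes: %d\n", tfhd_optional_bytes(flags)); // prints 12
    return 0;
}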
<nl> class SrsMp4TrackFragmentDecodeTimeBox : public SrsMp4FullBox <nl> { <nl> public : <nl> class SrsMp4TrackFragmentDecodeTimeBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * The tr_flags for trun <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 69 <nl> - * / <nl> + / / The tr_flags for trun <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 69 <nl> enum SrsMp4TrunFlags <nl> { <nl> / / data - offset - present . <nl> enum SrsMp4TrunFlags <nl> SrsMp4TrunFlagsFirstSample = 0x000004 , <nl> / / indicates that each sample has its own duration , otherwise the default is used . <nl> SrsMp4TrunFlagsSampleDuration = 0x000100 , <nl> - / / each sample has its own size , otherwise the default is used . <nl> + / / Each sample has its own size , otherwise the default is used . <nl> SrsMp4TrunFlagsSampleSize = 0x000200 , <nl> - / / each sample has its own flags , otherwise the default is used . <nl> + / / Each sample has its own flags , otherwise the default is used . <nl> SrsMp4TrunFlagsSampleFlag = 0x000400 , <nl> - / / each sample has a composition time offset ( e . g . as used for I / P / B video in MPEG ) . <nl> + / / Each sample has a composition time offset ( e . g . as used for I / P / B video in MPEG ) . <nl> SrsMp4TrunFlagsSampleCtsOffset = 0x000800 , <nl> } ; <nl> <nl> - / * * <nl> - * Entry for trun . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 69 <nl> - * / <nl> + / / Entry for trun . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 69 <nl> struct SrsMp4TrunEntry <nl> { <nl> SrsMp4FullBox * owner ; <nl> struct SrsMp4TrunEntry <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 8 Track Fragment Run Box ( trun ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 69 <nl> - * Within the Track Fragment Box , there are zero or more Track Run Boxes . If the duration - is - empty flag is set in <nl> - * the tf_flags , there are no track runs . A track run documents a contiguous set of samples for a track . <nl> - * / <nl> + / / 8 . 8 . 8 Track Fragment Run Box ( trun ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 69 <nl> + / / Within the Track Fragment Box , there are zero or more Track Run Boxes . If the duration - is - empty flag is set in <nl> + / / The tf_flags , there are no track runs . A track run documents a contiguous set of samples for a track . <nl> class SrsMp4TrackFragmentRunBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / the number of samples being added in this run ; also the number of rows in the following <nl> + / / The number of samples being added in this run ; also the number of rows in the following <nl> / / table ( the rows can be empty ) <nl> uint32_t sample_count ; <nl> - / / the following are optional fields <nl> + / / The following are optional fields <nl> public : <nl> / / added to the implicit or explicit data_offset established in the track fragment header . <nl> int32_t data_offset ; <nl> class SrsMp4TrackFragmentRunBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 1 . 1 Media Data Box ( mdat ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 29 <nl> - * This box contains the media data . 
In video tracks , this box would contain video frames . <nl> - * A presentation may contain zero or more Media Data Boxes . The actual media data follows the type field ; <nl> - * its structure is described by the metadata ( see particularly the sample table , subclause 8 . 5 , and the <nl> - * item location box , subclause 8 . 11 . 3 ) . <nl> - * <nl> - * @ remark The mdat box only decode and encode the header , <nl> - * so user must read and write the data by yourself . <nl> - * To encode mdat : <nl> - * SrsMp4MediaDataBox * mdat = new SrsMp4MediaDataBox ( ) ; <nl> - * mdat - > nb_data = 1024000 ; <nl> - * <nl> - * char * buffer = new char [ mdat - > sz_header ( ) ] ; <nl> - * SrsBuffer * buf = new SrsBuffer ( buffer ) ; <nl> - * mdat - > encode ( buf ) ; <nl> - * <nl> - * file - > write ( buffer , mdat - > sz_header ( ) ) ; / / Write the mdat box header . <nl> - * file - > write ( data , size ) ; / / Write the mdat box data . <nl> - * <nl> - * To decode mdat : <nl> - * SrsMp4MediaDataBox * mdat = new SrsMp4MediaDataBox ( ) ; <nl> - * char * buffer = new char [ mdat - > sz_header ( ) ] ; <nl> - * SrsBuffer * buf = . . . ; / / Read mdat - > sz_header ( ) data from io . <nl> - * <nl> - * mdat - > decode ( buf ) ; / / The buf should be empty now . <nl> - * file - > lseek ( mdat - > nb_data , SEEK_CUR ) ; / / Skip the mdat data in file . <nl> - * <nl> - * To discovery any box from file : <nl> - * SrsSimpleStream * stream = new SrsSimpleStream ( ) ; <nl> - * SrsBuffer * buf = new SrsBuffer ( stream . . . ) ; / / Create read buffer from stream . <nl> - * <nl> - * / / We don ' t know what ' s the next box , so try to read 4bytes and discovery it . <nl> - * append ( file , stream , 4 ) ; / / Append 4bytes from file to stream . <nl> - * <nl> - * SrsMp4Box * box = NULL ; <nl> - * SrsMp4Box : : discovery ( buf , & box ) ; <nl> - * <nl> - * required = ( box - > is_mdat ( ) ? box - > sz_header ( ) : box - > sz ( ) ) ; / / Now we know how many bytes we needed . <nl> - * append ( file , stream , required ) ; <nl> - * box - > decode ( buf ) ; <nl> - * <nl> - * if ( box - > is_mdat ( ) ) { <nl> - * file - > lseek ( mdat - > nb_data , SEEK_CUR ) ; / / Skip the mdat data in file . <nl> - * } <nl> - * / <nl> + / / 8 . 1 . 1 Media Data Box ( mdat ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 29 <nl> + / / This box contains the media data . In video tracks , this box would contain video frames . <nl> + / / A presentation may contain zero or more Media Data Boxes . The actual media data follows the type field ; <nl> + / / its structure is described by the metadata ( see particularly the sample table , subclause 8 . 5 , and the <nl> + / / item location box , subclause 8 . 11 . 3 ) . <nl> + / / <nl> + / / @ remark The mdat box only decode and encode the header , <nl> + / / so user must read and write the data by yourself . <nl> + / / To encode mdat : <nl> + / / SrsMp4MediaDataBox * mdat = new SrsMp4MediaDataBox ( ) ; <nl> + / / mdat - > nb_data = 1024000 ; <nl> + / / <nl> + / / char * buffer = new char [ mdat - > sz_header ( ) ] ; <nl> + / / SrsBuffer * buf = new SrsBuffer ( buffer ) ; <nl> + / / mdat - > encode ( buf ) ; <nl> + / / <nl> + / / file - > write ( buffer , mdat - > sz_header ( ) ) ; / / Write the mdat box header . <nl> + / / file - > write ( data , size ) ; / / Write the mdat box data . 
<nl> + / / <nl> + / / To decode mdat : <nl> + / / SrsMp4MediaDataBox * mdat = new SrsMp4MediaDataBox ( ) ; <nl> + / / char * buffer = new char [ mdat - > sz_header ( ) ] ; <nl> + / / SrsBuffer * buf = . . . ; / / Read mdat - > sz_header ( ) data from io . <nl> + / / <nl> + / / mdat - > decode ( buf ) ; / / The buf should be empty now . <nl> + / / file - > lseek ( mdat - > nb_data , SEEK_CUR ) ; / / Skip the mdat data in file . <nl> + / / <nl> + / / To discovery any box from file : <nl> + / / SrsSimpleStream * stream = new SrsSimpleStream ( ) ; <nl> + / / SrsBuffer * buf = new SrsBuffer ( stream . . . ) ; / / Create read buffer from stream . <nl> + / / <nl> + / / / / We don ' t know what ' s the next box , so try to read 4bytes and discovery it . <nl> + / / append ( file , stream , 4 ) ; / / Append 4bytes from file to stream . <nl> + / / <nl> + / / SrsMp4Box * box = NULL ; <nl> + / / SrsMp4Box : : discovery ( buf , & box ) ; <nl> + / / <nl> + / / required = ( box - > is_mdat ( ) ? box - > sz_header ( ) : box - > sz ( ) ) ; / / Now we know how many bytes we needed . <nl> + / / append ( file , stream , required ) ; <nl> + / / box - > decode ( buf ) ; <nl> + / / <nl> + / / if ( box - > is_mdat ( ) ) { <nl> + / / file - > lseek ( mdat - > nb_data , SEEK_CUR ) ; / / Skip the mdat data in file . <nl> + / / } <nl> class SrsMp4MediaDataBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4MediaDataBox : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 1 . 2 Free Space Box ( free or skip ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 29 <nl> - * / <nl> + / / 8 . 1 . 2 Free Space Box ( free or skip ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 29 <nl> class SrsMp4FreeSpaceBox : public SrsMp4Box <nl> { <nl> private : <nl> class SrsMp4FreeSpaceBox : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 2 . 1 Movie Box ( moov ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 30 <nl> - * The metadata for a presentation is stored in the single Movie Box which occurs at the top - level of a file . <nl> - * Normally this box is close to the beginning or end of the file , though this is not required . <nl> - * / <nl> + / / 8 . 2 . 1 Movie Box ( moov ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 30 <nl> + / / The metadata for a presentation is stored in the single Movie Box which occurs at the top - level of a file . <nl> + / / Normally this box is close to the beginning or end of the file , though this is not required . <nl> class SrsMp4MovieBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4MovieBox : public SrsMp4Box <nl> virtual srs_error_t decode_header ( SrsBuffer * buf ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 2 . 2 Movie Header Box ( mvhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 31 <nl> - * / <nl> + / / 8 . 2 . 2 Movie Header Box ( mvhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 31 <nl> class SrsMp4MovieHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that declares the creation time of the presentation ( in seconds since <nl> + / / An integer that declares the creation time of the presentation ( in seconds since <nl> / / midnight , Jan . 
1 , 1904 , in UTC time ) <nl> uint64_t creation_time ; <nl> - / / an integer that declares the most recent time the presentation was modified ( in <nl> + / / An integer that declares the most recent time the presentation was modified ( in <nl> / / seconds since midnight , Jan . 1 , 1904 , in UTC time ) <nl> uint64_t modification_time ; <nl> public : <nl> - / / an integer that specifies the time - scale for the entire presentation ; this is the number of <nl> + / / An integer that specifies the time - scale for the entire presentation ; this is the number of <nl> / / time units that pass in one second . For example , a time coordinate system that measures time in <nl> / / sixtieths of a second has a time scale of 60 . <nl> uint32_t timescale ; <nl> - / / an integer that declares length of the presentation ( in the indicated timescale ) . This property <nl> + / / An integer that declares length of the presentation ( in the indicated timescale ) . This property <nl> / / is derived from the presentation ’ s tracks : the value of this field corresponds to the duration of the <nl> / / longest track in the presentation . If the duration cannot be determined then duration is set to all 1s . <nl> uint64_t duration_in_tbn ; <nl> public : <nl> - / / a fixed point 16 . 16 number that indicates the preferred rate to play the presentation ; 1 . 0 <nl> + / / A fixed point 16 . 16 number that indicates the preferred rate to play the presentation ; 1 . 0 <nl> / / ( 0x00010000 ) is normal forward playback <nl> uint32_t rate ; <nl> - / / a fixed point 8 . 8 number that indicates the preferred playback volume . 1 . 0 ( 0x0100 ) is full volume . <nl> + / / A fixed point 8 . 8 number that indicates the preferred playback volume . 1 . 0 ( 0x0100 ) is full volume . <nl> uint16_t volume ; <nl> uint16_t reserved0 ; <nl> uint64_t reserved1 ; <nl> - / / a transformation matrix for the video ; ( u , v , w ) are restricted here to ( 0 , 0 , 1 ) , hex values ( 0 , 0 , 0x40000000 ) . <nl> + / / A transformation matrix for the video ; ( u , v , w ) are restricted here to ( 0 , 0 , 1 ) , hex values ( 0 , 0 , 0x40000000 ) . <nl> int32_t matrix [ 9 ] ; <nl> uint32_t pre_defined [ 6 ] ; <nl> - / / a non - zero integer that indicates a value to use for the track ID of the next track to be <nl> + / / A non - zero integer that indicates a value to use for the track ID of the next track to be <nl> / / added to this presentation . Zero is not a valid track ID value . The value of next_track_ID shall be <nl> / / larger than the largest track - ID in use . If this value is equal to all 1s ( 32 - bit maxint ) , and a new media <nl> / / track is to be added , then a search must be made in the file for an unused track identifier . <nl> enum SrsMp4TrackType <nl> SrsMp4TrackTypeVideo = 0x02 , <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 1 Movie Extends Box ( mvex ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 64 <nl> - * This box warns readers that there might be Movie Fragment Boxes in this file . To know of all samples in the <nl> - * tracks , these Movie Fragment Boxes must be found and scanned in order , and their information logically <nl> - * added to that found in the Movie Box . <nl> - * / <nl> + / / 8 . 8 . 1 Movie Extends Box ( mvex ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 64 <nl> + / / This box warns readers that there might be Movie Fragment Boxes in this file . 
To know of all samples in the <nl> + / / tracks , these Movie Fragment Boxes must be found and scanned in order , and their information logically <nl> + / / added to that found in the Movie Box . <nl> class SrsMp4MovieExtendsBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4MovieExtendsBox : public SrsMp4Box <nl> virtual void set_trex ( SrsMp4TrackExtendsBox * v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 8 . 3 Track Extends Box ( trex ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 65 <nl> - * / <nl> + / / 8 . 8 . 3 Track Extends Box ( trex ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 65 <nl> class SrsMp4TrackExtendsBox : public SrsMp4FullBox <nl> { <nl> public : <nl> / / identifies the track ; this shall be the track ID of a track in the Movie Box <nl> uint32_t track_ID ; <nl> - / / these fields set up defaults used in the track fragments . <nl> + / / These fields set up defaults used in the track fragments . <nl> uint32_t default_sample_description_index ; <nl> uint32_t default_sample_duration ; <nl> uint32_t default_sample_size ; <nl> class SrsMp4TrackExtendsBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 3 . 1 Track Box ( trak ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 32 <nl> - * This is a container box for a single track of a presentation . A presentation consists of one or more tracks . <nl> - * Each track is independent of the other tracks in the presentation and carries its own temporal and spatial <nl> - * information . Each track will contain its associated Media Box . <nl> - * / <nl> + / / 8 . 3 . 1 Track Box ( trak ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 32 <nl> + / / This is a container box for a single track of a presentation . A presentation consists of one or more tracks . <nl> + / / Each track is independent of the other tracks in the presentation and carries its own temporal and spatial <nl> + / / information . Each track will contain its associated Media Box . <nl> class SrsMp4TrackBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4TrackBox : public SrsMp4Box <nl> virtual SrsMp4AudioSampleEntry * mp4a ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 3 . 2 Track Header Box ( tkhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 32 <nl> - * / <nl> + / / 8 . 3 . 2 Track Header Box ( tkhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 32 <nl> class SrsMp4TrackHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that declares the creation time of the presentation ( in seconds since <nl> + / / An integer that declares the creation time of the presentation ( in seconds since <nl> / / midnight , Jan . 1 , 1904 , in UTC time ) <nl> uint64_t creation_time ; <nl> - / / an integer that declares the most recent time the presentation was modified ( in <nl> + / / An integer that declares the most recent time the presentation was modified ( in <nl> / / seconds since midnight , Jan . 1 , 1904 , in UTC time ) <nl> uint64_t modification_time ; <nl> - / / an integer that uniquely identifies this track over the entire life - time of this presentation . <nl> + / / An integer that uniquely identifies this track over the entire life - time of this presentation . <nl> / / Track IDs are never re - used and cannot be zero . 
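Note that creation_time and modification_time above count seconds since midnight, Jan. 1, 1904 (UTC), not the Unix epoch. A small conversion sketch follows; the 2082844800-second constant is the distance between the two epochs, and the sample value is made up for illustration.

// Convert an MP4 creation/modification time (1904 epoch) to the Unix epoch.
// Illustrative only; clamps times before 1970 for simplicity.
#include <cstdint>
#include <cstdio>
#include <ctime>

static const uint64_t kMp4EpochToUnix = 2082844800ULL; // seconds from 1904-01-01 to 1970-01-01.

time_t mp4_time_to_unix(uint64_t mp4_seconds) {
    if (mp4_seconds < kMp4EpochToUnix) {
        return 0; // before 1970; clamp in this sketch.
    }
    return (time_t)(mp4_seconds - kMp4EpochToUnix);
}

int main() {
    uint64_t creation_time = 3677265600ULL; // example value from an mvhd/tkhd box.
    time_t t = mp4_time_to_unix(creation_time);
    printf("%s", asctime(gmtime(&t)));
    return 0;
}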
<nl> uint32_t track_ID ; <nl> uint32_t reserved0 ; <nl> - / / an integer that indicates the duration of this track ( in the timescale indicated in the Movie <nl> + / / An integer that indicates the duration of this track ( in the timescale indicated in the Movie <nl> / / Header Box ) . The value of this field is equal to the sum of the durations of all of the track ’ s edits . If <nl> - / / there is no edit list , then the duration is the sum of the sample durations , converted into the timescale <nl> + / / there is no edit list , then the duration is the sum of the sample durations , converted into the timescale <nl> / / in the Movie Header Box . If the duration of this track cannot be determined then duration is set to all <nl> / / 1s . <nl> uint64_t duration ; <nl> class SrsMp4TrackHeaderBox : public SrsMp4FullBox <nl> / / specifies the front - to - back ordering of video tracks ; tracks with lower numbers are closer to the <nl> / / viewer . 0 is the normal value , and - 1 would be in front of track 0 , and so on . <nl> int16_t layer ; <nl> - / / an integer that specifies a group or collection of tracks . If this field is 0 there is no <nl> + / / An integer that specifies a group or collection of tracks . If this field is 0 there is no <nl> / / information on possible relations to other tracks . If this field is not 0 , it should be the same for tracks <nl> / / that contain alternate data for one another and different for tracks belonging to different such groups . <nl> / / Only one track within an alternate group should be played or streamed at any one time , and must be <nl> / / distinguishable from other tracks in the group via attributes such as bitrate , codec , language , packet <nl> / / size etc . A group may have only one member . <nl> int16_t alternate_group ; <nl> - / / a fixed 8 . 8 value specifying the track ' s relative audio volume . Full volume is 1 . 0 ( 0x0100 ) and <nl> + / / A fixed 8 . 8 value specifying the track ' s relative audio volume . Full volume is 1 . 0 ( 0x0100 ) and <nl> / / is the normal value . Its value is irrelevant for a purely visual track . Tracks may be composed by <nl> / / combining them according to their volume , and then using the overall Movie Header Box volume <nl> / / setting ; or more complex audio composition ( e . g . MPEG - 4 BIFS ) may be used . <nl> int16_t volume ; <nl> uint16_t reserved2 ; <nl> - / / a transformation matrix for the video ; ( u , v , w ) are restricted here to ( 0 , 0 , 1 ) , hex ( 0 , 0 , 0x40000000 ) . <nl> + / / A transformation matrix for the video ; ( u , v , w ) are restricted here to ( 0 , 0 , 1 ) , hex ( 0 , 0 , 0x40000000 ) . <nl> int32_t matrix [ 9 ] ; <nl> - / / the track ' s visual presentation size as fixed - point 16 . 16 values . These need <nl> + / / The track ' s visual presentation size as fixed - point 16 . 16 values . These need <nl> / / not be the same as the pixel dimensions of the images , which is documented in the sample <nl> / / description ( s ) ; all images in the sequence are scaled to this size , before any overall transformation of <nl> - / / the track represented by the matrix . The pixel dimensions of the images are the default values . <nl> + / / the track represented by the matrix . The pixel dimensions of the images are the default values .
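For illustration only, a minimal standalone sketch (hypothetical helpers, not SRS API) of how the fixed-point 16.16 and 8.8 fields described above (rate, width, height, volume) map to ordinary numbers:

#include <cstdint>

// Hypothetical helpers: interpret fixed-point 16.16 and 8.8 fields as doubles.
static inline double fixed16_16_to_double(int32_t v) { return v / 65536.0; }
static inline double fixed8_8_to_double(int16_t v) { return v / 256.0; }

// Example: a rate of 0x00010000 is 1.0 (normal playback), a volume of 0x0100
// is 1.0 (full volume), and a width of 0x05000000 is 1280.0 pixels.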
<nl> int32_t width ; <nl> int32_t height ; <nl> public : <nl> class SrsMp4TrackHeaderBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 5 Edit Box ( edts ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 54 <nl> - * An Edit Box maps the presentation time - line to the media time - line as it is stored in the file . <nl> - * The Edit Box is a container for the edit lists . <nl> - * / <nl> + / / 8 . 6 . 5 Edit Box ( edts ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 54 <nl> + / / An Edit Box maps the presentation time - line to the media time - line as it is stored in the file . <nl> + / / The Edit Box is a container for the edit lists . <nl> class SrsMp4EditBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4EditBox : public SrsMp4Box <nl> virtual ~ SrsMp4EditBox ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 6 Edit List Box <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 55 <nl> - * / <nl> + / / 8 . 6 . 6 Edit List Box <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 55 <nl> struct SrsMp4ElstEntry <nl> { <nl> public : <nl> - / / an integer that specifies the duration of this edit segment in units of the timescale <nl> + / / An integer that specifies the duration of this edit segment in units of the timescale <nl> / / in the Movie Header Box <nl> uint64_t segment_duration ; <nl> - / / an integer containing the starting time within the media of this edit segment ( in media time <nl> + / / An integer containing the starting time within the media of this edit segment ( in media time <nl> / / scale units , in composition time ) . If this field is set to – 1 , it is an empty edit . The last edit in a track <nl> / / shall never be an empty edit . Any difference between the duration in the Movie Header Box , and the <nl> / / track ’ s duration is expressed as an implicit empty edit at the end . <nl> int64_t media_time ; <nl> public : <nl> / / specifies the relative rate at which to play the media corresponding to this edit segment . If this value is 0 , <nl> - / / then the edit is specifying a ‘ dwell ’ : the media at media - time is presented for the segment - duration . Otherwise <nl> + / / then the edit is specifying a ‘ dwell ’ : the media at media - time is presented for the segment - duration . Otherwise <nl> / / this field shall contain the value 1 . <nl> int16_t media_rate_integer ; <nl> int16_t media_rate_fraction ; <nl> struct SrsMp4ElstEntry <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 6 Edit List Box ( elst ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 54 <nl> - * This box contains an explicit timeline map . Each entry defines part of the track time - line : by mapping part of <nl> - * the media time - line , or by indicating ‘ empty ’ time , or by defining a ‘ dwell ’ , where a single time - point in the <nl> - * media is held for a period . <nl> - * / <nl> + / / 8 . 6 . 6 Edit List Box ( elst ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 54 <nl> + / / This box contains an explicit timeline map . Each entry defines part of the track time - line : by mapping part of <nl> + / / the media time - line , or by indicating ‘ empty ’ time , or by defining a ‘ dwell ’ , where a single time - point in the <nl> + / / media is held for a period .
<nl> class SrsMp4EditListBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table <nl> + / / An integer that gives the number of entries in the following table <nl> std : : vector < SrsMp4ElstEntry > entries ; <nl> public : <nl> SrsMp4EditListBox ( ) ; <nl> class SrsMp4EditListBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 1 Media Box ( mdia ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 36 <nl> - * The media declaration container contains all the objects that declare information about the media data within a <nl> - * track . <nl> - * / <nl> + / / 8 . 4 . 1 Media Box ( mdia ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 36 <nl> + / / The media declaration container contains all the objects that declare information about the media data within a <nl> + / / track . <nl> class SrsMp4MediaBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4MediaBox : public SrsMp4Box <nl> virtual void set_minf ( SrsMp4MediaInformationBox * v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 2 Media Header Box ( mdhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 36 <nl> - * The media declaration container contains all the objects that declare information about the media data within a <nl> - * track . <nl> - * / <nl> + / / 8 . 4 . 2 Media Header Box ( mdhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 36 <nl> + / / The media declaration container contains all the objects that declare information about the media data within a <nl> + / / track . <nl> class SrsMp4MediaHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that declares the creation time of the presentation ( in seconds since <nl> + / / An integer that declares the creation time of the presentation ( in seconds since <nl> / / midnight , Jan . 1 , 1904 , in UTC time ) <nl> uint64_t creation_time ; <nl> - / / an integer that declares the most recent time the presentation was modified ( in <nl> + / / An integer that declares the most recent time the presentation was modified ( in <nl> / / seconds since midnight , Jan . 1 , 1904 , in UTC time ) <nl> uint64_t modification_time ; <nl> - / / an integer that specifies the time - scale for the entire presentation ; this is the number of <nl> + / / An integer that specifies the time - scale for the entire presentation ; this is the number of <nl> / / time units that pass in one second . For example , a time coordinate system that measures time in <nl> / / sixtieths of a second has a time scale of 60 . <nl> uint32_t timescale ; <nl> - / / an integer that declares length of the presentation ( in the indicated timescale ) . This property <nl> + / / An integer that declares length of the presentation ( in the indicated timescale ) . This property <nl> / / is derived from the presentation ’ s tracks : the value of this field corresponds to the duration of the <nl> / / longest track in the presentation . If the duration cannot be determined then duration is set to all 1s . <nl> uint64_t duration ; <nl> private : <nl> - / / the language code for this media . See ISO 639 - 2 / T for the set of three character <nl> + / / The language code for this media . See ISO 639 - 2 / T for the set of three character <nl> / / codes . Each character is packed as the difference between its ASCII value and 0x60 . 
Since the code <nl> / / is confined to being three lower - case letters , these values are strictly positive . <nl> uint16_t language ; <nl> class SrsMp4MediaHeaderBox : public SrsMp4FullBox <nl> SrsMp4MediaHeaderBox ( ) ; <nl> virtual ~ SrsMp4MediaHeaderBox ( ) ; <nl> public : <nl> - / / the language code for this media . See ISO 639 - 2 / T for the set of three character <nl> + / / The language code for this media . See ISO 639 - 2 / T for the set of three character <nl> / / codes . Each character is packed as the difference between its ASCII value and 0x60 . Since the code <nl> / / is confined to being three lower - case letters , these values are strictly positive . <nl> / / @ param v The ASCII , for example , ' u ' . <nl> class SrsMp4MediaHeaderBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 3 Handler Reference Box ( hdlr ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 37 <nl> - * This box within a Media Box declares the process by which the media - data in the track is presented , and thus , <nl> - * the nature of the media in a track . For example , a video track would be handled by a video handler . <nl> - * / <nl> + / / 8 . 4 . 3 Handler Reference Box ( hdlr ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 37 <nl> + / / This box within a Media Box declares the process by which the media - data in the track is presented , and thus , <nl> + / / The nature of the media in a track . For example , a video track would be handled by a video handler . <nl> class SrsMp4HandlerReferenceBox : public SrsMp4FullBox <nl> { <nl> public : <nl> uint32_t pre_defined ; <nl> - / / an integer containing one of the following values , or a value from a derived specification : <nl> + / / An integer containing one of the following values , or a value from a derived specification : <nl> / / ‘ vide ’ , Video track <nl> / / ‘ soun ’ , Audio track <nl> SrsMp4HandlerType handler_type ; <nl> uint32_t reserved [ 3 ] ; <nl> - / / a null - terminated string in UTF - 8 characters which gives a human - readable name for the track <nl> + / / A null - terminated string in UTF - 8 characters which gives a human - readable name for the track <nl> / / type ( for debugging and inspection purposes ) . <nl> std : : string name ; <nl> public : <nl> class SrsMp4HandlerReferenceBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 4 Media Information Box ( minf ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 38 <nl> - * This box contains all the objects that declare characteristic information of the media in the track . <nl> - * / <nl> + / / 8 . 4 . 4 Media Information Box ( minf ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 38 <nl> + / / This box contains all the objects that declare characteristic information of the media in the track . <nl> class SrsMp4MediaInformationBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4MediaInformationBox : public SrsMp4Box <nl> virtual void set_stbl ( SrsMp4SampleTableBox * v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 5 . 2 Video Media Header Box ( vmhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 38 <nl> - * The video media header contains general presentation information , independent of the coding , for video <nl> - * media . 
Note that the flags field has the value 1 . <nl> - * / <nl> + / / 8 . 4 . 5 . 2 Video Media Header Box ( vmhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 38 <nl> + / / The video media header contains general presentation information , independent of the coding , for video <nl> + / / media . Note that the flags field has the value 1 . <nl> class SrsMp4VideoMeidaHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / a composition mode for this video track , from the following enumerated set , <nl> + / / A composition mode for this video track , from the following enumerated set , <nl> / / which may be extended by derived specifications : <nl> / / copy = 0 copy over the existing image <nl> uint16_t graphicsmode ; <nl> - / / a set of 3 colour values ( red , green , blue ) available for use by graphics modes <nl> + / / A set of 3 colour values ( red , green , blue ) available for use by graphics modes <nl> uint16_t opcolor [ 3 ] ; <nl> public : <nl> SrsMp4VideoMeidaHeaderBox ( ) ; <nl> class SrsMp4VideoMeidaHeaderBox : public SrsMp4FullBox <nl> virtual srs_error_t decode_header ( SrsBuffer * buf ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 4 . 5 . 3 Sound Media Header Box ( smhd ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 39 <nl> - * The sound media header contains general presentation information , independent of the coding , for audio <nl> - * media . This header is used for all tracks containing audio . <nl> - * / <nl> + / / 8 . 4 . 5 . 3 Sound Media Header Box ( smhd ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 39 <nl> + / / The sound media header contains general presentation information , independent of the coding , for audio <nl> + / / media . This header is used for all tracks containing audio . <nl> class SrsMp4SoundMeidaHeaderBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / a fixed - point 8 . 8 number that places mono audio tracks in a stereo space ; 0 is centre ( the <nl> + / / A fixed - point 8 . 8 number that places mono audio tracks in a stereo space ; 0 is centre ( the <nl> / / normal value ) ; full left is - 1 . 0 and full right is 1 . 0 . <nl> int16_t balance ; <nl> uint16_t reserved ; <nl> class SrsMp4SoundMeidaHeaderBox : public SrsMp4FullBox <nl> virtual srs_error_t decode_header ( SrsBuffer * buf ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 1 Data Information Box ( dinf ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> - * The data information box contains objects that declare the location of the media information in a track . <nl> - * / <nl> + / / 8 . 7 . 1 Data Information Box ( dinf ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> + / / The data information box contains objects that declare the location of the media information in a track . <nl> class SrsMp4DataInformationBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4DataInformationBox : public SrsMp4Box <nl> virtual void set_dref ( SrsMp4DataReferenceBox * v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 2 Data Reference Box <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> - * a 24 - bit integer with flags ; one flag is defined ( x000001 ) which means that the media <nl> - * data is in the same file as the Movie Box containing this data reference . <nl> - * / <nl> + / / 8 . 7 . 2 Data Reference Box <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . 
pdf , page 56 <nl> + / / A 24 - bit integer with flags ; one flag is defined ( x000001 ) which means that the media <nl> + / / data is in the same file as the Movie Box containing this data reference . <nl> class SrsMp4DataEntryBox : public SrsMp4FullBox <nl> { <nl> public : <nl> class SrsMp4DataEntryBox : public SrsMp4FullBox <nl> virtual ~ SrsMp4DataEntryBox ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 2 Data Reference Box ( url ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> - * / <nl> + / / 8 . 7 . 2 Data Reference Box ( url ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> class SrsMp4DataEntryUrlBox : public SrsMp4DataEntryBox <nl> { <nl> public : <nl> class SrsMp4DataEntryUrlBox : public SrsMp4DataEntryBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 2 Data Reference Box ( urn ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> - * / <nl> + / / 8 . 7 . 2 Data Reference Box ( urn ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> class SrsMp4DataEntryUrnBox : public SrsMp4DataEntryBox <nl> { <nl> public : <nl> class SrsMp4DataEntryUrnBox : public SrsMp4DataEntryBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 2 Data Reference Box ( dref ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> - * The data reference object contains a table of data references ( normally URLs ) that declare the location ( s ) of <nl> - * the media data used within the presentation . The data reference index in the sample description ties entries <nl> - * in this table to the samples in the track . A track may be split over several sources in this way . <nl> - * / <nl> + / / 8 . 7 . 2 Data Reference Box ( dref ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 56 <nl> + / / The data reference object contains a table of data references ( normally URLs ) that declare the location ( s ) of <nl> + / / The media data used within the presentation . The data reference index in the sample description ties entries <nl> + / / in this table to the samples in the track . A track may be split over several sources in this way . <nl> class SrsMp4DataReferenceBox : public SrsMp4FullBox <nl> { <nl> private : <nl> class SrsMp4DataReferenceBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 5 . 1 Sample Table Box ( stbl ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 40 <nl> - * The sample table contains all the time and data indexing of the media samples in a track . Using the tables <nl> - * here , it is possible to locate samples in time , determine their type ( e . g . I - frame or not ) , and determine their <nl> - * size , container , and offset into that container . <nl> - * / <nl> + / / 8 . 5 . 1 Sample Table Box ( stbl ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 40 <nl> + / / The sample table contains all the time and data indexing of the media samples in a track . Using the tables <nl> + / / here , it is possible to locate samples in time , determine their type ( e . g . I - frame or not ) , and determine their <nl> + / / size , container , and offset into that container . 
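As a rough illustration of the indexing that the stbl comment above describes, here is a minimal standalone sketch (hypothetical types and names, not the SRS API) that combines the stsc, stco and stsz tables to compute the file offset of one sample:

#include <cstdint>
#include <vector>

struct StscEntry { uint32_t first_chunk; uint32_t samples_per_chunk; };

// Returns the absolute file offset of `sample` (0-based), given the
// sample-to-chunk runs (stsc), per-chunk offsets (stco) and per-sample sizes (stsz).
uint64_t locate_sample(const std::vector<StscEntry>& stsc,
                       const std::vector<uint32_t>& stco,
                       const std::vector<uint32_t>& stsz,
                       uint32_t sample)
{
    uint32_t first_sample_of_run = 0; // first sample covered by the current stsc run
    for (size_t i = 0; i < stsc.size(); i++) {
        // A run covers chunks [first_chunk, next_first_chunk); first_chunk is 1-based per spec.
        uint32_t first_chunk = stsc[i].first_chunk;
        uint32_t next_first_chunk = (i + 1 < stsc.size()) ? stsc[i + 1].first_chunk : (uint32_t)stco.size() + 1;
        uint32_t samples_in_run = (next_first_chunk - first_chunk) * stsc[i].samples_per_chunk;
        if (sample < first_sample_of_run + samples_in_run) {
            uint32_t index_in_run = sample - first_sample_of_run;
            uint32_t chunk = first_chunk + index_in_run / stsc[i].samples_per_chunk; // 1-based chunk number
            uint32_t first_sample_of_chunk = first_sample_of_run + (chunk - first_chunk) * stsc[i].samples_per_chunk;
            uint64_t offset = stco[chunk - 1];
            for (uint32_t s = first_sample_of_chunk; s < sample; s++) offset += stsz[s];
            return offset;
        }
        first_sample_of_run += samples_in_run;
    }
    return 0; // sample out of range
}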
<nl> class SrsMp4SampleTableBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4SampleTableBox : public SrsMp4Box <nl> virtual srs_error_t decode_header ( SrsBuffer * buf ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 5 . 2 Sample Description Box <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 43 <nl> - * / <nl> + / / 8 . 5 . 2 Sample Description Box <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 43 <nl> class SrsMp4SampleEntry : public SrsMp4Box <nl> { <nl> public : <nl> uint8_t reserved [ 6 ] ; <nl> - / / an integer that contains the index of the data reference to use to retrieve <nl> + / / An integer that contains the index of the data reference to use to retrieve <nl> / / data associated with samples that use this sample description . Data references are stored in Data <nl> / / Reference Boxes . The index ranges from 1 to the number of data references . <nl> uint16_t data_reference_index ; <nl> class SrsMp4SampleEntry : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 5 . 2 Sample Description Box ( avc1 ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 44 <nl> - * / <nl> + / / 8 . 5 . 2 Sample Description Box ( avc1 ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 44 <nl> class SrsMp4VisualSampleEntry : public SrsMp4SampleEntry <nl> { <nl> public : <nl> uint16_t pre_defined0 ; <nl> uint16_t reserved0 ; <nl> uint32_t pre_defined1 [ 3 ] ; <nl> - / / the maximum visual width and height of the stream described by this sample <nl> + / / The maximum visual width and height of the stream described by this sample <nl> / / description , in pixels <nl> uint16_t width ; <nl> uint16_t height ; <nl> class SrsMp4VisualSampleEntry : public SrsMp4SampleEntry <nl> / / how many frames of compressed video are stored in each sample . The default is <nl> / / 1 , for one frame per sample ; it may be more than 1 for multiple frames per sample <nl> uint16_t frame_count ; <nl> - / / a name , for informative purposes . It is formatted in a fixed 32 - byte field , with the first <nl> + / / A name , for informative purposes . It is formatted in a fixed 32 - byte field , with the first <nl> / / byte set to the number of bytes to be displayed , followed by that number of bytes of displayable data , <nl> - / / and then padding to complete 32 bytes total ( including the size byte ) . The field may be set to 0 . <nl> + / / And then padding to complete 32 bytes total ( including the size byte ) . The field may be set to 0 . <nl> char compressorname [ 32 ] ; <nl> / / one of the following values <nl> / / 0x0018 – images are in colour with no alpha <nl> class SrsMp4VisualSampleEntry : public SrsMp4SampleEntry <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 5 . 3 . 4 AVC Video Stream Definition ( avcC ) <nl> - * ISO_IEC_14496 - 15 - AVC - format - 2012 . pdf , page 19 <nl> - * / <nl> + / / 5 . 3 . 4 AVC Video Stream Definition ( avcC ) <nl> + / / ISO_IEC_14496 - 15 - AVC - format - 2012 . pdf , page 19 <nl> class SrsMp4AvccBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4AvccBox : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 5 . 2 Sample Description Box ( mp4a ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . 
pdf , page 45 <nl> - * / <nl> + / / 8 . 5 . 2 Sample Description Box ( mp4a ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 45 <nl> class SrsMp4AudioSampleEntry : public SrsMp4SampleEntry <nl> { <nl> public : <nl> enum SrsMp4ESTagEs { <nl> SrsMp4ESTagESExtSLConfigDescrTag = 0x064 , <nl> } ; <nl> <nl> - / * * <nl> - * 7 . 2 . 2 . 2 BaseDescriptor <nl> - * ISO_IEC_14496 - 1 - System - 2010 . pdf , page 32 <nl> - * / <nl> + / / 7 . 2 . 2 . 2 BaseDescriptor <nl> + / / ISO_IEC_14496 - 1 - System - 2010 . pdf , page 32 <nl> class SrsMp4BaseDescriptor : public ISrsCodec <nl> { <nl> public : <nl> enum SrsMp4StreamType <nl> SrsMp4StreamTypeAudioStream = 0x05 , <nl> } ; <nl> <nl> - / * * <nl> - * 7 . 2 . 6 . 7 DecoderSpecificInfo <nl> - * ISO_IEC_14496 - 1 - System - 2010 . pdf , page 51 <nl> - * / <nl> + / / 7 . 2 . 6 . 7 DecoderSpecificInfo <nl> + / / ISO_IEC_14496 - 1 - System - 2010 . pdf , page 51 <nl> class SrsMp4DecoderSpecificInfo : public SrsMp4BaseDescriptor <nl> { <nl> public : <nl> class SrsMp4DecoderSpecificInfo : public SrsMp4BaseDescriptor <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 7 . 2 . 6 . 6 DecoderConfigDescriptor <nl> - * ISO_IEC_14496 - 1 - System - 2010 . pdf , page 48 <nl> - * / <nl> + / / 7 . 2 . 6 . 6 DecoderConfigDescriptor <nl> + / / ISO_IEC_14496 - 1 - System - 2010 . pdf , page 48 <nl> class SrsMp4DecoderConfigDescriptor : public SrsMp4BaseDescriptor <nl> { <nl> public : <nl> - / / an indication of the object or scene description type that needs to be supported <nl> + / / An indication of the object or scene description type that needs to be supported <nl> / / by the decoder for this elementary stream as per Table 5 . <nl> SrsMp4ObjectType objectTypeIndication ; / / bit ( 8 ) <nl> SrsMp4StreamType streamType ; / / bit ( 6 ) <nl> class SrsMp4DecoderConfigDescriptor : public SrsMp4BaseDescriptor <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 7 . 3 . 2 . 3 SL Packet Header Configuration <nl> - * ISO_IEC_14496 - 1 - System - 2010 . pdf , page 92 <nl> - * / <nl> + / / 7 . 3 . 2 . 3 SL Packet Header Configuration <nl> + / / ISO_IEC_14496 - 1 - System - 2010 . pdf , page 92 <nl> class SrsMp4SLConfigDescriptor : public SrsMp4BaseDescriptor <nl> { <nl> public : <nl> class SrsMp4SLConfigDescriptor : public SrsMp4BaseDescriptor <nl> virtual srs_error_t decode_payload ( SrsBuffer * buf ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 7 . 2 . 6 . 5 ES_Descriptor <nl> - * ISO_IEC_14496 - 1 - System - 2010 . pdf , page 47 <nl> - * / <nl> + / / 7 . 2 . 6 . 5 ES_Descriptor <nl> + / / ISO_IEC_14496 - 1 - System - 2010 . pdf , page 47 <nl> class SrsMp4ES_Descriptor : public SrsMp4BaseDescriptor <nl> { <nl> public : <nl> class SrsMp4ES_Descriptor : public SrsMp4BaseDescriptor <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 5 . 6 Sample Description Boxes <nl> - * Elementary Stream Descriptors ( esds ) <nl> - * ISO_IEC_14496 - 14 - MP4 - 2003 . pdf , page 15 <nl> - * @ see http : / / www . mp4ra . org / codecs . html <nl> - * / <nl> + / / 5 . 6 Sample Description Boxes <nl> + / / Elementary Stream Descriptors ( esds ) <nl> + / / ISO_IEC_14496 - 14 - MP4 - 2003 . pdf , page 15 <nl> + / / @ see http : / / www . mp4ra . org / codecs . 
html <nl> class SrsMp4EsdsBox : public SrsMp4FullBox <nl> { <nl> public : <nl> class SrsMp4EsdsBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 5 . 2 Sample Description Box ( stsd ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 40 <nl> - * The sample description table gives detailed information about the coding type used , and any initialization <nl> - * information needed for that coding . <nl> - * / <nl> + / / 8 . 5 . 2 Sample Description Box ( stsd ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 40 <nl> + / / The sample description table gives detailed information about the coding type used , and any initialization <nl> + / / information needed for that coding . <nl> class SrsMp4SampleDescriptionBox : public SrsMp4FullBox <nl> { <nl> private : <nl> class SrsMp4SampleDescriptionBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 1 . 2 Decoding Time to Sample Box ( stts ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 48 <nl> - * / <nl> + / / 8 . 6 . 1 . 2 Decoding Time to Sample Box ( stts ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 48 <nl> struct SrsMp4SttsEntry <nl> { <nl> - / / an integer that counts the number of consecutive samples that have the given <nl> + / / An integer that counts the number of consecutive samples that have the given <nl> / / duration . <nl> uint32_t sample_count ; <nl> - / / an integer that gives the delta of these samples in the time - scale of the media . <nl> + / / An integer that gives the delta of these samples in the time - scale of the media . <nl> uint32_t sample_delta ; <nl> / / Constructor <nl> SrsMp4SttsEntry ( ) ; <nl> struct SrsMp4SttsEntry <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 1 . 2 Decoding Time to Sample Box ( stts ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 48 <nl> - * This box contains a compact version of a table that allows indexing from decoding time to sample number . <nl> - * Other tables give sample sizes and pointers , from the sample number . Each entry in the table gives the <nl> - * number of consecutive samples with the same time delta , and the delta of those samples . By adding the <nl> - * deltas a complete time - to - sample map may be built . <nl> - * / <nl> + / / 8 . 6 . 1 . 2 Decoding Time to Sample Box ( stts ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 48 <nl> + / / This box contains a compact version of a table that allows indexing from decoding time to sample number . <nl> + / / Other tables give sample sizes and pointers , from the sample number . Each entry in the table gives the <nl> + / / number of consecutive samples with the same time delta , and the delta of those samples . By adding the <nl> + / / deltas a complete time - to - sample map may be built . <nl> class SrsMp4DecodingTime2SampleBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table . <nl> + / / An integer that gives the number of entries in the following table . 
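A minimal standalone sketch (hypothetical helper, not the SRS API) of how the (sample_count, sample_delta) runs of this table expand into a per-sample decoding timestamp, in the media timescale:

#include <cstdint>
#include <vector>

struct SttsEntry { uint32_t sample_count; uint32_t sample_delta; };

// Expands the runs into an absolute DTS per sample:
// dts(0) = 0, dts(n+1) = dts(n) + delta(n).
std::vector<uint64_t> expand_stts(const std::vector<SttsEntry>& entries)
{
    std::vector<uint64_t> dts;
    uint64_t t = 0;
    for (const SttsEntry& e : entries) {
        for (uint32_t i = 0; i < e.sample_count; i++) {
            dts.push_back(t);
            t += e.sample_delta;
        }
    }
    return dts;
}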
<nl> std : : vector < SrsMp4SttsEntry > entries ; <nl> private : <nl> / / The index for counter to calc the dts for samples . <nl> class SrsMp4DecodingTime2SampleBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 1 . 3 Composition Time to Sample Box ( ctts ) , for Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 49 <nl> - * / <nl> + / / 8 . 6 . 1 . 3 Composition Time to Sample Box ( ctts ) , for Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 49 <nl> struct SrsMp4CttsEntry <nl> { <nl> - / / an integer that counts the number of consecutive samples that have the given offset . <nl> + / / An integer that counts the number of consecutive samples that have the given offset . <nl> uint32_t sample_count ; <nl> / / uint32_t for version = 0 <nl> / / int32_t for version = 1 <nl> - / / an integer that gives the offset between CT and DT , such that CT ( n ) = DT ( n ) + <nl> + / / An integer that gives the offset between CT and DT , such that CT ( n ) = DT ( n ) + <nl> / / CTTS ( n ) . <nl> int64_t sample_offset ; <nl> / / Constructor <nl> struct SrsMp4CttsEntry <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 1 . 3 Composition Time to Sample Box ( ctts ) , for Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 49 <nl> - * This box provides the offset between decoding time and composition time . In version 0 of this box the <nl> - * decoding time must be less than the composition time , and the offsets are expressed as unsigned numbers <nl> - * such that CT ( n ) = DT ( n ) + CTTS ( n ) where CTTS ( n ) is the ( uncompressed ) table entry for sample n . In version <nl> - * 1 of this box , the composition timeline and the decoding timeline are still derived from each other , but the <nl> - * offsets are signed . It is recommended that for the computed composition timestamps , there is exactly one with <nl> - * the value 0 ( zero ) . <nl> - * / <nl> + / / 8 . 6 . 1 . 3 Composition Time to Sample Box ( ctts ) , for Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 49 <nl> + / / This box provides the offset between decoding time and composition time . In version 0 of this box the <nl> + / / decoding time must be less than the composition time , and the offsets are expressed as unsigned numbers <nl> + / / such that CT ( n ) = DT ( n ) + CTTS ( n ) where CTTS ( n ) is the ( uncompressed ) table entry for sample n . In version <nl> + / / 1 of this box , the composition timeline and the decoding timeline are still derived from each other , but the <nl> + / / offsets are signed . It is recommended that for the computed composition timestamps , there is exactly one with <nl> + / / The value 0 ( zero ) . <nl> class SrsMp4CompositionTime2SampleBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table . <nl> + / / An integer that gives the number of entries in the following table . <nl> std : : vector < SrsMp4CttsEntry > entries ; <nl> private : <nl> / / The index for counter to calc the dts for samples . <nl> class SrsMp4CompositionTime2SampleBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 6 . 
2 Sync Sample Box ( stss ) , for Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 51 <nl> - * This box provides a compact marking of the sync samples within the stream . The table is arranged in strictly <nl> - * increasing order of sample number . <nl> - * / <nl> + / / 8 . 6 . 2 Sync Sample Box ( stss ) , for Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 51 <nl> + / / This box provides a compact marking of the sync samples within the stream . The table is arranged in strictly <nl> + / / increasing order of sample number . <nl> class SrsMp4SyncSampleBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table . If entry_count is zero , <nl> - / / there are no sync samples within the stream and the following table is empty . <nl> + / / An integer that gives the number of entries in the following table . If entry_count is zero , <nl> + / / There are no sync samples within the stream and the following table is empty . <nl> uint32_t entry_count ; <nl> - / / the numbers of the samples that are sync samples in the stream . <nl> + / / The numbers of the samples that are sync samples in the stream . <nl> uint32_t * sample_numbers ; <nl> public : <nl> SrsMp4SyncSampleBox ( ) ; <nl> class SrsMp4SyncSampleBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 4 Sample To Chunk Box ( stsc ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 58 <nl> - * / <nl> + / / 8 . 7 . 4 Sample To Chunk Box ( stsc ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 58 <nl> struct SrsMp4StscEntry <nl> { <nl> - / / an integer that gives the index of the first chunk in this run of chunks that share the <nl> + / / An integer that gives the index of the first chunk in this run of chunks that share the <nl> / / same samples - per - chunk and sample - description - index ; the index of the first chunk in a track has the <nl> / / value 1 ( the first_chunk field in the first record of this box has the value 1 , identifying that the first <nl> / / sample maps to the first chunk ) . <nl> uint32_t first_chunk ; <nl> - / / an integer that gives the number of samples in each of these chunks <nl> + / / An integer that gives the number of samples in each of these chunks <nl> uint32_t samples_per_chunk ; <nl> - / / an integer that gives the index of the sample entry that describes the <nl> + / / An integer that gives the index of the sample entry that describes the <nl> / / samples in this chunk . The index ranges from 1 to the number of sample entries in the Sample <nl> / / Description Box <nl> uint32_t sample_description_index ; <nl> struct SrsMp4StscEntry <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 4 Sample To Chunk Box ( stsc ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 58 <nl> - * Samples within the media data are grouped into chunks . Chunks can be of different sizes , and the samples <nl> - * within a chunk can have different sizes . This table can be used to find the chunk that contains a sample , <nl> - * its position , and the associated sample description . <nl> - * / <nl> + / / 8 . 7 . 4 Sample To Chunk Box ( stsc ) , for Audio / Video . 
<nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 58 <nl> + / / Samples within the media data are grouped into chunks . Chunks can be of different sizes , and the samples <nl> + / / within a chunk can have different sizes . This table can be used to find the chunk that contains a sample , <nl> + / / its position , and the associated sample description . <nl> class SrsMp4Sample2ChunkBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table <nl> + / / An integer that gives the number of entries in the following table <nl> uint32_t entry_count ; <nl> - / / the numbers of the samples that are sync samples in the stream . <nl> + / / The numbers of the samples that are sync samples in the stream . <nl> SrsMp4StscEntry * entries ; <nl> private : <nl> / / The index for counter to calc the dts for samples . <nl> class SrsMp4Sample2ChunkBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 5 Chunk Offset Box ( stco ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 59 <nl> - * The chunk offset table gives the index of each chunk into the containing file . There are two variants , permitting <nl> - * the use of 32 - bit or 64 - bit offsets . The latter is useful when managing very large presentations . At most one of <nl> - * these variants will occur in any single instance of a sample table . <nl> - * / <nl> + / / 8 . 7 . 5 Chunk Offset Box ( stco ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 59 <nl> + / / The chunk offset table gives the index of each chunk into the containing file . There are two variants , permitting <nl> + / / The use of 32 - bit or 64 - bit offsets . The latter is useful when managing very large presentations . At most one of <nl> + / / These variants will occur in any single instance of a sample table . <nl> class SrsMp4ChunkOffsetBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table <nl> + / / An integer that gives the number of entries in the following table <nl> uint32_t entry_count ; <nl> - / / a 32 bit integer that gives the offset of the start of a chunk into its containing <nl> + / / A 32 bit integer that gives the offset of the start of a chunk into its containing <nl> / / media file . <nl> uint32_t * entries ; <nl> public : <nl> class SrsMp4ChunkOffsetBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 5 Chunk Large Offset Box ( co64 ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 59 <nl> - * The chunk offset table gives the index of each chunk into the containing file . There are two variants , permitting <nl> - * the use of 32 - bit or 64 - bit offsets . The latter is useful when managing very large presentations . At most one of <nl> - * these variants will occur in any single instance of a sample table . <nl> - * / <nl> + / / 8 . 7 . 5 Chunk Large Offset Box ( co64 ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 59 <nl> + / / The chunk offset table gives the index of each chunk into the containing file . There are two variants , permitting <nl> + / / The use of 32 - bit or 64 - bit offsets . 
The latter is useful when managing very large presentations . At most one of <nl> + / / These variants will occur in any single instance of a sample table . <nl> class SrsMp4ChunkLargeOffsetBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / an integer that gives the number of entries in the following table <nl> + / / An integer that gives the number of entries in the following table <nl> uint32_t entry_count ; <nl> - / / a 64 bit integer that gives the offset of the start of a chunk into its containing <nl> + / / A 64 bit integer that gives the offset of the start of a chunk into its containing <nl> / / media file . <nl> uint64_t * entries ; <nl> public : <nl> class SrsMp4ChunkLargeOffsetBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 7 . 3 . 2 Sample Size Box ( stsz ) , for Audio / Video . <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 58 <nl> - * This box contains the sample count and a table giving the size in bytes of each sample . This allows the media data <nl> - * itself to be unframed . The total number of samples in the media is always indicated in the sample count . <nl> - * / <nl> + / / 8 . 7 . 3 . 2 Sample Size Box ( stsz ) , for Audio / Video . <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 58 <nl> + / / This box contains the sample count and a table giving the size in bytes of each sample . This allows the media data <nl> + / / itself to be unframed . The total number of samples in the media is always indicated in the sample count . <nl> class SrsMp4SampleSizeBox : public SrsMp4FullBox <nl> { <nl> public : <nl> - / / the default sample size . If all the samples are the same size , this field <nl> + / / The default sample size . If all the samples are the same size , this field <nl> / / contains that size value . If this field is set to 0 , then the samples have different sizes , and those sizes <nl> / / are stored in the sample size table . If this field is not 0 , it specifies the constant sample size , and no <nl> / / array follows . <nl> uint32_t sample_size ; <nl> - / / an integer that gives the number of samples in the track ; if sample - size is 0 , then it is <nl> + / / An integer that gives the number of samples in the track ; if sample - size is 0 , then it is <nl> / / also the number of entries in the following table . <nl> uint32_t sample_count ; <nl> - / / each entry_size is an integer specifying the size of a sample , indexed by its number . <nl> + / / Each entry_size is an integer specifying the size of a sample , indexed by its number . <nl> uint32_t * entry_sizes ; <nl> public : <nl> SrsMp4SampleSizeBox ( ) ; <nl> class SrsMp4SampleSizeBox : public SrsMp4FullBox <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 8 . 10 . 1 User Data Box ( udta ) <nl> - * ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 78 <nl> - * This box contains objects that declare user information about the containing box and its data ( presentation or <nl> - * track ) . <nl> - * / <nl> + / / 8 . 10 . 1 User Data Box ( udta ) <nl> + / / ISO_IEC_14496 - 12 - base - format - 2012 . pdf , page 78 <nl> + / / This box contains objects that declare user information about the containing box and its data ( presentation or <nl> + / / track ) . 
<nl> class SrsMp4UserDataBox : public SrsMp4Box <nl> { <nl> public : <nl> class SrsMp4UserDataBox : public SrsMp4Box <nl> virtual std : : stringstream & dumps_detail ( std : : stringstream & ss , SrsMp4DumpContext dc ) ; <nl> } ; <nl> <nl> - / * * <nl> - * Generally , a MP4 sample contains a frame , for example , a video frame or audio frame . <nl> - * / <nl> + / / Generally , an MP4 sample contains a frame , for example , a video frame or audio frame . <nl> class SrsMp4Sample <nl> { <nl> public : <nl> class SrsMp4Sample <nl> virtual uint32_t pts_ms ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * Build samples from moov , or write samples to moov . <nl> - * One or more sample are grouped to a chunk , each track contains one or more chunks . <nl> - * The offset of chunk is specified by stco . <nl> - * The chunk - sample series is speicified by stsc . <nl> - * The sample size is specified by stsz . <nl> - * The dts is specified by stts . <nl> - * For video : <nl> - * The cts / pts is specified by ctts . <nl> - * The keyframe is specified by stss . <nl> - * / <nl> + / / Build samples from moov , or write samples to moov . <nl> + / / One or more samples are grouped into a chunk ; each track contains one or more chunks . <nl> + / / The offset of chunk is specified by stco . <nl> + / / The chunk - sample series is specified by stsc . <nl> + / / The sample size is specified by stsz . <nl> + / / The dts is specified by stts . <nl> + / / For video : <nl> + / / The cts / pts is specified by ctts . <nl> + / / The keyframe is specified by stss . <nl> class SrsMp4SampleManager <nl> { <nl> public : <nl> class SrsMp4SampleManager <nl> SrsMp4DecodingTime2SampleBox * stts , SrsMp4CompositionTime2SampleBox * ctts , SrsMp4SyncSampleBox * stss ) ; <nl> } ; <nl> <nl> - / * * <nl> - * The MP4 box reader , to get the RAW boxes without decode . <nl> - * @ remark For mdat box , we only decode the header , then skip the data . <nl> - * / <nl> + / / The MP4 box reader , to get the RAW boxes without decode . <nl> + / / @ remark For mdat box , we only decode the header , then skip the data . <nl> class SrsMp4BoxReader <nl> { <nl> private : <nl> class SrsMp4BoxReader <nl> virtual srs_error_t skip ( SrsMp4Box * box , SrsSimpleStream * stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * The MP4 demuxer . <nl> - * / <nl> + / / The MP4 demuxer . <nl> class SrsMp4Decoder <nl> { <nl> private : <nl> class SrsMp4Decoder <nl> SrsMp4Decoder ( ) ; <nl> virtual ~ SrsMp4Decoder ( ) ; <nl> public : <nl> - / * * <nl> - * Initialize the decoder with a reader r . <nl> - * @ param r The underlayer io reader , user must manage it . <nl> - * / <nl> + / / Initialize the decoder with a reader r . <nl> + / / @ param r The underlayer io reader , user must manage it . <nl> virtual srs_error_t initialize ( ISrsReadSeeker * rs ) ; <nl> - / * * <nl> - * Read a sample from mp4 . <nl> - * @ param pht The sample hanler type , audio / soun or video / vide . <nl> - * @ param pft , The frame type . For video , it ' s SrsVideoAvcFrameType . For audio , ignored . <nl> - * @ param pct , The codec type . For video , it ' s SrsVideoAvcFrameTrait . For audio , it ' s SrsAudioAacFrameTrait . <nl> - * @ param pdts The output dts in milliseconds . <nl> - * @ param ppts The output pts in milliseconds . <nl> - * @ param pnb_sample The output size of payload . <nl> - * @ param psample The output payload , user must free it . <nl> - * @ remark The decoder will generate the first two audio / video sequence header . <nl> - * / <nl> + / / Read a sample from mp4 .
<nl> + / / @ param pht The sample handler type , audio / soun or video / vide . <nl> + / / @ param pft , The frame type . For video , it ' s SrsVideoAvcFrameType . For audio , ignored . <nl> + / / @ param pct , The codec type . For video , it ' s SrsVideoAvcFrameTrait . For audio , it ' s SrsAudioAacFrameTrait . <nl> + / / @ param pdts The output dts in milliseconds . <nl> + / / @ param ppts The output pts in milliseconds . <nl> + / / @ param pnb_sample The output size of payload . <nl> + / / @ param psample The output payload , user must free it . <nl> + / / @ remark The decoder will generate the first two audio / video sequence header . <nl> virtual srs_error_t read_sample ( SrsMp4HandlerType * pht , uint16_t * pft , uint16_t * pct , <nl> - uint32_t * pdts , uint32_t * ppts , uint8_t * * psample , uint32_t * pnb_sample ) ; <nl> + uint32_t * pdts , uint32_t * ppts , uint8_t * * psample , uint32_t * pnb_sample ) ; <nl> private : <nl> virtual srs_error_t parse_ftyp ( SrsMp4FileTypeBox * ftyp ) ; <nl> virtual srs_error_t parse_moov ( SrsMp4MovieBox * moov ) ; <nl> class SrsMp4Decoder <nl> virtual srs_error_t do_load_next_box ( SrsMp4Box * * ppbox , uint32_t required_box_type ) ; <nl> } ; <nl> <nl> - / * * <nl> - * The MP4 muxer . <nl> - * / <nl> + / / The MP4 muxer . <nl> class SrsMp4Encoder <nl> { <nl> private : <nl> class SrsMp4Encoder <nl> virtual srs_error_t do_write_sample ( SrsMp4Sample * ps , uint8_t * sample , uint32_t nb_sample ) ; <nl> } ; <nl> <nl> - / * * <nl> - * A fMP4 encoder , to write the init . mp4 with sequence header . <nl> - * / <nl> + / / A fMP4 encoder , to write the init . mp4 with sequence header . <nl> class SrsMp4M2tsInitEncoder <nl> { <nl> private : <nl> class SrsMp4M2tsInitEncoder <nl> virtual srs_error_t write ( SrsFormat * format , bool video , int tid ) ; <nl> } ; <nl> <nl> - / * * <nl> - * A fMP4 encoder , to cache segments then flush to disk , because the fMP4 should write <nl> - * trun box before mdat . <nl> - * / <nl> + / / A fMP4 encoder , to cache segments then flush to disk , because the fMP4 should write <nl> + / / trun box before mdat . <nl> class SrsMp4M2tsSegmentEncoder <nl> { <nl> private : <nl> mmm a / trunk / src / kernel / srs_kernel_ts . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_ts . hpp <nl> <nl> * <nl> * Permission is hereby granted , free of charge , to any person obtaining a copy of <nl> * this software and associated documentation files ( the " Software " ) , to deal in <nl> - * the Software without restriction , including without limitation the rights to <nl> + * the Software without restriction , including without limitation the rights to <nl> * use , copy , modify , merge , publish , distribute , sublicense , and / or sell copies of <nl> - * the Software , and to permit persons to whom the Software is furnished to do so , <nl> + * the Software , and to permit persons to whom the Software is furnished to do so , <nl> * subject to the following conditions : <nl> * <nl> * The above copyright notice and this permission notice shall be included in all <nl> * copies or substantial portions of the Software . <nl> * <nl> - * THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + * THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> * IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , FITNESS <nl> * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT .
IN NO EVENT SHALL THE AUTHORS OR <nl> * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER <nl> class SrsTsContext ; <nl> / / Transport Stream packets are 188 bytes in length . <nl> # define SRS_TS_PACKET_SIZE 188 <nl> <nl> - / / the aggregate pure audio for hls , in ts tbn ( ms * 90 ) . <nl> + / / The aggregate pure audio for hls , in ts tbn ( ms * 90 ) . <nl> # define SRS_CONSTS_HLS_PURE_AUDIO_AGGREGATE 720 * 90 <nl> <nl> - / * * <nl> - * the pid of ts packet , <nl> - * Table 2 - 3 - PID table , hls - mpeg - ts - iso13818 - 1 . pdf , page 37 <nl> - * NOTE - The transport packets with PID values 0x0000 , 0x0001 , and 0x0010 - 0x1FFE are allowed to carry a PCR . <nl> - * / <nl> + / / The pid of ts packet , <nl> + / / Table 2 - 3 - PID table , hls - mpeg - ts - iso13818 - 1 . pdf , page 37 <nl> + / / NOTE - The transport packets with PID values 0x0000 , 0x0001 , and 0x0010 - 0x1FFE are allowed to carry a PCR . <nl> enum SrsTsPid <nl> { <nl> / / Program Association Table ( see Table 2 - 25 ) . <nl> enum SrsTsPid <nl> / / May be assigned as network_PID , Program_map_PID , elementary_PID , or for other purposes <nl> SrsTsPidAppStart = 0x10 , <nl> SrsTsPidAppEnd = 0x1ffe , <nl> - / / null packets ( see Table 2 - 3 ) <nl> + / / For null packets ( see Table 2 - 3 ) <nl> SrsTsPidNULL = 0x01FFF , <nl> } ; <nl> <nl> - / * * <nl> - * the transport_scrambling_control of ts packet , <nl> - * Table 2 - 4 - Scrambling control values , hls - mpeg - ts - iso13818 - 1 . pdf , page 38 <nl> - * / <nl> + / / The transport_scrambling_control of ts packet , <nl> + / / Table 2 - 4 - Scrambling control values , hls - mpeg - ts - iso13818 - 1 . pdf , page 38 <nl> enum SrsTsScrambled <nl> { <nl> / / Not scrambled <nl> enum SrsTsScrambled <nl> SrsTsScrambledUserDefined3 = 0x03 , <nl> } ; <nl> <nl> - / * * <nl> - * the adaption_field_control of ts packet , <nl> - * Table 2 - 5 - Adaptation field control values , hls - mpeg - ts - iso13818 - 1 . pdf , page 38 <nl> - * / <nl> + / / the adaption_field_control of ts packet , <nl> + / / Table 2 - 5 - Adaptation field control values , hls - mpeg - ts - iso13818 - 1 . pdf , page 38 <nl> enum SrsTsAdaptationFieldType <nl> { <nl> / / Reserved for future use by ISO / IEC <nl> enum SrsTsAdaptationFieldType <nl> SrsTsAdaptationFieldTypeBoth = 0x03 , <nl> } ; <nl> <nl> - / * * <nl> - * the actually parsed ts pid , <nl> - * @ see SrsTsPid , some pid , for example , PMT / Video / Audio is specified by PAT or other tables . <nl> - * / <nl> + / / the actually parsed ts pid , <nl> + / / @ see SrsTsPid , some pid , for example , PMT / Video / Audio is specified by PAT or other tables . <nl> enum SrsTsPidApply <nl> { <nl> SrsTsPidApplyReserved = 0 , / / TSPidTypeReserved , nothing parsed , used reserved . <nl> enum SrsTsPidApply <nl> SrsTsPidApplyAudio , / / vor audio <nl> } ; <nl> <nl> - / * * <nl> - * Table 2 - 29 - Stream type assignments <nl> - * / <nl> + / / Table 2 - 29 - Stream type assignments <nl> enum SrsTsStream <nl> { <nl> / / ITU - T | ISO / IEC Reserved <nl> enum SrsTsStream <nl> } ; <nl> std : : string srs_ts_stream2string ( SrsTsStream stream ) ; <nl> <nl> - / * * <nl> - * the ts channel . <nl> - * / <nl> + / / The ts channel . <nl> struct SrsTsChannel <nl> { <nl> int pid ; <nl> struct SrsTsChannel <nl> virtual ~ SrsTsChannel ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the stream_id of PES payload of ts packet . <nl> - * Table 2 - 18 - Stream_id assignments , hls - mpeg - ts - iso13818 - 1 . pdf , page 52 . 
<nl> - * / <nl> + / / The stream_id of PES payload of ts packet . <nl> + / / Table 2 - 18 - Stream_id assignments , hls - mpeg - ts - iso13818 - 1 . pdf , page 52 . <nl> enum SrsTsPESStreamId <nl> { <nl> / / program_stream_map <nl> enum SrsTsPESStreamId <nl> SrsTsPESStreamIdProgramStreamDirectory = 0xff , / / 0b11111111 <nl> } ; <nl> <nl> - / * * <nl> - * the media audio / video message parsed from PES packet . <nl> - * / <nl> + / / The media audio / video message parsed from PES packet . <nl> class SrsTsMessage <nl> { <nl> public : <nl> - / / decoder only , <nl> + / / For decoder only , <nl> / / the ts message does not use them , <nl> / / for user to get the channel and packet . <nl> SrsTsChannel * channel ; <nl> SrsTsPacket * packet ; <nl> public : <nl> - / / the audio cache buffer start pts , to flush audio if full . <nl> + / / The audio cache buffer start pts , to flush audio if full . <nl> / / @ remark the pts is not the adjusted one , it ' s the original pts . <nl> int64_t start_pts ; <nl> - / / whether this message with pcr info , <nl> + / / Whether this message carries the pcr info , <nl> / / generally , the video IDR ( I frame , the keyframe of h . 264 ) carries the pcr info . <nl> bool write_pcr ; <nl> - / / whether got discontinuity ts , for example , sequence header changed . <nl> + / / Whether got discontinuity ts , for example , sequence header changed . <nl> bool is_discontinuity ; <nl> public : <nl> - / / the timestamp in 90khz <nl> + / / The timestamp in 90khz <nl> int64_t dts ; <nl> int64_t pts ; <nl> - / / the id of pes stream to indicates the payload codec . <nl> + / / The id of pes stream to indicate the payload codec . <nl> / / @ remark use is_audio ( ) and is_video ( ) to check it , and stream_number ( ) to figure it out . <nl> SrsTsPESStreamId sid ; <nl> - / / the size of payload , 0 indicates the length ( ) of payload . <nl> + / / The size of payload , 0 indicates the length ( ) of payload . <nl> uint16_t PES_packet_length ; <nl> - / / the chunk id . <nl> + / / The chunk id . <nl> uint8_t continuity_counter ; <nl> - / / the payload bytes . <nl> + / / The payload bytes . <nl> SrsSimpleStream * payload ; <nl> public : <nl> SrsTsMessage ( SrsTsChannel * c = NULL , SrsTsPacket * p = NULL ) ; <nl> virtual ~ SrsTsMessage ( ) ; <nl> - / / decoder <nl> + / / For decoder <nl> public : <nl> - / * * <nl> - * dumps all bytes in stream to ts message . <nl> - * / <nl> - virtual srs_error_t dump ( SrsBuffer * stream , int * pnb_bytes ) ; <nl> - / * * <nl> - * whether ts message is completed to reap . <nl> - * @ param payload_unit_start_indicator whether new ts message start . <nl> - * PES_packet_length is 0 , the payload_unit_start_indicator = 1 to reap ts message . <nl> - * PES_packet_length > 0 , the payload . length ( ) = = PES_packet_length to reap ts message . <nl> - * @ remark when PES_packet_length > 0 , the payload_unit_start_indicator should never be 1 when not completed . <nl> - * @ remark when fresh , the payload_unit_start_indicator should be 1 . <nl> - * / <nl> + / / Dumps all bytes in stream to ts message . <nl> + virtual srs_error_t dump ( SrsBuffer * stream , int * pnb_bytes ) ; <nl> + / / Whether ts message is completed to reap . <nl> + / / @ param payload_unit_start_indicator whether new ts message start . <nl> + / / PES_packet_length is 0 , the payload_unit_start_indicator = 1 to reap ts message . <nl> + / / PES_packet_length > 0 , the payload . length ( ) = = PES_packet_length to reap ts message .
<nl> + / / @ remark when PES_packet_length > 0 , the payload_unit_start_indicator should never be 1 when not completed . <nl> + / / @ remark when fresh , the payload_unit_start_indicator should be 1 . <nl> virtual bool completed ( int8_t payload_unit_start_indicator ) ; <nl> - / * * <nl> - * whether the message is fresh . <nl> - * / <nl> + / / Whether the message is fresh . <nl> virtual bool fresh ( ) ; <nl> public : <nl> - / * * <nl> - * whether the sid indicates the elementary stream audio . <nl> - * / <nl> + / / Whether the sid indicates the elementary stream audio . <nl> virtual bool is_audio ( ) ; <nl> - / * * <nl> - * whether the sid indicates the elementary stream video . <nl> - * / <nl> + / / Whether the sid indicates the elementary stream video . <nl> virtual bool is_video ( ) ; <nl> - / * * <nl> - * when audio or video , get the stream number which specifies the format of stream . <nl> - * @ return the stream number for audio / video ; otherwise , - 1 . <nl> - * / <nl> + / / When audio or video , get the stream number which specifies the format of stream . <nl> + / / @ return the stream number for audio / video ; otherwise , - 1 . <nl> virtual int stream_number ( ) ; <nl> public : <nl> - / * * <nl> - * detach the ts message , <nl> - * for user maybe need to parse the message by queue . <nl> - * @ remark we always use the payload of original message . <nl> - * / <nl> + / / Detach the ts message , <nl> + / / for user maybe need to parse the message by queue . <nl> + / / @ remark we always use the payload of original message . <nl> virtual SrsTsMessage * detach ( ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the ts message handler . <nl> - * / <nl> + / / The ts message handler . <nl> class ISrsTsHandler <nl> { <nl> public : <nl> ISrsTsHandler ( ) ; <nl> virtual ~ ISrsTsHandler ( ) ; <nl> public : <nl> - / * * <nl> - * when ts context got message , use handler to process it . <nl> - * @ param msg the ts msg , user should never free it . <nl> - * @ return an int error code . <nl> - * / <nl> + / / When ts context got message , use handler to process it . <nl> + / / @ param msg the ts msg , user should never free it . <nl> + / / @ return an int error code . <nl> virtual srs_error_t on_ts_message ( SrsTsMessage * msg ) = 0 ; <nl> } ; <nl> <nl> - / * * <nl> - * the context of ts , to decode the ts stream . <nl> - * / <nl> + / / The context of ts , to decode the ts stream . <nl> class SrsTsContext <nl> { <nl> private : <nl> class SrsTsContext <nl> SrsTsContext ( ) ; <nl> virtual ~ SrsTsContext ( ) ; <nl> public : <nl> - / * * <nl> - * whether the hls stream is pure audio stream . <nl> - * / <nl> - / / TODO : FIXME : merge with muxer codec detect . <nl> + / / Whether the hls stream is pure audio stream . <nl> + / / TODO : FIXME : merge with muxer codec detect . <nl> virtual bool is_pure_audio ( ) ; <nl> - / * * <nl> - * when PMT table parsed , we know some info about stream . <nl> - * / <nl> + / / When PMT table parsed , we know some info about stream . <nl> virtual void on_pmt_parsed ( ) ; <nl> - / * * <nl> - * reset the context for a new ts segment start . <nl> - * / <nl> + / / Reset the context for a new ts segment start . <nl> virtual void reset ( ) ; <nl> / / codec <nl> public : <nl> - / * * <nl> - * get the pid apply , the parsed pid . <nl> - * @ return the apply channel ; NULL for invalid . <nl> - * / <nl> + / / Get the pid apply , the parsed pid . <nl> + / / @ return the apply channel ; NULL for invalid . 
<nl> virtual SrsTsChannel * get ( int pid ) ; <nl> - / * * <nl> - * set the pid apply , the parsed pid . <nl> - * / <nl> + / / Set the pid apply , the parsed pid . <nl> virtual void set ( int pid , SrsTsPidApply apply_pid , SrsTsStream stream = SrsTsStreamReserved ) ; <nl> / / decode methods <nl> public : <nl> - / * * <nl> - * the stream contains only one ts packet . <nl> - * @ param handler the ts message handler to process the msg . <nl> - * @ remark we will consume all bytes in stream . <nl> - * / <nl> + / / The stream contains only one ts packet . <nl> + / / @ param handler the ts message handler to process the msg . <nl> + / / @ remark we will consume all bytes in stream . <nl> virtual srs_error_t decode ( SrsBuffer * stream , ISrsTsHandler * handler ) ; <nl> / / encode methods <nl> public : <nl> - / * * <nl> - * write the PES packet , the video / audio stream . <nl> - * @ param msg the video / audio msg to write to ts . <nl> - * @ param vc the video codec , write the PAT / PMT table when changed . <nl> - * @ param ac the audio codec , write the PAT / PMT table when changed . <nl> - * / <nl> + / / Write the PES packet , the video / audio stream . <nl> + / / @ param msg the video / audio msg to write to ts . <nl> + / / @ param vc the video codec , write the PAT / PMT table when changed . <nl> + / / @ param ac the audio codec , write the PAT / PMT table when changed . <nl> virtual srs_error_t encode ( ISrsStreamWriter * writer , SrsTsMessage * msg , SrsVideoCodecId vc , SrsAudioCodecId ac ) ; <nl> / / drm methods <nl> public : <nl> - / * * <nl> - * set sync byte of ts segment . <nl> - * replace the standard ts sync byte to bravo sync byte . <nl> - * / <nl> + / / Set sync byte of ts segment . <nl> + / / replace the standard ts sync byte to bravo sync byte . <nl> virtual void set_sync_byte ( int8_t sb ) ; <nl> private : <nl> virtual srs_error_t encode_pat_pmt ( ISrsStreamWriter * writer , int16_t vpid , SrsTsStream vs , int16_t apid , SrsTsStream as ) ; <nl> virtual srs_error_t encode_pes ( ISrsStreamWriter * writer , SrsTsMessage * msg , int16_t pid , SrsTsStream sid , bool pure_audio ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the packet in ts stream , <nl> - * 2 . 4 . 3 . 2 Transport Stream packet layer , hls - mpeg - ts - iso13818 - 1 . pdf , page 36 <nl> - * Transport Stream packets shall be 188 bytes long . <nl> - * / <nl> + / / The packet in ts stream , <nl> + / / 2 . 4 . 3 . 2 Transport Stream packet layer , hls - mpeg - ts - iso13818 - 1 . pdf , page 36 <nl> + / / Transport Stream packets shall be 188 bytes long . <nl> class SrsTsPacket <nl> { <nl> public : <nl> / / 1B <nl> - / * * <nl> - * The sync_byte is a fixed 8 - bit field whose value is ' 0100 0111 ' ( 0x47 ) . Sync_byte emulation in the choice of <nl> - * values for other regularly occurring fields , such as PID , should be avoided . <nl> - * / <nl> + / / The sync_byte is a fixed 8 - bit field whose value is ' 0100 0111 ' ( 0x47 ) . Sync_byte emulation in the choice of <nl> + / / values for other regularly occurring fields , such as PID , should be avoided . <nl> int8_t sync_byte ; / / 8bits <nl> <nl> / / 2B <nl> - / * * <nl> - * The transport_error_indicator is a 1 - bit flag . When set to ' 1 ' it indicates that at least <nl> - * 1 uncorrectable bit error exists in the associated Transport Stream packet . This bit may be set to ' 1 ' by entities external to <nl> - * the transport layer . When set to ' 1 ' this bit shall not be reset to ' 0 ' unless the bit value ( s ) in error have been corrected . 
<nl> - * / <nl> + / / The transport_error_indicator is a 1 - bit flag . When set to ' 1 ' it indicates that at least <nl> + / / 1 uncorrectable bit error exists in the associated Transport Stream packet . This bit may be set to ' 1 ' by entities external to <nl> + / / the transport layer . When set to ' 1 ' this bit shall not be reset to ' 0 ' unless the bit value ( s ) in error have been corrected . <nl> int8_t transport_error_indicator ; / / 1bit <nl> - / * * <nl> - * The payload_unit_start_indicator is a 1 - bit flag which has normative meaning for <nl> - * Transport Stream packets that carry PES packets ( refer to 2 . 4 . 3 . 6 ) or PSI data ( refer to 2 . 4 . 4 ) . <nl> - * <nl> - * When the payload of the Transport Stream packet contains PES packet data , the payload_unit_start_indicator has the <nl> - * following significance : a ' 1 ' indicates that the payload of this Transport Stream packet will commence ( start ) with the first byte <nl> - * of a PES packet and a ' 0 ' indicates no PES packet shall start in this Transport Stream packet . If the <nl> - * payload_unit_start_indicator is set to ' 1 ' , then one and only one PES packet starts in this Transport Stream packet . This <nl> - * also applies to private streams of stream_type 6 ( refer to Table 2 - 29 ) . <nl> - * <nl> - * When the payload of the Transport Stream packet contains PSI data , the payload_unit_start_indicator has the following <nl> - * significance : if the Transport Stream packet carries the first byte of a PSI section , the payload_unit_start_indicator value <nl> - * shall be ' 1 ' , indicating that the first byte of the payload of this Transport Stream packet carries the pointer_field . If the <nl> - * Transport Stream packet does not carry the first byte of a PSI section , the payload_unit_start_indicator value shall be ' 0 ' , <nl> - * indicating that there is no pointer_field in the payload . Refer to 2 . 4 . 4 . 1 and 2 . 4 . 4 . 2 . This also applies to private streams of <nl> - * stream_type 5 ( refer to Table 2 - 29 ) . <nl> - * <nl> - * For null packets the payload_unit_start_indicator shall be set to ' 0 ' . <nl> - * <nl> - * The meaning of this bit for Transport Stream packets carrying only private data is not defined in this Specification . <nl> - * / <nl> + / / The payload_unit_start_indicator is a 1 - bit flag which has normative meaning for <nl> + / / Transport Stream packets that carry PES packets ( refer to 2 . 4 . 3 . 6 ) or PSI data ( refer to 2 . 4 . 4 ) . <nl> + / / <nl> + / / When the payload of the Transport Stream packet contains PES packet data , the payload_unit_start_indicator has the <nl> + / / following significance : a ' 1 ' indicates that the payload of this Transport Stream packet will commence ( start ) with the first byte <nl> + / / of a PES packet and a ' 0 ' indicates no PES packet shall start in this Transport Stream packet . If the <nl> + / / payload_unit_start_indicator is set to ' 1 ' , then one and only one PES packet starts in this Transport Stream packet . This <nl> + / / also applies to private streams of stream_type 6 ( refer to Table 2 - 29 ) . <nl> + / / <nl> + / / When the payload of the Transport Stream packet contains PSI data , the payload_unit_start_indicator has the following <nl> + / / significance : if the Transport Stream packet carries the first byte of a PSI section , the payload_unit_start_indicator value <nl> + / / shall be ' 1 ' , indicating that the first byte of the payload of this Transport Stream packet carries the pointer_field . 
If the <nl> + / / Transport Stream packet does not carry the first byte of a PSI section , the payload_unit_start_indicator value shall be ' 0 ' , <nl> + / / indicating that there is no pointer_field in the payload . Refer to 2 . 4 . 4 . 1 and 2 . 4 . 4 . 2 . This also applies to private streams of <nl> + / / stream_type 5 ( refer to Table 2 - 29 ) . <nl> + / / <nl> + / / For null packets the payload_unit_start_indicator shall be set to ' 0 ' . <nl> + / / <nl> + / / The meaning of this bit for Transport Stream packets carrying only private data is not defined in this Specification . <nl> int8_t payload_unit_start_indicator ; / / 1bit <nl> - / * * <nl> - * The transport_priority is a 1 - bit indicator . When set to ' 1 ' it indicates that the associated packet is <nl> - * of greater priority than other packets having the same PID which do not have the bit set to ' 1 ' . The transport mechanism <nl> - * can use this to prioritize its data within an elementary stream . Depending on the application the transport_priority field <nl> - * may be coded regardless of the PID or within one PID only . This field may be changed by channel specific encoders or <nl> - * decoders . <nl> - * / <nl> + / / The transport_priority is a 1 - bit indicator . When set to ' 1 ' it indicates that the associated packet is <nl> + / / of greater priority than other packets having the same PID which do not have the bit set to ' 1 ' . The transport mechanism <nl> + / / can use this to prioritize its data within an elementary stream . Depending on the application the transport_priority field <nl> + / / may be coded regardless of the PID or within one PID only . This field may be changed by channel specific encoders or <nl> + / / decoders . <nl> int8_t transport_priority ; / / 1bit <nl> - / * * <nl> - * The PID is a 13 - bit field , indicating the type of the data stored in the packet payload . PID value 0x0000 is <nl> - * reserved for the Program Association Table ( see Table 2 - 25 ) . PID value 0x0001 is reserved for the Conditional Access <nl> - * Table ( see Table 2 - 27 ) . PID values 0x0002 - 0x000F are reserved . PID value 0x1FFF is reserved for null packets ( see <nl> - * Table 2 - 3 ) . <nl> - * / <nl> + / / The PID is a 13 - bit field , indicating the type of the data stored in the packet payload . PID value 0x0000 is <nl> + / / reserved for the Program Association Table ( see Table 2 - 25 ) . PID value 0x0001 is reserved for the Conditional Access <nl> + / / Table ( see Table 2 - 27 ) . PID values 0x0002 - 0x000F are reserved . PID value 0x1FFF is reserved for null packets ( see <nl> + / / Table 2 - 3 ) . <nl> SrsTsPid pid ; / / 13bits <nl> <nl> / / 1B <nl> - / * * <nl> - * This 2 - bit field indicates the scrambling mode of the Transport Stream packet payload . <nl> - * The Transport Stream packet header , and the adaptation field when present , shall not be scrambled . In the case of a null <nl> - * packet the value of the transport_scrambling_control field shall be set to ' 00 ' ( see Table 2 - 4 ) . <nl> - * / <nl> + / / This 2 - bit field indicates the scrambling mode of the Transport Stream packet payload . <nl> + / / The Transport Stream packet header , and the adaptation field when present , shall not be scrambled . In the case of a null <nl> + / / packet the value of the transport_scrambling_control field shall be set to ' 00 ' ( see Table 2 - 4 ) . 
<nl> SrsTsScrambled transport_scrambling_control ; / / 2bits <nl> - / * * <nl> - * This 2 - bit field indicates whether this Transport Stream packet header is followed by an <nl> - * adaptation field and / or payload ( see Table 2 - 5 ) . <nl> - * <nl> - * ITU - T Rec . H . 222 . 0 | ISO / IEC 13818 - 1 decoders shall discard Transport Stream packets with the <nl> - * adaptation_field_control field set to a value of ' 00 ' . In the case of a null packet the value of the adaptation_field_control <nl> - * shall be set to ' 01 ' . <nl> - * / <nl> + / / This 2 - bit field indicates whether this Transport Stream packet header is followed by an <nl> + / / adaptation field and / or payload ( see Table 2 - 5 ) . <nl> + / / <nl> + / / ITU - T Rec . H . 222 . 0 | ISO / IEC 13818 - 1 decoders shall discard Transport Stream packets with the <nl> + / / adaptation_field_control field set to a value of ' 00 ' . In the case of a null packet the value of the adaptation_field_control <nl> + / / shall be set to ' 01 ' . <nl> SrsTsAdaptationFieldType adaption_field_control ; / / 2bits <nl> - / * * <nl> - * The continuity_counter is a 4 - bit field incrementing with each Transport Stream packet with the <nl> - * same PID . The continuity_counter wraps around to 0 after its maximum value . The continuity_counter shall not be <nl> - * incremented when the adaptation_field_control of the packet equals ' 00 ' ( reseverd ) or ' 10 ' ( adaptation field only ) . <nl> - * <nl> - * In Transport Streams , duplicate packets may be sent as two , and only two , consecutive Transport Stream packets of the <nl> - * same PID . The duplicate packets shall have the same continuity_counter value as the original packet and the <nl> - * adaptation_field_control field shall be equal to ' 01 ' ( payload only ) or ' 11 ' ( both ) . In duplicate packets each byte of the original packet shall be <nl> - * duplicated , with the exception that in the program clock reference fields , if present , a valid value shall be encoded . <nl> - * <nl> - * The continuity_counter in a particular Transport Stream packet is continuous when it differs by a positive value of one <nl> - * from the continuity_counter value in the previous Transport Stream packet of the same PID , or when either of the nonincrementing <nl> - * conditions ( adaptation_field_control set to ' 00 ' or ' 10 ' , or duplicate packets as described above ) are met . <nl> - * The continuity counter may be discontinuous when the discontinuity_indicator is set to ' 1 ' ( refer to 2 . 4 . 3 . 4 ) . In the case of <nl> - * a null packet the value of the continuity_counter is undefined . <nl> - * / <nl> + / / The continuity_counter is a 4 - bit field incrementing with each Transport Stream packet with the <nl> + / / same PID . The continuity_counter wraps around to 0 after its maximum value . The continuity_counter shall not be <nl> + / / incremented when the adaptation_field_control of the packet equals ' 00 ' ( reseverd ) or ' 10 ' ( adaptation field only ) . <nl> + / / <nl> + / / In Transport Streams , duplicate packets may be sent as two , and only two , consecutive Transport Stream packets of the <nl> + / / same PID . The duplicate packets shall have the same continuity_counter value as the original packet and the <nl> + / / adaptation_field_control field shall be equal to ' 01 ' ( payload only ) or ' 11 ' ( both ) . 
In duplicate packets each byte of the original packet shall be <nl> + / / duplicated , with the exception that in the program clock reference fields , if present , a valid value shall be encoded . <nl> + / / <nl> + / / The continuity_counter in a particular Transport Stream packet is continuous when it differs by a positive value of one <nl> + / / from the continuity_counter value in the previous Transport Stream packet of the same PID , or when either of the nonincrementing <nl> + / / conditions ( adaptation_field_control set to ' 00 ' or ' 10 ' , or duplicate packets as described above ) are met . <nl> + / / The continuity counter may be discontinuous when the discontinuity_indicator is set to ' 1 ' ( refer to 2 . 4 . 3 . 4 ) . In the case of <nl> + / / a null packet the value of the continuity_counter is undefined . <nl> uint8_t continuity_counter ; / / 4bits <nl> private : <nl> SrsTsAdaptationField * adaptation_field ; <nl> class SrsTsPacket <nl> public : <nl> static SrsTsPacket * create_pat ( SrsTsContext * context , int16_t pmt_number , int16_t pmt_pid ) ; <nl> static SrsTsPacket * create_pmt ( SrsTsContext * context , int16_t pmt_number , int16_t pmt_pid , <nl> - int16_t vpid , SrsTsStream vs , int16_t apid , SrsTsStream as ) ; <nl> + int16_t vpid , SrsTsStream vs , int16_t apid , SrsTsStream as ) ; <nl> static SrsTsPacket * create_pes_first ( SrsTsContext * context , int16_t pid , SrsTsPESStreamId sid , <nl> - uint8_t continuity_counter , bool discontinuity , int64_t pcr , int64_t dts , int64_t pts , int size ) ; <nl> + uint8_t continuity_counter , bool discontinuity , int64_t pcr , int64_t dts , int64_t pts , int size ) ; <nl> static SrsTsPacket * create_pes_continue ( SrsTsContext * context , <nl> - int16_t pid , SrsTsPESStreamId sid , uint8_t continuity_counter ) ; <nl> + int16_t pid , SrsTsPESStreamId sid , uint8_t continuity_counter ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the adaption field of ts packet . <nl> - * 2 . 4 . 3 . 5 Semantic definition of fields in adaptation field , hls - mpeg - ts - iso13818 - 1 . pdf , page 39 <nl> - * Table 2 - 6 - Transport Stream adaptation field , hls - mpeg - ts - iso13818 - 1 . pdf , page 40 <nl> - * / <nl> + / / The adaption field of ts packet . <nl> + / / 2 . 4 . 3 . 5 Semantic definition of fields in adaptation field , hls - mpeg - ts - iso13818 - 1 . pdf , page 39 <nl> + / / Table 2 - 6 - Transport Stream adaptation field , hls - mpeg - ts - iso13818 - 1 . pdf , page 40 <nl> class SrsTsAdaptationField <nl> { <nl> public : <nl> / / 1B <nl> - / * * <nl> - * The adaptation_field_length is an 8 - bit field specifying the number of bytes in the <nl> - * adaptation_field immediately following the adaptation_field_length . The value 0 is for inserting a single stuffing byte in <nl> - * a Transport Stream packet . When the adaptation_field_control value is ' 11 ' , the value of the adaptation_field_length shall <nl> - * be in the range 0 to 182 . When the adaptation_field_control value is ' 10 ' , the value of the adaptation_field_length shall <nl> - * be 183 . For Transport Stream packets carrying PES packets , stuffing is needed when there is insufficient PES packet data <nl> - * to completely fill the Transport Stream packet payload bytes . Stuffing is accomplished by defining an adaptation field <nl> - * longer than the sum of the lengths of the data elements in it , so that the payload bytes remaining after the adaptation field <nl> - * exactly accommodates the available PES packet data . 
The extra space in the adaptation field is filled with stuffing bytes . <nl> - * <nl> - * This is the only method of stuffing allowed for Transport Stream packets carrying PES packets . For Transport Stream <nl> - * packets carrying PSI , an alternative stuffing method is described in 2 . 4 . 4 . <nl> - * / <nl> + / / The adaptation_field_length is an 8 - bit field specifying the number of bytes in the <nl> + / / adaptation_field immediately following the adaptation_field_length . The value 0 is for inserting a single stuffing byte in <nl> + / / a Transport Stream packet . When the adaptation_field_control value is ' 11 ' , the value of the adaptation_field_length shall <nl> + / / be in the range 0 to 182 . When the adaptation_field_control value is ' 10 ' , the value of the adaptation_field_length shall <nl> + / / be 183 . For Transport Stream packets carrying PES packets , stuffing is needed when there is insufficient PES packet data <nl> + / / to completely fill the Transport Stream packet payload bytes . Stuffing is accomplished by defining an adaptation field <nl> + / / longer than the sum of the lengths of the data elements in it , so that the payload bytes remaining after the adaptation field <nl> + / / exactly accommodates the available PES packet data . The extra space in the adaptation field is filled with stuffing bytes . <nl> + / / <nl> + / / This is the only method of stuffing allowed for Transport Stream packets carrying PES packets . For Transport Stream <nl> + / / packets carrying PSI , an alternative stuffing method is described in 2 . 4 . 4 . <nl> uint8_t adaption_field_length ; / / 8bits <nl> / / 1B <nl> - / * * <nl> - * This is a 1 - bit field which when set to ' 1 ' indicates that the discontinuity state is true for the <nl> - * current Transport Stream packet . When the discontinuity_indicator is set to ' 0 ' or is not present , the discontinuity state is <nl> - * false . The discontinuity indicator is used to indicate two types of discontinuities , system time - base discontinuities and <nl> - * continuity_counter discontinuities . <nl> - * <nl> - * A system time - base discontinuity is indicated by the use of the discontinuity_indicator in Transport Stream packets of a <nl> - * PID designated as a PCR_PID ( refer to 2 . 4 . 4 . 9 ) . When the discontinuity state is true for a Transport Stream packet of a <nl> - * PID designated as a PCR_PID , the next PCR in a Transport Stream packet with that same PID represents a sample of a <nl> - * new system time clock for the associated program . The system time - base discontinuity point is defined to be the instant <nl> - * in time when the first byte of a packet containing a PCR of a new system time - base arrives at the input of the T - STD . <nl> - * The discontinuity_indicator shall be set to ' 1 ' in the packet in which the system time - base discontinuity occurs . The <nl> - * discontinuity_indicator bit may also be set to ' 1 ' in Transport Stream packets of the same PCR_PID prior to the packet <nl> - * which contains the new system time - base PCR . In this case , once the discontinuity_indicator has been set to ' 1 ' , it shall <nl> - * continue to be set to ' 1 ' in all Transport Stream packets of the same PCR_PID up to and including the Transport Stream <nl> - * packet which contains the first PCR of the new system time - base . 
After the occurrence of a system time - base <nl> - * discontinuity , no fewer than two PCRs for the new system time - base shall be received before another system time - base <nl> - * discontinuity can occur . Further , except when trick mode status is true , data from no more than two system time - bases <nl> - * shall be present in the set of T - STD buffers for one program at any time . <nl> - * <nl> - * Prior to the occurrence of a system time - base discontinuity , the first byte of a Transport Stream packet which contains a <nl> - * PTS or DTS which refers to the new system time - base shall not arrive at the input of the T - STD . After the occurrence of <nl> - * a system time - base discontinuity , the first byte of a Transport Stream packet which contains a PTS or DTS which refers <nl> - * to the previous system time - base shall not arrive at the input of the T - STD . <nl> - * <nl> - * A continuity_counter discontinuity is indicated by the use of the discontinuity_indicator in any Transport Stream packet . <nl> - * When the discontinuity state is true in any Transport Stream packet of a PID not designated as a PCR_PID , the <nl> - * continuity_counter in that packet may be discontinuous with respect to the previous Transport Stream packet of the same <nl> - * PID . When the discontinuity state is true in a Transport Stream packet of a PID that is designated as a PCR_PID , the <nl> - * continuity_counter may only be discontinuous in the packet in which a system time - base discontinuity occurs . A <nl> - * continuity counter discontinuity point occurs when the discontinuity state is true in a Transport Stream packet and the <nl> - * continuity_counter in the same packet is discontinuous with respect to the previous Transport Stream packet of the same <nl> - * PID . A continuity counter discontinuity point shall occur at most one time from the initiation of the discontinuity state <nl> - * until the conclusion of the discontinuity state . Furthermore , for all PIDs that are not designated as PCR_PIDs , when the <nl> - * discontinuity_indicator is set to ' 1 ' in a packet of a specific PID , the discontinuity_indicator may be set to ' 1 ' in the next <nl> - * Transport Stream packet of that same PID , but shall not be set to ' 1 ' in three consecutive Transport Stream packet of that <nl> - * same PID . <nl> - * <nl> - * For the purpose of this clause , an elementary stream access point is defined as follows : <nl> - * Video - The first byte of a video sequence header . <nl> - * Audio - The first byte of an audio frame . <nl> - * <nl> - * After a continuity counter discontinuity in a Transport packet which is designated as containing elementary stream data , <nl> - * the first byte of elementary stream data in a Transport Stream packet of the same PID shall be the first byte of an <nl> - * elementary stream access point or in the case of video , the first byte of an elementary stream access point or a <nl> - * sequence_end_code followed by an access point . Each Transport Stream packet which contains elementary stream data <nl> - * with a PID not designated as a PCR_PID , and in which a continuity counter discontinuity point occurs , and in which a <nl> - * PTS or DTS occurs , shall arrive at the input of the T - STD after the system time - base discontinuity for the associated <nl> - * program occurs . 
In the case where the discontinuity state is true , if two consecutive Transport Stream packets of the same <nl> - * PID occur which have the same continuity_counter value and have adaptation_field_control values set to ' 01 ' or ' 11 ' , the <nl> - * second packet may be discarded . A Transport Stream shall not be constructed in such a way that discarding such a packet <nl> - * will cause the loss of PES packet payload data or PSI data . <nl> - * <nl> - * After the occurrence of a discontinuity_indicator set to ' 1 ' in a Transport Stream packet which contains PSI information , <nl> - * a single discontinuity in the version_number of PSI sections may occur . At the occurrence of such a discontinuity , a <nl> - * version of the TS_program_map_sections of the appropriate program shall be sent with section_length = = 13 and the <nl> - * current_next_indicator = = 1 , such that there are no program_descriptors and no elementary streams described . This shall <nl> - * then be followed by a version of the TS_program_map_section for each affected program with the version_number <nl> - * incremented by one and the current_next_indicator = = 1 , containing a complete program definition . This indicates a <nl> - * version change in PSI data . <nl> - * / <nl> + / / This is a 1 - bit field which when set to ' 1 ' indicates that the discontinuity state is true for the <nl> + / / current Transport Stream packet . When the discontinuity_indicator is set to ' 0 ' or is not present , the discontinuity state is <nl> + / / false . The discontinuity indicator is used to indicate two types of discontinuities , system time - base discontinuities and <nl> + / / continuity_counter discontinuities . <nl> + / / <nl> + / / A system time - base discontinuity is indicated by the use of the discontinuity_indicator in Transport Stream packets of a <nl> + / / PID designated as a PCR_PID ( refer to 2 . 4 . 4 . 9 ) . When the discontinuity state is true for a Transport Stream packet of a <nl> + / / PID designated as a PCR_PID , the next PCR in a Transport Stream packet with that same PID represents a sample of a <nl> + / / new system time clock for the associated program . The system time - base discontinuity point is defined to be the instant <nl> + / / in time when the first byte of a packet containing a PCR of a new system time - base arrives at the input of the T - STD . <nl> + / / The discontinuity_indicator shall be set to ' 1 ' in the packet in which the system time - base discontinuity occurs . The <nl> + / / discontinuity_indicator bit may also be set to ' 1 ' in Transport Stream packets of the same PCR_PID prior to the packet <nl> + / / which contains the new system time - base PCR . In this case , once the discontinuity_indicator has been set to ' 1 ' , it shall <nl> + / / continue to be set to ' 1 ' in all Transport Stream packets of the same PCR_PID up to and including the Transport Stream <nl> + / / packet which contains the first PCR of the new system time - base . After the occurrence of a system time - base <nl> + / / discontinuity , no fewer than two PCRs for the new system time - base shall be received before another system time - base <nl> + / / discontinuity can occur . Further , except when trick mode status is true , data from no more than two system time - bases <nl> + / / shall be present in the set of T - STD buffers for one program at any time . 
<nl> + / / <nl> + / / Prior to the occurrence of a system time - base discontinuity , the first byte of a Transport Stream packet which contains a <nl> + / / PTS or DTS which refers to the new system time - base shall not arrive at the input of the T - STD . After the occurrence of <nl> + / / a system time - base discontinuity , the first byte of a Transport Stream packet which contains a PTS or DTS which refers <nl> + / / to the previous system time - base shall not arrive at the input of the T - STD . <nl> + / / <nl> + / / A continuity_counter discontinuity is indicated by the use of the discontinuity_indicator in any Transport Stream packet . <nl> + / / When the discontinuity state is true in any Transport Stream packet of a PID not designated as a PCR_PID , the <nl> + / / continuity_counter in that packet may be discontinuous with respect to the previous Transport Stream packet of the same <nl> + / / PID . When the discontinuity state is true in a Transport Stream packet of a PID that is designated as a PCR_PID , the <nl> + / / continuity_counter may only be discontinuous in the packet in which a system time - base discontinuity occurs . A <nl> + / / continuity counter discontinuity point occurs when the discontinuity state is true in a Transport Stream packet and the <nl> + / / continuity_counter in the same packet is discontinuous with respect to the previous Transport Stream packet of the same <nl> + / / PID . A continuity counter discontinuity point shall occur at most one time from the initiation of the discontinuity state <nl> + / / until the conclusion of the discontinuity state . Furthermore , for all PIDs that are not designated as PCR_PIDs , when the <nl> + / / discontinuity_indicator is set to ' 1 ' in a packet of a specific PID , the discontinuity_indicator may be set to ' 1 ' in the next <nl> + / / Transport Stream packet of that same PID , but shall not be set to ' 1 ' in three consecutive Transport Stream packet of that <nl> + / / same PID . <nl> + / / <nl> + / / For the purpose of this clause , an elementary stream access point is defined as follows : <nl> + / / Video - The first byte of a video sequence header . <nl> + / / Audio - The first byte of an audio frame . <nl> + / / <nl> + / / After a continuity counter discontinuity in a Transport packet which is designated as containing elementary stream data , <nl> + / / the first byte of elementary stream data in a Transport Stream packet of the same PID shall be the first byte of an <nl> + / / elementary stream access point or in the case of video , the first byte of an elementary stream access point or a <nl> + / / sequence_end_code followed by an access point . Each Transport Stream packet which contains elementary stream data <nl> + / / with a PID not designated as a PCR_PID , and in which a continuity counter discontinuity point occurs , and in which a <nl> + / / PTS or DTS occurs , shall arrive at the input of the T - STD after the system time - base discontinuity for the associated <nl> + / / program occurs . In the case where the discontinuity state is true , if two consecutive Transport Stream packets of the same <nl> + / / PID occur which have the same continuity_counter value and have adaptation_field_control values set to ' 01 ' or ' 11 ' , the <nl> + / / second packet may be discarded . A Transport Stream shall not be constructed in such a way that discarding such a packet <nl> + / / will cause the loss of PES packet payload data or PSI data . 
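As a side note to the header fields and continuity rules quoted above, here is a minimal sketch of a TS header parser and continuity check. It is illustrative only; the struct and helper names are hypothetical and not part of SRS, and duplicate-packet handling is omitted.

// Illustrative sketch: decode the 4-byte TS header described above and apply
// the continuity_counter rule (wraps modulo 16, not incremented when the
// packet carries no payload). Not the SRS decoder.
#include <cstddef>
#include <cstdint>

struct TsHeader {
    uint8_t sync_byte;                    // always 0x47
    bool transport_error_indicator;
    bool payload_unit_start_indicator;
    bool transport_priority;
    uint16_t pid;                         // 13 bits
    uint8_t transport_scrambling_control; // 2 bits
    uint8_t adaptation_field_control;     // 2 bits
    uint8_t continuity_counter;           // 4 bits
};

inline bool parse_ts_header(const uint8_t* pkt, size_t len, TsHeader* h) {
    if (len < 188 || pkt[0] != 0x47) return false; // SRS_TS_PACKET_SIZE, sync_byte
    h->sync_byte = pkt[0];
    h->transport_error_indicator = (pkt[1] & 0x80) != 0;
    h->payload_unit_start_indicator = (pkt[1] & 0x40) != 0;
    h->transport_priority = (pkt[1] & 0x20) != 0;
    h->pid = (uint16_t)(((pkt[1] & 0x1f) << 8) | pkt[2]);
    h->transport_scrambling_control = (pkt[3] >> 6) & 0x03;
    h->adaptation_field_control = (pkt[3] >> 4) & 0x03;
    h->continuity_counter = pkt[3] & 0x0f;
    return true;
}

// Continuity is OK when cc == prev+1 (mod 16); packets without payload
// ('00' reserved or '10' adaptation field only) do not increment the counter.
// Duplicate packets (allowed once) are not handled in this sketch.
inline bool continuity_ok(uint8_t prev_cc, const TsHeader& h) {
    if (h.adaptation_field_control == 0x00 || h.adaptation_field_control == 0x02) {
        return h.continuity_counter == prev_cc;
    }
    return h.continuity_counter == ((prev_cc + 1) & 0x0f);
}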
<nl> + / / <nl> + / / After the occurrence of a discontinuity_indicator set to ' 1 ' in a Transport Stream packet which contains PSI information , <nl> + / / a single discontinuity in the version_number of PSI sections may occur . At the occurrence of such a discontinuity , a <nl> + / / version of the TS_program_map_sections of the appropriate program shall be sent with section_length = = 13 and the <nl> + / / current_next_indicator = = 1 , such that there are no program_descriptors and no elementary streams described . This shall <nl> + / / then be followed by a version of the TS_program_map_section for each affected program with the version_number <nl> + / / incremented by one and the current_next_indicator = = 1 , containing a complete program definition . This indicates a <nl> + / / version change in PSI data . <nl> int8_t discontinuity_indicator ; / / 1bit <nl> - / * * <nl> - * The random_access_indicator is a 1 - bit field that indicates that the current Transport <nl> - * Stream packet , and possibly subsequent Transport Stream packets with the same PID , contain some information to aid <nl> - * random access at this point . Specifically , when the bit is set to ' 1 ' , the next PES packet to start in the payload of Transport <nl> - * Stream packets with the current PID shall contain the first byte of a video sequence header if the PES stream type ( refer <nl> - * to Table 2 - 29 ) is 1 or 2 , or shall contain the first byte of an audio frame if the PES stream type is 3 or 4 . In addition , in <nl> - * the case of video , a presentation timestamp shall be present in the PES packet containing the first picture following the <nl> - * sequence header . In the case of audio , the presentation timestamp shall be present in the PES packet containing the first <nl> - * byte of the audio frame . In the PCR_PID the random_access_indicator may only be set to ' 1 ' in Transport Stream packet <nl> - * containing the PCR fields . <nl> - * / <nl> + / / The random_access_indicator is a 1 - bit field that indicates that the current Transport <nl> + / / Stream packet , and possibly subsequent Transport Stream packets with the same PID , contain some information to aid <nl> + / / random access at this point . Specifically , when the bit is set to ' 1 ' , the next PES packet to start in the payload of Transport <nl> + / / Stream packets with the current PID shall contain the first byte of a video sequence header if the PES stream type ( refer <nl> + / / to Table 2 - 29 ) is 1 or 2 , or shall contain the first byte of an audio frame if the PES stream type is 3 or 4 . In addition , in <nl> + / / the case of video , a presentation timestamp shall be present in the PES packet containing the first picture following the <nl> + / / sequence header . In the case of audio , the presentation timestamp shall be present in the PES packet containing the first <nl> + / / byte of the audio frame . In the PCR_PID the random_access_indicator may only be set to ' 1 ' in Transport Stream packet <nl> + / / containing the PCR fields . <nl> int8_t random_access_indicator ; / / 1bit <nl> - / * * <nl> - * The elementary_stream_priority_indicator is a 1 - bit field . It indicates , among <nl> - * packets with the same PID , the priority of the elementary stream data carried within the payload of this Transport Stream <nl> - * packet . A ' 1 ' indicates that the payload has a higher priority than the payloads of other Transport Stream packets . 
In the <nl> - * case of video , this field may be set to ' 1 ' only if the payload contains one or more bytes from an intra - coded slice . A <nl> - * value of ' 0 ' indicates that the payload has the same priority as all other packets which do not have this bit set to ' 1 ' . <nl> - * / <nl> + / / The elementary_stream_priority_indicator is a 1 - bit field . It indicates , among <nl> + / / packets with the same PID , the priority of the elementary stream data carried within the payload of this Transport Stream <nl> + / / packet . A ' 1 ' indicates that the payload has a higher priority than the payloads of other Transport Stream packets . In the <nl> + / / case of video , this field may be set to ' 1 ' only if the payload contains one or more bytes from an intra - coded slice . A <nl> + / / value of ' 0 ' indicates that the payload has the same priority as all other packets which do not have this bit set to ' 1 ' . <nl> int8_t elementary_stream_priority_indicator ; / / 1bit <nl> - / * * <nl> - * The PCR_flag is a 1 - bit flag . A value of ' 1 ' indicates that the adaptation_field contains a PCR field coded in <nl> - * two parts . A value of ' 0 ' indicates that the adaptation field does not contain any PCR field . <nl> - * / <nl> + / / The PCR_flag is a 1 - bit flag . A value of ' 1 ' indicates that the adaptation_field contains a PCR field coded in <nl> + / / two parts . A value of ' 0 ' indicates that the adaptation field does not contain any PCR field . <nl> int8_t PCR_flag ; / / 1bit <nl> - / * * <nl> - * The OPCR_flag is a 1 - bit flag . A value of ' 1 ' indicates that the adaptation_field contains an OPCR field <nl> - * coded in two parts . A value of ' 0 ' indicates that the adaptation field does not contain any OPCR field . <nl> - * / <nl> + / / The OPCR_flag is a 1 - bit flag . A value of ' 1 ' indicates that the adaptation_field contains an OPCR field <nl> + / / coded in two parts . A value of ' 0 ' indicates that the adaptation field does not contain any OPCR field . <nl> int8_t OPCR_flag ; / / 1bit <nl> - / * * <nl> - * The splicing_point_flag is a 1 - bit flag . When set to ' 1 ' , it indicates that a splice_countdown field <nl> - * shall be present in the associated adaptation field , specifying the occurrence of a splicing point . A value of ' 0 ' indicates <nl> - * that a splice_countdown field is not present in the adaptation field . <nl> - * / <nl> + / / The splicing_point_flag is a 1 - bit flag . When set to ' 1 ' , it indicates that a splice_countdown field <nl> + / / shall be present in the associated adaptation field , specifying the occurrence of a splicing point . A value of ' 0 ' indicates <nl> + / / that a splice_countdown field is not present in the adaptation field . <nl> int8_t splicing_point_flag ; / / 1bit <nl> - / * * <nl> - * The transport_private_data_flag is a 1 - bit flag . A value of ' 1 ' indicates that the <nl> - * adaptation field contains one or more private_data bytes . A value of ' 0 ' indicates the adaptation field does not contain any <nl> - * private_data bytes . <nl> - * / <nl> + / / The transport_private_data_flag is a 1 - bit flag . A value of ' 1 ' indicates that the <nl> + / / adaptation field contains one or more private_data bytes . A value of ' 0 ' indicates the adaptation field does not contain any <nl> + / / private_data bytes . 
<nl> int8_t transport_private_data_flag ; / / 1bit <nl> - / * * <nl> - * The adaptation_field_extension_flag is a 1 - bit field which when set to ' 1 ' indicates <nl> - * the presence of an adaptation field extension . A value of ' 0 ' indicates that an adaptation field extension is not present in <nl> - * the adaptation field . <nl> - * / <nl> + / / The adaptation_field_extension_flag is a 1 - bit field which when set to ' 1 ' indicates <nl> + / / the presence of an adaptation field extension . A value of ' 0 ' indicates that an adaptation field extension is not present in <nl> + / / the adaptation field . <nl> int8_t adaptation_field_extension_flag ; / / 1bit <nl> <nl> - / / if PCR_flag , 6B <nl> - / * * <nl> - * The program_clock_reference ( PCR ) is a <nl> - * 42 - bit field coded in two parts . The first part , program_clock_reference_base , is a 33 - bit field whose value is given by <nl> - * PCR_base ( i ) , as given in equation 2 - 2 . The second part , program_clock_reference_extension , is a 9 - bit field whose value <nl> - * is given by PCR_ext ( i ) , as given in equation 2 - 3 . The PCR indicates the intended time of arrival of the byte containing <nl> - * the last bit of the program_clock_reference_base at the input of the system target decoder . <nl> - * / <nl> + / / If PCR_flag , 6B <nl> + / / The program_clock_reference ( PCR ) is a <nl> + / / 42 - bit field coded in two parts . The first part , program_clock_reference_base , is a 33 - bit field whose value is given by <nl> + / / PCR_base ( i ) , as given in equation 2 - 2 . The second part , program_clock_reference_extension , is a 9 - bit field whose value <nl> + / / is given by PCR_ext ( i ) , as given in equation 2 - 3 . The PCR indicates the intended time of arrival of the byte containing <nl> + / / the last bit of the program_clock_reference_base at the input of the system target decoder . <nl> int64_t program_clock_reference_base ; / / 33bits <nl> - / * * <nl> - * 6bits reserved , must be ' 1 ' <nl> - * / <nl> + / / 6bits reserved , must be ' 1 ' <nl> int8_t const1_value0 ; / / 6bits <nl> int16_t program_clock_reference_extension ; / / 9bits <nl> <nl> - / / if OPCR_flag , 6B <nl> - / * * <nl> - * The optional original <nl> - * program reference ( OPCR ) is a 42 - bit field coded in two parts . These two parts , the base and the extension , are coded <nl> - * identically to the two corresponding parts of the PCR field . The presence of the OPCR is indicated by the OPCR_flag . <nl> - * The OPCR field shall be coded only in Transport Stream packets in which the PCR field is present . OPCRs are permitted <nl> - * in both single program and multiple program Transport Streams . <nl> - * <nl> - * OPCR assists in the reconstruction of a single program Transport Stream from another Transport Stream . When <nl> - * reconstructing the original single program Transport Stream , the OPCR may be copied to the PCR field . The resulting <nl> - * PCR value is valid only if the original single program Transport Stream is reconstructed exactly in its entirety . This <nl> - * would include at least any PSI and private data packets which were present in the original Transport Stream and would <nl> - * possibly require other private arrangements . It also means that the OPCR must be an identical copy of its associated PCR <nl> - * in the original single program Transport Stream . <nl> - * / <nl> + / / If OPCR_flag , 6B <nl> + / / The optional original <nl> + / / program reference ( OPCR ) is a 42 - bit field coded in two parts . 
These two parts , the base and the extension , are coded <nl> + / / identically to the two corresponding parts of the PCR field . The presence of the OPCR is indicated by the OPCR_flag . <nl> + / / The OPCR field shall be coded only in Transport Stream packets in which the PCR field is present . OPCRs are permitted <nl> + / / in both single program and multiple program Transport Streams . <nl> + / / <nl> + / / OPCR assists in the reconstruction of a single program Transport Stream from another Transport Stream . When <nl> + / / reconstructing the original single program Transport Stream , the OPCR may be copied to the PCR field . The resulting <nl> + / / PCR value is valid only if the original single program Transport Stream is reconstructed exactly in its entirety . This <nl> + / / would include at least any PSI and private data packets which were present in the original Transport Stream and would <nl> + / / possibly require other private arrangements . It also means that the OPCR must be an identical copy of its associated PCR <nl> + / / in the original single program Transport Stream . <nl> int64_t original_program_clock_reference_base ; / / 33bits <nl> - / * * <nl> - * 6bits reserved , must be ' 1 ' <nl> - * / <nl> + / / 6bits reserved , must be ' 1 ' <nl> int8_t const1_value2 ; / / 6bits <nl> int16_t original_program_clock_reference_extension ; / / 9bits <nl> <nl> - / / if splicing_point_flag , 1B <nl> - / * * <nl> - * The splice_countdown is an 8 - bit field , representing a value which may be positive or negative . A <nl> - * positive value specifies the remaining number of Transport Stream packets , of the same PID , following the associated <nl> - * Transport Stream packet until a splicing point is reached . Duplicate Transport Stream packets and Transport Stream <nl> - * packets which only contain adaptation fields are excluded . The splicing point is located immediately after the last byte of <nl> - * the Transport Stream packet in which the associated splice_countdown field reaches zero . In the Transport Stream packet <nl> - * where the splice_countdown reaches zero , the last data byte of the Transport Stream packet payload shall be the last byte <nl> - * of a coded audio frame or a coded picture . In the case of video , the corresponding access unit may or may not be <nl> - * terminated by a sequence_end_code . Transport Stream packets with the same PID , which follow , may contain data from <nl> - * a different elementary stream of the same type . <nl> - * <nl> - * The payload of the next Transport Stream packet of the same PID ( duplicate packets and packets without payload being <nl> - * excluded ) shall commence with the first byte of a PES packet . In the case of audio , the PES packet payload shall <nl> - * commence with an access point . In the case of video , the PES packet payload shall commence with an access point , or <nl> - * with a sequence_end_code , followed by an access point . Thus , the previous coded audio frame or coded picture aligns <nl> - * with the packet boundary , or is padded to make this so . Subsequent to the splicing point , the countdown field may also <nl> - * be present . When the splice_countdown is a negative number whose value is minus n ( - n ) , it indicates that the associated <nl> - * Transport Stream packet is the n - th packet following the splicing point ( duplicate packets and packets without payload <nl> - * being excluded ) . 
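For reference, the PCR described above is reassembled as PCR = program_clock_reference_base * 300 + program_clock_reference_extension, expressed in 27 MHz units (the base alone is a 90 kHz value). A small hypothetical helper, not SRS code, makes the arithmetic concrete:

// Hedged sketch: combine the 33-bit PCR base (90 kHz) and 9-bit extension
// (0..299 at 27 MHz) into a single 27 MHz value, then convert to milliseconds.
#include <cstdint>

inline int64_t pcr_27mhz(int64_t pcr_base, int16_t pcr_ext) {
    return pcr_base * 300 + pcr_ext;               // 27 MHz ticks
}

inline double pcr_to_ms(int64_t pcr_base, int16_t pcr_ext) {
    return pcr_27mhz(pcr_base, pcr_ext) / 27000.0; // 27,000 ticks per millisecond
}

// Example: base = 90000 (1 second at 90 kHz), ext = 150
//   pcr_27mhz = 90000 * 300 + 150 = 27,000,150 ticks, about 1000.006 ms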
<nl> - * <nl> - * For the purposes of this subclause , an access point is defined as follows : <nl> - * Video - The first byte of a video_sequence_header . <nl> - * Audio - The first byte of an audio frame . <nl> - * / <nl> + / / If splicing_point_flag , 1B <nl> + / / The splice_countdown is an 8 - bit field , representing a value which may be positive or negative . A <nl> + / / positive value specifies the remaining number of Transport Stream packets , of the same PID , following the associated <nl> + / / Transport Stream packet until a splicing point is reached . Duplicate Transport Stream packets and Transport Stream <nl> + / / packets which only contain adaptation fields are excluded . The splicing point is located immediately after the last byte of <nl> + / / the Transport Stream packet in which the associated splice_countdown field reaches zero . In the Transport Stream packet <nl> + / / where the splice_countdown reaches zero , the last data byte of the Transport Stream packet payload shall be the last byte <nl> + / / of a coded audio frame or a coded picture . In the case of video , the corresponding access unit may or may not be <nl> + / / terminated by a sequence_end_code . Transport Stream packets with the same PID , which follow , may contain data from <nl> + / / a different elementary stream of the same type . <nl> + / / <nl> + / / The payload of the next Transport Stream packet of the same PID ( duplicate packets and packets without payload being <nl> + / / excluded ) shall commence with the first byte of a PES packet . In the case of audio , the PES packet payload shall <nl> + / / commence with an access point . In the case of video , the PES packet payload shall commence with an access point , or <nl> + / / with a sequence_end_code , followed by an access point . Thus , the previous coded audio frame or coded picture aligns <nl> + / / with the packet boundary , or is padded to make this so . Subsequent to the splicing point , the countdown field may also <nl> + / / be present . When the splice_countdown is a negative number whose value is minus n ( - n ) , it indicates that the associated <nl> + / / Transport Stream packet is the n - th packet following the splicing point ( duplicate packets and packets without payload <nl> + / / being excluded ) . <nl> + / / <nl> + / / For the purposes of this subclause , an access point is defined as follows : <nl> + / / Video - The first byte of a video_sequence_header . <nl> + / / Audio - The first byte of an audio frame . <nl> int8_t splice_countdown ; / / 8bits <nl> <nl> - / / if transport_private_data_flag , 1 + p [ 0 ] B <nl> + / / If transport_private_data_flag , 1 + p [ 0 ] B <nl> std : : vector < char > transport_private_data ; / / [ transport_private_data_length ] bytes <nl> <nl> - / / if adaptation_field_extension_flag , 2 + x B <nl> - / * * <nl> - * The adaptation_field_extension_length is an 8 - bit field . It indicates the number of <nl> - * bytes of the extended adaptation field data immediately following this field , including reserved bytes if present . <nl> - * / <nl> + / / If adaptation_field_extension_flag , 2 + x B <nl> + / / The adaptation_field_extension_length is an 8 - bit field . It indicates the number of <nl> + / / bytes of the extended adaptation field data immediately following this field , including reserved bytes if present . 
<nl> uint8_t adaptation_field_extension_length ; / / 8bits <nl> - / * * <nl> - * This is a 1 - bit field which when set to ' 1 ' indicates the presence of the ltw_offset <nl> - * field . <nl> - * / <nl> + / / This is a 1 - bit field which when set to ' 1 ' indicates the presence of the ltw_offset <nl> + / / field . <nl> int8_t ltw_flag ; / / 1bit <nl> - / * * <nl> - * This is a 1 - bit field which when set to ' 1 ' indicates the presence of the piecewise_rate field . <nl> - * / <nl> + / / This is a 1 - bit field which when set to ' 1 ' indicates the presence of the piecewise_rate field . <nl> int8_t piecewise_rate_flag ; / / 1bit <nl> - / * * <nl> - * This is a 1 - bit flag which when set to ' 1 ' indicates that the splice_type and DTS_next_AU fields <nl> - * are present . A value of ' 0 ' indicates that neither splice_type nor DTS_next_AU fields are present . This field shall not be <nl> - * set to ' 1 ' in Transport Stream packets in which the splicing_point_flag is not set to ' 1 ' . Once it is set to ' 1 ' in a Transport <nl> - * Stream packet in which the splice_countdown is positive , it shall be set to ' 1 ' in all the subsequent Transport Stream <nl> - * packets of the same PID that have the splicing_point_flag set to ' 1 ' , until the packet in which the splice_countdown <nl> - * reaches zero ( including this packet ) . When this flag is set , if the elementary stream carried in this PID is an audio stream , <nl> - * the splice_type field shall be set to ' 0000 ' . If the elementary stream carried in this PID is a video stream , it shall fulfil the <nl> - * constraints indicated by the splice_type value . <nl> - * / <nl> + / / This is a 1 - bit flag which when set to ' 1 ' indicates that the splice_type and DTS_next_AU fields <nl> + / / are present . A value of ' 0 ' indicates that neither splice_type nor DTS_next_AU fields are present . This field shall not be <nl> + / / set to ' 1 ' in Transport Stream packets in which the splicing_point_flag is not set to ' 1 ' . Once it is set to ' 1 ' in a Transport <nl> + / / Stream packet in which the splice_countdown is positive , it shall be set to ' 1 ' in all the subsequent Transport Stream <nl> + / / packets of the same PID that have the splicing_point_flag set to ' 1 ' , until the packet in which the splice_countdown <nl> + / / reaches zero ( including this packet ) . When this flag is set , if the elementary stream carried in this PID is an audio stream , <nl> + / / the splice_type field shall be set to ' 0000 ' . If the elementary stream carried in this PID is a video stream , it shall fulfil the <nl> + / / constraints indicated by the splice_type value . <nl> int8_t seamless_splice_flag ; / / 1bit <nl> - / * * <nl> - * reserved 5bits , must be ' 1 ' <nl> - * / <nl> + / / reserved 5bits , must be ' 1 ' <nl> int8_t const1_value1 ; / / 5bits <nl> / / if ltw_flag , 2B <nl> - / * * <nl> - * ( legal time window_valid_flag ) - This is a 1 - bit field which when set to ' 1 ' indicates that the value of the <nl> - * ltw_offset shall be valid . A value of ' 0 ' indicates that the value in the ltw_offset field is undefined . <nl> - * / <nl> + / / ( legal time window_valid_flag ) - This is a 1 - bit field which when set to ' 1 ' indicates that the value of the <nl> + / / ltw_offset shall be valid . A value of ' 0 ' indicates that the value in the ltw_offset field is undefined . 
<nl> int8_t ltw_valid_flag ; / / 1bit <nl> - / * * <nl> - * ( legal time window offset ) - This is a 15 - bit field , the value of which is defined only if the ltw_valid flag has <nl> - * a value of ' 1 ' . When defined , the legal time window offset is in units of ( 300 / fs ) seconds , where fs is the system clock <nl> - * frequency of the program that this PID belongs to , and fulfils : <nl> - * offset = t1 ( i ) - t ( i ) <nl> - * ltw_offset = offset / / 1 <nl> - * where i is the index of the first byte of this Transport Stream packet , offset is the value encoded in this field , t ( i ) is the <nl> - * arrival time of byte i in the T - STD , and t1 ( i ) is the upper bound in time of a time interval called the Legal Time Window <nl> - * which is associated with this Transport Stream packet . <nl> - * / <nl> + / / ( legal time window offset ) - This is a 15 - bit field , the value of which is defined only if the ltw_valid flag has <nl> + / / a value of ' 1 ' . When defined , the legal time window offset is in units of ( 300 / fs ) seconds , where fs is the system clock <nl> + / / frequency of the program that this PID belongs to , and fulfils : <nl> + / / offset = t1 ( i ) - t ( i ) <nl> + / / ltw_offset = offset / / 1 <nl> + / / where i is the index of the first byte of this Transport Stream packet , offset is the value encoded in this field , t ( i ) is the <nl> + / / arrival time of byte i in the T - STD , and t1 ( i ) is the upper bound in time of a time interval called the Legal Time Window <nl> + / / which is associated with this Transport Stream packet . <nl> int16_t ltw_offset ; / / 15bits <nl> / / if piecewise_rate_flag , 3B <nl> / / 2bits reserved <nl> - / * * <nl> - * The meaning of this 22 - bit field is only defined when both the ltw_flag and the ltw_valid_flag are set <nl> - * to ' 1 ' . When defined , it is a positive integer specifying a hypothetical bitrate R which is used to define the end times of <nl> - * the Legal Time Windows of Transport Stream packets of the same PID that follow this packet but do not include the <nl> - * legal_time_window_offset field . <nl> - * / <nl> + / / The meaning of this 22 - bit field is only defined when both the ltw_flag and the ltw_valid_flag are set <nl> + / / to ' 1 ' . When defined , it is a positive integer specifying a hypothetical bitrate R which is used to define the end times of <nl> + / / the Legal Time Windows of Transport Stream packets of the same PID that follow this packet but do not include the <nl> + / / legal_time_window_offset field . <nl> int32_t piecewise_rate ; / / 22bits <nl> / / if seamless_splice_flag , 5B <nl> - / * * <nl> - * This is a 4 - bit field . From the first occurrence of this field onwards , it shall have the same value in all the <nl> - * subsequent Transport Stream packets of the same PID in which it is present , until the packet in which the <nl> - * splice_countdown reaches zero ( including this packet ) . If the elementary stream carried in that PID is an audio stream , <nl> - * this field shall have the value ' 0000 ' . If the elementary stream carried in that PID is a video stream , this field indicates the <nl> - * conditions that shall be respected by this elementary stream for splicing purposes . These conditions are defined as a <nl> - * function of profile , level and splice_type in Table 2 - 7 through Table 2 - 16 . <nl> - * / <nl> + / / This is a 4 - bit field . 
From the first occurrence of this field onwards , it shall have the same value in all the <nl> + / / subsequent Transport Stream packets of the same PID in which it is present , until the packet in which the <nl> + / / splice_countdown reaches zero ( including this packet ) . If the elementary stream carried in that PID is an audio stream , <nl> + / / this field shall have the value ' 0000 ' . If the elementary stream carried in that PID is a video stream , this field indicates the <nl> + / / conditions that shall be respected by this elementary stream for splicing purposes . These conditions are defined as a <nl> + / / function of profile , level and splice_type in Table 2 - 7 through Table 2 - 16 . <nl> int8_t splice_type ; / / 4bits <nl> - / * * <nl> - * ( decoding time stamp next access unit ) - This is a 33 - bit field , coded in three parts . In the case of <nl> - * continuous and periodic decoding through this splicing point it indicates the decoding time of the first access unit <nl> - * following the splicing point . This decoding time is expressed in the time base which is valid in the Transport Stream <nl> - * packet in which the splice_countdown reaches zero . From the first occurrence of this field onwards , it shall have the <nl> - * same value in all the subsequent Transport Stream packets of the same PID in which it is present , until the packet in <nl> - * which the splice_countdown reaches zero ( including this packet ) . <nl> - * / <nl> + / / ( decoding time stamp next access unit ) - This is a 33 - bit field , coded in three parts . In the case of <nl> + / / continuous and periodic decoding through this splicing point it indicates the decoding time of the first access unit <nl> + / / following the splicing point . This decoding time is expressed in the time base which is valid in the Transport Stream <nl> + / / packet in which the splice_countdown reaches zero . From the first occurrence of this field onwards , it shall have the <nl> + / / same value in all the subsequent Transport Stream packets of the same PID in which it is present , until the packet in <nl> + / / which the splice_countdown reaches zero ( including this packet ) . <nl> int8_t DTS_next_AU0 ; / / 3bits <nl> int8_t marker_bit0 ; / / 1bit <nl> int16_t DTS_next_AU1 ; / / 15bits <nl> class SrsTsAdaptationField <nl> int16_t DTS_next_AU2 ; / / 15bits <nl> int8_t marker_bit2 ; / / 1bit <nl> / / left bytes . <nl> - / * * <nl> - * This is a fixed 8 - bit value equal to ' 1111 1111 ' that can be inserted by the encoder . It is discarded by the <nl> - * decoder . <nl> - * / <nl> + / / This is a fixed 8 - bit value equal to ' 1111 1111 ' that can be inserted by the encoder . It is discarded by the <nl> + / / decoder . <nl> int nb_af_ext_reserved ; <nl> <nl> / / left bytes . <nl> - / * * <nl> - * This is a fixed 8 - bit value equal to ' 1111 1111 ' that can be inserted by the encoder . It is discarded by the <nl> - * decoder . <nl> - * / <nl> + / / This is a fixed 8 - bit value equal to ' 1111 1111 ' that can be inserted by the encoder . It is discarded by the <nl> + / / decoder . <nl> int nb_af_reserved ; <nl> private : <nl> SrsTsPacket * packet ; <nl> class SrsTsAdaptationField <nl> virtual srs_error_t encode ( SrsBuffer * stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * 2 . 4 . 4 . 4 Table_id assignments , hls - mpeg - ts - iso13818 - 1 . pdf , page 62 <nl> - * The table_id field identifies the contents of a Transport Stream PSI section as shown in Table 2 - 26 . <nl> - * / <nl> + / / 2 . 4 . 4 . 
4 Table_id assignments , hls - mpeg - ts - iso13818 - 1 . pdf , page 62 <nl> + / / The table_id field identifies the contents of a Transport Stream PSI section as shown in Table 2 - 26 . <nl> enum SrsTsPsiId <nl> { <nl> / / program_association_section <nl> enum SrsTsPsiId <nl> SrsTsPsiIdForbidden = 0xFF , <nl> } ; <nl> <nl> - / * * <nl> - * the payload of ts packet , can be PES or PSI payload . <nl> - * / <nl> + / / The payload of ts packet , can be PES or PSI payload . <nl> class SrsTsPayload <nl> { <nl> protected : <nl> class SrsTsPayload <nl> virtual srs_error_t encode ( SrsBuffer * stream ) = 0 ; <nl> } ; <nl> <nl> - / * * <nl> - * the PES payload of ts packet . <nl> - * 2 . 4 . 3 . 6 PES packet , hls - mpeg - ts - iso13818 - 1 . pdf , page 49 <nl> - * / <nl> + / / The PES payload of ts packet . <nl> + / / 2 . 4 . 3 . 6 PES packet , hls - mpeg - ts - iso13818 - 1 . pdf , page 49 <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> { <nl> public : <nl> / / 3B <nl> - / * * <nl> - * The packet_start_code_prefix is a 24 - bit code . Together with the stream_id that follows it <nl> - * constitutes a packet start code that identifies the beginning of a packet . The packet_start_code_prefix is the bit string <nl> - * ' 0000 0000 0000 0000 0000 0001 ' ( 0x000001 ) . <nl> - * / <nl> + / / The packet_start_code_prefix is a 24 - bit code . Together with the stream_id that follows it <nl> + / / constitutes a packet start code that identifies the beginning of a packet . The packet_start_code_prefix is the bit string <nl> + / / ' 0000 0000 0000 0000 0000 0001 ' ( 0x000001 ) . <nl> int32_t packet_start_code_prefix ; / / 24bits <nl> / / 1B <nl> - / * * <nl> - * In Program Streams , the stream_id specifies the type and number of the elementary stream as defined by the <nl> - * stream_id Table 2 - 18 . In Transport Streams , the stream_id may be set to any valid value which correctly describes the <nl> - * elementary stream type as defined in Table 2 - 18 . In Transport Streams , the elementary stream type is specified in the <nl> - * Program Specific Information as specified in 2 . 4 . 4 . <nl> - * / <nl> + / / In Program Streams , the stream_id specifies the type and number of the elementary stream as defined by the <nl> + / / stream_id Table 2 - 18 . In Transport Streams , the stream_id may be set to any valid value which correctly describes the <nl> + / / elementary stream type as defined in Table 2 - 18 . In Transport Streams , the elementary stream type is specified in the <nl> + / / Program Specific Information as specified in 2 . 4 . 4 . <nl> / / @ see SrsTsPESStreamId , value can be SrsTsPESStreamIdAudioCommon or SrsTsPESStreamIdVideoCommon . <nl> uint8_t stream_id ; / / 8bits <nl> / / 2B <nl> - / * * <nl> - * A 16 - bit field specifying the number of bytes in the PES packet following the last byte of the <nl> - * field . A value of 0 indicates that the PES packet length is neither specified nor bounded and is allowed only in <nl> - * PES packets whose payload consists of bytes from a video elementary stream contained in Transport Stream packets . <nl> - * / <nl> + / / A 16 - bit field specifying the number of bytes in the PES packet following the last byte of the <nl> + / / field . A value of 0 indicates that the PES packet length is neither specified nor bounded and is allowed only in <nl> + / / PES packets whose payload consists of bytes from a video elementary stream contained in Transport Stream packets . 
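The packet_start_code_prefix, stream_id and PES_packet_length described above form the fixed 6-byte prefix of every PES packet. A small sketch of serializing that prefix, assuming the destination buffer holds at least 6 bytes; the function name and the sample stream_id values in the comment are illustrative, not taken from this header:

#include <cstdint>

// Illustrative sketch (not the SRS encoder): write the fixed 6-byte PES prefix,
// i.e. the 24-bit start code 0x000001, the 8-bit stream_id and the 16-bit
// PES_packet_length (0 means unbounded, allowed for video only).
static void write_pes_prefix(uint8_t* p, uint8_t stream_id, uint16_t pes_packet_length)
{
    p[0] = 0x00; p[1] = 0x00; p[2] = 0x01;     // packet_start_code_prefix
    p[3] = stream_id;                          // commonly 0xE0.. for video, 0xC0.. for audio
    p[4] = uint8_t(pes_packet_length >> 8);    // length, high byte
    p[5] = uint8_t(pes_packet_length & 0xFF);  // length, low byte
}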
<nl> uint16_t PES_packet_length ; / / 16bits <nl> <nl> / / 1B <nl> - / * * <nl> - * 2bits const ' 10 ' <nl> - * / <nl> + / / 2bits const ' 10 ' <nl> int8_t const2bits ; / / 2bits <nl> - / * * <nl> - * The 2 - bit PES_scrambling_control field indicates the scrambling mode of the PES packet <nl> - * payload . When scrambling is performed at the PES level , the PES packet header , including the optional fields when <nl> - * present , shall not be scrambled ( see Table 2 - 19 ) . <nl> - * / <nl> + / / The 2 - bit PES_scrambling_control field indicates the scrambling mode of the PES packet <nl> + / / payload . When scrambling is performed at the PES level , the PES packet header , including the optional fields when <nl> + / / present , shall not be scrambled ( see Table 2 - 19 ) . <nl> int8_t PES_scrambling_control ; / / 2bits <nl> - / * * <nl> - * This is a 1 - bit field indicating the priority of the payload in this PES packet . A ' 1 ' indicates a higher <nl> - * priority of the payload of the PES packet payload than a PES packet payload with this field set to ' 0 ' . A multiplexor can <nl> - * use the PES_priority bit to prioritize its data within an elementary stream . This field shall not be changed by the transport <nl> - * mechanism . <nl> - * / <nl> + / / This is a 1 - bit field indicating the priority of the payload in this PES packet . A ' 1 ' indicates a higher <nl> + / / priority of the payload of the PES packet payload than a PES packet payload with this field set to ' 0 ' . A multiplexor can <nl> + / / use the PES_priority bit to prioritize its data within an elementary stream . This field shall not be changed by the transport <nl> + / / mechanism . <nl> int8_t PES_priority ; / / 1bit <nl> - / * * <nl> - * This is a 1 - bit flag . When set to a value of ' 1 ' it indicates that the PES packet header is <nl> - * immediately followed by the video start code or audio syncword indicated in the data_stream_alignment_descriptor <nl> - * in 2 . 6 . 10 if this descriptor is present . If set to a value of ' 1 ' and the descriptor is not present , alignment as indicated in <nl> - * alignment_type ' 01 ' in Table 2 - 47 and Table 2 - 48 is required . When set to a value of ' 0 ' it is not defined whether any such <nl> - * alignment occurs or not . <nl> - * / <nl> + / / This is a 1 - bit flag . When set to a value of ' 1 ' it indicates that the PES packet header is <nl> + / / immediately followed by the video start code or audio syncword indicated in the data_stream_alignment_descriptor <nl> + / / in 2 . 6 . 10 if this descriptor is present . If set to a value of ' 1 ' and the descriptor is not present , alignment as indicated in <nl> + / / alignment_type ' 01 ' in Table 2 - 47 and Table 2 - 48 is required . When set to a value of ' 0 ' it is not defined whether any such <nl> + / / alignment occurs or not . <nl> int8_t data_alignment_indicator ; / / 1bit <nl> - / * * <nl> - * This is a 1 - bit field . When set to ' 1 ' it indicates that the material of the associated PES packet payload is <nl> - * protected by copyright . When set to ' 0 ' it is not defined whether the material is protected by copyright . A copyright <nl> - * descriptor described in 2 . 6 . 24 is associated with the elementary stream which contains this PES packet and the copyright <nl> - * flag is set to ' 1 ' if the descriptor applies to the material contained in this PES packet <nl> - * / <nl> + / / This is a 1 - bit field . 
When set to ' 1 ' it indicates that the material of the associated PES packet payload is <nl> + / / protected by copyright . When set to ' 0 ' it is not defined whether the material is protected by copyright . A copyright <nl> + / / descriptor described in 2 . 6 . 24 is associated with the elementary stream which contains this PES packet and the copyright <nl> + / / flag is set to ' 1 ' if the descriptor applies to the material contained in this PES packet <nl> int8_t copyright ; / / 1bit <nl> - / * * <nl> - * This is a 1 - bit field . When set to ' 1 ' the contents of the associated PES packet payload is an original . <nl> - * When set to ' 0 ' it indicates that the contents of the associated PES packet payload is a copy . <nl> - * / <nl> + / / This is a 1 - bit field . When set to ' 1 ' the contents of the associated PES packet payload is an original . <nl> + / / When set to ' 0 ' it indicates that the contents of the associated PES packet payload is a copy . <nl> int8_t original_or_copy ; / / 1bit <nl> <nl> / / 1B <nl> - / * * <nl> - * This is a 2 - bit field . When the PTS_DTS_flags field is set to ' 10 ' , the PTS fields shall be present in <nl> - * the PES packet header . When the PTS_DTS_flags field is set to ' 11 ' , both the PTS fields and DTS fields shall be present <nl> - * in the PES packet header . When the PTS_DTS_flags field is set to ' 00 ' no PTS or DTS fields shall be present in the PES <nl> - * packet header . The value ' 01 ' is forbidden . <nl> - * / <nl> + / / This is a 2 - bit field . When the PTS_DTS_flags field is set to ' 10 ' , the PTS fields shall be present in <nl> + / / the PES packet header . When the PTS_DTS_flags field is set to ' 11 ' , both the PTS fields and DTS fields shall be present <nl> + / / in the PES packet header . When the PTS_DTS_flags field is set to ' 00 ' no PTS or DTS fields shall be present in the PES <nl> + / / packet header . The value ' 01 ' is forbidden . <nl> int8_t PTS_DTS_flags ; / / 2bits <nl> - / * * <nl> - * A 1 - bit flag , which when set to ' 1 ' indicates that ESCR base and extension fields are present in the PES <nl> - * packet header . When set to ' 0 ' it indicates that no ESCR fields are present . <nl> - * / <nl> + / / A 1 - bit flag , which when set to ' 1 ' indicates that ESCR base and extension fields are present in the PES <nl> + / / packet header . When set to ' 0 ' it indicates that no ESCR fields are present . <nl> int8_t ESCR_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag , which when set to ' 1 ' indicates that the ES_rate field is present in the PES packet header . <nl> - * When set to ' 0 ' it indicates that no ES_rate field is present . <nl> - * / <nl> + / / A 1 - bit flag , which when set to ' 1 ' indicates that the ES_rate field is present in the PES packet header . <nl> + / / When set to ' 0 ' it indicates that no ES_rate field is present . <nl> int8_t ES_rate_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag , which when set to ' 1 ' it indicates the presence of an 8 - bit trick mode field . When <nl> - * set to ' 0 ' it indicates that this field is not present . <nl> - * / <nl> + / / A 1 - bit flag , which when set to ' 1 ' it indicates the presence of an 8 - bit trick mode field . When <nl> + / / set to ' 0 ' it indicates that this field is not present . <nl> int8_t DSM_trick_mode_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag , which when set to ' 1 ' indicates the presence of the additional_copy_info field . <nl> - * When set to ' 0 ' it indicates that this field is not present . 
<nl> - * / <nl> + / / A 1 - bit flag , which when set to ' 1 ' indicates the presence of the additional_copy_info field . <nl> + / / When set to ' 0 ' it indicates that this field is not present . <nl> int8_t additional_copy_info_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag , which when set to ' 1 ' indicates that a CRC field is present in the PES packet . When set to <nl> - * ' 0 ' it indicates that this field is not present . <nl> - * / <nl> + / / A 1 - bit flag , which when set to ' 1 ' indicates that a CRC field is present in the PES packet . When set to <nl> + / / ' 0 ' it indicates that this field is not present . <nl> int8_t PES_CRC_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag , which when set to ' 1 ' indicates that an extension field exists in this PES packet <nl> - * header . When set to ' 0 ' it indicates that this field is not present . <nl> - * / <nl> + / / A 1 - bit flag , which when set to ' 1 ' indicates that an extension field exists in this PES packet <nl> + / / header . When set to ' 0 ' it indicates that this field is not present . <nl> int8_t PES_extension_flag ; / / 1bit <nl> <nl> / / 1B <nl> - / * * <nl> - * An 8 - bit field specifying the total number of bytes occupied by the optional fields and any <nl> - * stuffing bytes contained in this PES packet header . The presence of optional fields is indicated in the byte that precedes <nl> - * the PES_header_data_length field . <nl> - * / <nl> + / / An 8 - bit field specifying the total number of bytes occupied by the optional fields and any <nl> + / / stuffing bytes contained in this PES packet header . The presence of optional fields is indicated in the byte that precedes <nl> + / / the PES_header_data_length field . <nl> uint8_t PES_header_data_length ; / / 8bits <nl> <nl> / / 5B <nl> - / * * <nl> - * Presentation times shall be related to decoding times as follows : The PTS is a 33 - bit <nl> - * number coded in three separate fields . It indicates the time of presentation , tp n ( k ) , in the system target decoder of a <nl> - * presentation unit k of elementary stream n . The value of PTS is specified in units of the period of the system clock <nl> - * frequency divided by 300 ( yielding 90 kHz ) . The presentation time is derived from the PTS according to equation 2 - 11 <nl> - * below . Refer to 2 . 7 . 4 for constraints on the frequency of coding presentation timestamps . <nl> - * / <nl> + / / Presentation times shall be related to decoding times as follows : The PTS is a 33 - bit <nl> + / / number coded in three separate fields . It indicates the time of presentation , tp n ( k ) , in the system target decoder of a <nl> + / / presentation unit k of elementary stream n . The value of PTS is specified in units of the period of the system clock <nl> + / / frequency divided by 300 ( yielding 90 kHz ) . The presentation time is derived from the PTS according to equation 2 - 11 <nl> + / / below . Refer to 2 . 7 . 4 for constraints on the frequency of coding presentation timestamps . <nl> / / = = = = = = = = = = = 1B <nl> / / 4bits const <nl> / / 3bits PTS [ 32 . . 30 ] <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> int64_t pts ; / / 33bits <nl> <nl> / / 5B <nl> - / * * <nl> - * The DTS is a 33 - bit number coded in three separate fields . It indicates the decoding time , <nl> - * td n ( j ) , in the system target decoder of an access unit j of elementary stream n . The value of DTS is specified in units of <nl> - * the period of the system clock frequency divided by 300 ( yielding 90 kHz ) . 
<nl> - * / <nl> + / / The DTS is a 33 - bit number coded in three separate fields . It indicates the decoding time , <nl> + / / td n ( j ) , in the system target decoder of an access unit j of elementary stream n . The value of DTS is specified in units of <nl> + / / the period of the system clock frequency divided by 300 ( yielding 90 kHz ) . <nl> / / = = = = = = = = = = = 1B <nl> / / 4bits const <nl> / / 3bits DTS [ 32 . . 30 ] <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> int64_t dts ; / / 33bits <nl> <nl> / / 6B <nl> - / * * <nl> - * The elementary stream clock reference is a 42 - bit field coded in two parts . The first <nl> - * part , ESCR_base , is a 33 - bit field whose value is given by ESCR_base ( i ) , as given in equation 2 - 14 . The second part , <nl> - * ESCR_ext , is a 9 - bit field whose value is given by ESCR_ext ( i ) , as given in equation 2 - 15 . The ESCR field indicates the <nl> - * intended time of arrival of the byte containing the last bit of the ESCR_base at the input of the PES - STD for PES streams <nl> - * ( refer to 2 . 5 . 2 . 4 ) . <nl> - * / <nl> + / / The elementary stream clock reference is a 42 - bit field coded in two parts . The first <nl> + / / part , ESCR_base , is a 33 - bit field whose value is given by ESCR_base ( i ) , as given in equation 2 - 14 . The second part , <nl> + / / ESCR_ext , is a 9 - bit field whose value is given by ESCR_ext ( i ) , as given in equation 2 - 15 . The ESCR field indicates the <nl> + / / intended time of arrival of the byte containing the last bit of the ESCR_base at the input of the PES - STD for PES streams <nl> + / / ( refer to 2 . 5 . 2 . 4 ) . <nl> / / 2bits reserved <nl> / / 3bits ESCR_base [ 32 . . 30 ] <nl> / / 1bit const ' 1 ' <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> int16_t ESCR_extension ; / / 9bits <nl> <nl> / / 3B <nl> - / * * <nl> - * The ES_rate field is a 22 - bit unsigned integer specifying the rate at which the <nl> - * system target decoder receives bytes of the PES packet in the case of a PES stream . The ES_rate is valid in the PES <nl> - * packet in which it is included and in subsequent PES packets of the same PES stream until a new ES_rate field is <nl> - * encountered . The value of the ES_rate is measured in units of 50 bytes / second . The value 0 is forbidden . The value of the <nl> - * ES_rate is used to define the time of arrival of bytes at the input of a P - STD for PES streams defined in 2 . 5 . 2 . 4 . The <nl> - * value encoded in the ES_rate field may vary from PES_packet to PES_packet . <nl> - * / <nl> + / / The ES_rate field is a 22 - bit unsigned integer specifying the rate at which the <nl> + / / system target decoder receives bytes of the PES packet in the case of a PES stream . The ES_rate is valid in the PES <nl> + / / packet in which it is included and in subsequent PES packets of the same PES stream until a new ES_rate field is <nl> + / / encountered . The value of the ES_rate is measured in units of 50 bytes / second . The value 0 is forbidden . The value of the <nl> + / / ES_rate is used to define the time of arrival of bytes at the input of a P - STD for PES streams defined in 2 . 5 . 2 . 4 . The <nl> + / / value encoded in the ES_rate field may vary from PES_packet to PES_packet . <nl> / / 1bit const ' 1 ' <nl> / / 22bits ES_rate <nl> / / 1bit const ' 1 ' <nl> int32_t ES_rate ; / / 22bits <nl> <nl> / / 1B <nl> - / * * <nl> - * A 3 - bit field that indicates which trick mode is applied to the associated video stream . 
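The PTS and DTS fields above are both 33-bit values spread over 5 bytes: a 4-bit prefix, then 3 + 15 + 15 value bits, each group terminated by a '1' marker bit. SrsTsPayloadPES declares a helper for this (encode_33bits_dts_pts, further below); the standalone sketch here only illustrates the bit layout and is not the SRS implementation. Per ISO 13818-1 the prefix fb is '0010' for a lone PTS, or '0011' and '0001' when both PTS and DTS are present:

#include <cstdint>

// Illustrative sketch of the 5-byte encoding of a 33-bit PTS/DTS value.
static void encode_33bits(uint8_t* p, uint8_t fb, int64_t v)
{
    p[0] = uint8_t((fb << 4) | uint8_t(((v >> 30) & 0x07) << 1) | 0x01); // prefix, bits 32..30, marker
    p[1] = uint8_t((v >> 22) & 0xFF);                                    // bits 29..22
    p[2] = uint8_t((((v >> 15) & 0x7F) << 1) | 0x01);                    // bits 21..15, marker
    p[3] = uint8_t((v >> 7) & 0xFF);                                     // bits 14..7
    p[4] = uint8_t(((v & 0x7F) << 1) | 0x01);                            // bits 6..0, marker
}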
In cases of <nl> - * other types of elementary streams , the meanings of this field and those defined by the following five bits are undefined . <nl> - * For the definition of trick_mode status , refer to the trick mode section of 2 . 4 . 2 . 3 . <nl> - * / <nl> + / / A 3 - bit field that indicates which trick mode is applied to the associated video stream . In cases of <nl> + / / other types of elementary streams , the meanings of this field and those defined by the following five bits are undefined . <nl> + / / For the definition of trick_mode status , refer to the trick mode section of 2 . 4 . 2 . 3 . <nl> int8_t trick_mode_control ; / / 3bits <nl> int8_t trick_mode_value ; / / 5bits <nl> <nl> / / 1B <nl> / / 1bit const ' 1 ' <nl> - / * * <nl> - * This 7 - bit field contains private data relating to copyright information . <nl> - * / <nl> + / / This 7 - bit field contains private data relating to copyright information . <nl> int8_t additional_copy_info ; / / 7bits <nl> <nl> / / 2B <nl> - / * * <nl> - * The previous_PES_packet_CRC is a 16 - bit field that contains the CRC value that yields <nl> - * a zero output of the 16 registers in the decoder similar to the one defined in Annex A , <nl> - * / <nl> + / / The previous_PES_packet_CRC is a 16 - bit field that contains the CRC value that yields <nl> + / / a zero output of the 16 registers in the decoder similar to the one defined in Annex A , <nl> int16_t previous_PES_packet_CRC ; / / 16bits <nl> <nl> / / 1B <nl> - / * * <nl> - * A 1 - bit flag which when set to ' 1 ' indicates that the PES packet header contains private data . <nl> - * When set to a value of ' 0 ' it indicates that private data is not present in the PES header . <nl> - * / <nl> + / / A 1 - bit flag which when set to ' 1 ' indicates that the PES packet header contains private data . <nl> + / / When set to a value of ' 0 ' it indicates that private data is not present in the PES header . <nl> int8_t PES_private_data_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag which when set to ' 1 ' indicates that an ISO / IEC 11172 - 1 pack header or a <nl> - * Program Stream pack header is stored in this PES packet header . If this field is in a PES packet that is contained in a <nl> - * Program Stream , then this field shall be set to ' 0 ' . In a Transport Stream , when set to the value ' 0 ' it indicates that no pack <nl> - * header is present in the PES header . <nl> - * / <nl> + / / A 1 - bit flag which when set to ' 1 ' indicates that an ISO / IEC 11172 - 1 pack header or a <nl> + / / Program Stream pack header is stored in this PES packet header . If this field is in a PES packet that is contained in a <nl> + / / Program Stream , then this field shall be set to ' 0 ' . In a Transport Stream , when set to the value ' 0 ' it indicates that no pack <nl> + / / header is present in the PES header . <nl> int8_t pack_header_field_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag which when set to ' 1 ' indicates that the <nl> - * program_packet_sequence_counter , MPEG1_MPEG2_identifier , and original_stuff_length fields are present in this <nl> - * PES packet . When set to a value of ' 0 ' it indicates that these fields are not present in the PES header . <nl> - * / <nl> + / / A 1 - bit flag which when set to ' 1 ' indicates that the <nl> + / / program_packet_sequence_counter , MPEG1_MPEG2_identifier , and original_stuff_length fields are present in this <nl> + / / PES packet . When set to a value of ' 0 ' it indicates that these fields are not present in the PES header . 
<nl> int8_t program_packet_sequence_counter_flag ; / / 1bit <nl> - / * * <nl> - * A 1 - bit flag which when set to ' 1 ' indicates that the P - STD_buffer_scale and P - STD_buffer_size <nl> - * are present in the PES packet header . When set to a value of ' 0 ' it indicates that these fields are not present in the <nl> - * PES header . <nl> - * / <nl> + / / A 1 - bit flag which when set to ' 1 ' indicates that the P - STD_buffer_scale and P - STD_buffer_size <nl> + / / are present in the PES packet header . When set to a value of ' 0 ' it indicates that these fields are not present in the <nl> + / / PES header . <nl> int8_t P_STD_buffer_flag ; / / 1bit <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value0 ; / / 3bits <nl> - / * * <nl> - * A 1 - bit field which when set to ' 1 ' indicates the presence of the PES_extension_field_length <nl> - * field and associated fields . When set to a value of ' 0 ' this indicates that the PES_extension_field_length field and any <nl> - * associated fields are not present . <nl> - * / <nl> + / / A 1 - bit field which when set to ' 1 ' indicates the presence of the PES_extension_field_length <nl> + / / field and associated fields . When set to a value of ' 0 ' this indicates that the PES_extension_field_length field and any <nl> + / / associated fields are not present . <nl> int8_t PES_extension_flag_2 ; / / 1bit <nl> <nl> / / 16B <nl> - / * * <nl> - * This is a 16 - byte field which contains private data . This data , combined with the fields before and <nl> - * after , shall not emulate the packet_start_code_prefix ( 0x000001 ) . <nl> - * / <nl> + / / This is a 16 - byte field which contains private data . This data , combined with the fields before and <nl> + / / after , shall not emulate the packet_start_code_prefix ( 0x000001 ) . <nl> std : : vector < char > PES_private_data ; / / 128bits <nl> <nl> / / ( 1 + x ) B <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> <nl> / / 2B <nl> / / 1bit const ' 1 ' <nl> - / * * <nl> - * The program_packet_sequence_counter field is a 7 - bit field . It is an optional <nl> - * counter that increments with each successive PES packet from a Program Stream or from an ISO / IEC 11172 - 1 Stream or <nl> - * the PES packets associated with a single program definition in a Transport Stream , providing functionality similar to a <nl> - * continuity counter ( refer to 2 . 4 . 3 . 2 ) . This allows an application to retrieve the original PES packet sequence of a Program <nl> - * Stream or the original packet sequence of the original ISO / IEC 11172 - 1 stream . The counter will wrap around to 0 after <nl> - * its maximum value . Repetition of PES packets shall not occur . Consequently , no two consecutive PES packets in the <nl> - * program multiplex shall have identical program_packet_sequence_counter values . <nl> - * / <nl> + / / The program_packet_sequence_counter field is a 7 - bit field . It is an optional <nl> + / / counter that increments with each successive PES packet from a Program Stream or from an ISO / IEC 11172 - 1 Stream or <nl> + / / the PES packets associated with a single program definition in a Transport Stream , providing functionality similar to a <nl> + / / continuity counter ( refer to 2 . 4 . 3 . 2 ) . This allows an application to retrieve the original PES packet sequence of a Program <nl> + / / Stream or the original packet sequence of the original ISO / IEC 11172 - 1 stream . 
The counter will wrap around to 0 after <nl> + / / its maximum value . Repetition of PES packets shall not occur . Consequently , no two consecutive PES packets in the <nl> + / / program multiplex shall have identical program_packet_sequence_counter values . <nl> int8_t program_packet_sequence_counter ; / / 7bits <nl> / / 1bit const ' 1 ' <nl> - / * * <nl> - * A 1 - bit flag which when set to ' 1 ' indicates that this PES packet carries information from <nl> - * an ISO / IEC 11172 - 1 stream . When set to ' 0 ' it indicates that this PES packet carries information from a Program Stream . <nl> - * / <nl> + / / A 1 - bit flag which when set to ' 1 ' indicates that this PES packet carries information from <nl> + / / an ISO / IEC 11172 - 1 stream . When set to ' 0 ' it indicates that this PES packet carries information from a Program Stream . <nl> int8_t MPEG1_MPEG2_identifier ; / / 1bit <nl> - / * * <nl> - * This 6 - bit field specifies the number of stuffing bytes used in the original ITU - T <nl> - * Rec . H . 222 . 0 | ISO / IEC 13818 - 1 PES packet header or in the original ISO / IEC 11172 - 1 packet header . <nl> - * / <nl> + / / This 6 - bit field specifies the number of stuffing bytes used in the original ITU - T <nl> + / / Rec . H . 222 . 0 | ISO / IEC 13818 - 1 PES packet header or in the original ISO / IEC 11172 - 1 packet header . <nl> int8_t original_stuff_length ; / / 6bits <nl> <nl> / / 2B <nl> / / 2bits const ' 01 ' <nl> - / * * <nl> - * The P - STD_buffer_scale is a 1 - bit field , the meaning of which is only defined if this PES packet <nl> - * is contained in a Program Stream . It indicates the scaling factor used to interpret the subsequent P - STD_buffer_size field . <nl> - * If the preceding stream_id indicates an audio stream , P - STD_buffer_scale shall have the value ' 0 ' . If the preceding <nl> - * stream_id indicates a video stream , P - STD_buffer_scale shall have the value ' 1 ' . For all other stream types , the value <nl> - * may be either ' 1 ' or ' 0 ' . <nl> - * / <nl> + / / The P - STD_buffer_scale is a 1 - bit field , the meaning of which is only defined if this PES packet <nl> + / / is contained in a Program Stream . It indicates the scaling factor used to interpret the subsequent P - STD_buffer_size field . <nl> + / / If the preceding stream_id indicates an audio stream , P - STD_buffer_scale shall have the value ' 0 ' . If the preceding <nl> + / / stream_id indicates a video stream , P - STD_buffer_scale shall have the value ' 1 ' . For all other stream types , the value <nl> + / / may be either ' 1 ' or ' 0 ' . <nl> int8_t P_STD_buffer_scale ; / / 1bit <nl> - / * * <nl> - * The P - STD_buffer_size is a 13 - bit unsigned integer , the meaning of which is only defined if this <nl> - * PES packet is contained in a Program Stream . It defines the size of the input buffer , BS n , in the P - STD . If <nl> - * P - STD_buffer_scale has the value ' 0 ' , then the P - STD_buffer_size measures the buffer size in units of 128 bytes . If <nl> - * P - STD_buffer_scale has the value ' 1 ' , then the P - STD_buffer_size measures the buffer size in units of 1024 bytes . <nl> - * / <nl> + / / The P - STD_buffer_size is a 13 - bit unsigned integer , the meaning of which is only defined if this <nl> + / / PES packet is contained in a Program Stream . It defines the size of the input buffer , BS n , in the P - STD . If <nl> + / / P - STD_buffer_scale has the value ' 0 ' , then the P - STD_buffer_size measures the buffer size in units of 128 bytes . 
If <nl> + / / P - STD_buffer_scale has the value ' 1 ' , then the P - STD_buffer_size measures the buffer size in units of 1024 bytes . <nl> int16_t P_STD_buffer_size ; / / 13bits <nl> <nl> / / ( 1 + x ) B <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> std : : vector < char > PES_extension_field ; / / [ PES_extension_field_length ] bytes <nl> <nl> / / NB <nl> - / * * <nl> - * This is a fixed 8 - bit value equal to ' 1111 1111 ' that can be inserted by the encoder , for example to meet <nl> - * the requirements of the channel . It is discarded by the decoder . No more than 32 stuffing bytes shall be present in one <nl> - * PES packet header . <nl> - * / <nl> + / / This is a fixed 8 - bit value equal to ' 1111 1111 ' that can be inserted by the encoder , for example to meet <nl> + / / the requirements of the channel . It is discarded by the decoder . No more than 32 stuffing bytes shall be present in one <nl> + / / PES packet header . <nl> int nb_stuffings ; <nl> <nl> / / NB <nl> - / * * <nl> - * PES_packet_data_bytes shall be contiguous bytes of data from the elementary stream <nl> - * indicated by the packet ' s stream_id or PID . When the elementary stream data conforms to ITU - T <nl> - * Rec . H . 262 | ISO / IEC 13818 - 2 or ISO / IEC 13818 - 3 , the PES_packet_data_bytes shall be byte aligned to the bytes of this <nl> - * Recommendation | International Standard . The byte - order of the elementary stream shall be preserved . The number of <nl> - * PES_packet_data_bytes , N , is specified by the PES_packet_length field . N shall be equal to the value indicated in the <nl> - * PES_packet_length minus the number of bytes between the last byte of the PES_packet_length field and the first <nl> - * PES_packet_data_byte . <nl> - * <nl> - * In the case of a private_stream_1 , private_stream_2 , ECM_stream , or EMM_stream , the contents of the <nl> - * PES_packet_data_byte field are user definable and will not be specified by ITU - T | ISO / IEC in the future . <nl> - * / <nl> + / / PES_packet_data_bytes shall be contiguous bytes of data from the elementary stream <nl> + / / indicated by the packet ' s stream_id or PID . When the elementary stream data conforms to ITU - T <nl> + / / Rec . H . 262 | ISO / IEC 13818 - 2 or ISO / IEC 13818 - 3 , the PES_packet_data_bytes shall be byte aligned to the bytes of this <nl> + / / Recommendation | International Standard . The byte - order of the elementary stream shall be preserved . The number of <nl> + / / PES_packet_data_bytes , N , is specified by the PES_packet_length field . N shall be equal to the value indicated in the <nl> + / / PES_packet_length minus the number of bytes between the last byte of the PES_packet_length field and the first <nl> + / / PES_packet_data_byte . <nl> + / / <nl> + / / In the case of a private_stream_1 , private_stream_2 , ECM_stream , or EMM_stream , the contents of the <nl> + / / PES_packet_data_byte field are user definable and will not be specified by ITU - T | ISO / IEC in the future . <nl> int nb_bytes ; <nl> <nl> / / NB <nl> - / * * <nl> - * This is a fixed 8 - bit value equal to ' 1111 1111 ' . It is discarded by the decoder . <nl> - * / <nl> + / / This is a fixed 8 - bit value equal to ' 1111 1111 ' . It is discarded by the decoder . 
<nl> int nb_paddings ; <nl> public : <nl> SrsTsPayloadPES ( SrsTsPacket * p ) ; <nl> class SrsTsPayloadPES : public SrsTsPayload <nl> virtual srs_error_t encode_33bits_dts_pts ( SrsBuffer * stream , uint8_t fb , int64_t v ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the PSI payload of ts packet . <nl> - * 2 . 4 . 4 Program specific information , hls - mpeg - ts - iso13818 - 1 . pdf , page 59 <nl> - * / <nl> + / / The PSI payload of ts packet . <nl> + / / 2 . 4 . 4 Program specific information , hls - mpeg - ts - iso13818 - 1 . pdf , page 59 <nl> class SrsTsPayloadPSI : public SrsTsPayload <nl> { <nl> public : <nl> / / 1B <nl> - / * * <nl> - * This is an 8 - bit field whose value shall be the number of bytes , immediately following the pointer_field <nl> - * until the first byte of the first section that is present in the payload of the Transport Stream packet ( so a value of 0x00 in <nl> - * the pointer_field indicates that the section starts immediately after the pointer_field ) . When at least one section begins in <nl> - * a given Transport Stream packet , then the payload_unit_start_indicator ( refer to 2 . 4 . 3 . 2 ) shall be set to 1 and the first <nl> - * byte of the payload of that Transport Stream packet shall contain the pointer . When no section begins in a given <nl> - * Transport Stream packet , then the payload_unit_start_indicator shall be set to 0 and no pointer shall be sent in the <nl> - * payload of that packet . <nl> - * / <nl> + / / This is an 8 - bit field whose value shall be the number of bytes , immediately following the pointer_field <nl> + / / until the first byte of the first section that is present in the payload of the Transport Stream packet ( so a value of 0x00 in <nl> + / / The pointer_field indicates that the section starts immediately after the pointer_field ) . When at least one section begins in <nl> + / / a given Transport Stream packet , then the payload_unit_start_indicator ( refer to 2 . 4 . 3 . 2 ) shall be set to 1 and the first <nl> + / / byte of the payload of that Transport Stream packet shall contain the pointer . When no section begins in a given <nl> + / / Transport Stream packet , then the payload_unit_start_indicator shall be set to 0 and no pointer shall be sent in the <nl> + / / payload of that packet . <nl> int8_t pointer_field ; <nl> public : <nl> / / 1B <nl> - / * * <nl> - * This is an 8 - bit field , which shall be set to 0x00 as shown in Table 2 - 26 . <nl> - * / <nl> + / / This is an 8 - bit field , which shall be set to 0x00 as shown in Table 2 - 26 . <nl> SrsTsPsiId table_id ; / / 8bits <nl> <nl> / / 2B <nl> - / * * <nl> - * The section_syntax_indicator is a 1 - bit field which shall be set to ' 1 ' . <nl> - * / <nl> + / / The section_syntax_indicator is a 1 - bit field which shall be set to ' 1 ' . <nl> int8_t section_syntax_indicator ; / / 1bit <nl> - / * * <nl> - * const value , must be ' 0 ' <nl> - * / <nl> + / / const value , must be ' 0 ' <nl> int8_t const0_value ; / / 1bit <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value ; / / 2bits <nl> - / * * <nl> - * This is a 12 - bit field , the first two bits of which shall be ' 00 ' . The remaining 10 bits specify the number <nl> - * of bytes of the section , starting immediately following the section_length field , and including the CRC . The value in this <nl> - * field shall not exceed 1021 ( 0x3FD ) . <nl> - * / <nl> + / / This is a 12 - bit field , the first two bits of which shall be ' 00 ' . 
The remaining 10 bits specify the number <nl> + / / of bytes of the section , starting immediately following the section_length field , and including the CRC . The value in this <nl> + / / field shall not exceed 1021 ( 0x3FD ) . <nl> uint16_t section_length ; / / 12bits <nl> public : <nl> / / the specified psi info , for example , PAT fields . <nl> public : <nl> / / 4B <nl> - / * * <nl> - * This is a 32 - bit field that contains the CRC value that gives a zero output of the registers in the decoder <nl> - * defined in Annex A after processing the entire section . <nl> - * @ remark crc32 ( bytes without pointer field , before crc32 field ) <nl> - * / <nl> + / / This is a 32 - bit field that contains the CRC value that gives a zero output of the registers in the decoder <nl> + / / defined in Annex A after processing the entire section . <nl> + / / @ remark crc32 ( bytes without pointer field , before crc32 field ) <nl> int32_t CRC_32 ; / / 32bits <nl> public : <nl> SrsTsPayloadPSI ( SrsTsPacket * p ) ; <nl> class SrsTsPayloadPSI : public SrsTsPayload <nl> virtual srs_error_t psi_decode ( SrsBuffer * stream ) = 0 ; <nl> } ; <nl> <nl> - / * * <nl> - * the program of PAT of PSI ts packet . <nl> - * / <nl> + / / The program of PAT of PSI ts packet . <nl> class SrsTsPayloadPATProgram <nl> { <nl> public : <nl> / / 4B <nl> - / * * <nl> - * Program_number is a 16 - bit field . It specifies the program to which the program_map_PID is <nl> - * applicable . When set to 0x0000 , then the following PID reference shall be the network PID . For all other cases the value <nl> - * of this field is user defined . This field shall not take any single value more than once within one version of the Program <nl> - * Association Table . <nl> - * / <nl> + / / Program_number is a 16 - bit field . It specifies the program to which the program_map_PID is <nl> + / / applicable . When set to 0x0000 , then the following PID reference shall be the network PID . For all other cases the value <nl> + / / of this field is user defined . This field shall not take any single value more than once within one version of the Program <nl> + / / Association Table . <nl> int16_t number ; / / 16bits <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value ; / / 3bits <nl> - / * * <nl> - * program_map_PID / network_PID 13bits <nl> - * network_PID - The network_PID is a 13 - bit field , which is used only in conjunction with the value of the <nl> - * program_number set to 0x0000 , specifies the PID of the Transport Stream packets which shall contain the Network <nl> - * Information Table . The value of the network_PID field is defined by the user , but shall only take values as specified in <nl> - * Table 2 - 3 . The presence of the network_PID is optional . <nl> - * / <nl> + / / program_map_PID / network_PID 13bits <nl> + / / network_PID - The network_PID is a 13 - bit field , which is used only in conjunction with the value of the <nl> + / / program_number set to 0x0000 , specifies the PID of the Transport Stream packets which shall contain the Network <nl> + / / Information Table . The value of the network_PID field is defined by the user , but shall only take values as specified in <nl> + / / Table 2 - 3 . The presence of the network_PID is optional . 
<nl> int16_t pid ; / / 13bits <nl> public : <nl> SrsTsPayloadPATProgram ( int16_t n = 0 , int16_t p = 0 ) ; <nl> class SrsTsPayloadPATProgram <nl> virtual srs_error_t encode ( SrsBuffer * stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the PAT payload of PSI ts packet . <nl> - * 2 . 4 . 4 . 3 Program association Table , hls - mpeg - ts - iso13818 - 1 . pdf , page 61 <nl> - * The Program Association Table provides the correspondence between a program_number and the PID value of the <nl> - * Transport Stream packets which carry the program definition . The program_number is the numeric label associated with <nl> - * a program . <nl> - * / <nl> + / / The PAT payload of PSI ts packet . <nl> + / / 2 . 4 . 4 . 3 Program association Table , hls - mpeg - ts - iso13818 - 1 . pdf , page 61 <nl> + / / The Program Association Table provides the correspondence between a program_number and the PID value of the <nl> + / / Transport Stream packets which carry the program definition . The program_number is the numeric label associated with <nl> + / / a program . <nl> class SrsTsPayloadPAT : public SrsTsPayloadPSI <nl> { <nl> public : <nl> / / 2B <nl> - / * * <nl> - * This is a 16 - bit field which serves as a label to identify this Transport Stream from any other <nl> - * multiplex within a network . Its value is defined by the user . <nl> - * / <nl> + / / This is a 16 - bit field which serves as a label to identify this Transport Stream from any other <nl> + / / multiplex within a network . Its value is defined by the user . <nl> uint16_t transport_stream_id ; / / 16bits <nl> <nl> / / 1B <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const3_value ; / / 2bits <nl> - / * * <nl> - * This 5 - bit field is the version number of the whole Program Association Table . The version number <nl> - * shall be incremented by 1 modulo 32 whenever the definition of the Program Association Table changes . When the <nl> - * current_next_indicator is set to ' 1 ' , then the version_number shall be that of the currently applicable Program Association <nl> - * Table . When the current_next_indicator is set to ' 0 ' , then the version_number shall be that of the next applicable Program <nl> - * Association Table . <nl> - * / <nl> + / / This 5 - bit field is the version number of the whole Program Association Table . The version number <nl> + / / shall be incremented by 1 modulo 32 whenever the definition of the Program Association Table changes . When the <nl> + / / current_next_indicator is set to ' 1 ' , then the version_number shall be that of the currently applicable Program Association <nl> + / / Table . When the current_next_indicator is set to ' 0 ' , then the version_number shall be that of the next applicable Program <nl> + / / Association Table . <nl> int8_t version_number ; / / 5bits <nl> - / * * <nl> - * A 1 - bit indicator , which when set to ' 1 ' indicates that the Program Association Table sent is <nl> - * currently applicable . When the bit is set to ' 0 ' , it indicates that the table sent is not yet applicable and shall be the next <nl> - * table to become valid . <nl> - * / <nl> + / / A 1 - bit indicator , which when set to ' 1 ' indicates that the Program Association Table sent is <nl> + / / currently applicable . When the bit is set to ' 0 ' , it indicates that the table sent is not yet applicable and shall be the next <nl> + / / table to become valid . 
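Each PAT program entry described above occupies 4 bytes: the 16-bit program_number, 3 reserved '1' bits and the 13-bit PID. A minimal packing sketch with an illustrative function name, not SRS code:

#include <cstdint>

// Illustrative sketch: pack one 4-byte PAT program entry.
static void write_pat_program(uint8_t* p, uint16_t number, uint16_t pid)
{
    p[0] = uint8_t(number >> 8);                 // program_number, high byte
    p[1] = uint8_t(number & 0xFF);               // program_number, low byte
    p[2] = uint8_t(0xE0 | ((pid >> 8) & 0x1F));  // '111' reserved + PID bits 12..8
    p[3] = uint8_t(pid & 0xFF);                  // PID bits 7..0
}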
<nl> int8_t current_next_indicator ; / / 1bit <nl> <nl> / / 1B <nl> - / * * <nl> - * This 8 - bit field gives the number of this section . The section_number of the first section in the <nl> - * Program Association Table shall be 0x00 . It shall be incremented by 1 with each additional section in the Program <nl> - * Association Table . <nl> - * / <nl> + / / This 8 - bit field gives the number of this section . The section_number of the first section in the <nl> + / / Program Association Table shall be 0x00 . It shall be incremented by 1 with each additional section in the Program <nl> + / / Association Table . <nl> uint8_t section_number ; / / 8bits <nl> <nl> / / 1B <nl> - / * * <nl> - * This 8 - bit field specifies the number of the last section ( that is , the section with the highest <nl> - * section_number ) of the complete Program Association Table . <nl> - * / <nl> + / / This 8 - bit field specifies the number of the last section ( that is , the section with the highest <nl> + / / section_number ) of the complete Program Association Table . <nl> uint8_t last_section_number ; / / 8bits <nl> <nl> / / multiple 4B program data . <nl> class SrsTsPayloadPAT : public SrsTsPayloadPSI <nl> virtual srs_error_t psi_encode ( SrsBuffer * stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the esinfo for PMT program . <nl> - * / <nl> + / / The esinfo for PMT program . <nl> class SrsTsPayloadPMTESInfo <nl> { <nl> public : <nl> / / 1B <nl> - / * * <nl> - * This is an 8 - bit field specifying the type of program element carried within the packets with the PID <nl> - * whose value is specified by the elementary_PID . The values of stream_type are specified in Table 2 - 29 . <nl> - * / <nl> + / / This is an 8 - bit field specifying the type of program element carried within the packets with the PID <nl> + / / whose value is specified by the elementary_PID . The values of stream_type are specified in Table 2 - 29 . <nl> SrsTsStream stream_type ; / / 8bits <nl> <nl> / / 2B <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value0 ; / / 3bits <nl> - / * * <nl> - * This is a 13 - bit field specifying the PID of the Transport Stream packets which carry the associated <nl> - * program element . <nl> - * / <nl> + / / This is a 13 - bit field specifying the PID of the Transport Stream packets which carry the associated <nl> + / / program element . <nl> int16_t elementary_PID ; / / 13bits <nl> <nl> / / ( 2 + x ) B <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value1 ; / / 4bits <nl> std : : vector < char > ES_info ; / / [ ES_info_length ] bytes . <nl> public : <nl> class SrsTsPayloadPMTESInfo <nl> virtual srs_error_t encode ( SrsBuffer * stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * the PMT payload of PSI ts packet . <nl> - * 2 . 4 . 4 . 8 Program Map Table , hls - mpeg - ts - iso13818 - 1 . pdf , page 64 <nl> - * The Program Map Table provides the mappings between program numbers and the program elements that comprise <nl> - * them . A single instance of such a mapping is referred to as a " program definition " . The program map table is the <nl> - * complete collection of all program definitions for a Transport Stream . This table shall be transmitted in packets , the PID <nl> - * values of which are selected by the encoder . More than one PID value may be used , if desired . The table is contained in <nl> - * one or more sections with the following syntax . 
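Each elementary-stream entry of the PMT, as described above for SrsTsPayloadPMTESInfo, starts with a fixed 5-byte prefix: the 8-bit stream_type, 3 reserved bits plus the 13-bit elementary_PID, and (per the PSI section syntax) 4 reserved bits plus a 12-bit ES_info_length. A packing sketch under those assumptions; the example stream_type values in the comment are the common AVC/AAC ones and are illustrative only:

#include <cstdint>

// Illustrative sketch (not SRS code): pack the fixed 5-byte prefix of one
// PMT elementary-stream entry.
static void write_es_info_prefix(uint8_t* p, uint8_t stream_type,
                                 uint16_t elementary_pid, uint16_t es_info_length)
{
    p[0] = stream_type;                                     // e.g. 0x1b (AVC) or 0x0f (AAC ADTS)
    p[1] = uint8_t(0xE0 | ((elementary_pid >> 8) & 0x1F));  // '111' reserved + PID bits 12..8
    p[2] = uint8_t(elementary_pid & 0xFF);                  // PID bits 7..0
    p[3] = uint8_t(0xF0 | ((es_info_length >> 8) & 0x0F));  // '1111' reserved + length bits 11..8
    p[4] = uint8_t(es_info_length & 0xFF);                  // length bits 7..0
}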
It may be segmented to occupy multiple sections . In each section , the <nl> - * section number field shall be set to zero . Sections are identified by the program_number field . <nl> - * / <nl> + / / The PMT payload of PSI ts packet . <nl> + / / 2 . 4 . 4 . 8 Program Map Table , hls - mpeg - ts - iso13818 - 1 . pdf , page 64 <nl> + / / The Program Map Table provides the mappings between program numbers and the program elements that comprise <nl> + / / Them . A single instance of such a mapping is referred to as a " program definition " . The program map table is the <nl> + / / complete collection of all program definitions for a Transport Stream . This table shall be transmitted in packets , the PID <nl> + / / values of which are selected by the encoder . More than one PID value may be used , if desired . The table is contained in <nl> + / / one or more sections with the following syntax . It may be segmented to occupy multiple sections . In each section , the <nl> + / / section number field shall be set to zero . Sections are identified by the program_number field . <nl> class SrsTsPayloadPMT : public SrsTsPayloadPSI <nl> { <nl> public : <nl> / / 2B <nl> - / * * <nl> - * program_number is a 16 - bit field . It specifies the program to which the program_map_PID is <nl> - * applicable . One program definition shall be carried within only one TS_program_map_section . This implies that a <nl> - * program definition is never longer than 1016 ( 0x3F8 ) . See Informative Annex C for ways to deal with the cases when <nl> - * that length is not sufficient . The program_number may be used as a designation for a broadcast channel , for example . By <nl> - * describing the different program elements belonging to a program , data from different sources ( e . g . sequential events ) <nl> - * can be concatenated together to form a continuous set of streams using a program_number . For examples of applications <nl> - * refer to Annex C . <nl> - * / <nl> + / / program_number is a 16 - bit field . It specifies the program to which the program_map_PID is <nl> + / / applicable . One program definition shall be carried within only one TS_program_map_section . This implies that a <nl> + / / program definition is never longer than 1016 ( 0x3F8 ) . See Informative Annex C for ways to deal with the cases when <nl> + / / that length is not sufficient . The program_number may be used as a designation for a broadcast channel , for example . By <nl> + / / describing the different program elements belonging to a program , data from different sources ( e . g . sequential events ) <nl> + / / can be concatenated together to form a continuous set of streams using a program_number . For examples of applications <nl> + / / refer to Annex C . <nl> uint16_t program_number ; / / 16bits <nl> <nl> / / 1B <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value0 ; / / 2bits <nl> - / * * <nl> - * This 5 - bit field is the version number of the TS_program_map_section . The version number shall be <nl> - * incremented by 1 modulo 32 when a change in the information carried within the section occurs . Version number refers <nl> - * to the definition of a single program , and therefore to a single section . When the current_next_indicator is set to ' 1 ' , then <nl> - * the version_number shall be that of the currently applicable TS_program_map_section . 
When the current_next_indicator <nl> - * is set to ' 0 ' , then the version_number shall be that of the next applicable TS_program_map_section . <nl> - * / <nl> + / / This 5 - bit field is the version number of the TS_program_map_section . The version number shall be <nl> + / / incremented by 1 modulo 32 when a change in the information carried within the section occurs . Version number refers <nl> + / / to the definition of a single program , and therefore to a single section . When the current_next_indicator is set to ' 1 ' , then <nl> + / / The version_number shall be that of the currently applicable TS_program_map_section . When the current_next_indicator <nl> + / / is set to ' 0 ' , then the version_number shall be that of the next applicable TS_program_map_section . <nl> int8_t version_number ; / / 5bits <nl> - / * * <nl> - * A 1 - bit field , which when set to ' 1 ' indicates that the TS_program_map_section sent is <nl> - * currently applicable . When the bit is set to ' 0 ' , it indicates that the TS_program_map_section sent is not yet applicable <nl> - * and shall be the next TS_program_map_section to become valid . <nl> - * / <nl> + / / A 1 - bit field , which when set to ' 1 ' indicates that the TS_program_map_section sent is <nl> + / / currently applicable . When the bit is set to ' 0 ' , it indicates that the TS_program_map_section sent is not yet applicable <nl> + / / and shall be the next TS_program_map_section to become valid . <nl> int8_t current_next_indicator ; / / 1bit <nl> <nl> / / 1B <nl> - / * * <nl> - * The value of this 8 - bit field shall be 0x00 . <nl> - * / <nl> + / / The value of this 8 - bit field shall be 0x00 . <nl> uint8_t section_number ; / / 8bits <nl> <nl> / / 1B <nl> - / * * <nl> - * The value of this 8 - bit field shall be 0x00 . <nl> - * / <nl> + / / The value of this 8 - bit field shall be 0x00 . <nl> uint8_t last_section_number ; / / 8bits <nl> <nl> / / 2B <nl> - / * * <nl> - * reverved value , must be ' 1 ' <nl> - * / <nl> + / / reverved value , must be ' 1 ' <nl> int8_t const1_value1 ; / / 3bits <nl> - / * * <nl> - * This is a 13 - bit field indicating the PID of the Transport Stream packets which shall contain the PCR fields <nl> - * valid for the program specified by program_number . If no PCR is associated with a program definition for private <nl> - * streams , then this field shall take the value of 0x1FFF . Refer to the semantic definition of PCR in 2 . 4 . 3 . 5 and Table 2 - 3 <nl> - * for restrictions on the choice of PCR_PID value . <nl> - * / <nl> + / / This is a 13 - bit field indicating the PID of the Transport Stream packets which shall contain the PCR fields <nl> + / / valid for the program specified by program_number . If no PCR is associated with a program definition for private <nl> + / / streams , then this field shall take the value of 0x1FFF . Refer to the semantic definition of PCR in 2 . 4 . 3 . 5 and Table 2 - 3 <nl> + / / for restrictions on the choice of PCR_PID value . <nl> int16_t PCR_PID ; / / 13bits <nl> <nl> / / 2B <nl> int8_t const1_value2 ; / / 4bits <nl> - / * * <nl> - * This is a 12 - bit field , the first two bits of which shall be ' 00 ' . The remaining 10 bits specify the <nl> - * number of bytes of the descriptors immediately following the program_info_length field . <nl> - * / <nl> + / / This is a 12 - bit field , the first two bits of which shall be ' 00 ' . The remaining 10 bits specify the <nl> + / / number of bytes of the descriptors immediately following the program_info_length field . 
<nl> std : : vector < char > program_info_desc ; / / [ program_info_length ] bytes <nl> <nl> / / array of TSPMTESInfo . <nl> class SrsTsPayloadPMT : public SrsTsPayloadPSI <nl> virtual srs_error_t psi_encode ( SrsBuffer * stream ) ; <nl> } ; <nl> <nl> - / * * <nl> - * Write the TS message to TS context . <nl> - * / <nl> + / / Write the TS message to TS context . <nl> class SrsTsContextWriter <nl> { <nl> private : <nl> class SrsTsContextWriter <nl> SrsTsContextWriter ( ISrsStreamWriter * w , SrsTsContext * c , SrsAudioCodecId ac , SrsVideoCodecId vc ) ; <nl> virtual ~ SrsTsContextWriter ( ) ; <nl> public : <nl> - / * * <nl> - * write an audio frame to ts , <nl> - * / <nl> + / / Write an audio frame to ts , <nl> virtual srs_error_t write_audio ( SrsTsMessage * audio ) ; <nl> - / * * <nl> - * write a video frame to ts , <nl> - * / <nl> + / / Write a video frame to ts , <nl> virtual srs_error_t write_video ( SrsTsMessage * video ) ; <nl> public : <nl> - / * * <nl> - * get the video codec of ts muxer . <nl> - * / <nl> + / / get the video codec of ts muxer . <nl> virtual SrsVideoCodecId video_codec ( ) ; <nl> } ; <nl> <nl> - / * <nl> - * Used for HLS Encryption <nl> - * / <nl> + / / Used for HLS Encryption <nl> class SrsEncFileWriter : public SrsFileWriter <nl> { <nl> public : <nl> class SrsEncFileWriter : public SrsFileWriter <nl> int nb_buf ; <nl> } ; <nl> <nl> - / * * <nl> - * TS messages cache , to group frames to TS message , <nl> - * for example , we may write multiple AAC RAW frames to a TS message . <nl> - * / <nl> + / / TS messages cache , to group frames to TS message , <nl> + / / for example , we may write multiple AAC RAW frames to a TS message . <nl> class SrsTsMessageCache <nl> { <nl> public : <nl> - / / current ts message . <nl> + / / The current ts message . <nl> SrsTsMessage * audio ; <nl> SrsTsMessage * video ; <nl> public : <nl> SrsTsMessageCache ( ) ; <nl> virtual ~ SrsTsMessageCache ( ) ; <nl> public : <nl> - / * * <nl> - * write audio to cache <nl> - * / <nl> + / / Write audio to cache <nl> virtual srs_error_t cache_audio ( SrsAudioFrame * frame , int64_t dts ) ; <nl> - / * * <nl> - * write video to muxer . <nl> - * / <nl> + / / Write video to muxer . <nl> virtual srs_error_t cache_video ( SrsVideoFrame * frame , int64_t dts ) ; <nl> private : <nl> virtual srs_error_t do_cache_mp3 ( SrsAudioFrame * frame ) ; <nl> class SrsTsMessageCache <nl> virtual srs_error_t do_cache_avc ( SrsVideoFrame * frame ) ; <nl> } ; <nl> <nl> - / * * <nl> - * Transmux the RTMP stream to HTTP - TS stream . <nl> - * / <nl> + / / Transmux the RTMP stream to HTTP - TS stream . <nl> class SrsTsTransmuxer <nl> { <nl> private : <nl> class SrsTsTransmuxer <nl> SrsTsTransmuxer ( ) ; <nl> virtual ~ SrsTsTransmuxer ( ) ; <nl> public : <nl> - / * * <nl> - * initialize the underlayer file stream . <nl> - * @ param fw the writer to use for ts encoder , user must free it . <nl> - * / <nl> + / / Initialize the underlayer file stream . <nl> + / / @ param fw the writer to use for ts encoder , user must free it . <nl> virtual srs_error_t initialize ( ISrsStreamWriter * fw ) ; <nl> public : <nl> - / * * <nl> - * write audio / video packet . <nl> - * @ remark assert data is not NULL . <nl> - * / <nl> + / / Write audio / video packet . <nl> + / / @ remark assert data is not NULL . 
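A minimal usage sketch of the SrsTsTransmuxer interface declared above: initialize it with a writer implementing ISrsStreamWriter, then feed timestamped payloads through write_audio/write_video (declared just below). SrsFileWriter standing in as the stream writer, its open() call, the file name and the simplified error handling are all assumptions made for illustration, not taken from this header:

// Illustrative usage sketch; assumes the relevant SRS headers are included
// and that SrsFileWriter is acceptable as the ISrsStreamWriter here.
srs_error_t demo_write_ts(char* aac_frame, int aac_size, int64_t dts)
{
    srs_error_t err = srs_success;

    SrsFileWriter fw;
    if ((err = fw.open("demo.ts")) != srs_success) {
        return err;
    }

    SrsTsTransmuxer muxer;
    if ((err = muxer.initialize(&fw)) != srs_success) {
        return err;
    }

    // Feed one audio payload; write_video() works the same way for video.
    return muxer.write_audio(dts, aac_frame, aac_size);
}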
<nl> virtual srs_error_t write_audio ( int64_t timestamp , char * data , int size ) ; <nl> virtual srs_error_t write_video ( int64_t timestamp , char * data , int size ) ; <nl> private : <nl> mmm a / trunk / src / kernel / srs_kernel_utility . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_utility . hpp <nl> <nl> class SrsBuffer ; <nl> class SrsBitBuffer ; <nl> <nl> - / / compare <nl> + / / Basic compare function . <nl> # define srs_min ( a , b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) ) <nl> # define srs_max ( a , b ) ( ( ( a ) < ( b ) ) ? ( b ) : ( a ) ) <nl> <nl> - / / read nalu uev . <nl> + / / To read H . 264 NALU uev . <nl> extern srs_error_t srs_avc_nalu_read_uev ( SrsBitBuffer * stream , int32_t & v ) ; <nl> extern srs_error_t srs_avc_nalu_read_bit ( SrsBitBuffer * stream , int8_t & v ) ; <nl> <nl> - / / get current system time in srs_utime_t , use cache to avoid performance problem <nl> + / / Get current system time in srs_utime_t , use cache to avoid performance problem <nl> extern srs_utime_t srs_get_system_time ( ) ; <nl> extern srs_utime_t srs_get_system_startup_time ( ) ; <nl> - / / the deamon st - thread will update it . <nl> + / / A daemon st - thread updates it . <nl> extern srs_utime_t srs_update_system_time ( ) ; <nl> <nl> - / / the any address for listener , <nl> - / / it ' s " 0 . 0 . 0 . 0 " for ipv4 , and " : : " for ipv6 . <nl> + / / The " ANY " address to listen , it ' s " 0 . 0 . 0 . 0 " for ipv4 , and " : : " for ipv6 . <nl> extern std : : string srs_any_address4listener ( ) ; <nl> <nl> - / / dns resolve utility , return the resolved ip address . <nl> + / / The dns resolve utility , return the resolved ip address . <nl> extern std : : string srs_dns_resolve ( std : : string host , int & family ) ; <nl> <nl> - / / split the host : port to host and port . <nl> + / / Split the host : port to host and port . <nl> / / @ remark the hostport format in < host [ : port ] > , where port is optional . <nl> extern void srs_parse_hostport ( const std : : string & hostport , std : : string & host , int & port ) ; <nl> <nl> - / / parse the endpoint to ip and port . <nl> - / / @ remark hostport format in < [ ip : ] port > , where ip is default to " 0 . 0 . 0 . 0 " . <nl> + / / Parse the endpoint to ip and port . <nl> + / / @ remark The hostport format in < [ ip : ] port > , where ip is default to " 0 . 0 . 0 . 0 " . <nl> extern void srs_parse_endpoint ( std : : string hostport , std : : string & ip , int & port ) ; <nl> <nl> - / / parse the int64 value to string . <nl> + / / Parse the int64 value to string . <nl> extern std : : string srs_int2str ( int64_t value ) ; <nl> - / / parse the float value to string , precise is 2 . <nl> + / / Parse the float value to string , precise is 2 . <nl> extern std : : string srs_float2str ( double value ) ; <nl> - / / convert bool to switch value , true to " on " , false to " off " . <nl> + / / Convert bool to switch value , true to " on " , false to " off " . 
<nl> extern std : : string srs_bool2switch ( bool v ) ; <nl> <nl> - / / whether system is little endian <nl> + / / Whether system is little endian <nl> extern bool srs_is_little_endian ( ) ; <nl> <nl> - / / replace old_str to new_str of str <nl> + / / Replace old_str to new_str of str <nl> extern std : : string srs_string_replace ( std : : string str , std : : string old_str , std : : string new_str ) ; <nl> - / / trim char in trim_chars of str <nl> + / / Trim char in trim_chars of str <nl> extern std : : string srs_string_trim_end ( std : : string str , std : : string trim_chars ) ; <nl> - / / trim char in trim_chars of str <nl> + / / Trim char in trim_chars of str <nl> extern std : : string srs_string_trim_start ( std : : string str , std : : string trim_chars ) ; <nl> - / / remove char in remove_chars of str <nl> + / / Remove char in remove_chars of str <nl> extern std : : string srs_string_remove ( std : : string str , std : : string remove_chars ) ; <nl> - / / remove first substring from str <nl> + / / Remove first substring from str <nl> extern std : : string srs_erase_first_substr ( std : : string str , std : : string erase_string ) ; <nl> - / / remove last substring from str <nl> + / / Remove last substring from str <nl> extern std : : string srs_erase_last_substr ( std : : string str , std : : string erase_string ) ; <nl> - / / whether string end with <nl> + / / Whether string end with <nl> extern bool srs_string_ends_with ( std : : string str , std : : string flag ) ; <nl> extern bool srs_string_ends_with ( std : : string str , std : : string flag0 , std : : string flag1 ) ; <nl> extern bool srs_string_ends_with ( std : : string str , std : : string flag0 , std : : string flag1 , std : : string flag2 ) ; <nl> extern bool srs_string_ends_with ( std : : string str , std : : string flag0 , std : : string flag1 , std : : string flag2 , std : : string flag3 ) ; <nl> - / / whether string starts with <nl> + / / Whether string starts with <nl> extern bool srs_string_starts_with ( std : : string str , std : : string flag ) ; <nl> extern bool srs_string_starts_with ( std : : string str , std : : string flag0 , std : : string flag1 ) ; <nl> extern bool srs_string_starts_with ( std : : string str , std : : string flag0 , std : : string flag1 , std : : string flag2 ) ; <nl> extern bool srs_string_starts_with ( std : : string str , std : : string flag0 , std : : string flag1 , std : : string flag2 , std : : string flag3 ) ; <nl> - / / whether string contains with <nl> + / / Whether string contains with <nl> extern bool srs_string_contains ( std : : string str , std : : string flag ) ; <nl> extern bool srs_string_contains ( std : : string str , std : : string flag0 , std : : string flag1 ) ; <nl> extern bool srs_string_contains ( std : : string str , std : : string flag0 , std : : string flag1 , std : : string flag2 ) ; <nl> - / / find the min match in str for flags . <nl> + / / Find the min match in str for flags . <nl> extern std : : string srs_string_min_match ( std : : string str , std : : vector < std : : string > flags ) ; <nl> - / / split the string by flag to array . <nl> + / / Split the string by flag to array . <nl> extern std : : vector < std : : string > srs_string_split ( std : : string str , std : : string flag ) ; <nl> extern std : : vector < std : : string > srs_string_split ( std : : string str , std : : vector < std : : string > flags ) ; <nl> <nl> - / * * <nl> - * compare the memory in bytes . <nl> - * @ return true if completely equal ; otherwise , false . 
<nl> - * / <nl> + / / Compare the memory in bytes . <nl> + / / @ return true if completely equal ; otherwise , false . <nl> extern bool srs_bytes_equals ( void * pa , void * pb , int size ) ; <nl> <nl> - / / create dir recursively <nl> + / / Create dir recursively <nl> extern srs_error_t srs_create_dir_recursively ( std : : string dir ) ; <nl> <nl> - / / whether path exists . <nl> + / / Whether path exists . <nl> extern bool srs_path_exists ( std : : string path ) ; <nl> - / / get the dirname of path , for instance , dirname ( " / live / livestream " ) = " / live " <nl> + / / Get the dirname of path , for instance , dirname ( " / live / livestream " ) = " / live " <nl> extern std : : string srs_path_dirname ( std : : string path ) ; <nl> - / / get the basename of path , for instance , basename ( " / live / livestream " ) = " livestream " <nl> + / / Get the basename of path , for instance , basename ( " / live / livestream " ) = " livestream " <nl> extern std : : string srs_path_basename ( std : : string path ) ; <nl> - / / get the filename of path , for instance , filename ( " livestream . flv " ) = " livestream " <nl> + / / Get the filename of path , for instance , filename ( " livestream . flv " ) = " livestream " <nl> extern std : : string srs_path_filename ( std : : string path ) ; <nl> - / / get the file extension of path , for instance , filext ( " live . flv " ) = " . flv " <nl> + / / Get the file extension of path , for instance , filext ( " live . flv " ) = " . flv " <nl> extern std : : string srs_path_filext ( std : : string path ) ; <nl> <nl> - / * * <nl> - * whether stream starts with the avc NALU in " AnnexB " <nl> - * from ISO_IEC_14496 - 10 - AVC - 2003 . pdf , page 211 . <nl> - * start code must be " N [ 00 ] 00 00 01 " where N > = 0 <nl> - * @ param pnb_start_code output the size of start code , must > = 3 . <nl> - * NULL to ignore . <nl> - * / <nl> + / / Whether stream starts with the avc NALU in " AnnexB " from ISO_IEC_14496 - 10 - AVC - 2003 . pdf , page 211 . <nl> + / / The start code must be " N [ 00 ] 00 00 01 " where N > = 0 <nl> + / / @ param pnb_start_code output the size of start code , must > = 3 . NULL to ignore . <nl> extern bool srs_avc_startswith_annexb ( SrsBuffer * stream , int * pnb_start_code = NULL ) ; <nl> <nl> - / * * <nl> - * whether stream starts with the aac ADTS <nl> - * from ISO_IEC_14496 - 3 - AAC - 2001 . pdf , page 75 , 1 . A . 2 . 2 ADTS . <nl> - * start code must be ' 1111 1111 1111 ' B , that is 0xFFF <nl> - * / <nl> + / / Whether stream starts with the aac ADTS from ISO_IEC_14496 - 3 - AAC - 2001 . pdf , page 75 , 1 . A . 2 . 2 ADTS . <nl> + / / The start code must be ' 1111 1111 1111 ' B , that is 0xFFF <nl> extern bool srs_aac_startswith_adts ( SrsBuffer * stream ) ; <nl> <nl> - / * * <nl> - * cacl the crc32 of bytes in buf , for ffmpeg . <nl> - * / <nl> + / / Cacl the crc32 of bytes in buf , for ffmpeg . <nl> extern uint32_t srs_crc32_mpegts ( const void * buf , int size ) ; <nl> <nl> - / * * <nl> - * calc the crc32 of bytes in buf by IEEE , for zip . <nl> - * / <nl> + / / Calc the crc32 of bytes in buf by IEEE , for zip . <nl> extern uint32_t srs_crc32_ieee ( const void * buf , int size , uint32_t previous = 0 ) ; <nl> <nl> - / * * <nl> - * Decode a base64 - encoded string . <nl> - * / <nl> + / / Decode a base64 - encoded string . 
<nl> extern srs_error_t srs_av_base64_decode ( std : : string cipher , std : : string & plaintext ) ; <nl> <nl> - / * * <nl> - * Calculate the output size needed to base64 - encode x bytes to a <nl> - * null - terminated string . <nl> - * / <nl> + / / Calculate the output size needed to base64 - encode x bytes to a null - terminated string . <nl> # define SRS_AV_BASE64_SIZE ( x ) ( ( ( x ) + 2 ) / 3 * 4 + 1 ) <nl> <nl> - / * * <nl> - * convert hex string to data . <nl> - * for example , p = config = ' 139056E5A0 ' <nl> - * output hex to data = { 0x13 , 0x90 , 0x56 , 0xe5 , 0xa0 } <nl> - * / <nl> + / / Convert hex string to data , for example , p = config = ' 139056E5A0 ' <nl> + / / The output data in hex { 0x13 , 0x90 , 0x56 , 0xe5 , 0xa0 } as such . <nl> extern int srs_hex_to_data ( uint8_t * data , const char * p , int size ) ; <nl> <nl> - / * * <nl> - * convert data string to hex . <nl> - * / <nl> + / / Convert data string to hex . <nl> extern char * srs_data_to_hex ( char * des , const uint8_t * src , int len ) ; <nl> <nl> - / * * <nl> - * generate the c0 chunk header for msg . <nl> - * @ param cache , the cache to write header . <nl> - * @ param nb_cache , the size of cache . <nl> - * @ return the size of header . 0 if cache not enough . <nl> - * / <nl> + / / Generate the c0 chunk header for msg . <nl> + / / @ param cache , the cache to write header . <nl> + / / @ param nb_cache , the size of cache . <nl> + / / @ return The size of header . 0 if cache not enough . <nl> extern int srs_chunk_header_c0 ( int perfer_cid , uint32_t timestamp , int32_t payload_length , int8_t message_type , int32_t stream_id , char * cache , int nb_cache ) ; <nl> <nl> - / * * <nl> - * generate the c3 chunk header for msg . <nl> - * @ param cache , the cache to write header . <nl> - * @ param nb_cache , the size of cache . <nl> - * @ return the size of header . 0 if cache not enough . <nl> - * / <nl> + / / Generate the c3 chunk header for msg . <nl> + / / @ param cache , the cache to write header . <nl> + / / @ param nb_cache , the size of cache . <nl> + / / @ return the size of header . 0 if cache not enough . <nl> extern int srs_chunk_header_c3 ( int perfer_cid , uint32_t timestamp , char * cache , int nb_cache ) ; <nl> <nl> # endif <nl>
Refine typo in kernel .
ossrs/srs
35fe05d62c31326ef17041a3d620f8a14bf7155c
2019-04-22T01:19:05Z
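Context for the SRS kernel diff above: the rewritten comments describe srs_avc_startswith_annexb(), which probes a buffer for an H.264 Annex B start code ("N [00] 00 00 01" with N >= 0) and optionally reports the start-code length. The snippet below is only an illustrative, standalone sketch of that check, not the SRS implementation; the function name and signature are invented here.

```cpp
#include <cstddef>
#include <cstdint>

// Returns true when buf begins with N zero bytes followed by 0x00 0x00 0x01
// (N >= 0), i.e. an Annex B start code of 3 or more bytes. If pnb_start_code
// is not null, it receives the start-code length (always >= 3).
bool starts_with_annexb(const uint8_t* buf, size_t size, int* pnb_start_code = nullptr) {
    size_t pos = 0;
    while (pos + 2 < size) {
        if (buf[pos] != 0x00) {
            return false;  // a non-zero byte appeared before the 00 00 01 pattern
        }
        if (buf[pos + 1] == 0x00 && buf[pos + 2] == 0x01) {
            if (pnb_start_code) {
                *pnb_start_code = static_cast<int>(pos) + 3;
            }
            return true;
        }
        ++pos;
    }
    return false;
}
```

For a NALU preceded by a 4-byte start code (00 00 00 01), starts_with_annexb(buf, size, &n) returns true with n == 4; with the 3-byte form it returns n == 3.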
mmm a / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> ppp b / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> extension SequenceType { <nl> / / / and * N * is the length of the result . <nl> @ warn_unused_result <nl> public func flatMap < S : SequenceType > ( <nl> - transform : ( $ { GElement } ) throws - > S <nl> + @ noescape transform : ( $ { GElement } ) throws - > S <nl> ) rethrows - > [ S . $ { GElement } ] { <nl> var result : [ S . $ { GElement } ] = [ ] <nl> for element in self { <nl>
Merge pull request from kballard / flatmap - noescape
apple/swift
2a91bfbc63bcc1ce6d4f6a832e88d5546b7cd3f5
2016-02-23T19:16:29Z
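The Swift change above only marks flatMap's transform closure @noescape, guaranteeing the closure cannot outlive the call. As a hedged analogy in C++ (not the Swift stdlib; the name flat_map and its shape are invented for illustration), the same guarantee falls out when the transform is taken as a template parameter and invoked only inside the function body, so it is never stored or escapes:

```cpp
#include <vector>

// Flattens the results of applying transform to each element of input.
// transform is only called inside this function, never stored.
template <typename T, typename F>
auto flat_map(const std::vector<T>& input, F&& transform) {
    using Inner = decltype(transform(input.front()));  // e.g. std::vector<U>
    using U = typename Inner::value_type;
    std::vector<U> result;
    for (const T& element : input) {
        for (const U& value : transform(element)) {
            result.push_back(value);
        }
    }
    return result;
}

// Usage sketch:
//   flat_map(std::vector<int>{1, 2, 3},
//            [](int x) { return std::vector<int>{x, 10 * x}; })
//   yields {1, 10, 2, 20, 3, 30}.
```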
mmm a / modules / gpu / src / brute_force_matcher . cpp <nl> ppp b / modules / gpu / src / brute_force_matcher . cpp <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & , std : : vector <nl> namespace cv { namespace gpu { namespace bfmatcher <nl> { <nl> template < typename T > <nl> - void matchSingleL1_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , <nl> + void matchSingleL1_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , <nl> const DevMem2D & mask , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , const DevMem2Df & distance ) ; <nl> template < typename T > <nl> - void matchSingleL2_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , <nl> + void matchSingleL2_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , <nl> const DevMem2D & mask , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , const DevMem2Df & distance ) ; <nl> template < typename T > <nl> - void matchCollectionL1_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainCollection , <nl> - const DevMem2D_ < PtrStep > & maskCollection , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , <nl> + void matchCollectionL1_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainCollection , <nl> + const DevMem2D_ < PtrStep > & maskCollection , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , <nl> const DevMem2Df & distance ) ; <nl> template < typename T > <nl> - void matchCollectionL2_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainCollection , <nl> - const DevMem2D_ < PtrStep > & maskCollection , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , <nl> + void matchCollectionL2_gpu ( const DevMem2D & queryDescs , const DevMem2D & trainCollection , <nl> + const DevMem2D_ < PtrStep > & maskCollection , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , <nl> const DevMem2Df & distance ) ; <nl> <nl> template < typename T > <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : add ( const vector < GpuMat > & descCollecti <nl> trainDescCollection . insert ( trainDescCollection . end ( ) , descCollection . begin ( ) , descCollection . end ( ) ) ; <nl> } <nl> <nl> - const vector < GpuMat > & cv : : gpu : : BruteForceMatcher_GPU_base : : getTrainDescriptors ( ) const <nl> + const vector < GpuMat > & cv : : gpu : : BruteForceMatcher_GPU_base : : getTrainDescriptors ( ) const <nl> { <nl> return trainDescCollection ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : clear ( ) <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : clear ( ) <nl> { <nl> trainDescCollection . 
clear ( ) ; <nl> } <nl> bool cv : : gpu : : BruteForceMatcher_GPU_base : : isMaskSupported ( ) const <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / Match <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : matchSingle ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : matchSingle ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> GpuMat & trainIdx , GpuMat & distance , const GpuMat & mask ) <nl> { <nl> using namespace cv : : gpu : : bfmatcher ; <nl> <nl> - typedef void ( * match_caller_t ) ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , <nl> + typedef void ( * match_caller_t ) ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , <nl> const DevMem2D & mask , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , const DevMem2Df & distance ) ; <nl> <nl> - static const match_caller_t match_callers [ 2 ] [ 8 ] = <nl> + static const match_caller_t match_callers [ 2 ] [ 8 ] = <nl> { <nl> { <nl> - matchSingleL1_gpu < unsigned char > , matchSingleL1_gpu < char > , matchSingleL1_gpu < unsigned short > , <nl> + matchSingleL1_gpu < unsigned char > , matchSingleL1_gpu < char > , matchSingleL1_gpu < unsigned short > , <nl> matchSingleL1_gpu < short > , matchSingleL1_gpu < int > , matchSingleL1_gpu < float > , 0 , 0 <nl> } , <nl> { <nl> - matchSingleL2_gpu < unsigned char > , matchSingleL2_gpu < char > , matchSingleL2_gpu < unsigned short > , <nl> + matchSingleL2_gpu < unsigned char > , matchSingleL2_gpu < char > , matchSingleL2_gpu < unsigned short > , <nl> matchSingleL2_gpu < short > , matchSingleL2_gpu < int > , matchSingleL2_gpu < float > , 0 , 0 <nl> } <nl> } ; <nl> - <nl> + <nl> CV_Assert ( queryDescs . channels ( ) = = 1 ) ; <nl> CV_Assert ( trainDescs . cols = = queryDescs . cols & & trainDescs . type ( ) = = queryDescs . type ( ) ) ; <nl> - <nl> + <nl> const int nQuery = queryDescs . rows ; <nl> <nl> trainIdx . create ( 1 , nQuery , CV_32S ) ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : matchSingle ( const GpuMat & queryDescs , <nl> match_caller_t func = match_callers [ distType ] [ queryDescs . depth ( ) ] ; <nl> CV_Assert ( func ! = 0 ) ; <nl> <nl> - / / For single train there is no need to save imgIdx , so we just save imgIdx to trainIdx . <nl> + / / For single train there is no need to save imgIdx , so we just save imgIdx to trainIdx . <nl> / / trainIdx store after imgIdx , so we doesn ' t lose it value . <nl> func ( queryDescs , trainDescs , mask , trainIdx , trainIdx , distance ) ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : matchDownload ( const GpuMat & trainIdx , const GpuMat & distance , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : matchDownload ( const GpuMat & trainIdx , const GpuMat & distance , <nl> vector < DMatch > & matches ) <nl> { <nl> const int nQuery = trainIdx . 
cols ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : matchDownload ( const GpuMat & trainIdx , <nl> } <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : match ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : match ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> vector < DMatch > & matches , const GpuMat & mask ) <nl> { <nl> GpuMat trainIdx , distance ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : match ( const GpuMat & queryDescs , const <nl> matchDownload ( trainIdx , distance , matches ) ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : makeGpuCollection ( GpuMat & trainCollection , GpuMat & maskCollection , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : makeGpuCollection ( GpuMat & trainCollection , GpuMat & maskCollection , <nl> const vector < GpuMat > & masks ) <nl> { <nl> if ( masks . empty ( ) ) <nl> - { <nl> + { <nl> Mat trainCollectionCPU ( 1 , trainDescCollection . size ( ) , CV_8UC ( sizeof ( DevMem2D ) ) ) ; <nl> <nl> for ( size_t i = 0 ; i < trainDescCollection . size ( ) ; + + i ) <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : makeGpuCollection ( GpuMat & trainCollect <nl> else <nl> { <nl> CV_Assert ( masks . size ( ) = = trainDescCollection . size ( ) ) ; <nl> - <nl> + <nl> Mat trainCollectionCPU ( 1 , trainDescCollection . size ( ) , CV_8UC ( sizeof ( DevMem2D ) ) ) ; <nl> Mat maskCollectionCPU ( 1 , trainDescCollection . size ( ) , CV_8UC ( sizeof ( PtrStep ) ) ) ; <nl> <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : makeGpuCollection ( GpuMat & trainCollect <nl> <nl> trainCollectionCPU . ptr < DevMem2D > ( 0 ) [ i ] = trainDescs ; <nl> <nl> - maskCollectionCPU . ptr < PtrStep > ( 0 ) [ i ] = static_cast < PtrStep > ( mask ) ; <nl> + maskCollectionCPU . ptr < PtrStep > ( 0 ) [ i ] = mask ; <nl> } <nl> <nl> trainCollection . 
upload ( trainCollectionCPU ) ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : makeGpuCollection ( GpuMat & trainCollect <nl> } <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : matchCollection ( const GpuMat & queryDescs , const GpuMat & trainCollection , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : matchCollection ( const GpuMat & queryDescs , const GpuMat & trainCollection , <nl> GpuMat & trainIdx , GpuMat & imgIdx , GpuMat & distance , const GpuMat & maskCollection ) <nl> { <nl> using namespace cv : : gpu : : bfmatcher ; <nl> <nl> - typedef void ( * match_caller_t ) ( const DevMem2D & queryDescs , const DevMem2D & trainCollection , <nl> - const DevMem2D_ < PtrStep > & maskCollection , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , <nl> + typedef void ( * match_caller_t ) ( const DevMem2D & queryDescs , const DevMem2D & trainCollection , <nl> + const DevMem2D_ < PtrStep > & maskCollection , const DevMem2Di & trainIdx , const DevMem2Di & imgIdx , <nl> const DevMem2Df & distance ) ; <nl> <nl> - static const match_caller_t match_callers [ 2 ] [ 8 ] = <nl> + static const match_caller_t match_callers [ 2 ] [ 8 ] = <nl> { <nl> { <nl> - matchCollectionL1_gpu < unsigned char > , matchCollectionL1_gpu < char > , <nl> - matchCollectionL1_gpu < unsigned short > , matchCollectionL1_gpu < short > , <nl> + matchCollectionL1_gpu < unsigned char > , matchCollectionL1_gpu < char > , <nl> + matchCollectionL1_gpu < unsigned short > , matchCollectionL1_gpu < short > , <nl> matchCollectionL1_gpu < int > , matchCollectionL1_gpu < float > , 0 , 0 <nl> } , <nl> { <nl> - matchCollectionL2_gpu < unsigned char > , matchCollectionL2_gpu < char > , <nl> - matchCollectionL2_gpu < unsigned short > , matchCollectionL2_gpu < short > , <nl> + matchCollectionL2_gpu < unsigned char > , matchCollectionL2_gpu < char > , <nl> + matchCollectionL2_gpu < unsigned short > , matchCollectionL2_gpu < short > , <nl> matchCollectionL2_gpu < int > , matchCollectionL2_gpu < float > , 0 , 0 <nl> } <nl> } ; <nl> - <nl> + <nl> CV_Assert ( queryDescs . channels ( ) = = 1 ) ; <nl> - <nl> + <nl> const int nQuery = queryDescs . rows ; <nl> <nl> trainIdx . create ( 1 , nQuery , CV_32S ) ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : matchCollection ( const GpuMat & queryDes <nl> func ( queryDescs , trainCollection , maskCollection , trainIdx , imgIdx , distance ) ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : matchDownload ( const GpuMat & trainIdx , GpuMat & imgIdx , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : matchDownload ( const GpuMat & trainIdx , GpuMat & imgIdx , <nl> const GpuMat & distance , vector < DMatch > & matches ) <nl> { <nl> const int nQuery = trainIdx . 
cols ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : matchDownload ( const GpuMat & trainIdx , <nl> } <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : match ( const GpuMat & queryDescs , vector < DMatch > & matches , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : match ( const GpuMat & queryDescs , vector < DMatch > & matches , <nl> const vector < GpuMat > & masks ) <nl> - { <nl> + { <nl> GpuMat trainCollection ; <nl> GpuMat maskCollection ; <nl> <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : match ( const GpuMat & queryDescs , vector <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / KnnMatch <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> GpuMat & trainIdx , GpuMat & distance , GpuMat & allDist , int k , const GpuMat & mask ) <nl> { <nl> using namespace cv : : gpu : : bfmatcher ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , con <nl> typedef void ( * match_caller_t ) ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , int knn , <nl> const DevMem2D & mask , const DevMem2Di & trainIdx , const DevMem2Df & distance , const DevMem2Df & allDist ) ; <nl> <nl> - static const match_caller_t match_callers [ 2 ] [ 8 ] = <nl> + static const match_caller_t match_callers [ 2 ] [ 8 ] = <nl> { <nl> { <nl> - knnMatchL1_gpu < unsigned char > , knnMatchL1_gpu < char > , knnMatchL1_gpu < unsigned short > , <nl> + knnMatchL1_gpu < unsigned char > , knnMatchL1_gpu < char > , knnMatchL1_gpu < unsigned short > , <nl> knnMatchL1_gpu < short > , knnMatchL1_gpu < int > , knnMatchL1_gpu < float > , 0 , 0 <nl> } , <nl> { <nl> - knnMatchL2_gpu < unsigned char > , knnMatchL2_gpu < char > , knnMatchL2_gpu < unsigned short > , <nl> + knnMatchL2_gpu < unsigned char > , knnMatchL2_gpu < char > , knnMatchL2_gpu < unsigned short > , <nl> knnMatchL2_gpu < short > , knnMatchL2_gpu < int > , knnMatchL2_gpu < float > , 0 , 0 <nl> } <nl> } ; <nl> - <nl> + <nl> CV_Assert ( queryDescs . channels ( ) = = 1 ) ; <nl> - <nl> + <nl> const int nQuery = queryDescs . rows ; <nl> const int nTrain = trainDescs . rows ; <nl> <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatchDownload ( const GpuMat & trainId <nl> } <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> vector < vector < DMatch > > & matches , int k , const GpuMat & mask , bool compactResult ) <nl> { <nl> GpuMat trainIdx , distance , allDist ; <nl> namespace <nl> } ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , <nl> vector < vector < DMatch > > & matches , int knn , const vector < GpuMat > & masks , bool compactResult ) <nl> { <nl> vector < vector < DMatch > > curMatches ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , <nl> <nl> for ( size_t imgIdx = 0 ; imgIdx < trainDescCollection . 
size ( ) ; + + imgIdx ) <nl> { <nl> - knnMatch ( queryDescs , trainDescCollection [ imgIdx ] , curMatches , knn , <nl> + knnMatch ( queryDescs , trainDescCollection [ imgIdx ] , curMatches , knn , <nl> masks . empty ( ) ? GpuMat ( ) : masks [ imgIdx ] ) ; <nl> <nl> for ( int queryIdx = 0 ; queryIdx < queryDescs . rows ; + + queryIdx ) <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , <nl> <nl> if ( compactResult ) <nl> { <nl> - vector < vector < DMatch > > : : iterator new_end = remove_if ( matches . begin ( ) , matches . end ( ) , <nl> + vector < vector < DMatch > > : : iterator new_end = remove_if ( matches . begin ( ) , matches . end ( ) , <nl> mem_fun_ref ( & vector < DMatch > : : empty ) ) ; <nl> matches . erase ( new_end , matches . end ( ) ) ; <nl> } <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : knnMatch ( const GpuMat & queryDescs , <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / RadiusMatch <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> GpuMat & trainIdx , GpuMat & nMatches , GpuMat & distance , float maxDistance , const GpuMat & mask ) <nl> { <nl> using namespace cv : : gpu : : bfmatcher ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , <nl> typedef void ( * radiusMatch_caller_t ) ( const DevMem2D & queryDescs , const DevMem2D & trainDescs , float maxDistance , <nl> const DevMem2D & mask , const DevMem2Di & trainIdx , unsigned int * nMatches , const DevMem2Df & distance ) ; <nl> <nl> - static const radiusMatch_caller_t radiusMatch_callers [ 2 ] [ 8 ] = <nl> + static const radiusMatch_caller_t radiusMatch_callers [ 2 ] [ 8 ] = <nl> { <nl> { <nl> - radiusMatchL1_gpu < unsigned char > , radiusMatchL1_gpu < char > , radiusMatchL1_gpu < unsigned short > , <nl> + radiusMatchL1_gpu < unsigned char > , radiusMatchL1_gpu < char > , radiusMatchL1_gpu < unsigned short > , <nl> radiusMatchL1_gpu < short > , radiusMatchL1_gpu < int > , radiusMatchL1_gpu < float > , 0 , 0 <nl> } , <nl> { <nl> - radiusMatchL2_gpu < unsigned char > , radiusMatchL2_gpu < char > , radiusMatchL2_gpu < unsigned short > , <nl> + radiusMatchL2_gpu < unsigned char > , radiusMatchL2_gpu < char > , radiusMatchL2_gpu < unsigned short > , <nl> radiusMatchL2_gpu < short > , radiusMatchL2_gpu < int > , radiusMatchL2_gpu < float > , 0 , 0 <nl> } <nl> } ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , <nl> int major , minor ; <nl> getComputeCapability ( getDevice ( ) , major , minor ) ; <nl> CV_Assert ( 100 * major + 10 * minor > = 110 ) ; / / works onle on device with CC > = 1 . 1 <nl> - <nl> + <nl> const int nQuery = queryDescs . rows ; <nl> const int nTrain = trainDescs . rows ; <nl> - <nl> + <nl> CV_Assert ( queryDescs . channels ( ) = = 1 ) ; <nl> CV_Assert ( trainDescs . type ( ) = = queryDescs . type ( ) & & trainDescs . cols = = queryDescs . cols ) ; <nl> - CV_Assert ( trainIdx . empty ( ) | | trainIdx . rows = = nQuery ) ; <nl> - <nl> + CV_Assert ( trainIdx . empty ( ) | | trainIdx . rows = = nQuery ) ; <nl> + <nl> nMatches . create ( 1 , nQuery , CV_32SC1 ) ; <nl> nMatches . setTo ( Scalar : : all ( 0 ) ) ; <nl> if ( trainIdx . 
empty ( ) ) <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , <nl> func ( queryDescs , trainDescs , maxDistance , mask , trainIdx , nMatches . ptr < unsigned int > ( ) , distance ) ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatchDownload ( const GpuMat & trainIdx , const GpuMat & nMatches , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatchDownload ( const GpuMat & trainIdx , const GpuMat & nMatches , <nl> const GpuMat & distance , std : : vector < std : : vector < DMatch > > & matches , bool compactResult ) <nl> { <nl> const int nQuery = trainIdx . rows ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatchDownload ( const GpuMat & trai <nl> int trainIdx = * trainIdx_ptr ; <nl> <nl> float distance = * distance_ptr ; <nl> - <nl> + <nl> DMatch m ( queryIdx , trainIdx , 0 , distance ) ; <nl> <nl> curMatches . push_back ( m ) ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatchDownload ( const GpuMat & trai <nl> } <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , const GpuMat & trainDescs , <nl> vector < vector < DMatch > > & matches , float maxDistance , const GpuMat & mask , bool compactResult ) <nl> { <nl> GpuMat trainIdx , nMatches , distance ; <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , <nl> radiusMatchDownload ( trainIdx , nMatches , distance , matches , compactResult ) ; <nl> } <nl> <nl> - void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , vector < vector < DMatch > > & matches , <nl> + void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , vector < vector < DMatch > > & matches , <nl> float maxDistance , const vector < GpuMat > & masks , bool compactResult ) <nl> <nl> { <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , <nl> <nl> for ( size_t imgIdx = 0 ; imgIdx < trainDescCollection . size ( ) ; + + imgIdx ) <nl> { <nl> - radiusMatch ( queryDescs , trainDescCollection [ imgIdx ] , curMatches , maxDistance , <nl> + radiusMatch ( queryDescs , trainDescCollection [ imgIdx ] , curMatches , maxDistance , <nl> masks . empty ( ) ? GpuMat ( ) : masks [ imgIdx ] ) ; <nl> <nl> for ( int queryIdx = 0 ; queryIdx < queryDescs . rows ; + + queryIdx ) <nl> void cv : : gpu : : BruteForceMatcher_GPU_base : : radiusMatch ( const GpuMat & queryDescs , <nl> <nl> if ( compactResult ) <nl> { <nl> - vector < vector < DMatch > > : : iterator new_end = remove_if ( matches . begin ( ) , matches . end ( ) , <nl> + vector < vector < DMatch > > : : iterator new_end = remove_if ( matches . begin ( ) , matches . end ( ) , <nl> mem_fun_ref ( & vector < DMatch > : : empty ) ) ; <nl> matches . erase ( new_end , matches . end ( ) ) ; <nl> } <nl>
fixed compile error under linux
opencv/opencv
442cd75c324f8bebee7e79e61d846a318606f246
2010-12-13T09:50:32Z
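The OpenCV diff above is largely whitespace cleanup around a common dispatch pattern: a static table of kernel function pointers indexed by distance type and descriptor depth, with empty slots for unsupported depths guarded by CV_Assert(func != 0). Below is a minimal standalone sketch of that pattern with invented names, not OpenCV's actual types or kernels.

```cpp
#include <cassert>
#include <cstdio>

enum Depth { DEPTH_8U = 0, DEPTH_8S, DEPTH_16U, DEPTH_16S,
             DEPTH_32S, DEPTH_32F, DEPTH_64F, DEPTH_USR, DEPTH_COUNT };
enum Dist  { DIST_L1 = 0, DIST_L2, DIST_COUNT };

// Stand-in for a per-type matching kernel; a real one would compute distances.
template <typename T>
void match_impl(const void* query, const void* train) {
    (void)query; (void)train;
    std::printf("matching descriptors with %zu-byte elements\n", sizeof(T));
}

using match_fn = void (*)(const void*, const void*);

void dispatch_match(Dist dist, Depth depth, const void* query, const void* train) {
    // Two rows (L1, L2) by eight depth slots; the last two are null because no
    // kernel is instantiated for them, hence the assert, mirroring CV_Assert(func != 0).
    static const match_fn callers[DIST_COUNT][DEPTH_COUNT] = {
        { match_impl<unsigned char>, match_impl<char>, match_impl<unsigned short>,
          match_impl<short>, match_impl<int>, match_impl<float>, nullptr, nullptr },
        { match_impl<unsigned char>, match_impl<char>, match_impl<unsigned short>,
          match_impl<short>, match_impl<int>, match_impl<float>, nullptr, nullptr },
    };
    match_fn fn = callers[dist][depth];
    assert(fn != nullptr);
    fn(query, train);
}
```

Keeping the table static and null-padding unsupported depths lets the caller fail fast on an unsupported input type instead of instantiating kernels that can never run.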
mmm a / docs / RELEASE_NOTES . md <nl> ppp b / docs / RELEASE_NOTES . md <nl> <nl> <nl> * * Table of Contents * * * generated with [ DocToc ] ( http : / / doctoc . herokuapp . com / ) * <nl> <nl> - - [ cocos2d - x v3 . 2rc0 Release Notes ] ( # user - content - cocos2d - x - v32rc0 - release - notes ) <nl> + - [ cocos2d - x v3 . 2 Release Notes ] ( # user - content - cocos2d - x - v32 - release - notes ) <nl> - [ Misc Information ] ( # user - content - misc - information ) <nl> - [ Requirements ] ( # user - content - requirements ) <nl> - [ Runtime Requirements ] ( # user - content - runtime - requirements ) <nl> <nl> - [ Windows ] ( # user - content - windows ) <nl> - [ Linux ] ( # user - content - linux ) <nl> - [ How to start a new game ] ( # user - content - how - to - start - a - new - game ) <nl> - - [ Highlights of v3 . 2rc0 ] ( # user - content - highlights - of - v32rc0 ) <nl> + - [ Highlights of v3 . 2 ] ( # user - content - highlights - of - v32 ) <nl> + - [ Documents ] ( # user - content - documents ) <nl> - [ Toolchain requirement changed ] ( # user - content - toolchain - requirement - changed ) <nl> - [ Features in detail ] ( # user - content - features - in - detail ) <nl> - - [ Sprite3d ] ( # user - content - sprite3d ) <nl> + - [ Sprite3D & Animation3D ] ( # user - content - sprite3d - - animation3d ) <nl> - [ fbx - conv usage ] ( # user - content - fbx - conv - usage ) <nl> - - [ Controller support ] ( # user - content - controller - support ) <nl> + - [ Game controller ] ( # user - content - game - controller ) <nl> - [ Fast tilemap ] ( # user - content - fast - tilemap ) <nl> - [ Node : : enumerateChildren ] ( # user - content - nodeenumeratechildren ) <nl> - [ utils : : findChildren ] ( # user - content - utilsfindchildren ) <nl> + - [ Node : : setNormalizedPosition ] ( # user - content - nodesetnormalizedposition ) <nl> <nl> # Misc Information <nl> <nl> void setNormalizedPosition ( Vec2 pos ) <nl> } <nl> ` ` ` <nl> <nl> - Full test please refer to ` NodeNormalizedPositionTest1 / 2 ` in ` tests / cpp - tests / Classes / NodeTest / NodeTest . cpp ` . <nl> \ No newline at end of file <nl> + Full test please refer to ` NodeNormalizedPositionTest1 / 2 ` in ` tests / cpp - tests / Classes / NodeTest / NodeTest . cpp ` . <nl>
Update RELEASE_NOTES . md
cocos2d/cocos2d-x
7fab7cdd1568d3e03b2799ccf4b3ef3bc3f1849e
2014-07-15T06:46:56Z
mmm a / test / common / http / BUILD <nl> ppp b / test / common / http / BUILD <nl> envoy_cc_test ( <nl> " / / test / mocks / http : http_mocks " , <nl> " / / test / mocks / upstream : upstream_mocks " , <nl> " / / test / test_common : utility_lib " , <nl> + " @ envoy_api / / envoy / api / v2 / core : protocol_cc " , <nl> ] , <nl> ) <nl> <nl> mmm a / test / common / http / utility_test . cc <nl> ppp b / test / common / http / utility_test . cc <nl> <nl> # include < cstdint > <nl> # include < string > <nl> <nl> + # include " envoy / api / v2 / core / protocol . pb . h " <nl> + # include " envoy / api / v2 / core / protocol . pb . validate . h " <nl> + <nl> # include " common / common / fmt . h " <nl> # include " common / config / protocol_json . h " <nl> # include " common / http / exception . h " <nl> TEST ( HttpUtility , createSslRedirectPath ) { <nl> <nl> namespace { <nl> <nl> - Http2Settings parseHttp2SettingsFromJson ( const std : : string & json_string ) { <nl> + Http2Settings parseHttp2SettingsFromV2Yaml ( const std : : string & yaml ) { <nl> envoy : : api : : v2 : : core : : Http2ProtocolOptions http2_protocol_options ; <nl> - auto json_object_ptr = Json : : Factory : : loadFromString ( json_string ) ; <nl> - Config : : ProtocolJson : : translateHttp2ProtocolOptions ( <nl> - * json_object_ptr - > getObject ( " http2_settings " , true ) , http2_protocol_options ) ; <nl> + TestUtility : : loadFromYamlAndValidate ( yaml , http2_protocol_options ) ; <nl> return Utility : : parseHttp2Settings ( http2_protocol_options ) ; <nl> } <nl> <nl> Http2Settings parseHttp2SettingsFromJson ( const std : : string & json_string ) { <nl> <nl> TEST ( HttpUtility , parseHttp2Settings ) { <nl> { <nl> - auto http2_settings = parseHttp2SettingsFromJson ( " { } " ) ; <nl> + auto http2_settings = parseHttp2SettingsFromV2Yaml ( " { } " ) ; <nl> EXPECT_EQ ( Http2Settings : : DEFAULT_HPACK_TABLE_SIZE , http2_settings . hpack_table_size_ ) ; <nl> EXPECT_EQ ( Http2Settings : : DEFAULT_MAX_CONCURRENT_STREAMS , <nl> http2_settings . max_concurrent_streams_ ) ; <nl> TEST ( HttpUtility , parseHttp2Settings ) { <nl> } <nl> <nl> { <nl> - auto http2_settings = parseHttp2SettingsFromJson ( R " raw ( { <nl> - " http2_settings " : { <nl> - " hpack_table_size " : 1 , <nl> - " max_concurrent_streams " : 2 , <nl> - " initial_stream_window_size " : 3 , <nl> - " initial_connection_window_size " : 4 <nl> - } <nl> - } ) raw " ) ; <nl> + const std : : string yaml = R " EOF ( <nl> + hpack_table_size : 1 <nl> + max_concurrent_streams : 2 <nl> + initial_stream_window_size : 65535 <nl> + initial_connection_window_size : 65535 <nl> + ) EOF " ; <nl> + auto http2_settings = parseHttp2SettingsFromV2Yaml ( yaml ) ; <nl> EXPECT_EQ ( 1U , http2_settings . hpack_table_size_ ) ; <nl> EXPECT_EQ ( 2U , http2_settings . max_concurrent_streams_ ) ; <nl> - EXPECT_EQ ( 3U , http2_settings . initial_stream_window_size_ ) ; <nl> - EXPECT_EQ ( 4U , http2_settings . initial_connection_window_size_ ) ; <nl> + EXPECT_EQ ( 65535U , http2_settings . initial_stream_window_size_ ) ; <nl> + EXPECT_EQ ( 65535U , http2_settings . initial_connection_window_size_ ) ; <nl> } <nl> } <nl> <nl>
[ test ] migrate Http2ProtocolOptions test stubs to v2 ( )
envoyproxy/envoy
2c756f1b296980a993db18abe11c116fad7e882a
2019-07-03T17:00:57Z
mmm a / aten / src / ATen / native / cpu / UnaryOpsKernel . cpp <nl> ppp b / aten / src / ATen / native / cpu / UnaryOpsKernel . cpp <nl> static void bitwise_not_kernel ( TensorIterator & iter ) { <nl> return ! a ; <nl> } ) ; <nl> } else { <nl> - AT_DISPATCH_INTEGRAL_TYPES ( iter . dtype ( ) , " bitwise_cpu " , [ & ] ( ) { <nl> + AT_DISPATCH_INTEGRAL_TYPES ( iter . dtype ( ) , " bitwise_not_cpu " , [ & ] ( ) { <nl> cpu_kernel ( <nl> iter , <nl> [ ] ( scalar_t a ) - > scalar_t { <nl> mmm a / docs / source / torch . rst <nl> ppp b / docs / source / torch . rst <nl> Pointwise Ops <nl> . . autofunction : : asin <nl> . . autofunction : : atan <nl> . . autofunction : : atan2 <nl> + . . autofunction : : bitwise_not <nl> . . autofunction : : ceil <nl> . . autofunction : : clamp <nl> . . autofunction : : cos <nl> BLAS and LAPACK Operations <nl> . . autofunction : : addr <nl> . . autofunction : : baddbmm <nl> . . autofunction : : bmm <nl> - . . autofunction : : bitwise_not <nl> . . autofunction : : chain_matmul <nl> . . autofunction : : cholesky <nl> . . autofunction : : cholesky_inverse <nl>
Sanity fixes for bitwise_not ( )
pytorch/pytorch
d9d5d9a913df229638aca20a3981926f18472aff
2019-08-15T04:07:26Z
deleted file mode 100644 <nl> index 929ff1b634 . . 0000000000 <nl> mmm a / tests / tests / chain_tests . cpp <nl> ppp / dev / null <nl> <nl> - / * * <nl> - * @ file <nl> - * @ copyright defined in eos / LICENSE . txt <nl> - * / <nl> - # include < boost / test / unit_test . hpp > <nl> - <nl> - # include < eos / chain / chain_controller . hpp > <nl> - # include < eos / chain / permission_link_object . hpp > <nl> - # include < eos / chain / authority_checker . hpp > <nl> - <nl> - # include < eos / chain / producer_objects . hpp > <nl> - <nl> - # include < fc / crypto / digest . hpp > <nl> - <nl> - # include < boost / range / algorithm / find_if . hpp > <nl> - # include < boost / range / algorithm / permutation . hpp > <nl> - <nl> - # include " . . / common / database_fixture . hpp " <nl> - <nl> - using namespace eosio ; <nl> - using namespace chain ; <nl> - using rate_limiting_type = eosio : : chain : : testing_blockchain : : rate_limit_type ; <nl> - <nl> - BOOST_AUTO_TEST_SUITE ( chain_tests ) <nl> - <nl> - / / Test transaction signature chain_controller : : get_required_keys <nl> - BOOST_FIXTURE_TEST_CASE ( get_required_keys , testing_fixture ) <nl> - { try { <nl> - Make_Blockchain ( chain ) <nl> - <nl> - chain . set_auto_sign_transactions ( false ) ; <nl> - chain . set_skip_transaction_signature_checking ( false ) ; <nl> - <nl> - signed_transaction trx ; <nl> - trx . messages . resize ( 1 ) ; <nl> - transaction_set_reference_block ( trx , chain . head_block_id ( ) ) ; <nl> - trx . expiration = chain . head_block_time ( ) + 100 ; <nl> - trx . scope = sort_names ( { " inita " , " initb " } ) ; <nl> - types : : transfer trans = { " inita " , " initb " , ( 100 ) , " " } ; <nl> - <nl> - trx . messages [ 0 ] . type = " transfer " ; <nl> - trx . messages [ 0 ] . authorization = { { " inita " , " active " } } ; <nl> - trx . messages [ 0 ] . code = config : : eos_contract_name ; <nl> - transaction_set_message ( trx , 0 , " transfer " , trans ) ; <nl> - BOOST_REQUIRE_THROW ( chain . push_transaction ( trx ) , tx_missing_sigs ) ; <nl> - <nl> - auto required_keys = chain . get_required_keys ( trx , available_keys ( ) ) ; <nl> - BOOST_CHECK ( required_keys . size ( ) < available_keys ( ) . size ( ) ) ; / / otherwise not a very good test <nl> - chain . sign_transaction ( trx ) ; / / uses get_required_keys <nl> - chain . push_transaction ( trx ) ; <nl> - <nl> - BOOST_CHECK_EQUAL ( chain . get_liquid_balance ( " inita " ) , asset ( 100000 - 100 ) ) ; <nl> - BOOST_CHECK_EQUAL ( chain . get_liquid_balance ( " initb " ) , asset ( 100000 + 100 ) ) ; <nl> - <nl> - } FC_LOG_AND_RETHROW ( ) } <nl> - <nl> - / / Test chain_controller : : _transaction_message_rate message rate calculation <nl> - template < typename tx_msgs_exceeded > <nl> - void transaction_msg_rate_calculation ( rate_limiting_type account_type ) <nl> - { try { <nl> - fc : : time_point_sec now ( 0 ) ; <nl> - auto last_update_sec = now ; <nl> - fc : : time_point_sec rate_limit_time_frame_sec ( 10 ) ; <nl> - uint32_t rate_limit = 10 ; <nl> - uint32_t previous_rate = 9 ; <nl> - auto rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . 
name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 10 , rate ) ; <nl> - <nl> - previous_rate = 10 ; <nl> - BOOST_CHECK_EXCEPTION ( eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) , \ <nl> - tx_msgs_exceeded , <nl> - [ ] ( tx_msgs_exceeded const & e ) - > bool { return true ; } ) ; <nl> - <nl> - last_update_sec = fc : : time_point_sec ( 10 ) ; <nl> - now = fc : : time_point_sec ( 11 ) ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 10 , rate ) ; <nl> - <nl> - now = fc : : time_point_sec ( 12 ) ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 9 , rate ) ; <nl> - <nl> - now = fc : : time_point_sec ( 13 ) ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 8 , rate ) ; <nl> - <nl> - now = fc : : time_point_sec ( 19 ) ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 2 , rate ) ; <nl> - <nl> - now = fc : : time_point_sec ( 19 ) ; <nl> - / / our scenario will never have a previous_rate higher than max ( since it was limited ) but just checking algorithm <nl> - previous_rate = 90 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 10 , rate ) ; <nl> - <nl> - now = fc : : time_point_sec ( 20 ) ; <nl> - / / our scenario will never have a previous_rate higher than max ( since it was limited ) but just checking algorithm <nl> - previous_rate = 10000 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 1 , rate ) ; <nl> - <nl> - now = fc : : time_point_sec ( 2000 ) ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 1 , rate ) ; <nl> - <nl> - rate_limit_time_frame_sec = fc : : time_point_sec ( 10000 ) ; <nl> - now = fc : : time_point_sec ( 2010 ) ; <nl> - previous_rate = 10 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . 
name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 9 , rate ) ; <nl> - <nl> - rate_limit = 10000 ; <nl> - now = fc : : time_point_sec ( 10000 ) ; <nl> - last_update_sec = fc : : time_point_sec ( 9999 ) ; <nl> - previous_rate = 10000 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) ; <nl> - BOOST_CHECK_EQUAL ( 10000 , rate ) ; <nl> - <nl> - last_update_sec = fc : : time_point_sec ( 10000 ) ; <nl> - BOOST_CHECK_EXCEPTION ( eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , previous_rate , account_type , N ( my . name ) ) , \ <nl> - tx_msgs_exceeded , <nl> - [ ] ( tx_msgs_exceeded const & e ) - > bool { return true ; } ) ; <nl> - <nl> - } FC_LOG_AND_RETHROW ( ) } <nl> - <nl> - / / Test chain_controller : : _transaction_message_rate message rate calculation <nl> - template < typename tx_msgs_exceeded > <nl> - void transaction_msg_rate_running_calculation ( rate_limiting_type account_type ) <nl> - { try { <nl> - fc : : time_point_sec now ( 1000 ) ; <nl> - auto last_update_sec = now ; <nl> - auto rate_limit_time_frame_sec = now ; <nl> - uint32_t rate_limit = 1000 ; <nl> - uint32_t rate = 0 ; <nl> - for ( uint32_t i = 0 ; i < 1000 ; + + i ) <nl> - { <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - } <nl> - BOOST_REQUIRE_EQUAL ( 1000 , rate ) ; <nl> - <nl> - BOOST_REQUIRE_EXCEPTION ( eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) , \ <nl> - tx_msgs_exceeded , <nl> - [ ] ( tx_msgs_exceeded const & e ) - > bool { return true ; } ) ; <nl> - <nl> - now + = 1 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( 1000 , rate ) ; <nl> - <nl> - last_update_sec = now ; <nl> - BOOST_REQUIRE_EXCEPTION ( eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) , \ <nl> - tx_msgs_exceeded , <nl> - [ ] ( tx_msgs_exceeded const & e ) - > bool { return true ; } ) ; <nl> - <nl> - now + = 10 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - last_update_sec = now ; <nl> - for ( uint32_t i = 0 ; i < 9 ; + + i ) <nl> - { <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - } <nl> - BOOST_REQUIRE_EQUAL ( 1000 , rate ) ; <nl> - <nl> - BOOST_REQUIRE_EXCEPTION ( eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . 
name ) ) , \ <nl> - tx_msgs_exceeded , <nl> - [ ] ( tx_msgs_exceeded const & e ) - > bool { return true ; } ) ; <nl> - <nl> - for ( uint32_t j = 0 ; j < 100 ; + + j ) <nl> - { <nl> - <nl> - now + = 10 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - last_update_sec = now ; <nl> - for ( uint32_t i = 0 ; i < 9 ; + + i ) <nl> - { <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - } <nl> - BOOST_REQUIRE_EQUAL ( 1000 , rate ) ; <nl> - <nl> - BOOST_REQUIRE_EXCEPTION ( eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) , \ <nl> - tx_msgs_exceeded , <nl> - [ ] ( tx_msgs_exceeded const & e ) - > bool { return true ; } ) ; <nl> - } <nl> - <nl> - now + = 100 ; <nl> - rate = eosio : : chain : : chain_controller : : _transaction_message_rate ( now , last_update_sec , rate_limit_time_frame_sec , <nl> - rate_limit , rate , account_type , N ( my . name ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( 901 , rate ) ; <nl> - } FC_LOG_AND_RETHROW ( ) } <nl> - <nl> - BOOST_FIXTURE_TEST_CASE ( authorization_transaction_msg_rate_calculation , testing_fixture ) <nl> - { <nl> - transaction_msg_rate_calculation < tx_msgs_auth_exceeded > ( rate_limiting_type : : authorization_account ) ; <nl> - } <nl> - <nl> - BOOST_FIXTURE_TEST_CASE ( authorization_transaction_msg_rate_running_calculation , testing_fixture ) <nl> - { <nl> - transaction_msg_rate_running_calculation < tx_msgs_auth_exceeded > ( rate_limiting_type : : authorization_account ) ; <nl> - } <nl> - <nl> - BOOST_FIXTURE_TEST_CASE ( code_transaction_msg_rate_calculation , testing_fixture ) <nl> - { <nl> - transaction_msg_rate_calculation < tx_msgs_code_exceeded > ( rate_limiting_type : : code_account ) ; <nl> - } <nl> - <nl> - BOOST_FIXTURE_TEST_CASE ( code_transaction_msg_rate_running_calculation , testing_fixture ) <nl> - { <nl> - transaction_msg_rate_running_calculation < tx_msgs_code_exceeded > ( rate_limiting_type : : code_account ) ; <nl> - } <nl> - <nl> - <nl> - BOOST_AUTO_TEST_SUITE_END ( ) <nl>
Remove old rate limiting tests
EOSIO/eos
48a9545cb1c79364cead0241238974243dc55a67
2018-04-05T07:03:10Z
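The deleted EOSIO tests above pin down a linear-decay message-rate calculation: the previous rate is scaled down by the fraction of the rate-limit window that has elapsed, one is added for the message being processed now, and an error is raised if the result would exceed the limit. The sketch below is reconstructed from those expected test values and is only an approximation; the real chain_controller helper also takes an account type and name and throws tx_msgs_auth_exceeded or tx_msgs_code_exceeded rather than a generic exception.

```cpp
#include <cstdint>
#include <stdexcept>

// Assumes now_sec >= last_update_sec and window_sec > 0.
uint32_t transaction_message_rate(uint32_t now_sec, uint32_t last_update_sec,
                                  uint32_t window_sec, uint32_t rate_limit,
                                  uint32_t previous_rate) {
    const uint32_t elapsed = now_sec - last_update_sec;
    uint64_t decayed = 0;
    if (elapsed < window_sec) {
        // Linearly scale the previous rate down by the fraction of the window that has passed.
        decayed = static_cast<uint64_t>(previous_rate) * (window_sec - elapsed) / window_sec;
    }
    const uint64_t rate = decayed + 1;  // count the message being processed now
    if (rate > rate_limit) {
        throw std::runtime_error("transaction message rate exceeded");
    }
    return static_cast<uint32_t>(rate);
}
```

With the test's numbers (limit 10, window 10 s), a previous rate of 10 decayed over 9 elapsed seconds leaves 1, so the next message yields a rate of 2, matching the BOOST_CHECK_EQUAL(2, rate) case above.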
mmm a / platformio . ini <nl> ppp b / platformio . ini <nl> board = BigTree_SKR_Pro <nl> extra_scripts = pre : buildroot / share / PlatformIO / scripts / generic_create_variant . py <nl> build_flags = $ { common . build_flags } <nl> - DUSBCON - DUSBD_USE_CDC - DUSBD_VID = 0x0483 - DUSB_PRODUCT = \ " STM32F407ZG \ " <nl> - - DTARGET_STM32F4 - DSTM32F407_5ZX - DVECT_TAB_OFFSET = 0x8000 - DHAVE_HWSERIAL6 <nl> + - DTARGET_STM32F4 - DSTM32F407_5ZX - DVECT_TAB_OFFSET = 0x8000 - DHAVE_HWSERIAL6 - DSS_TIMER = 4 <nl> lib_deps = <nl> U8glib - HAL = https : / / github . com / MarlinFirmware / U8glib - HAL / archive / bugfix . zip <nl> LiquidCrystal @ 1 . 3 . 4 <nl> - TMCStepper @ > = 0 . 5 . 0 , < 1 . 0 . 0 <nl> + TMCStepper @ > = 0 . 5 . 2 , < 1 . 0 . 0 <nl> Adafruit NeoPixel <nl> LiquidTWI2 = https : / / github . com / lincomatic / LiquidTWI2 / archive / master . zip <nl> Arduino - L6470 = https : / / github . com / ameyer / Arduino - L6470 / archive / dev . zip <nl> + SoftwareSerialM = https : / / github . com / sjasonsmith / SoftwareSerialM / archive / SKR_PRO . zip <nl> src_filter = $ { common . default_src_filter } + < src / HAL / HAL_STM32 > <nl> monitor_speed = 250000 <nl> <nl>
Use modified SoftwareSerialM which works with SKR Pro ( )
MarlinFirmware/Marlin
1fe0646d255553a9dddaa2e5862b9c28a848b48a
2019-11-04T21:17:42Z
new file mode 100644 <nl> index 0000000000000 . . 92acb7ab7fe19 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / ci_build / presubmit / macos / py2_cc / build . sh <nl> <nl> + # ! / bin / bash <nl> + # Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + # TODO ( mihaimaruseac , hyey , ggadde ) : Convert to py3 <nl> + <nl> + set - e <nl> + <nl> + # Error if we somehow forget to set the path to bazel_wrapper . py <nl> + set - u <nl> + BAZEL_WRAPPER_PATH = $ 1 <nl> + set + u <nl> + <nl> + # From this point on , logs can be publicly available <nl> + set - x <nl> + <nl> + function setup_pip ( ) { <nl> + install_pip2 <nl> + python - m virtualenv tf_build_env - - system - site - packages <nl> + source tf_build_env / bin / activate <nl> + install_macos_pip_deps <nl> + } <nl> + <nl> + function run_build ( ) { <nl> + # Run configure . <nl> + export TF_NEED_CUDA = 0 <nl> + export PYTHON_BIN_PATH = $ ( which python2 ) <nl> + yes " " | $ PYTHON_BIN_PATH configure . py <nl> + tag_filters = " - no_oss , - no_oss_py2 , - gpu , - tpu , - benchmark - test , - nomac , - no_mac , - v1only " <nl> + <nl> + # Get the default test targets for bazel . <nl> + source tensorflow / tools / ci_build / build_scripts / PRESUBMIT_BUILD_TARGETS . sh <nl> + <nl> + " $ { BAZEL_WRAPPER_PATH } " \ <nl> + test \ <nl> + - - build_tag_filters = " $ { tag_filters } " \ <nl> + - - test_tag_filters = " $ { tag_filters } " \ <nl> + - - action_env = PATH \ <nl> + - - remote_accept_cached = true \ <nl> + - - spawn_strategy = standalone \ <nl> + - - remote_local_fallback = false \ <nl> + - - remote_timeout = 600 \ <nl> + - - strategy = Javac = standalone \ <nl> + - - strategy = Closure = standalone \ <nl> + - - genrule_strategy = standalone \ <nl> + - - $ { DEFAULT_BAZEL_TARGETS } - / / tensorflow / lite / . . . <nl> + <nl> + # Copy log to output to be available to GitHub <nl> + ls - la " $ ( bazel info output_base ) / java . log " <nl> + cp " $ ( bazel info output_base ) / java . log " " $ { KOKORO_ARTIFACTS_DIR } / " <nl> + } <nl> + <nl> + source tensorflow / tools / ci_build / release / common . sh <nl> + update_bazel_macos <nl> + which bazel <nl> + set_bazel_outdir <nl> + <nl> + setup_pip <nl> + run_build <nl>
Add open source build scripts for the macOS presubmit .
tensorflow/tensorflow
b68d2af793c1f939350c8128b0222846407196ba
2019-12-23T20:52:26Z
mmm a / lib / IRGen / GenDecl . cpp <nl> ppp b / lib / IRGen / GenDecl . cpp <nl> IRGenModule : : getAddrOfLLVMVariableOrGOTEquivalent ( LinkEntity entity , <nl> / / Handle SILFunctions specially , because unlike other entities they aren ' t <nl> / / variables and aren ' t kept in the GlobalVars table . <nl> if ( entity . isSILFunction ( ) ) { <nl> - auto fn = getAddrOfSILFunction ( entity . getSILFunction ( ) , NotForDefinition ) ; <nl> - if ( entity . getSILFunction ( ) - > isDefinition ( ) <nl> - & & ! isAvailableExternally ( entity . getSILFunction ( ) - > getLinkage ( ) ) ) { <nl> + auto * silFn = entity . getSILFunction ( ) ; <nl> + auto fn = getAddrOfSILFunction ( silFn , NotForDefinition ) ; <nl> + if ( silFn - > isDefinition ( ) & & <nl> + ! isAvailableExternally ( silFn - > getLinkage ( ) ) & & <nl> + this = = IRGen . getGenModule ( silFn ) ) { <nl> return { fn , ConstantReference : : Direct } ; <nl> } <nl> <nl> mmm a / lib / IRGen / GenKeyPath . cpp <nl> ppp b / lib / IRGen / GenKeyPath . cpp <nl> getAccessorForComputedComponent ( IRGenModule & IGM , <nl> / / If it ' s only externally available , we need a local thunk to relative - <nl> / / reference . <nl> if ( requirements . empty ( ) & & <nl> - ! LinkEntity : : forSILFunction ( accessor , false ) . isAvailableExternally ( IGM ) ) { <nl> - <nl> + ! isAvailableExternally ( accessor - > getLinkage ( ) ) & & <nl> + & IGM = = IGM . IRGen . getGenModule ( accessor ) ) { <nl> return IGM . getAddrOfSILFunction ( accessor , NotForDefinition ) ; <nl> } <nl> auto accessorFn = IGM . getAddrOfSILFunction ( accessor , NotForDefinition ) ; <nl> emitKeyPathComponent ( IRGenModule & IGM , <nl> fields . add ( llvm : : ConstantExpr : : getTruncOrBitCast ( idValue , IGM . Int32Ty ) ) ; <nl> break ; <nl> } <nl> - <nl> - if ( isInstantiableOnce ) { <nl> - / / No generic arguments or indexes , so we can invoke the <nl> - / / getter / setter as is . <nl> + <nl> + / / Push the accessors , possibly thunked to marshal generic environment . <nl> + fields . addRelativeAddress ( <nl> + getAccessorForComputedComponent ( IGM , component , Getter , <nl> + genericEnv , requirements , <nl> + hasSubscriptIndices ) ) ; <nl> + if ( settable ) <nl> fields . addRelativeAddress ( <nl> - IGM . getAddrOfSILFunction ( component . getComputedPropertyGetter ( ) , <nl> - NotForDefinition ) ) ; <nl> - if ( settable ) <nl> - fields . addRelativeAddress ( <nl> - IGM . getAddrOfSILFunction ( component . getComputedPropertySetter ( ) , <nl> - NotForDefinition ) ) ; <nl> - } else { <nl> + getAccessorForComputedComponent ( IGM , component , Setter , <nl> + genericEnv , requirements , <nl> + hasSubscriptIndices ) ) ; <nl> + <nl> + if ( ! isInstantiableOnce ) { <nl> / / If there ' s generic context or subscript indexes , embed as <nl> / / arguments in the component . Thunk the SIL - level accessors to give the <nl> / / runtime implementation a polymorphically - callable interface . <nl> - <nl> - / / Push the accessors , possibly thunked to marshal generic environment . <nl> - fields . addRelativeAddress ( <nl> - getAccessorForComputedComponent ( IGM , component , Getter , <nl> - genericEnv , requirements , <nl> - hasSubscriptIndices ) ) ; <nl> - if ( settable ) <nl> - fields . addRelativeAddress ( <nl> - getAccessorForComputedComponent ( IGM , component , Setter , <nl> - genericEnv , requirements , <nl> - hasSubscriptIndices ) ) ; <nl> - <nl> + <nl> fields . 
addRelativeAddress ( <nl> getLayoutFunctionForComputedComponent ( IGM , component , <nl> genericEnv , requirements ) ) ; <nl> new file mode 100644 <nl> index 000000000000 . . a9cb8e953d9d <nl> mmm / dev / null <nl> ppp b / test / IRGen / Inputs / multithread_keypaths_other . swift <nl> <nl> + public struct A { <nl> + / / note : not public <nl> + var foo : Int { get { return 0 } set { } } <nl> + } <nl> mmm a / test / IRGen / keypaths . sil <nl> ppp b / test / IRGen / keypaths . sil <nl> entry : <nl> } <nl> <nl> sil @ k_id : $ @ convention ( thin ) ( ) - > ( ) <nl> - sil @ k_get : $ @ convention ( thin ) ( @ in_guaranteed S ) - > @ out Int <nl> + sil @ k_get : $ @ convention ( thin ) ( @ in_guaranteed S ) - > @ out Int { <nl> + bb0 ( % 0 : @ trivial $ * Int , % 1 : @ trivial $ * S ) : <nl> + unreachable <nl> + } <nl> + <nl> + sil @ l_get : $ @ convention ( thin ) ( @ in_guaranteed C ) - > @ out Int { <nl> + bb0 ( % 0 : @ trivial $ * Int , % 1 : @ trivial $ * C ) : <nl> + unreachable <nl> + } <nl> <nl> - sil @ l_get : $ @ convention ( thin ) ( @ in_guaranteed C ) - > @ out Int <nl> - sil @ l_set : $ @ convention ( thin ) ( @ in_guaranteed Int , @ in_guaranteed C ) - > ( ) <nl> + sil @ l_set : $ @ convention ( thin ) ( @ in_guaranteed Int , @ in_guaranteed C ) - > ( ) { <nl> + bb0 ( % 0 : @ trivial $ * Int , % 1 : @ trivial $ * C ) : <nl> + unreachable <nl> + } <nl> <nl> - sil @ m_get : $ @ convention ( thin ) ( @ in_guaranteed S ) - > @ out @ callee_guaranteed ( ) - > @ out ( ) <nl> - sil @ m_set : $ @ convention ( thin ) ( @ in_guaranteed @ callee_guaranteed ( ) - > @ out ( ) , @ inout S ) - > ( ) <nl> + sil @ m_get : $ @ convention ( thin ) ( @ in_guaranteed S ) - > @ out @ callee_guaranteed ( ) - > @ out ( ) { <nl> + bb0 ( % 0 : @ trivial $ * @ callee_guaranteed ( ) - > @ out ( ) , % 1 : @ trivial $ * S ) : <nl> + unreachable <nl> + } <nl> + <nl> + sil @ m_set : $ @ convention ( thin ) ( @ in_guaranteed @ callee_guaranteed ( ) - > @ out ( ) , @ inout S ) - > ( ) { <nl> + bb0 ( % 0 : @ trivial $ * @ callee_guaranteed ( ) - > @ out ( ) , % 1 : @ trivial $ * S ) : <nl> + unreachable <nl> + } <nl> <nl> sil @ m2_get : $ @ convention ( thin ) ( @ in_guaranteed C2 ) - > @ out @ callee_guaranteed ( ) - > @ out ( ) <nl> sil @ m2_set : $ @ convention ( thin ) ( @ in_guaranteed @ callee_guaranteed ( ) - > @ out ( ) , @ inout C2 ) - > ( ) <nl> new file mode 100644 <nl> index 000000000000 . . 5dcf87ee0920 <nl> mmm / dev / null <nl> ppp b / test / IRGen / multithread_keypaths . swift <nl> <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - swift - frontend - c % S / Inputs / multithread_keypaths_other . swift % s - num - threads 2 - o % t / 1 . o - o % t / 2 . o - module - name multithread_keypaths <nl> + / / RUN : % target - swift - frontend - c % S / Inputs / multithread_keypaths_other . swift % s - num - threads 2 - o % t / 1 . o - o % t / 2 . o - module - name multithread_keypaths - enable - testing <nl> + / / RUN : % target - swift - frontend - c % S / Inputs / multithread_keypaths_other . swift % s - num - threads 2 - o % t / 1 . o - o % t / 2 . o - module - name multithread_keypaths - enable - resilience <nl> + / / RUN : % target - swift - frontend - c % S / Inputs / multithread_keypaths_other . swift % s - num - threads 2 - o % t / 1 . o - o % t / 2 . o - module - name multithread_keypaths - enable - testing - enable - resilience <nl> + <nl> + func f ( _ k : WritableKeyPath < A , Int > ) { } <nl> + <nl> + func g ( ) { <nl> + f ( \ A . foo ) <nl> + } <nl>
IRGen : Fix keypath pattern emission regression in multi - threaded mode
apple/swift
50f68839a960554a322e55fb273555f53023b878
2018-11-09T05:44:29Z
mmm a / modules / mono / glue / cs_files / Mathf . cs <nl> ppp b / modules / mono / glue / cs_files / Mathf . cs <nl> public static float Cosh ( float s ) <nl> <nl> public static int Decimals ( float step ) <nl> { <nl> - return Decimals ( step ) ; <nl> + return Decimals ( ( decimal ) step ) ; <nl> } <nl> <nl> public static int Decimals ( decimal step ) <nl>
Fix an infinite recursion in the Mathf . Decimals method when using floats .
godotengine/godot
2109bd3f9722929a1d8b6850b235677a03a1313a
2018-01-31T19:02:17Z
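The cast matters because overload resolution otherwise binds Decimals(step) straight back to the float overload, so the method calls itself until the stack overflows. A minimal standalone C++ analogue of the same overload-recursion pitfall (hypothetical names and a simplified digit count, not Godot source):

// decimals_overload.cpp - C++ analogue of the recursion bug fixed above.
#include <cstdio>

int Decimals(double step) {          // plays the role of Decimals(decimal)
    int count = 0;
    while (step != static_cast<long long>(step) && count < 10) {
        step *= 10.0;
        ++count;
    }
    return count;
}

int Decimals(float step) {
    // BUG (pre-fix): "return Decimals(step);" binds to Decimals(float) again -> infinite recursion.
    // FIX: the cast makes overload resolution pick the other overload.
    return Decimals(static_cast<double>(step));
}

int main() {
    std::printf("%d\n", Decimals(0.125f));   // prints 3
    return 0;
}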
mmm a / modules / control / controller / lon_controller . cc <nl> ppp b / modules / control / controller / lon_controller . cc <nl> void LonController : : CloseLogFile ( ) { <nl> } <nl> } <nl> } <nl> - void LonController : : Stop ( ) { CloseLogFile ( ) ; } <nl> + void LonController : : Stop ( ) { <nl> + CloseLogFile ( ) ; <nl> + } <nl> <nl> - LonController : : ~ LonController ( ) { CloseLogFile ( ) ; } <nl> + LonController : : ~ LonController ( ) { <nl> + CloseLogFile ( ) ; <nl> + } <nl> <nl> Status LonController : : Init ( const ControlConf * control_conf ) { <nl> control_conf_ = control_conf ; <nl> Status LonController : : ComputeControlCommand ( <nl> double preview_time = lon_controller_conf . preview_window ( ) * ts ; <nl> <nl> if ( preview_time < 0 . 0 ) { <nl> - const auto error_msg = common : : util : : StrCat ( " Preview time set as : " , <nl> - preview_time , " less than 0 " ) ; <nl> + const auto error_msg = common : : util : : StrCat ( <nl> + " Preview time set as : " , preview_time , " less than 0 " ) ; <nl> AERROR < < error_msg ; <nl> return Status ( ErrorCode : : CONTROL_COMPUTE_ERROR , error_msg ) ; <nl> } <nl> Status LonController : : Reset ( ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - std : : string LonController : : Name ( ) const { return name_ ; } <nl> + std : : string LonController : : Name ( ) const { <nl> + return name_ ; <nl> + } <nl> <nl> void LonController : : ComputeLongitudinalErrors ( <nl> const TrajectoryAnalyzer * trajectory_analyzer , const double preview_time , <nl> mmm a / modules / control / controller / mpc_controller . cc <nl> ppp b / modules / control / controller / mpc_controller . cc <nl> void MPCController : : ComputeLongitudinalErrors ( <nl> debug - > set_station_reference ( reference_point . path_point ( ) . s ( ) ) ; <nl> debug - > set_speed_reference ( reference_point . v ( ) ) ; <nl> debug - > set_acceleration_reference ( reference_point . a ( ) ) ; <nl> + <nl> + debug - > set_station_feedback ( s_matched ) ; <nl> + debug - > set_speed_feedback ( <nl> + VehicleStateProvider : : instance ( ) - > linear_velocity ( ) ) ; <nl> } <nl> <nl> } / / namespace control <nl> mmm a / modules / control / proto / control_cmd . proto <nl> ppp b / modules / control / proto / control_cmd . proto <nl> message SimpleMPCDebug { <nl> optional double speed_error = 21 ; <nl> optional double acceleration_reference = 22 ; <nl> optional bool is_full_stop = 23 ; <nl> + optional double station_feedback = 24 ; <nl> + optional double speed_feedback = 25 ; <nl> } <nl> <nl> message InputDebug { <nl>
Control : Update MPC debug proto ( )
ApolloAuto/apollo
5c828372c2ed1fd919d5dcdfa36f6add89bdb2cd
2017-12-29T00:06:13Z
mmm a / docker / test / stateless / Dockerfile <nl> ppp b / docker / test / stateless / Dockerfile <nl> RUN apt - get update - y \ <nl> ENV TZ = Europe / Moscow <nl> RUN ln - snf / usr / share / zoneinfo / $ TZ / etc / localtime & & echo $ TZ > / etc / timezone <nl> <nl> + ENV TSAN_OPTIONS = ' halt_on_error = 1 ' <nl> + ENV UBSAN_OPTIONS = ' print_stacktrace = 1 ' <nl> + ENV ASAN_SYMBOLIZER_PATH = / usr / lib / llvm - 6 . 0 / bin / llvm - symbolizer <nl> + <nl> COPY zookeeper . xml / etc / clickhouse - server / config . d / zookeeper . xml <nl> COPY listen . xml / etc / clickhouse - server / config . d / listen . xml <nl> COPY part_log . xml / etc / clickhouse - server / config . d / part_log . xml <nl> mmm a / docker / test / stress / Dockerfile <nl> ppp b / docker / test / stress / Dockerfile <nl> RUN apt - get update - y \ <nl> netcat - openbsd \ <nl> telnet <nl> <nl> + ENV TSAN_OPTIONS = ' halt_on_error = 1 ' <nl> + ENV UBSAN_OPTIONS = ' print_stacktrace = 1 ' <nl> + ENV ASAN_SYMBOLIZER_PATH = / usr / lib / llvm - 6 . 0 / bin / llvm - symbolizer <nl> + <nl> COPY . / stress / stress <nl> COPY log_queries . xml / etc / clickhouse - server / users . d / log_queries . xml <nl> COPY part_log . xml / etc / clickhouse - server / config . d / part_log . xml <nl>
Add env variables for test images
ClickHouse/ClickHouse
7984e4f8a454f91700e2c46022e0477e5c9ee842
2019-01-16T08:58:40Z
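For context, halt_on_error=1 makes ThreadSanitizer exit non-zero on the first reported race so a CI run fails instead of merely logging, and print_stacktrace=1 makes UndefinedBehaviorSanitizer attach a stack trace to each report. A small C++ sketch of what the UBSan option changes (file name and build commands are illustrative, not part of the test images):

// ub.cpp - minimal undefined-behaviour trigger for demonstrating UBSAN_OPTIONS.
#include <climits>

int main(int argc, char**) {
    int x = INT_MAX;
    return x + argc;   // signed integer overflow: reported by UBSan at runtime
}

// Hypothetical commands, not taken from the Dockerfiles:
//   clang++ -fsanitize=undefined -g ub.cpp -o ub
//   UBSAN_OPTIONS='print_stacktrace=1' ./ub    # the overflow report now includes a stack trace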
mmm a / tensorflow / python / distribute / parameter_server_strategy . py <nl> ppp b / tensorflow / python / distribute / parameter_server_strategy . py <nl> <nl> class ParameterServerStrategy ( distribute_lib . Strategy ) : <nl> " " " An asynchronous multi - worker parameter server tf . distribute strategy . <nl> <nl> - This strategy requires two jobs : workers and parameter servers . Variables and <nl> + This strategy requires two roles : workers and parameter servers . Variables and <nl> updates to those variables will be assigned to parameter servers and other <nl> operations are assigned to workers . <nl> <nl>
Merge pull request from tensorflow : terrytangyuan - patch - 1
tensorflow/tensorflow
618b1d75c82defe352746faef95ed545661b1018
2020-02-13T19:02:29Z
mmm a / test / cpp / tensorexpr / test_simplify . cpp <nl> ppp b / test / cpp / tensorexpr / test_simplify . cpp <nl> void testSimplifySubs ( ) { <nl> ExprHandle simplified = IRSimplifier : : simplify ( body ) ; <nl> IS_IMM_WITH_VAL ( Int , simplified . node ( ) , 2 ) ; <nl> } <nl> + <nl> + { <nl> + / / Sub where result is negative . <nl> + ExprHandle body = x - ( x + 1 ) ; <nl> + ExprHandle simplified = IRSimplifier : : simplify ( body ) ; <nl> + IS_IMM_WITH_VAL ( Int , simplified . node ( ) , - 1 ) ; <nl> + } <nl> + <nl> + { <nl> + / / Sub where result is positive due to negative scalar on RHS . <nl> + ExprHandle body = x - ( x - 1 ) ; <nl> + ExprHandle simplified = IRSimplifier : : simplify ( body ) ; <nl> + IS_IMM_WITH_VAL ( Int , simplified . node ( ) , 1 ) ; <nl> + } <nl> + <nl> + { <nl> + / / Term - Polynomial sub where RHS must be negated . <nl> + ExprHandle body = ( x * 2 ) - ( x * 2 + 1 ) ; <nl> + ExprHandle simplified = IRSimplifier : : simplify ( body ) ; <nl> + IS_IMM_WITH_VAL ( Int , simplified . node ( ) , - 1 ) ; <nl> + } <nl> + <nl> + { <nl> + / / Term - Polynomial sub where the result is a Term . <nl> + ExprHandle body = ( y * x * 2 ) - ( x * y ) ; <nl> + ExprHandle simplified = IRSimplifier : : simplify ( body ) ; <nl> + IS_NODE_WITH_NAME ( Mul , simplified . node ( ) , mul ) ; <nl> + <nl> + IS_VAR_WITH_NAME ( mul - > lhs ( ) , " x " ) ; <nl> + IS_VAR_WITH_NAME ( mul - > rhs ( ) , " y " ) ; <nl> + } <nl> + <nl> + { <nl> + / / Term - Polynomial sub where the result is a Polynomial . <nl> + ExprHandle body = ( x * 2 ) - ( x + 1 ) ; <nl> + ExprHandle simplified = IRSimplifier : : simplify ( body ) ; <nl> + IS_NODE_WITH_NAME ( Sub , simplified . node ( ) , sub ) ; <nl> + <nl> + IS_VAR_WITH_NAME ( sub - > lhs ( ) , " x " ) ; <nl> + IS_IMM_WITH_VAL ( Int , sub - > rhs ( ) , 1 ) ; <nl> + } <nl> } <nl> <nl> void testSimplifyDiv ( ) { <nl> mmm a / torch / csrc / jit / tensorexpr / ir_simplifier . cpp <nl> ppp b / torch / csrc / jit / tensorexpr / ir_simplifier . cpp <nl> const Expr * PolynomialTransformer : : mutate ( const Sub * v ) { <nl> if ( rhsPoly & & lhsTerm ) { <nl> / / Negate every part of the Polynomial . <nl> const Expr * minusOne = getImmediateByType ( lhsTerm - > dtype ( ) , - 1 ) ; <nl> - const Expr * negateScalar = evaluateOp ( new Mul ( minusOne , lhsTerm - > scalar ( ) ) ) ; <nl> + const Expr * negateScalar = evaluateOp ( new Mul ( minusOne , rhsPoly - > scalar ( ) ) ) ; <nl> <nl> std : : vector < const Term * > variables ; <nl> - for ( auto * t : lhsPoly - > variables ( ) ) { <nl> + for ( auto * t : rhsPoly - > variables ( ) ) { <nl> const Expr * negate = evaluateOp ( new Mul ( minusOne , t - > scalar ( ) ) ) ; <nl> variables . push_back ( new Term ( hasher_ , negate , t - > variables ( ) ) ) ; <nl> } <nl>
[ NNC ] Fix crash when simplifying certain subtractions ( )
pytorch/pytorch
2fa91fa3050bf48780cb757c74db3d0b70a5e458
2020-10-09T22:15:55Z
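The crash came from negating the wrong operand: when rewriting Term - Polynomial, every part of the right-hand polynomial has to be negated before the pieces are merged, not the left-hand term. A standalone sketch of that sign rule with plain integers (not the TensorExpr IR types):

// sub_negation.cpp - sketch of the sign rule behind the fix above.
#include <cassert>

struct Poly { int scalar; int coeff; };   // represents scalar + coeff*x

// Fold (t*x) - (p.coeff*x + p.scalar) into a Poly in x.
Poly subTermPoly(int t, const Poly& p) {
    // FIX: negate the RHS parts (the pre-fix code negated the LHS by mistake).
    return Poly{ -p.scalar, t - p.coeff };
}

int main() {
    // (x*2) - (x*2 + 1)  ==>  0*x - 1, matching the new test expectation of -1.
    Poly r = subTermPoly(2, Poly{1, 2});
    assert(r.coeff == 0 && r.scalar == -1);
    return 0;
}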
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> bool CollapsingHeader ( const char * label , const char * str_id , const bool display_ <nl> ImGuiAabb bb = ImGuiAabb ( pos_min , ImVec2 ( pos_max . x , pos_min . y + text_size . y ) ) ; <nl> if ( display_frame ) <nl> { <nl> - bb . Min . x - = window_padding . x * 0 . 5f ; <nl> - bb . Max . x + = window_padding . x * 0 . 5f ; <nl> + bb . Min . x - = window_padding . x * 0 . 5f - 1 ; <nl> + bb . Max . x + = window_padding . x * 0 . 5f - 1 ; <nl> bb . Max . y + = style . FramePadding . y * 2 ; <nl> } <nl> <nl> void ShowTestWindow ( bool * open ) <nl> <nl> if ( ImGui : : CollapsingHeader ( " Window options " ) ) <nl> { <nl> - ImGui : : Checkbox ( " no titlebar " , & no_titlebar ) ; ImGui : : SameLine ( 200 ) ; <nl> - ImGui : : Checkbox ( " no border " , & no_border ) ; ImGui : : SameLine ( 400 ) ; <nl> + ImGui : : Checkbox ( " no titlebar " , & no_titlebar ) ; ImGui : : SameLine ( 150 ) ; <nl> + ImGui : : Checkbox ( " no border " , & no_border ) ; ImGui : : SameLine ( 300 ) ; <nl> ImGui : : Checkbox ( " no resize " , & no_resize ) ; <nl> - ImGui : : Checkbox ( " no move " , & no_move ) ; ImGui : : SameLine ( 200 ) ; <nl> + ImGui : : Checkbox ( " no move " , & no_move ) ; ImGui : : SameLine ( 150 ) ; <nl> ImGui : : Checkbox ( " no scrollbar " , & no_scrollbar ) ; <nl> ImGui : : SliderFloat ( " fill alpha " , & fill_alpha , 0 . 0f , 1 . 0f ) ; <nl> if ( ImGui : : TreeNode ( " Style Editor " ) ) <nl> void ShowTestWindow ( bool * open ) <nl> ImGui : : EndTooltip ( ) ; <nl> } <nl> <nl> + ImGui : : Separator ( ) ; <nl> + ImGui : : Text ( " ^ Horizontal separator " ) ; <nl> + <nl> static int item = 1 ; <nl> ImGui : : Combo ( " combo " , & item , " aaaa \ 0bbbb \ 0cccc \ 0dddd \ 0eeee \ 0 \ 0 " ) ; <nl> <nl>
Fixed collapsing header border ( if borders are enabled ) being off the clip rectangle .
ocornut/imgui
5a9639b423d2d1600fcc29c4df4fd07db593c156
2014-08-27T10:38:26Z
mmm a / bindings / python / doc / conf . py <nl> ppp b / bindings / python / doc / conf . py <nl> <nl> - # ! / usr / bin / env python3 <nl> - # - * - coding : utf - 8 - * - <nl> - # <nl> - # Python API for CNTK v2 documentation build configuration file , created by <nl> - # sphinx - quickstart on Wed Apr 6 13 : 21 : 01 2016 . <nl> - # <nl> - # This file is execfile ( ) d with the current directory set to its <nl> - # containing dir . <nl> - # <nl> - # Note that not all possible configuration values are present in this <nl> - # autogenerated file . <nl> - # <nl> - # All configuration values have a default ; values that are commented out <nl> - # serve to show the default . <nl> + try : <nl> + import cntk <nl> + except ImportError : <nl> + raise ImportError ( " Unable to import cntk ; the cntk module needs to be built " <nl> + " and importable to generate documentation " ) <nl> + <nl> + try : <nl> + import sphinx_rtd_theme <nl> + except ImportError : <nl> + raise ImportError ( " Unable to import sphinx_rtd_theme , please install via " <nl> + " ' pip install sphinx_rtd_theme ' " ) <nl> <nl> - import sys <nl> - import os <nl> - <nl> - # - - General configuration mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - <nl> - # If your documentation needs a minimal Sphinx version , state it here . <nl> - # needs_sphinx = ' 1 . 0 ' <nl> - <nl> - # Add any Sphinx extension module names here , as strings . They can be <nl> - # extensions coming with Sphinx ( named ' sphinx . ext . * ' ) or your custom <nl> - # ones . <nl> extensions = [ <nl> ' sphinx . ext . autodoc ' , <nl> - ' sphinx . ext . todo ' , <nl> ' sphinx . ext . mathjax ' , <nl> ' sphinx . ext . napoleon ' , <nl> + ' sphinx . ext . todo ' , <nl> ' sphinx . ext . viewcode ' , <nl> ] <nl> <nl> - # Add any paths that contain templates here , relative to this directory . <nl> - templates_path = [ ' _templates ' ] <nl> + master_doc = ' index ' <nl> <nl> - # The suffix ( es ) of source filenames . <nl> - # You can specify multiple suffix as a list of string : <nl> - # source_suffix = [ ' . rst ' , ' . md ' ] <nl> - source_suffix = ' . rst ' <nl> + exclude_patterns = [ <nl> + ' _build ' , <nl> + ' cntk_py ' , <nl> + ' tests ' , <nl> + ' * * / tests / * ' , <nl> + ' * tests * ' <nl> + ] <nl> <nl> - # The encoding of source files . <nl> - # source_encoding = ' utf - 8 - sig ' <nl> + needs_sphinx = ' 1 . 3 ' <nl> <nl> - # The master toctree document . <nl> - master_doc = ' index ' <nl> + # TODO nitpick_ignore <nl> <nl> - # General information about the project . <nl> project = ' Python API for CNTK ' <nl> copyright = ' 2017 , Microsoft ' <nl> - author = ' Microsoft ' <nl> - <nl> - # The version info for the project you ' re documenting , acts as replacement for <nl> - # | version | and | release | , also used in various other places throughout the <nl> - # built documents . <nl> - # <nl> - # The short X . Y version . <nl> - version = ' 2 . 0rc1 ' <nl> - # The full version , including alpha / beta / rc tags . <nl> - release = ' 2 . 0rc1 ' <nl> <nl> - # The language for content autogenerated by Sphinx . Refer to documentation <nl> - # for a list of supported languages . <nl> - # <nl> - # This is also used if you do content translation via gettext catalogs . <nl> - # Usually you set " language " from the command line for these cases . 
<nl> - language = None <nl> - <nl> - # There are two options for replacing | today | : either , you set today to some <nl> - # non - false value , then it is used : <nl> - # today = ' ' <nl> - # Else , today_fmt is used as the format for a strftime call . <nl> - # today_fmt = ' % B % d , % Y ' <nl> - <nl> - # List of patterns , relative to source directory , that match files and <nl> - # directories to ignore when looking for source files . <nl> - exclude_patterns = [ ' _build ' , ' cntk_py ' , ' tests ' , ' * * / tests / * ' , ' * tests * ' ] <nl> - <nl> - # The reST default role ( used for this markup : ` text ` ) to use for all <nl> - # documents . <nl> - # default_role = None <nl> - <nl> - # If true , ' ( ) ' will be appended to : func : etc . cross - reference text . <nl> - # add_function_parentheses = True <nl> - <nl> - # If true , the current module name will be prepended to all description <nl> - # unit titles ( such as . . function : : ) . <nl> - add_module_names = False <nl> - <nl> - # If true , sectionauthor and moduleauthor directives will be shown in the <nl> - # output . They are ignored by default . <nl> - # show_authors = False <nl> + version = cntk . __version__ # TODO consider shortening <nl> + release = cntk . __version__ <nl> <nl> # The name of the Pygments ( syntax highlighting ) style to use . <nl> pygments_style = ' sphinx ' <nl> <nl> - # A list of ignored prefixes for module index sorting . <nl> - # modindex_common_prefix = [ ] <nl> - <nl> - # If true , keep warnings as " system message " paragraphs in the built documents . <nl> - # keep_warnings = False <nl> - <nl> - # If true , ` todo ` and ` todoList ` produce output , else they produce nothing . <nl> - todo_include_todos = True <nl> - <nl> - <nl> - # - - Options for HTML output mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + # Do not prepend the current module to all description unit titles ( such as . . <nl> + # function : : ) . <nl> + add_module_names = False <nl> <nl> - # The theme to use for HTML and HTML Help pages . See the documentation for <nl> - # a list of builtin themes . <nl> - # Required : <nl> - # pip install sphinx_rtd_theme <nl> - import sphinx_rtd_theme <nl> + # The theme to use for HTML and HTML Help pages . <nl> html_theme = " sphinx_rtd_theme " <nl> html_theme_path = [ sphinx_rtd_theme . get_html_theme_path ( ) ] <nl> <nl> - # Theme options are theme - specific and customize the look and feel of a theme <nl> - # further . For a list of options available for each theme , see the <nl> - # documentation . <nl> - # html_theme_options = { } <nl> - <nl> - # The name for this set of Sphinx documents . If None , it defaults to <nl> - # " < project > v < release > documentation " . <nl> - # html_title = None <nl> - <nl> - # A shorter title for the navigation bar . Default is the same as html_title . <nl> - # html_short_title = None <nl> - <nl> - # The name of an image file ( relative to this directory ) to place at the top <nl> - # of the sidebar . <nl> - # html_logo = None <nl> - <nl> - # The name of an image file ( within the static path ) to use as favicon of the <nl> - # docs . This file should be a Windows icon file ( . ico ) being 16x16 or 32x32 <nl> - # pixels large . <nl> - # html_favicon = None <nl> - <nl> - # Add any paths that contain custom static files ( such as style sheets ) here , <nl> - # relative to this directory . They are copied after the builtin static files , <nl> - # so a file named " default . css " will overwrite the builtin " default . css " . 
<nl> - # html_static_path = [ ' _static ' ] <nl> - <nl> - # Add any extra paths that contain custom files ( such as robots . txt or <nl> - # . htaccess ) here , relative to this directory . These files are copied <nl> - # directly to the root of the documentation . <nl> - # html_extra_path = [ ] <nl> - <nl> - # If not ' ' , a ' Last updated on : ' timestamp is inserted at every page bottom , <nl> - # using the given strftime format . <nl> - # html_last_updated_fmt = ' % b % d , % Y ' <nl> - <nl> - # If true , SmartyPants will be used to convert quotes and dashes to <nl> - # typographically correct entities . <nl> - # html_use_smartypants = True <nl> - <nl> - # Custom sidebar templates , maps document names to template names . <nl> - # html_sidebars = { } <nl> - <nl> - # Additional templates that should be rendered to pages , maps page names to <nl> - # template names . <nl> - # html_additional_pages = { } <nl> - <nl> - # If false , no module index is generated . <nl> - # html_domain_indices = True <nl> - <nl> - # If false , no index is generated . <nl> - # html_use_index = True <nl> - <nl> - # If true , the index is split into individual pages for each letter . <nl> - # html_split_index = False <nl> - <nl> - # If true , links to the reST sources are added to the pages . <nl> - # html_show_sourcelink = True <nl> - <nl> - # If true , " Created using Sphinx " is shown in the HTML footer . Default is True . <nl> - # html_show_sphinx = True <nl> - <nl> - # If true , " ( C ) Copyright . . . " is shown in the HTML footer . Default is True . <nl> - # html_show_copyright = True <nl> - <nl> - # If true , an OpenSearch description file will be output , and all pages will <nl> - # contain a < link > tag referring to it . The value of this option must be the <nl> - # base URL from which the finished HTML is served . <nl> - # html_use_opensearch = ' ' <nl> - <nl> - # This is the file name suffix for HTML files ( e . g . " . xhtml " ) . <nl> - # html_file_suffix = None <nl> - <nl> - # Language to be used for generating the HTML full - text search index . <nl> - # Sphinx supports the following languages : <nl> - # ' da ' , ' de ' , ' en ' , ' es ' , ' fi ' , ' fr ' , ' h ' , ' it ' , ' ja ' <nl> - # ' nl ' , ' no ' , ' pt ' , ' ro ' , ' r ' , ' sv ' , ' tr ' <nl> - # html_search_language = ' en ' <nl> - <nl> - # A dictionary with options for the search language support , empty by default . <nl> - # Now only ' ja ' uses this config value <nl> - # html_search_options = { ' type ' : ' default ' } <nl> - <nl> - # The name of a javascript file ( relative to the configuration directory ) that <nl> - # implements a search results scorer . If empty , the default will be used . <nl> - # html_search_scorer = ' scorer . js ' <nl> - <nl> - # Output file base name for HTML help builder . <nl> - htmlhelp_basename = ' CNTK15doc ' <nl> - <nl> - # - - Options for LaTeX output mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - <nl> - latex_elements = { <nl> - # The paper size ( ' letterpaper ' or ' a4paper ' ) . <nl> - # ' papersize ' : ' letterpaper ' , <nl> - <nl> - # The font size ( ' 10pt ' , ' 11pt ' or ' 12pt ' ) . <nl> - # ' pointsize ' : ' 10pt ' , <nl> - <nl> - # Additional stuff for the LaTeX preamble . <nl> - # ' preamble ' : ' ' , <nl> - <nl> - # Latex figure ( float ) alignment <nl> - # ' figure_align ' : ' htbp ' , <nl> - } <nl> - <nl> - # Grouping the document tree into LaTeX files . 
List of tuples <nl> - # ( source start file , target name , title , <nl> - # author , documentclass [ howto , manual , or own class ] ) . <nl> - latex_documents = [ <nl> - ( master_doc , ' CNTK20 . tex ' , ' Python API CNTK Documentation ' , <nl> - ' Microsoft ' , ' manual ' ) , <nl> - ] <nl> - <nl> - # The name of an image file ( relative to this directory ) to place at the top of <nl> - # the title page . <nl> - # latex_logo = None <nl> - <nl> - # For " manual " documents , if this is true , then toplevel headings are parts , <nl> - # not chapters . <nl> - # latex_use_parts = False <nl> - <nl> - # If true , show page references after internal links . <nl> - # latex_show_pagerefs = False <nl> - <nl> - # If true , show URL addresses after external links . <nl> - # latex_show_urls = False <nl> - <nl> - # Documents to append as an appendix to all manuals . <nl> - # latex_appendices = [ ] <nl> - <nl> - # If false , no module index is generated . <nl> - # latex_domain_indices = True <nl> - <nl> - <nl> - # - - Options for manual page output mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - <nl> - # One entry per manual page . List of tuples <nl> - # ( source start file , name , description , authors , manual section ) . <nl> - man_pages = [ <nl> - ( master_doc , ' cntk20 ' , ' Python API for CNTK Documentation ' , <nl> - [ author ] , 1 ) <nl> - ] <nl> - <nl> - # If true , show URL addresses after external links . <nl> - # man_show_urls = False <nl> - <nl> - <nl> - # - - Options for Texinfo output mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> - <nl> - # Grouping the document tree into Texinfo files . List of tuples <nl> - # ( source start file , target name , title , author , <nl> - # dir menu entry , description , category ) <nl> - texinfo_documents = [ <nl> - ( master_doc , ' CNTKv2 ' , ' Python API for CNTK Documentation ' , <nl> - author , ' CNTKv2 ' , ' One line description of project . ' , <nl> - ' Miscellaneous ' ) , <nl> - ] <nl> - <nl> - # Documents to append as an appendix to all manuals . <nl> - # texinfo_appendices = [ ] <nl> - <nl> - # If false , no module index is generated . <nl> - # texinfo_domain_indices = True <nl> + # sphinx . ext . napoleon options <nl> + napoleon_google_docstring = True <nl> + napoleon_numpy_docstring = False <nl> <nl> - # How to display URL addresses : ' footnote ' , ' no ' , or ' inline ' . <nl> - # texinfo_show_urls = ' footnote ' <nl> + # sphinx . ext . todo options <nl> + todo_include_todos = cntk . __version__ . endswith ( ' + ' ) <nl> <nl> - # If true , do not generate a @ detailmenu in the " Top " node ' s menu . <nl> - # texinfo_no_detailmenu = False <nl> mmm a / bindings / python / doc / index . rst <nl> ppp b / bindings / python / doc / index . rst <nl> <nl> . . some aliases <nl> . . _CNTK : https : / / cntk . ai / <nl> <nl> - Python API for CNTK ( 2 . 0rc1 ) <nl> + Python API for CNTK ( | version | ) <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> CNTK_ , the Microsoft Cognitive Toolkit , is a system for describing , training , <nl> and executing computational networks . It is also a framework for describing <nl> arbitrary learning machines such as deep neural networks ( DNNs ) . CNTK is an <nl> implementation of computational networks that supports both CPU and GPU . <nl> <nl> - This page describes the Python API for CNTK_ version 2 . 0rc1 . This is an ongoing effort <nl> + This page describes the Python API for CNTK_ version | version | . 
This is an ongoing effort <nl> to expose such an API to the CNTK system , thus enabling the use of higher - level <nl> tools such as IDEs to facilitate the definition of computational networks , to execute <nl> them on sample data in real time . Please give feedback through these ` channels ` _ . <nl>
bindings / python / doc / conf . py : simplify , include non - default options for the most part
microsoft/CNTK
3c9c36a0590d679ed12bda7322ad663ce1413d2c
2017-04-07T14:24:11Z
mmm a / lib / ClangImporter / ClangImporter . cpp <nl> ppp b / lib / ClangImporter / ClangImporter . cpp <nl> ClangImporter : : create ( ASTContext & ctx , const ClangImporterOptions & importerOpts , <nl> bool ClangImporter : : addSearchPath ( StringRef newSearchPath , bool isFramework , <nl> bool isSystem ) { <nl> clang : : FileManager & fileMgr = Impl . Instance - > getFileManager ( ) ; <nl> - auto entry = fileMgr . getDirectory ( newSearchPath ) ; <nl> - if ( ! entry ) <nl> + auto optionalEntry = fileMgr . getOptionalDirectoryRef ( newSearchPath ) ; <nl> + if ( ! optionalEntry ) <nl> return true ; <nl> + auto entry = * optionalEntry ; <nl> <nl> auto & headerSearchInfo = Impl . getClangPreprocessor ( ) . getHeaderSearchInfo ( ) ; <nl> auto exists = std : : any_of ( headerSearchInfo . search_dir_begin ( ) , <nl> headerSearchInfo . search_dir_end ( ) , <nl> [ & ] ( const clang : : DirectoryLookup & lookup ) - > bool { <nl> if ( isFramework ) <nl> - return lookup . getFrameworkDir ( ) = = * entry ; <nl> - return lookup . getDir ( ) = = * entry ; <nl> + return lookup . getFrameworkDir ( ) = = & entry . getDirEntry ( ) ; <nl> + return lookup . getDir ( ) = = & entry . getDirEntry ( ) ; <nl> } ) ; <nl> if ( exists ) { <nl> / / Don ' t bother adding a search path that ' s already there . Clang would have <nl> bool ClangImporter : : addSearchPath ( StringRef newSearchPath , bool isFramework , <nl> } <nl> <nl> auto kind = isSystem ? clang : : SrcMgr : : C_System : clang : : SrcMgr : : C_User ; <nl> - headerSearchInfo . AddSearchPath ( { * entry , kind , isFramework } , <nl> + headerSearchInfo . AddSearchPath ( { entry , kind , isFramework } , <nl> / * isAngled = * / true ) ; <nl> <nl> / / In addition to changing the current preprocessor directly , we still need <nl>
[ ClangImporter ] Update a DirectoryEntry usage to DirectoryEntryRef
apple/swift
57bf724b1ea478f68ce7199a5ec9884ebc1eeb96
2019-09-12T02:01:42Z
mmm a / modules / core / include / opencv2 / core . hpp <nl> ppp b / modules / core / include / opencv2 / core . hpp <nl> class CV_EXPORTS PCA <nl> The operator performs % PCA of the supplied dataset . It is safe to reuse <nl> the same PCA structure for multiple datasets . That is , if the structure <nl> has been previously used with another dataset , the existing internal <nl> - data is reclaimed and the new eigenvalues , @ ref eigenvectors , and @ ref <nl> + data is reclaimed and the new @ ref eigenvalues , @ ref eigenvectors and @ ref <nl> mean are allocated and computed . <nl> <nl> - The computed eigenvalues are sorted from the largest to the smallest and <nl> - the corresponding eigenvectors are stored as eigenvectors rows . <nl> + The computed @ ref eigenvalues are sorted from the largest to the smallest and <nl> + the corresponding @ ref eigenvectors are stored as eigenvectors rows . <nl> <nl> @ param data input samples stored as the matrix rows or as the matrix <nl> columns . <nl> class CV_EXPORTS PCA <nl> * / <nl> void backProject ( InputArray vec , OutputArray result ) const ; <nl> <nl> - / * * @ brief write and load PCA matrix <nl> + / * * @ brief write PCA objects <nl> <nl> - * / <nl> - void write ( FileStorage & fs ) const ; <nl> - void read ( const FileNode & fs ) ; <nl> + Writes @ ref eigenvalues @ ref eigenvectors and @ ref mean to specified FileStorage <nl> + * / <nl> + void write ( FileStorage & fs ) const ; <nl> + <nl> + / * * @ brief load PCA objects <nl> + <nl> + Loads @ ref eigenvalues @ ref eigenvectors and @ ref mean from specified FileNode <nl> + * / <nl> + void read ( const FileNode & fn ) ; <nl> <nl> Mat eigenvectors ; / / ! < eigenvectors of the covariation matrix <nl> Mat eigenvalues ; / / ! < eigenvalues of the covariation matrix <nl> mmm a / modules / core / src / pca . cpp <nl> ppp b / modules / core / src / pca . cpp <nl> void PCA : : write ( FileStorage & fs ) const <nl> fs < < " mean " < < mean ; <nl> } <nl> <nl> - void PCA : : read ( const FileNode & fs ) <nl> + void PCA : : read ( const FileNode & fn ) <nl> { <nl> - CV_Assert ( ! fs . empty ( ) ) ; <nl> - String name = ( String ) fs [ " name " ] ; <nl> - CV_Assert ( name = = " PCA " ) ; <nl> + CV_Assert ( ! fn . empty ( ) ) ; <nl> + CV_Assert ( ( String ) fn [ " name " ] = = " PCA " ) ; <nl> <nl> - cv : : read ( fs [ " vectors " ] , eigenvectors ) ; <nl> - cv : : read ( fs [ " values " ] , eigenvalues ) ; <nl> - cv : : read ( fs [ " mean " ] , mean ) ; <nl> + cv : : read ( fn [ " vectors " ] , eigenvectors ) ; <nl> + cv : : read ( fn [ " values " ] , eigenvalues ) ; <nl> + cv : : read ( fn [ " mean " ] , mean ) ; <nl> } <nl> <nl> template < typename T > <nl>
Update pca . cpp
opencv/opencv
c6e6d4c822d69e42aa7ae13e46075c545e46f03e
2016-05-21T11:23:45Z
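A short usage sketch of the write/read pair documented above, assuming an OpenCV build that already contains this patch; the file name and sample data are made up:

// pca_io.cpp - usage sketch for PCA::write / PCA::read.
#include <opencv2/core.hpp>

int main() {
    cv::Mat samples(100, 8, CV_32F);
    cv::randu(samples, cv::Scalar(0), cv::Scalar(1));

    cv::PCA pca(samples, cv::Mat(), cv::PCA::DATA_AS_ROW, 4);

    {   // write eigenvalues, eigenvectors and mean
        cv::FileStorage fs("pca.yml", cv::FileStorage::WRITE);
        pca.write(fs);
    }
    {   // load them back into a fresh object
        cv::FileStorage fs("pca.yml", cv::FileStorage::READ);
        cv::PCA restored;
        restored.read(fs.root());
        CV_Assert(restored.eigenvectors.rows == pca.eigenvectors.rows);
    }
    return 0;
}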
mmm a / . gitignore <nl> ppp b / . gitignore <nl> build_config . mk <nl> * . so . * <nl> * _test <nl> db_bench <nl> + leveldbutil <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> OPT ? = - O2 - DNDEBUG # ( A ) Production use ( optimized mode ) <nl> # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> # detect what platform we ' re building on <nl> - $ ( shell CC = $ ( CC ) CXX = $ ( CXX ) TARGET_OS = $ ( TARGET_OS ) \ <nl> + $ ( shell CC = " $ ( CC ) " CXX = " $ ( CXX ) " TARGET_OS = " $ ( TARGET_OS ) " \ <nl> . / build_detect_platform build_config . mk . / ) <nl> # this file is generated by the previous line to set build flags and sources <nl> include build_config . mk <nl> SHARED = $ ( SHARED1 ) <nl> else <nl> # Update db . h if you change these . <nl> SHARED_MAJOR = 1 <nl> - SHARED_MINOR = 9 <nl> + SHARED_MINOR = 10 <nl> SHARED1 = libleveldb . $ ( PLATFORM_SHARED_EXT ) <nl> SHARED2 = $ ( SHARED1 ) . $ ( SHARED_MAJOR ) <nl> SHARED3 = $ ( SHARED1 ) . $ ( SHARED_MAJOR ) . $ ( SHARED_MINOR ) <nl> mmm a / build_detect_platform <nl> ppp b / build_detect_platform <nl> if test - z " $ CXX " ; then <nl> CXX = g + + <nl> fi <nl> <nl> + if test - z " $ TMPDIR " ; then <nl> + TMPDIR = / tmp <nl> + fi <nl> + <nl> # Detect OS <nl> if test - z " $ TARGET_OS " ; then <nl> TARGET_OS = ` uname - s ` <nl> if [ " $ CROSS_COMPILE " = " true " ] ; then <nl> # Cross - compiling ; do not try any compilation tests . <nl> true <nl> else <nl> + CXXOUTPUT = " $ { TMPDIR } / leveldb_build_detect_platform - cxx . $ $ " <nl> + <nl> # If - std = c + + 0x works , use < cstdatomic > . Otherwise use port_posix . h . <nl> - $ CXX $ CXXFLAGS - std = c + + 0x - x c + + - - o / dev / null 2 > / dev / null < < EOF <nl> + $ CXX $ CXXFLAGS - std = c + + 0x - x c + + - - o $ CXXOUTPUT 2 > / dev / null < < EOF <nl> # include < cstdatomic > <nl> int main ( ) { } <nl> EOF <nl> EOF <nl> <nl> # Test whether Snappy library is installed <nl> # http : / / code . google . com / p / snappy / <nl> - $ CXX $ CXXFLAGS - x c + + - - o / dev / null 2 > / dev / null < < EOF <nl> + $ CXX $ CXXFLAGS - x c + + - - o $ CXXOUTPUT 2 > / dev / null < < EOF <nl> # include < snappy . h > <nl> int main ( ) { } <nl> EOF <nl> EOF <nl> fi <nl> <nl> # Test whether tcmalloc is available <nl> - $ CXX $ CXXFLAGS - x c + + - - o / dev / null - ltcmalloc 2 > / dev / null < < EOF <nl> + $ CXX $ CXXFLAGS - x c + + - - o $ CXXOUTPUT - ltcmalloc 2 > / dev / null < < EOF <nl> int main ( ) { } <nl> EOF <nl> if [ " $ ? " = 0 ] ; then <nl> PLATFORM_LIBS = " $ PLATFORM_LIBS - ltcmalloc " <nl> fi <nl> + <nl> + rm - f $ CXXOUTPUT 2 > / dev / null <nl> fi <nl> <nl> PLATFORM_CCFLAGS = " $ PLATFORM_CCFLAGS $ COMMON_FLAGS " <nl> mmm a / db / db_impl . cc <nl> ppp b / db / db_impl . cc <nl> Status DBImpl : : MakeRoomForWrite ( bool force ) { <nl> } else if ( imm_ ! = NULL ) { <nl> / / We have filled up the current memtable , but the previous <nl> / / one is still being compacted , so we wait . <nl> + Log ( options_ . info_log , " Current memtable full ; waiting . . . \ n " ) ; <nl> bg_cv_ . Wait ( ) ; <nl> } else if ( versions_ - > NumLevelFiles ( 0 ) > = config : : kL0_StopWritesTrigger ) { <nl> / / There are too many level - 0 files . <nl> - Log ( options_ . info_log , " waiting . . . \ n " ) ; <nl> + Log ( options_ . info_log , " Too many L0 files ; waiting . . . \ n " ) ; <nl> bg_cv_ . Wait ( ) ; <nl> } else { <nl> / / Attempt to switch to a new memtable and trigger compaction of old <nl> mmm a / db / dbformat . cc <nl> ppp b / db / dbformat . 
cc <nl> std : : string ParsedInternalKey : : DebugString ( ) const { <nl> ( unsigned long long ) sequence , <nl> int ( type ) ) ; <nl> std : : string result = " ' " ; <nl> - result + = user_key . ToString ( ) ; <nl> + result + = EscapeString ( user_key . ToString ( ) ) ; <nl> result + = buf ; <nl> return result ; <nl> } <nl> mmm a / include / leveldb / db . h <nl> ppp b / include / leveldb / db . h <nl> namespace leveldb { <nl> <nl> / / Update Makefile if you change these <nl> static const int kMajorVersion = 1 ; <nl> - static const int kMinorVersion = 9 ; <nl> + static const int kMinorVersion = 10 ; <nl> <nl> struct Options ; <nl> struct ReadOptions ; <nl> mmm a / table / block . cc <nl> ppp b / table / block . cc <nl> <nl> namespace leveldb { <nl> <nl> inline uint32_t Block : : NumRestarts ( ) const { <nl> - assert ( size_ > = 2 * sizeof ( uint32_t ) ) ; <nl> + assert ( size_ > = sizeof ( uint32_t ) ) ; <nl> return DecodeFixed32 ( data_ + size_ - sizeof ( uint32_t ) ) ; <nl> } <nl> <nl> Block : : Block ( const BlockContents & contents ) <nl> if ( size_ < sizeof ( uint32_t ) ) { <nl> size_ = 0 ; / / Error marker <nl> } else { <nl> - restart_offset_ = size_ - ( 1 + NumRestarts ( ) ) * sizeof ( uint32_t ) ; <nl> - if ( restart_offset_ > size_ - sizeof ( uint32_t ) ) { <nl> - / / The size is too small for NumRestarts ( ) and therefore <nl> - / / restart_offset_ wrapped around . <nl> + size_t max_restarts_allowed = ( size_ - sizeof ( uint32_t ) ) / sizeof ( uint32_t ) ; <nl> + if ( NumRestarts ( ) > max_restarts_allowed ) { <nl> + / / The size is too small for NumRestarts ( ) <nl> size_ = 0 ; <nl> + } else { <nl> + restart_offset_ = size_ - ( 1 + NumRestarts ( ) ) * sizeof ( uint32_t ) ; <nl> } <nl> } <nl> } <nl> class Block : : Iter : public Iterator { <nl> } ; <nl> <nl> Iterator * Block : : NewIterator ( const Comparator * cmp ) { <nl> - if ( size_ < 2 * sizeof ( uint32_t ) ) { <nl> + if ( size_ < sizeof ( uint32_t ) ) { <nl> return NewErrorIterator ( Status : : Corruption ( " bad block contents " ) ) ; <nl> } <nl> const uint32_t num_restarts = NumRestarts ( ) ; <nl> mmm a / table / table . cc <nl> ppp b / table / table . cc <nl> Status Table : : InternalGet ( const ReadOptions & options , const Slice & k , <nl> ! filter - > KeyMayMatch ( handle . offset ( ) , k ) ) { <nl> / / Not found <nl> } else { <nl> - Slice handle = iiter - > value ( ) ; <nl> Iterator * block_iter = BlockReader ( this , options , iiter - > value ( ) ) ; <nl> block_iter - > Seek ( k ) ; <nl> if ( block_iter - > Valid ( ) ) { <nl> mmm a / table / table_test . cc <nl> ppp b / table / table_test . cc <nl> class Harness { <nl> Constructor * constructor_ ; <nl> } ; <nl> <nl> + / / Test empty table / block . <nl> + TEST ( Harness , Empty ) { <nl> + for ( int i = 0 ; i < kNumTestArgs ; i + + ) { <nl> + Init ( kTestArgList [ i ] ) ; <nl> + Random rnd ( test : : RandomSeed ( ) + 1 ) ; <nl> + Test ( & rnd ) ; <nl> + } <nl> + } <nl> + <nl> + / / Special test for a block with no restart entries . The C + + leveldb <nl> + / / code never generates such blocks , but the Java version of leveldb <nl> + / / seems to . <nl> + TEST ( Harness , ZeroRestartPointsInBlock ) { <nl> + char data [ sizeof ( uint32_t ) ] ; <nl> + memset ( data , 0 , sizeof ( data ) ) ; <nl> + BlockContents contents ; <nl> + contents . data = Slice ( data , sizeof ( data ) ) ; <nl> + contents . cachable = false ; <nl> + contents . heap_allocated = false ; <nl> + Block block ( contents ) ; <nl> + Iterator * iter = block . 
NewIterator ( BytewiseComparator ( ) ) ; <nl> + iter - > SeekToFirst ( ) ; <nl> + ASSERT_TRUE ( ! iter - > Valid ( ) ) ; <nl> + iter - > SeekToLast ( ) ; <nl> + ASSERT_TRUE ( ! iter - > Valid ( ) ) ; <nl> + iter - > Seek ( " foo " ) ; <nl> + ASSERT_TRUE ( ! iter - > Valid ( ) ) ; <nl> + delete iter ; <nl> + } <nl> + <nl> / / Test the empty key <nl> TEST ( Harness , SimpleEmptyKey ) { <nl> for ( int i = 0 ; i < kNumTestArgs ; i + + ) { <nl> mmm a / util / cache . cc <nl> ppp b / util / cache . cc <nl> class HandleTable { <nl> LRUHandle * h = list_ [ i ] ; <nl> while ( h ! = NULL ) { <nl> LRUHandle * next = h - > next_hash ; <nl> - Slice key = h - > key ( ) ; <nl> uint32_t hash = h - > hash ; <nl> LRUHandle * * ptr = & new_list [ hash & ( new_length - 1 ) ] ; <nl> h - > next_hash = * ptr ; <nl> class LRUCache { <nl> / / mutex_ protects the following state . <nl> port : : Mutex mutex_ ; <nl> size_t usage_ ; <nl> - uint64_t last_id_ ; <nl> <nl> / / Dummy head of LRU list . <nl> / / lru . prev is newest entry , lru . next is oldest entry . <nl> class LRUCache { <nl> } ; <nl> <nl> LRUCache : : LRUCache ( ) <nl> - : usage_ ( 0 ) , <nl> - last_id_ ( 0 ) { <nl> + : usage_ ( 0 ) { <nl> / / Make empty circular linked list <nl> lru_ . next = & lru_ ; <nl> lru_ . prev = & lru_ ; <nl> mmm a / util / env_posix . cc <nl> ppp b / util / env_posix . cc <nl> class PosixEnv : public Env { <nl> PosixEnv ( ) ; <nl> virtual ~ PosixEnv ( ) { <nl> fprintf ( stderr , " Destroying Env : : Default ( ) \ n " ) ; <nl> - exit ( 1 ) ; <nl> + abort ( ) ; <nl> } <nl> <nl> virtual Status NewSequentialFile ( const std : : string & fname , <nl> class PosixEnv : public Env { <nl> void PthreadCall ( const char * label , int result ) { <nl> if ( result ! = 0 ) { <nl> fprintf ( stderr , " pthread % s : % s \ n " , label , strerror ( result ) ) ; <nl> - exit ( 1 ) ; <nl> + abort ( ) ; <nl> } <nl> } <nl> <nl>
Release leveldb 1 . 10
google/leveldb
28dad918f2ffb80fd70110ed5cd47744339649f2
2013-05-15T00:03:07Z
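The Block change is an unsigned-wraparound guard: size_ - (1 + NumRestarts()) * sizeof(uint32_t) cannot go negative, it wraps, so a corrupt restart count must be bounded by (size_ - 4) / 4 before the subtraction. A standalone sketch of that check (plain integers, not the leveldb classes):

// restart_check.cpp - sketch of the wraparound guard used above.
#include <cstdint>
#include <cstdio>

bool blockSizeValid(size_t block_size, uint32_t num_restarts) {
    if (block_size < sizeof(uint32_t)) return false;   // no room for the count itself
    // Guard first: otherwise (1 + num_restarts) * 4 can exceed block_size and the
    // subtraction below wraps around to a huge unsigned value.
    size_t max_restarts_allowed = (block_size - sizeof(uint32_t)) / sizeof(uint32_t);
    if (num_restarts > max_restarts_allowed) return false;
    size_t restart_offset = block_size - (1 + num_restarts) * sizeof(uint32_t);
    return restart_offset <= block_size;   // always true once the guard passed
}

int main() {
    std::printf("%d\n", blockSizeValid(4, 0));            // 1: a block with zero restarts is legal
    std::printf("%d\n", blockSizeValid(8, 0x40000000u));  // 0: corrupt count rejected before wrapping
    return 0;
}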
mmm a / binding . gyp <nl> ppp b / binding . gyp <nl> <nl> ' conditions ' : [ <nl> [ ' runtime = = " node " ' , { <nl> ' defines ' : [ <nl> - ' GRPC_UV ' <nl> + ' GRPC_UV ' , <nl> + ' GRPC_ARES = 0 ' , <nl> ] <nl> } ] , <nl> [ ' OS ! = " win " and runtime = = " electron " ' , { <nl>
Disable async resolve for GRPC_UV
grpc/grpc
c77344940b6e0f028338e5099cc988740480cd0b
2017-01-25T18:45:11Z
mmm a / Marlin / src / MarlinCore . cpp <nl> ppp b / Marlin / src / MarlinCore . cpp <nl> void startOrResumeJob ( ) { <nl> <nl> # if HAS_RESUME_CONTINUE / / Display " Click to Continue . . . " <nl> case 1 : / / 30 min timeout with LCD , 1 min without <nl> - did_state = queue . enqueue_P ( PSTR ( " M0Q1S " TERN ( HAS_LCD_MENU , " 1800 " , " 60 " ) ) ) ; <nl> + did_state = queue . enqueue_one_P ( PSTR ( " M0Q1S " TERN ( HAS_LCD_MENU , " 1800 " , " 60 " ) ) ) ; <nl> break ; <nl> # endif <nl> <nl> case 2 : print_job_timer . stop ( ) ; break ; <nl> <nl> case 3 : <nl> - did_state = print_job_timer . duration ( ) < 60 | | queue . enqueue_P ( PSTR ( " M31 " ) ) ; <nl> + did_state = print_job_timer . duration ( ) < 60 | | queue . enqueue_one_P ( PSTR ( " M31 " ) ) ; <nl> break ; <nl> <nl> case 4 : <nl> mmm a / Marlin / src / gcode / queue . cpp <nl> ppp b / Marlin / src / gcode / queue . cpp <nl> void GCodeQueue : : inject_P ( PGM_P const pgcode ) { injected_commands_P = pgcode ; } <nl> * / <nl> void GCodeQueue : : enqueue_one_now ( const char * cmd ) { while ( ! enqueue_one ( cmd ) ) idle ( ) ; } <nl> <nl> + / * * <nl> + * Attempt to enqueue a single G - code command <nl> + * and return ' true ' if successful . <nl> + * / <nl> + bool GCodeQueue : : enqueue_one_P ( PGM_P const pgcode ) { <nl> + size_t i = 0 ; <nl> + PGM_P p = pgcode ; <nl> + char c ; <nl> + while ( ( c = pgm_read_byte ( & p [ i ] ) ) & & c ! = ' \ n ' ) i + + ; <nl> + char cmd [ i + 1 ] ; <nl> + memcpy_P ( cmd , p , i ) ; <nl> + cmd [ i ] = ' \ 0 ' ; <nl> + return _enqueue ( cmd ) ; <nl> + } <nl> + <nl> / * * <nl> * Enqueue from program memory and return only when commands are actually enqueued <nl> * Never call this from a G - code handler ! <nl> mmm a / Marlin / src / gcode / queue . h <nl> ppp b / Marlin / src / gcode / queue . h <nl> class GCodeQueue { <nl> * / <nl> static void enqueue_one_now ( const char * cmd ) ; <nl> <nl> + / * * <nl> + * Attempt to enqueue a single G - code command <nl> + * and return ' true ' if successful . <nl> + * / <nl> + static bool enqueue_one_P ( PGM_P const pgcode ) ; <nl> + <nl> / * * <nl> * Enqueue from program memory and return only when commands are actually enqueued <nl> * / <nl> class GCodeQueue { <nl> * / <nl> static void flush_and_request_resend ( ) ; <nl> <nl> - / * * <nl> - * Attempt to enqueue a single G - code command <nl> - * and return ' true ' if successful . <nl> - * / <nl> - FORCE_INLINE static bool enqueue_P ( const char * cmd ) { return _enqueue ( cmd ) ; } <nl> - <nl> private : <nl> <nl> static uint8_t index_w ; / / Ring buffer write position <nl>
Fix broken enqueue_P
MarlinFirmware/Marlin
4e3a793f1ec29f2e9df5f84818f5fbfa2e832fac
2020-03-10T23:28:49Z
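enqueue_one_P copies only the first line of a stored command string into a local buffer before queueing it, which is what lets a command such as M0 Q1 S1800 be injected from PROGMEM without dragging along anything after a newline. A host-side sketch of that logic with PSTR / pgm_read_byte replaced by plain pointers and the queue stubbed out (illustrative only, not Marlin source):

// enqueue_sketch.cpp - host-side model of the new enqueue_one_P above.
#include <iostream>
#include <string>

static bool enqueue_single(const std::string& cmd) {       // stand-in for _enqueue()
    std::cout << "enqueued: " << cmd << '\n';
    return true;
}

bool enqueue_one_P(const char* pgcode) {
    size_t i = 0;
    while (pgcode[i] != '\0' && pgcode[i] != '\n') ++i;     // length of the first command only
    return enqueue_single(std::string(pgcode, i));
}

int main() {
    enqueue_one_P("M0 Q1 S1800");        // single command
    enqueue_one_P("M31\nM117 done");     // only "M31" is queued
    return 0;
}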
mmm a / tensorflow / c / BUILD <nl> ppp b / tensorflow / c / BUILD <nl> load ( <nl> # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> # Public targets <nl> <nl> + filegroup ( <nl> + name = " headers " , <nl> + srcs = [ " c_api . h " ] , <nl> + visibility = [ " / / tensorflow : __subpackages__ " ] , <nl> + ) <nl> + <nl> tf_cuda_library ( <nl> name = " c_api " , <nl> srcs = [ " c_api . cc " ] , <nl> mmm a / tensorflow / tools / ci_build / builds / libtensorflow . sh <nl> ppp b / tensorflow / tools / ci_build / builds / libtensorflow . sh <nl> function build_libtensorflow_tarball ( ) { <nl> fi <nl> bazel clean - - expunge <nl> yes " " | . / configure <nl> - <nl> - # TODO ( ashankar ) : Once <nl> - # https : / / github . com / tensorflow / tensorflow / commit / 1b32b698eddc10c0d85b0b8cf838f42023394de7 <nl> - # can be undone , i . e . , when bazel supports pkg_tar with python3 + then all of this below <nl> - # can be replaced with something like : <nl> - # bazel build $ { BAZEL_OPTS } / / tensorflow / tools / lib_package : libtensorflow . tar . gz <nl> - <nl> - bazel build $ { BAZEL_OPTS } / / tensorflow : libtensorflow . so <nl> + <nl> + # Remove this test call when <nl> + # https : / / github . com / bazelbuild / bazel / issues / 2352 <nl> + # and https : / / github . com / bazelbuild / bazel / issues / 1580 <nl> + # have been resolved and the " manual " tags on the BUILD targets <nl> + # in tensorflow / tools / lib_package / BUILD are removed . <nl> + # Till then , must manually run the test . <nl> + bazel test $ { BAZEL_OPTS } / / tensorflow / tools / lib_package / . . . <nl> + <nl> + bazel build $ { BAZEL_OPTS } / / tensorflow / tools / lib_package : libtensorflow . tar . gz <nl> DIR = lib_package <nl> - rm - rf $ { DIR } <nl> - mkdir - p $ { DIR } / build / lib <nl> - mkdir - p $ { DIR } / build / include / tensorflow / c <nl> - cp bazel - bin / tensorflow / libtensorflow . so $ { DIR } / build / lib <nl> - cp tensorflow / c / c_api . h $ { DIR } / build / include / tensorflow / c <nl> - tar - C $ { DIR } / build - cvf $ { DIR } / libtensorflow $ { TARBALL_SUFFIX } . tar . gz include / tensorflow / c / c_api . h lib / libtensorflow . so <nl> - rm - rf $ { DIR } / build <nl> + mkdir - p $ { DIR } <nl> + cp bazel - bin / tensorflow / tools / lib_package / libtensorflow . tar . gz $ { DIR } / libtensorflow $ { TARBALL_SUFFIX } . tar . gz <nl> } <nl> mmm a / tensorflow / tools / ci_build / ci_sanity . sh <nl> ppp b / tensorflow / tools / ci_build / ci_sanity . sh <nl> do_buildifier ( ) { <nl> } <nl> <nl> do_external_licenses_check ( ) { <nl> - echo " Running do_external_licenses_check " <nl> - echo " " <nl> + BUILD_TARGET = " $ 1 " <nl> + LICENSES_TARGET = " $ 2 " <nl> <nl> EXTERNAL_LICENSES_CHECK_START_TIME = $ ( date + ' % s ' ) <nl> <nl> do_external_licenses_check ( ) { <nl> MISSING_LICENSES_FILE = " $ ( mktemp ) _missing_licenses . log " <nl> EXTRA_LICENSES_FILE = " $ ( mktemp ) _extra_licenses . log " <nl> <nl> - echo " Getting external dependencies for / / tensorflow / tools / pip_package : build_pip_package . 
" <nl> - bazel query ' attr ( " licenses " , " notice " , deps ( / / tensorflow / tools / pip_package : build_pip_package ) ) ' - - no_implicit_deps - - no_host_deps - - keep_going \ <nl> + echo " Getting external dependencies for $ { BUILD_TARGET } " <nl> + bazel query " attr ( ' licenses ' , ' notice ' , deps ( $ { BUILD_TARGET } ) ) " - - no_implicit_deps - - no_host_deps - - keep_going \ <nl> | egrep - v " ^ / / tensorflow " \ <nl> | sed - e ' s | : . * | | ' \ <nl> | sort \ <nl> do_external_licenses_check ( ) { <nl> | tee $ { EXTERNAL_DEPENDENCIES_FILE } <nl> <nl> echo <nl> - echo " Getting list of external licenses . " <nl> - bazel query ' deps ( / / tensorflow / tools / pip_package : licenses ) ' - - no_implicit_deps - - no_host_deps - - keep_going \ <nl> + echo " Getting list of external licenses mentioned in $ { LICENSES_TARGET } . " <nl> + bazel query " deps ( $ { LICENSES_TARGET } ) " - - no_implicit_deps - - no_host_deps - - keep_going \ <nl> | egrep - v " ^ / / tensorflow " \ <nl> | sed - e ' s | : . * | | ' \ <nl> | sort \ <nl> do_external_licenses_check ( ) { <nl> echo <nl> <nl> if [ [ - s $ { MISSING_LICENSES_FILE } ] ] | | [ [ - s $ { EXTRA_LICENSES_FILE } ] ] ; then <nl> - echo " FAIL : pip package external dependencies vs licenses mismatch . " <nl> + echo " FAIL : mismatch in packaged licenses and external dependencies " <nl> if [ [ - s $ { MISSING_LICENSES_FILE } ] ] ; then <nl> echo " Missing the licenses for the following external dependencies : " <nl> cat $ { MISSING_LICENSES_FILE } <nl> do_external_licenses_check ( ) { <nl> fi <nl> } <nl> <nl> + do_pip_package_licenses_check ( ) { <nl> + echo " Running do_pip_package_licenses_check " <nl> + echo " " <nl> + do_external_licenses_check \ <nl> + " / / tensorflow / tools / pip_package : build_pip_package " \ <nl> + " / / tensorflow / tools / pip_package : licenses " <nl> + } <nl> + <nl> + do_lib_package_licenses_check ( ) { <nl> + echo " Running do_lib_package_licenses_check " <nl> + echo " " <nl> + do_external_licenses_check \ <nl> + " / / tensorflow : libtensorflow . so " \ <nl> + " / / tensorflow / tools / lib_package : clicenses_generate " <nl> + } <nl> <nl> # Run bazel build - - nobuild to test the validity of the BUILD files <nl> do_bazel_nobuild ( ) { <nl> do_bazel_nobuild ( ) { <nl> } <nl> <nl> # Supply all sanity step commands and descriptions <nl> - SANITY_STEPS = ( " do_pylint PYTHON2 " " do_pylint PYTHON3 " " do_buildifier " " do_bazel_nobuild " " do_external_licenses_check " ) <nl> - SANITY_STEPS_DESC = ( " Python 2 pylint " " Python 3 pylint " " buildifier check " " bazel nobuild " " external dependencies licenses check " ) <nl> + SANITY_STEPS = ( " do_pylint PYTHON2 " " do_pylint PYTHON3 " " do_buildifier " " do_bazel_nobuild " " do_pip_package_licenses_check " " do_lib_package_licenses_check " ) <nl> + SANITY_STEPS_DESC = ( " Python 2 pylint " " Python 3 pylint " " buildifier check " " bazel nobuild " " pip : license check for external dependencies " " C library : license check for external dependencies " ) <nl> <nl> INCREMENTAL_FLAG = " " <nl> <nl> new file mode 100644 <nl> index 0000000000000 . . 41e7221efe447 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / lib_package / BUILD <nl> <nl> + # Packaging the TensorFlow C API into a small , standalone archive for use with <nl> + # language bindings and installations without Python . <nl> + # <nl> + # TODO ( ashankar ) : Something similar for the JNI library for Java ? 
<nl> + # TODO ( ashankar ) : Something similar for the C + + API ( caveat : ABI compatibility ) <nl> + <nl> + package ( default_visibility = [ " / / visibility : private " ] ) <nl> + <nl> + load ( " @ bazel_tools / / tools / build_defs / pkg : pkg . bzl " , " pkg_tar " ) <nl> + <nl> + pkg_tar ( <nl> + name = " libtensorflow " , <nl> + extension = " tar . gz " , <nl> + # Mark as " manual " till <nl> + # https : / / github . com / bazelbuild / bazel / issues / 2352 <nl> + # and https : / / github . com / bazelbuild / bazel / issues / 1580 <nl> + # are resolved , otherwise these rules break when built <nl> + # with Python 3 . <nl> + tags = [ " manual " ] , <nl> + deps = [ <nl> + " : cheaders " , <nl> + " : clib " , <nl> + " : clicenses " , <nl> + ] , <nl> + ) <nl> + <nl> + pkg_tar ( <nl> + name = " cheaders " , <nl> + files = [ " / / tensorflow / c : headers " ] , <nl> + package_dir = " include / tensorflow / c " , <nl> + # Mark as " manual " till <nl> + # https : / / github . com / bazelbuild / bazel / issues / 2352 <nl> + # and https : / / github . com / bazelbuild / bazel / issues / 1580 <nl> + # are resolved , otherwise these rules break when built <nl> + # with Python 3 . <nl> + tags = [ " manual " ] , <nl> + ) <nl> + <nl> + pkg_tar ( <nl> + name = " clib " , <nl> + files = [ " / / tensorflow : libtensorflow . so " ] , <nl> + package_dir = " lib " , <nl> + # Mark as " manual " till <nl> + # https : / / github . com / bazelbuild / bazel / issues / 2352 <nl> + # and https : / / github . com / bazelbuild / bazel / issues / 1580 <nl> + # are resolved , otherwise these rules break when built <nl> + # with Python 3 . <nl> + tags = [ " manual " ] , <nl> + ) <nl> + <nl> + pkg_tar ( <nl> + name = " clicenses " , <nl> + files = [ " : include / tensorflow / c / LICENSE " ] , <nl> + package_dir = " include / tensorflow / c " , <nl> + # Mark as " manual " till <nl> + # https : / / github . com / bazelbuild / bazel / issues / 2352 <nl> + # and https : / / github . com / bazelbuild / bazel / issues / 1580 <nl> + # are resolved , otherwise these rules break when built <nl> + # with Python 3 . <nl> + tags = [ " manual " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " clicenses_generate " , <nl> + srcs = [ <nl> + " / / third_party / hadoop : LICENSE . txt " , <nl> + " / / third_party / eigen3 : LICENSE " , <nl> + " @ boringssl / / : LICENSE " , <nl> + " @ com_googlesource_code_re2 / / : LICENSE " , <nl> + " @ curl / / : COPYING " , <nl> + " @ eigen_archive / / : COPYING . MPL2 " , <nl> + " @ farmhash_archive / / : COPYING " , <nl> + " @ gemmlowp / / : LICENSE " , <nl> + " @ gif_archive / / : COPYING " , <nl> + " @ grpc / / : LICENSE " , <nl> + " @ highwayhash / / : LICENSE " , <nl> + " @ jemalloc / / : COPYING " , <nl> + " @ jpeg / / : LICENSE . md " , <nl> + " @ libxsmm_archive / / : LICENSE " , <nl> + " @ local_config_sycl / / sycl : LICENSE . text " , <nl> + " @ nanopb_git / / : LICENSE . txt " , <nl> + " @ png_archive / / : LICENSE " , <nl> + " @ protobuf / / : LICENSE " , <nl> + " @ zlib_archive / / : zlib . h " , <nl> + ] , <nl> + outs = [ " include / tensorflow / c / LICENSE " ] , <nl> + cmd = " $ ( location : concat_licenses . sh ) $ ( SRCS ) > $ @ " , <nl> + tools = [ " : concat_licenses . sh " ] , <nl> + ) <nl> + <nl> + sh_test ( <nl> + name = " libtensorflow_test " , <nl> + size = " small " , <nl> + srcs = [ " libtensorflow_test . sh " ] , <nl> + data = [ <nl> + " libtensorflow_test . c " , <nl> + " : libtensorflow . tar . 
gz " , <nl> + ] , <nl> + # Mark as " manual " till <nl> + # https : / / github . com / bazelbuild / bazel / issues / 2352 <nl> + # and https : / / github . com / bazelbuild / bazel / issues / 1580 <nl> + # are resolved , otherwise these rules break when built <nl> + # with Python 3 . <nl> + # Till then , this test is explicitly executed when building <nl> + # the release by tensorflow / tools / ci_build / builds / libtensorflow . sh <nl> + tags = [ " manual " ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . fbec0a067a94b <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / lib_package / README . md <nl> <nl> + Bazel rules to package the TensorFlow C - library and [ header <nl> + files ] ( https : / / www . tensorflow . org / code / tensorflow / c / c_api . h ) <nl> + into an archive . <nl> + <nl> + # # TensorFlow C library <nl> + <nl> + The TensorFlow [ C <nl> + API ] ( https : / / www . tensorflow . org / code / tensorflow / c / c_api . h ) <nl> + is typically a requirement of TensorFlow APIs in other languages such as <nl> + [ Go ] ( https : / / www . tensorflow . org / code / tensorflow / go ) <nl> + and [ Rust ] ( https : / / github . com / tensorflow / rust ) . <nl> + <nl> + The command : <nl> + <nl> + ` ` ` sh <nl> + bazel build - c opt / / tensorflow / tools / lib_package : libtensorflow <nl> + ` ` ` <nl> + <nl> + produces ` bazel - bin / tensorflow / tools / lib_package / libtensorflow . tar . gz ` , which <nl> + can be distributed and installed using something like : <nl> + <nl> + ` ` ` sh <nl> + tar - C / usr / local - xzf libtensorflow . tar . gz <nl> + ` ` ` <nl> + <nl> + # # Release <nl> + <nl> + Scripts to generate archives using these rules for release are in <nl> + [ tensorflow / tools / ci_build / linux ] ( https : / / www . tensorflow . org / code / tensorflow / tools / ci_build / linux ) <nl> + and <nl> + [ tensorflow / tools / ci_build / osx ] ( https : / / www . tensorflow . org / code / tensorflow / tools / ci_build / osx ) <nl> new file mode 100755 <nl> index 0000000000000 . . 2070f64e9fa43 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / lib_package / concat_licenses . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + # <nl> + # Script aimed to combining multiple license files into a single one . 
<nl> + <nl> + for f in $ @ <nl> + do <nl> + echo " mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - " <nl> + echo " BEGIN LICENSE FOR $ f " <nl> + echo " mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - " <nl> + cat $ f <nl> + echo " mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - " <nl> + echo " END LICENSE FOR $ f " <nl> + echo " mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - " <nl> + done <nl> new file mode 100644 <nl> index 0000000000000 . . dff6fb77ecc9a <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / lib_package / libtensorflow_test . c <nl> <nl> + / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + / / Companion source file for libtensorflow_test . sh <nl> + <nl> + # include < tensorflow / c / c_api . h > <nl> + <nl> + int main ( ) { <nl> + TF_Status * s = TF_NewStatus ( ) ; <nl> + TF_SetStatus ( s , TF_UNKNOWN , " Some error " ) ; <nl> + if ( TF_GetCode ( s ) ! = TF_UNKNOWN ) { <nl> + return 1 ; <nl> + } <nl> + TF_DeleteStatus ( s ) ; <nl> + return 0 ; <nl> + } <nl> new file mode 100755 <nl> index 0000000000000 . . 6463ecea70bbc <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / lib_package / libtensorflow_test . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + set - ex <nl> + <nl> + # Sanity test for the package C - library archive . 
<nl> + # - Unarchive <nl> + # - Compile a trivial C file that uses the archive <nl> + # - Run it <nl> + <nl> + # Tools needed : A C - compiler and tar <nl> + CC = " $ { CC } " <nl> + TAR = " $ { TAR } " <nl> + <nl> + [ - z " $ { CC } " ] & & CC = " / usr / bin / gcc " <nl> + [ - z " $ { TAR } " ] & & TAR = " tar " <nl> + <nl> + # bazel tests run with $ { PWD } set to the root of the bazel workspace <nl> + TARFILE = " $ { PWD } / tensorflow / tools / lib_package / libtensorflow . tar . gz " <nl> + CFILE = " $ { PWD } / tensorflow / tools / lib_package / libtensorflow_test . c " <nl> + <nl> + cd $ { TEST_TMPDIR } <nl> + <nl> + # Extract the archive into tensorflow / <nl> + mkdir tensorflow <nl> + $ { TAR } - xzf $ { TARFILE } - Ctensorflow <nl> + <nl> + # Compile the test . c file <nl> + $ { CC } $ { CFILE } - Itensorflow / include - Ltensorflow / lib - ltensorflow - oa . out <nl> + <nl> + # Execute it , with the shared library available . <nl> + # DYLD_LIBRARY_PATH is used on OS X , LD_LIBRARY_PATH on Linux <nl> + export DYLD_LIBRARY_PATH = tensorflow / lib <nl> + export LD_LIBRARY_PATH = tensorflow / lib <nl> + . / a . out <nl>
Automated rollback of change 144673014
tensorflow/tensorflow
31f7498f9731e259ddb5021fc76b7b9eca782c68
2017-01-18T16:05:27Z
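The record above packages the C library archive and the README shows how to unpack it; as a quick sanity check of an installed copy, a consumer program can be compiled directly against the unpacked headers and shared library. The sketch below assumes the `tar -C /usr/local -xzf libtensorflow.tar.gz` install from the README, so headers land under `/usr/local/include/tensorflow/c` and the library under `/usr/local/lib`; the file name `hello_tf.cc` is illustrative, not part of this change.

```cpp
// hello_tf.cc -- minimal consumer of the packaged C library (sketch).
// Assumes the archive was unpacked with: tar -C /usr/local -xzf libtensorflow.tar.gz
#include <cstdio>

#include <tensorflow/c/c_api.h>  // installed by the :cheaders pkg_tar rule

int main() {
  // TF_Version() reports the version string of the linked libtensorflow.so.
  std::printf("TensorFlow C library version: %s\n", TF_Version());
  return 0;
}
```

Compile and run with something like `g++ hello_tf.cc -I/usr/local/include -L/usr/local/lib -ltensorflow -o hello_tf && ./hello_tf`, mirroring what `libtensorflow_test.sh` does against the freshly built archive.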
mmm a / aten / src / TH / generic / THTensor . cpp <nl> ppp b / aten / src / TH / generic / THTensor . cpp <nl> THTensor * THTensor_ ( newWithStorage2d ) ( THStorage * storage , ptrdiff_t storageOffse <nl> return THTensor_ ( newWithStorage ) ( storage , storageOffset , { size0 , size1 } , { stride0 , stride1 } ) ; <nl> } <nl> <nl> - THTensor * THTensor_ ( newWithStorage3d ) ( THStorage * storage , ptrdiff_t storageOffset , <nl> - int64_t size0 , int64_t stride0 , <nl> - int64_t size1 , int64_t stride1 , <nl> - int64_t size2 , int64_t stride2 ) <nl> - { <nl> - return THTensor_ ( newWithStorage ) ( storage , storageOffset , { size0 , size1 , size2 } , { stride0 , stride1 , stride2 } ) ; <nl> - } <nl> - <nl> - THTensor * THTensor_ ( newWithStorage4d ) ( THStorage * storage , ptrdiff_t storageOffset , <nl> - int64_t size0 , int64_t stride0 , <nl> - int64_t size1 , int64_t stride1 , <nl> - int64_t size2 , int64_t stride2 , <nl> - int64_t size3 , int64_t stride3 ) <nl> - { <nl> - return THTensor_ ( newWithStorage ) ( storage , storageOffset , <nl> - { size0 , size1 , size2 , size3 } , <nl> - { stride0 , stride1 , stride2 , stride3 } ) ; <nl> - } <nl> - <nl> THTensor * THTensor_ ( newWithSize ) ( at : : IntArrayRef size , at : : IntArrayRef stride ) <nl> { <nl> return THTensor_ ( newWithStorage ) ( NULL , 0 , size , stride ) ; <nl> mmm a / aten / src / TH / generic / THTensor . h <nl> ppp b / aten / src / TH / generic / THTensor . h <nl> TH_API THTensor * THTensor_ ( newWithStorage1d ) ( THStorage * storage_ , ptrdiff_t stor <nl> TH_API THTensor * THTensor_ ( newWithStorage2d ) ( THStorage * storage_ , ptrdiff_t storageOffset_ , <nl> int64_t size0_ , int64_t stride0_ , <nl> int64_t size1_ , int64_t stride1_ ) ; <nl> - TH_API THTensor * THTensor_ ( newWithStorage3d ) ( THStorage * storage_ , ptrdiff_t storageOffset_ , <nl> - int64_t size0_ , int64_t stride0_ , <nl> - int64_t size1_ , int64_t stride1_ , <nl> - int64_t size2_ , int64_t stride2_ ) ; <nl> - TH_API THTensor * THTensor_ ( newWithStorage4d ) ( THStorage * storage_ , ptrdiff_t storageOffset_ , <nl> - int64_t size0_ , int64_t stride0_ , <nl> - int64_t size1_ , int64_t stride1_ , <nl> - int64_t size2_ , int64_t stride2_ , <nl> - int64_t size3_ , int64_t stride3_ ) ; <nl> <nl> / * stride might be NULL * / <nl> TH_API THTensor * THTensor_ ( newWithSize1d ) ( int64_t size0_ ) ; <nl> mmm a / aten / src / THC / generic / THCTensor . cpp <nl> ppp b / aten / src / THC / generic / THCTensor . 
cpp <nl> THCTensor * THCTensor_ ( newWithStorage2d ) ( THCState * state , THCStorage * storage , pt <nl> return THCTensor_ ( newWithStorage ) ( state , storage , storageOffset , { size0 , size1 } , { stride0 , stride1 } ) ; <nl> } <nl> <nl> - THCTensor * THCTensor_ ( newWithStorage3d ) ( THCState * state , THCStorage * storage , ptrdiff_t storageOffset , <nl> - int64_t size0 , int64_t stride0 , <nl> - int64_t size1 , int64_t stride1 , <nl> - int64_t size2 , int64_t stride2 ) <nl> - { <nl> - return THCTensor_ ( newWithStorage ) ( state , storage , storageOffset , { size0 , size1 , size2 } , { stride0 , stride1 , stride2 } ) ; <nl> - } <nl> - <nl> - THCTensor * THCTensor_ ( newWithStorage4d ) ( THCState * state , THCStorage * storage , ptrdiff_t storageOffset , <nl> - int64_t size0 , int64_t stride0 , <nl> - int64_t size1 , int64_t stride1 , <nl> - int64_t size2 , int64_t stride2 , <nl> - int64_t size3 , int64_t stride3 ) <nl> - { <nl> - return THCTensor_ ( newWithStorage ) ( state , storage , storageOffset , <nl> - { size0 , size1 , size2 , size3 } , <nl> - { stride0 , stride1 , stride2 , stride3 } ) ; <nl> - } <nl> - <nl> THCTensor * THCTensor_ ( newWithSize ) ( THCState * state , at : : IntArrayRef size , at : : IntArrayRef stride ) <nl> { <nl> return THCTensor_ ( newWithStorage ) ( state , NULL , 0 , size , stride ) ; <nl> mmm a / aten / src / THC / generic / THCTensor . h <nl> ppp b / aten / src / THC / generic / THCTensor . h <nl> THC_API THCTensor * THCTensor_ ( newWithStorage1d ) ( THCState * state , THCStorage * sto <nl> THC_API THCTensor * THCTensor_ ( newWithStorage2d ) ( THCState * state , THCStorage * storage_ , ptrdiff_t storageOffset_ , <nl> int64_t size0_ , int64_t stride0_ , <nl> int64_t size1_ , int64_t stride1_ ) ; <nl> - THC_API THCTensor * THCTensor_ ( newWithStorage3d ) ( THCState * state , THCStorage * storage_ , ptrdiff_t storageOffset_ , <nl> - int64_t size0_ , int64_t stride0_ , <nl> - int64_t size1_ , int64_t stride1_ , <nl> - int64_t size2_ , int64_t stride2_ ) ; <nl> - THC_API THCTensor * THCTensor_ ( newWithStorage4d ) ( THCState * state , THCStorage * storage_ , ptrdiff_t storageOffset_ , <nl> - int64_t size0_ , int64_t stride0_ , <nl> - int64_t size1_ , int64_t stride1_ , <nl> - int64_t size2_ , int64_t stride2_ , <nl> - int64_t size3_ , int64_t stride3_ ) ; <nl> <nl> / * stride might be NULL * / <nl> THC_API THCTensor * THCTensor_ ( newWithSize1d ) ( THCState * state , int64_t size0_ ) ; <nl>
Kill some unused ( TH ) Storage - based APIs . ( )
pytorch/pytorch
bd77abffe3b0195a13a1788d6d0e98e1b7982a7b
2020-02-27T20:23:25Z
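Call sites that used the removed 3d/4d helpers migrate to the remaining `newWithStorage`, passing sizes and strides as `at::IntArrayRef` — exactly the forwarding the deleted wrappers performed. A minimal sketch for the float specialization follows; it assumes the C++-only `TH/THTensor.hpp` header declares the `IntArrayRef` overload (the generic `THTensor_(newWithStorage)` expands to `THFloatTensor_newWithStorage` for float), and the concrete sizes and strides are made up for illustration.

```cpp
// Sketch of a call-site migration after this change (float specialization).
// Assumption: TH/THTensor.hpp declares THFloatTensor_newWithStorage taking
// at::IntArrayRef sizes and strides.
#include <cstddef>

#include <TH/THTensor.hpp>

THFloatTensor* make_3d_view(THFloatStorage* storage, ptrdiff_t offset) {
  // Before: THFloatTensor_newWithStorage3d(storage, offset, 2, 12, 3, 4, 4, 1);
  // After: sizes {2, 3, 4} and strides {12, 4, 1} become IntArrayRef literals.
  return THFloatTensor_newWithStorage(storage, offset, {2, 3, 4}, {12, 4, 1});
}
```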
mmm a / docs / community / contribute . md <nl> ppp b / docs / community / contribute . md <nl> After your patch has been merged , remember to add your name to [ CONTRIBUTORS . md ] <nl> <nl> # # Code Contribution <nl> <nl> + Before you start coding … <nl> + <nl> + … please make sure there is a JIRA issue that corresponds to your contribution . This is a general rule that the MXNet community follows for all code contributions , including bug fixes , improvements , or new features , with an exception for trivial hot fixes . If you would like to fix a bug that you found or if you would like to add a new feature or improvement to MXNet , please follow the [ File a bug report or Propose an improvement or a new feature ] ( http : / / mxnet . io / community / index . html ) guidelines to open an issue in [ MXNet ’ s JIRA ] ( http : / / issues . apache . org / jira / browse / MXNet ) before starting with the implementation . <nl> + <nl> + If the description of a JIRA issue indicates that its resolution will touch sensible parts of the code base , be sufficiently complex , or add significant amounts of new code , the MXNet community might request a design document ( most contributions should not require a design document ) . The purpose of this document is to ensure that the overall approach to address the issue is sensible and agreed upon by the community . JIRA issues that require a design document are tagged with the requires - design - doc label . The label can be attached by any community member who feels that a design document is necessary . A good description helps to decide whether a JIRA issue requires a design document or not . The design document must be added or attached to or link from the JIRA issue and cover the following aspects : <nl> + <nl> + - Overview of the general approach < br / > <nl> + - List of API changes ( changed interfaces , new and deprecated configuration parameters , changed behavior , … ) < br / > <nl> + - Main components and classes to be touched < br / > <nl> + - Known limitations of the proposed approach < br / > <nl> + <nl> + A design document can be added by anybody , including the reporter of the issue or the person working on it . < br / > <nl> + <nl> + Contributions for JIRA issues that require a design document will not be added to MXNet ’ s code base before a design document has been accepted by the community with lazy consensus . Please check if a design document is required before starting to code . <nl> + <nl> + <nl> # # # Core Library <nl> <nl> - Follow the [ Google C + + Style Guide ] ( https : / / google . github . io / styleguide / cppguide . html ) for C + + code . <nl> mmm a / docs / community / index . md <nl> ppp b / docs / community / index . md <nl> <nl> # # Questions about Using MXNet <nl> If you need help with using MXNet , have questions about applying it to a particular kind of problem , or have a discussion topic , please use our [ forum ] ( https : / / discuss . mxnet . io ) . <nl> <nl> - # # Issue Tracker <nl> - We track bugs and new feature requests in the MXNet Github repo in the issues folder : [ mxnet / issues ] ( https : / / github . com / apache / incubator - mxnet / issues ) . <nl> + # # File a bug report <nl> + Please let us know if you experienced a problem with MXNet and file a bug report . Open [ MXNet ’ s JIRA ] ( http : / / issues . apache . org / jira / browse / MXNet ) and click on the blue ` Create ` button at the top . 
Please give detailed information about the problem you encountered and , if possible , add a description that helps to reproduce the problem . < p / > <nl> + Issues may also be entered on github : [ mxnet / issues ] ( https : / / github . com / apache / incubator - mxnet / issues ) . Github issues are synced to JIRA periodically . Thank you very much . <nl> + <nl> + # # Propose an improvement or a new feature <nl> + Our community is constantly looking for feedback to improve Apache MXNet . If you have an idea how to improve MXNet or have a new feature in mind that would be beneficial for MXNet users , please open an issue in [ MXNet ’ s JIRA ] ( http : / / issues . apache . org / jira / browse / MXNet ) . The improvement or new feature should be described in appropriate detail and include the scope and its requirements if possible . Detailed information is important for a few reasons : < br / > <nl> + - It ensures your requirements are met when the improvement or feature is implemented . < br / > <nl> + - It helps to estimate the effort and to design a solution that addresses your needs . < br / > <nl> + - It allows for constructive discussions that might arise around this issue . <nl> + <nl> + Detailed information is also required , if you plan to contribute the improvement or feature you proposed yourself . Please read the [ contributions ] ( http : / / mxnet . io / community / contribute . html ) guide in this case as well . <nl> <nl> # # Contributors <nl> MXNet has been developed and is used by a group of active community members . Contribute to improving it ! For more information , see [ contributions ] ( http : / / mxnet . io / community / contribute . html ) . <nl>
[ MXNET - 87 ] Add JIRA info to docs ( )
apache/incubator-mxnet
72091ba24d509e2785beadc44712ddc1bde26f35
2018-03-31T04:57:56Z
mmm a / jstests / sharding / shard_insert_getlasterror_w2 . js <nl> ppp b / jstests / sharding / shard_insert_getlasterror_w2 . js <nl> function go ( ) { <nl> dba . printShardingStatus ( ) <nl> printjson ( db [ ' foo ' ] . count ( ) ) <nl> <nl> + / / Test case where GLE should return an error <nl> + db . foo . insert ( { _id : ' a ' , x : 1 } ) ; <nl> + db . foo . insert ( { _id : ' a ' , x : 1 } ) ; <nl> + var x = db . getLastErrorObj ( 2 , 30000 ) <nl> + assert . neq ( x . err , null , tojson ( x ) ) ; <nl> + <nl> / / Add more data <nl> for ( var i = N ; i < 2 * N ; i + + ) { <nl> db [ ' foo ' ] . insert ( { x : i , text : Text } ) <nl>
Add failing test for GLE with sharding + replset
mongodb/mongo
d798243f50226bcd1c076d831f94e3062eaeaecd
2011-01-18T18:11:48Z
mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> " : _pywrap_kernel_registry " , <nl> " : _pywrap_py_exception_registry " , <nl> " : _pywrap_py_func " , # TODO ( b / 142001480 ) : remove once the bug is fixed . <nl> + " : _pywrap_python_api_dispatcher " , <nl> " : _pywrap_python_op_gen " , <nl> " : _pywrap_quantize_training " , <nl> " : _pywrap_stacktrace_handler " , <nl> tf_py_test ( <nl> tfrt_enabled = True , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " python_api_dispatcher " , <nl> + srcs = [ " framework / python_api_dispatcher . cc " ] , <nl> + hdrs = [ " framework / python_api_dispatcher . h " ] , <nl> + deps = [ <nl> + " : cpp_python_util " , <nl> + " : safe_pyobject_ptr " , <nl> + " / / tensorflow / core / platform : logging " , <nl> + " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> + " @ com_google_absl / / absl / container : inlined_vector " , <nl> + " @ com_google_absl / / absl / strings " , <nl> + ] , <nl> + ) <nl> + <nl> + # Note : this target is only used by python_api_dispatcher_test . <nl> + tf_python_pybind_extension ( <nl> + name = " _pywrap_python_api_dispatcher " , <nl> + # testonly = True , <nl> + srcs = [ " framework / python_api_dispatcher_wrapper . cc " ] , <nl> + hdrs = [ " framework / python_api_dispatcher . h " ] , <nl> + module_name = " _pywrap_python_api_dispatcher " , <nl> + deps = [ <nl> + " : safe_pyobject_ptr_required_hdrs " , <nl> + " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> + " @ pybind11 " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " python_api_dispatcher_test " , <nl> + srcs = [ " framework / python_api_dispatcher_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ " no_pip " ] , <nl> + deps = [ <nl> + " : _pywrap_python_api_dispatcher " , <nl> + " : client_testlib " , <nl> + ] , <nl> + ) <nl> + <nl> py_library ( <nl> name = " framework_ops " , # " ops " is already the name of a deprecated target <nl> srcs = [ " framework / ops . py " ] , <nl> pywrap_tensorflow_macro ( <nl> " : pybind11_lib " , <nl> " : pybind11_status " , <nl> " : pybind11_proto " , <nl> + " : python_api_dispatcher " , <nl> " : python_op_gen " , <nl> " : safe_pyobject_ptr " , <nl> " : tf_session_helper " , <nl> filegroup ( <nl> " : numpy_lib " , # checkpoint_reader <nl> " : py_exception_registry " , # py_exception_registry <nl> " : py_func_lib " , # py_func <nl> + " : python_api_dispatcher " , # python_api_dispatcher <nl> " : python_op_gen " , # python_op_gen <nl> " : safe_ptr " , # checkpoint_reader <nl> " / / tensorflow / c : checkpoint_reader " , # checkpoint_reader <nl> new file mode 100644 <nl> index 0000000000000 . . 57a6a9ce94b06 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / python_api_dispatcher . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / python / framework / python_api_dispatcher . h " <nl> + <nl> + # include < set > <nl> + <nl> + # include " absl / container / inlined_vector . h " <nl> + # include " absl / strings / str_join . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / python / lib / core / safe_pyobject_ptr . h " <nl> + # include " tensorflow / python / util / util . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + using ParamInfo = PythonAPIDispatcher : : ParamInfo ; <nl> + <nl> + / / List of python types to check for dispatch . In most cases , this vector <nl> + / / will have size zero or one ; and sizes greater than 3 should be rare . <nl> + using TypeList = absl : : InlinedVector < PyTypeObject * , 3 > ; <nl> + <nl> + namespace { <nl> + <nl> + / / Returns the __tf__dispatch__ attribute of ` obj ` . <nl> + Safe_PyObjectPtr GetAttr_TFDispatch ( PyObject * obj ) { <nl> + # if PY_MAJOR_VERSION < 3 <nl> + / / Python 2 . x : <nl> + static PyObject * attr = PyString_InternFromString ( " __tf_dispatch__ " ) ; <nl> + # else <nl> + / / Python 3 . x : <nl> + static PyObject * attr = PyUnicode_InternFromString ( " __tf_dispatch__ " ) ; <nl> + # endif <nl> + return Safe_PyObjectPtr ( PyObject_GetAttr ( obj , attr ) ) ; <nl> + } <nl> + <nl> + / / Searches ` params ` for dispatchable types , and returns a vector of borrowed <nl> + / / references to those types . Removes consecutive duplicates ( i . e . , if a <nl> + / / dispatchable parameter has the same type as the previously encountered <nl> + / / dispatcahble parameter , then it ' s type is not added again ) , so the result <nl> + / / will usually have a length of zero or one ; but in the general case , it may be <nl> + / / longer , and may contain ( nonconsecutive ) duplicates . <nl> + / / <nl> + / / Assumes that ` params ` is a tuple , and that all parameter indices in <nl> + / / ` dispatch_params ` and ` dispatch_list_params ` are valid . <nl> + TypeList FindDispatchTypes ( PyObject * params , <nl> + const std : : vector < ParamInfo > & dispatchable_params ) { <nl> + TypeList dispatch_types ; <nl> + for ( const auto & param : dispatchable_params ) { <nl> + DCHECK_GE ( param . index , 0 ) ; <nl> + DCHECK_LT ( param . index , PyTuple_GET_SIZE ( params ) ) ; <nl> + PyObject * value = PyTuple_GET_ITEM ( params , param . index ) ; <nl> + if ( param . is_list ) { <nl> + DCHECK ( PyList_Check ( value ) ) ; <nl> + Py_ssize_t num_items = PyList_Size ( value ) ; <nl> + for ( Py_ssize_t i = 0 ; i < num_items ; + + i ) { <nl> + PyObject * item = PyList_GET_ITEM ( value , i ) ; <nl> + / / TODO ( b / 164980194 ) Consider changing IsDispatchable to not use a <nl> + / / cache . This may impact efficiency ( needs to be measured ) , but would <nl> + / / allow us to support monkey - patching classes to be dispatchable . <nl> + if ( swig : : IsDispatchable ( item ) ) { <nl> + if ( dispatch_types . empty ( ) | | <nl> + value - > ob_type ! = dispatch_types . back ( ) ) { <nl> + dispatch_types . push_back ( item - > ob_type ) ; <nl> + } <nl> + } <nl> + } <nl> + } else { <nl> + if ( swig : : IsDispatchable ( value ) ) { <nl> + if ( dispatch_types . empty ( ) | | value - > ob_type ! = dispatch_types . back ( ) ) { <nl> + dispatch_types . 
push_back ( value - > ob_type ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + return dispatch_types ; <nl> + } <nl> + <nl> + / / Removes duplicates from ` dispatch_types ` , and moves any subtypes to <nl> + / / before their supertypes . Note : this method is only called when <nl> + / / ` dispatch_types . size ( ) > 1 ` . <nl> + void SortDispatchTypes ( TypeList & dispatch_types ) { <nl> + / / Remove duplicates . Note : this is O ( n ^ 2 ) in the number of dispatchable <nl> + / / types , but we expect this number to be very small in almost every case <nl> + / / ( usually zero , sometimes one , and rarely larger than two ) . <nl> + for ( int i = 0 ; i < dispatch_types . size ( ) - 1 ; + + i ) { <nl> + if ( dispatch_types [ i ] = = nullptr ) continue ; <nl> + for ( int j = i + 1 ; j < dispatch_types . size ( ) ; + + j ) { <nl> + if ( dispatch_types [ i ] = = dispatch_types [ j ] ) { <nl> + dispatch_types [ j ] = nullptr ; / / mark duplicate <nl> + } <nl> + } <nl> + } <nl> + dispatch_types . erase ( <nl> + std : : remove_if ( dispatch_types . begin ( ) , dispatch_types . end ( ) , <nl> + [ ] ( PyTypeObject * t ) { return t = = nullptr ; } ) , <nl> + dispatch_types . end ( ) ) ; <nl> + <nl> + / / Move subclasses before superclasses . As above , this is O ( n ^ 2 ) , but we <nl> + / / expect n to be small . <nl> + TypeList sorted ; <nl> + TypeList subtypes ; <nl> + for ( int i = 0 ; i < dispatch_types . size ( ) ; + + i ) { <nl> + if ( dispatch_types [ i ] = = nullptr ) continue ; <nl> + subtypes . clear ( ) ; <nl> + for ( int j = i + 1 ; j < dispatch_types . size ( ) ; + + j ) { <nl> + if ( dispatch_types [ j ] = = nullptr ) continue ; <nl> + if ( PyType_IsSubtype ( dispatch_types [ j ] , dispatch_types [ i ] ) ) { <nl> + subtypes . push_back ( dispatch_types [ j ] ) ; <nl> + dispatch_types [ j ] = nullptr ; / / mark as already added . <nl> + } <nl> + } <nl> + if ( ! subtypes . empty ( ) ) { <nl> + std : : sort ( subtypes . begin ( ) , subtypes . end ( ) , PyType_IsSubtype ) ; <nl> + sorted . insert ( sorted . end ( ) , subtypes . begin ( ) , subtypes . end ( ) ) ; <nl> + } <nl> + sorted . push_back ( dispatch_types [ i ] ) ; <nl> + } <nl> + DCHECK_EQ ( dispatch_types . size ( ) , sorted . size ( ) ) ; <nl> + dispatch_types . swap ( sorted ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + PythonAPIDispatcher : : PythonAPIDispatcher ( const std : : string & api_name , <nl> + PyObject * api_func , int num_params , <nl> + bool right_to_left ) <nl> + : api_name_ ( PyUnicode_FromStringAndSize ( api_name . c_str ( ) , api_name . size ( ) ) ) , <nl> + api_func_ ( api_func ) , <nl> + num_params_ ( num_params ) , <nl> + right_to_left_ ( right_to_left ) { <nl> + Py_INCREF ( api_func ) ; <nl> + } <nl> + <nl> + bool PythonAPIDispatcher : : Initialize ( <nl> + std : : vector < ParamInfo > dispatchable_params ) { <nl> + dispatchable_params_ . swap ( dispatchable_params ) ; <nl> + std : : sort ( dispatchable_params_ . begin ( ) , dispatchable_params_ . end ( ) , <nl> + [ ] ( const ParamInfo & a , const ParamInfo & b ) - > bool { <nl> + return a . index < b . index ; <nl> + } ) ; <nl> + if ( right_to_left_ ) { <nl> + std : : reverse ( dispatchable_params_ . begin ( ) , dispatchable_params_ . end ( ) ) ; <nl> + } <nl> + <nl> + for ( const auto & p : dispatchable_params_ ) { <nl> + if ( p . index < 0 | | p . index > = num_params_ ) { <nl> + PyErr_SetString ( <nl> + PyExc_ValueError , <nl> + absl : : StrCat ( " PythonAPIDispatcher : dispatchable parameter index out " , <nl> + " of range : " , p . 
index , " not in [ 0 , " , num_params_ , " ) " ) <nl> + . c_str ( ) ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + PyObject * PythonAPIDispatcher : : Dispatch ( PyObject * params ) const { <nl> + DCHECK ( PyTuple_Check ( params ) ) ; <nl> + <nl> + / / TODO ( b / 164980194 ) Consider removing this check , if the caller is also <nl> + / / checking / guaranteeing it ( once dispatch has been integrated w / the Python <nl> + / / API handlers ) . <nl> + if ( num_params_ ! = PyTuple_Size ( params ) ) { <nl> + # if PY_MAJOR_VERSION < 3 <nl> + / / Python 2 . x : <nl> + Safe_PyObjectPtr api_name_str ( PyUnicode_AsUTF8String ( api_name_ . get ( ) ) ) ; <nl> + if ( ! api_name_str ) return nullptr ; <nl> + const char * api_name = PyString_AsString ( api_name_str . get ( ) ) ; <nl> + # else <nl> + / / Python 3 . x : <nl> + const char * api_name = PyUnicode_AsUTF8AndSize ( api_name_ . get ( ) , nullptr ) ; <nl> + # endif <nl> + PyErr_SetString ( <nl> + PyExc_TypeError , <nl> + absl : : StrCat ( api_name ? api_name : " unknown PythonAPIDispatcher " , <nl> + " expected " , num_params_ , " parameters , but got " , <nl> + PyTuple_Size ( params ) ) <nl> + . c_str ( ) ) ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + TypeList dispatch_types = FindDispatchTypes ( params , dispatchable_params_ ) ; <nl> + <nl> + if ( dispatch_types . empty ( ) ) { <nl> + return Py_NotImplemented ; <nl> + } <nl> + <nl> + if ( dispatch_types . size ( ) > 1 ) { <nl> + SortDispatchTypes ( dispatch_types ) ; <nl> + } <nl> + <nl> + for ( PyTypeObject * dispatch_type : dispatch_types ) { <nl> + Safe_PyObjectPtr dispatcher = <nl> + GetAttr_TFDispatch ( reinterpret_cast < PyObject * > ( dispatch_type ) ) ; <nl> + if ( ! dispatcher ) return nullptr ; <nl> + PyObject * result = PyObject_CallFunctionObjArgs ( <nl> + dispatcher . get ( ) , api_name_ . get ( ) , api_func_ . get ( ) , params , nullptr ) ; <nl> + if ( result ! = Py_NotImplemented ) { <nl> + return result ; <nl> + } <nl> + } <nl> + <nl> + return Py_NotImplemented ; <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 7cb3879dd7440 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / python_api_dispatcher . h <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_API_DISPATCHER_H_ <nl> + # define TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_API_DISPATCHER_H_ <nl> + <nl> + # include < Python . h > <nl> + <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / python / lib / core / safe_pyobject_ptr . 
h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + / / Dispatch handler for Python APIs . <nl> + / / <nl> + / / A separate PythonAPIDispatcher object is created for each Python API , and <nl> + / / keeps track of which parameters should be checked for dispatch . <nl> + / / <nl> + / / When PythonAPIDispatcher : : Dispatch ( ) is called with a tuple of <nl> + / / canonicalized parameters , it checks the indicated parameters ' values for <nl> + / / ` __tf_dispatch__ ` methods . If found , then this method is called with the <nl> + / / following arguments : ` __tf_dispatch__ ( api_name , api_func , canon_args ) ` , <nl> + / / where : <nl> + / / <nl> + / / * ` api_name ` is the fully - qualified name of the python API ( e . g . , <nl> + / / ` " tf . math . sum " ` ) . <nl> + / / * ` api_func ` is the function that implements the APIs for ` Tensor ` inputs . <nl> + / / * ` canon_args ` is the canonicalized argument list . <nl> + / / <nl> + class PythonAPIDispatcher { <nl> + public : <nl> + / / Information about an API parameter that supports dispatch . ` index ` is the <nl> + / / parameter ' s index in the canonicalized parameter list , and ` is_list ` is <nl> + / / true if the parameter expects a list of values ( e . g . the ` values ` parameter <nl> + / / to ` tf . concat ` ) . <nl> + struct ParamInfo { <nl> + int index ; <nl> + bool is_list ; <nl> + } ; <nl> + <nl> + / / Constructs a PythonAPIDispatcher . <nl> + / / <nl> + / / Args : <nl> + / / api_name : The fully qualified name of the API handled by this dispatcher . <nl> + / / api_func : The python function for which implements the API for ` Tensor ` <nl> + / / inputs . <nl> + / / num_params : The number of canonical parameters that the API expects . <nl> + / / right_to_left : If true , then the normal precedence rules ( in which <nl> + / / dispatchers are tried from left - to - right ) are changed to try <nl> + / / dispatchers from right - to - left instead . This is used for operations <nl> + / / such as ` __radd__ ` , where the normal parameter order is reversed . <nl> + PythonAPIDispatcher ( const std : : string & api_name , PyObject * api_func , <nl> + int num_params , bool right_to_left = false ) ; <nl> + <nl> + / / Initiliaze this PythonAPIDispatcher with information about which parameters <nl> + / / support dispatch . Returns true on success , or sets a python exception and <nl> + / / returns false on error . <nl> + bool Initialize ( std : : vector < ParamInfo > dispatchable_params ) ; <nl> + <nl> + / / Checks if any of the dispatchable parameters have a ` __tf_dispatch__ ` <nl> + / / method , and if so , calls them . In particular , this method : <nl> + / / <nl> + / / 1 . Constructs an ordered list of dispatchable types . <nl> + / / <nl> + / / * Checks each argument that support dispatch to see if its value ( s ) have <nl> + / / a ` __tf_dispatch__ ` method . <nl> + / / * Arguments are checked left - to - right unless ` right_to_left ` was set to <nl> + / / True in the constructor . * Within * a list - valued parameter , elements <nl> + / / are always checked left - to - right ( even if ` right_to_left ` is True ) . <nl> + / / * Duplicate types are removed ( only the first occurrence of each type is <nl> + / / kept ) . <nl> + / / * If any type ` T_sub ` is a subtype of another type ` T_super ` , but occurs <nl> + / / after ` T_super ` in the list of dispatchable types , then it is moved to <nl> + / / just before ` T_super ` . <nl> + / / <nl> + / / 2 . Tries calling each of the dispatchable types ' ` __tf_dispatch__ ` methods . 
<nl> + / / <nl> + / / * Dispatch methods are called with the following arguments : <nl> + / / ` __tf_dispatch__ ( api_name , api_func , canon_args ) ` <nl> + / / * Dispatch methods are tried in the order described above . <nl> + / / * If a dispatch method returns a value , then ` Dispatch ( ) ` returns a <nl> + / / new reference to that value . <nl> + / / * If a dispatch method raises an exception , then ` Dispatch ( ) ` returns <nl> + / / null ( i . e . , propogates the exception ) . <nl> + / / * If a dispatch method returns ` NotImplemented ` , then the dispatcher <nl> + / / moves on to the next type . <nl> + / / <nl> + / / 3 . If no dispatchers for found , or all dispatchers returned <nl> + / / ` NotImplemented ' , then the dispatcher returns a * borrowed * reference <nl> + / / to ` Py_NotImplemented ` . <nl> + / / <nl> + / / Args : <nl> + / / params : A ` PyTuple ` containing the canonicalized parameters to the API . <nl> + / / All ` POSITIONAL_OR_KEYWORD ` arguments must be converted to positional <nl> + / / arguments ( ` KEYWORD_ONLY ` arguments are not currently supported ) . Any <nl> + / / dispatchable parameter with ` is_list = True ` must have been converted to <nl> + / / ` PyList ` . <nl> + / / <nl> + / / Returns : <nl> + / / * If a ` __tf_dispatch__ ` handler successfully handled the API : <nl> + / / Returns a * new * reference to the handler ' s return value . <nl> + / / * If no handler was found , or all handlers returned NotImplemented : <nl> + / / Returns a * borrowed * reference to ` Py_NotImplemented ` . <nl> + / / * On error : Sets an exception and returns ` nullptr ` . <nl> + PyObject * Dispatch ( PyObject * params ) const ; <nl> + <nl> + private : <nl> + Safe_PyObjectPtr api_name_ ; <nl> + Safe_PyObjectPtr api_func_ ; <nl> + int num_params_ ; <nl> + std : : vector < ParamInfo > dispatchable_params_ ; <nl> + bool right_to_left_ ; <nl> + } ; <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_API_DISPATCHER_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 51dda8a0f9fe6 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / python_api_dispatcher_test . py <nl> <nl> + # Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for tensorflow . python . framework . python_api_dispatcher . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from absl . testing import parameterized <nl> + <nl> + from tensorflow . python import _pywrap_python_api_dispatcher <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . 
framework import test_util <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . platform import googletest <nl> + <nl> + <nl> + class Trace ( object ) : <nl> + " " " A dispatchable type that builds traces of ops it ' s called with . " " " <nl> + <nl> + log = [ ] <nl> + <nl> + def __init__ ( self , api_name , * args ) : <nl> + self . api_name = api_name <nl> + self . args = args <nl> + <nl> + @ classmethod <nl> + def __tf_dispatch__ ( cls , api_name , api_func , args ) : <nl> + Trace . log . append ( " __tf_dispatch__ % s " % ( ( cls . __name__ , api_name ) , ) ) <nl> + if " disabled " in str ( args ) or api_name = = " disabled " : <nl> + return NotImplemented <nl> + del api_func # not used <nl> + return cls ( api_name , * args ) <nl> + <nl> + def __repr__ ( self ) : <nl> + return " % s % s " % ( type ( self ) . __name__ , ( self . api_name , ) + self . args ) <nl> + <nl> + def __eq__ ( self , other ) : <nl> + return ( type ( self ) is type ( other ) and self . api_name = = other . api_name and <nl> + self . args = = other . args ) <nl> + <nl> + <nl> + class Trace2 ( Trace ) : <nl> + pass <nl> + <nl> + <nl> + class Trace2B ( Trace2 ) : <nl> + pass <nl> + <nl> + <nl> + class Trace3 ( Trace ) : <nl> + pass <nl> + <nl> + <nl> + class Trace4 ( Trace ) : <nl> + pass <nl> + <nl> + <nl> + class WeightedTensor ( object ) : <nl> + <nl> + def __init__ ( self , tensor , weight ) : <nl> + self . tensor = ops . convert_to_tensor ( tensor ) <nl> + self . weight = weight # Python float <nl> + <nl> + @ classmethod <nl> + def __tf_dispatch__ ( cls , api_name , api_func , args ) : <nl> + del api_name # unused <nl> + weights = [ arg . weight for arg in args if isinstance ( arg , WeightedTensor ) ] <nl> + tensors = [ <nl> + arg . tensor if isinstance ( arg , WeightedTensor ) else arg for arg in args <nl> + ] <nl> + tensor_result = api_func ( * tensors ) <nl> + avg_weight = sum ( weights ) / len ( weights ) <nl> + return cls ( tensor_result , avg_weight ) <nl> + <nl> + <nl> + @ test_util . run_all_in_graph_and_eager_modes <nl> + class PythonAPIDispatcherTest ( test_util . TensorFlowTestCase , <nl> + parameterized . TestCase ) : <nl> + <nl> + def testNoDispatchableTypes ( self ) : <nl> + add_dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " tf . math . add " , math_ops . add , 2 , [ 0 , 1 ] , [ ] , False ) <nl> + self . assertEqual ( add_dispatcher . Dispatch ( 1 , 2 ) , NotImplemented ) <nl> + <nl> + concat_dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " tf . concat " , array_ops . concat , 2 , [ 1 ] , [ 0 ] , False ) <nl> + self . assertEqual ( concat_dispatcher . Dispatch ( [ 1 ] , 0 ) , NotImplemented ) <nl> + <nl> + def testSimpleDispatchWithTrace ( self ) : <nl> + dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " tf . math . add " , math_ops . add , 2 , [ 0 , 1 ] , [ ] , False ) <nl> + x = 5 <nl> + y = Trace ( " constant " , " y " ) <nl> + z = Trace ( " constant " , " z " ) <nl> + <nl> + Trace . log . clear ( ) <nl> + self . assertEqual ( dispatcher . Dispatch ( x , y ) , Trace ( " tf . math . add " , x , y ) ) <nl> + self . assertEqual ( dispatcher . Dispatch ( y , x ) , Trace ( " tf . math . add " , y , x ) ) <nl> + self . assertEqual ( dispatcher . Dispatch ( y , z ) , Trace ( " tf . math . add " , y , z ) ) <nl> + self . assertEqual ( Trace . log , [ <nl> + " __tf_dispatch__ ( ' Trace ' , ' tf . math . 
add ' ) " , <nl> + " __tf_dispatch__ ( ' Trace ' , ' tf . math . add ' ) " , <nl> + " __tf_dispatch__ ( ' Trace ' , ' tf . math . add ' ) " <nl> + ] ) <nl> + <nl> + def testDispatcherReturnsNotImplemented ( self ) : <nl> + dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " tf . math . add " , math_ops . add , 2 , [ 0 , 1 ] , [ ] , False ) <nl> + x = 5 <nl> + y = Trace ( " constant " , " disabled " ) <nl> + z = Trace ( " constant " , " z " ) <nl> + <nl> + self . assertEqual ( dispatcher . Dispatch ( x , y ) , NotImplemented ) <nl> + self . assertEqual ( dispatcher . Dispatch ( y , x ) , NotImplemented ) <nl> + self . assertEqual ( dispatcher . Dispatch ( y , z ) , NotImplemented ) <nl> + self . assertEqual ( dispatcher . Dispatch ( z , z ) , Trace ( " tf . math . add " , z , z ) ) <nl> + <nl> + def testSimpleDispatchWithWeightedTensor ( self ) : <nl> + dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " tf . math . add " , math_ops . add , 2 , [ 0 , 1 ] , [ ] , False ) <nl> + x = 5 <nl> + y = WeightedTensor ( [ 1 , 2 , 3 ] , 0 . 6 ) <nl> + z = WeightedTensor ( [ 10 , 20 , 30 ] , 0 . 2 ) <nl> + <nl> + x_plus_y = dispatcher . Dispatch ( x , y ) <nl> + y_plus_x = dispatcher . Dispatch ( y , x ) <nl> + y_plus_z = dispatcher . Dispatch ( y , z ) <nl> + <nl> + self . assertAllEqual ( x_plus_y . tensor , [ 6 , 7 , 8 ] ) <nl> + self . assertAllEqual ( y_plus_x . tensor , [ 6 , 7 , 8 ] ) <nl> + self . assertAllEqual ( y_plus_z . tensor , [ 11 , 22 , 33 ] ) <nl> + <nl> + self . assertEqual ( x_plus_y . weight , 0 . 6 ) <nl> + self . assertEqual ( y_plus_x . weight , 0 . 6 ) <nl> + self . assertEqual ( y_plus_z . weight , 0 . 4 ) <nl> + <nl> + def testDispatchPrecedence ( self ) : <nl> + # We use an API for which dispatch is disabled , so all dispatchers get <nl> + # called ( since this test checks the order of the dispatcher list ) . <nl> + dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " disabled " , None , 5 , [ 0 , 1 , 4 ] , [ 2 , 3 ] , False ) <nl> + <nl> + t = Trace ( " constant " , " t " ) <nl> + t2_1 = Trace2 ( " constant " , " t2_1 " ) <nl> + t2_2 = Trace2 ( " constant " , " t2_2 " ) <nl> + t2b = Trace2B ( " constant " , " t2b " ) <nl> + t3 = Trace3 ( " constant " , " t3 " ) <nl> + t4 = Trace4 ( " constant " , " t4 " ) <nl> + <nl> + # Three dispatchable types , none of which is a subclass of the other : <nl> + # * precedence is left - to - right . <nl> + # * duplicates are removed . <nl> + Trace . log . clear ( ) <nl> + result = dispatcher . Dispatch ( t2_1 , t3 , [ ] , [ t2_2 , t3 ] , t4 ) <nl> + self . assertEqual ( result , NotImplemented ) <nl> + self . assertEqual ( Trace . log , [ <nl> + " __tf_dispatch__ ( ' Trace2 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace3 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace4 ' , ' disabled ' ) " <nl> + ] ) <nl> + <nl> + # Subtypes are moved before their base types . <nl> + Trace . log . clear ( ) <nl> + result = dispatcher . Dispatch ( t2_1 , t3 , [ t ] , [ t2_2 , t , t3 , t4 ] , t2b ) <nl> + self . assertEqual ( result , NotImplemented ) <nl> + self . assertEqual ( Trace . 
log , [ <nl> + " __tf_dispatch__ ( ' Trace2B ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace2 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace3 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace4 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace ' , ' disabled ' ) " <nl> + ] ) <nl> + <nl> + def testDispatchPrecedenceRightToLeft ( self ) : <nl> + # We use an API for which dispatch is disabled , so all dispatchers get <nl> + # called ( since this test checks the order of the dispatcher list ) . <nl> + dispatcher = _pywrap_python_api_dispatcher . PythonAPIDispatcher ( <nl> + " disabled " , None , 5 , [ 4 , 0 , 1 ] , [ 2 , 3 ] , True ) <nl> + <nl> + t = Trace ( " constant " , " t " ) <nl> + t2_1 = Trace2 ( " constant " , " t2_1 " ) <nl> + t2_2 = Trace2 ( " constant " , " t2_2 " ) <nl> + t2b = Trace2B ( " constant " , " t2b " ) <nl> + t3 = Trace3 ( " constant " , " t3 " ) <nl> + t4 = Trace4 ( " constant " , " t4 " ) <nl> + <nl> + # Three dispatchable types , none of which is a subclass of the other : <nl> + # * precedence is right_to_left ( since we set right_to_left = True in the <nl> + # PtyonAPIDispatcher constructor ) . ( Note : arguments are scanned <nl> + # right - to - left , but the elements of list arguments are still scanned <nl> + # left - to - right . ) <nl> + # * duplicates are removed . <nl> + Trace . log . clear ( ) <nl> + result = dispatcher . Dispatch ( t2_1 , t3 , [ ] , [ t2_2 , t3 ] , t4 ) <nl> + self . assertEqual ( result , NotImplemented ) <nl> + self . assertEqual ( Trace . log , [ <nl> + " __tf_dispatch__ ( ' Trace4 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace2 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace3 ' , ' disabled ' ) " <nl> + ] ) <nl> + <nl> + # Subtypes are moved before their base types . ( Note : moving subtypes occurs <nl> + # * after * we swap the order to be right - to - left ; so the dispatch order here <nl> + # is not what we ' d get by just reversing the final dispatch order if <nl> + # right_to_left were false . ) <nl> + Trace . log . clear ( ) <nl> + result = dispatcher . Dispatch ( t2_1 , t3 , [ t ] , [ t2_2 , t , t3 , t4 ] , t2b ) <nl> + self . assertEqual ( result , NotImplemented ) <nl> + self . assertEqual ( Trace . log , [ <nl> + " __tf_dispatch__ ( ' Trace2B ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace2 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace3 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace4 ' , ' disabled ' ) " , <nl> + " __tf_dispatch__ ( ' Trace ' , ' disabled ' ) " <nl> + ] ) <nl> + <nl> + def testDispatchParamOutOfRange ( self ) : <nl> + with self . assertRaisesRegex ( ValueError , " index out of range " ) : <nl> + _pywrap_python_api_dispatcher . PythonAPIDispatcher ( " some_api " , None , 5 , <nl> + [ 0 , 1 , 5 ] , [ 2 , 3 ] , True ) <nl> + with self . assertRaisesRegex ( ValueError , " index out of range " ) : <nl> + _pywrap_python_api_dispatcher . PythonAPIDispatcher ( " some_api " , None , 5 , <nl> + [ 0 , - 3 ] , [ 2 , 3 ] , True ) <nl> + with self . assertRaisesRegex ( ValueError , " index out of range " ) : <nl> + _pywrap_python_api_dispatcher . PythonAPIDispatcher ( " some_api " , None , 5 , <nl> + [ 0 , 1 ] , [ 10 , 3 ] , True ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + googletest . main ( ) <nl> new file mode 100644 <nl> index 0000000000000 . . 4f707a902e274 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / python_api_dispatcher_wrapper . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . 
All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + / / Note : This library is only used by python_api_dispatcher_test . It is <nl> + / / not meant to be used in other circumstances . <nl> + <nl> + # include " pybind11 / pybind11 . h " <nl> + # include " pybind11 / pytypes . h " <nl> + # include " pybind11 / stl . h " <nl> + # include " tensorflow / python / framework / python_api_dispatcher . h " <nl> + <nl> + namespace py = pybind11 ; <nl> + <nl> + namespace { <nl> + <nl> + tensorflow : : PythonAPIDispatcher MakePythonAPIDispatcher ( <nl> + const std : : string & api_name , py : : handle api_func , int num_params , <nl> + const std : : vector < int > & dispatch_params , <nl> + const std : : vector < int > & dispatch_list_params , bool right_to_left ) { <nl> + std : : vector < tensorflow : : PythonAPIDispatcher : : ParamInfo > dispatchable_params ; <nl> + dispatchable_params . reserve ( dispatch_params . size ( ) + <nl> + dispatch_list_params . size ( ) ) ; <nl> + for ( int p : dispatch_params ) { <nl> + dispatchable_params . push_back ( { p , false } ) ; <nl> + } <nl> + for ( int p : dispatch_list_params ) { <nl> + dispatchable_params . push_back ( { p , true } ) ; <nl> + } <nl> + <nl> + auto dispatcher = tensorflow : : PythonAPIDispatcher ( api_name , api_func . ptr ( ) , <nl> + num_params , right_to_left ) ; <nl> + if ( ! dispatcher . Initialize ( dispatchable_params ) ) { <nl> + throw py : : error_already_set ( ) ; <nl> + } <nl> + return dispatcher ; <nl> + } <nl> + <nl> + py : : handle Dispatch ( tensorflow : : PythonAPIDispatcher * self , py : : args args ) { <nl> + auto result = self - > Dispatch ( args . ptr ( ) ) ; <nl> + if ( result = = nullptr ) { <nl> + throw py : : error_already_set ( ) ; <nl> + } else if ( result = = Py_NotImplemented ) { <nl> + Py_INCREF ( result ) ; <nl> + return result ; <nl> + } else { <nl> + return result ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + PYBIND11_MODULE ( _pywrap_python_api_dispatcher , m ) { <nl> + py : : class_ < tensorflow : : PythonAPIDispatcher > ( m , " PythonAPIDispatcher " ) <nl> + . def ( py : : init ( & MakePythonAPIDispatcher ) ) <nl> + . def ( " Dispatch " , Dispatch ) ; <nl> + } <nl> mmm a / tensorflow / tools / def_file_filter / symbols_pybind . txt <nl> ppp b / tensorflow / tools / def_file_filter / symbols_pybind . txt <nl> stream_executor : : port : : internal_statusor : : Helper : : Crash <nl> [ tensor_handle ] # tfe <nl> tensorflow : : TensorHandle : : Tensor <nl> <nl> + [ python_api_dispatcher ] # python_api_dispatcher <nl> + tensorflow : : PythonAPIDispatcher <nl>
Dispatch handler for Python APIs . For background , see the RFC for [ TensorFlow Extension Types ] ( https : / / github . com / tensorflow / community / pull / 269 ) .
tensorflow/tensorflow
6f980e4a0529af43027aba2c4ea5f12efe0e87f1
2020-10-21T23:10:52Z
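From C++, an API handler would wire the dispatcher up roughly as below. This is a sketch, not the public API: it assumes you can link against the internal `python_api_dispatcher` target, uses `Py_None` as a stand-in for the real `api_func`, and dispatches plain ints (which have no `__tf_dispatch__`), so `Dispatch()` hands back `Py_NotImplemented` and the caller would fall through to the normal Tensor path, mirroring `testNoDispatchableTypes` above.

```cpp
// Sketch only: exercises PythonAPIDispatcher from an embedded interpreter.
#include <Python.h>

#include "tensorflow/python/framework/python_api_dispatcher.h"

int main() {
  Py_Initialize();

  // Py_None stands in for the real api_func that implements the Tensor path.
  tensorflow::PythonAPIDispatcher dispatcher("tf.math.add", Py_None,
                                             /*num_params=*/2);
  // Both canonical parameters are scalar (is_list = false) and dispatchable.
  if (!dispatcher.Initialize({{0, false}, {1, false}})) {
    PyErr_Print();
    return 1;
  }

  // Plain ints define no __tf_dispatch__, so Dispatch() returns a borrowed
  // reference to Py_NotImplemented and the caller falls back to Tensors.
  PyObject* params = Py_BuildValue("(ii)", 1, 2);
  PyObject* result = dispatcher.Dispatch(params);
  bool fell_through = (result == Py_NotImplemented);

  Py_DECREF(params);
  Py_Finalize();
  return fell_through ? 0 : 1;
}
```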
mmm a / lib / SILPasses / StackPromotion . cpp <nl> ppp b / lib / SILPasses / StackPromotion . cpp <nl> class StackPromoter { <nl> SILFunction * getBufferDeallocFunc ( SILFunction * OrigFunc , <nl> SILLocation Loc ) ; <nl> <nl> - / / / Checks if the allocation \ p AI can be promoted and returns the insertion <nl> - / / / point for the deallocation instruction ( s ) if it is possible . <nl> - SILInstruction * canPromoteAlloc ( SILInstruction * AI ) ; <nl> + / / / Returns true if the allocation \ p AI can be promoted . <nl> + / / / In this case it sets the \ a DeallocInsertionPoint to the instruction <nl> + / / / where the deallocation must be inserted . <nl> + / / / It optionally also sets \ a AllocInsertionPoint in case the allocation <nl> + / / / instruction must be moved to another place . <nl> + bool canPromoteAlloc ( SILInstruction * AI , <nl> + SILInstruction * & AllocInsertionPoint , <nl> + SILInstruction * & DeallocInsertionPoint ) ; <nl> <nl> bool strictlyDominates ( SILBasicBlock * A , SILBasicBlock * B ) { <nl> return A ! = B & & DT - > dominates ( A , B ) ; <nl> static bool isPromotableAllocInst ( SILInstruction * I ) { <nl> StackPromoter : : ChangeState StackPromoter : : promote ( ) { <nl> / / Search the whole function for stack promotable allocations . <nl> for ( SILBasicBlock & BB : * ConGraph - > getFunction ( ) ) { <nl> - for ( SILInstruction & I : BB ) { <nl> - if ( isPromotableAllocInst ( & I ) ) { <nl> - tryPromoteAlloc ( & I ) ; <nl> + for ( auto Iter = BB . begin ( ) ; Iter ! = BB . end ( ) ; ) { <nl> + / / The allocaiton instruction may be moved , so increment Iter prior to <nl> + / / doing the optimization . <nl> + SILInstruction * I = & * Iter + + ; <nl> + if ( isPromotableAllocInst ( I ) ) { <nl> + tryPromoteAlloc ( I ) ; <nl> } <nl> } <nl> } <nl> StackPromoter : : ChangeState StackPromoter : : promote ( ) { <nl> } <nl> <nl> void StackPromoter : : tryPromoteAlloc ( SILInstruction * I ) { <nl> - SILInstruction * InsertionPoint = canPromoteAlloc ( I ) ; <nl> - if ( ! InsertionPoint ) <nl> + SILInstruction * AllocInsertionPoint = nullptr ; <nl> + SILInstruction * DeallocInsertionPoint = nullptr ; <nl> + if ( ! canPromoteAlloc ( I , AllocInsertionPoint , DeallocInsertionPoint ) ) <nl> return ; <nl> <nl> DEBUG ( llvm : : dbgs ( ) < < " Promoted " < < * I ) ; <nl> DEBUG ( llvm : : dbgs ( ) < < " in " < < I - > getFunction ( ) - > getName ( ) < < ' \ n ' ) ; <nl> NumStackPromoted + + ; <nl> <nl> - SILBuilder B ( InsertionPoint ) ; <nl> + SILBuilder B ( DeallocInsertionPoint ) ; <nl> if ( auto * ARI = dyn_cast < AllocRefInst > ( I ) ) { <nl> / / It ' s an object allocation . We set the [ stack ] attribute in the alloc_ref . <nl> ARI - > setStackAllocatable ( ) ; <nl> + if ( AllocInsertionPoint ) <nl> + ARI - > moveBefore ( AllocInsertionPoint ) ; <nl> <nl> / / / And create a dealloc_ref [ stack ] at the end of the object ' s lifetime . <nl> B . createDeallocRef ( I - > getLoc ( ) , I , true ) ; <nl> void StackPromoter : : tryPromoteAlloc ( SILInstruction * I ) { <nl> return ; <nl> } <nl> if ( auto * AI = dyn_cast < ApplyInst > ( I ) ) { <nl> + assert ( ! AllocInsertionPoint & & " can ' t move call to swift_bufferAlloc " ) ; <nl> / / It ' s an array buffer allocation . 
<nl> auto * OldFRI = cast < FunctionRefInst > ( AI - > getCallee ( ) ) ; <nl> SILFunction * OldF = OldFRI - > getReferencedFunction ( ) ; <nl> static void dumpUsePoints ( const llvm : : SmallPtrSetImpl < ValueBase * > & UsePoints ) { <nl> } <nl> # endif <nl> <nl> - SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> + bool StackPromoter : : canPromoteAlloc ( SILInstruction * AI , <nl> + SILInstruction * & AllocInsertionPoint , <nl> + SILInstruction * & DeallocInsertionPoint ) { <nl> + AllocInsertionPoint = nullptr ; <nl> + DeallocInsertionPoint = nullptr ; <nl> auto * Node = ConGraph - > getNodeOrNull ( AI ) ; <nl> if ( ! Node ) <nl> - return nullptr ; <nl> + return false ; <nl> <nl> / / The most important check : does the object escape the current function ? <nl> if ( Node - > escapes ( ) ) <nl> - return nullptr ; <nl> + return false ; <nl> <nl> / / Now we have to determine the lifetime of the allocated object in its <nl> / / function . <nl> SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> / / But in case all pathes from this block end in unreachable then the <nl> / / final release of the object may be optimized away . We bail out in this <nl> / / case . <nl> - return nullptr ; <nl> + return false ; <nl> } <nl> DEBUG ( dumpUsePoints ( UsePoints ) ) ; <nl> <nl> SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> while ( ! strictlyPostDominates ( EndBlock , Pred ) ) { <nl> EndBlock = getImmediatePostDom ( EndBlock ) ; <nl> if ( ! EndBlock ) <nl> - return nullptr ; <nl> + return false ; <nl> } <nl> } <nl> Iter = BB - > begin ( ) ; <nl> SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> SILInstruction & I = * Iter + + ; <nl> if ( BB = = EndBlock & & StackDepth = = 0 & & NumUsePointsToFind = = 0 ) { <nl> / / We found a place to insert the stack deallocation . <nl> - return & I ; <nl> + DeallocInsertionPoint = & I ; <nl> + return true ; <nl> } <nl> if ( I . isAllocatingStack ( ) ) { <nl> StackDepth + + ; <nl> SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> / / % obj = alloc_ref / / the allocation <nl> / / dealloc_stack % 1 <nl> / / use_of_obj ( % obj ) <nl> - return nullptr ; <nl> + / / <nl> + / / In this case we can move the alloc_ref before the alloc_stack <nl> + / / to fix the nesting . <nl> + if ( ! isa < AllocRefInst > ( AI ) ) <nl> + return false ; <nl> + auto * Alloc = dyn_cast < SILInstruction > ( I . getOperand ( 0 ) . getDef ( ) ) ; <nl> + if ( ! Alloc ) <nl> + return false ; <nl> + / / This should always be the case , but let ' s be on the safe side . <nl> + if ( ! PDT - > dominates ( StartBlock , Alloc - > getParent ( ) ) ) <nl> + return false ; <nl> + AllocInsertionPoint = Alloc ; <nl> + StackDepth + + ; <nl> } <nl> StackDepth - - ; <nl> } <nl> SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> / / dealloc_stack % 1 / / this is the new EndBlock <nl> EndBlock = getImmediatePostDom ( EndBlock ) ; <nl> if ( ! EndBlock ) <nl> - return nullptr ; <nl> + return false ; <nl> } <nl> / / Again , it ' s important that the EndBlock is the first in the WorkList . <nl> WorkList . insert ( EndBlock , - 1 ) ; <nl> SILInstruction * StackPromoter : : canPromoteAlloc ( SILInstruction * AI ) { <nl> / / cond_br . . . , loop , exit <nl> / / exit : <nl> / / use ( % container ) <nl> - return nullptr ; <nl> + return false ; <nl> } <nl> WorkList . 
insert ( Succ , StackDepth ) ; <nl> } <nl> mmm a / test / SILPasses / stack_promotion . sil <nl> ppp b / test / SILPasses / stack_promotion . sil <nl> bb5 : <nl> return % a1 : $ Int32 <nl> } <nl> <nl> + / / CHECK - LABEL : sil @ promote_and_move_alloc_before_alloc_stack <nl> + / / CHECK : [ [ O : % [ 0 - 9 ] + ] ] = alloc_ref [ stack ] $ XX <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : { { ^ } } bb2 : <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : strong_release <nl> + / / CHECK : dealloc_ref [ stack ] [ [ O ] ] : $ XX <nl> + / / CHECK : return <nl> + sil @ promote_and_move_alloc_before_alloc_stack : $ @ convention ( thin ) ( ) - > Int32 { <nl> + bb0 : <nl> + % s1 = alloc_stack $ Int32 <nl> + cond_br undef , bb1 , bb2 <nl> + <nl> + bb1 : <nl> + br bb2 <nl> + <nl> + bb2 : <nl> + % o1 = alloc_ref $ XX <nl> + % f1 = function_ref @ xx_init : $ @ convention ( thin ) ( @ guaranteed XX ) - > XX <nl> + % n1 = apply % f1 ( % o1 ) : $ @ convention ( thin ) ( @ guaranteed XX ) - > XX <nl> + dealloc_stack % s1 # 0 : $ * @ local_storage Int32 <nl> + % l1 = ref_element_addr % n1 : $ XX , # XX . x <nl> + % l2 = load % l1 : $ * Int32 <nl> + strong_release % n1 : $ XX <nl> + return % l2 : $ Int32 <nl> + } <nl> + <nl> / / CHECK - LABEL : sil @ promote_array <nl> / / CHECK : [ [ AF : % [ 0 - 9 ] + ] ] = function_ref @ swift_bufferAllocateOnStack : $ @ convention ( thin ) ( @ thick AnyObject . Type , Int , Int ) - > @ owned AnyObject <nl> / / CHECK : [ [ B : % [ 0 - 9 ] + ] ] = apply [ [ AF ] ] ( <nl>
StackPromotion: handle more cases of stack nesting instructions.
apple/swift
1384b245ab1b910bcc74ad52dad7a82525fd346f
2015-11-11T17:42:52Z
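In the apple/swift record above, promote() starts advancing the iterator before calling tryPromoteAlloc(), because the optimization may now move the allocation instruction out from under the loop. A small self-contained sketch of that advance-before-mutate pattern, using a std::list of ints as a hypothetical stand-in for a basic block's instruction list:

    // Sketch of the "increment the iterator before mutating the current element"
    // pattern from StackPromoter::promote().
    #include <iostream>
    #include <list>

    // Moves even values to the front of the list. Relocating the current element
    // would confuse a naive loop, so the iterator is advanced first.
    void MoveEvensToFront(std::list<int>& values) {
      for (auto it = values.begin(); it != values.end();) {
        auto current = it++;  // advance before the element may be moved
        if (*current % 2 == 0 && current != values.begin()) {
          // Relocate the node, roughly analogous to SILInstruction::moveBefore().
          values.splice(values.begin(), values, current);
        }
      }
    }

    int main() {
      std::list<int> values{1, 2, 3, 4, 5, 6};
      MoveEvensToFront(values);
      for (int v : values) std::cout << v << ' ';
      std::cout << '\n';  // prints: 6 4 2 1 3 5
      return 0;
    }

Because std::list::splice relocates a node without invalidating its iterator, saving current and bumping it first keeps the traversal valid even when the current element is moved.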
mmm a / contrib / gitian - descriptors / gitian - linux . yml <nl> ppp b / contrib / gitian - descriptors / gitian - linux . yml <nl> script : | <nl> . / autogen . sh <nl> . / configure - - prefix = $ { BASEPREFIX } / ` echo " $ { HOSTS } " | awk ' { print $ 1 ; } ' ` <nl> make dist <nl> - DISTNAME = ` echo bitcoin - * . tar . gz ` <nl> - <nl> + SOURCEDIST = ` echo bitcoin - * . tar . gz ` <nl> + DISTNAME = ` echo $ { SOURCEDIST } | sed ' s / . tar . * / / ' ` <nl> # Correct tar file order <nl> mkdir - p temp <nl> pushd temp <nl> - tar xf . . / $ DISTNAME <nl> - find bitcoin - * | sort | tar - - no - recursion - c - T - | gzip - 9n > . . / $ DISTNAME <nl> + tar xf . . / $ SOURCEDIST <nl> + find bitcoin - * | sort | tar - - no - recursion - c - T - | gzip - 9n > . . / $ SOURCEDIST <nl> popd <nl> <nl> ORIGPATH = " $ PATH " <nl> script : | <nl> export PATH = $ { BASEPREFIX } / $ { i } / native / bin : $ { ORIGPATH } <nl> mkdir - p distsrc - $ { i } <nl> cd distsrc - $ { i } <nl> - tar - - strip - components = 1 - xf . . / $ DISTNAME <nl> + INSTALLPATH = ` pwd ` / installed / $ { DISTNAME } <nl> + mkdir - p $ { INSTALLPATH } <nl> + tar - - strip - components = 1 - xf . . / $ SOURCEDIST <nl> <nl> - . / configure - - prefix = $ { BASEPREFIX } / $ { i } - - bindir = $ { OUTDIR } / $ { i } / bin - - includedir = $ { OUTDIR } / $ { i } / include - - libdir = $ { OUTDIR } / $ { i } / lib - - disable - ccache - - disable - maintainer - mode - - disable - dependency - tracking $ { CONFIGFLAGS } <nl> + . / configure - - prefix = $ { BASEPREFIX } / $ { i } - - bindir = $ { INSTALLPATH } / bin - - includedir = $ { INSTALLPATH } / include - - libdir = $ { INSTALLPATH } / lib - - disable - ccache - - disable - maintainer - mode - - disable - dependency - tracking $ { CONFIGFLAGS } <nl> make $ { MAKEOPTS } <nl> make install - strip <nl> - cd . . <nl> + cd installed <nl> + find . - name " lib * . la " - delete <nl> + find . - name " lib * . a " - delete <nl> + rm - rf $ { DISTNAME } / lib / pkgconfig <nl> + find . | sort | tar - - no - recursion - c - T - | gzip - 9n > $ { OUTDIR } / $ { DISTNAME } - $ { i } . tar . gz <nl> + cd . . / . . / <nl> done <nl> mkdir - p $ OUTDIR / src <nl> - mv $ DISTNAME $ OUTDIR / src <nl> - mv $ { OUTDIR } / x86_64 - * $ { OUTDIR } / 64 <nl> - mv $ { OUTDIR } / i686 - * $ { OUTDIR } / 32 <nl> + mv $ SOURCEDIST $ OUTDIR / src <nl> + mv $ { OUTDIR } / $ { DISTNAME } - x86_64 - * . tar . gz $ { OUTDIR } / $ { DISTNAME } - linux64 . tar . gz <nl> + mv $ { OUTDIR } / $ { DISTNAME } - i686 - * . tar . gz $ { OUTDIR } / $ { DISTNAME } - linux32 . tar . gz <nl> <nl> - # Delete unwanted stuff <nl> - find $ { OUTDIR } - name " lib * . la " - delete <nl> mmm a / contrib / gitian - descriptors / gitian - osx . yml <nl> ppp b / contrib / gitian - descriptors / gitian - osx . yml <nl> script : | <nl> . / autogen . sh <nl> . / configure - - prefix = $ { BASEPREFIX } / ` echo " $ { HOSTS } " | awk ' { print $ 1 ; } ' ` <nl> make dist <nl> - DISTNAME = ` echo bitcoin - * . tar . gz ` <nl> + SOURCEDIST = ` echo bitcoin - * . tar . gz ` <nl> + DISTNAME = ` echo $ { SOURCEDIST } | sed ' s / . tar . * / / ' ` <nl> <nl> # Correct tar file order <nl> mkdir - p temp <nl> pushd temp <nl> - tar xf . . / $ DISTNAME <nl> - find bitcoin - * | sort | tar - - no - recursion - c - T - | gzip - 9n > . . / $ DISTNAME <nl> + tar xf . . / $ SOURCEDIST <nl> + find bitcoin - * | sort | tar - - no - recursion - c - T - | gzip - 9n > . . 
/ $ SOURCEDIST <nl> popd <nl> <nl> ORIGPATH = " $ PATH " <nl> script : | <nl> export PATH = $ { BASEPREFIX } / $ { i } / native / bin : $ { ORIGPATH } <nl> mkdir - p distsrc - $ { i } <nl> cd distsrc - $ { i } <nl> - tar - - strip - components = 1 - xf . . / $ DISTNAME <nl> + INSTALLPATH = ` pwd ` / installed / $ { DISTNAME } <nl> + mkdir - p $ { INSTALLPATH } <nl> + tar - - strip - components = 1 - xf . . / $ SOURCEDIST <nl> <nl> - . / configure - - prefix = $ { BASEPREFIX } / $ { i } - - bindir = $ { OUTDIR } / $ { i } / bin - - includedir = $ { OUTDIR } / $ { i } / include - - libdir = $ { OUTDIR } / $ { i } / lib - - disable - ccache - - disable - maintainer - mode - - disable - dependency - tracking $ { CONFIGFLAGS } <nl> + . / configure - - prefix = $ { BASEPREFIX } / $ { i } - - bindir = $ { INSTALLPATH } / bin - - includedir = $ { INSTALLPATH } / include - - libdir = $ { INSTALLPATH } / lib - - disable - ccache - - disable - maintainer - mode - - disable - dependency - tracking $ { CONFIGFLAGS } <nl> make $ { MAKEOPTS } <nl> make install - strip <nl> make deploy <nl> - $ { WRAP_DIR } / dmg dmg Bitcoin - Qt . dmg $ { OUTDIR } / Bitcoin - Qt . dmg <nl> - cd . . <nl> + $ { WRAP_DIR } / dmg dmg Bitcoin - Qt . dmg $ { OUTDIR } / $ { DISTNAME } - osx . dmg <nl> + <nl> + cd installed <nl> + find . - name " lib * . la " - delete <nl> + find . - name " lib * . a " - delete <nl> + rm - rf $ { DISTNAME } / lib / pkgconfig <nl> + find . | sort | tar - - no - recursion - c - T - | gzip - 9n > $ { OUTDIR } / $ { DISTNAME } - $ { i } . tar . gz <nl> + cd . . / . . / <nl> done <nl> mkdir - p $ OUTDIR / src <nl> - mv $ DISTNAME $ OUTDIR / src <nl> - <nl> - # Delete unwanted stuff <nl> - find $ { OUTDIR } - name " lib * . la " - delete <nl> + mv $ SOURCEDIST $ OUTDIR / src <nl> + mv $ { OUTDIR } / $ { DISTNAME } - x86_64 - * . tar . gz $ { OUTDIR } / $ { DISTNAME } - osx64 . tar . gz <nl> mmm a / contrib / gitian - descriptors / gitian - win . yml <nl> ppp b / contrib / gitian - descriptors / gitian - win . yml <nl> packages : <nl> - " mingw - w64 " <nl> - " g + + - mingw - w64 " <nl> - " nsis " <nl> + - " zip " <nl> reference_datetime : " 2013 - 06 - 01 00 : 00 : 00 " <nl> remotes : <nl> - " url " : " https : / / github . com / bitcoin / bitcoin . git " <nl> script : | <nl> HOSTS = " x86_64 - w64 - mingw32 i686 - w64 - mingw32 " <nl> CONFIGFLAGS = " - - enable - upnp - default " <nl> FAKETIME_HOST_PROGS = " g + + ar ranlib nm windres strip " <nl> - FAKETIME_PROGS = " date makensis " <nl> + FAKETIME_PROGS = " date makensis zip " <nl> <nl> export QT_RCC_TEST = 1 <nl> export GZIP = " - 9n " <nl> script : | <nl> . / autogen . sh <nl> . / configure - - prefix = $ { BASEPREFIX } / ` echo " $ { HOSTS } " | awk ' { print $ 1 ; } ' ` <nl> make dist <nl> - DISTNAME = ` echo bitcoin - * . tar . gz ` <nl> + SOURCEDIST = ` echo bitcoin - * . tar . gz ` <nl> + DISTNAME = ` echo $ { SOURCEDIST } | sed ' s / . tar . * / / ' ` <nl> <nl> # Correct tar file order <nl> mkdir - p temp <nl> pushd temp <nl> - tar xf . . / $ DISTNAME <nl> - find bitcoin - * | sort | tar - - no - recursion - c - T - | gzip - 9n > . . / $ DISTNAME <nl> + tar xf . . / $ SOURCEDIST <nl> + find bitcoin - * | sort | tar - - no - recursion - c - T - | gzip - 9n > . . / $ SOURCEDIST <nl> popd <nl> <nl> ORIGPATH = " $ PATH " <nl> script : | <nl> export PATH = $ { BASEPREFIX } / $ { i } / native / bin : $ { ORIGPATH } <nl> mkdir - p distsrc - $ { i } <nl> cd distsrc - $ { i } <nl> - tar - - strip - components = 1 - xf . . 
/ $ DISTNAME <nl> + INSTALLPATH = ` pwd ` / installed / $ { DISTNAME } <nl> + mkdir - p $ { INSTALLPATH } <nl> + tar - - strip - components = 1 - xf . . / $ SOURCEDIST <nl> <nl> - . / configure - - prefix = $ { BASEPREFIX } / $ { i } - - bindir = $ { OUTDIR } / $ { i } / bin - - includedir = $ { OUTDIR } / $ { i } / include - - libdir = $ { OUTDIR } / $ { i } / lib - - disable - ccache - - disable - maintainer - mode - - disable - dependency - tracking $ { CONFIGFLAGS } <nl> + . / configure - - prefix = $ { BASEPREFIX } / $ { i } - - bindir = $ { INSTALLPATH } / bin - - includedir = $ { INSTALLPATH } / include - - libdir = $ { INSTALLPATH } / lib - - disable - ccache - - disable - maintainer - mode - - disable - dependency - tracking $ { CONFIGFLAGS } <nl> make $ { MAKEOPTS } <nl> make deploy <nl> make install - strip <nl> cp - f bitcoin - * setup * . exe $ OUTDIR / <nl> - cd . . <nl> + cd installed <nl> + mv $ { DISTNAME } / bin / * . dll $ { DISTNAME } / lib / <nl> + find . - name " lib * . la " - delete <nl> + find . - name " lib * . a " - delete <nl> + rm - rf $ { DISTNAME } / lib / pkgconfig <nl> + find . - type f | sort | zip - X @ $ { OUTDIR } / $ { DISTNAME } - $ { i } . zip <nl> + cd . . / . . <nl> done <nl> mkdir - p $ OUTDIR / src <nl> - mv $ DISTNAME $ OUTDIR / src <nl> - mv $ { OUTDIR } / x86_64 - * $ { OUTDIR } / 64 <nl> - mv $ { OUTDIR } / i686 - * $ { OUTDIR } / 32 <nl> - <nl> - # Delete unwanted stuff <nl> - find $ { OUTDIR } - name " lib * . la " - delete <nl> + mv $ SOURCEDIST $ OUTDIR / src <nl> + mv $ { OUTDIR } / $ { DISTNAME } - x86_64 - * . zip $ { OUTDIR } / $ { DISTNAME } - win64 . zip <nl> + mv $ { OUTDIR } / $ { DISTNAME } - i686 - * . zip $ { OUTDIR } / $ { DISTNAME } - win32 . zip <nl>
gitian: update descriptors to use a sane uniform output
bitcoin/bitcoin
52bb7a7e1b69e7cc863621fb04958e0a39066138
2014-11-25T23:49:02Z
mmm a / validation - test / stdlib / UnicodeUTFEncoders . swift <nl> ppp b / validation - test / stdlib / UnicodeUTFEncoders . swift <nl> import Foundation <nl> @ _silgen_name ( " random " ) func random ( ) - > UInt32 <nl> @ _silgen_name ( " srandomdev " ) func srandomdev ( ) <nl> <nl> - protocol TestableUnicodeCodec : UnicodeCodecType { <nl> + protocol TestableUnicodeCodec : UnicodeCodec { <nl> associatedtype CodeUnit : Integer <nl> static func encodingId ( ) - > NSStringEncoding <nl> static func name ( ) - > NSString <nl>
Rename UnicodeCodecType to UnicodeCodec in a new test
apple/swift
e9796f1c3d6b61fe0c3e27d9124605f219b145c9
2016-03-10T02:40:59Z
mmm a / src / compiler / effect - control - linearizer . cc <nl> ppp b / src / compiler / effect - control - linearizer . cc <nl> Node * EffectControlLinearizer : : LowerCheckedUint32Bounds ( Node * node , <nl> Node * frame_state ) { <nl> Node * index = node - > InputAt ( 0 ) ; <nl> Node * limit = node - > InputAt ( 1 ) ; <nl> - const CheckParameters & params = CheckParametersOf ( node - > op ( ) ) ; <nl> + const CheckBoundsParameters & params = CheckBoundsParametersOf ( node - > op ( ) ) ; <nl> <nl> Node * check = __ Uint32LessThan ( index , limit ) ; <nl> - __ DeoptimizeIfNot ( DeoptimizeReason : : kOutOfBounds , params . feedback ( ) , check , <nl> - frame_state , IsSafetyCheck : : kCriticalSafetyCheck ) ; <nl> + switch ( params . mode ( ) ) { <nl> + case CheckBoundsParameters : : kDeoptOnOutOfBounds : <nl> + __ DeoptimizeIfNot ( DeoptimizeReason : : kOutOfBounds , <nl> + params . check_parameters ( ) . feedback ( ) , check , <nl> + frame_state , IsSafetyCheck : : kCriticalSafetyCheck ) ; <nl> + break ; <nl> + case CheckBoundsParameters : : kAbortOnOutOfBounds : { <nl> + auto if_abort = __ MakeDeferredLabel ( ) ; <nl> + auto done = __ MakeLabel ( ) ; <nl> + <nl> + __ Branch ( check , & done , & if_abort ) ; <nl> + <nl> + __ Bind ( & if_abort ) ; <nl> + __ Unreachable ( ) ; <nl> + __ Goto ( & done ) ; <nl> + <nl> + __ Bind ( & done ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> return index ; <nl> } <nl> <nl> mmm a / src / compiler / simplified - lowering . cc <nl> ppp b / src / compiler / simplified - lowering . cc <nl> class RepresentationSelector { <nl> VisitBinop ( node , UseInfo : : TruncatingWord32 ( ) , <nl> MachineRepresentation : : kWord32 ) ; <nl> if ( lower ( ) ) { <nl> + CheckBoundsParameters : : Mode mode = <nl> + CheckBoundsParameters : : kDeoptOnOutOfBounds ; <nl> if ( lowering - > poisoning_level_ = = <nl> PoisoningMitigationLevel : : kDontPoison & & <nl> ( index_type . IsNone ( ) | | length_type . IsNone ( ) | | <nl> class RepresentationSelector { <nl> index_type . Max ( ) < length_type . Min ( ) ) ) ) { <nl> / / The bounds check is redundant if we already know that <nl> / / the index is within the bounds of [ 0 . 0 , length [ . <nl> - DeferReplacement ( node , node - > InputAt ( 0 ) ) ; <nl> - } else { <nl> - NodeProperties : : ChangeOp ( <nl> - node , simplified ( ) - > CheckedUint32Bounds ( p . feedback ( ) ) ) ; <nl> + mode = CheckBoundsParameters : : kAbortOnOutOfBounds ; <nl> } <nl> + NodeProperties : : ChangeOp ( <nl> + node , simplified ( ) - > CheckedUint32Bounds ( p . feedback ( ) , mode ) ) ; <nl> } <nl> } else { <nl> VisitBinop ( <nl> class RepresentationSelector { <nl> UseInfo : : TruncatingWord32 ( ) , MachineRepresentation : : kWord32 ) ; <nl> if ( lower ( ) ) { <nl> NodeProperties : : ChangeOp ( <nl> - node , simplified ( ) - > CheckedUint32Bounds ( p . feedback ( ) ) ) ; <nl> + node , <nl> + simplified ( ) - > CheckedUint32Bounds ( <nl> + p . feedback ( ) , CheckBoundsParameters : : kDeoptOnOutOfBounds ) ) ; <nl> } <nl> } <nl> } else { <nl> mmm a / src / compiler / simplified - operator . cc <nl> ppp b / src / compiler / simplified - operator . 
cc <nl> bool operator = = ( CheckMinusZeroParameters const & lhs , <nl> V ( CheckedTaggedSignedToInt32 , 1 , 1 ) \ <nl> V ( CheckedTaggedToTaggedPointer , 1 , 1 ) \ <nl> V ( CheckedTaggedToTaggedSigned , 1 , 1 ) \ <nl> - V ( CheckedUint32Bounds , 2 , 1 ) \ <nl> V ( CheckedUint32ToInt32 , 1 , 1 ) \ <nl> V ( CheckedUint32ToTaggedSigned , 1 , 1 ) \ <nl> V ( CheckedUint64Bounds , 2 , 1 ) \ <nl> V ( CheckedUint64ToInt32 , 1 , 1 ) \ <nl> V ( CheckedUint64ToTaggedSigned , 1 , 1 ) <nl> <nl> + # define CHECKED_BOUNDS_OP_LIST ( V ) V ( CheckedUint32Bounds ) <nl> + <nl> struct SimplifiedOperatorGlobalCache final { <nl> # define PURE ( Name , properties , value_input_count , control_input_count ) \ <nl> struct Name # # Operator final : public Operator { \ <nl> struct SimplifiedOperatorGlobalCache final { <nl> CHECKED_WITH_FEEDBACK_OP_LIST ( CHECKED_WITH_FEEDBACK ) <nl> # undef CHECKED_WITH_FEEDBACK <nl> <nl> + # define CHECKED_BOUNDS ( Name ) \ <nl> + struct Name # # Operator final : public Operator1 < CheckBoundsParameters > { \ <nl> + Name # # Operator ( VectorSlotPair feedback , CheckBoundsParameters : : Mode mode ) \ <nl> + : Operator1 < CheckBoundsParameters > ( \ <nl> + IrOpcode : : k # # Name , Operator : : kFoldable | Operator : : kNoThrow , \ <nl> + # Name , 2 , 1 , 1 , 1 , 1 , 0 , \ <nl> + CheckBoundsParameters ( feedback , mode ) ) { } \ <nl> + } ; \ <nl> + Name # # Operator k # # Name # # Deopting = { \ <nl> + VectorSlotPair ( ) , CheckBoundsParameters : : kDeoptOnOutOfBounds } ; \ <nl> + Name # # Operator k # # Name # # Aborting = { \ <nl> + VectorSlotPair ( ) , CheckBoundsParameters : : kAbortOnOutOfBounds } ; <nl> + CHECKED_BOUNDS_OP_LIST ( CHECKED_BOUNDS ) <nl> + # undef CHECKED_BOUNDS <nl> + <nl> template < DeoptimizeReason kDeoptimizeReason > <nl> struct CheckIfOperator final : public Operator1 < CheckIfParameters > { <nl> CheckIfOperator ( ) <nl> GET_FROM_CACHE ( LoadFieldByIndex ) <nl> CHECKED_WITH_FEEDBACK_OP_LIST ( GET_FROM_CACHE_WITH_FEEDBACK ) <nl> # undef GET_FROM_CACHE_WITH_FEEDBACK <nl> <nl> + # define GET_FROM_CACHE_WITH_FEEDBACK ( Name ) \ <nl> + const Operator * SimplifiedOperatorBuilder : : Name ( \ <nl> + const VectorSlotPair & feedback , CheckBoundsParameters : : Mode mode ) { \ <nl> + if ( ! feedback . IsValid ( ) ) { \ <nl> + switch ( mode ) { \ <nl> + case CheckBoundsParameters : : kDeoptOnOutOfBounds : \ <nl> + return & cache_ . k # # Name # # Deopting ; \ <nl> + case CheckBoundsParameters : : kAbortOnOutOfBounds : \ <nl> + return & cache_ . k # # Name # # Aborting ; \ <nl> + } \ <nl> + } \ <nl> + return new ( zone ( ) ) \ <nl> + SimplifiedOperatorGlobalCache : : Name # # Operator ( feedback , mode ) ; \ <nl> + } <nl> + CHECKED_BOUNDS_OP_LIST ( GET_FROM_CACHE_WITH_FEEDBACK ) <nl> + # undef GET_FROM_CACHE_WITH_FEEDBACK <nl> + <nl> bool IsCheckedWithFeedback ( const Operator * op ) { <nl> # define CASE ( Name , . . . ) case IrOpcode : : k # # Name : <nl> switch ( op - > opcode ( ) ) { <nl> std : : ostream & operator < < ( std : : ostream & os , CheckParameters const & p ) { <nl> } <nl> <nl> CheckParameters const & CheckParametersOf ( Operator const * op ) { <nl> + if ( op - > opcode ( ) = = IrOpcode : : kCheckedUint32Bounds ) { <nl> + return OpParameter < CheckBoundsParameters > ( op ) . 
check_parameters ( ) ; <nl> + } <nl> # define MAKE_OR ( name , arg2 , arg3 ) op - > opcode ( ) = = IrOpcode : : k # # name | | <nl> CHECK ( ( CHECKED_WITH_FEEDBACK_OP_LIST ( MAKE_OR ) false ) ) ; <nl> # undef MAKE_OR <nl> return OpParameter < CheckParameters > ( op ) ; <nl> } <nl> <nl> + bool operator = = ( CheckBoundsParameters const & lhs , <nl> + CheckBoundsParameters const & rhs ) { <nl> + return lhs . check_parameters ( ) = = rhs . check_parameters ( ) & & <nl> + lhs . mode ( ) = = rhs . mode ( ) ; <nl> + } <nl> + <nl> + size_t hash_value ( CheckBoundsParameters const & p ) { <nl> + return base : : hash_combine ( hash_value ( p . check_parameters ( ) ) , p . mode ( ) ) ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & os , CheckBoundsParameters const & p ) { <nl> + os < < p . check_parameters ( ) < < " , " ; <nl> + switch ( p . mode ( ) ) { <nl> + case CheckBoundsParameters : : kDeoptOnOutOfBounds : <nl> + os < < " deopt " ; <nl> + break ; <nl> + case CheckBoundsParameters : : kAbortOnOutOfBounds : <nl> + os < < " abort " ; <nl> + break ; <nl> + } <nl> + return os ; <nl> + } <nl> + <nl> + CheckBoundsParameters const & CheckBoundsParametersOf ( Operator const * op ) { <nl> + CHECK_EQ ( op - > opcode ( ) , IrOpcode : : kCheckedUint32Bounds ) ; <nl> + return OpParameter < CheckBoundsParameters > ( op ) ; <nl> + } <nl> + <nl> bool operator = = ( CheckIfParameters const & lhs , CheckIfParameters const & rhs ) { <nl> return lhs . reason ( ) = = rhs . reason ( ) & & lhs . feedback ( ) = = rhs . feedback ( ) ; <nl> } <nl> const Operator * SimplifiedOperatorBuilder : : TransitionAndStoreNonNumberElement ( <nl> # undef EFFECT_DEPENDENT_OP_LIST <nl> # undef SPECULATIVE_NUMBER_BINOP_LIST <nl> # undef CHECKED_WITH_FEEDBACK_OP_LIST <nl> + # undef CHECKED_BOUNDS_OP_LIST <nl> # undef CHECKED_OP_LIST <nl> # undef ACCESS_OP_LIST <nl> <nl> mmm a / src / compiler / simplified - operator . h <nl> ppp b / src / compiler / simplified - operator . 
h <nl> std : : ostream & operator < < ( std : : ostream & , CheckParameters const & ) ; <nl> <nl> CheckParameters const & CheckParametersOf ( Operator const * ) V8_WARN_UNUSED_RESULT ; <nl> <nl> + class CheckBoundsParameters final { <nl> + public : <nl> + enum Mode { kAbortOnOutOfBounds , kDeoptOnOutOfBounds } ; <nl> + <nl> + CheckBoundsParameters ( const VectorSlotPair & feedback , Mode mode ) <nl> + : check_parameters_ ( feedback ) , mode_ ( mode ) { } <nl> + <nl> + Mode mode ( ) const { return mode_ ; } <nl> + const CheckParameters & check_parameters ( ) const { return check_parameters_ ; } <nl> + <nl> + private : <nl> + CheckParameters check_parameters_ ; <nl> + Mode mode_ ; <nl> + } ; <nl> + <nl> + bool operator = = ( CheckBoundsParameters const & , CheckBoundsParameters const & ) ; <nl> + <nl> + size_t hash_value ( CheckBoundsParameters const & ) ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & , CheckBoundsParameters const & ) ; <nl> + <nl> + CheckBoundsParameters const & CheckBoundsParametersOf ( Operator const * ) <nl> + V8_WARN_UNUSED_RESULT ; <nl> + <nl> class CheckIfParameters final { <nl> public : <nl> explicit CheckIfParameters ( DeoptimizeReason reason , <nl> class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final <nl> const VectorSlotPair & feedback ) ; <nl> const Operator * CheckedUint32Div ( ) ; <nl> const Operator * CheckedUint32Mod ( ) ; <nl> - const Operator * CheckedUint32Bounds ( const VectorSlotPair & feedback ) ; <nl> + const Operator * CheckedUint32Bounds ( const VectorSlotPair & feedback , <nl> + CheckBoundsParameters : : Mode mode ) ; <nl> const Operator * CheckedUint32ToInt32 ( const VectorSlotPair & feedback ) ; <nl> const Operator * CheckedUint32ToTaggedSigned ( const VectorSlotPair & feedback ) ; <nl> const Operator * CheckedUint64Bounds ( const VectorSlotPair & feedback ) ; <nl> mmm a / test / unittests / compiler / redundancy - elimination - unittest . cc <nl> ppp b / test / unittests / compiler / redundancy - elimination - unittest . cc <nl> TEST_F ( RedundancyEliminationTest , CheckedUint32Bounds ) { <nl> Node * effect = graph ( ) - > start ( ) ; <nl> Node * control = graph ( ) - > start ( ) ; <nl> <nl> - Node * check1 = effect = <nl> - graph ( ) - > NewNode ( simplified ( ) - > CheckedUint32Bounds ( feedback1 ) , index , <nl> - length , effect , control ) ; <nl> + Node * check1 = effect = graph ( ) - > NewNode ( <nl> + simplified ( ) - > CheckedUint32Bounds ( <nl> + feedback1 , CheckBoundsParameters : : kDeoptOnOutOfBounds ) , <nl> + index , length , effect , control ) ; <nl> Reduction r1 = Reduce ( check1 ) ; <nl> ASSERT_TRUE ( r1 . Changed ( ) ) ; <nl> EXPECT_EQ ( r1 . replacement ( ) , check1 ) ; <nl> <nl> - Node * check2 = effect = <nl> - graph ( ) - > NewNode ( simplified ( ) - > CheckedUint32Bounds ( feedback2 ) , index , <nl> - length , effect , control ) ; <nl> + Node * check2 = effect = graph ( ) - > NewNode ( <nl> + simplified ( ) - > CheckedUint32Bounds ( <nl> + feedback2 , CheckBoundsParameters : : kDeoptOnOutOfBounds ) , <nl> + index , length , effect , control ) ; <nl> Reduction r2 = Reduce ( check2 ) ; <nl> ASSERT_TRUE ( r2 . Changed ( ) ) ; <nl> EXPECT_EQ ( r2 . replacement ( ) , check1 ) ; <nl>
[turbofan] Introduce aborting bounds checks.
v8/v8
7bb6dc0e06fa158df508bc8997f0fce4e33512a5
2019-02-08T16:14:23Z
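The v8/v8 record above folds the feedback slot and an out-of-bounds mode into one CheckBoundsParameters object, and the lowering then switches on that mode to emit either a deopt or an unreachable/abort path. The sketch below mirrors that shape with purely illustrative names; nothing in it is a real V8 API, and the "deopt" branch is faked with a log message plus a fallback value.

    // Sketch of a parameter object carrying a mode that selects between two
    // out-of-bounds behaviours, loosely mirroring CheckBoundsParameters above.
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    class BoundsCheckParams {
     public:
      enum Mode { kDeoptOnOutOfBounds, kAbortOnOutOfBounds };

      BoundsCheckParams(int feedback_slot, Mode mode)
          : feedback_slot_(feedback_slot), mode_(mode) {}

      int feedback_slot() const { return feedback_slot_; }
      Mode mode() const { return mode_; }

     private:
      int feedback_slot_;
      Mode mode_;
    };

    // "Lowered" bounds check: the mode decides what the failure path does.
    uint32_t CheckedUint32Bounds(uint32_t index, uint32_t limit,
                                 const BoundsCheckParams& params) {
      if (index < limit) return index;
      switch (params.mode()) {
        case BoundsCheckParams::kDeoptOnOutOfBounds:
          // Stand-in for a deopt: report and fall back to a safe value.
          std::cerr << "deopt: out of bounds (feedback slot "
                    << params.feedback_slot() << ")\n";
          return 0;
        case BoundsCheckParams::kAbortOnOutOfBounds:
          // The check was proven redundant; reaching here would be a compiler bug.
          std::abort();
      }
      return 0;  // unreachable, keeps compilers quiet
    }

    int main() {
      BoundsCheckParams params(7, BoundsCheckParams::kDeoptOnOutOfBounds);
      std::cout << CheckedUint32Bounds(3, 10, params) << '\n';   // prints 3
      std::cout << CheckedUint32Bounds(12, 10, params) << '\n';  // deopt path, prints 0
      return 0;
    }

Bundling the mode with the feedback in one parameter object lets a single operator kind describe both behaviours, so callers such as simplified-lowering only pick a mode instead of choosing between two different operators.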
mmm a / Marlin / Configuration . h <nl> ppp b / Marlin / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / Marlin / src / Marlin . cpp <nl> ppp b / Marlin / src / Marlin . cpp <nl> void setup ( ) { <nl> <nl> / / Load data from EEPROM if available ( or use defaults ) <nl> / / This also updates variables in the planner , elsewhere <nl> - ( void ) settings . load ( ) ; <nl> + # if ENABLED ( EEPROM_AUTO_INIT ) <nl> + if ( ! settings . load ( ) ) { <nl> + ( void ) settings . reset ( ) ; <nl> + ( void ) settings . save ( ) ; <nl> + SERIAL_ECHO_MSG ( " EEPROM Initialized " ) ; <nl> + } <nl> + # else <nl> + ( void ) settings . load ( ) ; <nl> + # endif <nl> <nl> # if HAS_M206_COMMAND <nl> / / Initialize current position based on home_offset <nl> mmm a / Marlin / src / module / configuration_store . cpp <nl> ppp b / Marlin / src / module / configuration_store . cpp <nl> void MarlinSettings : : postprocess ( ) { <nl> report_current_position ( ) ; <nl> } <nl> <nl> - # if ENABLED ( PRINTCOUNTER ) & & ENABLED ( EEPROM_SETTINGS ) <nl> + # if BOTH ( PRINTCOUNTER , EEPROM_SETTINGS ) <nl> # include " printcounter . h " <nl> - <nl> static_assert ( <nl> ! WITHIN ( STATS_EEPROM_ADDRESS , EEPROM_OFFSET , EEPROM_OFFSET + sizeof ( SettingsData ) ) & & <nl> ! WITHIN ( STATS_EEPROM_ADDRESS + sizeof ( printStatistics ) , EEPROM_OFFSET , EEPROM_OFFSET + sizeof ( SettingsData ) ) , <nl> mmm a / config / default / Configuration . h <nl> ppp b / config / default / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . 
<nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / 3DFabXYZ / Migbot / Configuration . h <nl> ppp b / config / examples / 3DFabXYZ / Migbot / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / AlephObjects / TAZ4 / Configuration . h <nl> ppp b / config / examples / AlephObjects / TAZ4 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / AliExpress / CL - 260 / Configuration . h <nl> ppp b / config / examples / AliExpress / CL - 260 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / AliExpress / UM2pExt / Configuration . h <nl> ppp b / config / examples / AliExpress / UM2pExt / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . 
<nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Anet / A2 / Configuration . h <nl> ppp b / config / examples / Anet / A2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Anet / A2plus / Configuration . h <nl> ppp b / config / examples / Anet / A2plus / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . 
, Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Anet / A6 / Configuration . h <nl> ppp b / config / examples / Anet / A6 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Anet / A8 / Configuration . h <nl> ppp b / config / examples / Anet / A8 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . 
) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / AnyCubic / i3 / Configuration . h <nl> ppp b / config / examples / AnyCubic / i3 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / ArmEd / Configuration . h <nl> ppp b / config / examples / ArmEd / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Azteeg / X5GT / Configuration . h <nl> ppp b / config / examples / Azteeg / X5GT / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / BIBO / TouchX / cyclops / Configuration . h <nl> ppp b / config / examples / BIBO / TouchX / cyclops / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . 
<nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / BIBO / TouchX / default / Configuration . h <nl> ppp b / config / examples / BIBO / TouchX / default / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / BQ / Hephestos / Configuration . h <nl> ppp b / config / examples / BQ / Hephestos / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . 
<nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / BQ / Hephestos_2 / Configuration . h <nl> ppp b / config / examples / BQ / Hephestos_2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / BQ / WITBOX / Configuration . h <nl> ppp b / config / examples / BQ / WITBOX / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Cartesio / Configuration . h <nl> ppp b / config / examples / Cartesio / Configuration . 
h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / CR - 10 / Configuration . h <nl> ppp b / config / examples / Creality / CR - 10 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / CR - 10S / Configuration . h <nl> ppp b / config / examples / Creality / CR - 10S / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . 
<nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / CR - 10_5S / Configuration . h <nl> ppp b / config / examples / Creality / CR - 10_5S / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / CR - 10mini / Configuration . h <nl> ppp b / config / examples / Creality / CR - 10mini / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . 
<nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / CR - 8 / Configuration . h <nl> ppp b / config / examples / Creality / CR - 8 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / Ender - 2 / Configuration . h <nl> ppp b / config / examples / Creality / Ender - 2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . 
<nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / Ender - 3 / Configuration . h <nl> ppp b / config / examples / Creality / Ender - 3 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Creality / Ender - 4 / Configuration . h <nl> ppp b / config / examples / Creality / Ender - 4 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Einstart - S / Configuration . h <nl> ppp b / config / examples / Einstart - S / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Felix / Configuration . h <nl> ppp b / config / examples / Felix / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . 
<nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Felix / DUAL / Configuration . h <nl> ppp b / config / examples / Felix / DUAL / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / FlashForge / CreatorPro / Configuration . h <nl> ppp b / config / examples / FlashForge / CreatorPro / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . 
, Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / FolgerTech / i3 - 2020 / Configuration . h <nl> ppp b / config / examples / FolgerTech / i3 - 2020 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Formbot / Raptor / Configuration . h <nl> ppp b / config / examples / Formbot / Raptor / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . 
) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Formbot / T_Rex_2 + / Configuration . h <nl> ppp b / config / examples / Formbot / T_Rex_2 + / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Formbot / T_Rex_3 / Configuration . h <nl> ppp b / config / examples / Formbot / T_Rex_3 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Geeetech / A10M / Configuration . h <nl> ppp b / config / examples / Geeetech / A10M / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Geeetech / A20M / Configuration . h <nl> ppp b / config / examples / Geeetech / A20M / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . 
<nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Geeetech / GT2560 / Configuration . h <nl> ppp b / config / examples / Geeetech / GT2560 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Geeetech / I3_Pro_X - GT2560 / Configuration . h <nl> ppp b / config / examples / Geeetech / I3_Pro_X - GT2560 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . 
<nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Geeetech / MeCreator2 / Configuration . h <nl> ppp b / config / examples / Geeetech / MeCreator2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h b / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h <nl> mmm a / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h <nl> ppp b / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . 
<nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h b / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h <nl> mmm a / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h <nl> ppp b / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h b / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h <nl> mmm a / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h <nl> ppp b / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . 
) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h b / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h <nl> mmm a / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h <nl> ppp b / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Infitary / i3 - M508 / Configuration . h <nl> ppp b / config / examples / Infitary / i3 - M508 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . 
) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / JGAurora / A5 / Configuration . h <nl> ppp b / config / examples / JGAurora / A5 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / MakerParts / Configuration . h <nl> ppp b / config / examples / MakerParts / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Malyan / M150 / Configuration . h <nl> ppp b / config / examples / Malyan / M150 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Malyan / M200 / Configuration . h <nl> ppp b / config / examples / Malyan / M200 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . 
<nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Micromake / C1 / basic / Configuration . h <nl> ppp b / config / examples / Micromake / C1 / basic / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Micromake / C1 / enhanced / Configuration . h <nl> ppp b / config / examples / Micromake / C1 / enhanced / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . 
<nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Mks / Robin / Configuration . h <nl> ppp b / config / examples / Mks / Robin / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Mks / Sbase / Configuration . h <nl> ppp b / config / examples / Mks / Sbase / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Printrbot / PrintrboardG2 / Configuration . 
h <nl> ppp b / config / examples / Printrbot / PrintrboardG2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / RapideLite / RL200 / Configuration . h <nl> ppp b / config / examples / RapideLite / RL200 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / RepRapPro / Huxley / Configuration . h <nl> ppp b / config / examples / RepRapPro / Huxley / Configuration . h <nl> Black rubber belt ( MXL ) , 18 - tooth aluminium pulley : 87 . 
489 step per mm ( Huxley <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / RepRapWorld / Megatronics / Configuration . h <nl> ppp b / config / examples / RepRapWorld / Megatronics / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / RigidBot / Configuration . h <nl> ppp b / config / examples / RigidBot / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . 
<nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / SCARA / Configuration . h <nl> ppp b / config / examples / SCARA / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / STM32 / Black_STM32F407VET6 / Configuration . h <nl> ppp b / config / examples / STM32 / Black_STM32F407VET6 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . 
<nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / STM32 / STM32F10 / Configuration . h <nl> ppp b / config / examples / STM32 / STM32F10 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / STM32 / STM32F4 / Configuration . h <nl> ppp b / config / examples / STM32 / STM32F4 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . 
<nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / STM32 / stm32f103ret6 / Configuration . h <nl> ppp b / config / examples / STM32 / stm32f103ret6 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Sanguinololu / Configuration . h <nl> ppp b / config / examples / Sanguinololu / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / TheBorg / Configuration . h <nl> ppp b / config / examples / TheBorg / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / TinyBoy2 / Configuration . h <nl> ppp b / config / examples / TinyBoy2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . 
<nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Tronxy / X1 / Configuration . h <nl> ppp b / config / examples / Tronxy / X1 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Tronxy / X3A / Configuration . h <nl> ppp b / config / examples / Tronxy / X3A / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . 
( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + / / # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Tronxy / X5S - 2E / Configuration . h <nl> ppp b / config / examples / Tronxy / X5S - 2E / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Tronxy / X5S / Configuration . h <nl> ppp b / config / examples / Tronxy / X5S / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . 
Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Tronxy / XY100 / Configuration . h <nl> ppp b / config / examples / Tronxy / XY100 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / UltiMachine / Archim1 / Configuration . h <nl> ppp b / config / examples / UltiMachine / Archim1 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . 
<nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / UltiMachine / Archim2 / Configuration . h <nl> ppp b / config / examples / UltiMachine / Archim2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / VORONDesign / Configuration . h <nl> ppp b / config / examples / VORONDesign / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . 
<nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Velleman / K8200 / Configuration . h <nl> ppp b / config / examples / Velleman / K8200 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Velleman / K8400 / Configuration . h <nl> ppp b / config / examples / Velleman / K8400 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / Velleman / K8400 / Dual - head / Configuration . 
h <nl> ppp b / config / examples / Velleman / K8400 / Dual - head / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / WASP / PowerWASP / Configuration . h <nl> ppp b / config / examples / WASP / PowerWASP / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / Wanhao / Duplicator 6 / Configuration . h b / config / examples / Wanhao / Duplicator 6 / Configuration . h <nl> mmm a / config / examples / Wanhao / Duplicator 6 / Configuration . 
h <nl> ppp b / config / examples / Wanhao / Duplicator 6 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / adafruit / ST7565 / Configuration . h <nl> ppp b / config / examples / adafruit / ST7565 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / Anycubic / Kossel / Configuration . h <nl> ppp b / config / examples / delta / Anycubic / Kossel / Configuration . 
h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / FLSUN / auto_calibrate / Configuration . h <nl> ppp b / config / examples / delta / FLSUN / auto_calibrate / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / FLSUN / kossel / Configuration . h <nl> ppp b / config / examples / delta / FLSUN / kossel / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . 
max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / FLSUN / kossel_mini / Configuration . h <nl> ppp b / config / examples / delta / FLSUN / kossel_mini / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / delta / Geeetech / Rostock 301 / Configuration . h b / config / examples / delta / Geeetech / Rostock 301 / Configuration . h <nl> mmm a / config / examples / delta / Geeetech / Rostock 301 / Configuration . h <nl> ppp b / config / examples / delta / Geeetech / Rostock 301 / Configuration . 
h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / Hatchbox_Alpha / Configuration . h <nl> ppp b / config / examples / delta / Hatchbox_Alpha / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / MKS / SBASE / Configuration . h <nl> ppp b / config / examples / delta / MKS / SBASE / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . 
<nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> diff - - git a / config / examples / delta / Tevo Little Monster / Configuration . h b / config / examples / delta / Tevo Little Monster / Configuration . h <nl> mmm a / config / examples / delta / Tevo Little Monster / Configuration . h <nl> ppp b / config / examples / delta / Tevo Little Monster / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / generic / Configuration . h <nl> ppp b / config / examples / delta / generic / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . 
<nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / kossel_mini / Configuration . h <nl> ppp b / config / examples / delta / kossel_mini / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / kossel_pro / Configuration . h <nl> ppp b / config / examples / delta / kossel_pro / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . 
<nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / delta / kossel_xl / Configuration . h <nl> ppp b / config / examples / delta / kossel_xl / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / gCreate / gMax1 . 5 + / Configuration . h <nl> ppp b / config / examples / gCreate / gMax1 . 5 + / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . 
<nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / makibox / Configuration . h <nl> ppp b / config / examples / makibox / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / tvrrug / Round2 / Configuration . h <nl> ppp b / config / examples / tvrrug / Round2 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - / / # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! 
<nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + / / # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl> mmm a / config / examples / wt150 / Configuration . h <nl> ppp b / config / examples / wt150 / Configuration . h <nl> <nl> <nl> / / @ section extras <nl> <nl> - / / <nl> - / / EEPROM <nl> - / / <nl> - / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores parameters in EEPROM <nl> - / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> - / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> - / / <nl> - # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> - / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> - # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + / * * <nl> + * EEPROM <nl> + * <nl> + * Persistent storage to preserve configurable settings across reboots . <nl> + * <nl> + * M500 - Store settings to EEPROM . <nl> + * M501 - Read settings from EEPROM . ( i . e . , Throw away unsaved changes ) <nl> + * M502 - Revert settings to " factory " defaults . ( Follow with M500 to init the EEPROM . ) <nl> + * / <nl> + # define EEPROM_SETTINGS / / Persistent storage with M500 and M501 <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + # if ENABLED ( EEPROM_SETTINGS ) <nl> + / / # define EEPROM_AUTO_INIT / / Init EEPROM automatically on any errors . <nl> + # endif <nl> <nl> / / <nl> / / Host Keepalive <nl>
Option to Auto-Initialize EEPROM ()
MarlinFirmware/Marlin
5dcb25664f829be801e20ab6032e782c1c65b4ba
2019-05-07T21:30:31Z
mmm a / src / objective - c / GRPCClient / private / GRPCChannel . h <nl> ppp b / src / objective - c / GRPCClient / private / GRPCChannel . h <nl> struct grpc_channel_credentials ; <nl> - ( void ) ref ; <nl> <nl> / * * <nl> - * Decrease the refcount of the channel . If the refcount of the channel decrease to 0 , start a timer <nl> - * to destroy the channel <nl> + * Decrease the refcount of the channel . If the refcount of the channel decrease to 0 , the channel <nl> + * is destroyed after 30 seconds . <nl> * / <nl> - ( void ) unref ; <nl> <nl>
More verbose channel destroy message
grpc/grpc
ed1e6c48e05e510186303e430a800078128dfc89
2018-10-20T19:09:19Z
mmm a / drivers / gles2 / shaders / canvas . glsl <nl> ppp b / drivers / gles2 / shaders / canvas . glsl <nl> FRAGMENT_SHADER_CODE <nl> highp float shadow_attenuation = 0 . 0 ; <nl> <nl> # ifdef USE_RGBA_SHADOWS <nl> - <nl> - # define SHADOW_DEPTH ( m_tex , m_uv ) dot ( texture2D ( ( m_tex ) , ( m_uv ) ) , vec4 ( 1 . 0 / ( 256 . 0 * 256 . 0 * 256 . 0 ) , 1 . 0 / ( 256 . 0 * 256 . 0 ) , 1 . 0 / 256 . 0 , 1 . 0 ) ) <nl> + # define SHADOW_DEPTH ( m_tex , m_uv ) dot ( texture2D ( ( m_tex ) , ( m_uv ) ) , vec4 ( 1 . 0 / ( 255 . 0 * 255 . 0 * 255 . 0 ) , 1 . 0 / ( 255 . 0 * 255 . 0 ) , 1 . 0 / 255 . 0 , 1 . 0 ) ) <nl> <nl> # else <nl> <nl> mmm a / drivers / gles2 / shaders / canvas_shadow . glsl <nl> ppp b / drivers / gles2 / shaders / canvas_shadow . glsl <nl> void main ( ) { <nl> <nl> # ifdef USE_RGBA_SHADOWS <nl> <nl> - highp vec4 comp = fract ( depth * vec4 ( 256 . 0 * 256 . 0 * 256 . 0 , 256 . 0 * 256 . 0 , 256 . 0 , 1 . 0 ) ) ; <nl> - comp - = comp . xxyz * vec4 ( 0 . 0 , 1 . 0 / 256 . 0 , 1 . 0 / 256 . 0 , 1 . 0 / 256 . 0 ) ; <nl> + highp vec4 comp = fract ( depth * vec4 ( 255 . 0 * 255 . 0 * 255 . 0 , 255 . 0 * 255 . 0 , 255 . 0 , 1 . 0 ) ) ; <nl> + comp - = comp . xxyz * vec4 ( 0 . 0 , 1 . 0 / 255 . 0 , 1 . 0 / 255 . 0 , 1 . 0 / 255 . 0 ) ; <nl> gl_FragColor = comp ; <nl> # else <nl> <nl> mmm a / drivers / gles2 / shaders / scene . glsl <nl> ppp b / drivers / gles2 / shaders / scene . glsl <nl> LIGHT_SHADER_CODE <nl> <nl> # ifdef USE_RGBA_SHADOWS <nl> <nl> - # define SHADOW_DEPTH ( m_val ) dot ( m_val , vec4 ( 1 . 0 / ( 256 . 0 * 256 . 0 * 256 . 0 ) , 1 . 0 / ( 256 . 0 * 256 . 0 ) , 1 . 0 / 256 . 0 , 1 . 0 ) ) <nl> + # define SHADOW_DEPTH ( m_val ) dot ( m_val , vec4 ( 1 . 0 / ( 255 . 0 * 255 . 0 * 255 . 0 ) , 1 . 0 / ( 255 . 0 * 255 . 0 ) , 1 . 0 / 255 . 0 , 1 . 0 ) ) <nl> <nl> # else <nl> <nl> FRAGMENT_SHADER_CODE <nl> # ifdef USE_RGBA_SHADOWS <nl> <nl> highp float depth = ( ( position_interp . z / position_interp . w ) + 1 . 0 ) * 0 . 5 + 0 . 0 ; / / bias <nl> - highp vec4 comp = fract ( depth * vec4 ( 256 . 0 * 256 . 0 * 256 . 0 , 256 . 0 * 256 . 0 , 256 . 0 , 1 . 0 ) ) ; <nl> - comp - = comp . xxyz * vec4 ( 0 . 0 , 1 . 0 / 256 . 0 , 1 . 0 / 256 . 0 , 1 . 0 / 256 . 0 ) ; <nl> + highp vec4 comp = fract ( depth * vec4 ( 255 . 0 * 255 . 0 * 255 . 0 , 255 . 0 * 255 . 0 , 255 . 0 , 1 . 0 ) ) ; <nl> + comp - = comp . xxyz * vec4 ( 0 . 0 , 1 . 0 / 255 . 0 , 1 . 0 / 255 . 0 , 1 . 0 / 255 . 0 ) ; <nl> gl_FragColor = comp ; <nl> <nl> # endif <nl> mmm a / drivers / gles3 / rasterizer_storage_gles3 . cpp <nl> ppp b / drivers / gles3 / rasterizer_storage_gles3 . cpp <nl> void RasterizerStorageGLES3 : : initialize ( ) { <nl> glGetIntegerv ( GL_MAX_TEXTURE_IMAGE_UNITS , & config . max_texture_image_units ) ; <nl> glGetIntegerv ( GL_MAX_TEXTURE_SIZE , & config . max_texture_size ) ; <nl> <nl> - config . use_rgba_2d_shadows = config . framebuffer_float_supported ; <nl> + config . use_rgba_2d_shadows = ! config . framebuffer_float_supported ; <nl> <nl> / / generic quadie for copying <nl> <nl> mmm a / drivers / gles3 / shaders / canvas . glsl <nl> ppp b / drivers / gles3 / shaders / canvas . glsl <nl> FRAGMENT_SHADER_CODE <nl> <nl> # ifdef USE_RGBA_SHADOWS <nl> <nl> - # define SHADOW_DEPTH ( m_tex , m_uv ) dot ( texture ( ( m_tex ) , ( m_uv ) ) , vec4 ( 1 . 0 / ( 256 . 0 * 256 . 0 * 256 . 0 ) , 1 . 0 / ( 256 . 0 * 256 . 0 ) , 1 . 0 / 256 . 0 , 1 . 0 ) ) <nl> + # define SHADOW_DEPTH ( m_tex , m_uv ) dot ( texture ( ( m_tex ) , ( m_uv ) ) , vec4 ( 1 . 0 / ( 255 . 0 * 255 . 0 * 255 . 0 ) , 1 . 
0 / ( 255 . 0 * 255 . 0 ) , 1 . 0 / 255 . 0 , 1 . 0 ) ) <nl> <nl> # else <nl> <nl> mmm a / drivers / gles3 / shaders / canvas_shadow . glsl <nl> ppp b / drivers / gles3 / shaders / canvas_shadow . glsl <nl> void main ( ) { <nl> <nl> # ifdef USE_RGBA_SHADOWS <nl> <nl> - highp vec4 comp = fract ( depth * vec4 ( 256 . 0 * 256 . 0 * 256 . 0 , 256 . 0 * 256 . 0 , 256 . 0 , 1 . 0 ) ) ; <nl> - comp - = comp . xxyz * vec4 ( 0 . 0 , 1 . 0 / 256 . 0 , 1 . 0 / 256 . 0 , 1 . 0 / 256 . 0 ) ; <nl> + highp vec4 comp = fract ( depth * vec4 ( 255 . 0 * 255 . 0 * 255 . 0 , 255 . 0 * 255 . 0 , 255 . 0 , 1 . 0 ) ) ; <nl> + comp - = comp . xxyz * vec4 ( 0 . 0 , 1 . 0 / 255 . 0 , 1 . 0 / 255 . 0 , 1 . 0 / 255 . 0 ) ; <nl> distance_buf = comp ; <nl> # else <nl> <nl>
Merge pull request from raphael10241024/fix_shadow
godotengine/godot
47df673d737f10dcc7dbcc32a574a3589bddc3f7
2019-07-29T08:11:15Z
mmm a / cocos / scripting / auto - generated <nl> ppp b / cocos / scripting / auto - generated <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 893da8ccfb4ed7fa754c483a90dc4e5248c36e03 <nl> + Subproject commit 2f3c5316657e64ec38b8ed3ea6826eb48c46f32c <nl>
update the reference to cocos2d-x repo.
cocos2d/cocos2d-x
56ef19714642b4a74b227d1f20b2a034613c19bd
2013-10-24T10:36:36Z
mmm a / src / core / socket . cc <nl> ppp b / src / core / socket . cc <nl> ssize_t swSocket_write_blocking ( int __fd , const void * __data , size_t __len ) <nl> <nl> while ( written < ( ssize_t ) __len ) <nl> { <nl> - n = write ( __fd , ( char * ) __data + written , __len - written ) ; <nl> + n = write ( __fd , ( char * ) __data + written , __len - written ) ; <nl> if ( n < 0 ) <nl> { <nl> if ( errno = = EINTR ) <nl> void swSocket_free ( swSocket * sock ) <nl> int swSocket_bind ( int sock , int type , const char * host , int * port ) <nl> { <nl> int ret ; <nl> - swSocketAddress address ; <nl> + swSocketAddress address = { 0 } ; <nl> <nl> / / SO_REUSEADDR option <nl> int option = 1 ; <nl> int swSocket_bind ( int sock , int type , const char * host , int * port ) <nl> / / unix socket <nl> if ( type = = SW_SOCK_UNIX_DGRAM | | type = = SW_SOCK_UNIX_STREAM ) <nl> { <nl> - bzero ( & ( address . addr . un ) , sizeof ( address . addr . un ) ) ; <nl> unlink ( host ) ; <nl> address . addr . un . sun_family = AF_UNIX ; <nl> strncpy ( address . addr . un . sun_path , host , sizeof ( address . addr . un . sun_path ) - 1 ) ; <nl> - ret = bind ( sock , ( struct sockaddr * ) & ( address . addr . un ) , sizeof ( address . addr . un ) ) ; <nl> + ret = bind ( sock , ( struct sockaddr * ) & address . addr . un , sizeof ( address . addr . un ) ) ; <nl> } <nl> / / IPv6 <nl> else if ( type > SW_SOCK_UDP ) <nl> { <nl> - bzero ( & ( address . addr . inet_v6 ) , sizeof ( address . addr . inet_v6 ) ) ; <nl> - if ( inet_pton ( AF_INET6 , host , & ( address . addr . inet_v6 . sin6_addr ) ) < 0 ) <nl> + if ( inet_pton ( AF_INET6 , host , & address . addr . inet_v6 . sin6_addr ) < 0 ) <nl> { <nl> swSysWarn ( " inet_pton ( AF_INET6 , % s ) failed " , host ) ; <nl> return SW_ERR ; <nl> } <nl> address . addr . inet_v6 . sin6_port = htons ( * port ) ; <nl> address . addr . inet_v6 . sin6_family = AF_INET6 ; <nl> - ret = bind ( sock , ( struct sockaddr * ) & ( address . addr . inet_v6 ) , sizeof ( address . addr . inet_v6 ) ) ; <nl> + ret = bind ( sock , ( struct sockaddr * ) & address . addr . inet_v6 , sizeof ( address . addr . inet_v6 ) ) ; <nl> if ( ret = = 0 & & * port = = 0 ) <nl> { <nl> address . len = sizeof ( address . addr . inet_v6 ) ; <nl> - if ( getsockname ( sock , ( struct sockaddr * ) & ( address . addr . inet_v6 ) , & ( address . len ) ) ! = - 1 ) <nl> + if ( getsockname ( sock , ( struct sockaddr * ) & address . addr . inet_v6 , & address . len ) ! = - 1 ) <nl> { <nl> * port = ntohs ( address . addr . inet_v6 . sin6_port ) ; <nl> } <nl> int swSocket_bind ( int sock , int type , const char * host , int * port ) <nl> / / IPv4 <nl> else <nl> { <nl> - bzero ( & ( address . addr . inet_v4 ) , sizeof ( address . addr . inet_v4 ) ) ; <nl> - if ( inet_pton ( AF_INET , host , & ( address . addr . inet_v4 . sin_addr ) ) < 0 ) <nl> + if ( inet_pton ( AF_INET , host , & address . addr . inet_v4 . sin_addr ) < 0 ) <nl> { <nl> swSysWarn ( " inet_pton ( AF_INET , % s ) failed " , host ) ; <nl> return SW_ERR ; <nl> } <nl> address . addr . inet_v4 . sin_port = htons ( * port ) ; <nl> address . addr . inet_v4 . sin_family = AF_INET ; <nl> - ret = bind ( sock , ( struct sockaddr * ) & ( address . addr . inet_v4 ) , sizeof ( address . addr . inet_v4 ) ) ; <nl> + ret = bind ( sock , ( struct sockaddr * ) & address . addr . inet_v4 , sizeof ( address . addr . inet_v4 ) ) ; <nl> if ( ret = = 0 & & * port = = 0 ) <nl> { <nl> address . len = sizeof ( address . addr . 
inet_v4 ) ; <nl> - if ( getsockname ( sock , ( struct sockaddr * ) & ( address . addr . inet_v4 ) , & ( address . len ) ) ! = - 1 ) <nl> + if ( getsockname ( sock , ( struct sockaddr * ) & address . addr . inet_v4 , & address . len ) ! = - 1 ) <nl> { <nl> * port = ntohs ( address . addr . inet_v4 . sin_port ) ; <nl> } <nl>
modify the code style ()
swoole/swoole-src
846facb01a7ac3f1eaa7074ba40515186a250732
2020-01-15T10:54:20Z
mmm a / example / notebooks / composite_symbol . ipynb <nl> ppp b / example / notebooks / composite_symbol . ipynb <nl> <nl> " cell_type " : " markdown " , <nl> " metadata " : { } , <nl> " source " : [ <nl> - " We will make the other factory with ` ` ` strde = ( 2 , 2 ) ` ` ` " <nl> + " We will make the other factory with ` ` ` stride = ( 2 , 2 ) ` ` ` " <nl> ] <nl> } , <nl> { <nl>
Merge pull request from wangg12/patch-1
apache/incubator-mxnet
62f05a5a3305cab317a24a948f5f34eff22c652b
2015-11-20T07:11:35Z
mmm a / Marlin / src / feature / tmc_util . cpp <nl> ppp b / Marlin / src / feature / tmc_util . cpp <nl> <nl> bool stealthchop_was_enabled = st . en_pwm_mode ( ) ; <nl> <nl> st . TCOOLTHRS ( 0xFFFFF ) ; <nl> - # if STEALTHCHOP_ENABLED <nl> - st . en_pwm_mode ( false ) ; <nl> - # endif <nl> + st . en_pwm_mode ( false ) ; <nl> st . diag1_stall ( true ) ; <nl> <nl> return stealthchop_was_enabled ; <nl> } <nl> void tmc_disable_stallguard ( TMC2130Stepper & st , const bool restore_stealth ) { <nl> st . TCOOLTHRS ( 0 ) ; <nl> - # if STEALTHCHOP_ENABLED <nl> - st . en_pwm_mode ( restore_stealth ) ; <nl> - # endif <nl> + st . en_pwm_mode ( restore_stealth ) ; <nl> st . diag1_stall ( false ) ; <nl> } <nl> bool tmc_enable_stallguard ( TMC2660Stepper ) { <nl> mmm a / Marlin / src / feature / tmc_util . h <nl> ppp b / Marlin / src / feature / tmc_util . h <nl> class TMCStorage { <nl> } <nl> <nl> struct { <nl> - # if STEALTHCHOP_ENABLED <nl> + # if HAS_STEALTHCHOP <nl> bool stealthChop_enabled = false ; <nl> # endif <nl> # if ENABLED ( HYBRID_THRESHOLD ) <nl> class TMCMarlin : public TMC , public TMCStorage < AXIS_LETTER , DRIVER_ID > { <nl> TMC : : rms_current ( mA , mult ) ; <nl> } <nl> <nl> - # if STEALTHCHOP_ENABLED <nl> + # if HAS_STEALTHCHOP <nl> inline void refresh_stepping_mode ( ) { this - > en_pwm_mode ( this - > stored . stealthChop_enabled ) ; } <nl> inline bool get_stealthChop_status ( ) { return this - > en_pwm_mode ( ) ; } <nl> # endif <nl> class TMCMarlin < TMC2208Stepper , AXIS_LETTER , DRIVER_ID > : public TMC2208Stepper , <nl> TMC2208Stepper : : rms_current ( mA , mult ) ; <nl> } <nl> <nl> - # if STEALTHCHOP_ENABLED <nl> + # if HAS_STEALTHCHOP <nl> inline void refresh_stepping_mode ( ) { en_spreadCycle ( ! this - > stored . stealthChop_enabled ) ; } <nl> inline bool get_stealthChop_status ( ) { return ! this - > en_spreadCycle ( ) ; } <nl> # endif <nl> class TMCMarlin < TMC2208Stepper , AXIS_LETTER , DRIVER_ID > : public TMC2208Stepper , <nl> # if ENABLED ( HYBRID_THRESHOLD ) <nl> this - > stored . hybrid_thrs = _tmc_thrs ( this - > microsteps ( ) , this - > TPWMTHRS ( ) , planner . settings . axis_steps_per_mm [ spmm_id ] ) ; <nl> # endif <nl> - # if STEALTHCHOP_ENABLED <nl> - this - > stored . stealthChop_enabled = ! this - > en_spreadCycle ( ) ; <nl> - # endif <nl> } <nl> <nl> inline void refresh_stepper_current ( ) { rms_current ( this - > val_mA ) ; } <nl> mmm a / Marlin / src / lcd / menu / menu_tmc . cpp <nl> ppp b / Marlin / src / lcd / menu / menu_tmc . cpp <nl> void menu_tmc_current ( ) { <nl> <nl> # endif <nl> <nl> - # if STEALTHCHOP_ENABLED <nl> + # if HAS_STEALTHCHOP <nl> <nl> # define TMC_EDIT_STEP_MODE ( ST ) MENU_ITEM_EDIT_CALLBACK ( bool , MSG_ # # ST , & stepper # # ST . stored . stealthChop_enabled , refresh_stepping_mode_ # # ST ) <nl> <nl> void menu_tmc ( ) { <nl> # if ENABLED ( SENSORLESS_HOMING ) <nl> MENU_ITEM ( submenu , MSG_TMC_HOMING_THRS , menu_tmc_homing_thrs ) ; <nl> # endif <nl> - # if STEALTHCHOP_ENABLED <nl> + # if HAS_STEALTHCHOP <nl> MENU_ITEM ( submenu , MSG_TMC_STEPPING_MODE , menu_tmc_step_mode ) ; <nl> # endif <nl> END_MENU ( ) ; <nl>
Fix TMC-related compile issues. ()
MarlinFirmware/Marlin
e5801b75f89254d088b423499719b5ccbc48cb0d
2019-01-21T05:54:57Z
mmm a / tensorflow / python / ops / ragged / ragged_tensor . py <nl> ppp b / tensorflow / python / ops / ragged / ragged_tensor . py <nl> def __init__ ( self , values , row_partition , internal = False ) : <nl> values : A potentially ragged tensor of any dtype and shape ` [ nvals , . . . ] ` . <nl> row_partition : A ` RowPartition ` object , representing the arrangement of <nl> the lists at the top level . <nl> - internal : Must contain a private " key " value to validate that this <nl> - constructor is not called from user code . Otherwise , an exception will <nl> - be raised . <nl> + internal : True if the constructor is being called by one of the factory <nl> + methods . If false , an exception will be raised . <nl> <nl> Raises : <nl> ValueError : If internal = False . Note that this method is intended only <nl> def __init__ ( self , values , row_partition , internal = False ) : <nl> row_partition is not a ` RowPartition ` . <nl> " " " <nl> <nl> - if internal is not _ragged_factory_key : <nl> + if not internal : <nl> raise ValueError ( " RaggedTensor constructor is private ; please use one " <nl> " of the factory methods instead ( e . g . , " <nl> " RaggedTensor . from_row_lengths ( ) ) " ) <nl> def _from_row_partition ( cls , values , row_partition , validate = True ) : <nl> row_partition = row_partition . with_dependencies ( checks ) <nl> return cls ( <nl> values = values , <nl> - internal = _ragged_factory_key , <nl> + internal = True , <nl> row_partition = row_partition ) <nl> <nl> @ classmethod <nl> def with_values ( self , new_values ) : <nl> new_values = new_values . with_row_splits_dtype ( dtypes . int64 ) <nl> return self . with_row_splits_dtype ( dtypes . int64 ) . with_values ( new_values ) <nl> return RaggedTensor ( <nl> - values = new_values , <nl> - row_partition = self . _row_partition , <nl> - internal = _ragged_factory_key ) <nl> + values = new_values , row_partition = self . _row_partition , internal = True ) <nl> <nl> def with_flat_values ( self , new_values ) : <nl> " " " Returns a copy of ` self ` with ` flat_values ` replaced by ` new_value ` . <nl> def with_row_splits_dtype ( self , dtype ) : <nl> return RaggedTensor ( <nl> values = current_values . with_row_splits_dtype ( dtype ) , <nl> row_partition = self . _row_partition . with_row_splits_dtype ( dtype ) , <nl> - internal = _ragged_factory_key ) <nl> + internal = True ) <nl> else : <nl> return RaggedTensor ( <nl> values = current_values , <nl> row_partition = self . _row_partition . with_row_splits_dtype ( dtype ) , <nl> - internal = _ragged_factory_key ) <nl> + internal = True ) <nl> <nl> def merge_dims ( self , outer_axis , inner_axis ) : <nl> " " " Merges outer_axis . . . inner_axis into a single dimension . <nl> def _from_components ( self , tensor_list ) : <nl> result = RaggedTensor ( <nl> result , <nl> RowPartition . from_row_splits ( row_splits , validate = False ) , <nl> - internal = _ragged_factory_key ) <nl> + internal = True ) <nl> return result <nl> <nl> # The RaggedTensorSpec tensor_list encoding uses to / from_variant ops <nl> def _get_optional_partition_dtype ( values ) : <nl> <nl> <nl> ops . no_gradient ( " RaggedTensorToVariant " ) <nl> - <nl> - <nl> - _ragged_factory_key = object ( ) # unique private object <nl> mmm a / tensorflow / python / ops / ragged / ragged_tensor_test . py <nl> ppp b / tensorflow / python / ops / ragged / ragged_tensor_test . py <nl> def testRaggedTensorConstruction ( self ) : <nl> values = constant_op . 
constant ( [ ' a ' , ' b ' , ' c ' , ' d ' , ' e ' , ' f ' , ' g ' ] ) <nl> row_splits = constant_op . constant ( [ 0 , 2 , 2 , 5 , 6 , 7 ] , dtypes . int64 ) <nl> rp = RowPartition . from_row_splits ( row_splits ) <nl> - rt = RaggedTensor ( <nl> - values = values , <nl> - row_partition = rp , <nl> - internal = ragged_tensor . _ragged_factory_key ) <nl> + rt = RaggedTensor ( values = values , row_partition = rp , internal = True ) <nl> <nl> self . assertAllEqual ( rt , <nl> [ [ b ' a ' , b ' b ' ] , [ ] , [ b ' c ' , b ' d ' , b ' e ' ] , [ b ' f ' ] , [ b ' g ' ] ] ) <nl> def testRaggedTensorConstructionErrors ( self ) : <nl> <nl> with self . assertRaisesRegexp ( TypeError , <nl> ' values must be a Tensor or RaggedTensor ' ) : <nl> - RaggedTensor ( <nl> - values = range ( 7 ) , <nl> - row_partition = rp , <nl> - internal = ragged_tensor . _ragged_factory_key ) <nl> + RaggedTensor ( values = range ( 7 ) , row_partition = rp , internal = True ) <nl> <nl> with self . assertRaisesRegexp ( TypeError , <nl> ' row_partition must be a RowPartition ' ) : <nl> - RaggedTensor ( <nl> - values = values , <nl> - row_partition = [ 0 , 2 , 2 , 5 , 6 , 7 ] , <nl> - internal = ragged_tensor . _ragged_factory_key ) <nl> + RaggedTensor ( values = values , row_partition = [ 0 , 2 , 2 , 5 , 6 , 7 ] , <nl> + internal = True ) <nl> <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> # RaggedTensor Factory Ops <nl>
Make it harder to call the private RaggedTensor constructor from user code.
tensorflow/tensorflow
4bfab04344d478a79ce387106872f69fa8711f95
2020-05-01T15:46:36Z
mmm a / README . md <nl> ppp b / README . md <nl> git clone https : / / github . com / mozilla / DeepSpeech <nl> If you want to use the pre - trained English model for performing speech - to - text , you can download it ( along with other important inference material ) from the [ DeepSpeech releases page ] ( https : / / github . com / mozilla / DeepSpeech / releases ) . Alternatively , you can run the following command to download and unzip the files in your current directory : <nl> <nl> ` ` ` bash <nl> - wget - O - https : / / github . com / mozilla / DeepSpeech / releases / download / v0 . 3 . 0 / deepspeech - 0 . 3 . 0 - models . tar . gz | tar xvfz - <nl> + wget https : / / github . com / mozilla / DeepSpeech / releases / download / v0 . 4 . 0 / deepspeech - 0 . 4 . 0 - models . tar . gz <nl> + tar xvfz deepspeech - 0 . 4 . 0 - models . tar . gz <nl> ` ` ` <nl> <nl> # # Using the model <nl> python3 util / taskcluster . py - - arch osx - - target . <nl> <nl> also , if you need some binaries different than current master , like ` v0 . 2 . 0 - alpha . 6 ` , you can use ` - - branch ` : <nl> ` ` ` bash <nl> - python3 util / taskcluster . py - - branch " v0 . 2 . 0 - alpha . 6 - - target . " <nl> + python3 util / taskcluster . py - - branch " v0 . 2 . 0 - alpha . 6 " - - target " . " <nl> ` ` ` <nl> <nl> This will download ` native_client . tar . xz ` which includes the deepspeech binary and associated libraries , and extract it into the current folder . ` taskcluster . py ` will download binaries for Linux / x86_64 by default , but you can override that behavior with the ` - - arch ` parameter . See the help info with ` python util / taskcluster . py - h ` for more details . Proper DeepSpeech or TensorFlow ' s branch can be specified as well . <nl> mmm a / VERSION <nl> ppp b / VERSION <nl> @ @ - 1 + 1 @ @ <nl> - 0 . 4 . 0 - alpha . 3 <nl> + 0 . 4 . 0 <nl> mmm a / examples / ffmpeg_vad_streaming / package . json <nl> ppp b / examples / ffmpeg_vad_streaming / package . json <nl> <nl> } , <nl> " dependencies " : { <nl> " argparse " : " ^ 1 . 0 . 10 " , <nl> - " deepspeech " : " ^ 0 . 3 . 0 " , <nl> + " deepspeech " : " ^ 0 . 4 . 0 " , <nl> " node - vad " : " ^ 1 . 1 . 1 " , <nl> " util " : " ^ 0 . 11 . 1 " <nl> } , <nl> mmm a / examples / mic_vad_streaming / requirements . txt <nl> ppp b / examples / mic_vad_streaming / requirements . txt <nl> <nl> - deepspeech ~ = 0 . 3 . 0 <nl> + deepspeech ~ = 0 . 4 . 0 <nl> pyaudio ~ = 0 . 2 . 11 <nl> webrtcvad ~ = 2 . 0 . 10 <nl> halo ~ = 0 . 0 . 18 <nl> mmm a / examples / vad_transcriber / requirements . txt <nl> ppp b / examples / vad_transcriber / requirements . txt <nl> <nl> - deepspeech = = 0 . 3 . 0 <nl> + deepspeech = = 0 . 4 . 0 <nl> webrtcvad <nl> pyqt5 <nl> mmm a / examples / vad_transcriber / wavTranscription . md <nl> ppp b / examples / vad_transcriber / wavTranscription . md <nl> Set the aggressiveness mode , to an integer between 0 and 3 . <nl> <nl> ` ` ` <nl> ( venv ) ~ / Deepspeech / examples / vad_transcriber <nl> - $ python3 audioTranscript_cmd . py - - aggressive 1 - - audio . / audio / guido - van - rossum . wav - - model . / models / 0 . 3 . 0 / <nl> + $ python3 audioTranscript_cmd . py - - aggressive 1 - - audio . / audio / guido - van - rossum . wav - - model . / models / 0 . 4 . 0 / <nl> <nl> <nl> Filename Duration ( s ) Inference Time ( s ) Model Load Time ( s ) LM Load Time ( s ) <nl> mmm a / native_client / README . md <nl> ppp b / native_client / README . 
md <nl> make package <nl> make npm - pack <nl> ` ` ` <nl> <nl> - This will create the package ` deepspeech - 0 . 3 . 0 . tgz ` in ` native_client / javascript ` . <nl> + This will create the package ` deepspeech - VERSION . tgz ` in ` native_client / javascript ` . <nl> <nl> # # Building the CTC decoder package <nl> <nl>
Merge pull request from mozilla/update-to-0.4
mozilla/DeepSpeech
48ad71142bfa7f305fb83cf975aafd99a6256f6c
2019-01-07T15:05:45Z
mmm a / spec / asar - spec . js <nl> ppp b / spec / asar - spec . js <nl> describe ( ' asar package ' , function ( ) { <nl> fs . readdirSync ( asar ) <nl> } , / ENOTDIR / ) <nl> } ) <nl> + <nl> + it ( ' is reset to its original value when execSync throws an error ' , function ( ) { <nl> + process . noAsar = false <nl> + assert . throws ( function ( ) { <nl> + ChildProcess . execSync ( path . join ( __dirname , ' does - not - exist . txt ' ) ) <nl> + } ) <nl> + assert . equal ( process . noAsar , false ) <nl> + } ) <nl> } ) <nl> } ) <nl> <nl>
Add failing spec for restoring noAsar value
electron/electron
b186d752dae356614c8fafa3191f70e69f3837fa
2016-09-06T20:40:15Z
mmm a / buildscripts / eslint . py <nl> ppp b / buildscripts / eslint . py <nl> def _lint_files ( eslint , files ) : <nl> " files that were skipped " ) <nl> sys . exit ( 1 ) <nl> <nl> + return True <nl> + <nl> def lint_patch ( eslint , infile ) : <nl> " " " Lint patch command entry point <nl> " " " <nl> def main ( ) : <nl> usage = " % prog [ - e < eslint > ] [ - d ] lint | lint - patch | fix [ glob patterns ] " <nl> description = " lint runs ESLint on provided patterns or all . js files under jstests / " \ <nl> " and src / mongo . lint - patch runs ESLint against . js files modified in the " \ <nl> - " patch file ( for upload . py ) . fix runs ESLint with - - fix on provided patterns " \ <nl> + " provided patch file ( for upload . py ) . " \ <nl> + " fix runs ESLint with - - fix on provided patterns " \ <nl> " or files under jstests / and src / mongo . " <nl> epilog = " * Unless you specify - d a separate ESLint process will be launched for every file " <nl> parser = OptionParser ( ) <nl> def main ( ) : <nl> <nl> if len ( args ) > 1 : <nl> command = args [ 1 ] <nl> - <nl> searchlist = args [ 2 : ] <nl> if not searchlist : <nl> searchlist = [ " jstests / " , " src / mongo / " ] <nl> def main ( ) : <nl> if command = = " lint " : <nl> success = lint ( options . eslint , options . dirmode , searchlist ) <nl> elif command = = " lint - patch " : <nl> - success = lint_patch ( options . eslint , searchlist ) <nl> + if not args [ 2 : ] : <nl> + success = False <nl> + print ( " You must provide the patch ' s fully qualified file name with lint - patch " ) <nl> + else : <nl> + success = lint_patch ( options . eslint , searchlist ) <nl> elif command = = " fix " : <nl> success = autofix_func ( options . eslint , options . dirmode , searchlist ) <nl> else : <nl>
SERVER-23254 eslint.py returns 1 on successful patch lint instead of 0
mongodb/mongo
20ca6518797b67206d1f23d097c61c78a3ad8810
2016-03-21T21:57:34Z
mmm a / tests / test_core . py <nl> ppp b / tests / test_core . py <nl> def test_dylink_postsets_chunking ( self ) : <nl> <nl> @ needs_dlfcn <nl> def test_dylink_syslibs ( self ) : # one module uses libcxx , need to force its inclusion when it isn ' t the main <nl> - self . banned_js_engines = [ NODE_JS , V8_ENGINE ] # https : / / bugs . chromium . org / p / v8 / issues / detail ? id = 9678 <nl> + # https : / / github . com / emscripten - core / emscripten / issues / 10571 <nl> + return self . skipTest ( ' Currently not working due to duplicate symbol errors in wasm - ld ' ) <nl> <nl> def test ( syslibs , expect_pass = True , need_reverse = True ) : <nl> print ( ' syslibs ' , syslibs , self . get_setting ( ' ASSERTIONS ' ) ) <nl> class Derived : public Base { <nl> <nl> @ needs_dlfcn <nl> def test_dylink_raii_exceptions ( self ) : <nl> - self . banned_js_engines = [ NODE_JS , V8_ENGINE ] # https : / / bugs . chromium . org / p / v8 / issues / detail ? id = 9678 <nl> - <nl> self . emcc_args + = [ ' - s ' , ' DISABLE_EXCEPTION_CATCHING = 0 ' ] <nl> <nl> self . dylink_test ( main = r ' ' ' <nl>
Remove banned engines from dylink tests ()
emscripten-core/emscripten
5b548c7dbe6668d3bcc1e6e076161ab87d236699
2020-02-27T02:21:17Z
mmm a / src / misc_utilities / CMakeLists . txt <nl> ppp b / src / misc_utilities / CMakeLists . txt <nl> <nl> <nl> ADD_EXECUTABLE ( openalpr - utils - sortstate sortstate . cpp ) <nl> - TARGET_LINK_LIBRARIES ( sortstate - openalpr - utils <nl> + TARGET_LINK_LIBRARIES ( openalpr - utils - sortstate <nl> openalpr <nl> support <nl> $ { OpenCV_LIBS } <nl> TARGET_LINK_LIBRARIES ( sortstate - openalpr - utils <nl> ) <nl> <nl> ADD_EXECUTABLE ( openalpr - utils - classifychars classifychars . cpp ) <nl> - TARGET_LINK_LIBRARIES ( classifychars - openalpr - utils <nl> + TARGET_LINK_LIBRARIES ( openalpr - utils - classifychars <nl> openalpr <nl> support <nl> $ { OpenCV_LIBS } <nl>
Updated names to be prefixed with openalpr-utils-
openalpr/openalpr
a37d7a95888e002d6e33a166633fc54a6437f935
2014-12-16T02:11:09Z
mmm a / src / google / protobuf / stubs / mathlimits . cc <nl> ppp b / src / google / protobuf / stubs / mathlimits . cc <nl> <nl> namespace google { <nl> namespace protobuf { <nl> <nl> - # define DEF_COMMON_LIMITS ( Type ) <nl> - # define DEF_UNSIGNED_INT_LIMITS ( Type ) <nl> - # define DEF_SIGNED_INT_LIMITS ( Type ) <nl> - # define DEF_PRECISION_LIMITS ( Type ) <nl> - <nl> / / http : / / en . wikipedia . org / wiki / Quadruple_precision_floating - point_format # Double - double_arithmetic <nl> / / With some compilers ( gcc 4 . 6 . x ) on some platforms ( powerpc64 ) , <nl> / / " long double " is implemented as a pair of double : " double double " format . <nl> namespace protobuf { <nl> / / max ( DBL_EPSILON * DBL_EPSILON , kEpsilon ) rather than a multiple of kEpsilon . <nl> <nl> # define DEF_FP_LIMITS ( Type , PREFIX ) \ <nl> - DEF_COMMON_LIMITS ( Type ) \ <nl> const Type MathLimits < Type > : : kPosMin = PREFIX # # _MIN ; \ <nl> const Type MathLimits < Type > : : kPosMax = PREFIX # # _MAX ; \ <nl> const Type MathLimits < Type > : : kMin = - MathLimits < Type > : : kPosMax ; \ <nl> const Type MathLimits < Type > : : kEpsilon = PREFIX # # _EPSILON ; \ <nl> const Type MathLimits < Type > : : kStdError = \ <nl> 32 * ( DBL_EPSILON * DBL_EPSILON > MathLimits < Type > : : kEpsilon \ <nl> ? DBL_EPSILON * DBL_EPSILON : MathLimits < Type > : : kEpsilon ) ; \ <nl> - DEF_PRECISION_LIMITS ( Type ) \ <nl> const Type MathLimits < Type > : : kNaN = HUGE_VAL - HUGE_VAL ; \ <nl> const Type MathLimits < Type > : : kPosInf = HUGE_VAL ; \ <nl> const Type MathLimits < Type > : : kNegInf = - HUGE_VAL ; <nl> <nl> - / / The following are * not * casts ! <nl> - DEF_SIGNED_INT_LIMITS ( int8 ) <nl> - DEF_SIGNED_INT_LIMITS ( int16 ) / / NOLINT ( readability / casting ) <nl> - DEF_SIGNED_INT_LIMITS ( int32 ) / / NOLINT ( readability / casting ) <nl> - DEF_SIGNED_INT_LIMITS ( int64 ) / / NOLINT ( readability / casting ) <nl> - DEF_UNSIGNED_INT_LIMITS ( uint8 ) <nl> - DEF_UNSIGNED_INT_LIMITS ( uint16 ) / / NOLINT ( readability / casting ) <nl> - DEF_UNSIGNED_INT_LIMITS ( uint32 ) / / NOLINT ( readability / casting ) <nl> - DEF_UNSIGNED_INT_LIMITS ( uint64 ) / / NOLINT ( readability / casting ) <nl> - <nl> - DEF_SIGNED_INT_LIMITS ( long int ) <nl> - DEF_UNSIGNED_INT_LIMITS ( unsigned long int ) <nl> - <nl> DEF_FP_LIMITS ( float , FLT ) <nl> DEF_FP_LIMITS ( double , DBL ) <nl> DEF_FP_LIMITS ( long double , LDBL ) ; <nl> <nl> - # undef DEF_COMMON_LIMITS <nl> - # undef DEF_SIGNED_INT_LIMITS <nl> - # undef DEF_UNSIGNED_INT_LIMITS <nl> # undef DEF_FP_LIMITS <nl> - # undef DEF_PRECISION_LIMITS <nl> } / / namespace protobuf <nl> } / / namespace google <nl>
mathlimits: remove no-op macro expansion code
protocolbuffers/protobuf
2350e20cae1a9a1f54147c6e18e1a0da706cca96
2019-03-28T00:07:57Z
mmm a / shell / app / atom_main_delegate . cc <nl> ppp b / shell / app / atom_main_delegate . cc <nl> bool IsBrowserProcess ( base : : CommandLine * cmd ) { <nl> return process_type . empty ( ) ; <nl> } <nl> <nl> + bool IsSandboxEnabled ( base : : CommandLine * command_line ) { <nl> + return command_line - > HasSwitch ( switches : : kEnableSandbox ) | | <nl> + ! command_line - > HasSwitch ( service_manager : : switches : : kNoSandbox ) ; <nl> + } <nl> + <nl> / / Returns true if this subprocess type needs the ResourceBundle initialized <nl> / / and resources loaded . <nl> bool SubprocessNeedsResourceBundle ( const std : : string & process_type ) { <nl> content : : ContentGpuClient * AtomMainDelegate : : CreateContentGpuClient ( ) { <nl> <nl> content : : ContentRendererClient * <nl> AtomMainDelegate : : CreateContentRendererClient ( ) { <nl> - if ( base : : CommandLine : : ForCurrentProcess ( ) - > HasSwitch ( <nl> - switches : : kEnableSandbox ) | | <nl> - ! base : : CommandLine : : ForCurrentProcess ( ) - > HasSwitch ( <nl> - service_manager : : switches : : kNoSandbox ) ) { <nl> + auto * command_line = base : : CommandLine : : ForCurrentProcess ( ) ; <nl> + <nl> + if ( IsSandboxEnabled ( command_line ) ) { <nl> renderer_client_ . reset ( new AtomSandboxedRendererClient ) ; <nl> } else { <nl> renderer_client_ . reset ( new AtomRendererClient ) ; <nl> mmm a / shell / browser / api / atom_api_web_contents . cc <nl> ppp b / shell / browser / api / atom_api_web_contents . cc <nl> base : : ProcessId WebContents : : GetOSProcessID ( ) const { <nl> return base : : GetProcId ( process_handle ) ; <nl> } <nl> <nl> + base : : ProcessId WebContents : : GetOSProcessIdForFrame ( <nl> + const std : : string & name , <nl> + const std : : string & document_url ) const { <nl> + for ( auto * frame : web_contents ( ) - > GetAllFrames ( ) ) { <nl> + if ( frame - > GetFrameName ( ) = = name & & <nl> + frame - > GetLastCommittedURL ( ) . spec ( ) = = document_url ) { <nl> + return base : : GetProcId ( frame - > GetProcess ( ) - > GetProcess ( ) . Handle ( ) ) ; <nl> + } <nl> + } <nl> + return base : : kNullProcessId ; <nl> + } <nl> + <nl> WebContents : : Type WebContents : : GetType ( ) const { <nl> return type_ ; <nl> } <nl> void WebContents : : BuildPrototype ( v8 : : Isolate * isolate , <nl> & WebContents : : SetBackgroundThrottling ) <nl> . SetMethod ( " getProcessId " , & WebContents : : GetProcessID ) <nl> . SetMethod ( " getOSProcessId " , & WebContents : : GetOSProcessID ) <nl> + . SetMethod ( " _getOSProcessIdForFrame " , <nl> + & WebContents : : GetOSProcessIdForFrame ) <nl> . SetMethod ( " equal " , & WebContents : : Equal ) <nl> . SetMethod ( " _loadURL " , & WebContents : : LoadURL ) <nl> . SetMethod ( " downloadURL " , & WebContents : : DownloadURL ) <nl> mmm a / shell / browser / api / atom_api_web_contents . h <nl> ppp b / shell / browser / api / atom_api_web_contents . h <nl> class WebContents : public mate : : TrackableObject < WebContents > , <nl> void SetBackgroundThrottling ( bool allowed ) ; <nl> int GetProcessID ( ) const ; <nl> base : : ProcessId GetOSProcessID ( ) const ; <nl> + base : : ProcessId GetOSProcessIdForFrame ( const std : : string & name , <nl> + const std : : string & document_url ) const ; <nl> Type GetType ( ) const ; <nl> bool Equal ( const WebContents * web_contents ) const ; <nl> void LoadURL ( const GURL & url , const mate : : Dictionary & options ) ; <nl> mmm a / shell / browser / atom_browser_client . cc <nl> ppp b / shell / browser / atom_browser_client . 
cc <nl> void AtomBrowserClient : : ConsiderSiteInstanceForAffinity ( <nl> } <nl> } <nl> <nl> + bool AtomBrowserClient : : IsRendererSubFrame ( int process_id ) const { <nl> + return base : : ContainsKey ( renderer_is_subframe_ , process_id ) ; <nl> + } <nl> + <nl> void AtomBrowserClient : : RenderProcessWillLaunch ( <nl> content : : RenderProcessHost * host , <nl> service_manager : : mojom : : ServiceRequest * service_request ) { <nl> void AtomBrowserClient : : RegisterPendingSiteInstance ( <nl> auto * web_contents = content : : WebContents : : FromRenderFrameHost ( rfh ) ; <nl> auto * pending_process = pending_site_instance - > GetProcess ( ) ; <nl> pending_processes_ [ pending_process - > GetID ( ) ] = web_contents ; <nl> + <nl> + if ( rfh - > GetParent ( ) ) <nl> + renderer_is_subframe_ . insert ( pending_process - > GetID ( ) ) ; <nl> + else <nl> + renderer_is_subframe_ . erase ( pending_process - > GetID ( ) ) ; <nl> } <nl> <nl> void AtomBrowserClient : : AppendExtraCommandLineSwitches ( <nl> void AtomBrowserClient : : AppendExtraCommandLineSwitches ( <nl> } <nl> auto * web_preferences = WebContentsPreferences : : From ( web_contents ) ; <nl> if ( web_preferences ) <nl> - web_preferences - > AppendCommandLineSwitches ( command_line ) ; <nl> + web_preferences - > AppendCommandLineSwitches ( <nl> + command_line , IsRendererSubFrame ( process_id ) ) ; <nl> SessionPreferences : : AppendExtraCommandLineSwitches ( <nl> web_contents - > GetBrowserContext ( ) , command_line ) ; <nl> if ( CanUseCustomSiteInstance ( ) ) { <nl> void AtomBrowserClient : : RenderProcessHostDestroyed ( <nl> content : : RenderProcessHost * host ) { <nl> int process_id = host - > GetID ( ) ; <nl> pending_processes_ . erase ( process_id ) ; <nl> + renderer_is_subframe_ . erase ( process_id ) ; <nl> RemoveProcessPreferences ( process_id ) ; <nl> } <nl> <nl> mmm a / shell / browser / atom_browser_client . h <nl> ppp b / shell / browser / atom_browser_client . h <nl> class AtomBrowserClient : public content : : ContentBrowserClient , <nl> void ConsiderSiteInstanceForAffinity ( content : : RenderFrameHost * rfh , <nl> content : : SiteInstance * site_instance ) ; <nl> <nl> + bool IsRendererSubFrame ( int process_id ) const ; <nl> + <nl> / / pending_render_process = > web contents . <nl> std : : map < int , content : : WebContents * > pending_processes_ ; <nl> <nl> std : : map < int , base : : ProcessId > render_process_host_pids_ ; <nl> <nl> + std : : set < int > renderer_is_subframe_ ; <nl> + <nl> / / list of site per affinity . weak_ptr to prevent instance locking <nl> std : : map < std : : string , content : : SiteInstance * > site_per_affinities_ ; <nl> <nl> mmm a / shell / browser / web_contents_preferences . cc <nl> ppp b / shell / browser / web_contents_preferences . cc <nl> WebContentsPreferences * WebContentsPreferences : : From ( <nl> } <nl> <nl> void WebContentsPreferences : : AppendCommandLineSwitches ( <nl> - base : : CommandLine * command_line ) { <nl> + base : : CommandLine * command_line , <nl> + bool is_subframe ) { <nl> / / Check if plugins are enabled . 
<nl> if ( IsEnabled ( options : : kPlugins ) ) <nl> command_line - > AppendSwitch ( switches : : kEnablePlugins ) ; <nl> void WebContentsPreferences : : AppendCommandLineSwitches ( <nl> if ( IsEnabled ( options : : kWebviewTag ) ) <nl> command_line - > AppendSwitch ( switches : : kWebviewTag ) ; <nl> <nl> + / / Sandbox can be enabled for renderer processes hosting cross - origin frames <nl> + / / unless nodeIntegrationInSubFrames is enabled <nl> + bool can_sandbox_frame = <nl> + is_subframe & & ! IsEnabled ( options : : kNodeIntegrationInSubFrames ) ; <nl> + <nl> / / If the ` sandbox ` option was passed to the BrowserWindow ' s webPreferences , <nl> / / pass ` - - enable - sandbox ` to the renderer so it won ' t have any node . js <nl> - / / integration . <nl> - if ( IsEnabled ( options : : kSandbox ) ) { <nl> + / / integration . Otherwise disable Chromium sandbox , unless app . enableSandbox ( ) <nl> + / / was called . <nl> + if ( IsEnabled ( options : : kSandbox ) | | can_sandbox_frame ) { <nl> command_line - > AppendSwitch ( switches : : kEnableSandbox ) ; <nl> } else if ( ! command_line - > HasSwitch ( switches : : kEnableSandbox ) ) { <nl> command_line - > AppendSwitch ( service_manager : : switches : : kNoSandbox ) ; <nl> mmm a / shell / browser / web_contents_preferences . h <nl> ppp b / shell / browser / web_contents_preferences . h <nl> class WebContentsPreferences <nl> void Merge ( const base : : DictionaryValue & new_web_preferences ) ; <nl> <nl> / / Append command paramters according to preferences . <nl> - void AppendCommandLineSwitches ( base : : CommandLine * command_line ) ; <nl> + void AppendCommandLineSwitches ( base : : CommandLine * command_line , <nl> + bool is_subframe ) ; <nl> <nl> / / Modify the WebPreferences according to preferences . <nl> void OverrideWebkitPrefs ( content : : WebPreferences * prefs ) ; <nl> mmm a / spec / api - subframe - spec . js <nl> ppp b / spec / api - subframe - spec . js <nl> <nl> const { expect } = require ( ' chai ' ) <nl> const { remote } = require ( ' electron ' ) <nl> const path = require ( ' path ' ) <nl> + const http = require ( ' http ' ) <nl> <nl> const { emittedNTimes , emittedOnce } = require ( ' . / events - helpers ' ) <nl> const { closeWindow } = require ( ' . / window - helpers ' ) <nl> <nl> - const { BrowserWindow } = remote <nl> + const { app , BrowserWindow , ipcMain } = remote <nl> <nl> describe ( ' renderer nodeIntegrationInSubFrames ' , ( ) = > { <nl> const generateTests = ( description , webPreferences ) = > { <nl> describe ( ' renderer nodeIntegrationInSubFrames ' , ( ) = > { <nl> generateTests ( config . title , config . webPreferences ) <nl> } ) <nl> } ) <nl> + <nl> + describe ( ' cross - site frame sandboxing ' , ( ) = > { <nl> + let server = null <nl> + <nl> + beforeEach ( function ( ) { <nl> + if ( process . platform = = = ' linux ' ) { <nl> + this . skip ( ) <nl> + } <nl> + } ) <nl> + <nl> + before ( function ( done ) { <nl> + server = http . createServer ( ( req , res ) = > { <nl> + res . end ( ` < iframe name = " frame " src = " $ { server . cross_site_url } " / > ` ) <nl> + } ) <nl> + server . listen ( 0 , ' 127 . 0 . 0 . 1 ' , ( ) = > { <nl> + server . url = ` http : / / 127 . 0 . 0 . 1 : $ { server . address ( ) . port } / ` <nl> + server . cross_site_url = ` http : / / localhost : $ { server . address ( ) . port } / ` <nl> + done ( ) <nl> + } ) <nl> + } ) <nl> + <nl> + after ( ( ) = > { <nl> + server . close ( ) <nl> + server = null <nl> + } ) <nl> + <nl> + const fixtures = path . 
resolve ( __dirname , ' fixtures ' ) <nl> + const preload = path . join ( fixtures , ' module ' , ' preload - pid . js ' ) <nl> + <nl> + let w <nl> + <nl> + afterEach ( ( ) = > { <nl> + return closeWindow ( w ) . then ( ( ) = > { <nl> + w = null <nl> + } ) <nl> + } ) <nl> + <nl> + const generateSpecs = ( description , webPreferences ) = > { <nl> + describe ( description , ( ) = > { <nl> + it ( ' iframe process is sandboxed if possible ' , async ( ) = > { <nl> + w = new BrowserWindow ( { <nl> + show : false , <nl> + webPreferences <nl> + } ) <nl> + <nl> + await w . loadURL ( server . url ) <nl> + <nl> + const pidMain = w . webContents . getOSProcessId ( ) <nl> + const pidFrame = w . webContents . _getOSProcessIdForFrame ( ' frame ' , server . cross_site_url ) <nl> + <nl> + const metrics = app . getAppMetrics ( ) <nl> + const isProcessSandboxed = function ( pid ) { <nl> + const entry = metrics . filter ( metric = > metric . pid = = = pid ) [ 0 ] <nl> + return entry & & entry . sandboxed <nl> + } <nl> + <nl> + const sandboxMain = ! ! ( webPreferences . sandbox | | process . mas ) <nl> + const sandboxFrame = sandboxMain | | ! webPreferences . nodeIntegrationInSubFrames <nl> + <nl> + expect ( isProcessSandboxed ( pidMain ) ) . to . equal ( sandboxMain ) <nl> + expect ( isProcessSandboxed ( pidFrame ) ) . to . equal ( sandboxFrame ) <nl> + } ) <nl> + } ) <nl> + } <nl> + <nl> + generateSpecs ( ' nodeIntegrationInSubFrames = false , sandbox = false ' , { <nl> + nodeIntegrationInSubFrames : false , <nl> + sandbox : false <nl> + } ) <nl> + <nl> + generateSpecs ( ' nodeIntegrationInSubFrames = false , sandbox = true ' , { <nl> + nodeIntegrationInSubFrames : false , <nl> + sandbox : true <nl> + } ) <nl> + <nl> + generateSpecs ( ' nodeIntegrationInSubFrames = true , sandbox = false ' , { <nl> + nodeIntegrationInSubFrames : true , <nl> + sandbox : false <nl> + } ) <nl> + <nl> + generateSpecs ( ' nodeIntegrationInSubFrames = true , sandbox = true ' , { <nl> + nodeIntegrationInSubFrames : true , <nl> + sandbox : true <nl> + } ) <nl> + } ) <nl>
feat: sandbox renderer processes for cross-origin frames ()
electron/electron
f3f2990b9ed5593fe0fda3c84cbbaab9ace842ef
2019-06-20T10:10:56Z
mmm a / dist / windows / installer - translations / italian . nsi <nl> ppp b / dist / windows / installer - translations / italian . nsi <nl> <nl> ; LangString inst_qbt_req $ { LANG_ENGLISH } " qBittorrent ( required ) " <nl> LangString inst_qbt_req $ { LANG_ITALIAN } " qBittorrent ( necessario ) " <nl> ; LangString inst_dekstop $ { LANG_ENGLISH } " Create Desktop Shortcut " <nl> - LangString inst_dekstop $ { LANG_ITALIAN } " Crea icone sul desktop " <nl> + LangString inst_dekstop $ { LANG_ITALIAN } " Crea collegamento sul Desktop " <nl> ; LangString inst_startmenu $ { LANG_ENGLISH } " Create Start Menu Shortcut " <nl> - LangString inst_startmenu $ { LANG_ITALIAN } " Crea gruppo programmi " <nl> + LangString inst_startmenu $ { LANG_ITALIAN } " Aggiungi al menu Start " <nl> ; LangString inst_startup $ { LANG_ENGLISH } " Start qBittorrent on Windows start up " <nl> LangString inst_startup $ { LANG_ITALIAN } " Esegui qBittorrent all ' avvio di Windows " <nl> ; LangString inst_torrent $ { LANG_ENGLISH } " Open . torrent files with qBittorrent " <nl> LangString inst_magnet $ { LANG_ITALIAN } " Apri collegamenti magnet con qBittorrent <nl> ; LangString inst_firewall $ { LANG_ENGLISH } " Add Windows Firewall rule " <nl> LangString inst_firewall $ { LANG_ITALIAN } " Aggiungi regola al firewall di Windows " <nl> ; LangString inst_pathlimit $ { LANG_ENGLISH } " Disable Windows path length limit ( 260 character MAX_PATH limitation , requires Windows 10 1607 or later ) " <nl> - LangString inst_pathlimit $ { LANG_ITALIAN } " Disabilita limite percorso Windows ( limite MAX_PATH max 260 caratterin , richiede Windows 10 versione 1607 o successive ) " <nl> + LangString inst_pathlimit $ { LANG_ITALIAN } " Disabilita limite lunghezza percorsi Windows ( limite MAX_PATH di 260 caratteri , richiede Windows 10 versione 1607 o successive ) " <nl> ; LangString inst_firewallinfo $ { LANG_ENGLISH } " Adding Windows Firewall rule " <nl> LangString inst_firewallinfo $ { LANG_ITALIAN } " Aggiunta regola al firewall di Windows " <nl> ; LangString inst_warning $ { LANG_ENGLISH } " qBittorrent is running . Please close the application before installing . " <nl> - LangString inst_warning $ { LANG_ITALIAN } " qBittorrent è in esecuzione . % n % nChiudi l ' applicazione qBittorrent prima della nuova installazione . " <nl> + LangString inst_warning $ { LANG_ITALIAN } " qBittorrent è in esecuzione . Chiudilo prima di procedere con l ' installazione . " <nl> ; LangString inst_uninstall_question $ { LANG_ENGLISH } " Current version will be uninstalled . User settings and torrents will remain intact . " <nl> - LangString inst_uninstall_question $ { LANG_ITALIAN } " L ' attuale versione di qBittorrent verrà disinstallata . % n % nLe impostazioni utente e i torrent rimanno invariati . " <nl> + LangString inst_uninstall_question $ { LANG_ITALIAN } " La versione attuale verrà disinstallata . Le impostazioni utente e i torrent rimarranno invariati . " <nl> ; LangString inst_unist $ { LANG_ENGLISH } " Uninstalling previous version . " <nl> - LangString inst_unist $ { LANG_ITALIAN } " Disinstallazione versione precedente di qBittorrent . " <nl> + LangString inst_unist $ { LANG_ITALIAN } " Disinstallazione versione precedente . " <nl> ; LangString launch_qbt $ { LANG_ENGLISH } " Launch qBittorrent . " <nl> - LangString launch_qbt $ { LANG_ITALIAN } " Esegui qBittorrent " <nl> + LangString launch_qbt $ { LANG_ITALIAN } " Esegui qBittorrent . 
" <nl> ; LangString inst_requires_64bit $ { LANG_ENGLISH } " This installer works only in 64 - bit Windows versions . " <nl> - LangString inst_requires_64bit $ { LANG_ITALIAN } " Questo installer di qBittorrent funziona solo con Windows a 64bit . " <nl> + LangString inst_requires_64bit $ { LANG_ITALIAN } " Questo installer funziona solo con versioni di Windows a 64bit . " <nl> ; LangString inst_requires_win7 $ { LANG_ENGLISH } " This qBittorrent version requires at least Windows 7 . " <nl> LangString inst_requires_win7 $ { LANG_ITALIAN } " Questa versione di qBittorrent richiede Windows 7 o versioni successive . " <nl> <nl> LangString remove_shortcuts $ { LANG_ITALIAN } " Rimuovi collegamenti " <nl> ; LangString remove_associations $ { LANG_ENGLISH } " Remove file associations " <nl> LangString remove_associations $ { LANG_ITALIAN } " Rimuovi associazione file " <nl> ; LangString remove_registry $ { LANG_ENGLISH } " Remove registry keys " <nl> - LangString remove_registry $ { LANG_ITALIAN } " Rimuovi chiavi registro " <nl> + LangString remove_registry $ { LANG_ITALIAN } " Rimuovi chiavi di registro " <nl> ; LangString remove_conf $ { LANG_ENGLISH } " Remove configuration files " <nl> LangString remove_conf $ { LANG_ITALIAN } " Rimuovi file di configurazione " <nl> ; LangString remove_firewall $ { LANG_ENGLISH } " Remove Windows Firewall rule " <nl> LangString remove_firewallinfo $ { LANG_ITALIAN } " Rimozione regola dal firewall di <nl> ; LangString remove_cache $ { LANG_ENGLISH } " Remove torrents and cached data " <nl> LangString remove_cache $ { LANG_ITALIAN } " Rimuovi torrent e dati nella cache " <nl> ; LangString uninst_warning $ { LANG_ENGLISH } " qBittorrent is running . Please close the application before uninstalling . " <nl> - LangString uninst_warning $ { LANG_ITALIAN } " qBittorrent è in esecuzione . % n % nChiudi qBittorrent prima della disinstallazione . " <nl> + LangString uninst_warning $ { LANG_ITALIAN } " qBittorrent è in esecuzione . Chiudilo prima di procedere con la disinstallazione . " <nl> ; LangString uninst_tor_warn $ { LANG_ENGLISH } " Not removing . torrent association . It is associated with : " <nl> LangString uninst_tor_warn $ { LANG_ITALIAN } " Associazione file . torrent non rimossa . File associati con : " <nl> ; LangString uninst_mag_warn $ { LANG_ENGLISH } " Not removing magnet association . It is associated with : " <nl>
Merge pull request from alessandrosimonelli / patch - 1
qbittorrent/qBittorrent
c56cb8adb64bb7a35adf94cfc84bf38f539494b8
2020-12-19T03:50:55Z
mmm a / yarn . lock <nl> ppp b / yarn . lock <nl> <nl> " @ microsoft / tsdoc " " 0 . 12 . 19 " <nl> " @ rushstack / node - core - library " " 3 . 29 . 1 " <nl> <nl> + " @ microsoft / api - extractor - model @ 7 . 8 . 19 " : <nl> + version " 7 . 8 . 19 " <nl> + resolved " https : / / registry . yarnpkg . com / @ microsoft / api - extractor - model / - / api - extractor - model - 7 . 8 . 19 . tgz # a49fb1b3f454966c4f60e8d63ffae2599eeaace3 " <nl> + integrity sha512 - tEEPuww0Gbyw9LuTcJ7nDCTjb + aLSAox8Xl9 / iSxNTv5yHJN1QX3cqajlC3ibDHlRz7oMpQfHZX7YpAygbgIvg = = <nl> + dependencies : <nl> + " @ microsoft / tsdoc " " 0 . 12 . 19 " <nl> + " @ rushstack / node - core - library " " 3 . 30 . 0 " <nl> + <nl> " @ microsoft / api - extractor @ ^ 7 . 3 . 8 " : <nl> - version " 7 . 9 . 10 " <nl> - resolved " https : / / registry . yarnpkg . com / @ microsoft / api - extractor / - / api - extractor - 7 . 9 . 10 . tgz # e15676ca35c5061971697a017ac05abf9cba694b " <nl> - integrity sha512 - hN / iyFN7FRM6flSDmeb2RJGGeo1CF7CONlsjxgoiXU3cqx8601vWheJK06s8 + aR8IrBtnV12ZVMii8syQw6AgA = = <nl> + version " 7 . 9 . 11 " <nl> + resolved " https : / / registry . yarnpkg . com / @ microsoft / api - extractor / - / api - extractor - 7 . 9 . 11 . tgz # ba4276bf1343f6e9df2560e77e7861af4e8740dd " <nl> + integrity sha512 - t + LwGAuTjr + odFEl5xV3vl7qOWf84CM8BWKgb93kEnVd8uha3KfuWtDfnstxG4oC / TL6tu5 + 9rOwKJiNIidf2A = = <nl> dependencies : <nl> - " @ microsoft / api - extractor - model " " 7 . 8 . 18 " <nl> + " @ microsoft / api - extractor - model " " 7 . 8 . 19 " <nl> " @ microsoft / tsdoc " " 0 . 12 . 19 " <nl> - " @ rushstack / node - core - library " " 3 . 29 . 1 " <nl> - " @ rushstack / ts - command - line " " 4 . 6 . 3 " <nl> + " @ rushstack / node - core - library " " 3 . 30 . 0 " <nl> + " @ rushstack / ts - command - line " " 4 . 6 . 4 " <nl> colors " ~ 1 . 2 . 1 " <nl> lodash " ~ 4 . 17 . 15 " <nl> resolve " ~ 1 . 17 . 0 " <nl> <nl> timsort " ~ 0 . 3 . 0 " <nl> z - schema " ~ 3 . 18 . 3 " <nl> <nl> + " @ rushstack / node - core - library @ 3 . 30 . 0 " : <nl> + version " 3 . 30 . 0 " <nl> + resolved " https : / / registry . yarnpkg . com / @ rushstack / node - core - library / - / node - core - library - 3 . 30 . 0 . tgz # a2b814a611a040ac69d6c31ffc92bf9155c983fb " <nl> + integrity sha512 - vZo1fi / ObL3CmRXlQUX / E1xL9KL9arBfCJ7pYf3O / vFrD8ffSfpQ6 + 6lhgAsKrCIM5Epddsgeb2REPxMwYZX1g = = <nl> + dependencies : <nl> + " @ types / node " " 10 . 17 . 13 " <nl> + colors " ~ 1 . 2 . 1 " <nl> + fs - extra " ~ 7 . 0 . 1 " <nl> + import - lazy " ~ 4 . 0 . 0 " <nl> + jju " ~ 1 . 4 . 0 " <nl> + resolve " ~ 1 . 17 . 0 " <nl> + semver " ~ 7 . 3 . 0 " <nl> + timsort " ~ 0 . 3 . 0 " <nl> + z - schema " ~ 3 . 18 . 3 " <nl> + <nl> " @ rushstack / ts - command - line @ 4 . 6 . 3 " : <nl> version " 4 . 6 . 3 " <nl> resolved " https : / / registry . yarnpkg . com / @ rushstack / ts - command - line / - / ts - command - line - 4 . 6 . 3 . tgz # 0c4213a340e6e56a9a910f962e2db6f061cf81e2 " <nl> <nl> colors " ~ 1 . 2 . 1 " <nl> string - argv " ~ 0 . 3 . 1 " <nl> <nl> + " @ rushstack / ts - command - line @ 4 . 6 . 4 " : <nl> + version " 4 . 6 . 4 " <nl> + resolved " https : / / registry . yarnpkg . com / @ rushstack / ts - command - line / - / ts - command - line - 4 . 6 . 4 . tgz # bf04299aa69ccf066085ce7ed88b03852f3ed781 " <nl> + integrity sha512 - ubIANZimyU07 + ChU56LfiD36NJ8gvw1txlvUP20GYNQi4lf5N0xEnev4r + AtKkOdnowpGy60ObGmYxSUpSacpw = = <nl> + dependencies : <nl> + " @ types / argparse " " 1 . 0 . 38 " <nl> + argparse " ~ 1 . 0 . 
9 " <nl> + colors " ~ 1 . 2 . 1 " <nl> + string - argv " ~ 0 . 3 . 1 " <nl> + <nl> " @ types / argparse @ 1 . 0 . 38 " : <nl> version " 1 . 0 . 38 " <nl> resolved " https : / / registry . yarnpkg . com / @ types / argparse / - / argparse - 1 . 0 . 38 . tgz # a81fd8606d481f873a3800c6ebae4f1d768a56a9 " <nl>
Bump @ microsoft / api - extractor from 7 . 9 . 10 to 7 . 9 . 11 ( )
microsoft/react-native-windows
635ef5d5bf297860cf13d0993ffbb253fedc61d6
2020-08-27T14:20:18Z
mmm a / tensorflow / compiler / mlir / xla / hlo_function_importer . cc <nl> ppp b / tensorflow / compiler / mlir / xla / hlo_function_importer . cc <nl> StatusOr < mlir : : Operation * > HloFunctionImporter : : ImportInstruction ( <nl> attributes . push_back ( ConvertPrecisionConfig ( instruction ) ) ; <nl> MakeAndReturn ( ConvOp ) ; <nl> } <nl> + <nl> + case HloOpcode : : kFft : { <nl> + auto fft_type = <nl> + builder_ - > getStringAttr ( FftType_Name ( instruction - > fft_type ( ) ) ) ; <nl> + <nl> + std : : vector < int64_t > fft_length ( instruction - > fft_length ( ) . begin ( ) , <nl> + instruction - > fft_length ( ) . end ( ) ) ; <nl> + <nl> + attributes . push_back ( builder_ - > getNamedAttr ( " fft_type " , fft_type ) ) ; <nl> + attributes . push_back ( <nl> + builder_ - > getNamedAttr ( " fft_length " , Convert ( fft_length ) ) ) ; <nl> + MakeAndReturn ( FftOp ) ; <nl> + } <nl> # define NoAttributeCase ( hlo_op_code , mlir_op ) \ <nl> case HloOpcode : : hlo_op_code : { \ <nl> MakeAndReturn ( mlir_op ) ; \ <nl> StatusOr < mlir : : RankedTensorType > HloFunctionImporter : : ConvertTensorType ( <nl> return builder_ - > getTensorType ( array , builder_ - > getIntegerType ( 32 ) ) ; <nl> case PrimitiveType : : U64 : <nl> return builder_ - > getTensorType ( array , builder_ - > getIntegerType ( 64 ) ) ; <nl> + case PrimitiveType : : C64 : <nl> + return builder_ - > getTensorType ( <nl> + array , mlir : : ComplexType : : get ( builder_ - > getF32Type ( ) ) ) ; <nl> default : <nl> return tensorflow : : errors : : Internal ( <nl> absl : : StrCat ( " Unsupported type : " , PrimitiveType_Name ( type ) ) ) ; <nl> mmm a / tensorflow / compiler / mlir / xla / ir / hlo_ops . td <nl> ppp b / tensorflow / compiler / mlir / xla / ir / hlo_ops . td <nl> def HLO_PredTensor : TensorOf < [ HLO_Pred ] > ; <nl> / / Any integer or floating - point tensor types <nl> def HLO_IntOrFpTensor : TensorOf < [ HLO_Int , AnyFloat ] > ; <nl> <nl> - def HLO_Tensor : TensorOf < [ AnyFloat , AnyInteger ] > ; <nl> + def HLO_Tensor : TensorOf < [ AnyFloat , AnyInteger , AnyComplex ] > ; <nl> <nl> def HLO_Tuple : NestedTupleOf < [ HLO_Tensor ] > ; <nl> <nl> def HLO_DotGeneralOp : HLO_Op < " dot_general " , [ NoSideEffect ] > , BASE_HLO_DotGeneral <nl> let results = ( outs HLO_Tensor ) ; <nl> } <nl> <nl> + def HLO_FftOp : HLO_Op < " fft " , [ NoSideEffect ] > , BASE_HLO_FftOp { <nl> + let arguments = ( ins <nl> + HLO_Tensor : $ operand , <nl> + HLO_FftTypeAttr : $ fft_type , <nl> + I64ElementsAttr : $ fft_length <nl> + ) ; <nl> + <nl> + let results = ( outs HLO_Tensor ) ; <nl> + <nl> + / / TODO ( b / 129422361 ) Attributes are not supported by the codegen . <nl> + let hasCustomHLOConverter = 1 ; <nl> + } <nl> + <nl> def HLO_GatherOp : HLO_Op < " gather " , [ NoSideEffect ] > , BASE_HLO_GatherOp { <nl> let arguments = ( ins <nl> HLO_Tensor : $ operand , <nl> def HLO_GatherOp : HLO_Op < " gather " , [ NoSideEffect ] > , BASE_HLO_GatherOp { <nl> <nl> let results = ( outs HLO_Tensor ) ; <nl> <nl> - / / TODO ( b / 129422361 ) Attributes are not by the codegen . The optional argument <nl> - / / ( dimensions ) needs to be added as an attribute . <nl> + / / TODO ( b / 129422361 ) Attributes are not supported by the codegen . The <nl> + / / optional argument ( dimensions ) needs to be added as an attribute . <nl> let hasCustomHLOConverter = 1 ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / mlir / xla / ir / hlo_ops_base . td <nl> ppp b / tensorflow / compiler / mlir / xla / ir / hlo_ops_base . 
td <nl> def HLO_PrecisionConfigAttr : <nl> OptionalAttr < <nl> TypedArrayAttrBase < HLO_PrecisionAttr , " Precision Config attribute " > > ; <nl> <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / Fast Fourier Transform Type enum definitions . <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + / / These mirror the XLA FftType proto enum . <nl> + def HLO_FFT_TYPE_FFT : StrEnumAttrCase < " FFT " > ; <nl> + def HLO_FFT_TYPE_IFFT : StrEnumAttrCase < " IFFT " > ; <nl> + def HLO_FFT_TYPE_RFFT : StrEnumAttrCase < " RFFT " > ; <nl> + def HLO_FFT_TYPE_IRFFT : StrEnumAttrCase < " IRFFT " > ; <nl> + <nl> + def HLO_FftTypeAttr : StrEnumAttr < " FftType " , <nl> + " XLA fast fourier transform type . " , <nl> + [ HLO_FFT_TYPE_FFT , HLO_FFT_TYPE_IFFT , <nl> + HLO_FFT_TYPE_RFFT , HLO_FFT_TYPE_IRFFT ] > ; <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / Comparison op definitions . <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> class BASE_HLO_DotGeneralOp { <nl> } ] ; <nl> } <nl> <nl> + class BASE_HLO_FftOp { <nl> + string summary = " Fast fourier transform operator " ; <nl> + <nl> + string description = [ { <nl> + Returns the fast - fourier - transform of the input array . <nl> + <nl> + See <nl> + https : / / www . tensorflow . org / xla / operation_semantics # fft . <nl> + } ] ; <nl> + } <nl> <nl> class BASE_HLO_GatherOp { <nl> string summary = " Gather operator " ; <nl> mmm a / tensorflow / compiler / mlir / xla / mlir_hlo_to_hlo . cc <nl> ppp b / tensorflow / compiler / mlir / xla / mlir_hlo_to_hlo . cc <nl> LogicalResult ExportXlaOp ( DynamicUpdateSliceOp op , OpLoweringContext ctx ) { <nl> return failure ( ) ; <nl> } <nl> <nl> + LogicalResult ExportXlaOp ( FftOp op , OpLoweringContext ctx ) { return failure ( ) ; } <nl> + <nl> LogicalResult ExportXlaOp ( GatherOp op , OpLoweringContext ctx ) { <nl> return failure ( ) ; <nl> } <nl> new file mode 100644 <nl> index 0000000000000 . . c82ef33a0a747 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / mlir / xla / tests / translate / fft . hlotxt <nl> <nl> + / / RUN : tf - mlir - translate - hlo - text - to - mlir - hlo % s - o - | FileCheck % s <nl> + <nl> + HloModule tfcompile <nl> + <nl> + / / CHECK - LABEL : func @ main ( % arg0 : tensor < 3x9xf32 > ) - > tensor < 3x5xcomplex < f32 > > { <nl> + ENTRY % tfcompile { <nl> + % arg0 . 1 = f32 [ 3 , 9 ] { 1 , 0 } parameter ( 0 ) , parameter_replication = { false } , metadata = { op_name = " XLA_Args " } <nl> + / / CHECK : " xla_hlo . fft " ( % arg0 ) { fft_length = dense < 9 > : tensor < 1xi64 > , fft_type = " RFFT " <nl> + ROOT % fft . 2 = c64 [ 3 , 5 ] { 1 , 0 } fft ( % arg0 . 1 ) , fft_type = RFFT , fft_length = { 9 } , metadata = { op_type = " RFFT " op_name = " rfft " } <nl> + } <nl>
HLO Function importer support for the xla_hlo . fft function
tensorflow/tensorflow
204419ee29b2c2c6dae055cc6b325f45861d0781
2019-10-17T18:03:34Z
mmm a / format . cc <nl> ppp b / format . cc <nl> void fmt : : BasicWriter < Char > : : PrintfParser : : Format ( <nl> <nl> int precision = - 1 ; <nl> switch ( have_width ) { <nl> - case false : <nl> + case false : { <nl> / / TODO : parse optional flags <nl> - for ( bool stop = false ; ! stop ; ) { <nl> + bool stop = false ; <nl> + do { <nl> switch ( * s ) { <nl> case ' - ' : <nl> + + s ; <nl> void fmt : : BasicWriter < Char > : : PrintfParser : : Format ( <nl> default : <nl> stop = true ; <nl> } <nl> - } <nl> + } while ( ! stop ) ; <nl> <nl> / * <nl> / / Parse fill and alignment . <nl> void fmt : : BasicWriter < Char > : : PrintfParser : : Format ( <nl> if ( * s < ' 0 ' | | * s > ' 9 ' ) <nl> break ; <nl> spec . width_ = internal : : ParseNonnegativeInt ( s , error ) ; <nl> + } <nl> / / Fall through . <nl> default : <nl> if ( spec . fill_ = = ' 0 ' ) { <nl>
Avoid unnecessary check .
fmtlib/fmt
533c8214d45009dc4e258e42d727cb0cbf59a187
2014-06-06T15:54:37Z
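The fmt change above swaps a for-loop whose stop flag is pointlessly tested before the first iteration for a do/while that tests it only after the body has run. A minimal standalone sketch of the same pattern, with a simplified subset of flag characters and a hypothetical helper name:

    // Flag-parsing loop that must run at least once: the do/while form drops
    // the redundant "!stop" check the original for-loop made on entry.
    void SkipPrintfFlags(const char*& s) {
      bool stop = false;
      do {
        switch (*s) {
          case '-':
          case '+':
          case ' ':
            ++s;          // consume a recognized flag character
            break;
          default:
            stop = true;  // first non-flag character ends the loop
        }
      } while (!stop);
    }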
mmm a / tensorflow / compiler / xla / service / gpu / cudnn_conv_algorithm_picker . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / cudnn_conv_algorithm_picker . cc <nl> CudnnConvAlgorithmPicker : : PickBestAlgorithm ( <nl> VLOG ( 3 ) < < " Trying algorithm " < < AlgorithmToString ( alg ) < < " for " <nl> < < instr - > ToString ( ) ; <nl> <nl> - / / Use assignment insetad of brace - list to make GCC 4 . 9 happy . <nl> + / / Use assignment instead of brace - list to make GCC 4 . 9 happy . <nl> RunConvOptions options ; <nl> options . profile_result = & profile_result ; <nl> options . algo_override = alg ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / cudnn_conv_runner . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / cudnn_conv_runner . h <nl> struct RunConvOptions { <nl> / / Nullable output - parameter pointer for profiling results . <nl> se : : dnn : : ProfileResult * profile_result = nullptr ; <nl> <nl> - / / Use this algorithm , instead the one from the instrcution . <nl> + / / Use this algorithm , instead of the one from the instruction . <nl> absl : : optional < se : : dnn : : AlgorithmDesc > algo_override ; <nl> } ; <nl> <nl>
Fix typos in comments .
tensorflow/tensorflow
8d9872cebb57219acbfaad57e5203b3a407ac28e
2019-01-09T20:07:51Z
mmm a / src / core / file_sys / registered_cache . h <nl> ppp b / src / core / file_sys / registered_cache . h <nl> <nl> <nl> # include < array > <nl> # include < functional > <nl> - # include < map > <nl> # include < memory > <nl> # include < string > <nl> # include < vector > <nl>
file_sys / registered_cache : Remove unused < map > include
yuzu-emu/yuzu
7b1aaaa0691bfc610033fa8555cf7f7ddcce364e
2018-11-27T21:33:18Z
mmm a / jstests / sort10 . js <nl> ppp b / jstests / sort10 . js <nl> <nl> / / signed dates check <nl> t = db . sort2 ; <nl> <nl> - var opts = { } ; <nl> - if ( Math . random ( ) < 0 . 3 ) { <nl> - opts . background = true ; <nl> - printjson ( opts ) ; <nl> - } <nl> - t . drop ( ) ; <nl> - t . insert ( { x : new Date ( 50000 ) } ) ; <nl> - t . insert ( { x : new Date ( - 50 ) } ) ; <nl> - var d = new Date ( - 50 ) ; <nl> - for ( var pass = 0 ; pass < 2 ; pass + + ) { <nl> - assert ( t . find ( ) . sort ( { x : 1 } ) [ 0 ] . x . valueOf ( ) = = d . valueOf ( ) ) ; <nl> - t . ensureIndex ( { x : 1 } , opts ) ; <nl> - t . insert ( { x : new Date ( ) } ) ; <nl> + function checkSorting1 ( opts ) { <nl> + t . drop ( ) ; <nl> + t . insert ( { x : new Date ( 50000 ) } ) ; <nl> + t . insert ( { x : new Date ( - 50 ) } ) ; <nl> + var d = new Date ( - 50 ) ; <nl> + for ( var pass = 0 ; pass < 2 ; pass + + ) { <nl> + assert ( t . find ( ) . sort ( { x : 1 } ) [ 0 ] . x . valueOf ( ) = = d . valueOf ( ) ) ; <nl> + t . ensureIndex ( { x : 1 } , opts ) ; <nl> + t . insert ( { x : new Date ( ) } ) ; <nl> + } <nl> } <nl> <nl> + checkSorting1 ( { } ) <nl> + checkSorting1 ( { " background " : true } ) <nl> + <nl> <nl> <nl> - function checkSorting ( dates , sortOrder ) { <nl> + function checkSorting2 ( dates , sortOrder ) { <nl> cur = t . find ( ) . sort ( { x : sortOrder } ) ; <nl> assert . eq ( dates . length , cur . count ( ) , " Incorrect number of results returned " ) ; <nl> index = 0 ; <nl> for ( var i = 0 ; i < dates . length ; i + + ) { <nl> dates . sort ( function ( a , b ) { return a - b } ) ; <nl> reverseDates = dates . slice ( 0 ) . reverse ( ) <nl> <nl> - checkSorting ( dates , 1 ) <nl> - checkSorting ( reverseDates , - 1 ) <nl> + checkSorting2 ( dates , 1 ) <nl> + checkSorting2 ( reverseDates , - 1 ) <nl> t . ensureIndex ( { x : 1 } ) <nl> - checkSorting ( dates , 1 ) <nl> - checkSorting ( reverseDates , - 1 ) <nl> + checkSorting2 ( dates , 1 ) <nl> + checkSorting2 ( reverseDates , - 1 ) <nl> t . dropIndexes ( ) <nl> t . ensureIndex ( { x : - 1 } ) <nl> - checkSorting ( dates , 1 ) <nl> - checkSorting ( reverseDates , - 1 ) <nl> + checkSorting2 ( dates , 1 ) <nl> + checkSorting2 ( reverseDates , - 1 ) <nl>
Make date - sorting test deterministic .
mongodb/mongo
1450e4ccdf72c77f9e5b8d037bcaff732d863ab3
2011-08-25T16:50:25Z
new file mode 100644 <nl> index 00000000000 . . d5bdb816bf2 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01559_misplaced_codec_diagnostics . reference <nl> @ @ - 0 , 0 + 1 @ @ <nl> + Unknown data type family : CODEC <nl> new file mode 100755 <nl> index 00000000000 . . 9904b6388d6 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01559_misplaced_codec_diagnostics . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . " $ CURDIR " / . . / shell_config . sh <nl> + <nl> + $ { CLICKHOUSE_CLIENT } - - query " CREATE TABLE t ( c CODEC ( NONE ) ) ENGINE = Memory " 2 > & 1 | grep - oF ' Unknown data type family : CODEC ' | uniq <nl>
Add a test
ClickHouse/ClickHouse
57a0dd30741a55aa2d8d00650bb6bab988223039
2020-11-06T18:23:30Z
mmm a / include / mlir / IR / StmtVisitor . h <nl> ppp b / include / mlir / IR / StmtVisitor . h <nl> template < typename SubClass , typename RetTy = void > class StmtWalker { <nl> / / Define walkers for MLFunction and all MLFunction statement kinds . <nl> void walk ( MLFunction * f ) { <nl> static_cast < SubClass * > ( this ) - > visitMLFunction ( f ) ; <nl> - walk ( f - > begin ( ) , f - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walk ( f - > begin ( ) , f - > end ( ) ) ; <nl> } <nl> <nl> void walkPostOrder ( MLFunction * f ) { <nl> - walkPostOrder ( f - > begin ( ) , f - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walkPostOrder ( f - > begin ( ) , f - > end ( ) ) ; <nl> static_cast < SubClass * > ( this ) - > visitMLFunction ( f ) ; <nl> } <nl> <nl> template < typename SubClass , typename RetTy = void > class StmtWalker { <nl> <nl> void walkForStmt ( ForStmt * forStmt ) { <nl> static_cast < SubClass * > ( this ) - > visitForStmt ( forStmt ) ; <nl> - walk ( forStmt - > begin ( ) , forStmt - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walk ( forStmt - > begin ( ) , forStmt - > end ( ) ) ; <nl> } <nl> <nl> void walkForStmtPostOrder ( ForStmt * forStmt ) { <nl> - walkPostOrder ( forStmt - > begin ( ) , forStmt - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walkPostOrder ( forStmt - > begin ( ) , <nl> + forStmt - > end ( ) ) ; <nl> static_cast < SubClass * > ( this ) - > visitForStmt ( forStmt ) ; <nl> } <nl> <nl> void walkIfStmt ( IfStmt * ifStmt ) { <nl> static_cast < SubClass * > ( this ) - > visitIfStmt ( ifStmt ) ; <nl> - walk ( ifStmt - > getThen ( ) - > begin ( ) , ifStmt - > getThen ( ) - > end ( ) ) ; <nl> - walk ( ifStmt - > getElse ( ) - > begin ( ) , ifStmt - > getElse ( ) - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walk ( ifStmt - > getThen ( ) - > begin ( ) , <nl> + ifStmt - > getThen ( ) - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walk ( ifStmt - > getElse ( ) - > begin ( ) , <nl> + ifStmt - > getElse ( ) - > end ( ) ) ; <nl> } <nl> <nl> void walkIfStmtPostOrder ( IfStmt * ifStmt ) { <nl> - walkPostOrder ( ifStmt - > getThen ( ) - > begin ( ) , ifStmt - > getThen ( ) - > end ( ) ) ; <nl> - walkPostOrder ( ifStmt - > getElse ( ) - > begin ( ) , ifStmt - > getElse ( ) - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walkPostOrder ( ifStmt - > getThen ( ) - > begin ( ) , <nl> + ifStmt - > getThen ( ) - > end ( ) ) ; <nl> + static_cast < SubClass * > ( this ) - > walkPostOrder ( ifStmt - > getElse ( ) - > begin ( ) , <nl> + ifStmt - > getElse ( ) - > end ( ) ) ; <nl> static_cast < SubClass * > ( this ) - > visitIfStmt ( ifStmt ) ; <nl> } <nl> <nl> mmm a / include / mlir / Transforms / Passes . h <nl> ppp b / include / mlir / Transforms / Passes . h <nl> namespace mlir { <nl> class MLFunctionPass ; <nl> class ModulePass ; <nl> <nl> - / / Loop unrolling passes . <nl> - / / / Creates a loop unrolling pass . <nl> - MLFunctionPass * createLoopUnrollPass ( int unrollFactor , int unrollFull ) ; <nl> + / / / Creates a loop unrolling pass . Default option or command - line options take <nl> + / / / effect if - 1 is passed as parameter . <nl> + MLFunctionPass * createLoopUnrollPass ( int unrollFactor = - 1 , <nl> + int unrollFull = - 1 ) ; <nl> + <nl> + / / / Creates a loop unroll jam pass to unroll jam by the specified factor . A <nl> + / / / factor of - 1 lets the pass use the default factor or the one on the command <nl> + / / / line if provided . 
<nl> + MLFunctionPass * createLoopUnrollAndJamPass ( int unrollJamFactor = - 1 ) ; <nl> <nl> / / / Replaces all ML functions in the module with equivalent CFG functions . <nl> / / / Function references are appropriately patched to refer to the newly <nl> mmm a / lib / Transforms / LoopUnroll . cpp <nl> ppp b / lib / Transforms / LoopUnroll . cpp <nl> void LoopUnroll : : runOnMLFunction ( MLFunction * f ) { <nl> <nl> / / / Unroll a for stmt . Default unroll factor is 4 . <nl> bool LoopUnroll : : runOnForStmt ( ForStmt * forStmt ) { <nl> + / / Unroll by the factor passed , if any . <nl> + if ( unrollFactor . hasValue ( ) ) <nl> + return loopUnrollByFactor ( forStmt , unrollFactor . getValue ( ) ) ; <nl> + / / Unroll by the command line factor if one was specified . <nl> + if ( clUnrollFactor . getNumOccurrences ( ) > 0 ) <nl> + return loopUnrollByFactor ( forStmt , clUnrollFactor ) ; <nl> / / Unroll completely if full loop unroll was specified . <nl> if ( clUnrollFull . getNumOccurrences ( ) > 0 | | <nl> ( unrollFull . hasValue ( ) & & unrollFull . getValue ( ) ) ) <nl> return loopUnrollFull ( forStmt ) ; <nl> <nl> - / / Unroll by the specified factor if one was specified . <nl> - if ( clUnrollFactor . getNumOccurrences ( ) > 0 ) <nl> - return loopUnrollByFactor ( forStmt , clUnrollFactor ) ; <nl> - else if ( unrollFactor . hasValue ( ) ) <nl> - return loopUnrollByFactor ( forStmt , unrollFactor . getValue ( ) ) ; <nl> - <nl> / / Unroll by four otherwise . <nl> return loopUnrollByFactor ( forStmt , 4 ) ; <nl> } <nl> new file mode 100644 <nl> index 0000000000000 . . eeab87c48e5d2 <nl> mmm / dev / null <nl> ppp b / lib / Transforms / LoopUnrollJam . cpp <nl> <nl> + / / = = = - LoopUnrollAndJam . cpp - Code to perform loop unroll jam <nl> + / / mmmmmmmmmmmmmmm - = = = / / <nl> + / / <nl> + / / Copyright 2019 The MLIR Authors . <nl> + / / <nl> + / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / you may not use this file except in compliance with the License . <nl> + / / You may obtain a copy of the License at <nl> + / / <nl> + / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / <nl> + / / Unless required by applicable law or agreed to in writing , software <nl> + / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / See the License for the specific language governing permissions and <nl> + / / limitations under the License . <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / <nl> + / / This file implements loop unroll jam for MLFunctions . Unroll and jam is a <nl> + / / transformation that improves locality , in particular , register reuse , while <nl> + / / also improving instruction level parallelism . The example below shows what it <nl> + / / does in nearly the general case . Loop unroll jam currently works if the <nl> + / / bounds of the loops inner to the loop being unroll - jammed do not depend on <nl> + / / the latter . 
<nl> + / / <nl> + / / Before After unroll - jam of i by factor 2 : <nl> + / / <nl> + / / for i , step = 2 <nl> + / / for i S1 ( i ) ; <nl> + / / S1 ; S2 ( i ) ; <nl> + / / S2 ; S1 ( i + 1 ) ; <nl> + / / for j S2 ( i + 1 ) ; <nl> + / / S3 ; for j <nl> + / / S4 ; S3 ( i , j ) ; <nl> + / / S5 ; S4 ( i , j ) ; <nl> + / / S6 ; S3 ( i + 1 , j ) <nl> + / / S4 ( i + 1 , j ) <nl> + / / S5 ( i ) ; <nl> + / / S6 ( i ) ; <nl> + / / S5 ( i + 1 ) ; <nl> + / / S6 ( i + 1 ) ; <nl> + / / <nl> + / / Note : ' if / else ' blocks are not jammed . So , if there are loops inside if <nl> + / / stmt ' s , bodies of those loops will not be jammed . <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + # include " mlir / IR / AffineExpr . h " <nl> + # include " mlir / IR / Builders . h " <nl> + # include " mlir / IR / StandardOps . h " <nl> + # include " mlir / IR / StmtVisitor . h " <nl> + # include " mlir / Transforms / Pass . h " <nl> + # include " mlir / Transforms / Passes . h " <nl> + # include " llvm / ADT / DenseMap . h " <nl> + # include " llvm / Support / CommandLine . h " <nl> + <nl> + using namespace mlir ; <nl> + using namespace llvm : : cl ; <nl> + <nl> + / / Loop unroll jam factor . <nl> + static llvm : : cl : : opt < unsigned > <nl> + clUnrollJamFactor ( " unroll - jam - factor " , llvm : : cl : : Hidden , <nl> + llvm : : cl : : desc ( " Use this unroll jam factor for all loops " <nl> + " ( default 4 ) " ) ) ; <nl> + <nl> + namespace { <nl> + / / / Loop unroll jam pass . For test purposes , this just unroll jams the first <nl> + / / / outer loop in an MLFunction . <nl> + struct LoopUnrollAndJam : public MLFunctionPass { <nl> + Optional < unsigned > unrollJamFactor ; <nl> + static const unsigned kDefaultUnrollJamFactor = 4 ; <nl> + <nl> + explicit LoopUnrollAndJam ( Optional < unsigned > unrollJamFactor ) <nl> + : unrollJamFactor ( unrollJamFactor ) { } <nl> + <nl> + void runOnMLFunction ( MLFunction * f ) override ; <nl> + bool runOnForStmt ( ForStmt * forStmt ) ; <nl> + bool loopUnrollJamByFactor ( ForStmt * forStmt , unsigned unrollJamFactor ) ; <nl> + } ; <nl> + } / / end anonymous namespace <nl> + <nl> + MLFunctionPass * mlir : : createLoopUnrollAndJamPass ( int unrollJamFactor ) { <nl> + return new LoopUnrollAndJam ( <nl> + unrollJamFactor = = - 1 ? None : Optional < unsigned > ( unrollJamFactor ) ) ; <nl> + } <nl> + <nl> + void LoopUnrollAndJam : : runOnMLFunction ( MLFunction * f ) { <nl> + / / Currently , just the outermost loop from the first loop nest is <nl> + / / unroll - and - jammed by this pass . However , runOnForStmt can be called on any <nl> + / / for Stmt . <nl> + if ( ! isa < ForStmt > ( f - > begin ( ) ) ) <nl> + return ; <nl> + <nl> + auto * forStmt = cast < ForStmt > ( f - > begin ( ) ) ; <nl> + runOnForStmt ( forStmt ) ; <nl> + } <nl> + <nl> + / / / Unroll and jam a ' for ' stmt . Default unroll jam factor is <nl> + / / / kDefaultUnrollJamFactor . Return false if nothing was done . <nl> + bool LoopUnrollAndJam : : runOnForStmt ( ForStmt * forStmt ) { <nl> + / / Unroll and jam by the factor that was passed if any . <nl> + if ( unrollJamFactor . hasValue ( ) ) <nl> + return loopUnrollJamByFactor ( forStmt , unrollJamFactor . getValue ( ) ) ; <nl> + / / Otherwise , unroll jam by the command - line factor if one was specified . <nl> + if ( clUnrollJamFactor . getNumOccurrences ( ) > 0 ) <nl> + return loopUnrollJamByFactor ( forStmt , clUnrollJamFactor ) ; <nl> + <nl> + / / Unroll and jam by four otherwise . 
<nl> + return loopUnrollJamByFactor ( forStmt , kDefaultUnrollJamFactor ) ; <nl> + } <nl> + <nl> + / / / Unrolls and jams this loop by the specified factor . <nl> + bool LoopUnrollAndJam : : loopUnrollJamByFactor ( ForStmt * forStmt , <nl> + unsigned unrollJamFactor ) { <nl> + assert ( unrollJamFactor > = 1 & & " unroll jam factor should be > = 1 " ) ; <nl> + <nl> + if ( unrollJamFactor = = 1 | | forStmt - > getStatements ( ) . empty ( ) ) <nl> + return false ; <nl> + <nl> + if ( ! forStmt - > hasConstantBounds ( ) ) <nl> + return false ; <nl> + <nl> + / / Gathers all maximal sub - blocks of statements that do not themselves include <nl> + / / a for stmt ( a statement could have a descendant for stmt though in its <nl> + / / tree ) . <nl> + class JamBlockGatherer : public StmtWalker < JamBlockGatherer > { <nl> + public : <nl> + typedef llvm : : iplist < Statement > StmtListType ; <nl> + <nl> + / / Store iterators to the first and last stmt of each sub - block found . <nl> + std : : vector < std : : pair < StmtBlock : : iterator , StmtBlock : : iterator > > subBlocks ; <nl> + <nl> + / / This is a linear time walk . <nl> + void walk ( StmtListType : : iterator Start , StmtListType : : iterator End ) { <nl> + for ( auto it = Start ; it ! = End ; ) { <nl> + auto subBlockStart = it ; <nl> + while ( it ! = End & & ! isa < ForStmt > ( it ) ) <nl> + + + it ; <nl> + if ( it ! = subBlockStart ) <nl> + / / Record the last statement ( one behind the iterator ) while not <nl> + / / changing the iterator position . <nl> + subBlocks . push_back ( { subBlockStart , ( - - it ) + + } ) ; <nl> + / / Process all for Stmts that appear next . <nl> + while ( it ! = End & & isa < ForStmt > ( it ) ) <nl> + walkForStmt ( cast < ForStmt > ( it + + ) ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + auto lb = forStmt - > getConstantLowerBound ( ) ; <nl> + auto ub = forStmt - > getConstantUpperBound ( ) ; <nl> + auto step = forStmt - > getStep ( ) ; <nl> + <nl> + int64_t tripCount = ( ub - lb + 1 ) % step = = 0 ? ( ub - lb + 1 ) / step <nl> + : ( ub - lb + 1 ) / step + 1 ; <nl> + <nl> + / / If the trip count is lower than the unroll jam factor , no unrolled body . <nl> + / / TODO ( bondhugula ) : option to specify cleanup loop unrolling . <nl> + if ( tripCount < unrollJamFactor ) <nl> + return true ; <nl> + <nl> + / / Gather all sub - blocks to jam upon the loop being unrolled . <nl> + JamBlockGatherer jbg ; <nl> + jbg . walkForStmt ( forStmt ) ; <nl> + auto & subBlocks = jbg . subBlocks ; <nl> + <nl> + / / Generate the cleanup loop if trip count isn ' t a multiple of <nl> + / / unrollJamFactor . <nl> + if ( tripCount % unrollJamFactor ) { <nl> + DenseMap < const MLValue * , MLValue * > operandMap ; <nl> + / / Insert the cleanup loop right after ' forStmt ' . <nl> + MLFuncBuilder builder ( forStmt - > getBlock ( ) , + + StmtBlock : : iterator ( forStmt ) ) ; <nl> + auto * cleanupForStmt = cast < ForStmt > ( builder . clone ( * forStmt , operandMap ) ) ; <nl> + cleanupForStmt - > setConstantLowerBound ( <nl> + lb + ( tripCount - tripCount % unrollJamFactor ) * step ) ; <nl> + } <nl> + <nl> + MLFuncBuilder b ( forStmt ) ; <nl> + forStmt - > setStep ( step * unrollJamFactor ) ; <nl> + forStmt - > setConstantUpperBound ( <nl> + lb + ( tripCount - tripCount % unrollJamFactor - 1 ) * step ) ; <nl> + <nl> + for ( auto & subBlock : subBlocks ) { <nl> + / / Builder to insert unroll - jammed bodies . Insert right at the end of <nl> + / / sub - block . <nl> + MLFuncBuilder builder ( subBlock . 
first - > getBlock ( ) , <nl> + std : : next ( subBlock . second ) ) ; <nl> + <nl> + / / Unroll and jam ( appends unrollJamFactor - 1 additional copies ) . <nl> + for ( unsigned i = 1 ; i < unrollJamFactor ; i + + ) { <nl> + DenseMap < const MLValue * , MLValue * > operandMapping ; <nl> + <nl> + / / If the induction variable is used , create a remapping to the value for <nl> + / / this unrolled instance . <nl> + if ( ! forStmt - > use_empty ( ) ) { <nl> + / / iv ' = iv + i , i = 1 to unrollJamFactor - 1 . <nl> + auto * bumpExpr = builder . getAddExpr ( builder . getDimExpr ( 0 ) , <nl> + builder . getConstantExpr ( i * step ) ) ; <nl> + auto * bumpMap = builder . getAffineMap ( 1 , 0 , { bumpExpr } , { } ) ; <nl> + auto * ivUnroll = <nl> + builder . create < AffineApplyOp > ( forStmt - > getLoc ( ) , bumpMap , forStmt ) <nl> + - > getResult ( 0 ) ; <nl> + operandMapping [ forStmt ] = cast < MLValue > ( ivUnroll ) ; <nl> + } <nl> + / / Clone the sub - block being unroll - jammed ( this doesn ' t include the last <nl> + / / stmt because subBlock . second is inclusive ) . <nl> + for ( auto it = subBlock . first ; it ! = subBlock . second ; + + it ) { <nl> + builder . clone ( * it , operandMapping ) ; <nl> + } <nl> + / / Clone the last statement of the sub - block . <nl> + builder . clone ( * subBlock . second , operandMapping ) ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 3a0d0e17fdf27 <nl> mmm / dev / null <nl> ppp b / test / Transforms / unroll - jam . mlir <nl> <nl> + / / RUN : mlir - opt % s - o - - loop - unroll - jam - unroll - jam - factor = 2 | FileCheck % s <nl> + <nl> + / / CHECK : # map0 = ( d0 ) - > ( d0 + 1 ) <nl> + <nl> + / / CHECK - LABEL : mlfunc @ unroll_jam_imperfect_nest ( ) { <nl> + mlfunc @ unroll_jam_imperfect_nest ( ) { <nl> + / / CHECK : for % i0 = 0 to 99 step 2 { <nl> + for % i = 0 to 100 { <nl> + / / CHECK : % 0 = " addi32 " ( % i0 , % i0 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : % 1 = affine_apply # map0 ( % i0 ) <nl> + / / CHECK - NEXT : % 2 = " addi32 " ( % 1 , % 1 ) : ( affineint , affineint ) - > i32 <nl> + % x = " addi32 " ( % i , % i ) : ( affineint , affineint ) - > i32 <nl> + for % j = 0 to 17 { <nl> + / / CHECK : % 3 = " addi32 " ( % i0 , % i0 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : % 4 = " addi32 " ( % 3 , % 3 ) : ( i32 , i32 ) - > i32 <nl> + / / CHECK - NEXT : % 5 = affine_apply # map0 ( % i0 ) <nl> + / / CHECK - NEXT : % 6 = " addi32 " ( % 5 , % 5 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : % 7 = " addi32 " ( % 6 , % 6 ) : ( i32 , i32 ) - > i32 <nl> + % y = " addi32 " ( % i , % i ) : ( affineint , affineint ) - > i32 <nl> + % z = " addi32 " ( % y , % y ) : ( i32 , i32 ) - > i32 <nl> + } <nl> + / / CHECK : % 8 = " addi32 " ( % i0 , % i0 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : % 9 = affine_apply # map0 ( % i0 ) <nl> + / / CHECK - NEXT : % 10 = " addi32 " ( % 9 , % 9 ) : ( affineint , affineint ) - > i32 <nl> + % w = " addi32 " ( % i , % i ) : ( affineint , affineint ) - > i32 <nl> + } / / CHECK } <nl> + / / cleanup loop . 
<nl> + / / CHECK : for % i2 = 100 to 100 { <nl> + / / CHECK - NEXT : % 11 = " addi32 " ( % i2 , % i2 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : for % i3 = 0 to 17 { <nl> + / / CHECK - NEXT : % 12 = " addi32 " ( % i2 , % i2 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : % 13 = " addi32 " ( % 12 , % 12 ) : ( i32 , i32 ) - > i32 <nl> + / / CHECK - NEXT : } <nl> + / / CHECK - NEXT : % 14 = " addi32 " ( % i2 , % i2 ) : ( affineint , affineint ) - > i32 <nl> + / / CHECK - NEXT : } <nl> + return <nl> + } <nl> mmm a / tools / mlir - opt / mlir - opt . cpp <nl> ppp b / tools / mlir - opt / mlir - opt . cpp <nl> checkParserErrors ( " check - parser - errors " , cl : : desc ( " Check for parser errors " ) , <nl> enum Passes { <nl> ConvertToCFG , <nl> LoopUnroll , <nl> + LoopUnrollAndJam , <nl> TFRaiseControlFlow , <nl> } ; <nl> <nl> static cl : : list < Passes > passList ( <nl> cl : : values ( clEnumValN ( ConvertToCFG , " convert - to - cfg " , <nl> " Convert all ML functions in the module to CFG ones " ) , <nl> clEnumValN ( LoopUnroll , " loop - unroll " , " Unroll loops " ) , <nl> + clEnumValN ( LoopUnrollAndJam , " loop - unroll - jam " , <nl> + " Unroll and jam loops " ) , <nl> clEnumValN ( TFRaiseControlFlow , " tf - raise - control - flow " , <nl> " Dynamic TensorFlow Switch / Match nodes to a CFG " ) ) ) ; <nl> <nl> OptResult parseAndPrintMemoryBuffer ( std : : unique_ptr < MemoryBuffer > buffer ) { <nl> pass = createConvertToCFGPass ( ) ; <nl> break ; <nl> case LoopUnroll : <nl> - pass = createLoopUnrollPass ( - 1 , - 1 ) ; <nl> + pass = createLoopUnrollPass ( ) ; <nl> + break ; <nl> + case LoopUnrollAndJam : <nl> + pass = createLoopUnrollAndJamPass ( ) ; <nl> break ; <nl> case TFRaiseControlFlow : <nl> pass = createRaiseTFControlFlowPass ( ) ; <nl>
Introduce loop unroll jam transformation .
tensorflow/tensorflow
6cd3502f6398456b326b58d47d5cbc421fbd2905
2019-03-29T20:07:30Z
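The comment block in LoopUnrollJam.cpp above describes unroll-and-jam in terms of statements S1..S6; the sketch below shows the same idea on a concrete nested loop. It assumes the outer trip count is a multiple of the unroll-jam factor (the pass above emits a cleanup loop otherwise), and the array names and bounds are hypothetical:

    constexpr int N = 8;   // assumed even, so no cleanup loop is needed
    constexpr int M = 4;
    float A[N][M], B[N][M];

    // Original loop nest.
    void Before() {
      for (int i = 0; i < N; ++i)
        for (int j = 0; j < M; ++j)
          A[i][j] = B[i][j] + 1.0f;
    }

    // After unroll-and-jam of i by a factor of 2: the outer loop is unrolled
    // and the two copies of the inner body are jammed into one inner loop,
    // improving register reuse across the i and i+1 iterations.
    void AfterUnrollJamBy2() {
      for (int i = 0; i < N; i += 2)
        for (int j = 0; j < M; ++j) {
          A[i][j]     = B[i][j]     + 1.0f;
          A[i + 1][j] = B[i + 1][j] + 1.0f;
        }
    }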
mmm a / xbmc / Util . cpp <nl> ppp b / xbmc / Util . cpp <nl> int CUtil : : ScanArchiveForAssociatedItems ( const std : : string & strArchivePath , <nl> const std : : vector < std : : string > & item_exts , <nl> std : : vector < std : : string > & associatedFiles ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " Scanning archive % s " , CURL : : GetRedacted ( strArchivePath ) . c_str ( ) ) ; <nl> + CLog : : LogF ( LOGDEBUG , " Scanning archive % s " , CURL : : GetRedacted ( strArchivePath ) . c_str ( ) ) ; <nl> int nItemsAdded = 0 ; <nl> CFileItemList ItemList ; <nl> <nl> mmm a / xbmc / cores / VideoPlayer / DVDCodecs / Video / DXVA . cpp <nl> ppp b / xbmc / cores / VideoPlayer / DVDCodecs / Video / DXVA . cpp <nl> bool CDXVAContext : : CreateContext ( ) <nl> if ( FAILED ( g_Windowing . Get3D11Device ( ) - > QueryInterface ( __uuidof ( ID3D11VideoDevice ) , reinterpret_cast < void * * > ( & m_service ) ) ) <nl> | | FAILED ( g_Windowing . GetImmediateContext ( ) - > QueryInterface ( __uuidof ( ID3D11VideoContext ) , reinterpret_cast < void * * > ( & m_vcontext ) ) ) ) <nl> { <nl> - CLog : : LogFunction ( LOGWARNING , __FUNCTION__ , " failed to get Video Device and Context . " ) ; <nl> + CLog : : LogF ( LOGWARNING , " failed to get Video Device and Context . " ) ; <nl> return false ; <nl> } <nl> <nl> bool CDXVAContext : : GetConfig ( const D3D11_VIDEO_DECODER_DESC * format , D3D11_VIDEO <nl> <nl> if ( FAILED ( res ) ) <nl> { <nl> - CLog : : LogFunction ( LOGNOTICE , __FUNCTION__ , " failed getting decoder configuration count . " ) ; <nl> + CLog : : LogF ( LOGNOTICE , " failed getting decoder configuration count . " ) ; <nl> return false ; <nl> } <nl> <nl> bool CDXVAContext : : GetConfig ( const D3D11_VIDEO_DECODER_DESC * format , D3D11_VIDEO <nl> D3D11_VIDEO_DECODER_CONFIG pConfig = { 0 } ; <nl> if ( FAILED ( m_service - > GetVideoDecoderConfig ( format , i , & pConfig ) ) ) <nl> { <nl> - CLog : : LogFunction ( LOGNOTICE , __FUNCTION__ , " failed getting decoder configuration . " ) ; <nl> + CLog : : LogF ( LOGNOTICE , " failed getting decoder configuration . " ) ; <nl> return false ; <nl> } <nl> <nl> bool CDXVAContext : : CreateSurfaces ( D3D11_VIDEO_DECODER_DESC format , unsigned int <nl> ID3D11Texture2D * texture = nullptr ; <nl> if ( FAILED ( pDevice - > CreateTexture2D ( & texDesc , NULL , & texture ) ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " failed creating decoder texture array . " ) ; <nl> + CLog : : LogF ( LOGERROR , " failed creating decoder texture array . " ) ; <nl> return false ; <nl> } <nl> <nl> bool CDXVAContext : : CreateSurfaces ( D3D11_VIDEO_DECODER_DESC format , unsigned int <nl> hr = m_service - > CreateVideoDecoderOutputView ( texture , & vdovDesc , & surfaces [ i ] ) ; <nl> if ( FAILED ( hr ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " failed creating surfaces . " ) ; <nl> + CLog : : LogF ( LOGERROR , " failed creating surfaces . " ) ; <nl> break ; <nl> } <nl> pContext - > ClearView ( surfaces [ i ] , clearColor , nullptr , 0 ) ; <nl> bool CDXVAContext : : CreateDecoder ( D3D11_VIDEO_DECODER_DESC * format , const D3D11_V <nl> <nl> if ( retry = = 0 ) <nl> { <nl> - CLog : : LogFunction ( LOGNOTICE , __FUNCTION__ , " hw may not support multiple decoders , releasing existing ones . " ) ; <nl> + CLog : : LogF ( LOGNOTICE , " hw may not support multiple decoders , releasing existing ones . " ) ; <nl> for ( auto it = m_decoders . begin ( ) ; it ! = m_decoders . 
end ( ) ; + + it ) <nl> { <nl> ( * it ) - > CloseDXVADecoder ( ) ; <nl> bool CDXVAContext : : CreateDecoder ( D3D11_VIDEO_DECODER_DESC * format , const D3D11_V <nl> retry + + ; <nl> } <nl> <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " failed creating decoder . " , __FUNCTION__ ) ; <nl> + CLog : : LogF ( LOGERROR , " failed creating decoder . " ) ; <nl> return false ; <nl> } <nl> <nl> ID3D11View * CDXVAOutputBuffer : : GetSRV ( unsigned idx ) <nl> HRESULT hr = g_Windowing . Get3D11Device ( ) - > CreateShaderResourceView ( pResource , & srvDesc , <nl> reinterpret_cast < ID3D11ShaderResourceView * * > ( & planes [ idx ] ) ) ; <nl> if ( FAILED ( hr ) ) <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " unable to create SRV for decoder surface ( % d ) " , plane_format ) ; <nl> + CLog : : LogF ( LOGERROR , " unable to create SRV for decoder surface ( % d ) " , plane_format ) ; <nl> <nl> SAFE_RELEASE ( pResource ) ; <nl> return planes [ idx ] ; <nl> CDXVABufferPool : : CDXVABufferPool ( ) <nl> <nl> CDXVABufferPool : : ~ CDXVABufferPool ( ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " destructing buffer pool . " ) ; <nl> + CLog : : LogF ( LOGDEBUG , " destructing buffer pool . " ) ; <nl> Reset ( ) ; <nl> } <nl> <nl> CDecoder : : CDecoder ( CProcessInfo & processInfo ) <nl> <nl> CDecoder : : ~ CDecoder ( ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " destructing decoder , % p . " , this ) ; <nl> + CLog : : LogF ( LOGDEBUG , " destructing decoder , % p . " , this ) ; <nl> g_Windowing . Unregister ( this ) ; <nl> Close ( ) ; <nl> free ( m_context - > surface ) ; <nl> void CDecoder : : Close ( ) <nl> <nl> if ( m_dxva_context ) <nl> { <nl> - CLog : : LogFunction ( LOGNOTICE , __FUNCTION__ , " closing decoder . " ) ; <nl> + CLog : : LogF ( LOGNOTICE , " closing decoder . " ) ; <nl> m_dxva_context - > Release ( this ) ; <nl> } <nl> m_dxva_context = nullptr ; <nl> CDVDVideoCodec : : VCReturn CDecoder : : Check ( AVCodecContext * avctx ) <nl> lock . Enter ( ) ; <nl> if ( m_state = = DXVA_LOST ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " device didn ' t reset in reasonable time . " ) ; <nl> + CLog : : LogF ( LOGERROR , " device didn ' t reset in reasonable time . " ) ; <nl> return CDVDVideoCodec : : VC_ERROR ; <nl> } <nl> } <nl> CDVDVideoCodec : : VCReturn CDecoder : : Check ( AVCodecContext * avctx ) <nl> { <nl> if ( ! Open ( avctx , avctx , avctx - > pix_fmt ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " decoder was not able to reset . " ) ; <nl> + CLog : : LogF ( LOGERROR , " decoder was not able to reset . " ) ; <nl> Close ( ) ; <nl> return CDVDVideoCodec : : VC_ERROR ; <nl> } <nl> CDVDVideoCodec : : VCReturn CDecoder : : Check ( AVCodecContext * avctx ) <nl> { <nl> if ( avctx - > refs > m_refs ) <nl> { <nl> - CLog : : LogFunction ( LOGWARNING , __FUNCTION__ , " number of required reference frames increased , recreating decoder . " ) ; <nl> + CLog : : LogF ( LOGWARNING , " number of required reference frames increased , recreating decoder . " ) ; <nl> Close ( ) ; <nl> return CDVDVideoCodec : : VC_FLUSHED ; <nl> } <nl> void CDecoder : : ReleaseBuffer ( uint8_t * data ) <nl> ID3D11VideoDecoderOutputView * view = reinterpret_cast < ID3D11VideoDecoderOutputView * > ( data ) ; <nl> if ( ! m_bufferPool - > IsValid ( view ) ) <nl> { <nl> - CLog : : LogFunction ( LOGWARNING , __FUNCTION__ , " return of invalid surface . " ) ; <nl> + CLog : : LogF ( LOGWARNING , " return of invalid surface . 
" ) ; <nl> } <nl> m_bufferPool - > ReturnView ( view ) ; <nl> <nl> int CDecoder : : GetBuffer ( AVCodecContext * avctx , AVFrame * pic ) <nl> ID3D11View * view = m_bufferPool - > GetView ( ) ; <nl> if ( view = = nullptr ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " no surface available . " ) ; <nl> + CLog : : LogF ( LOGERROR , " no surface available . " ) ; <nl> m_state = DXVA_LOST ; <nl> return - 1 ; <nl> } <nl> int CDecoder : : GetBuffer ( AVCodecContext * avctx , AVFrame * pic ) <nl> AVBufferRef * buffer = av_buffer_create ( pic - > data [ 3 ] , 0 , CDecoder : : FFReleaseBuffer , this , 0 ) ; <nl> if ( ! buffer ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " error creating buffer . " ) ; <nl> + CLog : : LogF ( LOGERROR , " error creating buffer . " ) ; <nl> return - 1 ; <nl> } <nl> pic - > buf [ 0 ] = buffer ; <nl> mmm a / xbmc / cores / VideoPlayer / VideoRenderers / RenderCapture . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoRenderers / RenderCapture . cpp <nl> void CRenderCaptureDX : : BeginRender ( ) <nl> <nl> if ( ! m_renderTex . Create ( m_width , m_height , 1 , D3D11_USAGE_DEFAULT , DXGI_FORMAT_B8G8R8A8_UNORM ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " CreateTexture2D ( RENDER_TARGET ) failed . " ) ; <nl> + CLog : : LogF ( LOGERROR , " CreateTexture2D ( RENDER_TARGET ) failed . " ) ; <nl> SetState ( CAPTURESTATE_FAILED ) ; <nl> return ; <nl> } <nl> <nl> if ( ! m_copyTex . Create ( m_width , m_height , 1 , D3D11_USAGE_STAGING , DXGI_FORMAT_B8G8R8A8_UNORM ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " CreateRenderTargetView failed . " ) ; <nl> + CLog : : LogF ( LOGERROR , " CreateRenderTargetView failed . " ) ; <nl> SetState ( CAPTURESTATE_FAILED ) ; <nl> return ; <nl> } <nl> void CRenderCaptureDX : : BeginRender ( ) <nl> result = pDevice - > CreateQuery ( & queryDesc , & m_query ) ; <nl> if ( FAILED ( result ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " CreateQuery failed % s " , <nl> + CLog : : LogF ( LOGERROR , " CreateQuery failed % s " , <nl> DX : : GetErrorDescription ( result ) . c_str ( ) ) ; <nl> m_asyncSupported = false ; <nl> SAFE_RELEASE ( m_query ) ; <nl> mmm a / xbmc / guilib / FFmpegImage . cpp <nl> ppp b / xbmc / guilib / FFmpegImage . cpp <nl> static int64_t mem_file_seek ( void * h , int64_t pos , int whence ) <nl> mbuf - > pos = Clamp ( ( ( int64_t ) mbuf - > pos ) + pos , mbuf - > size ) ; <nl> } <nl> else <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " Unknown seek mode : % i " , whence ) ; <nl> + CLog : : LogF ( LOGERROR , " Unknown seek mode : % i " , whence ) ; <nl> <nl> return mbuf - > pos ; <nl> } <nl> bool CFFmpegImage : : Initialize ( unsigned char * buffer , unsigned int bufSize ) <nl> uint8_t * fbuffer = ( uint8_t * ) av_malloc ( bufferSize + FF_INPUT_BUFFER_PADDING_SIZE ) ; <nl> if ( ! fbuffer ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " Could not allocate buffer " ) ; <nl> + CLog : : LogF ( LOGERROR , " Could not allocate buffer " ) ; <nl> return false ; <nl> } <nl> m_buf . data = buffer ; <nl> bool CFFmpegImage : : Initialize ( unsigned char * buffer , unsigned int bufSize ) <nl> if ( ! 
m_ioctx ) <nl> { <nl> av_free ( fbuffer ) ; <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " Could not allocate AVIOContext " ) ; <nl> + CLog : : LogF ( LOGERROR , " Could not allocate AVIOContext " ) ; <nl> return false ; <nl> } <nl> <nl> bool CFFmpegImage : : Initialize ( unsigned char * buffer , unsigned int bufSize ) <nl> if ( ! m_fctx ) <nl> { <nl> FreeIOCtx ( & m_ioctx ) ; <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " Could not allocate AVFormatContext " ) ; <nl> + CLog : : LogF ( LOGERROR , " Could not allocate AVFormatContext " ) ; <nl> return false ; <nl> } <nl> <nl> AVFrame * CFFmpegImage : : ExtractFrame ( ) <nl> { <nl> if ( ! m_fctx | | ! m_fctx - > streams [ 0 ] ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " No valid format context or stream " ) ; <nl> + CLog : : LogF ( LOGERROR , " No valid format context or stream " ) ; <nl> return nullptr ; <nl> } <nl> <nl> bool CFFmpegImage : : Decode ( unsigned char * const pixels , unsigned int width , unsi <nl> <nl> if ( ! m_pFrame | | ! m_pFrame - > data [ 0 ] ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " AVFrame member not allocated " ) ; <nl> + CLog : : LogF ( LOGERROR , " AVFrame member not allocated " ) ; <nl> return false ; <nl> } <nl> <nl> bool CFFmpegImage : : DecodeFrame ( AVFrame * frame , unsigned int width , unsigned int <nl> AVFrame * pictureRGB = av_frame_alloc ( ) ; <nl> if ( ! pictureRGB ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " AVFrame could not be allocated " ) ; <nl> + CLog : : LogF ( LOGERROR , " AVFrame could not be allocated " ) ; <nl> return false ; <nl> } <nl> <nl> bool CFFmpegImage : : DecodeFrame ( AVFrame * frame , unsigned int width , unsigned int <nl> int size = av_image_fill_arrays ( pictureRGB - > data , pictureRGB - > linesize , NULL , AV_PIX_FMT_RGB32 , width , height , 16 ) ; <nl> if ( size < 0 ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " Could not allocate AVFrame member with % i x % i pixes " , width , height ) ; <nl> + CLog : : LogF ( LOGERROR , " Could not allocate AVFrame member with % i x % i pixes " , width , height ) ; <nl> av_frame_free ( & pictureRGB ) ; <nl> return false ; <nl> } <nl> bool CFFmpegImage : : DecodeFrame ( AVFrame * frame , unsigned int width , unsigned int <nl> / / we copy the data manually later so give a chance to intrinsics ( e . g . mmx , neon ) <nl> if ( av_frame_get_buffer ( pictureRGB , 32 ) < 0 ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " Could not allocate temp buffer of size % i bytes " , size ) ; <nl> + CLog : : LogF ( LOGERROR , " Could not allocate temp buffer of size % i bytes " , size ) ; <nl> av_frame_free ( & pictureRGB ) ; <nl> return false ; <nl> } <nl> bool CFFmpegImage : : DecodeFrame ( AVFrame * frame , unsigned int width , unsigned int <nl> int minPitch = std : : min ( ( int ) pitch , pictureRGB - > linesize [ 0 ] ) ; <nl> if ( minPitch < 0 ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " negative pitch or height " ) ; <nl> + CLog : : LogF ( LOGERROR , " negative pitch or height " ) ; <nl> av_frame_free ( & pictureRGB ) ; <nl> return false ; <nl> } <nl> mmm a / xbmc / rendering / dx / DeviceResources . cpp <nl> ppp b / xbmc / rendering / dx / DeviceResources . 
cpp <nl> using namespace concurrency ; <nl> # else <nl> # define breakOnDebug <nl> # endif <nl> - # define LOG_HR ( hr ) CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " function call at line % d ends with error : % s " , __LINE__ , DX : : GetErrorDescription ( hr ) . c_str ( ) ) ; <nl> + # define LOG_HR ( hr ) CLog : : LogF ( LOGERROR , " function call at line % d ends with error : % s " , __LINE__ , DX : : GetErrorDescription ( hr ) . c_str ( ) ) ; <nl> # define CHECK_ERR ( ) if ( FAILED ( hr ) ) { LOG_HR ( hr ) ; breakOnDebug ; return ; } <nl> # define RETURN_ERR ( ret ) if ( FAILED ( hr ) ) { LOG_HR ( hr ) ; breakOnDebug ; return ( # # ret ) ; } <nl> <nl> void DX : : DeviceResources : : CreateDeviceIndependentResources ( ) <nl> / / Configures the Direct3D device , and stores handles to it and the device context . <nl> void DX : : DeviceResources : : CreateDeviceResources ( ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " creating DirectX 11 device . " ) ; <nl> + CLog : : LogF ( LOGDEBUG , " creating DirectX 11 device . " ) ; <nl> <nl> UINT creationFlags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT ; <nl> # if defined ( _DEBUG ) <nl> void DX : : DeviceResources : : CreateDeviceResources ( ) <nl> <nl> if ( FAILED ( hr ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " unable to create hardware device , trying to create WARP devices then . " ) ; <nl> + CLog : : LogF ( LOGERROR , " unable to create hardware device , trying to create WARP devices then . " ) ; <nl> hr = D3D11CreateDevice ( <nl> nullptr , <nl> D3D_DRIVER_TYPE_WARP , / / Create a WARP device instead of a hardware device . <nl> void DX : : DeviceResources : : CreateDeviceResources ( ) <nl> ) ; <nl> if ( FAILED ( hr ) ) <nl> { <nl> - CLog : : LogFunction ( LOGFATAL , __FUNCTION__ , " unable to create WARP device . Rendering in not possible . " ) ; <nl> + CLog : : LogF ( LOGFATAL , " unable to create WARP device . Rendering in not possible . " ) ; <nl> CHECK_ERR ( ) ; <nl> } <nl> } <nl> void DX : : DeviceResources : : CreateDeviceResources ( ) <nl> DXGI_ADAPTER_DESC aDesc ; <nl> m_adapter - > GetDesc ( & aDesc ) ; <nl> <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " device is created on adapter ' % S ' with feature level % 04x . " , aDesc . Description , m_d3dFeatureLevel ) ; <nl> + CLog : : LogF ( LOGDEBUG , " device is created on adapter ' % S ' with feature level % 04x . " , aDesc . Description , m_d3dFeatureLevel ) ; <nl> <nl> m_bDeviceCreated = true ; <nl> } <nl> <nl> void DX : : DeviceResources : : ReleaseBackBuffer ( ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " release buffers . " ) ; <nl> + CLog : : LogF ( LOGDEBUG , " release buffers . " ) ; <nl> <nl> / / Clear the previous window size specific context . <nl> ID3D11RenderTargetView * nullViews [ ] = { nullptr } ; <nl> void DX : : DeviceResources : : CreateBackBuffer ( ) <nl> if ( ! m_bDeviceCreated ) <nl> return ; <nl> <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " create buffers . " ) ; <nl> + CLog : : LogF ( LOGDEBUG , " create buffers . " ) ; <nl> <nl> / / Get swap chain back buffer . <nl> ComPtr < ID3D11Texture2D > backBuffer ; <nl> void DX : : DeviceResources : : CreateBackBuffer ( ) <nl> / / Create back buffer texture from swap chain texture <nl> if ( ! m_backBufferTex . Acquire ( backBuffer . Get ( ) ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " failed to create render target . " ) ; <nl> + CLog : : LogF ( LOGERROR , " failed to create render target . 
" ) ; <nl> return ; <nl> } <nl> <nl> void DX : : DeviceResources : : ResizeBuffers ( ) <nl> if ( ! m_bDeviceCreated ) <nl> return ; <nl> <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " resize buffers . " ) ; <nl> + CLog : : LogF ( LOGDEBUG , " resize buffers . " ) ; <nl> <nl> bool bHWStereoEnabled = RENDER_STEREO_MODE_HARDWAREBASED = = g_graphicsContext . GetStereoMode ( ) ; <nl> bool windowed = true ; <nl> void DX : : DeviceResources : : ResizeBuffers ( ) <nl> if ( FAILED ( hr ) & & bHWStereoEnabled ) <nl> { <nl> / / switch to stereo mode failed , create mono swapchain <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " creating stereo swap chain failed with error . " ) ; <nl> - CLog : : LogFunction ( LOGNOTICE , __FUNCTION__ , " fallback to monoscopic mode . " ) ; <nl> + CLog : : LogF ( LOGERROR , " creating stereo swap chain failed with error . " ) ; <nl> + CLog : : LogF ( LOGNOTICE , " fallback to monoscopic mode . " ) ; <nl> <nl> swapChainDesc . Stereo = false ; <nl> bHWStereoEnabled = false ; <nl> void DX : : DeviceResources : : FinishCommandList ( bool bExecute ) const <nl> ComPtr < ID3D11CommandList > pCommandList ; <nl> if ( FAILED ( m_deferrContext - > FinishCommandList ( true , & pCommandList ) ) ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " failed to finish command queue . " ) ; <nl> + CLog : : LogF ( LOGERROR , " failed to finish command queue . " ) ; <nl> return ; <nl> } <nl> <nl> void DX : : DeviceResources : : SetLogicalSize ( float width , float height ) <nl> # endif <nl> return ; <nl> <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " receive changing logical size to % f x % f " , width , height ) ; <nl> + CLog : : LogF ( LOGDEBUG , " receive changing logical size to % f x % f " , width , height ) ; <nl> <nl> if ( m_logicalSize . Width ! = width | | m_logicalSize . Height ! = height ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " change logical size to % f x % f " , width , height ) ; <nl> + CLog : : LogF ( LOGDEBUG , " change logical size to % f x % f " , width , height ) ; <nl> <nl> m_logicalSize = Size ( width , height ) ; <nl> <nl> void DX : : DeviceResources : : SetMonitor ( HMONITOR monitor ) const <nl> if ( currentDesc . AdapterLuid . HighPart ! = foundDesc . AdapterLuid . HighPart <nl> | | currentDesc . AdapterLuid . LowPart ! = foundDesc . AdapterLuid . LowPart ) <nl> { <nl> - CLog : : LogFunction ( LOGDEBUG , __FUNCTION__ , " selected % S adapter . " , foundDesc . Description ) ; <nl> + CLog : : LogF ( LOGDEBUG , " selected % S adapter . " , foundDesc . Description ) ; <nl> <nl> / / adapter is changed , ( re ) init hooks into new driver <nl> g_Windowing . InitHooks ( output . Get ( ) ) ; <nl> mmm a / xbmc / windowing / windows / WinSystemWin32 . cpp <nl> ppp b / xbmc / windowing / windows / WinSystemWin32 . cpp <nl> const MONITOR_DETAILS * CWinSystemWin32 : : GetMonitor ( int screen ) const <nl> } <nl> else <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " no monitor found for screen % i " , screen ) ; <nl> + CLog : : LogF ( LOGERROR , " no monitor found for screen % i " , screen ) ; <nl> return nullptr ; <nl> } <nl> } <nl> RECT CWinSystemWin32 : : ScreenRect ( int screen ) const <nl> <nl> if ( ! details ) <nl> { <nl> - CLog : : LogFunction ( LOGERROR , __FUNCTION__ , " no monitor found for screen % i " , screen ) ; <nl> + CLog : : LogF ( LOGERROR , " no monitor found for screen % i " , screen ) ; <nl> } <nl> <nl> DEVMODEW sDevMode ; <nl>
[ log ] replace LogFunction ( . . . , __FUNCTION__ , . . . ) with LogF macro
xbmc/xbmc
06bbeadc8ad0dbb92d1027ee31d4d49d9053d506
2017-08-15T11:52:56Z
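The commit above folds the `__FUNCTION__` argument into the logging call itself, so call sites only pass a level and a message. A minimal sketch of how such a `LogF`-style macro can work, assuming a printf-style backend (the `log_print` and `LOGF` names are illustrative, not Kodi's actual `CLog` API):

```cpp
#include <cstdarg>
#include <cstdio>

// Backend that takes the calling function's name explicitly.
static void log_print(int level, const char* function, const char* format, ...)
{
  va_list args;
  va_start(args, format);
  std::fprintf(stderr, "level %d, %s: ", level, function);
  std::vfprintf(stderr, format, args);
  std::fputc('\n', stderr);
  va_end(args);
}

// The macro injects __FUNCTION__ at the expansion site, so callers no longer
// pass it by hand -- the same shape as the CLog::LogF calls in the diff above.
#define LOGF(level, ...) log_print((level), __FUNCTION__, __VA_ARGS__)

int main()
{
  LOGF(0, "creating DirectX 11 device.");
  LOGF(1, "no monitor found for screen %i", 2);
  return 0;
}
```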
mmm a / editor / plugins / animation_state_machine_editor . cpp <nl> ppp b / editor / plugins / animation_state_machine_editor . cpp <nl> void AnimationNodeStateMachineEditor : : _state_machine_pos_draw ( ) { <nl> <nl> float len = MAX ( 0 . 0001 , playback - > get_current_length ( ) ) ; <nl> <nl> - float pos = CLAMP ( playback - > get_current_play_pos ( ) , 0 , len ) ; <nl> + float pos = CLAMP ( play_pos , 0 , len ) ; <nl> float c = pos / len ; <nl> Color fg = get_color ( " font_color " , " Label " ) ; <nl> Color bg = fg ; <nl> void AnimationNodeStateMachineEditor : : _notification ( int p_what ) { <nl> bool is_playing = false ; <nl> StringName current_node ; <nl> StringName blend_from_node ; <nl> - float play_pos = 0 ; <nl> + play_pos = 0 ; <nl> <nl> if ( playback . is_valid ( ) ) { <nl> tp = playback - > get_travel_path ( ) ; <nl> void AnimationNodeStateMachineEditor : : _notification ( int p_what ) { <nl> state_machine_play_pos - > update ( ) ; <nl> } <nl> <nl> + { <nl> + if ( current_node ! = StringName ( ) & & state_machine - > has_node ( current_node ) ) { <nl> + <nl> + String next = current_node ; <nl> + Ref < AnimationNodeStateMachine > anodesm = state_machine - > get_node ( next ) ; <nl> + Ref < AnimationNodeStateMachinePlayback > current_node_playback ; <nl> + <nl> + while ( anodesm . is_valid ( ) ) { <nl> + current_node_playback = AnimationTreeEditor : : get_singleton ( ) - > get_tree ( ) - > get ( AnimationTreeEditor : : get_singleton ( ) - > get_base_path ( ) + next + " / playback " ) ; <nl> + next + = " / " + current_node_playback - > get_current_node ( ) ; <nl> + anodesm = anodesm - > get_node ( current_node_playback - > get_current_node ( ) ) ; <nl> + } <nl> + <nl> + / / when current_node is a state machine , use playback of current_node to set play_pos <nl> + if ( current_node_playback . is_valid ( ) ) <nl> + play_pos = current_node_playback - > get_current_play_pos ( ) ; <nl> + } <nl> + } <nl> + <nl> if ( last_play_pos ! = play_pos ) { <nl> <nl> last_play_pos = play_pos ; <nl> mmm a / editor / plugins / animation_state_machine_editor . h <nl> ppp b / editor / plugins / animation_state_machine_editor . h <nl> class AnimationNodeStateMachineEditor : public AnimationTreeNodeEditorPlugin { <nl> StringName last_current_node ; <nl> Vector < StringName > last_travel_path ; <nl> float last_play_pos ; <nl> + float play_pos ; <nl> <nl> float error_time ; <nl> String error_text ; <nl>
Show play position of sub state machine
godotengine/godot
771fbd282a3e98aa487878b9b6fac8d891074e65
2019-01-13T18:00:33Z
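The change above keeps descending into nested state machines until it reaches the innermost playback, then draws that playback's position. A rough sketch of the same descent using simplified stand-in types (not Godot's real `AnimationNodeStateMachine` classes):

```cpp
#include <map>
#include <memory>
#include <string>

// Stand-in types -- just enough structure to show the walk.
struct Playback {
  std::string current_node;       // name of the state currently playing
  float current_play_pos = 0.0f;  // position within that state's animation
};

struct StateMachine {
  std::map<std::string, std::shared_ptr<StateMachine>> nested;  // sub state machines by name
  Playback playback;                                            // this machine's playback state
};

// Follow the chain of "current node" names into nested state machines and
// return the play position of the innermost one.
float innermost_play_pos(const StateMachine& root)
{
  const StateMachine* sm = &root;
  float pos = sm->playback.current_play_pos;
  for (;;) {
    auto it = sm->nested.find(sm->playback.current_node);
    if (it == sm->nested.end() || !it->second)
      break;  // current node is a plain animation, stop here
    sm = it->second.get();
    pos = sm->playback.current_play_pos;
  }
  return pos;
}
```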
mmm a / folly / fibers / Baton . h <nl> ppp b / folly / fibers / Baton . h <nl> class Baton { <nl> * Puts active fiber to sleep . Returns when post is called or the deadline <nl> * expires . <nl> * <nl> - * @ param timeout Baton will be automatically awaken if deadline expires <nl> + * @ param deadline Baton will be automatically awaken if deadline expires <nl> * <nl> * @ return true if was posted , false if timeout expired <nl> * / <nl> class Baton { <nl> * Puts active fiber to sleep . Returns when post is called or the deadline <nl> * expires . <nl> * <nl> - * @ param timeout Baton will be automatically awaken if deadline expires <nl> + * @ param deadline Baton will be automatically awaken if deadline expires <nl> * @ param mainContextFunc this function is immediately executed on the main <nl> * context . <nl> * <nl> class Baton { <nl> * Puts active fiber to sleep . Returns when post is called or the deadline <nl> * expires . <nl> * <nl> - * @ param timeout Baton will be automatically awaken if deadline expires <nl> + * @ param deadline Baton will be automatically awaken if deadline expires <nl> * @ param mainContextFunc this function is immediately executed on the main <nl> * context . <nl> * <nl> mmm a / folly / fibers / FiberManagerInternal . h <nl> ppp b / folly / fibers / FiberManagerInternal . h <nl> class FiberManager : public : : folly : : Executor { <nl> / * * <nl> * Initializes , but doesn ' t start FiberManager loop <nl> * <nl> - * @ param loopController <nl> + * @ param loopController A LoopController object <nl> * @ param options FiberManager options <nl> * / <nl> explicit FiberManager ( <nl> class FiberManager : public : : folly : : Executor { <nl> / * * <nl> * Initializes , but doesn ' t start FiberManager loop <nl> * <nl> - * @ param loopController <nl> + * @ param loopController A LoopController object <nl> * @ param options FiberManager options <nl> * @ tparam LocalT only local of this type may be stored on fibers . <nl> * Locals of other types will be considered thread - locals . <nl> class FiberManager : public : : folly : : Executor { <nl> * Sets exception callback which will be called if any of the tasks throws an <nl> * exception . <nl> * <nl> - * @ param ec <nl> + * @ param ec An ExceptionCallback object . <nl> * / <nl> void setExceptionCallback ( ExceptionCallback ec ) ; <nl> <nl> mmm a / folly / fibers / Promise . h <nl> ppp b / folly / fibers / Promise . h <nl> class Promise { <nl> / * * <nl> * Fulfill the promise with a given try <nl> * <nl> - * @ param t <nl> + * @ param t A Try with either a value or an error . <nl> * / <nl> void setTry ( folly : : Try < T > & & t ) ; <nl> <nl> mmm a / folly / io / async / EventBase . h <nl> ppp b / folly / io / async / EventBase . h <nl> class EventBase : public TimeoutManager , <nl> * Setup execution observation / instrumentation for every EventHandler <nl> * executed in this EventBase . <nl> * <nl> - * @ param executionObserver EventHandle ' s execution observer . <nl> + * @ param observer EventHandle ' s execution observer . <nl> * / <nl> void setExecutionObserver ( ExecutionObserver * observer ) { <nl> executionObserver_ = observer ; <nl>
Fix some - Wdocumentation errors in folly
facebook/folly
73d254b5900db163ad0a30292516a429d1958921
2020-09-24T17:21:06Z
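The edits above address clang's `-Wdocumentation` warning, which fires when a Doxygen `@param` names something that is not actually a parameter of the documented function (or leaves a parameter undescribed). A small illustrative example of the corrected shape; `wait_until` here is a stand-in, not folly's Baton API:

```cpp
#include <chrono>

/**
 * Puts the caller to sleep until post() is called or the deadline expires.
 *
 * @param deadline Absolute time point at which the wait gives up.
 *
 * @return true if posted before the deadline, false if the timeout expired.
 */
bool wait_until(std::chrono::steady_clock::time_point deadline)
{
  (void)deadline;  // placeholder body; the point is the @param matching the name
  return false;
}
```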
mmm a / CNTK . sln <nl> ppp b / CNTK . sln <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " CPPEvalClient " , " Examples \ E <nl> EndProjectSection <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " BrainScriptTests " , " Tests \ UnitTests \ BrainScriptTests \ BrainScriptTests . vcxproj " , " { 9F999212 - AFC5 - 4EAC - AA78 - F7247D46C456 } " <nl> + ProjectSection ( ProjectDependencies ) = postProject <nl> + { 86883653 - 8A61 - 4038 - 81A0 - 2379FAE4200A } = { 86883653 - 8A61 - 4038 - 81A0 - 2379FAE4200A } <nl> + EndProjectSection <nl> EndProject <nl> Global <nl> GlobalSection ( SolutionConfigurationPlatforms ) = preSolution <nl>
Integrate eldak / fixingBSTestsDependencies into master
microsoft/CNTK
bdad2987f01143b9c38daf398790c9f25a569a30
2016-08-03T17:23:02Z
mmm a / lib / IRGen / IRGenSIL . cpp <nl> ppp b / lib / IRGen / IRGenSIL . cpp <nl> void IRGenSILFunction : : visitUncheckedAddrCastInst ( <nl> setLoweredAddress ( SILValue ( i , 0 ) , result ) ; <nl> } <nl> <nl> - static void emitValueBitCast ( IRGenFunction & IGF , <nl> + static void emitValueBitCast ( IRGenSILFunction & IGF , <nl> SourceLoc loc , <nl> Explosion & in , <nl> const LoadableTypeInfo & inTI , <nl> static void emitValueBitCast ( IRGenFunction & IGF , <nl> / / Unfortunately , we can ' t check this invariant until we get to IRGen , since <nl> / / the AST and SIL don ' t know anything about type layout . <nl> if ( inTI . getFixedSize ( ) ! = outTI . getFixedSize ( ) ) { <nl> - IGF . unimplemented ( loc , " bitcast between types of different size " ) ; <nl> + <nl> + / / We can hit this case in specialized functions even for correct user code . <nl> + / / If the user dynamically checks for correct type sizes in the generic <nl> + / / function , a specialized function can contain the ( not executed ) bitcast <nl> + / / with mismatching fixed sizes . <nl> + / / Usually llvm can eliminate this code again because the user ' s safety <nl> + / / check should be constant foldable on llvm level . <nl> + llvm : : BasicBlock * failBB = IGF . getFailBB ( ) ; <nl> + IGF . Builder . CreateBr ( failBB ) ; <nl> + llvm : : BasicBlock * contBB = llvm : : BasicBlock : : Create ( IGF . IGM . getLLVMContext ( ) ) ; <nl> + IGF . Builder . emitBlock ( contBB ) ; <nl> in . claimAll ( ) ; <nl> for ( auto schema : outTI . getSchema ( out . getKind ( ) ) ) <nl> out . add ( llvm : : UndefValue : : get ( schema . getScalarType ( ) ) ) ; <nl> mmm a / test / IRGen / bitcast_different_size . sil <nl> ppp b / test / IRGen / bitcast_different_size . sil <nl> <nl> - / / RUN : % swift - target x86_64 - apple - macosx10 . 9 - emit - ir % s - verify <nl> + / / RUN : % swift - target x86_64 - apple - macosx10 . 9 - emit - ir % s - verify | FileCheck % s <nl> <nl> sil_stage canonical <nl> <nl> import Swift <nl> <nl> - sil @ bitcast_different_size : $ @ thin ( Int32 ) - > Int64 { <nl> + / / CHECK - LABEL : define i64 @ bitcast_different_size1 <nl> + <nl> + sil @ bitcast_different_size1 : $ @ thin ( Int32 ) - > Int64 { <nl> entry ( % i : $ Int32 ) : <nl> - % o = unchecked_trivial_bit_cast % i : $ Int32 to $ Int64 / / expected - error { { bitcast between types of different size } } <nl> - % p = unchecked_ref_bit_cast % i : $ Int32 to $ Int64 / / expected - error { { bitcast between types of different size } } <nl> + / / CHECK : ret { { . * } } undef <nl> + / / CHECK : call { { . * } } trap <nl> + % o = unchecked_trivial_bit_cast % i : $ Int32 to $ Int64 <nl> return % o : $ Int64 <nl> } <nl> + <nl> + / / CHECK - LABEL : define i64 @ bitcast_different_size2 <nl> + <nl> + sil @ bitcast_different_size2 : $ @ thin ( Int32 ) - > Int64 { <nl> + entry ( % i : $ Int32 ) : <nl> + / / CHECK : ret { { . * } } undef <nl> + / / CHECK : call { { . * } } trap <nl> + % p = unchecked_ref_bit_cast % i : $ Int32 to $ Int64 <nl> + return % p : $ Int64 <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 7f4a2842887f <nl> mmm / dev / null <nl> ppp b / test / IRGen / bitcast_specialization . swift <nl> <nl> + / / RUN : % swift - target x86_64 - apple - macosx10 . 9 - O % s <nl> + <nl> + / / This is a compile - only test . It checks that the compiler does not crash for <nl> + / / a ( not executed ) bitcast with different sizes . This appears in the <nl> + / / specialized version fo myDictionaryBridge . 
<nl> + / / < rdar : / / problem / 17821040 > <nl> + <nl> + / / A miminized version of _dictionaryBridgeToObjectiveC in the stdlib <nl> + public func myDictionaryBridge < <nl> + SrcType , DestType <nl> + > ( <nl> + source : Dictionary < SrcType , Int > , keyBridgesDirectly : Bool <nl> + ) - > DestType ? { <nl> + <nl> + for ( key , value ) in source { <nl> + if keyBridgesDirectly { <nl> + var bridgedKey = unsafeBitCast ( key , DestType . self ) <nl> + return bridgedKey <nl> + } <nl> + } <nl> + return nil <nl> + } <nl> + <nl> + var dict1 = Dictionary < String , Int > ( ) <nl> + <nl> + var res : Int ? = myDictionaryBridge ( dict1 , false ) <nl> + <nl>
Generate a runtime fail for bitcasts with different sizes instead of a failed assert .
apple/swift
a124e3239db1f37dc35567f35beca775d728dc14
2014-07-30T09:23:32Z
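The test added above relies on the pattern described in the new IRGen comment: generic code guards the bitcast with a size check, so a specialized copy may contain a cast between differently-sized types that is never executed and that the optimizer folds away. A rough C++ analogue of that user-level pattern, using a memcpy-based cast (all names are illustrative, not Swift or stdlib APIs):

```cpp
#include <cstring>
#include <cstdio>

// Generic function: the cast is guarded by a size check. When instantiated
// ("specialized") with mismatching types, the guard is a constant `false`,
// so the cast below it is dead code the optimizer removes -- the analogue of
// the trap branch IRGen now emits instead of rejecting the whole function.
template <typename Src, typename Dst>
bool bridge(const Src& value, Dst& out)
{
  if (sizeof(Src) != sizeof(Dst))
    return false;                          // sizes differ: never reach the cast
  std::memcpy(&out, &value, sizeof(Dst));  // the "bitcast", only when sizes match
  return true;
}

int main()
{
  long long wide = 42;
  long long same = 0;
  int narrow = 0;
  std::printf("same size:      %d\n", bridge(wide, same));    // performs the cast
  std::printf("different size: %d\n", bridge(wide, narrow));  // guard fails, no cast
  return 0;
}
```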
mmm a / src / core / ext / filters / client_channel / client_channel_channelz . cc <nl> ppp b / src / core / ext / filters / client_channel / client_channel_channelz . cc <nl> RefCountedPtr < ChannelNode > ClientChannelNode : : MakeClientChannelNode ( <nl> <nl> ClientChannelSubchannelNode : : ClientChannelSubchannelNode ( <nl> size_t channel_tracer_max_nodes , grpc_subchannel * subchannel ) <nl> - : SubchannelNode ( channel_tracer_max_nodes ) , subchannel_ ( subchannel ) { <nl> - target_ = <nl> - UniquePtr < char > ( gpr_strdup ( grpc_subchannel_get_target ( subchannel_ ) ) ) ; <nl> - } <nl> + : SubchannelNode ( channel_tracer_max_nodes ) , <nl> + subchannel_ ( subchannel ) , <nl> + target_ ( UniquePtr < char > ( <nl> + gpr_strdup ( grpc_subchannel_get_target ( subchannel_ ) ) ) ) { } <nl> <nl> void ClientChannelSubchannelNode : : PopulateTarget ( grpc_json * json ) { <nl> GPR_ASSERT ( target_ . get ( ) ! = nullptr ) ; <nl> mmm a / src / core / lib / channel / channelz . h <nl> ppp b / src / core / lib / channel / channelz . h <nl> class ChannelNodePeer ; <nl> } <nl> <nl> / / base class for all channelz entities <nl> - class ChannelzBaseNode : public RefCounted < ChannelzBaseNode > { <nl> + class BaseNode : public RefCounted < BaseNode > { <nl> public : <nl> - ChannelzBaseNode ( ) { } <nl> - virtual ~ ChannelzBaseNode ( ) { } <nl> + BaseNode ( ) { } <nl> + virtual ~ BaseNode ( ) { } <nl> + <nl> private : <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW <nl> class ChannelzBaseNode : public RefCounted < ChannelzBaseNode > { <nl> <nl> / / Handles channelz bookkeeping for sockets <nl> / / TODO ( ncteisen ) : implement in subsequent PR . <nl> - class SocketNode : public ChannelzBaseNode { <nl> + class SocketNode : public BaseNode { <nl> public : <nl> - SocketNode ( ) : ChannelzBaseNode ( ) { } <nl> + SocketNode ( ) : BaseNode ( ) { } <nl> ~ SocketNode ( ) override { } <nl> + <nl> private : <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW <nl> class SocketNode : public ChannelzBaseNode { <nl> / / - perform common rendering . <nl> / / <nl> / / This class also defines some fat interfaces so that its children can <nl> - / / implement the functionality different . For example , querying the <nl> - / / connectivity state looks different for channels and subchannels , and does <nl> - / / not make sense for servers . So servers will not override , and channels and <nl> - / / subchannels will override with their own way to query connectivity state . <nl> - class CallCountingBase : public ChannelzBaseNode { <nl> + / / implement the functionality differently . For example , querying the <nl> + / / connectivity state looks different for channels than for subchannels , and <nl> + / / does not make sense for servers . So servers will not override , and channels <nl> + / / and subchannels will override with their own way to query connectivity state . 
<nl> + class CallCountingBase : public BaseNode { <nl> public : <nl> CallCountingBase ( size_t channel_tracer_max_nodes ) ; <nl> ~ CallCountingBase ( ) override ; <nl> class ServerNode : public CallCountingBase { <nl> ServerNode ( size_t channel_tracer_max_nodes ) <nl> : CallCountingBase ( channel_tracer_max_nodes ) { } <nl> ~ ServerNode ( ) override { } <nl> + <nl> private : <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW <nl> class SubchannelNode : public CallCountingBase { <nl> private : <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE <nl> GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW <nl> - <nl> + <nl> intptr_t subchannel_uuid_ ; <nl> } ; <nl> <nl>
reviewer feedback
grpc/grpc
ca32a8a85286ae0c9c94c3eaeaee3100d0304f99
2018-07-19T23:59:50Z
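The reworded comment in the diff above describes a "fat interface": a base class declaring hooks that only some children implement, since connectivity state applies to channels and subchannels but not to servers. A simplified sketch of that shape, with stand-in class names rather than gRPC's channelz types:

```cpp
#include <iostream>
#include <string>

class CallCountingBaseSketch {
 public:
  virtual ~CallCountingBaseSketch() = default;

  // Children with a connectivity state override this; servers keep the no-op
  // default because the concept does not apply to them.
  virtual void PopulateConnectivityState(std::string* json_out) const { (void)json_out; }
};

class ChannelNodeSketch : public CallCountingBaseSketch {
 public:
  void PopulateConnectivityState(std::string* json_out) const override {
    *json_out += "\"state\":\"READY\"";  // channels report their own state
  }
};

class ServerNodeSketch : public CallCountingBaseSketch {
  // No override: connectivity state makes no sense for a server node.
};

int main()
{
  ChannelNodeSketch channel;
  ServerNodeSketch server;
  std::string json;
  channel.PopulateConnectivityState(&json);
  server.PopulateConnectivityState(&json);  // inherited no-op
  std::cout << json << "\n";
  return 0;
}
```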
similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . Assert . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . Assert . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . QueueBase . from_list . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . QueueBase . from_list . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . TFRecordReader . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . TFRecordReader . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . add_check_numerics_ops . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . add_check_numerics_ops . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . all_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . all_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . argmax . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . argmax . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . argmin . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . argmin . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . assert_type . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . assert_type . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . batch_ifft . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . batch_ifft . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . concat . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . concat . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . distributions . Chi2 . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . distributions . Chi2 . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . distributions . MultivariateNormal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . distributions . MultivariateNormal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . ffmpeg . encode_audio . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . ffmpeg . encode_audio . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . 
contrib . layers . summarize_activation . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . layers . summarize_activation . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . layers . variance_scaling_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . layers . variance_scaling_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . NanLossDuringTrainingError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . learn . NanLossDuringTrainingError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . set_difference . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . metrics . set_difference . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . streaming_accuracy . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . metrics . streaming_accuracy . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . util . constant_value . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . util . constant_value . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . util . ops_used_by_graph_def . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . util . ops_used_by_graph_def . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . cos . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . cos . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . count_up_to . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . count_up_to . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . device . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . device . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . erfc . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . erfc . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . errors . UnknownError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . errors . UnknownError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . get_default_session . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . get_default_session . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . gradients . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . gradients . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . group . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . group . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . identity . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . identity . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . ifft3d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . ifft3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . image . adjust_saturation . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . adjust_saturation . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . image . pad_to_bounding_box . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . pad_to_bounding_box . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . image . random_saturation . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . random_saturation . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . import_graph_def . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . import_graph_def . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . inv . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . inv . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . logical_and . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . logical_and . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . merge_all_summaries . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . merge_all_summaries . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . name_scope . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . name_scope . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . avg_pool . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . avg_pool . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . conv3d . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . conv3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . depthwise_conv2d_native . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . depthwise_conv2d_native . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . nn . elu . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . elu . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . max_pool . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . max_pool . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . op_scope . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . op_scope . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . pad . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . pad . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reduce_join . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . reduce_join . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . reduce_sum . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . reduce_sum . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . scatter_update . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . scatter_update . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . select . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . select . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_concat . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . sparse_concat . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_reset_shape . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . sparse_reset_shape . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . sparse_segment_sum . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . sparse_segment_sum . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . test . get_temp_dir . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . test . get_temp_dir . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . test . main . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . test . main . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . tile . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . tile . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . to_float . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . to_float . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . train . LooperThread . loop . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . LooperThread . loop . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . Optimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . Optimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . export_meta_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . export_meta_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . uniform_unit_scaling_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . uniform_unit_scaling_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . variable_op_scope . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . variable_op_scope . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . where . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . where . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . while_loop . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . while_loop . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . DType . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . DType . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . Graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . Graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . as_dtype . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . as_dtype . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . check_numerics . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . check_numerics . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . clip_by_norm . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . clip_by_norm . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . distributions . DirichletMultinomial . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . distributions . DirichletMultinomial . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . layers . l2_regularizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . layers . l2_regularizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . learn . TensorFlowRegressor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . TensorFlowRegressor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . extract_dask_labels . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . extract_dask_labels . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . util . stripped_op_list_for_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . util . stripped_op_list_for_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . cross . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . cross . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . expand_dims . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . expand_dims . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . fft . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . fft . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . gather_nd . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . gather_nd . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . greater . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . greater . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . is_finite . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . is_finite . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . is_strictly_increasing . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . is_strictly_increasing . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . lgamma . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . lgamma . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . load_op_library . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . load_op_library . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . log . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . log . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . matrix_solve . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . matrix_solve . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . minimum . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . minimum . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . mod . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . mod . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . nn . conv2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . conv2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . softplus . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . softplus . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . nn . uniform_candidate_sampler . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . uniform_candidate_sampler . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . nn . weighted_cross_entropy_with_logits . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . weighted_cross_entropy_with_logits . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . one_hot . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . one_hot . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . pack . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . pack . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . rsqrt . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . rsqrt . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . 
segment_min . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . segment_min . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . shape_n . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . shape_n . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . slice . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . slice . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . squeeze . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . squeeze . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . sub . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . sub . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . test . is_built_with_cuda . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . test . is_built_with_cuda . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . train . SessionManager . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . SessionManager . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . get_checkpoint_state . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . get_checkpoint_state . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . train . input_producer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . input_producer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . shuffle_batch_join . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . shuffle_batch_join . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . start_queue_runners . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . start_queue_runners . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . update_checkpoint_state . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . update_checkpoint_state . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . unique . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . unique . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . verify_tensor_all_finite . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . 
verify_tensor_all_finite . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . zeros_like . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . zeros_like . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . FixedLenFeature . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . FixedLenFeature . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . Variable . from_proto . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . Variable . from_proto . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . batch_matrix_diag_part . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . batch_matrix_diag_part . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . case . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . case . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . cholesky_solve . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . cholesky_solve . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . copy_graph . copy_op_to_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . copy_graph . copy_op_to_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . copy_graph . get_copied_op . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . copy_graph . get_copied_op . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . distributions . normal_conjugates_known_sigma_posterior . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . distributions . normal_conjugates_known_sigma_posterior . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . ffmpeg . decode_audio . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . ffmpeg . decode_audio . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . layers . xavier_initializer_conv2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . layers . xavier_initializer_conv2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . TensorFlowLinearRegressor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . TensorFlowLinearRegressor . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . extract_pandas_data . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . extract_pandas_data . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . infer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . infer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . auc_using_histogram . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . metrics . auc_using_histogram . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . metrics . set_intersection . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . metrics . set_intersection . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . set_size . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . metrics . set_size . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . streaming_mean_cosine_distance . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . metrics . streaming_mean_cosine_distance . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . metrics . streaming_percentage_less . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . metrics . streaming_percentage_less . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . util . make_tensor_proto . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . util . make_tensor_proto . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . div . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . div . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . errors . DataLossError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . errors . DataLossError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . errors . NotFoundError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . errors . NotFoundError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . foldl . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . foldl . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . igammac . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . igammac . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . resize_area . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . image . resize_area . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . resize_nearest_neighbor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . image . resize_nearest_neighbor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . is_nan . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . is_nan . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . matrix_solve_ls . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . matrix_solve_ls . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . matrix_triangular_solve . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . matrix_triangular_solve . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . merge_summary . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . merge_summary . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . moving_average_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . moving_average_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . batch_normalization . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . nn . batch_normalization . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . l2_loss . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . nn . l2_loss . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . ones_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . ones_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . random_normal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . random_normal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . read_file . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . read_file . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . real . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . real . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . reduce_all . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . reduce_all . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . scatter_sub . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . scatter_sub . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . sparse_fill_empty_rows . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . sparse_fill_empty_rows . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . to_double . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . to_double . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . trace . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . trace . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . AdadeltaOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . AdadeltaOptimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . limit_epochs . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . limit_epochs . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . truediv . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . truediv . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . truncated_normal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . truncated_normal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . unsorted_segment_sum . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . unsorted_segment_sum . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . IdentityReader . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . IdentityReader . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . IndexedSlices . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . IndexedSlices . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . NoGradient . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . NoGradient . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . RandomShuffleQueue . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . RandomShuffleQueue . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . add_n . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . add_n . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . assert_equal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . assert_equal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . assert_integer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . assert_integer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . assert_less . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . assert_less . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . assert_positive . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . assert_positive . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_cholesky . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . batch_cholesky . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . batch_matrix_solve_ls . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . batch_matrix_solve_ls . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_to_space . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . batch_to_space . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . complex . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . complex . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . distributions . Uniform . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . distributions . Uniform . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . layers . convolution2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . layers . convolution2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . layers . fully_connected . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . layers . fully_connected . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . layers . summarize_tensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . layers . summarize_tensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . layers . summarize_tensors . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . layers . summarize_tensors . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . learn . TensorFlowEstimator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . learn . TensorFlowEstimator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . read_batch_features . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . learn . read_batch_features . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . metrics . set_union . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . metrics . set_union . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . metrics . streaming_sparse_recall_at_k . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . metrics . streaming_sparse_recall_at_k . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . control_dependencies . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . control_dependencies . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . decode_csv . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . decode_csv . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . decode_raw . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . decode_raw . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . dynamic_partition . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . dynamic_partition . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . dynamic_stitch . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . dynamic_stitch . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . errors . OutOfRangeError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . errors . OutOfRangeError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . flip_up_down . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . image . flip_up_down . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . per_image_whitening . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . image . per_image_whitening . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . image . transpose_image . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . image . transpose_image . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . is_non_decreasing . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . is_non_decreasing . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . is_variable_initialized . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . is_variable_initialized . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . lbeta . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . lbeta . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . less . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . less . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . listdiff . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . listdiff . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . mul . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . mul . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . neg . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . neg . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . compute_accidental_hits . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . compute_accidental_hits . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . embedding_lookup_sparse . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . embedding_lookup_sparse . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . nce_loss . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . nce_loss . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . normalize_moments . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . normalize_moments . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . nn . separable_conv2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . separable_conv2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . top_k . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . top_k . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . random_shuffle . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . random_shuffle . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . segment_max . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . segment_max . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . sparse_segment_sqrt_n_grad . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . sparse_segment_sqrt_n_grad . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . sparse_to_dense . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . sparse_to_dense . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . square . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . square . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . tanh . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . tanh . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . AdamOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . train . AdamOptimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . batch . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . train . batch . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . shuffle_batch . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . train . shuffle_batch . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . summary_iterator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . train . summary_iterator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . truncated_normal_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . truncated_normal_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . 
variable_scope . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . variable_scope . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . FIFOQueue . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . FIFOQueue . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . InteractiveSession . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . InteractiveSession . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . Variable . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . Variable . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_fft2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_fft2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . batch_ifft2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_ifft2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . batch_matrix_band_part . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_matrix_band_part . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_matrix_solve . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_matrix_solve . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . cholesky . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . cholesky . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . conj . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . conj . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . constant . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . constant . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . distributions . Gamma . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . distributions . Gamma . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . Estimator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . Estimator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . learn . TensorFlowDNNClassifier . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . TensorFlowDNNClassifier . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . TensorFlowLinearClassifier . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . TensorFlowLinearClassifier . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . extract_dask_data . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . extract_dask_data . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . run_n . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . run_n . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . train . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . train . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . depth_to_space . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . depth_to_space . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . erf . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . erf . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . errors . DeadlineExceededError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . errors . DeadlineExceededError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . errors . FailedPreconditionError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . errors . FailedPreconditionError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . errors . InternalError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . errors . InternalError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . exp . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . exp . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . foldr . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . foldr . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . get_session_handle . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . get_session_handle . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . greater_equal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . greater_equal . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . adjust_contrast . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . adjust_contrast . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . convert_image_dtype . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . convert_image_dtype . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . flip_left_right . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . flip_left_right . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . image . random_contrast . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . random_contrast . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . resize_image_with_crop_or_pad . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . resize_image_with_crop_or_pad . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . rgb_to_grayscale . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . rgb_to_grayscale . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . initialize_all_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . initialize_all_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . initialize_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . initialize_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . logical_xor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . logical_xor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . matrix_inverse . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . matrix_inverse . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . multinomial . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . multinomial . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . nn . bias_add . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . bias_add . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . max_pool_with_argmax . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . max_pool_with_argmax . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . sampled_softmax_loss . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . sampled_softmax_loss . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . nn . softmax . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . softmax . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . no_op . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . no_op . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . no_regularizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . no_regularizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . python_io . tf_record_iterator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . python_io . tf_record_iterator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . rank . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . rank . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . report_uninitialized_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . report_uninitialized_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . segment_sum . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . segment_sum . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . string_to_hash_bucket_fast . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . string_to_hash_bucket_fast . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . test . compute_gradient_error . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . test . compute_gradient_error . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . to_bfloat16 . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . to_bfloat16 . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . Saver . from_proto . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . Saver . from_proto . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . SummaryWriter . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . SummaryWriter . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . generate_checkpoint_state_proto . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . generate_checkpoint_state_proto . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . match_filenames_once . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . match_filenames_once . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . slice_input_producer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . slice_input_producer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . zeros . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . zeros . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . zeta . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . zeta . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . DeviceSpec . from_string . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . DeviceSpec . from_string . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . FixedLenSequenceFeature . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . FixedLenSequenceFeature . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . RegisterGradient . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . RegisterGradient . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . Tensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . Tensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . VarLenFeature . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . VarLenFeature . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . VariableScope . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . VariableScope . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . add_to_collection . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . add_to_collection . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . assert_non_negative . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . assert_non_negative . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . assert_non_positive . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . assert_non_positive . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . assert_rank_at_least . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . assert_rank_at_least . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . batch_cholesky_solve . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_cholesky_solve . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . batch_fft3d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_fft3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . batch_ifft3d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_ifft3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_matmul . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_matmul . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . batch_matrix_diag . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_matrix_diag . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . ceil . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . ceil . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . distributions . ContinuousDistribution . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . distributions . ContinuousDistribution . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . layers . sum_regularizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . layers . sum_regularizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . RunConfig . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . RunConfig . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . learn . TensorFlowClassifier . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . TensorFlowClassifier . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . read_batch_examples . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . 
contrib . learn . read_batch_examples . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . learn . run_feeds . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . run_feeds . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . metrics . streaming_mean_relative_error . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . metrics . streaming_mean_relative_error . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . delete_session_tensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . delete_session_tensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . digamma . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . digamma . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . errors . ResourceExhaustedError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . errors . ResourceExhaustedError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . floor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . floor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . gather . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . gather . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . get_variable . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . get_variable . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . global_norm . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . global_norm . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . histogram_fixed_width . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . histogram_fixed_width . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . ifft . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . ifft . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . image . adjust_brightness . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . adjust_brightness . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . draw_bounding_boxes . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . draw_bounding_boxes . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . random_flip_left_right . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . random_flip_left_right . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . resize_images . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . resize_images . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image_summary . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image_summary . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . is_numeric_tensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . is_numeric_tensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . logical_or . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . logical_or . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . map_fn . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . map_fn . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . matching_files . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . matching_files . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . dropout . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . nn . dropout . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . fixed_unigram_candidate_sampler . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . nn . fixed_unigram_candidate_sampler . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . softmax_cross_entropy_with_logits . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . nn . softmax_cross_entropy_with_logits . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . pow . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . pow . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . random_normal_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . random_normal_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . random_uniform_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . random_uniform_initializer . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . reset_default_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . reset_default_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . reshape . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . reshape . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . round . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . round . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . scan . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . scan . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . scatter_add . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . scatter_add . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . sin . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sin . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . space_to_batch . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . space_to_batch . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . sparse_merge . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_merge . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . sparse_retain . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_retain . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . sparse_segment_sqrt_n . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_segment_sqrt_n . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . sparse_tensor_dense_matmul . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_tensor_dense_matmul . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . squared_difference . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . squared_difference . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . string_to_number . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . string_to_number . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . test . compute_gradient . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . test . compute_gradient . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . Server . create_local_server . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . Server . create_local_server . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . Server . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . Server . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . batch_join . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . batch_join . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . latest_checkpoint . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . latest_checkpoint . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . write_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . write_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . trainable_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . trainable_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . unique_with_counts . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . unique_with_counts . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . AggregationMethod . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . AggregationMethod . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . OpError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . OpError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . Print . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . Print . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . ReaderBase . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . ReaderBase . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . TextLineReader . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . TextLineReader . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . add . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . add . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . assert_proper_iterable . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . assert_proper_iterable . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . assert_rank . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . assert_rank . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . audio_summary . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . audio_summary . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . batch_matrix_determinant . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . batch_matrix_determinant . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . bytes . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . bytes . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . clip_by_global_norm . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . clip_by_global_norm . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . cond . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . cond . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . layers . l1_regularizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . layers . l1_regularizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . layers . optimize_loss . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . layers . optimize_loss . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . layers . summarize_activations . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . layers . summarize_activations . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . layers . xavier_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . layers . xavier_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . ModeKeys . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . learn . ModeKeys . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . TensorFlowDNNRegressor . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . learn . TensorFlowDNNRegressor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . learn . TensorFlowRNNClassifier . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . learn . TensorFlowRNNClassifier . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . evaluate . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . learn . evaluate . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . metrics . confusion_matrix . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . metrics . confusion_matrix . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . metrics . streaming_mean_absolute_error . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . metrics . streaming_mean_absolute_error . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . contrib . metrics . streaming_recall . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . metrics . streaming_recall . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . convert_to_tensor_or_indexed_slices . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . convert_to_tensor_or_indexed_slices . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . errors . AlreadyExistsError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . errors . AlreadyExistsError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . errors . UnavailableError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . errors . UnavailableError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . fft3d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . fft3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . get_default_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . get_default_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . get_variable_scope . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . get_variable_scope . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . igamma . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . igamma . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . random_flip_up_down . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . image . random_flip_up_down . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . image . rgb_to_hsv . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . image . rgb_to_hsv . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . initialize_local_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . initialize_local_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . is_inf . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . is_inf . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . less_equal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . less_equal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . make_template . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . make_template . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . in_top_k . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . in_top_k . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . learned_unigram_candidate_sampler . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . learned_unigram_candidate_sampler . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . local_response_normalization . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . local_response_normalization . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . nn . log_uniform_candidate_sampler . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . log_uniform_candidate_sampler . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . relu6 . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . relu6 . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . softsign . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . softsign . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . nn . sufficient_statistics . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . sufficient_statistics . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . ones_like . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . ones_like . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . placeholder . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . placeholder . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . placeholder_with_default . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . placeholder_with_default . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . shape . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . shape . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . sparse_reorder . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . sparse_reorder . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . sqrt . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . sqrt . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . to_int32 . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . to_int32 . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . RMSPropOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . train . RMSPropOptimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . Saver . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . train . Saver . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . Supervisor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . train . Supervisor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . add_queue_runner . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . train . add_queue_runner . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . transpose . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . transpose . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . DeviceSpec . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . DeviceSpec . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . Dimension . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . Dimension . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . QueueBase . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . QueueBase . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . SparseTensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . SparseTensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . TensorShape . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . TensorShape . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . WholeFileReader . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . WholeFileReader . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . abs . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . abs . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . accumulate_n . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . accumulate_n . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_matrix_triangular_solve . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . batch_matrix_triangular_solve . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . batch_self_adjoint_eig . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . batch_self_adjoint_eig . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . bitcast . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . bitcast . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . boolean_mask . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . boolean_mask . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . clip_by_average_norm . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . clip_by_average_norm . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . constant_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . constant_initializer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . copy_graph . copy_variable_to_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . copy_graph . copy_variable_to_graph . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . distributions . DiscreteDistribution . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . distributions . DiscreteDistribution . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . distributions . Normal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . distributions . Normal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . distributions . StudentT . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . distributions . StudentT . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . learn . BaseEstimator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . learn . BaseEstimator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . metrics . streaming_auc . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . metrics . streaming_auc . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . metrics . streaming_mean_squared_error . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . metrics . streaming_mean_squared_error . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . metrics . streaming_root_mean_squared_error . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . metrics . streaming_root_mean_squared_error . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . edit_distance . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . edit_distance . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . equal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . equal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . errors . AbortedError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . errors . AbortedError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . errors . PermissionDeniedError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . errors . PermissionDeniedError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . floordiv . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . floordiv . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . ifft2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . ifft2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . central_crop . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . central_crop . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . decode_jpeg . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . decode_jpeg . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . decode_png . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . decode_png . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . random_brightness . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . random_brightness . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . image . resize_bicubic . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . resize_bicubic . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . linspace . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . linspace . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . maximum . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . maximum . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . nn . atrous_conv2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . atrous_conv2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . conv2d_transpose . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . conv2d_transpose . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . embedding_lookup . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . embedding_lookup . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . nn . max_pool3d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . max_pool3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . moments . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . moments . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . nn . 
sigmoid_cross_entropy_with_logits . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . sigmoid_cross_entropy_with_logits . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . sparse_softmax_cross_entropy_with_logits . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . nn . sparse_softmax_cross_entropy_with_logits . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . not_equal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . not_equal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . parse_example . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . parse_example . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . python_io . TFRecordWriter . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . python_io . TFRecordWriter . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . random_crop . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . random_crop . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . reduce_mean . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . reduce_mean . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . scalar_mul . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . scalar_mul . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . scalar_summary . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . scalar_summary . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . set_random_seed . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . set_random_seed . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . sigmoid . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . sigmoid . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . size . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . size . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . sparse_split . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . sparse_split . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . sparse_to_indicator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . 
sparse_to_indicator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . split . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . split . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . test . assert_equal_graph_def . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . test . assert_equal_graph_def . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . train . AdagradOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . AdagradOptimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . GradientDescentOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . GradientDescentOptimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . global_step . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . global_step . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . import_meta_graph . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . import_meta_graph . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . range_input_producer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . range_input_producer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . Operation . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . Operation . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . assert_less_equal . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . assert_less_equal . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . batch_fft . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . batch_fft . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . complex_abs . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . complex_abs . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . distributions . Exponential . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . distributions . Exponential . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . contrib . distributions . normal_congugates_known_sigma_predictive . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . distributions . normal_congugates_known_sigma_predictive . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . layers . summarize_collection . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . layers . summarize_collection . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . learn . TensorFlowRNNRegressor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . learn . TensorFlowRNNRegressor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . learn . read_batch_record_features . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . learn . read_batch_record_features . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . streaming_mean . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . metrics . streaming_mean . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . contrib . metrics . streaming_precision . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . metrics . streaming_precision . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . contrib . util . make_ndarray . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . util . make_ndarray . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . convert_to_tensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . convert_to_tensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . diag_part . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . diag_part . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . errors . InvalidArgumentError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . errors . InvalidArgumentError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . errors . UnauthenticatedError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . errors . UnauthenticatedError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . get_seed . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . get_seed . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . histogram_summary . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . histogram_summary . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . imag . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . imag . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . crop_to_bounding_box . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . crop_to_bounding_box . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . grayscale_to_rgb . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . grayscale_to_rgb . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . image . hsv_to_rgb . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . hsv_to_rgb . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . image . sample_distorted_bounding_box . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . sample_distorted_bounding_box . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . local_variables . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . local_variables . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . logical_not . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . logical_not . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . matmul . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . matmul . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . polygamma . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . polygamma . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . random_uniform . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . random_uniform . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . range . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . range . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . register_tensor_conversion_function . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . register_tensor_conversion_function . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reverse_sequence . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . reverse_sequence . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . saturate_cast . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . saturate_cast . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . self_adjoint_eig . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . self_adjoint_eig . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . space_to_depth . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . space_to_depth . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . sparse_add . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . sparse_add . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_mask . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . sparse_mask . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . sparse_placeholder . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . sparse_placeholder . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . sparse_tensor_to_dense . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . sparse_tensor_to_dense . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . stop_gradient . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . stop_gradient . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . string_to_hash_bucket . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . string_to_hash_bucket . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . ClusterSpec . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . ClusterSpec . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . ExponentialMovingAverage . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . ExponentialMovingAverage . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . FtrlOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . FtrlOptimizer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . MomentumOptimizer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . MomentumOptimizer . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard5 / tf . train . QueueRunner . from_proto . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . QueueRunner . from_proto . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . QueueRunner . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . QueueRunner . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard7 / tf . train . replica_device_setter . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . replica_device_setter . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard1 / tf . train . string_input_producer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . train . string_input_producer . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . unpack . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . unpack . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard3 / tf . variable_axis_size_partitioner . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . variable_axis_size_partitioner . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . FixedLengthRecordReader . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . FixedLengthRecordReader . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . GraphKeys . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . GraphKeys . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . RegisterShape . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . RegisterShape . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . Session . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . Session . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . SparseTensorValue . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . SparseTensorValue . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . assert_negative . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . assert_negative . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . assert_variables_initialized . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . assert_variables_initialized . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . batch_matrix_inverse . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . batch_matrix_inverse . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . cast . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . cast . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . clip_by_value . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . clip_by_value . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . distributions . BaseDistribution . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . distributions . BaseDistribution . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . contrib . layers . apply_regularization . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . layers . apply_regularization . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . learn . extract_pandas_labels . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . extract_pandas_labels . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . contrib . learn . extract_pandas_matrix . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . learn . extract_pandas_matrix . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . contrib . metrics . accuracy . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . metrics . accuracy . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . contrib . metrics . streaming_recall_at_k . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . metrics . streaming_recall_at_k . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . contrib . metrics . streaming_sparse_precision_at_k . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . contrib . metrics . streaming_sparse_precision_at_k . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . decode_json_example . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . decode_json_example . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . diag . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . diag . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . errors . CancelledError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . errors . CancelledError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . errors . UnimplementedError . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . errors . UnimplementedError . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . fft2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . fft2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . fill . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . fill . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . get_collection . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . get_collection . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . get_collection_ref . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . get_collection_ref . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . get_session_tensor . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . get_session_tensor . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . adjust_hue . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . adjust_hue . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . image . encode_jpeg . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . encode_jpeg . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . image . encode_png . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . encode_png . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . extract_glimpse . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . extract_glimpse . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . image . random_hue . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . random_hue . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . image . resize_bilinear . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . image . resize_bilinear . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . invert_permutation . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . invert_permutation . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . load_file_system_library . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . load_file_system_library . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . matrix_determinant . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . matrix_determinant . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . avg_pool3d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . avg_pool3d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . depthwise_conv2d . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . depthwise_conv2d . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . nn . l2_normalize . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . l2_normalize . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . nn . log_softmax . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . log_softmax . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . nn . relu . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . relu . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . nn . zero_fraction . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . nn . zero_fraction . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . ones . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . ones . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . parse_single_example . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . parse_single_example . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . py_func . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . py_func . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . reduce_any . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reduce_any . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . reduce_max . 
md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reduce_max . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . reduce_min . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reduce_min . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . reduce_prod . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reduce_prod . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . reverse . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . reverse . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . segment_mean . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . segment_mean . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . segment_prod . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . segment_prod . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard6 / tf . sign . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . sign . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . sparse_segment_mean . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . sparse_segment_mean . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . sparse_softmax . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . sparse_softmax . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard8 / tf . string_to_hash_bucket_strong . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . string_to_hash_bucket_strong . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . to_int64 . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . to_int64 . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . Coordinator . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . Coordinator . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . train . LooperThread . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . LooperThread . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard2 / tf . train . exponential_decay . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . exponential_decay . 
md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . tuple . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . tuple . md <nl> similarity index 100 % <nl> rename from tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . zeros_initializer . md <nl> rename to tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . zeros_initializer . md <nl>
Update generated Python Op docs .
tensorflow/tensorflow
0bb913604ab8c8465a0ef7c203d699b36e7d157e
2016-05-26T15:02:00Z
mmm a / src / core / lib / iomgr / closure . h <nl> ppp b / src / core / lib / iomgr / closure . h <nl> inline void grpc_closure_run ( grpc_closure * c , grpc_error * error ) { <nl> # define GRPC_CLOSURE_RUN ( closure , error ) grpc_closure_run ( closure , error ) <nl> # endif <nl> <nl> - # ifndef NDEBUG <nl> - inline void grpc_closure_sched ( const char * file , int line , grpc_closure * c , <nl> - grpc_error * error ) { <nl> - # else <nl> - inline void grpc_closure_sched ( grpc_closure * c , grpc_error * error ) { <nl> - # endif <nl> - GPR_TIMER_SCOPE ( " grpc_closure_sched " , 0 ) ; <nl> - if ( c ! = nullptr ) { <nl> - # ifndef NDEBUG <nl> - if ( c - > scheduled ) { <nl> - gpr_log ( GPR_ERROR , <nl> - " Closure already scheduled . ( closure : % p , created : [ % s : % d ] , " <nl> - " previously scheduled at : [ % s : % d ] , newly scheduled at [ % s : % d ] , " <nl> - " run ? : % s " , <nl> - c , c - > file_created , c - > line_created , c - > file_initiated , <nl> - c - > line_initiated , file , line , c - > run ? " true " : " false " ) ; <nl> - abort ( ) ; <nl> - } <nl> - c - > scheduled = true ; <nl> - c - > file_initiated = file ; <nl> - c - > line_initiated = line ; <nl> - c - > run = false ; <nl> - GPR_ASSERT ( c - > cb ! = nullptr ) ; <nl> - # endif <nl> - c - > scheduler - > vtable - > sched ( c , error ) ; <nl> - } else { <nl> - GRPC_ERROR_UNREF ( error ) ; <nl> - } <nl> - } <nl> - <nl> - / * * Schedule a closure to be run . Does not need to be run from a safe point . * / <nl> - # ifndef NDEBUG <nl> - # define GRPC_CLOSURE_SCHED ( closure , error ) \ <nl> - grpc_closure_sched ( __FILE__ , __LINE__ , closure , error ) <nl> - # else <nl> - # define GRPC_CLOSURE_SCHED ( closure , error ) grpc_closure_sched ( closure , error ) <nl> - # endif <nl> <nl> # ifndef NDEBUG <nl> inline void grpc_closure_list_sched ( const char * file , int line , <nl> grpc_closure_list * list ) { <nl> mmm a / src / core / lib / iomgr / exec_ctx . cc <nl> ppp b / src / core / lib / iomgr / exec_ctx . cc <nl> grpc_millis ExecCtx : : Now ( ) { <nl> return now_ ; <nl> } <nl> <nl> + void ExecCtx : : Run ( const DebugLocation & location , grpc_closure * closure , <nl> + grpc_error * error ) { <nl> + # ifndef NDEBUG <nl> + if ( closure - > scheduled ) { <nl> + gpr_log ( GPR_ERROR , <nl> + " Closure already scheduled . ( closure : % p , created : [ % s : % d ] , " <nl> + " previously scheduled at : [ % s : % d ] , newly scheduled at [ % s : % d ] , " <nl> + " run ? : % s " , <nl> + closure , closure - > file_created , closure - > line_created , <nl> + closure - > file_initiated , closure - > line_initiated , location . file ( ) , <nl> + location . line ( ) , closure - > run ? " true " : " false " ) ; <nl> + abort ( ) ; <nl> + } <nl> + closure - > scheduled = true ; <nl> + closure - > file_initiated = location . file ( ) ; <nl> + closure - > line_initiated = location . line ( ) ; <nl> + closure - > run = false ; <nl> + GPR_ASSERT ( closure - > cb ! = nullptr ) ; <nl> + # endif <nl> + exec_ctx_sched ( closure , error ) ; <nl> + } <nl> + <nl> } / / namespace grpc_core <nl> mmm a / src / core / lib / iomgr / exec_ctx . h <nl> ppp b / src / core / lib / iomgr / exec_ctx . h <nl> <nl> <nl> # include " src / core / lib / gpr / time_precise . h " <nl> # include " src / core / lib / gpr / tls . h " <nl> + # include " src / core / lib / gprpp / debug_location . h " <nl> # include " src / core / lib / gprpp / fork . h " <nl> # include " src / core / lib / iomgr / closure . 
h " <nl> <nl> class ExecCtx { <nl> gpr_tls_set ( & exec_ctx_ , reinterpret_cast < intptr_t > ( exec_ctx ) ) ; <nl> } <nl> <nl> + static void Run ( const DebugLocation & location , grpc_closure * closure , <nl> + grpc_error * error ) ; <nl> + <nl> protected : <nl> / * * Check if ready to finish . * / <nl> virtual bool CheckReadyToFinish ( ) { return false ; } <nl>
Adding ExecCtx : : Run
grpc/grpc
336b476d32621cd9f5a896bbdbd8e31dc9a3176f
2019-10-31T23:41:40Z
mmm a / docs / docs / linear_algebra . xml <nl> ppp b / docs / docs / linear_algebra . xml <nl> <nl> < name > svd_fast < / name > <nl> < link > dlib / matrix / matrix_la_abstract . h . html # svd_fast < / link > <nl> < / item > <nl> + < item > <nl> + < name > orthogonalize < / name > <nl> + < link > dlib / matrix / matrix_la_abstract . h . html # orthogonalize < / link > <nl> + < / item > <nl> < item > <nl> < name > det < / name > <nl> < link > dlib / matrix / matrix_la_abstract . h . html # det < / link > <nl> mmm a / docs / docs / term_index . xml <nl> ppp b / docs / docs / term_index . xml <nl> <nl> < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " svd2 " / > <nl> < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " svd3 " / > <nl> < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " svd_fast " / > <nl> + < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " orthogonalize " / > <nl> < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " det " / > <nl> < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " trace " / > <nl> < term file = " dlib / matrix / matrix_la_abstract . h . html " name = " chol " / > <nl>
updated docs
davisking/dlib
9f9a720a234a074d6d20ccfdf07807b661b2f34b
2013-01-19T05:17:16Z
mmm a / tools / js - optimizer . js <nl> ppp b / tools / js - optimizer . js <nl> function emterpretify ( ast ) { <nl> var temp = node [ 2 ] ; <nl> node [ 2 ] = node [ 3 ] ; <nl> node [ 3 ] = temp ; <nl> - node [ 1 ] = node [ 1 ] = = = ' > = ' ? ' < ' : ' < = ' ; <nl> + node [ 1 ] = node [ 1 ] = = = ' > = ' ? ' < = ' : ' < ' ; <nl> } else throw ' ex ' + type ; <nl> } <nl> return makeBinary ( node , type , sign ) ; <nl>
fix binary bug
emscripten-core/emscripten
8bc2a6b2f378cbff5ba824b6259a98ab16fc6ca0
2014-09-22T02:09:05Z
mmm a / xbmc / addons / kodi - addon - dev - kit / include / kodi / versions . h <nl> ppp b / xbmc / addons / kodi - addon - dev - kit / include / kodi / versions . h <nl> <nl> # define ADDON_INSTANCE_VERSION_PERIPHERAL_DEPENDS " addon - instance / Peripheral . h " \ <nl> " addon - instance / PeripheralUtils . h " <nl> <nl> - # define ADDON_INSTANCE_VERSION_PVR " 6 . 5 . 2 " <nl> - # define ADDON_INSTANCE_VERSION_PVR_MIN " 6 . 5 . 1 " <nl> + # define ADDON_INSTANCE_VERSION_PVR " 7 . 0 . 0 " <nl> + # define ADDON_INSTANCE_VERSION_PVR_MIN " 7 . 0 . 0 " <nl> # define ADDON_INSTANCE_VERSION_PVR_XML_ID " kodi . binary . instance . pvr " <nl> - # define ADDON_INSTANCE_VERSION_PVR_DEPENDS " addon - instance / PVR . h " <nl> + # define ADDON_INSTANCE_VERSION_PVR_DEPENDS " c - api / addon - instance / pvr . h " \ <nl> + " c - api / addon - instance / pvr / pvr_channel_groups . h " \ <nl> + " c - api / addon - instance / pvr / pvr_channels . h " \ <nl> + " c - api / addon - instance / pvr / pvr_defines . h " \ <nl> + " c - api / addon - instance / pvr / pvr_edl . h " \ <nl> + " c - api / addon - instance / pvr / pvr_epg . h " \ <nl> + " c - api / addon - instance / pvr / pvr_general . h " \ <nl> + " c - api / addon - instance / pvr / pvr_menu_hook . h " \ <nl> + " c - api / addon - instance / pvr / pvr_recordings . h " \ <nl> + " c - api / addon - instance / pvr / pvr_stream . h " \ <nl> + " c - api / addon - instance / pvr / pvr_timers . h " \ <nl> + " addon - instance / PVR . h " \ <nl> + " addon - instance / pvr / ChannelGroups . h " \ <nl> + " addon - instance / pvr / Channels . h " \ <nl> + " addon - instance / pvr / EDL . h " \ <nl> + " addon - instance / pvr / EPG . h " \ <nl> + " addon - instance / pvr / General . h " \ <nl> + " addon - instance / pvr / MenuHook . h " \ <nl> + " addon - instance / pvr / Recordings . h " \ <nl> + " addon - instance / pvr / Stream . h " \ <nl> + " addon - instance / pvr / Timers . h " <nl> <nl> # define ADDON_INSTANCE_VERSION_SCREENSAVER " 2 . 0 . 1 " <nl> # define ADDON_INSTANCE_VERSION_SCREENSAVER_MIN " 2 . 0 . 1 " <nl>
[addons][pvr] increase version to 7.0.0
xbmc/xbmc
f993d541442240c807827fd741fe4e5e13992609
2020-06-12T16:58:06Z
--- a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
class ToBigtableOp : public AsyncOpKernel {
       grpc::Status mutation_status;
       std::vector<::google::cloud::bigtable::FailedMutation> failures =
           resource->table().BulkApply(std::move(mutation), mutation_status);
+      if (!mutation_status.ok()) {
+        LOG(ERROR) << "Failure applying mutation: "
+                   << mutation_status.error_code() << " - "
+                   << mutation_status.error_message() << " ("
+                   << mutation_status.error_details() << ").";
+      }
       if (!failures.empty()) {
         for (const auto& failure : failures) {
           LOG(ERROR) << "Failure applying mutation on row ("
[tf.data/Bigtable] Log mutation_status errors.
tensorflow/tensorflow
fb58f1d287e7210bebc22e0946b969c2ec0303e7
2018-07-03T23:48:00Z
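The change simply surfaces the grpc::Status returned through BulkApply instead of discarding it. A rough standalone sketch of the same reporting pattern; ApplyMutations and log_error are placeholders, and only the grpc::Status accessors (ok, error_code, error_message, error_details) come from the gRPC C++ API.

#include <iostream>
#include <grpcpp/grpcpp.h>

// Placeholder for resource->table().BulkApply(...) in the real kernel.
grpc::Status ApplyMutations() {
  return grpc::Status(grpc::StatusCode::UNAVAILABLE, "backend unreachable");
}

void log_error(const grpc::Status& status) {
  std::cerr << "Failure applying mutation: "
            << status.error_code() << " - "
            << status.error_message() << " ("
            << status.error_details() << ")." << std::endl;
}

int main() {
  grpc::Status mutation_status = ApplyMutations();
  if (!mutation_status.ok()) {
    log_error(mutation_status);  // same fields the new LOG(ERROR) prints
  }
  return 0;
}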
--- a/build/features.gypi
+++ b/build/features.gypi
     # Use external files for startup data blobs:
     # the JS builtins sources and the start snapshot.
     'v8_use_external_startup_data%': 0,
+
+    # Set to 1 to enable DCHECKs in release builds.
+    'dcheck_always_on%': 0,
   },
   'target_defaults': {
     'conditions': [

       ['v8_use_external_startup_data==1', {
         'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
       }],
+      ['dcheck_always_on!=0', {
+        'defines': ['DEBUG',],
+      }],
     ],  # conditions
     'configurations': {
       'DebugBaseCommon': {
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
           },
         }],
       ],
+      'defines': [
+        'ENABLE_SLOW_DCHECKS',
+      ],
     },  # DebugBase0
     # Abstract configuration for v8_optimized_debug==1.
     'DebugBase1': {

         'LinkIncremental': '2',
         },
       },
+      'defines': [
+        'ENABLE_SLOW_DCHECKS',
+      ],
       'conditions': [
         ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
           OS=="qnx"', {

             '-fdata-sections',
             '-ffunction-sections',
           ],
-          'defines': [
-            'OPTIMIZED_DEBUG'
-          ],
           'conditions': [
             # TODO(crbug.com/272548): Avoid -O3 in NaCl
             ['nacl_target_arch=="none"', {
--- a/src/api.cc
+++ b/src/api.cc
void SetResourceConstraints(i::Isolate* isolate,
 i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
   LOG_API(isolate, "Persistent::New");
   i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   (*obj)->ObjectVerify();
-#endif  // DEBUG
+#endif  // VERIFY_HEAP
   return result.location();
 }


 i::Object** V8::CopyPersistent(i::Object** obj) {
   i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   (*obj)->ObjectVerify();
-#endif  // DEBUG
+#endif  // VERIFY_HEAP
   return result.location();
 }

--- a/src/checks.h
+++ b/src/checks.h

 #include "src/base/logging.h"

-#ifdef DEBUG
-#ifndef OPTIMIZED_DEBUG
-#define ENABLE_SLOW_DCHECKS 1
-#endif
-#endif
-
 namespace v8 {

 class Value;
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
RegExpEngine::CompilationResult RegExpCompiler::Assemble(
   Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
   heap->IncreaseTotalRegexpCodeGenerated(code->Size());
   work_list_ = NULL;
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code) {
     CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
     OFStream os(trace_scope.file());
     Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
   }
+#endif
+#ifdef DEBUG
   if (FLAG_trace_regexp_assembler) {
     delete macro_assembler_;
   }
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
void JSRegExp::SetDataAt(int index, Object* value) {

 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
-#if DEBUG
+#if VERIFY_HEAP
   FixedArrayBase* fixed_array =
       reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));

--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
void JSObject::PrintElements(std::ostream& os) {  // NOLINT
 }


-void JSObject::PrintTransitions(std::ostream& os) {  // NOLINT
-  if (!map()->HasTransitionArray()) return;
-  map()->transitions()->PrintTransitions(os, false);
-}
-
-
 void JSObject::JSObjectPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "JSObject");
   // Don't call GetElementsKind, its validation code can cause the printer to
void Name::NamePrint(std::ostream& os) {  // NOLINT
 }


-// This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-Latin1 characters
-// will be ignored in the output.
-char* String::ToAsciiArray() {
-  // Static so that subsequent calls frees previously allocated space.
-  // This also means that previous results will be overwritten.
-  static char* buffer = NULL;
-  if (buffer != NULL) delete[] buffer;
-  buffer = new char[length() + 1];
-  WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
-  buffer[length()] = 0;
-  return buffer;
-}
-
-
 static const char* const weekdays[] = {
   "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
 };
void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) {  // NOLINT
 }


-void DescriptorArray::Print() {
-  OFStream os(stdout);
-  this->PrintDescriptors(os);
-  os << std::flush;
-}
-
-
-void DescriptorArray::PrintDescriptors(std::ostream& os) {  // NOLINT
-  HandleScope scope(GetIsolate());
-  os << "Descriptor array " << number_of_descriptors() << "\n";
-  for (int i = 0; i < number_of_descriptors(); i++) {
-    Descriptor desc;
-    Get(i, &desc);
-    os << " " << i << ": " << desc << "\n";
-  }
-  os << "\n";
-}
-
-
 static void PrintBitMask(std::ostream& os, uint32_t value) {  // NOLINT
   for (int i = 0; i < 32; i++) {
     if ((i & 7) == 0) os << " ";
void LayoutDescriptor::Print(std::ostream& os) {  // NOLINT
 }


+#endif  // OBJECT_PRINT
+
+
+#if TRACE_MAPS
+
+
+void Name::NameShortPrint() {
+  if (this->IsString()) {
+    PrintF("%s", String::cast(this)->ToCString().get());
+  } else {
+    DCHECK(this->IsSymbol());
+    Symbol* s = Symbol::cast(this);
+    if (s->name()->IsUndefined()) {
+      PrintF("#<%s>", s->PrivateSymbolToName());
+    } else {
+      PrintF("<%s>", String::cast(s->name())->ToCString().get());
+    }
+  }
+}
+
+
+int Name::NameShortPrint(Vector<char> str) {
+  if (this->IsString()) {
+    return SNPrintF(str, "%s", String::cast(this)->ToCString().get());
+  } else {
+    DCHECK(this->IsSymbol());
+    Symbol* s = Symbol::cast(this);
+    if (s->name()->IsUndefined()) {
+      return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
+    } else {
+      return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
+    }
+  }
+}
+
+
+#endif  // TRACE_MAPS
+
+
+#ifdef DEBUG
+// This method is only meant to be called from gdb for debugging purposes.
+// Since the string can also be in two-byte encoding, non-Latin1 characters
+// will be ignored in the output.
+char* String::ToAsciiArray() {
+  // Static so that subsequent calls frees previously allocated space.
+  // This also means that previous results will be overwritten.
+  static char* buffer = NULL;
+  if (buffer != NULL) delete[] buffer;
+  buffer = new char[length() + 1];
+  WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
+  buffer[length()] = 0;
+  return buffer;
+}
+
+
+void DescriptorArray::Print() {
+  OFStream os(stdout);
+  this->PrintDescriptors(os);
+  os << std::flush;
+}
+
+
+void DescriptorArray::PrintDescriptors(std::ostream& os) {  // NOLINT
+  HandleScope scope(GetIsolate());
+  os << "Descriptor array " << number_of_descriptors() << "\n";
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    Descriptor desc;
+    Get(i, &desc);
+    os << " " << i << ": " << desc << "\n";
+  }
+  os << "\n";
+}
+
+
 void TransitionArray::Print() {
   OFStream os(stdout);
   this->PrintTransitions(os);
void TransitionArray::PrintTransitions(std::ostream& os,
   for (int i = 0; i < number_of_transitions(); i++) {
     Name* key = GetKey(i);
     os << " ";
+#ifdef OBJECT_PRINT
     key->NamePrint(os);
+#else
+    key->ShortPrint(os);
+#endif
     os << ": ";
     if (key == GetHeap()->frozen_symbol()) {
       os << " (transition to frozen)";
void TransitionArray::PrintTransitions(std::ostream& os,
 }


-#endif  // OBJECT_PRINT
-
-
-#if TRACE_MAPS
-
-
-void Name::NameShortPrint() {
-  if (this->IsString()) {
-    PrintF("%s", String::cast(this)->ToCString().get());
-  } else {
-    DCHECK(this->IsSymbol());
-    Symbol* s = Symbol::cast(this);
-    if (s->name()->IsUndefined()) {
-      PrintF("#<%s>", s->PrivateSymbolToName());
-    } else {
-      PrintF("<%s>", String::cast(s->name())->ToCString().get());
-    }
-  }
-}
-
-
-int Name::NameShortPrint(Vector<char> str) {
-  if (this->IsString()) {
-    return SNPrintF(str, "%s", String::cast(this)->ToCString().get());
-  } else {
-    DCHECK(this->IsSymbol());
-    Symbol* s = Symbol::cast(this);
-    if (s->name()->IsUndefined()) {
-      return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
-    } else {
-      return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
-    }
-  }
+void JSObject::PrintTransitions(std::ostream& os) {  // NOLINT
+  if (!map()->HasTransitionArray()) return;
+  map()->transitions()->PrintTransitions(os, false);
 }
-
-
-#endif  // TRACE_MAPS
+#endif  // DEBUG
 } }  // namespace v8::internal
--- a/src/objects.cc
+++ b/src/objects.cc
void Object::ShortPrint(StringStream* accumulator) {
 }


+void Object::ShortPrint(std::ostream& os) { os << Brief(this); }
+
+
 std::ostream& operator<<(std::ostream& os, const Brief& v) {
   if (v.value->IsSmi()) {
     Smi::cast(v.value)->SmiPrint(os);
--- a/src/objects.h
+++ b/src/objects.h
class Object {
   // Prints this object without details to a message accumulator.
   void ShortPrint(StringStream* accumulator);

+  void ShortPrint(std::ostream& os);  // NOLINT
+
   DECLARE_CAST(Object)

   // Layout description.
class Object {

   // Prints this object with details.
   void Print(std::ostream& os);  // NOLINT
+#else
+  void Print() { ShortPrint(); }
+  void Print(std::ostream& os) { ShortPrint(os); }  // NOLINT
 #endif

  private:
class JSObject : public JSReceiver {
 #ifdef OBJECT_PRINT
   void PrintProperties(std::ostream& os);  // NOLINT
   void PrintElements(std::ostream& os);  // NOLINT
+#endif
+#ifdef DEBUG
   void PrintTransitions(std::ostream& os);  // NOLINT
 #endif

class DescriptorArray : public FixedArray {
   static const int kDescriptorValue = 2;
   static const int kDescriptorSize = 3;

-#ifdef OBJECT_PRINT
+#ifdef DEBUG
   // For our gdb macros, we should perhaps change these in the future.
   void Print();

   // Print all the descriptors.
   void PrintDescriptors(std::ostream& os);  // NOLINT
-#endif

-#ifdef DEBUG
   // Is the descriptor array sorted and without duplicates?
   bool IsSortedNoDuplicates(int valid_descriptors = -1);

class String : public Name {
   // Dispatched behavior.
   void StringShortPrint(StringStream* accumulator);
   void PrintUC16(std::ostream& os, int start = 0, int end = -1);  // NOLINT
-#ifdef OBJECT_PRINT
+#ifdef DEBUG
   char* ToAsciiArray();
 #endif
   DECLARE_PRINTER(String)
--- a/src/transitions.h
+++ b/src/transitions.h
class TransitionArray : public FixedArray {
   static const int kTransitionTarget = 1;
   static const int kTransitionSize = 2;

-#ifdef OBJECT_PRINT
+#ifdef DEBUG
   // For our gdb macros, we should perhaps change these in the future.
   void Print();

   // Print all the transitions.
   void PrintTransitions(std::ostream& os, bool print_header = true);  // NOLINT
-#endif

-#ifdef DEBUG
   bool IsSortedNoDuplicates(int valid_entries = -1);
   bool IsConsistentWithBackPointers(Map* current_map);
   bool IsEqualTo(TransitionArray* other);
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
TEST(Regress385349) {
 }


-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
 static int probes_counter = 0;
 static int misses_counter = 0;
 static int updates_counter = 0;
static const char* kMegamorphicTestProgram =


 static void StubCacheHelper(bool primary) {
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
   i::FLAG_native_code_counters = true;
   if (primary) {
     i::FLAG_test_primary_stub_cache = true;
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
 ##############################################################################
 # Tests verifying CHECK and ASSERT.
 'verify-check-false': [FAIL, NO_VARIANTS],
-'verify-assert-false': [NO_VARIANTS, ['mode == release', PASS], ['mode == debug', FAIL]],
+'verify-assert-false': [NO_VARIANTS, ['mode == release and dcheck_always_on == False', PASS], ['mode == debug or dcheck_always_on == True', FAIL]],

 ##############################################################################
 # Tests with different versions for release and debug.
--- a/test/webkit/webkit.status
+++ b/test/webkit/webkit.status
 ['arch == arm64 and simulator_run == True', {
   'dfg-int-overflow-in-loop': [SKIP],
 }],  # 'arch == arm64 and simulator_run == True'
+['dcheck_always_on == True and arch == arm64', {
+  # Doesn't work with gcc 4.6 on arm64 for some reason.
+  'reentrant-caching': [SKIP],
+}],  # 'dcheck_always_on == True and arch == arm64'


 ##############################################################################
Turn on DCHECKs and other debugging code if dcheck_always_on is 1
v8/v8
97c1f4b15c8f31d5b133053354530ae8b536d278
2014-12-08T09:26:16Z
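The commit's mechanism is a build-level define: when gyp is run with dcheck_always_on=1, DEBUG is added to the compiler defines, so DCHECK-style assertions stay compiled in even in an optimized release build. A minimal sketch of that gating, using a made-up MY_DCHECK macro rather than V8's real definitions:

#include <cstdio>
#include <cstdlib>

// The gyp condition ['dcheck_always_on!=0', {'defines': ['DEBUG']}] makes
// DEBUG visible even when compiling a release configuration.
#ifdef DEBUG
#define MY_DCHECK(cond)                                   \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "DCHECK failed: %s\n", #cond); \
      std::abort();                                       \
    }                                                     \
  } while (0)
#else
#define MY_DCHECK(cond) ((void)0)  // compiled out in plain release builds
#endif

int main(int argc, char**) {
  MY_DCHECK(argc > 0);  // fires only in debug or dcheck_always_on builds
  return 0;
}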
--- a/GameSDK.cryproject
+++ b/GameSDK.cryproject
 {
     "version": 1,
     "type": "CRYENGINE Project",
-    "info": { "name": "CRYENGINE" },
+    "info": { "name": "CRYENGINE SDK" },
     "content": {
-        "assets": [ "gamesdk" ],
+        "assets": [ "Assets" ],
         "code": [ "" ],
         "libs": [
             {
!XB (CE-12706) (GameSDK) Incorrect project name for "CRYENGINE"
CRYTEK/CRYENGINE
426b0aefe1c48d2b7b90297d2e50b933f6697bdd
2017-06-12T15:45:39Z
--- a/stdlib/public/core/UnsafePointer.swift.gyb
+++ b/stdlib/public/core/UnsafePointer.swift.gyb
public struct ${Self}<Pointee>
   ///
   /// - Postcondition: The pointee is initialized; the value should eventually
   ///   be destroyed or moved from to avoid leaks.
-  // FIXME: add tests (since the `count` has been added)
   public func initialize(with newValue: Pointee, count: Int = 1) {
+    // FIXME: add tests (since the `count` has been added)
     _debugPrecondition(count >= 0,
       "${Self}.initialize(with:): negative count")
     // Must not use `initializeFrom` with a `Collection` as that will introduce
Merge pull request from natecook1000/nc-ump-doc
apple/swift
7fcf1dbd045da7cc74bb58d3e97bdf1139768c7e
2016-06-23T07:01:38Z
--- a/hphp/runtime/ext/json/JSON_parser.cpp
+++ b/hphp/runtime/ext/json/JSON_parser.cpp
struct json_parser {
       size_t bufSize = length <= RuntimeOption::EvalSimpleJsonMaxLength ?
         SimpleParser::BufferBytesForLength(length) :
         sb_cap * 2;
-      if (tl_buffer.raw) delete[] tl_buffer.raw;
-      tl_buffer.raw = new char[bufSize];
+      if (tl_buffer.raw) {
+        free(tl_buffer.raw);
+        tl_buffer.raw = nullptr;
+      }
+      if (!MM().preAllocOOM(bufSize)) {
+        tl_buffer.raw = (char*)malloc(bufSize);
+        if (!tl_buffer.raw) MM().forceOOM();
+      }
+      check_non_safepoint_surprise();
+      always_assert(tl_buffer.raw);
       sb_buf.setBuf(tl_buffer.raw, sb_cap);
       sb_key.setBuf(tl_buffer.raw + sb_cap, sb_cap);
     } else {
struct json_parser {
   }
   void flushSb() {
     if (tl_buffer.raw) {
-      delete[] tl_buffer.raw;
+      free(tl_buffer.raw);
       tl_buffer.raw = nullptr;
     }
     sb_cap = 0;
Throw OOM instead of nullptr deref on huge JSON
facebook/hhvm
b0e049145f442fb8b87a3146d957ffbbbe276d46
2017-07-12T20:35:46Z
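The substance of the change is the allocation strategy: new char[] throws on failure (and the old code gave the runtime no chance to recover), whereas malloc returns null and lets the parser raise the engine's own out-of-memory error; the MM().preAllocOOM / forceOOM calls are HHVM's memory-manager hooks for that. A generic sketch of the pattern, with on_out_of_memory standing in for those hooks (it is not a real HHVM API):

#include <cstdio>
#include <cstdlib>

static char* buffer_raw = nullptr;

[[noreturn]] void on_out_of_memory(size_t bytes) {
  std::fprintf(stderr, "out of memory requesting %zu bytes\n", bytes);
  std::abort();
}

void ensure_buffer(size_t buf_size) {
  if (buffer_raw) {
    std::free(buffer_raw);     // release the previous buffer first
    buffer_raw = nullptr;
  }
  buffer_raw = static_cast<char*>(std::malloc(buf_size));
  if (!buffer_raw) on_out_of_memory(buf_size);  // surface OOM deliberately
}

void flush_buffer() {
  std::free(buffer_raw);       // free(nullptr) is a harmless no-op
  buffer_raw = nullptr;
}

int main() {
  ensure_buffer(1 << 20);
  flush_buffer();
  return 0;
}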
--- a/dbms/include/DB/Storages/MergeTree/MergeTreeData.h
+++ b/dbms/include/DB/Storages/MergeTree/MergeTreeData.h
struct MergeTreeSettings
     double insert_delay_step = 1.1;

     /// For how many of the most recent blocks to keep hashes in ZooKeeper.
-    size_t replicated_deduplication_window = 1000;
+    size_t replicated_deduplication_window = 100;

     /// Keep roughly this many of the most recent records in the ZooKeeper log, even if nobody needs them any more.
     /// Does not affect how tables work; it is only used so the log in ZooKeeper can be inspected by eye before it is cleaned up.
Merge
ClickHouse/ClickHouse
857f7db99d2202ce7f228088e3dba313358033d2
2014-08-07T19:20:30Z