diff (stringlengths 41–2.03M) | msg (stringlengths 1–1.5k, ⌀) | repo (stringlengths 5–40) | sha (stringlengths 40) | time (stringlengths 20) |
---|---|---|---|---|
mmm a / src / Application . cpp <nl> ppp b / src / Application . cpp <nl> Application : : Application ( int & argc , char * * argv ) : <nl> m_translatorQt = nullptr ; <nl> } <nl> <nl> + / / On Windows , add the plugins subdirectory to the list of library directories . We need this <nl> + / / for Qt to search for more image format plugins . <nl> + # ifdef Q_WS_WIN <nl> + QApplication : : addLibraryPath ( QApplication : : applicationDirPath ( ) + " / plugins " ) ; <nl> + # endif <nl> + <nl> + <nl> / / Parse command line <nl> QString fileToOpen ; <nl> QString tableToBrowse ; <nl>
|
Explicitly specify the plugins directory on Windows systems
|
sqlitebrowser/sqlitebrowser
|
3fe181bba78596968c7b3f776de1bc1e6f504f0a
|
2017-10-31T16:58:25Z
|
mmm a / test / core / end2end / invalid_call_argument_test . c <nl> ppp b / test / core / end2end / invalid_call_argument_test . c <nl> static void cleanup_test ( ) { <nl> } <nl> <nl> static void test_non_null_reserved_on_start_batch ( ) { <nl> + gpr_log ( GPR_INFO , " test_non_null_reserved_on_start_batch " ) ; <nl> + <nl> prepare_test ( 1 ) ; <nl> GPR_ASSERT ( GRPC_CALL_ERROR = = <nl> grpc_call_start_batch ( g_state . call , NULL , 0 , NULL , tag ( 1 ) ) ) ; <nl> static void test_non_null_reserved_on_start_batch ( ) { <nl> } <nl> <nl> static void test_non_null_reserved_on_op ( ) { <nl> + gpr_log ( GPR_INFO , " test_non_null_reserved_on_op " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_non_null_reserved_on_op ( ) { <nl> } <nl> <nl> static void test_send_initial_metadata_more_than_once ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_initial_metadata_more_than_once " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_send_initial_metadata_more_than_once ( ) { <nl> } <nl> <nl> static void test_too_many_metadata ( ) { <nl> + gpr_log ( GPR_INFO , " test_too_many_metadata " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_too_many_metadata ( ) { <nl> } <nl> <nl> static void test_send_null_message ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_null_message " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_send_null_message ( ) { <nl> } <nl> <nl> static void test_send_messages_at_the_same_time ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_messages_at_the_same_time " ) ; <nl> + <nl> grpc_op * op ; <nl> gpr_slice request_payload_slice = gpr_slice_from_copied_string ( " hello world " ) ; <nl> grpc_byte_buffer * request_payload = <nl> static void test_send_messages_at_the_same_time ( ) { <nl> } <nl> <nl> static void test_send_server_status_from_client ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_server_status_from_client " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_send_server_status_from_client ( ) { <nl> } <nl> <nl> static void test_receive_initial_metadata_twice_at_client ( ) { <nl> + gpr_log ( GPR_INFO , " test_receive_initial_metadata_twice_at_client " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> op = g_state . 
ops ; <nl> static void test_receive_initial_metadata_twice_at_client ( ) { <nl> } <nl> <nl> static void test_receive_message_with_invalid_flags ( ) { <nl> + gpr_log ( GPR_INFO , " test_receive_message_with_invalid_flags " ) ; <nl> + <nl> grpc_op * op ; <nl> grpc_byte_buffer * payload = NULL ; <nl> prepare_test ( 1 ) ; <nl> static void test_receive_message_with_invalid_flags ( ) { <nl> } <nl> <nl> static void test_receive_two_messages_at_the_same_time ( ) { <nl> + gpr_log ( GPR_INFO , " test_receive_two_messages_at_the_same_time " ) ; <nl> + <nl> grpc_op * op ; <nl> grpc_byte_buffer * payload = NULL ; <nl> prepare_test ( 1 ) ; <nl> static void test_receive_two_messages_at_the_same_time ( ) { <nl> } <nl> <nl> static void test_recv_close_on_server_from_client ( ) { <nl> + gpr_log ( GPR_INFO , " test_recv_close_on_server_from_client " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_recv_close_on_server_from_client ( ) { <nl> } <nl> <nl> static void test_recv_status_on_client_twice ( ) { <nl> + gpr_log ( GPR_INFO , " test_recv_status_on_client_twice " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 1 ) ; <nl> <nl> static void test_recv_status_on_client_twice ( ) { <nl> } <nl> <nl> static void test_send_close_from_client_on_server ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_close_from_client_on_server " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl> static void test_send_close_from_client_on_server ( ) { <nl> } <nl> <nl> static void test_recv_status_on_client_from_server ( ) { <nl> + gpr_log ( GPR_INFO , " test_recv_status_on_client_from_server " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl> static void test_recv_status_on_client_from_server ( ) { <nl> } <nl> <nl> static void test_send_status_from_server_with_invalid_flags ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_status_from_server_with_invalid_flags " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl> static void test_send_status_from_server_with_invalid_flags ( ) { <nl> } <nl> <nl> static void test_too_many_trailing_metadata ( ) { <nl> + gpr_log ( GPR_INFO , " test_too_many_trailing_metadata " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl> static void test_too_many_trailing_metadata ( ) { <nl> } <nl> <nl> static void test_send_server_status_twice ( ) { <nl> + gpr_log ( GPR_INFO , " test_send_server_status_twice " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl> static void test_send_server_status_twice ( ) { <nl> } <nl> <nl> static void test_recv_close_on_server_with_invalid_flags ( ) { <nl> + gpr_log ( GPR_INFO , " test_recv_close_on_server_with_invalid_flags " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl> static void test_recv_close_on_server_with_invalid_flags ( ) { <nl> } <nl> <nl> static void test_recv_close_on_server_twice ( ) { <nl> + gpr_log ( GPR_INFO , " test_recv_close_on_server_twice " ) ; <nl> + <nl> grpc_op * op ; <nl> prepare_test ( 0 ) ; <nl> <nl>
|
Log test names
|
grpc/grpc
|
781bcd2fe263fc98dfea8393f3fd7b53b214c973
|
2016-03-16T23:59:34Z
|
mmm a / scene / gui / text_edit . cpp <nl> ppp b / scene / gui / text_edit . cpp <nl> Vector < String > TextEdit : : get_wrap_rows_text ( int p_line ) const { <nl> int tab_offset_px = get_indent_level ( p_line ) * cache . font - > get_char_size ( ' ' ) . width ; <nl> <nl> while ( col < line_text . length ( ) ) { <nl> - char c = line_text [ col ] ; <nl> + CharType c = line_text [ col ] ; <nl> int w = text . get_char_width ( c , line_text [ col + 1 ] , px + word_px ) ; <nl> <nl> int indent_ofs = ( cur_wrap_index ! = 0 ? tab_offset_px : 0 ) ; <nl> String TextEdit : : get_word_at_pos ( const Vector2 & p_pos ) const { <nl> if ( select_word ( s , col , beg , end ) ) { <nl> <nl> bool inside_quotes = false ; <nl> - char selected_quote = ' \ 0 ' ; <nl> + CharType selected_quote = ' \ 0 ' ; <nl> int qbegin = 0 , qend = 0 ; <nl> for ( int i = 0 ; i < s . length ( ) ; i + + ) { <nl> if ( s [ i ] = = ' " ' | | s [ i ] = = ' \ ' ' ) { <nl>
|
Fix some wchar_t truncations
|
godotengine/godot
|
9cf689183d31fd57a5cc6074ed49375d02cc3041
|
2018-09-28T04:31:15Z
|
mmm a / tensorflow / core / distributed_runtime / master_session . cc <nl> ppp b / tensorflow / core / distributed_runtime / master_session . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / mutex . h " <nl> # include " tensorflow / core / platform / tracing . h " <nl> # include " tensorflow / core / public / session_options . h " <nl> + # include " tensorflow / core / util / device_name_utils . h " <nl> <nl> namespace tensorflow { <nl> <nl> Status MasterSession : : CreateWorkerSessions ( <nl> } <nl> } ) ; <nl> <nl> + string task_name ; <nl> + string local_device_name ; <nl> + DeviceNameUtils : : SplitDeviceName ( devices_ - > client_device ( ) - > name ( ) , <nl> + & task_name , & local_device_name ) ; <nl> + const int64 client_device_incarnation = <nl> + devices_ - > client_device ( ) - > attributes ( ) . incarnation ( ) ; <nl> + <nl> Status status = Status : : OK ( ) ; <nl> / / Create all the workers & kick off the computations . <nl> for ( size_t i = 0 ; i < worker_names . size ( ) ; + + i ) { <nl> workers [ i ] . name = & worker_names [ i ] ; <nl> workers [ i ] . worker = worker_cache - > GetOrCreateWorker ( worker_names [ i ] ) ; <nl> workers [ i ] . request . set_session_handle ( handle_ ) ; <nl> + workers [ i ] . request . set_master_task ( task_name ) ; <nl> + workers [ i ] . request . set_master_incarnation ( client_device_incarnation ) ; <nl> if ( session_opts_ . config . share_cluster_devices_in_session ( ) | | <nl> session_opts_ . config . experimental ( ) <nl> . share_cluster_devices_in_session ( ) ) { <nl> mmm a / tensorflow / core / distributed_runtime / session_mgr . cc <nl> ppp b / tensorflow / core / distributed_runtime / session_mgr . cc <nl> Status SessionMgr : : CreateSession ( <nl> const protobuf : : RepeatedPtrField < DeviceAttributes > & <nl> cluster_device_attributes , <nl> bool isolate_session_state ) { <nl> + return CreateSession ( session , server_def , cluster_device_attributes , <nl> + isolate_session_state , / * master_task = * / " " , <nl> + / * master_incarnation = * / 0 ) ; <nl> + } <nl> + <nl> + Status SessionMgr : : CreateSession ( <nl> + const string & session , const ServerDef & server_def , <nl> + const protobuf : : RepeatedPtrField < DeviceAttributes > & <nl> + cluster_device_attributes , <nl> + bool isolate_session_state , string master_task , int64 master_incarnation ) { <nl> mutex_lock l ( mu_ ) ; <nl> if ( session . empty ( ) ) { <nl> return errors : : InvalidArgument ( " Session must be non - empty . " ) ; <nl> } <nl> <nl> + / / For given master task name , check if one or more ` WorkerSession ` s have been <nl> + / / created previously on this worker , and if so garbage collect the expired <nl> + / / ` WorkerSession ` s . This happens when the master fails before sending <nl> + / / ` DeleteSession ` requests , which can cause ` WorkerSession ` s to be leaked . <nl> + if ( ! master_task . empty ( ) ) { <nl> + auto it_range = master_to_associated_sessions_ . equal_range ( master_task ) ; <nl> + if ( it_range . first ! = it_range . second & & <nl> + it_range . first - > second . master_incarnation ! = master_incarnation ) { <nl> + LOG ( INFO ) < < " When creating WorkerSession for master task " < < master_task <nl> + < < " , found old WorkerSessions created by the same master task " <nl> + < < " with a different incarnation . These sessions will " <nl> + < < " be garbage collected . Current WorkerSession count : " <nl> + < < sessions_ . size ( ) ; <nl> + <nl> + auto it = it_range . 
first ; <nl> + while ( it ! = it_range . second ) { <nl> + auto session_it = sessions_ . find ( it - > second . session_handle ) ; <nl> + if ( session_it ! = sessions_ . end ( ) ) { <nl> + sessions_ . erase ( session_it ) ; <nl> + } <nl> + it = master_to_associated_sessions_ . erase ( it ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> WorkerCacheInterface * worker_cache = nullptr ; <nl> string worker_name ; <nl> if ( server_def . cluster ( ) . job ( ) . empty ( ) ) { <nl> Status SessionMgr : : CreateSession ( <nl> } <nl> <nl> sessions_ . insert ( std : : make_pair ( session , std : : move ( worker_session ) ) ) ; <nl> + if ( ! master_task . empty ( ) ) { <nl> + MasterAssociatedSession s { master_incarnation , session } ; <nl> + master_to_associated_sessions_ . emplace ( master_task , s ) ; <nl> + } <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / distributed_runtime / session_mgr . h <nl> ppp b / tensorflow / core / distributed_runtime / session_mgr . h <nl> limitations under the License . <nl> # include " tensorflow / core / distributed_runtime / worker_session . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> + # include " tensorflow / core / platform / thread_annotations . h " <nl> # include " tensorflow / core / protobuf / tensorflow_server . pb . h " <nl> # include " tensorflow / core / protobuf / worker . pb . h " <nl> <nl> class SessionMgr { <nl> const protobuf : : RepeatedPtrField < DeviceAttributes > & device_attributes , <nl> bool isolate_session_state ) ; <nl> <nl> + / / Create WorkerSession from the master with the given ` master_task ` and <nl> + / / ` master_incarnation ` . We first look for existing WorkerSessions associated <nl> + / / with the specified master task . If there are sessions created by the same <nl> + / / master but with a different incarnation , it indicates that the remote <nl> + / / master has restarted before deleting the sessions on worker . When it <nl> + / / happens , old sessions associated with the master will be automatically <nl> + / / removed before the new session is created . <nl> + Status CreateSession ( <nl> + const string & session , const ServerDef & server_def , <nl> + const protobuf : : RepeatedPtrField < DeviceAttributes > & device_attributes , <nl> + bool isolate_session_state , string master_task , int64 master_incarnation ) ; <nl> + <nl> void ResetDefaultWorkerCache ( WorkerCacheInterface * worker_cache ) ; <nl> <nl> / / Updates state ( worker cache , devices ) of worker session identified by <nl> class SessionMgr { <nl> mutex mu_ ; <nl> / / A map from session identifier to internal session structure . <nl> std : : map < string , std : : shared_ptr < WorkerSession > > sessions_ TF_GUARDED_BY ( mu_ ) ; <nl> + <nl> + / / Incarnation and WorkerSession handle associated with a master task . <nl> + struct MasterAssociatedSession { <nl> + const int64 master_incarnation ; <nl> + const string session_handle ; <nl> + } ; <nl> + / / A map from master task name to its associated worker sessions . <nl> + std : : unordered_multimap < string , MasterAssociatedSession > <nl> + master_to_associated_sessions_ TF_GUARDED_BY ( mu_ ) ; <nl> } ; <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / distributed_runtime / session_mgr_test . cc <nl> ppp b / tensorflow / core / distributed_runtime / session_mgr_test . 
cc <nl> TEST_F ( SessionMgrTest , CreateSessionIsolateSessionState ) { <nl> EXPECT_NE ( devices_3 [ 0 ] - > resource_manager ( ) , devices_4 [ 0 ] - > resource_manager ( ) ) ; <nl> } <nl> <nl> + TEST_F ( SessionMgrTest , CreateSessionWithMasterName ) { <nl> + ServerDef server_def ; <nl> + server_def . set_job_name ( " worker " ) ; <nl> + server_def . set_task_index ( 3 ) ; <nl> + auto job = server_def . mutable_cluster ( ) - > add_job ( ) ; <nl> + job - > set_name ( " worker " ) ; <nl> + job - > mutable_tasks ( ) - > insert ( { 3 , " localhost : 3333 " } ) ; <nl> + <nl> + protobuf : : RepeatedPtrField < DeviceAttributes > cluster_device_attributes ; <nl> + <nl> + const string master_name = " / job : master / replica : 0 / task : 1 " ; <nl> + const int64 old_incarnation = random : : New64 ( ) ; <nl> + const int64 new_incarnation = random : : New64 ( ) ; <nl> + <nl> + / / Allow multiple worker sessions to be created by the same master <nl> + string sess_handle1 = " test_session_handle_1 " ; <nl> + TF_EXPECT_OK ( mgr_ . CreateSession ( sess_handle1 , server_def , <nl> + cluster_device_attributes , true , master_name , <nl> + old_incarnation ) ) ; <nl> + string sess_handle2 = " test_session_handle_2 " ; <nl> + TF_EXPECT_OK ( mgr_ . CreateSession ( sess_handle2 , server_def , <nl> + cluster_device_attributes , true , master_name , <nl> + old_incarnation ) ) ; <nl> + <nl> + std : : shared_ptr < WorkerSession > session ; <nl> + TF_EXPECT_OK ( mgr_ . WorkerSessionForSession ( sess_handle1 , & session ) ) ; <nl> + EXPECT_NE ( nullptr , session ) < < " Session for " < < sess_handle1 < < " was null " ; <nl> + <nl> + TF_EXPECT_OK ( mgr_ . WorkerSessionForSession ( sess_handle2 , & session ) ) ; <nl> + EXPECT_NE ( nullptr , session ) < < " Session for " < < sess_handle2 < < " was null " ; <nl> + <nl> + / / When the master creates a WorkerSession with new incarnation , the old <nl> + / / WorkerSessions should be garbage collected . <nl> + string sess_handle3 = " test_session_handle_3 " ; <nl> + TF_EXPECT_OK ( mgr_ . CreateSession ( sess_handle3 , server_def , <nl> + cluster_device_attributes , true , master_name , <nl> + new_incarnation ) ) ; <nl> + <nl> + EXPECT_NE ( mgr_ . WorkerSessionForSession ( sess_handle1 , & session ) , <nl> + tensorflow : : Status : : OK ( ) ) <nl> + < < " Session for " < < sess_handle1 <nl> + < < " should have been garbage collected . " ; <nl> + <nl> + EXPECT_NE ( mgr_ . WorkerSessionForSession ( sess_handle2 , & session ) , <nl> + tensorflow : : Status : : OK ( ) ) <nl> + < < " Session for " < < sess_handle2 <nl> + < < " should have been garbage collected . " ; <nl> + <nl> + TF_EXPECT_OK ( mgr_ . WorkerSessionForSession ( sess_handle3 , & session ) ) ; <nl> + EXPECT_NE ( nullptr , session ) < < " Session for " < < sess_handle3 < < " was null " ; <nl> + <nl> + TF_EXPECT_OK ( mgr_ . DeleteSession ( sess_handle2 ) ) ; <nl> + TF_EXPECT_OK ( mgr_ . DeleteSession ( sess_handle3 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( SessionMgrTest , CreateSessionWithoutMasterName ) { <nl> + ServerDef server_def ; <nl> + server_def . set_job_name ( " worker " ) ; <nl> + server_def . set_task_index ( 3 ) ; <nl> + auto job = server_def . mutable_cluster ( ) - > add_job ( ) ; <nl> + job - > set_name ( " worker " ) ; <nl> + job - > mutable_tasks ( ) - > insert ( { 3 , " localhost : 3333 " } ) ; <nl> + <nl> + protobuf : : RepeatedPtrField < DeviceAttributes > cluster_device_attributes ; <nl> + <nl> + / / WorkerSession will NOT be garbage collected for empty master names . 
<nl> + string sess_handle1 = " test_session_handle_no_master_1 " ; <nl> + TF_EXPECT_OK ( mgr_ . CreateSession ( sess_handle1 , server_def , <nl> + cluster_device_attributes , true , " " , 0 ) ) ; <nl> + string sess_handle2 = " test_session_handle_no_master_2 " ; <nl> + TF_EXPECT_OK ( mgr_ . CreateSession ( sess_handle2 , server_def , <nl> + cluster_device_attributes , true , " " , 0 ) ) ; <nl> + <nl> + std : : shared_ptr < WorkerSession > session ; <nl> + TF_EXPECT_OK ( mgr_ . WorkerSessionForSession ( sess_handle1 , & session ) ) ; <nl> + EXPECT_NE ( nullptr , session ) < < " Session for " < < sess_handle1 < < " was null " ; <nl> + <nl> + TF_EXPECT_OK ( mgr_ . WorkerSessionForSession ( sess_handle2 , & session ) ) ; <nl> + EXPECT_NE ( nullptr , session ) < < " Session for " < < sess_handle2 < < " was null " ; <nl> + <nl> + TF_EXPECT_OK ( mgr_ . DeleteSession ( sess_handle1 ) ) ; <nl> + TF_EXPECT_OK ( mgr_ . DeleteSession ( sess_handle2 ) ) ; <nl> + } <nl> + <nl> TEST_F ( SessionMgrTest , LegacySession ) { <nl> string session_handle = " " ; <nl> std : : shared_ptr < WorkerSession > session ; <nl> mmm a / tensorflow / core / distributed_runtime / worker . cc <nl> ppp b / tensorflow / core / distributed_runtime / worker . cc <nl> void Worker : : CreateWorkerSessionAsync ( const CreateWorkerSessionRequest * request , <nl> StatusCallback done ) { <nl> Status s = env_ - > session_mgr - > CreateSession ( <nl> request - > session_handle ( ) , request - > server_def ( ) , <nl> - request - > cluster_device_attributes ( ) , request - > isolate_session_state ( ) ) ; <nl> + request - > cluster_device_attributes ( ) , request - > isolate_session_state ( ) , <nl> + request - > master_task ( ) , request - > master_incarnation ( ) ) ; <nl> done ( s ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / protobuf / worker . proto <nl> ppp b / tensorflow / core / protobuf / worker . proto <nl> message CreateWorkerSessionRequest { <nl> <nl> / / The device attributes of all the devices in the cluster . <nl> repeated DeviceAttributes cluster_device_attributes = 4 ; <nl> + <nl> + / / The master task name from which the request is sent . <nl> + string master_task = 5 ; <nl> + <nl> + / / The incarnation ID of the master task local CPU device . <nl> + / / If the target worker already has a WorkerSession created previously with <nl> + / / the same master task name but a different incarnation , it usually indicates <nl> + / / that the previous master failed before deleting the WorkerSession on the <nl> + / / worker . To prevent memory leaks , the worker should garbage collect the old <nl> + / / WorkerSessions . <nl> + int64 master_incarnation = 6 ; <nl> } <nl> <nl> message CreateWorkerSessionResponse { } <nl> mmm a / tensorflow / python / training / server_lib_test . py <nl> ppp b / tensorflow / python / training / server_lib_test . py <nl> <nl> <nl> import numpy as np <nl> <nl> + from tensorflow . core . protobuf import cluster_pb2 <nl> from tensorflow . core . protobuf import config_pb2 <nl> from tensorflow . core . protobuf import tensorflow_server_pb2 <nl> from tensorflow . python . client import session <nl> def testSetConfiguration ( self ) : <nl> self . assertEqual ( 0 . 1 , server . server_def . default_session_config . gpu_options . <nl> per_process_gpu_memory_fraction ) <nl> <nl> + def testRestartedMaster ( self ) : <nl> + master_old = server_lib . Server . create_local_server ( ) <nl> + master_new = server_lib . Server . create_local_server ( ) <nl> + worker = self . 
_cached_server <nl> + <nl> + def get_cluster_def ( master , worker ) : <nl> + cluster_def = cluster_pb2 . ClusterDef ( ) <nl> + job = cluster_def . job . add ( ) <nl> + job . name = " master " <nl> + job . tasks [ 0 ] = master . target [ len ( " grpc : / / " ) : ] <nl> + job = cluster_def . job . add ( ) <nl> + job . name = " worker " <nl> + job . tasks [ 0 ] = worker . target [ len ( " grpc : / / " ) : ] <nl> + return cluster_def <nl> + <nl> + def check_session_devices ( sess ) : <nl> + # Make sure we have the correct set of cluster devices <nl> + devices = sess . list_devices ( ) <nl> + device_names = set ( d . name for d in devices ) <nl> + self . assertIn ( " / job : master / replica : 0 / task : 0 / device : CPU : 0 " , device_names ) <nl> + self . assertIn ( " / job : worker / replica : 0 / task : 0 / device : CPU : 0 " , device_names ) <nl> + <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + # Construct a simple graph that runs ops on remote worker <nl> + with ops . device ( " / job : worker / replica : 0 / task : 0 / device : CPU : 0 " ) : <nl> + a = constant_op . constant ( [ 1 . 0 ] ) <nl> + b = a + a <nl> + <nl> + config = config_pb2 . ConfigProto ( <nl> + cluster_def = get_cluster_def ( master_old , worker ) ) <nl> + sess_old = session . Session ( master_old . target , config = config ) <nl> + check_session_devices ( sess_old ) <nl> + <nl> + # Create a session with the new master and the worker . <nl> + # The new master has the same task name ( ' / job : master / replica : 0 / task : 0 ' ) <nl> + # as the old master , but is initiated from a different server thus has a <nl> + # different incarnation . This triggers the WorkerSession on worker with <nl> + # the old master incarnation to be garbage collected . <nl> + <nl> + config = config_pb2 . ConfigProto ( <nl> + cluster_def = get_cluster_def ( master_new , worker ) ) <nl> + sess_new = session . Session ( master_new . target , config = config ) <nl> + check_session_devices ( sess_new ) <nl> + <nl> + # Running on worker with the new session should work as expected <nl> + v = sess_new . run ( b ) <nl> + self . assertAllEqual ( v , [ 2 . 0 ] ) <nl> + <nl> + # Running on worker with the old session should raise an exception since <nl> + # the WorkerSession of the old session has been garbage collected <nl> + with self . assertRaisesRegex ( errors_impl . AbortedError , <nl> + " Session handle is not found " ) : <nl> + sess_old . run ( b ) <nl> + <nl> + sess_old . close ( ) <nl> + sess_new . close ( ) <nl> + <nl> def testInvalidHostname ( self ) : <nl> with self . assertRaisesRegex ( errors_impl . InvalidArgumentError , " port " ) : <nl> _ = server_lib . Server ( <nl>
|
Garbage collect old WorkerSessions when the restarted master task creates a new one.
|
tensorflow/tensorflow
|
dbc843d6ec806def963ed8016aca337595faa10d
|
2020-08-03T18:31:26Z
|
mmm a / swoole_socket_coro . c <nl> ppp b / swoole_socket_coro . c <nl> enum socket_opcode <nl> <nl> typedef struct <nl> { <nl> - zend_object std ; <nl> zval object ; <nl> int fd ; <nl> int domain ; <nl> typedef struct <nl> # ifdef SWOOLE_SOCKETS_SUPPORT <nl> zval * resource ; <nl> # endif <nl> + zend_object std ; <nl> } socket_coro ; <nl> <nl> static PHP_METHOD ( swoole_socket_coro , __construct ) ; <nl> static const zend_function_entry swoole_socket_coro_methods [ ] = <nl> PHP_FE_END <nl> } ; <nl> <nl> - static zend_object * swoole_socket_coro_create ( zend_class_entry * ce ) <nl> + static inline socket_coro * sw_socket_coro_fetch_object ( zend_object * obj ) <nl> { <nl> - socket_coro * sock = emalloc ( sizeof ( socket_coro ) ) ; <nl> - bzero ( sock , sizeof ( socket_coro ) ) ; <nl> + return ( socket_coro * ) ( ( char * ) obj - XtOffsetOf ( socket_coro , std ) ) ; <nl> + } <nl> + <nl> + # define Z_SOCKET_CORO_OBJ_P ( zv ) sw_socket_coro_fetch_object ( Z_OBJ_P ( zv ) ) ; <nl> + <nl> + <nl> + static void swoole_socket_coro_free_storage ( zend_object * object ) <nl> + { <nl> + socket_coro * sock = ( socket_coro * ) sw_socket_coro_fetch_object ( object ) ; <nl> + if ( sock - > fd > = 0 ) <nl> + { <nl> + SwooleG . main_reactor - > close ( SwooleG . main_reactor , sock - > fd ) ; <nl> + } <nl> + zend_object_std_dtor ( & sock - > std ) ; <nl> + } <nl> <nl> - zend_object_std_init ( & sock - > std , ce ) ; <nl> + static zend_object * swoole_socket_coro_create ( zend_class_entry * ce TSRMLS_DC ) <nl> + { <nl> + socket_coro * sock = ecalloc ( 1 , sizeof ( socket_coro ) + zend_object_properties_size ( ce ) ) ; <nl> + zend_object_std_init ( & sock - > std , ce TSRMLS_CC ) ; <nl> + object_properties_init ( & sock - > std , ce ) ; <nl> sock - > std . handlers = & swoole_socket_coro_handlers ; <nl> <nl> return & sock - > std ; <nl> } <nl> <nl> + void swoole_socket_coro_init ( int module_number TSRMLS_DC ) <nl> + { <nl> + INIT_CLASS_ENTRY ( swoole_socket_coro_ce , " Swoole \ \ Coroutine \ \ Socket " , swoole_socket_coro_methods ) ; <nl> + <nl> + swoole_socket_coro_class_entry_ptr = zend_register_internal_class ( & swoole_socket_coro_ce TSRMLS_CC ) ; <nl> + swoole_socket_coro_class_entry_ptr - > ce_flags | = ZEND_ACC_FINAL ; <nl> + swoole_socket_coro_class_entry_ptr - > create_object = swoole_socket_coro_create ; <nl> + swoole_socket_coro_class_entry_ptr - > serialize = zend_class_serialize_deny ; <nl> + swoole_socket_coro_class_entry_ptr - > unserialize = zend_class_unserialize_deny ; <nl> + zend_declare_property_long ( swoole_socket_coro_class_entry_ptr , SW_STRL ( " errCode " ) - 1 , 0 , ZEND_ACC_PUBLIC TSRMLS_CC ) ; <nl> + <nl> + memcpy ( & swoole_socket_coro_handlers , zend_get_std_object_handlers ( ) , sizeof ( swoole_socket_coro_handlers ) ) ; <nl> + swoole_socket_coro_handlers . free_obj = swoole_socket_coro_free_storage ; <nl> + swoole_socket_coro_handlers . clone_obj = NULL ; <nl> + swoole_socket_coro_handlers . 
offset = XtOffsetOf ( socket_coro , std ) ; <nl> + <nl> + INIT_CLASS_ENTRY ( swoole_socket_coro_exception_ce , " Swoole \ \ Coroutine \ \ Socket \ \ Exception " , NULL ) ; <nl> + swoole_socket_coro_exception_class_entry_ptr = sw_zend_register_internal_class_ex ( & swoole_socket_coro_exception_ce , <nl> + zend_exception_get_default ( TSRMLS_C ) , NULL TSRMLS_CC ) ; <nl> + <nl> + if ( SWOOLE_G ( use_shortname ) ) <nl> + { <nl> + sw_zend_register_class_alias ( " Co \ \ Socket " , swoole_socket_coro_class_entry_ptr ) ; <nl> + sw_zend_register_class_alias ( " Co \ \ Socket \ \ Exception " , swoole_socket_coro_exception_class_entry_ptr ) ; <nl> + } <nl> + } <nl> + <nl> static int socket_onReadable ( swReactor * reactor , swEvent * event ) <nl> { <nl> socket_coro * sock = ( socket_coro * ) event - > socket - > object ; <nl> static int socket_onReadable ( swReactor * reactor , swEvent * event ) <nl> # endif <nl> if ( conn > = 0 ) <nl> { <nl> - object_init_ex ( & result , swoole_socket_coro_class_entry_ptr ) ; <nl> - socket_coro * client_sock = ( socket_coro * ) Z_OBJ ( result ) ; <nl> + zend_object * client ; <nl> + client = swoole_socket_coro_create ( swoole_socket_coro_class_entry_ptr ) ; <nl> + socket_coro * client_sock = ( socket_coro * ) sw_socket_coro_fetch_object ( client ) ; <nl> + ZVAL_OBJ ( & result , & client_sock - > std ) ; <nl> client_sock - > fd = conn ; <nl> client_sock - > domain = sock - > domain ; <nl> + client_sock - > object = result ; <nl> + / / zend_object_std_dtor ( & client_sock - > std ) ; <nl> } <nl> else <nl> { <nl> static int socket_onWritable ( swReactor * reactor , swEvent * event ) <nl> <nl> static void socket_onResolveCompleted ( swAio_event * event ) <nl> { <nl> - socket_coro * sock = event - > object ; <nl> + socket_coro * sock = ( socket_coro * ) event - > object ; <nl> php_context * context = & sock - > context ; <nl> <nl> zval * retval = NULL ; <nl> static void socket_onTimeout ( swTimer * timer , swTimer_node * tnode ) <nl> } <nl> } <nl> <nl> - static void swoole_socket_coro_free_storage ( zend_object * object ) <nl> - { <nl> - socket_coro * sock = ( socket_coro * ) object ; <nl> - if ( sock - > fd > = 0 ) <nl> - { <nl> - SwooleG . main_reactor - > close ( SwooleG . main_reactor , sock - > fd ) ; <nl> - } <nl> - zend_object_std_dtor ( & sock - > std ) ; <nl> - } <nl> - <nl> static int swoole_socket_connect ( socket_coro * sock , char * host , size_t l_host , int port ) <nl> { <nl> switch ( sock - > domain ) <nl> static int swoole_socket_connect ( socket_coro * sock , char * host , size_t l_host , i <nl> return - 3 ; <nl> } <nl> <nl> - void swoole_socket_coro_init ( int module_number TSRMLS_DC ) <nl> - { <nl> - INIT_CLASS_ENTRY ( swoole_socket_coro_ce , " Swoole \ \ Coroutine \ \ Socket " , swoole_socket_coro_methods ) ; <nl> - <nl> - swoole_socket_coro_class_entry_ptr = zend_register_internal_class ( & swoole_socket_coro_ce ) ; <nl> - swoole_socket_coro_class_entry_ptr - > ce_flags | = ZEND_ACC_FINAL ; <nl> - swoole_socket_coro_class_entry_ptr - > create_object = swoole_socket_coro_create ; <nl> - swoole_socket_coro_class_entry_ptr - > serialize = zend_class_serialize_deny ; <nl> - swoole_socket_coro_class_entry_ptr - > unserialize = zend_class_unserialize_deny ; <nl> - <nl> - memcpy ( & swoole_socket_coro_handlers , zend_get_std_object_handlers ( ) , sizeof ( zend_object_handlers ) ) ; <nl> - swoole_socket_coro_handlers . free_obj = swoole_socket_coro_free_storage ; <nl> - swoole_socket_coro_handlers . 
clone_obj = NULL ; <nl> - <nl> - zend_declare_property_long ( swoole_socket_coro_class_entry_ptr , SW_STRL ( " errCode " ) - 1 , 0 , ZEND_ACC_PUBLIC TSRMLS_CC ) ; <nl> - <nl> - INIT_CLASS_ENTRY ( swoole_socket_coro_exception_ce , " Swoole \ \ Coroutine \ \ Socket \ \ Exception " , NULL ) ; <nl> - swoole_socket_coro_exception_class_entry_ptr = sw_zend_register_internal_class_ex ( & swoole_socket_coro_exception_ce , <nl> - zend_exception_get_default ( TSRMLS_C ) , NULL TSRMLS_CC ) ; <nl> - <nl> - if ( SWOOLE_G ( use_shortname ) ) <nl> - { <nl> - sw_zend_register_class_alias ( " Co \ \ Socket " , swoole_socket_coro_class_entry_ptr ) ; <nl> - sw_zend_register_class_alias ( " Co \ \ Socket \ \ Exception " , swoole_socket_coro_exception_class_entry_ptr ) ; <nl> - } <nl> - } <nl> - <nl> static PHP_METHOD ( swoole_socket_coro , __construct ) <nl> { <nl> zend_long domain , type , protocol ; <nl> static PHP_METHOD ( swoole_socket_coro , __construct ) <nl> Z_PARAM_LONG ( protocol ) ; <nl> ZEND_PARSE_PARAMETERS_END ( ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> sock - > fd = socket ( domain , type , protocol ) ; <nl> sock - > domain = domain ; <nl> sock - > type = type ; <nl> static PHP_METHOD ( swoole_socket_coro , bind ) <nl> <nl> int retval ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> switch ( sock - > domain ) <nl> { <nl> case AF_UNIX : <nl> static PHP_METHOD ( swoole_socket_coro , listen ) <nl> Z_PARAM_LONG ( backlog ) ; <nl> ZEND_PARSE_PARAMETERS_END ( ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( listen ( sock - > fd , backlog ) ! = 0 ) <nl> { <nl> zend_update_property_long ( swoole_socket_coro_class_entry_ptr , getThis ( ) , ZEND_STRL ( " errCode " ) , errno TSRMLS_CC ) ; <nl> static PHP_METHOD ( swoole_socket_coro , accept ) <nl> Z_PARAM_DOUBLE ( timeout ) ; <nl> ZEND_PARSE_PARAMETERS_END ( ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( unlikely ( sock - > cid & & sock - > cid ! = sw_get_current_cid ( ) ) ) <nl> { <nl> swoole_php_fatal_error ( E_WARNING , " socket has already been bound to another coroutine . " ) ; <nl> static PHP_METHOD ( swoole_socket_coro , recv ) <nl> Z_PARAM_DOUBLE ( timeout ) ; <nl> ZEND_PARSE_PARAMETERS_END ( ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( unlikely ( sock - > cid & & sock - > cid ! = sw_get_current_cid ( ) ) ) <nl> { <nl> swoole_php_fatal_error ( E_WARNING , " socket has already been bound to another coroutine . " ) ; <nl> static PHP_METHOD ( swoole_socket_coro , recvfrom ) <nl> Z_PARAM_DOUBLE ( timeout ) ; <nl> ZEND_PARSE_PARAMETERS_END ( ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( unlikely ( sock - > cid & & sock - > cid ! = sw_get_current_cid ( ) ) ) <nl> { <nl> swoole_php_fatal_error ( E_WARNING , " socket has already been bound to another coroutine . 
" ) ; <nl> static PHP_METHOD ( swoole_socket_coro , send ) <nl> RETURN_FALSE ; <nl> } <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( unlikely ( sock - > cid & & sock - > cid ! = sw_get_current_cid ( ) ) ) <nl> { <nl> swoole_php_fatal_error ( E_WARNING , " socket has already been bound to another coroutine . " ) ; <nl> static PHP_METHOD ( swoole_socket_coro , sendto ) <nl> Z_PARAM_STRING ( data , l_data ) ; <nl> ZEND_PARSE_PARAMETERS_END ( ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> <nl> int ret ; <nl> if ( sock - > domain = = AF_INET ) <nl> static PHP_METHOD ( swoole_socket_coro , close ) <nl> { <nl> coro_check ( TSRMLS_C ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( sock - > fd < 0 ) <nl> { <nl> RETURN_FALSE ; <nl> static PHP_METHOD ( swoole_socket_coro , close ) <nl> <nl> static PHP_METHOD ( swoole_socket_coro , getsockname ) <nl> { <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> array_init ( return_value ) ; <nl> <nl> swSocketAddress info ; <nl> static PHP_METHOD ( swoole_socket_coro , getsockname ) <nl> <nl> static PHP_METHOD ( swoole_socket_coro , getpeername ) <nl> { <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> array_init ( return_value ) ; <nl> <nl> swSocketAddress info ; <nl> static PHP_METHOD ( swoole_socket_coro , connect ) <nl> { <nl> coro_check ( TSRMLS_C ) ; <nl> <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> char * host ; <nl> size_t l_host ; <nl> zend_long port = 0 ; <nl> static PHP_METHOD ( swoole_socket_coro , connect ) <nl> # ifdef SWOOLE_SOCKETS_SUPPORT <nl> static PHP_METHOD ( swoole_socket_coro , getSocket ) <nl> { <nl> - socket_coro * sock = ( socket_coro * ) Z_OBJ_P ( getThis ( ) ) ; <nl> + socket_coro * sock = ( socket_coro * ) Z_SOCKET_CORO_OBJ_P ( getThis ( ) ) ; <nl> if ( sock - > fd < 0 ) <nl> { <nl> RETURN_FALSE ; <nl>
|
Fix the socket coro core dump.
|
swoole/swoole-src
|
cc3f428a01cc4e14c467185d03d455d66c9a61f6
|
2018-06-11T08:57:43Z
|
mmm a / include / mlir / SPIRV / SPIRVDialect . h <nl> ppp b / include / mlir / SPIRV / SPIRVDialect . h <nl> <nl> # include " mlir / IR / Dialect . h " <nl> <nl> namespace mlir { <nl> - class MLIRContext ; <nl> - <nl> namespace spirv { <nl> <nl> class SPIRVDialect : public Dialect { <nl> public : <nl> explicit SPIRVDialect ( MLIRContext * context ) ; <nl> <nl> + static StringRef getDialectNamespace ( ) { return " spv " ; } <nl> + <nl> / / / Parses a type registered to this dialect . <nl> Type parseType ( llvm : : StringRef spec , Location loc ) const override ; <nl> <nl> / / / Prints a type registered to this dialect . <nl> void printType ( Type type , llvm : : raw_ostream & os ) const override ; <nl> + <nl> + private : <nl> + / / / Parses ` spec ` as a type and verifies it can be used in SPIR - V types . <nl> + Type parseAndVerifyType ( StringRef spec , Location loc ) const ; <nl> + <nl> + / / / Parses ` spec ` as a SPIR - V array type . <nl> + Type parseArrayType ( StringRef spec , Location loc ) const ; <nl> + <nl> + / / / Parses ` spec ` as a SPIR - V run - time array type . <nl> + Type parseRuntimeArrayType ( StringRef spec , Location loc ) const ; <nl> } ; <nl> <nl> } / / end namespace spirv <nl> mmm a / lib / SPIRV / CMakeLists . txt <nl> ppp b / lib / SPIRV / CMakeLists . txt <nl> add_dependencies ( MLIRSPIRV <nl> MLIRSPIRVEnumsIncGen <nl> MLIRStdOpsToSPIRVConversionIncGen ) <nl> <nl> - target_link_libraries ( MLIRSPIRV MLIRIR MLIRSupport ) <nl> + target_link_libraries ( MLIRSPIRV <nl> + MLIRIR <nl> + MLIRParser <nl> + MLIRSupport ) <nl> mmm a / lib / SPIRV / SPIRVDialect . cpp <nl> ppp b / lib / SPIRV / SPIRVDialect . cpp <nl> <nl> <nl> # include " mlir / SPIRV / SPIRVDialect . h " <nl> <nl> - # include " mlir / IR / Builders . h " <nl> # include " mlir / IR / MLIRContext . h " <nl> # include " mlir / IR / StandardTypes . h " <nl> + # include " mlir / Parser . h " <nl> # include " mlir / SPIRV / SPIRVOps . h " <nl> # include " mlir / SPIRV / SPIRVTypes . h " <nl> # include " llvm / ADT / StringExtras . h " <nl> using namespace mlir : : spirv ; <nl> / / SPIR - V Dialect <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - SPIRVDialect : : SPIRVDialect ( MLIRContext * context ) : Dialect ( " spv " , context ) { <nl> + SPIRVDialect : : SPIRVDialect ( MLIRContext * context ) <nl> + : Dialect ( getDialectNamespace ( ) , context ) { <nl> addTypes < ArrayType , RuntimeArrayType > ( ) ; <nl> <nl> addOperations < <nl> SPIRVDialect : : SPIRVDialect ( MLIRContext * context ) : Dialect ( " spv " , context ) { <nl> / / Type Parsing <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - / / TODO ( b / 133530217 ) : The following implements some type parsing logic . It is <nl> - / / intended to be short - lived and used just before the main parser logic gets <nl> - / / exposed to dialects . So there is little type checking inside . <nl> - <nl> - static Type parseScalarType ( StringRef spec , Builder builder ) { <nl> - return llvm : : StringSwitch < Type > ( spec ) <nl> - . Case ( " f32 " , builder . getF32Type ( ) ) <nl> - . Case ( " i32 " , builder . getIntegerType ( 32 ) ) <nl> - . Case ( " f16 " , builder . getF16Type ( ) ) <nl> - . Case ( " i16 " , builder . getIntegerType ( 16 ) ) <nl> - . Default ( Type ( ) ) ; <nl> - } <nl> - <nl> / / Parses " < number > x " from the beginning of ` spec ` . <nl> static bool parseNumberX ( StringRef & spec , int64_t & number ) { <nl> spec = spec . 
ltrim ( ) ; <nl> static bool parseNumberX ( StringRef & spec , int64_t & number ) { <nl> return true ; <nl> } <nl> <nl> - static Type parseVectorType ( StringRef spec , Builder builder ) { <nl> - if ( ! spec . consume_front ( " vector < " ) | | ! spec . consume_back ( " > " ) ) <nl> + Type SPIRVDialect : : parseAndVerifyType ( StringRef spec , Location loc ) const { <nl> + auto * context = getContext ( ) ; <nl> + auto type = mlir : : parseType ( spec , context ) ; <nl> + if ( ! type ) { <nl> + context - > emitError ( loc , " cannot parse type : " ) < < spec ; <nl> return Type ( ) ; <nl> + } <nl> <nl> - int64_t count = 0 ; <nl> - if ( ! parseNumberX ( spec , count ) ) <nl> - return Type ( ) ; <nl> + / / Allow SPIR - V dialect types <nl> + if ( & type . getDialect ( ) = = this ) <nl> + return type ; <nl> <nl> - spec = spec . trim ( ) ; <nl> - auto scalarType = parseScalarType ( spec , builder ) ; <nl> - if ( ! scalarType ) <nl> + / / Check other allowed types <nl> + if ( auto t = type . dyn_cast < FloatType > ( ) ) { <nl> + if ( type . isBF16 ( ) ) { <nl> + context - > emitError ( loc , " cannot use ' bf16 ' to compose SPIR - V types " ) ; <nl> + return Type ( ) ; <nl> + } <nl> + } else if ( auto t = type . dyn_cast < IntegerType > ( ) ) { <nl> + if ( ! llvm : : is_contained ( llvm : : ArrayRef < unsigned > ( { 8 , 16 , 32 , 64 } ) , <nl> + t . getWidth ( ) ) ) { <nl> + context - > emitError ( loc , <nl> + " only 8 / 16 / 32 / 64 - bit integer type allowed but found " ) <nl> + < < type ; <nl> + return Type ( ) ; <nl> + } <nl> + } else if ( auto t = type . dyn_cast < VectorType > ( ) ) { <nl> + if ( t . getRank ( ) ! = 1 ) { <nl> + context - > emitError ( loc , " only 1 - D vector allowed but found " ) < < t ; <nl> + return Type ( ) ; <nl> + } <nl> + } else { <nl> + context - > emitError ( loc , " cannot use " ) <nl> + < < type < < " to compose SPIR - V types " ; <nl> return Type ( ) ; <nl> + } <nl> <nl> - return VectorType : : get ( { count } , scalarType ) ; <nl> + return type ; <nl> } <nl> <nl> - static Type parseArrayType ( StringRef spec , Builder builder ) { <nl> - if ( ! spec . consume_front ( " array < " ) | | ! spec . consume_back ( " > " ) ) <nl> + Type SPIRVDialect : : parseArrayType ( StringRef spec , Location loc ) const { <nl> + auto * context = getContext ( ) ; <nl> + if ( ! spec . consume_front ( " array < " ) | | ! spec . consume_back ( " > " ) ) { <nl> + context - > emitError ( loc , " spv . array delimiter < . . . > mismatch " ) ; <nl> return Type ( ) ; <nl> + } <nl> <nl> - Type elementType ; <nl> int64_t count = 0 ; <nl> - <nl> spec = spec . trim ( ) ; <nl> - if ( ! parseNumberX ( spec , count ) ) <nl> + if ( ! parseNumberX ( spec , count ) ) { <nl> + context - > emitError ( <nl> + loc , " expected array element count followed by ' x ' but found ' " ) <nl> + < < spec < < " ' " ; <nl> return Type ( ) ; <nl> - <nl> - spec = spec . ltrim ( ) ; <nl> - if ( spec . startswith ( " vector " ) ) { <nl> - elementType = parseVectorType ( spec , builder ) ; <nl> - } else { <nl> - elementType = parseScalarType ( spec , builder ) ; <nl> } <nl> + <nl> + Type elementType = parseAndVerifyType ( spec , loc ) ; <nl> if ( ! elementType ) <nl> return Type ( ) ; <nl> <nl> return ArrayType : : get ( elementType , count ) ; <nl> } <nl> <nl> - static Type parseRuntimeArrayType ( StringRef spec , Builder builder ) { <nl> - if ( ! spec . consume_front ( " rtarray < " ) | | ! spec . 
consume_back ( " > " ) ) <nl> + Type SPIRVDialect : : parseRuntimeArrayType ( StringRef spec , Location loc ) const { <nl> + auto * context = getContext ( ) ; <nl> + if ( ! spec . consume_front ( " rtarray < " ) | | ! spec . consume_back ( " > " ) ) { <nl> + context - > emitError ( loc , " spv . rtarray delimiter < . . . > mismatch " ) ; <nl> return Type ( ) ; <nl> - <nl> - Type elementType ; <nl> - spec = spec . trim ( ) ; <nl> - if ( spec . startswith ( " vector " ) ) { <nl> - elementType = parseVectorType ( spec , builder ) ; <nl> - } else { <nl> - elementType = parseScalarType ( spec , builder ) ; <nl> } <nl> + <nl> + Type elementType = parseAndVerifyType ( spec , loc ) ; <nl> if ( ! elementType ) <nl> return Type ( ) ; <nl> <nl> static Type parseRuntimeArrayType ( StringRef spec , Builder builder ) { <nl> } <nl> <nl> Type SPIRVDialect : : parseType ( StringRef spec , Location loc ) const { <nl> - Builder builder ( getContext ( ) ) ; <nl> <nl> - if ( auto type = parseArrayType ( spec , builder ) ) <nl> - return type ; <nl> - if ( auto type = parseRuntimeArrayType ( spec , builder ) ) <nl> - return type ; <nl> + if ( spec . startswith ( " array " ) ) { <nl> + return parseArrayType ( spec , loc ) ; <nl> + } <nl> + if ( spec . startswith ( " rtarray " ) ) { <nl> + return parseRuntimeArrayType ( spec , loc ) ; <nl> + } <nl> <nl> getContext ( ) - > emitError ( loc , " unknown SPIR - V type : " ) < < spec ; <nl> return Type ( ) ; <nl> mmm a / test / SPIRV / types . mlir <nl> ppp b / test / SPIRV / types . mlir <nl> func @ vector_array_type ( ! spv . array < 32 x vector < 4xf32 > > ) - > ( ) <nl> <nl> / / mmm - - <nl> <nl> - / / expected - error @ + 1 { { unknown SPIR - V type } } <nl> + / / expected - error @ + 1 { { spv . array delimiter < . . . > mismatch } } <nl> + func @ missing_left_angle_bracket ( ! spv . array 4xf32 > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { expected array element count followed by ' x ' but found ' f32 ' } } <nl> func @ missing_count ( ! spv . array < f32 > ) - > ( ) <nl> <nl> / / mmm - - <nl> <nl> - / / expected - error @ + 1 { { unknown SPIR - V type } } <nl> + / / expected - error @ + 1 { { expected array element count followed by ' x ' but found ' f32 ' } } <nl> func @ missing_x ( ! spv . array < 4 f32 > ) - > ( ) <nl> <nl> / / mmm - - <nl> <nl> - / / expected - error @ + 1 { { unknown SPIR - V type } } <nl> + / / expected - error @ + 1 { { cannot parse type : blabla } } <nl> + func @ cannot_parse_type ( ! spv . array < 4xblabla > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { cannot parse type : 3xf32 } } <nl> func @ more_than_one_dim ( ! spv . array < 4x3xf32 > ) - > ( ) <nl> <nl> / / mmm - - <nl> <nl> + / / expected - error @ + 1 { { only 1 - D vector allowed but found ' vector < 4x3xf32 > ' } } <nl> + func @ non_1D_vector ( ! spv . array < 4xvector < 4x3xf32 > > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { cannot use ' tensor < 4xf32 > ' to compose SPIR - V types } } <nl> + func @ tensor_type ( ! spv . array < 4xtensor < 4xf32 > > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { cannot use ' bf16 ' to compose SPIR - V types } } <nl> + func @ bf16_type ( ! spv . array < 4xbf16 > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { only 8 / 16 / 32 / 64 - bit integer type allowed but found ' i256 ' } } <nl> + func @ i256_type ( ! spv . 
array < 4xi256 > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { cannot use ' index ' to compose SPIR - V types } } <nl> + func @ index_type ( ! spv . array < 4xindex > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { cannot use ' ! llvm . i32 ' to compose SPIR - V types } } <nl> + func @ llvm_type ( ! spv . array < 4x ! llvm . i32 > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / RuntimeArrayType <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> func @ vector_runtime_array_type ( ! spv . rtarray < vector < 4xf32 > > ) - > ( ) <nl> <nl> / / mmm - - <nl> <nl> - / / expected - error @ + 1 { { unknown SPIR - V type } } <nl> + / / expected - error @ + 1 { { spv . rtarray delimiter < . . . > mismatch } } <nl> + func @ missing_left_angle_bracket ( ! spv . rtarray f32 > ) - > ( ) <nl> + <nl> + / / mmm - - <nl> + <nl> + / / expected - error @ + 1 { { cannot parse type : 4xf32 } } <nl> func @ redundant_count ( ! spv . rtarray < 4xf32 > ) - > ( ) <nl>
|
[spirv] Use mlir::parseType in type parsers and add more checks
|
tensorflow/tensorflow
|
20df46c5a48432eaca9c0ac4cbcdf0dd8e01b457
|
2019-06-20T06:00:14Z
|
mmm a / servers / rendering / renderer_rd / renderer_scene_render_rd . cpp <nl> ppp b / servers / rendering / renderer_rd / renderer_scene_render_rd . cpp <nl> void RendererSceneRenderRD : : _create_reflection_importance_sample ( ReflectionData <nl> <nl> void RendererSceneRenderRD : : _update_reflection_mipmaps ( ReflectionData & rd , int p_start , int p_end ) { <nl> for ( int i = p_start ; i < p_end ; i + + ) { <nl> - for ( int j = 0 ; j < rd . layers [ i ] . mipmaps . size ( ) - 1 ; j + + ) { <nl> - for ( int k = 0 ; k < 6 ; k + + ) { <nl> - RID view = rd . layers [ i ] . mipmaps [ j ] . views [ k ] ; <nl> - RID texture = rd . layers [ i ] . mipmaps [ j + 1 ] . views [ k ] ; <nl> - Size2i size = rd . layers [ i ] . mipmaps [ j + 1 ] . size ; <nl> - storage - > get_effects ( ) - > make_mipmap ( view , texture , size ) ; <nl> - } <nl> + for ( int j = 0 ; j < rd . layers [ i ] . views . size ( ) - 1 ; j + + ) { <nl> + RID view = rd . layers [ i ] . views [ j ] ; <nl> + RID texture = rd . layers [ i ] . views [ j + 1 ] ; <nl> + Size2i size = rd . layers [ i ] . mipmaps [ j + 1 ] . size ; <nl> + storage - > get_effects ( ) - > cubemap_downsample ( view , texture , size ) ; <nl> } <nl> } <nl> } <nl>
|
Use cubemap downsampler for reflection mipmaps
|
godotengine/godot
|
28d0f8a455bc3216eabbf5fd87184dd21e8187d7
|
2020-12-25T00:39:17Z
|
mmm a / xbmc / utils / test / Makefile <nl> ppp b / xbmc / utils / test / Makefile <nl> <nl> SRCS = \ <nl> TestAlarmClock . cpp \ <nl> + TestAliasShortcutUtils . cpp \ <nl> TestBase64 . cpp \ <nl> TestGlobalsHandling . cpp \ <nl> TestXBMCTinyXML . cpp <nl> new file mode 100644 <nl> index 000000000000 . . 079778a2b646 <nl> mmm / dev / null <nl> ppp b / xbmc / utils / test / TestAliasShortcutUtils . cpp <nl> <nl> + / * <nl> + * Copyright ( C ) 2005 - 2012 Team XBMC <nl> + * http : / / www . xbmc . org <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with XBMC ; see the file COPYING . If not , write to <nl> + * the Free Software Foundation , 675 Mass Ave , Cambridge , MA 02139 , USA . <nl> + * http : / / www . gnu . org / copyleft / gpl . html <nl> + * <nl> + * / <nl> + <nl> + # include " utils / AliasShortcutUtils . h " <nl> + <nl> + # include " gtest / gtest . h " <nl> + <nl> + TEST ( TestAliasShortcutUtils , IsAliasShortcut ) <nl> + { <nl> + CStdString a = " " ; <nl> + # if defined ( TARGET_DARWIN_OSX ) <nl> + / * TODO : Write test case for OSX * / <nl> + # else <nl> + EXPECT_FALSE ( IsAliasShortcut ( a ) ) ; <nl> + # endif <nl> + } <nl> + <nl> + TEST ( TestAliasShortcutUtils , TranslateAliasShortcut ) <nl> + { <nl> + CStdString a = " " ; <nl> + TranslateAliasShortcut ( a ) ; <nl> + # if defined ( TARGET_DARWIN_OSX ) <nl> + / * TODO : Write test case for OSX * / <nl> + # else <nl> + EXPECT_STREQ ( " " , a . c_str ( ) ) ; <nl> + # endif <nl> + } <nl>
|
[GSOC] Add test case for functions in AliasShortcutUtils.h.
|
xbmc/xbmc
|
93da58fec73ad527292726a178dc9ca221991736
|
2012-09-05T19:07:38Z
|
mmm a / hphp / hack / src / typing / typing_subtype . ml <nl> ppp b / hphp / hack / src / typing / typing_subtype . ml <nl> let check_mutability <nl> ( * immutable is not compatible with mutable * ) <nl> | ( None , Some ( Param_borrowed_mutable | Param_owned_mutable ) ) <nl> ( * mutable is not compatible with immutable * ) <nl> - <nl> + <nl> | ( Some ( Param_borrowed_mutable | Param_owned_mutable ) , None ) <nl> ( * borrowed mutable is not compatible with owned mutable * ) <nl> - <nl> + <nl> | ( Some Param_borrowed_mutable , Some Param_owned_mutable ) <nl> ( * maybe - mutable is not compatible with immutable / mutable * ) <nl> - <nl> + <nl> | ( Some Param_maybe_mutable , <nl> ( None | Some ( Param_borrowed_mutable | Param_owned_mutable ) ) ) - > <nl> invalid <nl> and simplify_subtype_i <nl> simplify_subtype ~ subtype_env ty_sub ' ty_super ' env <nl> ( * A < : ? B iif A & nonnull < : B <nl> Only apply if B is a type variable or an intersection , to avoid oscillating <nl> - forever between this case and the previous one . * ) <nl> + forever between this case and the previous one . <nl> + Also make sure , in the case of a single type variable , that the type <nl> + variable has no constraints in its upper bounds , to avoid the following <nl> + loop : <nl> + subtype ( nonnull & _ & # 1 ) < : has_member ( m , # 2 ) <nl> + simplify ( nonnull & _ & # 1 ) < : has_member ( m , # 2 ) <nl> + simplifies to ( # 1 & _ ) < : ? # 3 with upper bound has_member ( m , # 2 ) <nl> + simplifies to ( # 1 & _ & nonnull ) < : # 3 <nl> + into prop_to_env ( # 1 & _ & nonnull ) < : # 3 <nl> + calls simplify on ( # 1 & _ & nonnull ) < : has_member ( m , # 2 ) <nl> + back to start * ) <nl> | ( ty_sub , LoclType ( r , Toption ty_super ' ) ) <nl> when let ( _ , ( _ , ety_super ' ) ) = Env . expand_type env ty_super ' in <nl> match ety_super ' with <nl> - | Tintersection _ <nl> - | Tvar _ - > <nl> - true <nl> + | Tintersection _ - > true <nl> + | Tvar var - > <nl> + let upper_bounds = Env . get_tyvar_upper_bounds env var in <nl> + not @ @ Internal_type_set . 
exists is_constraint_type upper_bounds <nl> | _ - > false - > <nl> ( * We want to intersect ty_sub with nonnull , but ty_super might be a <nl> constraint type , and we can ' t intersect with constraint types , <nl> and simplify_subtype_i <nl> match ( ak_sub , ak_super ) with <nl> ( * An empty array is a subtype of any array type * ) <nl> | ( AKempty , _ ) - > valid ( ) <nl> - | ( AKvarray ty_sub , ( AKvarray ty_super ) ) <nl> - - > <nl> + | ( AKvarray ty_sub , AKvarray ty_super ) - > <nl> simplify_subtype ~ subtype_env ~ this_ty ty_sub ty_super env <nl> - | ( AKvarray_or_darray ( tk_sub , tv_sub ) , AKvarray_or_darray ( tk_super , tv_super ) ) <nl> + | ( AKvarray_or_darray ( tk_sub , tv_sub ) , <nl> + AKvarray_or_darray ( tk_super , tv_super ) ) <nl> | ( AKdarray ( tk_sub , tv_sub ) , AKdarray ( tk_super , tv_super ) ) <nl> - | ( AKdarray ( tk_sub , tv_sub ) , AKvarray_or_darray ( tk_super , tv_super ) ) - > <nl> + | ( AKdarray ( tk_sub , tv_sub ) , AKvarray_or_darray ( tk_super , tv_super ) ) <nl> + - > <nl> env <nl> | > simplify_subtype ~ subtype_env ~ this_ty tk_sub tk_super <nl> & & & simplify_subtype ~ subtype_env ~ this_ty tv_sub tv_super <nl> and simplify_subtype_reactivity <nl> ( * ok : <nl> < < __Rx > > <nl> function f ( < < __AtMostRxAsFunc > > ( function ( ) : int ) $ f ) { return $ f ( ) } * ) <nl> - <nl> + <nl> | ( RxVar None , RxVar _ ) - > <nl> valid ( ) <nl> | ( RxVar ( Some sub ) , RxVar ( Some super ) ) <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl> new file mode 100644 <nl> index 00000000000 . . b48c308886e <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / integration / data / global_inference / dynamic_nonnull_has_member / dynamic_nonnull_has_member . php <nl> <nl> + < ? hh <nl> + <nl> + function foo ( ) : void { <nl> + $ v = Vector { } ; <nl> + $ x = $ v [ 0 ] ; <nl> + if ( $ x is dynamic & & $ x is nonnull ) { <nl> + $ x - > x ; <nl> + } <nl> + } <nl> mmm a / hphp / hack / test / integration / test_global_inference . py <nl> ppp b / hphp / hack / test / integration / test_global_inference . py <nl> def test ( self ) - > None : <nl> self . test_driver . check_cmd ( [ " No errors ! " ] ) <nl> <nl> <nl> + class TestDynamicNonnullHasMember ( TestCase [ GlobalInferenceDriver ] ) : <nl> + @ classmethod <nl> + def get_template_repo ( cls ) - > str : <nl> + return " hphp / hack / test / integration / data / global_inference / dynamic_nonnull_has_member " <nl> + <nl> + @ classmethod <nl> + def get_test_driver ( cls ) - > GlobalInferenceDriver : <nl> + return GlobalInferenceDriver ( ) <nl> + <nl> + def test ( self ) - > None : <nl> + self . test_driver . start_hh_server ( <nl> + args = [ " - - config " , " infer_missing = global " , " - - config " , " timeout = 20 " ] <nl> + ) <nl> + self . test_driver . check_cmd ( [ " No errors ! " ] ) <nl> + <nl> + <nl> class TestThreeFilesGlobalInference ( TestCase [ GlobalInferenceDriver ] ) : <nl> " " " <nl> Test if we got no datarace . This test might be non deterministic : <nl> new file mode 100644 <nl> index 00000000000 . . 52b36d81ac6 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / unit / typing_subtype / typing_subtype_test . ml <nl> <nl> + ( * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . 
<nl> + * <nl> + * ) <nl> + <nl> + open Core_kernel <nl> + open Typing_defs <nl> + module Env = Typing_env <nl> + module ITySet = Internal_type_set <nl> + <nl> + let do_test_sub_type_expect_ok ? on_error env sub_ty super_ty = <nl> + let on_error = <nl> + Option . value on_error ~ default : ( fun _ _ - > failwith " on error was called " ) <nl> + in <nl> + Typing_subtype . sub_type_i env sub_ty super_ty on_error <nl> + <nl> + let r_none = Typing_reason . Rnone <nl> + <nl> + let no_r t = ( r_none , t ) <nl> + <nl> + let default_env ( ) = <nl> + let env = <nl> + Typing_env . empty GlobalOptions . default Relative_path . default None <nl> + in <nl> + let env = Env . set_log_level env " sub " 2 in <nl> + let env = Env . set_log_level env " env " 2 in <nl> + let env = Env . set_log_level env " inter " 2 in <nl> + env <nl> + <nl> + ( * test ( nonnull & _ & # 1 ) < : has_member ( m , # 2 ) <nl> + <nl> + previously this looped as follows : <nl> + loop : <nl> + subtype ( nonnull & _ & # 1 ) < : has_member ( m , # 2 ) <nl> + simplify ( nonnull & _ & # 1 ) < : has_member ( m , # 2 ) <nl> + simplifies to ( # 1 & _ ) < : ? # 3 with upper bound has_member ( m , # 2 ) <nl> + simplifies to ( # 1 & _ & nonnull ) < : # 3 <nl> + into prop_to_env ( # 1 & _ & nonnull ) < : # 3 <nl> + calls simplify on ( # 1 & _ & nonnull ) < : has_member ( m , # 2 ) <nl> + back to start * ) <nl> + let test_intersect_with_nonnull_sub_constraint ( ) = <nl> + let env = default_env ( ) in <nl> + ( * construct sub type * ) <nl> + let ( env , ty1 ) = Env . fresh_type_reason env r_none in <nl> + let sub_ty = <nl> + no_r <nl> + Typing_defs . ( <nl> + Tintersection [ no_r Tnonnull ; no_r @ @ Tany TanySentinel . value ; ty1 ] ) <nl> + in <nl> + let sub_ty = Typing_defs . LoclType sub_ty in <nl> + ( * construct super type * ) <nl> + let m = ( Pos . none , " m " ) in <nl> + let ( env , mem_ty ) = Env . fresh_type_reason env r_none in <nl> + let super_ty = Typing_make_type . has_member r_none m mem_ty Aast . CIself in <nl> + ( * do it ! * ) <nl> + let env = do_test_sub_type_expect_ok env sub_ty super_ty in <nl> + ( * check it * ) <nl> + let ty1_id = 1 in <nl> + match ITySet . elements @ @ Env . get_tyvar_upper_bounds env ty1_id with <nl> + | [ LoclType ( _ , Toption ( _ , Tvar ty3_id ) ) ] - > <nl> + begin <nl> + match ITySet . elements @ @ Env . get_tyvar_upper_bounds env ty3_id with <nl> + | [ ConstraintType ( _ , Thas_member _ ) ] - > true <nl> + | _ - > failwith " failed to match upper bound of # 3 " <nl> + end <nl> + | _ - > failwith " failed to match upper bound of # 1 " <nl> + <nl> + let tests = <nl> + [ <nl> + ( " test_intersect_with_nonnull_sub_constraint " , <nl> + test_intersect_with_nonnull_sub_constraint ) ; <nl> + ] <nl> + <nl> + let ( ) = Unit_test . main tests <nl> mmm a / hphp / hack / test / unit / unit_test . ml <nl> ppp b / hphp / hack / test / unit / unit_test . ml <nl> let run_all ( tests : ( string * ( unit - > bool ) ) list ) = <nl> 0 <nl> else <nl> 1 ) <nl> + <nl> + let run_only tests names = <nl> + let f = <nl> + match names with <nl> + | [ ] - > const true <nl> + | _ - > ( fun ( n , _ ) - > List . mem names n ( = ) ) <nl> + in <nl> + let tests = List . filter tests ~ f in <nl> + run_all tests <nl> + <nl> + let main tests = <nl> + let names = Option . value ( List . tl @ @ Array . to_list Sys . argv ) ~ default : [ ] in <nl> + run_only tests names <nl>
|
fix endless loop when simplifying (N & _ & ) <: HM(m, )
|
facebook/hhvm
|
f370ec7f5c738bd6282c8e0a7023f4a747214569
|
2019-11-13T10:05:12Z
|
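The hhvm fix above guards the rewrite of `A <: ?B` into `(A & nonnull) <: B` so it fires only when `B` is a type variable with no constraint-type upper bounds; otherwise simplification re-derives the original goal and loops forever, as the new comment and unit test spell out. A minimal Python sketch of that guard (the data shapes and names are hypothetical, not Hack's actual solver):

```python
# Sketch of the guard added above: only rewrite  A <: ?B  into
# (A & nonnull) <: B  when B is a plain type variable, i.e. none of its
# upper bounds is a constraint type the rewrite would re-derive.

def is_constraint_type(bound):
    # In Hack this matches has_member/destructure constraints; here we
    # simply tag bounds with a flag for illustration.
    return bound.get("constraint", False)

def can_intersect_with_nonnull(env, tyvar):
    upper_bounds = env["upper_bounds"].get(tyvar, [])
    return not any(is_constraint_type(b) for b in upper_bounds)

env = {"upper_bounds": {"#3": [{"constraint": True, "name": "has_member(m, #2)"}]}}
print(can_intersect_with_nonnull(env, "#3"))  # False -> skip the rewrite, no loop
print(can_intersect_with_nonnull(env, "#1"))  # True  -> rewrite is safe
```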
mmm a / language / English / strings . po <nl> ppp b / language / English / strings . po <nl> msgid " Alarm ! " <nl> msgstr " " <nl> <nl> msgctxt " # 13212 " <nl> - msgid " Canceled with % im % is left " <nl> + msgid " Cancelled with % im % is left " <nl> msgstr " " <nl> <nl> # . minutes ( left from countdown ) <nl> msgstr " " <nl> # empty string with id 16023 <nl> <nl> msgctxt " # 16024 " <nl> - msgid " Canceling . . . " <nl> + msgid " Cancelling . . . " <nl> msgstr " " <nl> <nl> msgctxt " # 16025 " <nl>
|
use British English spelling
|
xbmc/xbmc
|
d4498552823502dab6a379847fd4810d0cccbd7c
|
2012-11-18T01:22:26Z
|
mmm a / jstests / sharding / gridfs . js <nl> ppp b / jstests / sharding / gridfs . js <nl> var mongos = test . s0 <nl> <nl> <nl> var filename = " mongod " / / A large file we are guaranteed to have <nl> + if ( _isWindows ( ) ) <nl> + filename + = " . exe " <nl> <nl> function testGridFS ( name ) { <nl> var d = mongos . getDB ( name ) <nl>
|
fix gridfs.js test for Windows SERVER-3746
|
mongodb/mongo
|
08727fb0e8089b869fe179be6fcc1eaa1e76e5b1
|
2012-05-25T16:05:30Z
|
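The mongodb fix above is the usual platform guard: a test fixture that points at an executable needs the `.exe` suffix on Windows. The same pattern in Python (the `mongod` filename is just the large file this test happens to use):

```python
import os
import sys

# Pick a large file that is guaranteed to exist next to the test binaries.
filename = "mongod"
if sys.platform.startswith("win"):
    # Windows builds ship the server binary with an .exe suffix.
    filename += ".exe"

print(filename, os.path.exists(filename))
```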
mmm a / modules / ocl / src / kernels / meanShift . cl <nl> ppp b / modules / ocl / src / kernels / meanShift . cl <nl> <nl> / / <nl> / / Copyright ( C ) 2010 - 2012 , Institute Of Software Chinese Academy Of Science , all rights reserved . <nl> / / Copyright ( C ) 2010 - 2012 , Advanced Micro Devices , Inc . , all rights reserved . <nl> + / / Copyright ( C ) 2010 - 2012 , Multicoreware , Inc . , all rights reserved . <nl> / / Third party copyrights are property of their respective owners . <nl> / / <nl> / / @ Authors <nl> / / Shengen Yan , yanshengen @ gmail . com <nl> / / Xu Pang , pangxu010 @ 163 . com <nl> + / / Wenju He , wenju @ multicorewareinc . com <nl> / / <nl> / / Redistribution and use in source and binary forms , with or without modification , <nl> / / are permitted provided that the following conditions are met : <nl> <nl> / / the use of this software , even if advised of the possibility of such damage . <nl> / / <nl> / / M * / <nl> - # if defined ( DOUBLE_SUPPORT ) <nl> - # pragma OPENCL EXTENSION cl_khr_fp64 : enable <nl> - typedef double F ; <nl> - # else <nl> - typedef float F ; <nl> - # endif <nl> <nl> short2 do_mean_shift ( int x0 , int y0 , __global uchar4 * out , int out_step , <nl> __global uchar4 * in , int in_step , int dst_off , int src_off , <nl> short2 do_mean_shift ( int x0 , int y0 , __global uchar4 * out , int out_step , <nl> if ( count = = 0 ) <nl> break ; <nl> <nl> - F icount = 1 . 0 / count ; <nl> - int x1 = convert_int_rtz ( sx * icount ) ; <nl> - int y1 = convert_int_rtz ( sy * icount ) ; <nl> - s . x = convert_int_rtz ( s . x * icount ) ; <nl> - s . y = convert_int_rtz ( s . y * icount ) ; <nl> - s . z = convert_int_rtz ( s . z * icount ) ; <nl> + int x1 = sx / count ; <nl> + int y1 = sy / count ; <nl> + s . x = s . x / count ; <nl> + s . y = s . y / count ; <nl> + s . z = s . z / count ; <nl> <nl> int4 tmp = s - convert_int4 ( c ) ; <nl> int norm2 = tmp . x * tmp . x + tmp . y * tmp . y + <nl> mmm a / modules / ocl / test / test_imgproc . cpp <nl> ppp b / modules / ocl / test / test_imgproc . cpp <nl> COOR do_meanShift ( int x0 , int y0 , uchar * sptr , uchar * dptr , int sstep , cv : : Size <nl> if ( count = = 0 ) <nl> break ; <nl> <nl> - double icount = 1 . 0 / count ; <nl> - int x1 = cvFloor ( sx * icount ) ; <nl> - int y1 = cvFloor ( sy * icount ) ; <nl> - s0 = cvFloor ( s0 * icount ) ; <nl> - s1 = cvFloor ( s1 * icount ) ; <nl> - s2 = cvFloor ( s2 * icount ) ; <nl> + int x1 = sx / count ; <nl> + int y1 = sy / count ; <nl> + s0 = s0 / count ; <nl> + s1 = s1 / count ; <nl> + s2 = s2 / count ; <nl> <nl> bool stopFlag = ( x0 = = x1 & & y0 = = y1 ) | | ( abs ( x1 - x0 ) + abs ( y1 - y0 ) + <nl> tab [ s0 - c0 + 255 ] + tab [ s1 - c1 + 255 ] + tab [ s2 - c2 + 255 ] < = eps ) ; <nl> TEST_P ( meanShiftFiltering , Mat ) <nl> gdst . download ( cpu_gdst ) ; <nl> <nl> char sss [ 1024 ] ; <nl> - char warning [ 300 ] = " Warning : If the selected device doesn ' t support double , a deviation will exist . \ nIf the accuracy is acceptable , please ignore it . \ n " ; <nl> sprintf ( sss , " roicols = % d , roirows = % d , srcx = % d , srcy = % d , dstx = % d , dsty = % d \ n " , roicols , roirows , srcx , srcy , dstx , dsty ) ; <nl> - strcat ( sss , warning ) ; <nl> EXPECT_MAT_NEAR ( dst , cpu_gdst , 0 . 0 , sss ) ; <nl> <nl> } <nl> TEST_P ( meanShiftProc , Mat ) <nl> gdstCoor . download ( cpu_gdstCoor ) ; <nl> <nl> char sss [ 1024 ] ; <nl> - char warning [ 300 ] = " Warning : If the selected device doesn ' t support double , a deviation will exist . 
\ nIf the accuracy is acceptable , please ignore it . \ n " ; <nl> sprintf ( sss , " roicols = % d , roirows = % d , srcx = % d , srcy = % d , dstx = % d , dsty = % d \ n " , roicols , roirows , srcx , srcy , dstx , dsty ) ; <nl> - strcat ( sss , warning ) ; <nl> EXPECT_MAT_NEAR ( dst , cpu_gdst , 0 . 0 , sss ) ; <nl> EXPECT_MAT_NEAR ( dstCoor , cpu_gdstCoor , 0 . 0 , sss ) ; <nl> } <nl>
|
fix meanShift mismatch on non-double GPUs
|
opencv/opencv
|
f4ef0c15985a26a559b3eaf2286a2da13cf999fd
|
2013-02-22T02:13:20Z
|
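The opencv change above swaps the float-reciprocal path (`convert_int_rtz(s * (1.0/count))`) for plain integer division in both the OpenCL kernel and the CPU reference, so devices without double support can no longer round to a neighbouring pixel. For the non-negative sums mean shift accumulates, truncating division is exact; a quick Python check of the center update:

```python
# s* hold the coordinates/colors accumulated inside the window, count the
# number of contributing pixels; both kernels now shift via integer division.
def shift_center(sx, sy, count):
    # Python's // floors, C's / truncates toward zero; for the non-negative
    # sums used here the two agree exactly.
    return sx // count, sy // count

# With the old path, 1.0/count rounded in single precision could land just
# below the true quotient, so convert_int_rtz picked a different pixel.
sx, sy, count = 300, 299, 3
print(shift_center(sx, sy, count))  # (100, 99)
```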
mmm a / tools / run_tests / sanity / check_submodules . sh <nl> ppp b / tools / run_tests / sanity / check_submodules . sh <nl> cat < < EOF | awk ' { print $ 1 } ' | sort > " $ want_submodules " <nl> a2e6adecc294dc4cd98cc285a9134ce58e0f2ad0 third_party / abseil - cpp ( heads / master ) <nl> 090faecb454fbd6e6e17a75ef8146acb037118d4 third_party / benchmark ( v1 . 5 . 0 ) <nl> 73594cde8c9a52a102c4341c244c833aa61b9c06 third_party / bloaty ( remotes / origin / wide - 14 - g73594cd ) <nl> - 7f02881e96e51f1873afcf384d02f782b48967ca third_party / boringssl ( remotes / origin / HEAD ) <nl> d471142fdb8dd60df4d04ffc6d232c2f1da0cd80 third_party / boringssl - with - bazel ( remotes / origin / master - with - bazel ) <nl> e982924acee7f7313b4baa4ee5ec000c5e373c30 third_party / cares / cares ( cares - 1_15_0 ) <nl> c83ed7ea9eb5fb3b93d1ad52b59750f1958b8bde third_party / envoy - api ( heads / master ) <nl>
|
update check_submodules.sh
|
grpc/grpc
|
c31501f7e4c4c891713706a023217fc68b812bdb
|
2020-01-16T08:44:37Z
|
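The grpc script above pins every submodule to an expected sha and fails the sanity check on drift; the commit simply drops the retired `third_party/boringssl` pin. A rough Python sketch of the same check (the `git submodule` output format and the two sample pins are assumptions, not grpc's actual tooling):

```python
import subprocess

# Expected pins: sha -> path, mirroring the here-doc in check_submodules.sh.
EXPECTED = {
    "d471142fdb8dd60df4d04ffc6d232c2f1da0cd80": "third_party/boringssl-with-bazel",
    "090faecb454fbd6e6e17a75ef8146acb037118d4": "third_party/benchmark",
}

def actual_submodules():
    out = subprocess.run(["git", "submodule"],
                         capture_output=True, text=True, check=True)
    pins = {}
    for line in out.stdout.splitlines():
        sha, path = line.split()[:2]
        pins[sha.lstrip("-+ ")] = path  # strip git's status prefix characters
    return pins

if actual_submodules() != EXPECTED:
    raise SystemExit("submodule pins drifted; update check_submodules")
```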
mmm a / lib / SILGen / SwitchEnumBuilder . cpp <nl> ppp b / lib / SILGen / SwitchEnumBuilder . cpp <nl> void SwitchCaseFullExpr : : unreachableExit ( ) { <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> void SwitchEnumBuilder : : emit ( ) & & { <nl> - bool isAddressOnly = optional . getType ( ) . isAddressOnly ( builder . getFunction ( ) ) & & <nl> - getSGF ( ) . silConv . useLoweredAddresses ( ) ; <nl> + bool isAddressOnly = <nl> + subjectExprOperand . getType ( ) . isAddressOnly ( builder . getFunction ( ) ) & & <nl> + getSGF ( ) . silConv . useLoweredAddresses ( ) ; <nl> using DeclBlockPair = std : : pair < EnumElementDecl * , SILBasicBlock * > ; <nl> { <nl> / / TODO : We could store the data in CaseBB form and not have to do this . <nl> void SwitchEnumBuilder : : emit ( ) & & { <nl> defaultBlockData ? defaultBlockData - > count : ProfileCounter ( ) ; <nl> ArrayRef < ProfileCounter > caseBlockCountsRef = caseBlockCounts ; <nl> if ( isAddressOnly ) { <nl> - builder . createSwitchEnumAddr ( loc , optional . getValue ( ) , defaultBlock , <nl> - caseBlocks , caseBlockCountsRef , <nl> + builder . createSwitchEnumAddr ( loc , subjectExprOperand . getValue ( ) , <nl> + defaultBlock , caseBlocks , caseBlockCountsRef , <nl> defaultBlockCount ) ; <nl> } else { <nl> - if ( optional . getType ( ) . isAddress ( ) ) { <nl> + if ( subjectExprOperand . getType ( ) . isAddress ( ) ) { <nl> / / TODO : Refactor this into a maybe load . <nl> - if ( optional . hasCleanup ( ) ) { <nl> - optional = builder . createLoadTake ( loc , optional ) ; <nl> + if ( subjectExprOperand . hasCleanup ( ) ) { <nl> + subjectExprOperand = builder . createLoadTake ( loc , subjectExprOperand ) ; <nl> } else { <nl> - optional = builder . createLoadCopy ( loc , optional ) ; <nl> + subjectExprOperand = builder . createLoadCopy ( loc , subjectExprOperand ) ; <nl> } <nl> } <nl> - builder . createSwitchEnum ( loc , optional . forward ( getSGF ( ) ) , defaultBlock , <nl> - caseBlocks , caseBlockCountsRef , <nl> + builder . createSwitchEnum ( loc , subjectExprOperand . forward ( getSGF ( ) ) , <nl> + defaultBlock , caseBlocks , caseBlockCountsRef , <nl> defaultBlockCount ) ; <nl> } <nl> } <nl> void SwitchEnumBuilder : : emit ( ) & & { <nl> SwitchCaseFullExpr presentScope ( builder . getSILGenFunction ( ) , <nl> CleanupLocation : : get ( loc ) , branchDest ) ; <nl> builder . emitBlock ( defaultBlock ) ; <nl> - ManagedValue input = optional ; <nl> + ManagedValue input = subjectExprOperand ; <nl> if ( ! isAddressOnly ) { <nl> - input = builder . createOwnedPhiArgument ( optional . getType ( ) ) ; <nl> + input = builder . createOwnedPhiArgument ( subjectExprOperand . getType ( ) ) ; <nl> } <nl> handler ( input , std : : move ( presentScope ) ) ; <nl> builder . clearInsertionPoint ( ) ; <nl> void SwitchEnumBuilder : : emit ( ) & & { <nl> ManagedValue input ; <nl> if ( decl - > hasAssociatedValues ( ) ) { <nl> / / Pull the payload out if we have one . <nl> - SILType inputType = optional . getType ( ) . getEnumElementType ( <nl> + SILType inputType = subjectExprOperand . getType ( ) . getEnumElementType ( <nl> decl , builder . getModule ( ) , builder . getFunction ( ) ) ; <nl> - input = optional ; <nl> + input = subjectExprOperand ; <nl> if ( ! isAddressOnly ) { <nl> input = builder . createOwnedPhiArgument ( inputType ) ; <nl> } <nl> void SwitchEnumBuilder : : emit ( ) & & { <nl> SwitchCaseFullExpr presentScope ( builder . 
getSILGenFunction ( ) , <nl> CleanupLocation : : get ( loc ) , branchDest ) ; <nl> builder . emitBlock ( defaultBlock ) ; <nl> - ManagedValue input = optional ; <nl> + ManagedValue input = subjectExprOperand ; <nl> if ( ! isAddressOnly ) { <nl> - input = builder . createOwnedPhiArgument ( optional . getType ( ) ) ; <nl> + input = builder . createOwnedPhiArgument ( subjectExprOperand . getType ( ) ) ; <nl> } <nl> handler ( input , std : : move ( presentScope ) ) ; <nl> builder . clearInsertionPoint ( ) ; <nl> mmm a / lib / SILGen / SwitchEnumBuilder . h <nl> ppp b / lib / SILGen / SwitchEnumBuilder . h <nl> class SwitchEnumBuilder { <nl> <nl> SILGenBuilder & builder ; <nl> SILLocation loc ; <nl> - ManagedValue optional ; <nl> + ManagedValue subjectExprOperand ; <nl> llvm : : Optional < DefaultCaseData > defaultBlockData ; <nl> llvm : : SmallVector < NormalCaseData , 8 > caseDataArray ; <nl> <nl> public : <nl> SwitchEnumBuilder ( SILGenBuilder & builder , SILLocation loc , <nl> - ManagedValue optional ) <nl> - : builder ( builder ) , loc ( loc ) , optional ( optional ) { } <nl> + ManagedValue subjectExprOperand ) <nl> + : builder ( builder ) , loc ( loc ) , subjectExprOperand ( subjectExprOperand ) { } <nl> <nl> void addDefaultCase ( <nl> SILBasicBlock * defaultBlock , SwitchCaseBranchDest branchDest , <nl>
|
[NFC] Rename SwitchEnumBuilder's optional field to subjectExprOperand.
|
apple/swift
|
41958596309021ea24ca997ea26ec2c9f1272ca0
|
2020-12-12T02:31:05Z
|
mmm a / hphp / test / run . php <nl> ppp b / hphp / test / run . php <nl> function extra_args ( $ options ) : string { <nl> return $ args ; <nl> } <nl> <nl> - function hhvm_cmd_impl ( ) { <nl> - $ args = func_get_args ( ) ; <nl> - $ options = array_shift ( & $ args ) ; <nl> - $ config = array_shift ( & $ args ) ; <nl> - $ extra_args = $ args ; <nl> + function hhvm_cmd_impl ( $ options , $ config , . . . $ extra_args ) { <nl> $ modes = ( array ) mode_cmd ( $ options ) ; <nl> $ cmds = array ( ) ; <nl> foreach ( $ modes as $ mode ) { <nl> private static function send ( $ type , $ msg ) { <nl> * and any one of the arguments is preceded by an integer ( see the color <nl> * constants above ) , that argument will be given the indicated color . <nl> * / <nl> - public static function sayColor ( ) { <nl> - $ args = func_get_args ( ) ; <nl> + public static function sayColor ( . . . $ args ) { <nl> while ( count ( $ args ) ) { <nl> $ color = null ; <nl> $ str = array_shift ( & $ args ) ; <nl> public static function getResults ( ) { <nl> } <nl> <nl> / * * Output is in the format expected by JsonTestRunner . * / <nl> - public static function say ( / * . . . * / ) { <nl> - $ data = array_map ( function ( $ row ) { <nl> - return self : : jsonEncode ( $ row ) . " \ n " ; <nl> - } , func_get_args ( ) ) ; <nl> + public static function say ( . . . $ args ) { <nl> + $ data = array_map ( <nl> + $ row = = > self : : jsonEncode ( $ row ) . " \ n " , <nl> + $ args <nl> + ) ; <nl> fwrite ( STDERR , implode ( " " , $ data ) ) ; <nl> } <nl> <nl>
|
Stop needlessly using func_get_args in run.php
|
facebook/hhvm
|
2ef9c3f8b06781feb417182aa6990d91d763d76a
|
2019-01-30T22:51:44Z
|
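The run.php cleanup above replaces `func_get_args()` plus `array_shift(&$args)` with native `...$args` variadics. A Python rendering of the same refactor pattern (the function names mirror the PHP ones for illustration only):

```python
# Python analogue of the PHP change: declare varargs in the signature
# instead of introspecting and shifting a shared argument list.

def hhvm_cmd_impl(options, config, *extra_args):
    # options/config stay positional; everything else is captured as a
    # tuple, so nothing has to be shifted off a mutable argument array.
    return [options, config, list(extra_args)]

def say_color(*args):
    args = list(args)
    while args:
        chunk = args.pop(0)  # explicit, instead of array_shift(&$args)
        print(chunk, end="")
    print()

print(hhvm_cmd_impl({"mode": "jit"}, "cfg", "-vEval.Jit=1", "--count=3"))
say_color("hello ", "world")
```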
mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func IteratorGetNextAsOptional ( scope * Scope , iterator tf . Output , output_types [ ] <nl> return op . Output ( 0 ) <nl> } <nl> <nl> + / / Returns the value stored in an Optional variant or raises an error if none exists . <nl> + func OptionalGetValue ( scope * Scope , optional tf . Output , output_types [ ] tf . DataType , output_shapes [ ] tf . Shape ) ( components [ ] tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { " output_types " : output_types , " output_shapes " : output_shapes } <nl> + opspec : = tf . OpSpec { <nl> + Type : " OptionalGetValue " , <nl> + Input : [ ] tf . Input { <nl> + optional , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + var idx int <nl> + var err error <nl> + if components , idx , err = makeOutputList ( op , idx , " components " ) ; err ! = nil { <nl> + scope . UpdateErr ( " OptionalGetValue " , err ) <nl> + return <nl> + } <nl> + return components <nl> + } <nl> + <nl> / / Outputs a tensor containing the reduction across all input tensors . <nl> / / <nl> / / Outputs a tensor containing the reduction across all input tensors passed to ops <nl> func Save ( scope * Scope , filename tf . Output , tensor_names tf . Output , data [ ] tf . Ou <nl> return scope . AddOperation ( opspec ) <nl> } <nl> <nl> + / / Returns x * y element - wise . Returns zero if y is zero , even if x if infinite or NaN . <nl> + / / <nl> + / / * NOTE * : ` Mul ` supports broadcasting . More about broadcasting <nl> + / / [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> + func MulNoNan ( scope * Scope , x tf . Output , y tf . Output ) ( z tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " MulNoNan " , <nl> + Input : [ ] tf . Input { <nl> + x , y , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> / / Returns x / y element - wise for integer types . <nl> / / <nl> / / Truncation designates that negative numbers will round fractional quantities <nl> func ArgMin ( scope * Scope , input tf . Output , dimension tf . Output , optional . . . ArgM <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Convert the quantized ' input ' tensor into a lower - precision ' output ' , using the <nl> + / / Converts the quantized ` input ` tensor into a lower - precision ` output ` . <nl> / / <nl> - / / output range specified with ' requested_output_min ' and ' requested_output_max ' . <nl> + / / Converts the quantized ` input ` tensor into a lower - precision ` output ` , using the <nl> + / / output range specified with ` requested_output_min ` and ` requested_output_max ` . <nl> / / <nl> - / / [ input_min , input_max ] are scalar floats that specify the range for the float <nl> - / / interpretation of the ' input ' data . For example , if input_min is - 1 . 0f and <nl> - / / input_max is 1 . 0f , and we are dealing with quint16 quantized data , then a 0 <nl> + / / ` [ input_min , input_max ] ` are scalar floats that specify the range for the float <nl> + / / interpretation of the ` input ` data . For example , if ` input_min ` is - 1 . 0f and <nl> + / / ` input_max ` is 1 . 
0f , and we are dealing with ` quint16 ` quantized data , then a 0 <nl> / / value in the 16 - bit data should be interpreted as - 1 . 0f , and a 65535 means 1 . 0f . <nl> / / <nl> / / Arguments : <nl> func TensorListScatterIntoExistingList ( scope * Scope , input_handle tf . Output , ten <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Given a quantized tensor described by ( input , input_min , input_max ) , outputs a <nl> + / / Computes a range that covers the actual values present in a quantized tensor . <nl> / / <nl> - / / range that covers the actual values present in that tensor . This op is <nl> - / / typically used to produce the requested_output_min and requested_output_max for <nl> - / / Requantize . <nl> + / / Given a quantized tensor described by ` ( input , input_min , input_max ) ` , outputs a <nl> + / / range that covers the actual values present in that tensor . This op is typically <nl> + / / used to produce the ` requested_output_min ` and ` requested_output_max ` for <nl> + / / ` Requantize ` . <nl> / / <nl> / / Arguments : <nl> / / <nl> func OptionalHasValue ( scope * Scope , optional tf . Output ) ( has_value tf . Output ) { <nl> op : = scope . AddOperation ( opspec ) <nl> return op . Output ( 0 ) <nl> } <nl> - <nl> - / / Returns the value stored in an Optional variant or raises an error if none exists . <nl> - func OptionalGetValue ( scope * Scope , optional tf . Output , output_types [ ] tf . DataType , output_shapes [ ] tf . Shape ) ( components [ ] tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - attrs : = map [ string ] interface { } { " output_types " : output_types , " output_shapes " : output_shapes } <nl> - opspec : = tf . OpSpec { <nl> - Type : " OptionalGetValue " , <nl> - Input : [ ] tf . Input { <nl> - optional , <nl> - } , <nl> - Attrs : attrs , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - var idx int <nl> - var err error <nl> - if components , idx , err = makeOutputList ( op , idx , " components " ) ; err ! = nil { <nl> - scope . UpdateErr ( " OptionalGetValue " , err ) <nl> - return <nl> - } <nl> - return components <nl> - } <nl>
|
Go: Update generated wrapper functions for TensorFlow ops.
|
tensorflow/tensorflow
|
935e4b2d00ec35919d360c644cd2ad7271aac6e9
|
2019-02-21T02:50:59Z
|
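Among the regenerated Go wrappers above, `MulNoNan` carries the one non-obvious contract: the product is forced to zero wherever `y` is zero, even when `x` is infinite or NaN. A NumPy sketch of that semantics (an illustration of the documented behavior, not the TensorFlow kernel itself):

```python
import numpy as np

# MulNoNan per the generated doc comment: x * y element-wise, except the
# result is 0 wherever y == 0, even for x = inf or x = nan.
def mul_no_nan(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    with np.errstate(invalid="ignore"):  # inf * 0 is computed, then discarded
        return np.where(y == 0, 0.0, x * y)

print(mul_no_nan([np.inf, np.nan, 2.0], [0.0, 0.0, 3.0]))  # [0. 0. 6.]
```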
mmm a / tensorflow / core / kernels / data / iterator_ops . cc <nl> ppp b / tensorflow / core / kernels / data / iterator_ops . cc <nl> mutex AnonymousIteratorHandleOp : : static_resource_lookup_mutex_ { <nl> LINKER_INITIALIZED } ; <nl> int64 AnonymousIteratorHandleOp : : current_id_ ( 0 ) ; <nl> <nl> - void MakeIteratorOp : : Compute ( OpKernelContext * ctx ) { <nl> + void MakeIteratorOp : : ComputeAsync ( OpKernelContext * ctx , DoneCallback done ) { <nl> DatasetBase * dataset ; <nl> OP_REQUIRES_OK ( ctx , GetDatasetFromVariantTensor ( ctx - > input ( 0 ) , & dataset ) ) ; <nl> - core : : RefCountPtr < IteratorResource > iterator_resource ; <nl> + IteratorResource * iterator_resource ; <nl> OP_REQUIRES_OK ( <nl> ctx , LookupResource ( ctx , HandleFromInput ( ctx , 1 ) , & iterator_resource ) ) ; <nl> - OP_REQUIRES_OK ( ctx , iterator_resource - > SetIteratorFromDataset ( ctx , dataset ) ) ; <nl> + background_worker_ . Schedule ( std : : bind ( <nl> + [ ctx , iterator_resource , dataset ] ( DoneCallback done ) { <nl> + Status s = iterator_resource - > SetIteratorFromDataset ( ctx , dataset ) ; <nl> + iterator_resource - > Unref ( ) ; <nl> + if ( ! s . ok ( ) ) { <nl> + ctx - > SetStatus ( s ) ; <nl> + } <nl> + done ( ) ; <nl> + } , <nl> + std : : move ( done ) ) ) ; <nl> } <nl> <nl> void DeleteIteratorOp : : Compute ( OpKernelContext * ctx ) { <nl> mmm a / tensorflow / core / kernels / data / iterator_ops . h <nl> ppp b / tensorflow / core / kernels / data / iterator_ops . h <nl> class AnonymousIteratorHandleOp : public OpKernel { <nl> const int op_version_ ; <nl> } ; <nl> <nl> - class MakeIteratorOp : public OpKernel { <nl> + class MakeIteratorOp : public AsyncOpKernel { <nl> public : <nl> - explicit MakeIteratorOp ( OpKernelConstruction * ctx ) : OpKernel ( ctx ) { } <nl> + explicit MakeIteratorOp ( OpKernelConstruction * ctx ) <nl> + : AsyncOpKernel ( ctx ) , <nl> + background_worker_ ( ctx - > env ( ) , " tf_data_make_iterator " ) { } <nl> <nl> - void Compute ( OpKernelContext * ctx ) override ; <nl> + void ComputeAsync ( OpKernelContext * ctx , DoneCallback done ) override ; <nl> + <nl> + private : <nl> + BackgroundWorker background_worker_ ; <nl> } ; <nl> <nl> class IteratorGetNextOp : public AsyncOpKernel { <nl> mmm a / tensorflow / python / data / kernel_tests / BUILD <nl> ppp b / tensorflow / python / data / kernel_tests / BUILD <nl> cuda_py_test ( <nl> " / / tensorflow / python : framework_test_lib " , <nl> ] , <nl> tags = [ <nl> - " no_oss " , # TODO ( b / 117920141 ) : Investigate breakage and re - enable . <nl> " no_windows_gpu " , <nl> ] , <nl> xla_enable_strict_auto_jit = True , <nl> mmm a / tensorflow / python / data / kernel_tests / multi_device_iterator_test . py <nl> ppp b / tensorflow / python / data / kernel_tests / multi_device_iterator_test . py <nl> <nl> from absl . testing import parameterized <nl> <nl> from tensorflow . core . protobuf import config_pb2 <nl> + from tensorflow . python . client import session <nl> from tensorflow . python . data . experimental . ops import optimization <nl> from tensorflow . python . data . kernel_tests import test_base <nl> from tensorflow . python . data . ops import dataset_ops <nl> def testMultipleInitializationsGraph ( self ) : <nl> init_op = multi_device_iterator . initializer <nl> <nl> config = config_pb2 . ConfigProto ( device_count = { " CPU " : 3 } ) <nl> - with self . test_session ( config = config ) as sess : <nl> + pool = config . session_inter_op_thread_pool . add ( ) <nl> + pool . 
num_threads = 2 <nl> + with session . Session ( config = config ) as sess : <nl> for i in range ( 1000 ) : <nl> sess . run ( init_op , feed_dict = { epoch : i } ) <nl> self . assertEqual ( [ ( i , 0 ) , ( i , 1 ) ] , self . evaluate ( [ elem_on_1 , <nl>
|
Making the MakeIteratorOp async. This allows blocking-type work to be done on a background worker instead of the calling thread.
|
tensorflow/tensorflow
|
595cb7ecd95572135d33f7f1e5cf3ae83135f306
|
2019-06-04T03:12:29Z
|
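The iterator_ops change above converts MakeIteratorOp to an AsyncOpKernel and schedules `SetIteratorFromDataset` on a dedicated BackgroundWorker, so a blocking initializer no longer occupies an inter-op thread; the test correspondingly provisions a second inter-op thread. A stripped-down Python sketch of that compute-async pattern (class and function names are illustrative):

```python
import queue
import threading

class BackgroundWorker:
    """Single background thread draining a work queue, in the spirit of
    the BackgroundWorker that MakeIteratorOp uses above."""
    def __init__(self, name):
        self._work = queue.Queue()
        self._thread = threading.Thread(target=self._loop, name=name, daemon=True)
        self._thread.start()

    def _loop(self):
        while True:
            fn = self._work.get()
            fn()

    def schedule(self, fn):
        self._work.put(fn)

def make_iterator_async(worker, set_iterator_from_dataset, done):
    # ComputeAsync: hand the potentially blocking initialization to the
    # worker and invoke the done callback once it finishes.
    def task():
        set_iterator_from_dataset()
        done()
    worker.schedule(task)

worker = BackgroundWorker("tf_data_make_iterator")
finished = threading.Event()
make_iterator_async(worker, lambda: None, finished.set)
finished.wait(timeout=5)
print("iterator initialized:", finished.is_set())
```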
mmm a / build / cocos2d - win32 . vc2012 . sln <nl> ppp b / build / cocos2d - win32 . vc2012 . sln <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libchipmunk " , " . . \ external \ <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " cpp - tests " , " . . \ tests \ cpp - tests \ proj . win32 \ cpp - tests . vcxproj " , " { 76A39BB2 - 9B84 - 4C65 - 98A5 - 654D86B86F2A } " <nl> EndProject <nl> - Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libnetwork " , " . . \ cocos \ network \ proj . win32 \ libNetwork . vcxproj " , " { DF2638C0 - 8128 - 4847 - 867C - 6EAFE3DEE7B5 } " <nl> - EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " lua - tests " , " . . \ tests \ lua - tests \ project \ proj . win32 \ lua - tests . win32 . vcxproj " , " { 4E6A7A0E - DDD8 - 4BAA - 8B22 - C964069364ED } " <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " cpp - empty - test " , " . . \ tests \ cpp - empty - test \ proj . win32 \ cpp - empty - test . vcxproj " , " { B8BF9E81 - 35FD - 4582 - BA1C - B85FA365BABB } " <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libbox2d " , " . . \ external \ Box <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libcocos2d " , " . . \ cocos \ 2d \ libcocos2d . vcxproj " , " { 98A51BA8 - FC3A - 415B - AC8F - 8C7BD464E93E } " <nl> EndProject <nl> - Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libcocosdenshion " , " . . \ cocos \ audio \ proj . win32 \ libcocosdenshion . vcxproj " , " { F8EDD7FA - 9A51 - 4E80 - BAEB - 860825D2EAC6 } " <nl> - EndProject <nl> - Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libui " , " . . \ cocos \ ui \ proj . win32 \ libui . vcxproj " , " { 7E06E92C - 537A - 442B - 9E4A - 4761C84F8A1A } " <nl> - EndProject <nl> - Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libextension " , " . . \ extensions \ proj . win32 \ libextension . vcxproj " , " { 21B2C324 - 891F - 48EA - AD1A - 5AE13DE12E28 } " <nl> - EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libspine " , " . . \ cocos \ editor - support \ spine \ proj . win32 \ libspine . vcxproj " , " { B7C2A162 - DEC9 - 4418 - 972E - 240AB3CBFCAE } " <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libcocosbuilder " , " . . \ cocos \ editor - support \ cocosbuilder \ proj . win32 \ libcocosbuilder . vcxproj " , " { 811C0DAB - 7B96 - 4BD3 - A154 - B7572B58E4AB } " <nl> EndProject <nl> - Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libcocostudio " , " . . \ cocos \ editor - support \ cocostudio \ proj . win32 \ libcocostudio . vcxproj " , " { B57CF53F - 2E49 - 4031 - 9822 - 047CC0E6BDE2 } " <nl> - EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libluacocos2d " , " . . \ cocos \ scripting \ lua - bindings \ proj . win32 \ libluacocos2d . vcxproj " , " { 9F2D6CE6 - C893 - 4400 - B50C - 6DB70CC2562F } " <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libluacocosdenshion " , " . . \ cocos \ scripting \ lua - bindings \ proj . win32 \ libluacocosdenshion . vcxproj " , " { 46A7D57A - 7F25 - 4F52 - 823B - FFAC4FF3A624 } " <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libluaextension " , " . . 
\ cocos <nl> EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libluaspine " , " . . \ cocos \ scripting \ lua - bindings \ proj . win32 \ libluaspine . vcxproj " , " { 31EF6AB1 - 0D9A - 4BC6 - 99B8 - 2C482EA373C4 } " <nl> EndProject <nl> - Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libcocos3d " , " . . \ cocos \ 3d \ proj . win32 \ libcocos3d . vcxproj " , " { E24950FA - 5BC1 - 4AEE - A900 - 4F0259354BF0 } " <nl> - EndProject <nl> Project ( " { 8BC9CEB8 - 8B4A - 11D0 - 8D11 - 00A0C91BC942 } " ) = " libluacocos3d " , " . . \ cocos \ scripting \ lua - bindings \ proj . win32 \ libluacocos3d . vcxproj " , " { 06840490 - 14A4 - 43D6 - 88BC - AAFA44D043EB } " <nl> EndProject <nl> Global <nl> Global <nl> { 76A39BB2 - 9B84 - 4C65 - 98A5 - 654D86B86F2A } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 76A39BB2 - 9B84 - 4C65 - 98A5 - 654D86B86F2A } . Release | Win32 . ActiveCfg = Release | Win32 <nl> { 76A39BB2 - 9B84 - 4C65 - 98A5 - 654D86B86F2A } . Release | Win32 . Build . 0 = Release | Win32 <nl> - { DF2638C0 - 8128 - 4847 - 867C - 6EAFE3DEE7B5 } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> - { DF2638C0 - 8128 - 4847 - 867C - 6EAFE3DEE7B5 } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> - { DF2638C0 - 8128 - 4847 - 867C - 6EAFE3DEE7B5 } . Release | Win32 . ActiveCfg = Release | Win32 <nl> - { DF2638C0 - 8128 - 4847 - 867C - 6EAFE3DEE7B5 } . Release | Win32 . Build . 0 = Release | Win32 <nl> { 4E6A7A0E - DDD8 - 4BAA - 8B22 - C964069364ED } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> { 4E6A7A0E - DDD8 - 4BAA - 8B22 - C964069364ED } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 4E6A7A0E - DDD8 - 4BAA - 8B22 - C964069364ED } . Release | Win32 . ActiveCfg = Release | Win32 <nl> Global <nl> { 98A51BA8 - FC3A - 415B - AC8F - 8C7BD464E93E } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 98A51BA8 - FC3A - 415B - AC8F - 8C7BD464E93E } . Release | Win32 . ActiveCfg = Release | Win32 <nl> { 98A51BA8 - FC3A - 415B - AC8F - 8C7BD464E93E } . Release | Win32 . Build . 0 = Release | Win32 <nl> - { F8EDD7FA - 9A51 - 4E80 - BAEB - 860825D2EAC6 } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> - { F8EDD7FA - 9A51 - 4E80 - BAEB - 860825D2EAC6 } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> - { F8EDD7FA - 9A51 - 4E80 - BAEB - 860825D2EAC6 } . Release | Win32 . ActiveCfg = Release | Win32 <nl> - { F8EDD7FA - 9A51 - 4E80 - BAEB - 860825D2EAC6 } . Release | Win32 . Build . 0 = Release | Win32 <nl> - { 7E06E92C - 537A - 442B - 9E4A - 4761C84F8A1A } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> - { 7E06E92C - 537A - 442B - 9E4A - 4761C84F8A1A } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> - { 7E06E92C - 537A - 442B - 9E4A - 4761C84F8A1A } . Release | Win32 . ActiveCfg = Release | Win32 <nl> - { 7E06E92C - 537A - 442B - 9E4A - 4761C84F8A1A } . Release | Win32 . Build . 0 = Release | Win32 <nl> - { 21B2C324 - 891F - 48EA - AD1A - 5AE13DE12E28 } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> - { 21B2C324 - 891F - 48EA - AD1A - 5AE13DE12E28 } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> - { 21B2C324 - 891F - 48EA - AD1A - 5AE13DE12E28 } . Release | Win32 . ActiveCfg = Release | Win32 <nl> - { 21B2C324 - 891F - 48EA - AD1A - 5AE13DE12E28 } . Release | Win32 . Build . 0 = Release | Win32 <nl> { B7C2A162 - DEC9 - 4418 - 972E - 240AB3CBFCAE } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> { B7C2A162 - DEC9 - 4418 - 972E - 240AB3CBFCAE } . Debug | Win32 . Build . 
0 = Debug | Win32 <nl> { B7C2A162 - DEC9 - 4418 - 972E - 240AB3CBFCAE } . Release | Win32 . ActiveCfg = Release | Win32 <nl> Global <nl> { 811C0DAB - 7B96 - 4BD3 - A154 - B7572B58E4AB } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 811C0DAB - 7B96 - 4BD3 - A154 - B7572B58E4AB } . Release | Win32 . ActiveCfg = Release | Win32 <nl> { 811C0DAB - 7B96 - 4BD3 - A154 - B7572B58E4AB } . Release | Win32 . Build . 0 = Release | Win32 <nl> - { B57CF53F - 2E49 - 4031 - 9822 - 047CC0E6BDE2 } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> - { B57CF53F - 2E49 - 4031 - 9822 - 047CC0E6BDE2 } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> - { B57CF53F - 2E49 - 4031 - 9822 - 047CC0E6BDE2 } . Release | Win32 . ActiveCfg = Release | Win32 <nl> - { B57CF53F - 2E49 - 4031 - 9822 - 047CC0E6BDE2 } . Release | Win32 . Build . 0 = Release | Win32 <nl> { 9F2D6CE6 - C893 - 4400 - B50C - 6DB70CC2562F } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> { 9F2D6CE6 - C893 - 4400 - B50C - 6DB70CC2562F } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 9F2D6CE6 - C893 - 4400 - B50C - 6DB70CC2562F } . Release | Win32 . ActiveCfg = Release | Win32 <nl> Global <nl> { 31EF6AB1 - 0D9A - 4BC6 - 99B8 - 2C482EA373C4 } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 31EF6AB1 - 0D9A - 4BC6 - 99B8 - 2C482EA373C4 } . Release | Win32 . ActiveCfg = Release | Win32 <nl> { 31EF6AB1 - 0D9A - 4BC6 - 99B8 - 2C482EA373C4 } . Release | Win32 . Build . 0 = Release | Win32 <nl> - { E24950FA - 5BC1 - 4AEE - A900 - 4F0259354BF0 } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> - { E24950FA - 5BC1 - 4AEE - A900 - 4F0259354BF0 } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> - { E24950FA - 5BC1 - 4AEE - A900 - 4F0259354BF0 } . Release | Win32 . ActiveCfg = Release | Win32 <nl> - { E24950FA - 5BC1 - 4AEE - A900 - 4F0259354BF0 } . Release | Win32 . Build . 0 = Release | Win32 <nl> { 06840490 - 14A4 - 43D6 - 88BC - AAFA44D043EB } . Debug | Win32 . ActiveCfg = Debug | Win32 <nl> { 06840490 - 14A4 - 43D6 - 88BC - AAFA44D043EB } . Debug | Win32 . Build . 0 = Debug | Win32 <nl> { 06840490 - 14A4 - 43D6 - 88BC - AAFA44D043EB } . Release | Win32 . ActiveCfg = Release | Win32 <nl> mmm a / cocos / 2d / libcocos2d . vcxproj <nl> ppp b / cocos / 2d / libcocos2d . vcxproj <nl> <nl> < / PreBuildEvent > <nl> < ClCompile > <nl> < Optimization > Disabled < / Optimization > <nl> - < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 
1A \ include ; $ ( EngineRoot ) external \ sqlite3 \ include ; $ ( EngineRoot ) external \ unzip ; $ ( EngineRoot ) external \ edtaa3func ; $ ( EngineRoot ) external \ tinyxml2 ; $ ( EngineRoot ) external \ png \ include \ win32 ; $ ( EngineRoot ) external \ jpeg \ include \ win32 ; $ ( EngineRoot ) external \ tiff \ include \ win32 ; $ ( EngineRoot ) external \ webp \ include \ win32 ; $ ( EngineRoot ) external \ freetype2 \ include \ win32 ; $ ( EngineRoot ) external \ win32 - specific \ icon \ include ; $ ( EngineRoot ) external \ win32 - specific \ zlib \ include ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external \ xxhash ; $ ( EngineRoot ) external \ ConvertUTF ; $ ( EngineRoot ) external ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> - < PreprocessorDefinitions > WIN32 ; _USRDLL ; _DEBUG ; _WINDOWS ; _LIB ; COCOS2DXWIN32_EXPORTS ; GL_GLEXT_PROTOTYPES ; COCOS2D_DEBUG = 1 ; _CRT_SECURE_NO_WARNINGS ; _SCL_SECURE_NO_WARNINGS ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> + < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; $ ( EngineRoot ) external \ sqlite3 \ include ; $ ( EngineRoot ) external \ unzip ; $ ( EngineRoot ) external \ edtaa3func ; $ ( EngineRoot ) external \ tinyxml2 ; $ ( EngineRoot ) external \ png \ include \ win32 ; $ ( EngineRoot ) external \ jpeg \ include \ win32 ; $ ( EngineRoot ) external \ tiff \ include \ win32 ; $ ( EngineRoot ) external \ webp \ include \ win32 ; $ ( EngineRoot ) external \ freetype2 \ include \ win32 ; $ ( EngineRoot ) external \ win32 - specific \ icon \ include ; $ ( EngineRoot ) external \ win32 - specific \ zlib \ include ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external \ xxhash ; $ ( EngineRoot ) external \ ConvertUTF ; $ ( EngineRoot ) external \ Box2d ; $ ( EngineRoot ) external \ curl \ include \ win32 ; $ ( EngineRoot ) external \ websockets \ include \ win32 ; $ ( EngineRoot ) external ; $ ( EngineRoot ) cocos ; $ ( EngineRoot ) cocos \ editor - support ; $ ( EngineRoot ) cocos \ audio \ include ; $ ( EngineRoot ) extensions ; $ ( EngineRoot ) ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> + < PreprocessorDefinitions > WIN32 ; _USRDLL ; _DEBUG ; _WINDOWS ; _LIB ; COCOS2DXWIN32_EXPORTS ; GL_GLEXT_PROTOTYPES ; COCOS2D_DEBUG = 1 ; _CRT_SECURE_NO_WARNINGS ; _SCL_SECURE_NO_WARNINGS ; _USE3DDLL ; _EXPORT_DLL_ ; _USRSTUDIODLL ; _USREXDLL ; _USEGUIDLL ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < MinimalRebuild > false < / MinimalRebuild > <nl> < BasicRuntimeChecks > EnableFastChecks < / BasicRuntimeChecks > <nl> < RuntimeLibrary > MultiThreadedDebugDLL < / RuntimeLibrary > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ sqlite3 \ libraries \ win32 \ * . * " " $ ( OutDir ) <nl> < ImportLibrary > $ ( TargetDir ) $ ( TargetName ) . lib < / ImportLibrary > <nl> < TargetMachine > MachineX86 < / TargetMachine > <nl> < ModuleDefinitionFile > cocos2d . def < / ModuleDefinitionFile > <nl> - < AdditionalDependencies > libchipmunk . lib ; sqlite3 . lib ; % ( AdditionalDependencies ) < / AdditionalDependencies > <nl> + < AdditionalDependencies > libchipmunk . lib ; sqlite3 . lib ; libcurl_imp . lib ; libBox2D . lib ; websockets . 
lib ; % ( AdditionalDependencies ) < / AdditionalDependencies > <nl> < / Link > <nl> < PostBuildEvent > <nl> < Command > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ sqlite3 \ libraries \ win32 \ * . * " " $ ( OutDir ) <nl> < / Command > <nl> < / PreBuildEvent > <nl> < ClCompile > <nl> - < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; $ ( EngineRoot ) external \ sqlite3 \ include ; $ ( EngineRoot ) external \ unzip ; $ ( EngineRoot ) external \ edtaa3func ; $ ( EngineRoot ) external \ tinyxml2 ; $ ( EngineRoot ) external \ png \ include \ win32 ; $ ( EngineRoot ) external \ jpeg \ include \ win32 ; $ ( EngineRoot ) external \ tiff \ include \ win32 ; $ ( EngineRoot ) external \ webp \ include \ win32 ; $ ( EngineRoot ) external \ freetype2 \ include \ win32 ; $ ( EngineRoot ) external \ win32 - specific \ icon \ include ; $ ( EngineRoot ) external \ win32 - specific \ zlib \ include ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external \ xxhash ; $ ( EngineRoot ) external \ ConvertUTF ; $ ( EngineRoot ) external ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> - < PreprocessorDefinitions > WIN32 ; _USRDLL ; NDEBUG ; _WINDOWS ; _LIB ; COCOS2DXWIN32_EXPORTS ; GL_GLEXT_PROTOTYPES ; _CRT_SECURE_NO_WARNINGS ; _SCL_SECURE_NO_WARNINGS ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> + < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; $ ( EngineRoot ) external \ sqlite3 \ include ; $ ( EngineRoot ) external \ unzip ; $ ( EngineRoot ) external \ edtaa3func ; $ ( EngineRoot ) external \ tinyxml2 ; $ ( EngineRoot ) external \ png \ include \ win32 ; $ ( EngineRoot ) external \ jpeg \ include \ win32 ; $ ( EngineRoot ) external \ tiff \ include \ win32 ; $ ( EngineRoot ) external \ webp \ include \ win32 ; $ ( EngineRoot ) external \ freetype2 \ include \ win32 ; $ ( EngineRoot ) external \ win32 - specific \ icon \ include ; $ ( EngineRoot ) external \ win32 - specific \ zlib \ include ; $ ( EngineRoot ) external \ chipmunk \ include \ chipmunk ; $ ( EngineRoot ) external \ xxhash ; $ ( EngineRoot ) external \ ConvertUTF ; $ ( EngineRoot ) external \ Box2d ; $ ( EngineRoot ) external \ curl \ include \ win32 ; $ ( EngineRoot ) external \ websockets \ win32 \ include ; $ ( EngineRoot ) external ; $ ( EngineRoot ) cocos ; $ ( EngineRoot ) cocos \ editor - support ; $ ( EngineRoot ) cocos \ audio \ include ; $ ( EngineRoot ) extensions ; $ ( EngineRoot ) ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> + < PreprocessorDefinitions > WIN32 ; _USRDLL ; NDEBUG ; _WINDOWS ; _LIB ; COCOS2DXWIN32_EXPORTS ; GL_GLEXT_PROTOTYPES ; _CRT_SECURE_NO_WARNINGS ; _SCL_SECURE_NO_WARNINGS ; _USE3DDLL ; _EXPORT_DLL_ ; _USRSTUDIODLL ; _USREXDLL ; _USEGUIDLL ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < RuntimeLibrary > MultiThreadedDLL < / RuntimeLibrary > <nl> < PrecompiledHeader > <nl> < / PrecompiledHeader > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < / Command > <nl> < / PreLinkEvent > <nl> < Link > <nl> - < AdditionalDependencies > libchipmunk . lib ; sqlite3 . lib ; % ( AdditionalDependencies ) < / AdditionalDependencies > <nl> + < AdditionalDependencies > libchipmunk . lib ; sqlite3 . lib ; libcurl_imp . lib ; libBox2D . lib ; websockets . 
lib ; % ( AdditionalDependencies ) < / AdditionalDependencies > <nl> < OutputFile > $ ( OutDir ) $ ( ProjectName ) . dll < / OutputFile > <nl> < AdditionalLibraryDirectories > $ ( OutDir ) ; % ( AdditionalLibraryDirectories ) < / AdditionalLibraryDirectories > <nl> < IgnoreSpecificDefaultLibraries > ; % ( IgnoreSpecificDefaultLibraries ) < / IgnoreSpecificDefaultLibraries > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < / PostBuildEvent > <nl> < / ItemDefinitionGroup > <nl> < ItemGroup > <nl> + < ClCompile Include = " . . \ . . \ extensions \ assets - manager \ AssetsManager . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControl . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlButton . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlColourPicker . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlHuePicker . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlPotentiometer . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSlider . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlStepper . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSwitch . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlUtils . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCInvocation . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCScale9Sprite . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBox . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBoxImplWin . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCScrollView . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableView . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableViewCell . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsDebugNode . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsSprite . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ extensions \ proj . win32 \ Win32InputBox . cpp " / > <nl> < ClCompile Include = " . . \ . . \ external \ ConvertUTF \ ConvertUTF . c " / > <nl> < ClCompile Include = " . . \ . . \ external \ ConvertUTF \ ConvertUTFWrapper . cpp " / > <nl> < ClCompile Include = " . . \ . . \ external \ edtaa3func \ edtaa3func . cpp " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClCompile Include = " . . \ . . \ external \ unzip \ ioapi . cpp " / > <nl> < ClCompile Include = " . . \ . . \ external \ unzip \ unzip . cpp " / > <nl> < ClCompile Include = " . . \ . . \ external \ xxhash \ xxhash . c " / > <nl> + < ClCompile Include = " . . \ 3d \ CCAABB . cpp " / > <nl> + < ClCompile Include = " . . 
\ 3d \ CCAnimate3D . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCAnimation3D . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCAttachNode . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCBundle3D . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCBundleReader . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCMesh . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCMeshSkin . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCOBB . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCObjLoader . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCRay . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCSkeleton3D . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCSprite3D . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCSprite3DMaterial . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCSubMesh . cpp " / > <nl> + < ClCompile Include = " . . \ 3d \ CCSubMeshState . cpp " / > <nl> + < ClCompile Include = " . . \ audio \ win32 \ MciPlayer . cpp " / > <nl> + < ClCompile Include = " . . \ audio \ win32 \ SimpleAudioEngine . cpp " / > <nl> < ClCompile Include = " . . \ base \ atitc . cpp " / > <nl> < ClCompile Include = " . . \ base \ base64 . cpp " / > <nl> < ClCompile Include = " . . \ base \ CCAutoreleasePool . cpp " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClCompile Include = " . . \ deprecated \ CCNotificationCenter . cpp " / > <nl> < ClCompile Include = " . . \ deprecated \ CCSet . cpp " / > <nl> < ClCompile Include = " . . \ deprecated \ CCString . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimeline . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimelineCache . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCFrame . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCNodeReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCTimeLine . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionFrame . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionFrameEasing . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionManagerEx . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionNode . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionObject . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmature . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmatureAnimation . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmatureDataManager . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmatureDefine . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCBatchNode . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCBone . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCColliderDetector . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComAttribute . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComAudio . 
cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComController . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComRender . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDataReaderHelper . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDatas . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDecorativeDisplay . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDisplayFactory . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDisplayManager . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCInputDelegate . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCProcessBase . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSGUIReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSkin . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSpriteFrameCacheHelper . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSSceneReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCTransformHelp . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCTween . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCUtilMath . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CocoLoader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ DictionaryHelper . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ TriggerBase . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ TriggerMng . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ TriggerObj . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ButtonReader \ ButtonReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ CheckBoxReader \ CheckBoxReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ImageViewReader \ ImageViewReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LayoutReader \ LayoutReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ListViewReader \ ListViewReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LoadingBarReader \ LoadingBarReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ PageViewReader \ PageViewReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ScrollViewReader \ ScrollViewReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ SliderReader \ SliderReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextAtlasReader \ TextAtlasReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextBMFontReader \ TextBMFontReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextFieldReader \ TextFieldReader . 
cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextReader \ TextReader . cpp " / > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ WidgetReader . cpp " / > <nl> < ClCompile Include = " . . \ math \ CCAffineTransform . cpp " / > <nl> < ClCompile Include = " . . \ math \ CCGeometry . cpp " / > <nl> < ClCompile Include = " . . \ math \ CCVertex . cpp " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClCompile Include = " . . \ math \ Vec2 . cpp " / > <nl> < ClCompile Include = " . . \ math \ Vec3 . cpp " / > <nl> < ClCompile Include = " . . \ math \ Vec4 . cpp " / > <nl> + < ClCompile Include = " . . \ network \ HttpClient . cpp " / > <nl> + < ClCompile Include = " . . \ network \ SocketIO . cpp " / > <nl> + < ClCompile Include = " . . \ network \ WebSocket . cpp " / > <nl> < ClCompile Include = " . . \ physics \ CCPhysicsBody . cpp " / > <nl> < ClCompile Include = " . . \ physics \ CCPhysicsContact . cpp " / > <nl> < ClCompile Include = " . . \ physics \ CCPhysicsJoint . cpp " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClCompile Include = " . . \ renderer \ CCVertexIndexBuffer . cpp " / > <nl> < ClCompile Include = " . . \ renderer \ CCVertexIndexData . cpp " / > <nl> < ClCompile Include = " . . \ storage \ local - storage \ LocalStorage . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ CocosGUI . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIButton . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UICheckBox . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIDeprecated . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIHBox . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIHelper . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIImageView . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UILayout . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UILayoutManager . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UILayoutParameter . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIListView . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UILoadingBar . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIPageView . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIRelativeBox . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIRichText . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIScale9Sprite . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIScrollView . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UISlider . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIText . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UITextAtlas . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UITextBMFont . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UITextField . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIVBox . cpp " / > <nl> + < ClCompile Include = " . . \ ui \ UIWidget . cpp " / > <nl> < ClCompile Include = " CCAction . cpp " / > <nl> < ClCompile Include = " CCActionCamera . cpp " / > <nl> < ClCompile Include = " CCActionCatmullRom . cpp " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClCompile Include = " CCTweenFunction . cpp " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < ClInclude Include = " . . \ . . \ extensions \ assets - manager \ AssetsManager . 
h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ cocos - ext . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ ExtensionExport . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ ExtensionMacros . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControl . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlButton . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlColourPicker . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlExtensions . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlHuePicker . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlPotentiometer . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSlider . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlStepper . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSwitch . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlUtils . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCInvocation . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCScale9Sprite . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBox . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBoxImpl . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBoxImplWin . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCScrollView . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableView . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableViewCell . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsDebugNode . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsSprite . h " / > <nl> + < ClInclude Include = " . . \ . . \ extensions \ proj . win32 \ Win32InputBox . h " / > <nl> < ClInclude Include = " . . \ . . \ external \ ConvertUTF \ ConvertUTF . h " / > <nl> < ClInclude Include = " . . \ . . \ external \ edtaa3func \ edtaa3func . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ document . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ filestream . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ internal \ pow10 . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ internal \ stack . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ internal \ strfunc . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ prettywriter . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ rapidjson . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ reader . h " / > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ stringbuffer . h " / > <nl> + < ClInclude Include = " . . 
\ . . \ external \ json \ writer . h " / > <nl> < ClInclude Include = " . . \ . . \ external \ tinyxml2 \ tinyxml2 . h " / > <nl> < ClInclude Include = " . . \ . . \ external \ unzip \ ioapi . h " / > <nl> < ClInclude Include = " . . \ . . \ external \ unzip \ unzip . h " / > <nl> < ClInclude Include = " . . \ . . \ external \ xxhash \ xxhash . h " / > <nl> + < ClInclude Include = " . . \ 3d \ 3dExport . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCAABB . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCAnimate3D . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCAnimation3D . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCAnimationCurve . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCAttachNode . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCBundle3D . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCBundle3DData . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCBundleReader . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCMesh . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCMeshSkin . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCOBB . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCObjLoader . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCRay . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCSkeleton3D . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCSprite3D . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCSprite3DMaterial . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCSubMesh . h " / > <nl> + < ClInclude Include = " . . \ 3d \ CCSubMeshState . h " / > <nl> + < ClInclude Include = " . . \ 3d \ cocos3d . h " / > <nl> + < ClInclude Include = " . . \ audio \ include \ Export . h " / > <nl> + < ClInclude Include = " . . \ audio \ include \ SimpleAudioEngine . h " / > <nl> + < ClInclude Include = " . . \ audio \ win32 \ MciPlayer . h " / > <nl> < ClInclude Include = " . . \ base \ atitc . h " / > <nl> < ClInclude Include = " . . \ base \ base64 . h " / > <nl> < ClInclude Include = " . . \ base \ CCAutoreleasePool . h " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClInclude Include = " . . \ deprecated \ CCNotificationCenter . h " / > <nl> < ClInclude Include = " . . \ deprecated \ CCSet . h " / > <nl> < ClInclude Include = " . . \ deprecated \ CCString . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimeline . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimelineCache . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCFrame . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCNodeReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCTimeLine . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCTimelineMacro . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionFrame . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionFrameEasing . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionManagerEx . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionNode . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionObject . h " / > <nl> + < ClInclude Include = " . . 
\ editor - support \ cocostudio \ CCArmature . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmatureAnimation . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmatureDataManager . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmatureDefine . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCBatchNode . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCBone . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCColliderDetector . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComAttribute . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComAudio . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComBase . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComController . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComRender . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDataReaderHelper . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDatas . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDecorativeDisplay . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDisplayFactory . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDisplayManager . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCInputDelegate . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCProcessBase . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSGUIReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSkin . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSpriteFrameCacheHelper . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSSceneReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCTransformHelp . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCTween . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCUtilMath . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CocoLoader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CocosStudioExport . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CocoStudio . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ DictionaryHelper . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ TriggerBase . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ TriggerMng . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ TriggerObj . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ButtonReader \ ButtonReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ CheckBoxReader \ CheckBoxReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ImageViewReader \ ImageViewReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LayoutReader \ LayoutReader . 
h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ListViewReader \ ListViewReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LoadingBarReader \ LoadingBarReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ PageViewReader \ PageViewReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ScrollViewReader \ ScrollViewReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ SliderReader \ SliderReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextAtlasReader \ TextAtlasReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextBMFontReader \ TextBMFontReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextFieldReader \ TextFieldReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextReader \ TextReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ WidgetReader . h " / > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ WidgetReaderProtocol . h " / > <nl> < ClInclude Include = " . . \ math \ CCAffineTransform . h " / > <nl> < ClInclude Include = " . . \ math \ CCGeometry . h " / > <nl> < ClInclude Include = " . . \ math \ CCMath . h " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClInclude Include = " . . \ math \ Vec2 . h " / > <nl> < ClInclude Include = " . . \ math \ Vec3 . h " / > <nl> < ClInclude Include = " . . \ math \ Vec4 . h " / > <nl> + < ClInclude Include = " . . \ network \ HttpClient . h " / > <nl> + < ClInclude Include = " . . \ network \ HttpRequest . h " / > <nl> + < ClInclude Include = " . . \ network \ HttpResponse . h " / > <nl> + < ClInclude Include = " . . \ network \ SocketIO . h " / > <nl> + < ClInclude Include = " . . \ network \ WebSocket . h " / > <nl> < ClInclude Include = " . . \ physics \ CCPhysicsBody . h " / > <nl> < ClInclude Include = " . . \ physics \ CCPhysicsContact . h " / > <nl> < ClInclude Include = " . . \ physics \ CCPhysicsJoint . h " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < ClInclude Include = " . . \ renderer \ CCVertexIndexBuffer . h " / > <nl> < ClInclude Include = " . . \ renderer \ CCVertexIndexData . h " / > <nl> < ClInclude Include = " . . \ storage \ local - storage \ LocalStorage . h " / > <nl> + < ClInclude Include = " . . \ ui \ CocosGUI . h " / > <nl> + < ClInclude Include = " . . \ ui \ GUIExport . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIButton . h " / > <nl> + < ClInclude Include = " . . \ ui \ UICheckBox . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIDeprecated . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIHBox . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIHelper . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIImageView . h " / > <nl> + < ClInclude Include = " . . \ ui \ UILayout . h " / > <nl> + < ClInclude Include = " . . \ ui \ UILayoutManager . h " / > <nl> + < ClInclude Include = " . . \ ui \ UILayoutParameter . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIListView . h " / > <nl> + < ClInclude Include = " . . 
\ ui \ UILoadingBar . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIPageView . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIRelativeBox . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIRichText . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIScale9Sprite . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIScrollView . h " / > <nl> + < ClInclude Include = " . . \ ui \ UISlider . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIText . h " / > <nl> + < ClInclude Include = " . . \ ui \ UITextAtlas . h " / > <nl> + < ClInclude Include = " . . \ ui \ UITextBMFont . h " / > <nl> + < ClInclude Include = " . . \ ui \ UITextField . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIVBox . h " / > <nl> + < ClInclude Include = " . . \ ui \ UIWidget . h " / > <nl> < ClInclude Include = " CCAction . h " / > <nl> < ClInclude Include = " CCActionCamera . h " / > <nl> < ClInclude Include = " CCActionCatmullRom . h " / > <nl> xcopy / Y / Q " $ ( ProjectDir ) . . \ . . \ external \ win32 - specific \ gles \ prebuilt \ * . * " " $ ( Ou <nl> < None Include = " cocos2d . def " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < ProjectReference Include = " . . \ . . \ external \ Box2D \ proj . win32 \ libbox2d . vcxproj " > <nl> + < Project > { 929480e7 - 23c0 - 4df6 - 8456 - 096d71547116 } < / Project > <nl> + < Private > false < / Private > <nl> + < ReferenceOutputAssembly > true < / ReferenceOutputAssembly > <nl> + < CopyLocalSatelliteAssemblies > false < / CopyLocalSatelliteAssemblies > <nl> + < LinkLibraryDependencies > true < / LinkLibraryDependencies > <nl> + < UseLibraryDependencyInputs > false < / UseLibraryDependencyInputs > <nl> + < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ external \ chipmunk \ proj . win32 \ chipmunk . vcxproj " > <nl> < Project > { 207bc7a9 - ccf1 - 4f2f - a04d - 45f72242ae25 } < / Project > <nl> + < Private > false < / Private > <nl> + < ReferenceOutputAssembly > true < / ReferenceOutputAssembly > <nl> + < CopyLocalSatelliteAssemblies > false < / CopyLocalSatelliteAssemblies > <nl> + < LinkLibraryDependencies > true < / LinkLibraryDependencies > <nl> + < UseLibraryDependencyInputs > false < / UseLibraryDependencyInputs > <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl> mmm a / cocos / 2d / libcocos2d . vcxproj . filters <nl> ppp b / cocos / 2d / libcocos2d . vcxproj . 
filters <nl> <nl> < Filter Include = " storage " > <nl> < UniqueIdentifier > { 44bdf58f - 4af2 - 433c - b4af - 58dc05ef96b5 } < / UniqueIdentifier > <nl> < / Filter > <nl> + < Filter Include = " 3d " > <nl> + < UniqueIdentifier > { 63733c51 - 582a - 4f0f - 9a82 - e066da459a72 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio " > <nl> + < UniqueIdentifier > { dbdbbfad - 71fc - 4f1d - a809 - b43651f87267 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocosdenshion " > <nl> + < UniqueIdentifier > { 3bd71d42 - dfa4 - 4649 - bb01 - ad607fdca1c2 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " ui " > <nl> + < UniqueIdentifier > { 704c4ce6 - b7ad - 4406 - 9414 - 71169734bbc1 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension " > <nl> + < UniqueIdentifier > { 976bf662 - 699e - 482e - b5db - a20d4abab137 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension \ AssetsManager " > <nl> + < UniqueIdentifier > { ad47c713 - 2196 - 4c8f - 9205 - 2a2a7ecc0b80 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension \ GUI " > <nl> + < UniqueIdentifier > { b27aba95 - 51a2 - 413c - 8570 - 0aff9adf2b6b } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension \ GUI \ CCControlExtension " > <nl> + < UniqueIdentifier > { 220cf2ee - 61b0 - 40cf - a88a - 8627e4e609f1 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension \ GUI \ CCScrollView " > <nl> + < UniqueIdentifier > { a1f539bc - d5be - 4224 - a4d2 - 01c0b6f17d6e } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension \ GUI \ CCEditBox " > <nl> + < UniqueIdentifier > { dc45cd54 - 4576 - 4401 - 87b7 - a276f91a45bd } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " extension \ physics_nodes " > <nl> + < UniqueIdentifier > { 1de7fce7 - 0dee - 4571 - 8fcd - 43eb617aaf8b } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocosdenshion \ Header Files " > <nl> + < UniqueIdentifier > { f42979de - 0079 - 4eba - b469 - 81ebff9ec588 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocosdenshion \ Source Files " > <nl> + < UniqueIdentifier > { f42ec238 - cddc - 475a - b343 - 6d0984cb9681 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " network " > <nl> + < UniqueIdentifier > { 31338a7d - ebe1 - 4867 - 9c17 - d3645122a864 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " network \ Header Files " > <nl> + < UniqueIdentifier > { 5a094fe5 - f941 - 4dd4 - a892 - 28d721162ea7 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " network \ Source Files " > <nl> + < UniqueIdentifier > { 9702eb68 - 42c9 - 405a - bc89 - a1bd85a40ec7 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " ui \ BaseClasses " > <nl> + < UniqueIdentifier > { ad654e6b - 96ee - 4693 - 9789 - dc4aa1c52e70 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " ui \ Layouts " > <nl> + < UniqueIdentifier > { 177a9a30 - a4a6 - 41e6 - 93f0 - 4a4cbe9e4039 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " ui \ System " > <nl> + < UniqueIdentifier > { c2ebbbc1 - 85b3 - 4d6f - a3c5 - 116eae2124e1 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " ui \ UIWidgets " > <nl> + < UniqueIdentifier > { 0d201449 - ce7a - 4f87 - 86ab - 9c30e803c901 } < / 
UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " ui \ UIWidgets \ ScrollWidget " > <nl> + < UniqueIdentifier > { 6ac0e3c8 - d5b1 - 44d9 - 8c41 - 21662a767cc6 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ trigger " > <nl> + < UniqueIdentifier > { 0554a5b1 - 03a1 - 4d38 - 87a5 - 976dbe9a39d9 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ TimelineAction " > <nl> + < UniqueIdentifier > { c75d4c37 - d555 - 4a5b - a0ba - 4bc9a3d846e1 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader " > <nl> + < UniqueIdentifier > { 67bdb22e - 8cfc - 41ed - bb07 - 861e88a31752 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader " > <nl> + < UniqueIdentifier > { 284a709e - 9efa - 484a - a595 - 83db6c7836c8 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ ButtonReader " > <nl> + < UniqueIdentifier > { 3c7267f2 - 06ea - 4c7c - a13c - 552076a3dc8c } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ CheckBoxReader " > <nl> + < UniqueIdentifier > { b47c2297 - bf5e - 43e6 - ae70 - 741efee00689 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ ImageViewReader " > <nl> + < UniqueIdentifier > { 1b5e8a9e - 87cf - 4800 - bb7b - 4f63504fb132 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ LayoutReader " > <nl> + < UniqueIdentifier > { d47fb3dd - 9ab8 - 4559 - b8b5 - 776d87aba319 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ ListViewReader " > <nl> + < UniqueIdentifier > { 65c0429b - 5a85 - 46ac - ab4a - edb0aeb5d388 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ LoadingBarReader " > <nl> + < UniqueIdentifier > { 8bdf34ea - 1e66 - 41b0 - 9dbf - 7530ba14dce5 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ PageViewReader " > <nl> + < UniqueIdentifier > { 690fb572 - be0b - 4bb6 - 9b8b - a007afb99b39 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ ScrollViewReader " > <nl> + < UniqueIdentifier > { 5e397fa9 - 4e36 - 43f3 - 8c4f - 1dd3382c1458 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ SliderReader " > <nl> + < UniqueIdentifier > { fa2abcd0 - 6362 - 4741 - a144 - a3b0226fe4f1 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ TextAtlasReader " > <nl> + < UniqueIdentifier > { 03596848 - 0d59 - 4d69 - 8193 - 30f404683c7a } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ TextBMFontReader " > <nl> + < UniqueIdentifier > { 4d27423a - d9e8 - 496b - bcc4 - 8684230c6c18 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ TextFieldReader " > <nl> + < UniqueIdentifier > { 075b4cf0 - 63ae - 4cb1 - a6d8 - e7cb24146531 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ reader \ WidgetReader \ TextReader " > <nl> + < UniqueIdentifier > { 9421ece2 - 69f8 - 4cee - 8e59 - 575c3ba06f15 } < / 
UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ json " > <nl> + < UniqueIdentifier > { a9901b6a - 0c7b - 4adb - 8e17 - 105c872d744d } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ json \ rapidjson " > <nl> + < UniqueIdentifier > { a9a3bf20 - 9daf - 465d - 9525 - cce2ab0d85a3 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ json \ rapidjson \ internal " > <nl> + < UniqueIdentifier > { 0cf2210c - 3544 - 41ed - 9d4e - 88987a1c7bcf } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ components " > <nl> + < UniqueIdentifier > { dc6e53e4 - e518 - 403f - b2a9 - 97f6f7cd0961 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ armature " > <nl> + < UniqueIdentifier > { 22a4587f - d4ca - 44fa - a734 - ded122cd79e5 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ armature \ animation " > <nl> + < UniqueIdentifier > { 137efcbb - 0d14 - 4f1c - a856 - 7b1669a6d2af } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ armature \ datas " > <nl> + < UniqueIdentifier > { 4825cd2d - ca8b - 434c - 8c79 - 2d3d3258086f } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ armature \ display " > <nl> + < UniqueIdentifier > { 57ba2fcb - 04bd - 4b2f - 9ae6 - 375992ad363a } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ armature \ physics " > <nl> + < UniqueIdentifier > { acdfaa6f - 1374 - 4611 - a5e7 - 9038dc6900c4 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ armature \ utils " > <nl> + < UniqueIdentifier > { f195e2f1 - 1030 - 4848 - 9498 - f8142bf89009 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " cocostudio \ action " > <nl> + < UniqueIdentifier > { d48c8016 - e933 - 48dd - a5c0 - 202b0a84b82a } < / UniqueIdentifier > <nl> + < / Filter > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < ClCompile Include = " . . \ physics \ CCPhysicsBody . cpp " > <nl> <nl> < ClCompile Include = " . . \ base \ ccRandom . cpp " > <nl> < Filter > base < / Filter > <nl> < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCAABB . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCAnimate3D . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCAnimation3D . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCAttachNode . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCBundle3D . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCBundleReader . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCMesh . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCMeshSkin . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCOBB . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCObjLoader . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCRay . 
cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCSkeleton3D . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCSprite3D . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCSprite3DMaterial . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCSubMesh . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ 3d \ CCSubMeshState . cpp " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ assets - manager \ AssetsManager . cpp " > <nl> + < Filter > extension \ AssetsManager < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControl . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlButton . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlColourPicker . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlHuePicker . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlPotentiometer . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSlider . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlStepper . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSwitch . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlUtils . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCInvocation . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCScale9Sprite . cpp " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCScrollView . cpp " > <nl> + < Filter > extension \ GUI \ CCScrollView < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . 
\ extensions \ GUI \ CCScrollView \ CCTableView . cpp " > <nl> + < Filter > extension \ GUI \ CCScrollView < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableViewCell . cpp " > <nl> + < Filter > extension \ GUI \ CCScrollView < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBox . cpp " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBoxImplWin . cpp " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ proj . win32 \ Win32InputBox . cpp " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsDebugNode . cpp " > <nl> + < Filter > extension \ physics_nodes < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsSprite . cpp " > <nl> + < Filter > extension \ physics_nodes < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ audio \ win32 \ MciPlayer . cpp " > <nl> + < Filter > cocosdenshion \ Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ audio \ win32 \ SimpleAudioEngine . cpp " > <nl> + < Filter > cocosdenshion \ Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ network \ HttpClient . cpp " > <nl> + < Filter > network \ Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ network \ SocketIO . cpp " > <nl> + < Filter > network \ Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ network \ WebSocket . cpp " > <nl> + < Filter > network \ Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIScale9Sprite . cpp " > <nl> + < Filter > ui \ BaseClasses < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIWidget . cpp " > <nl> + < Filter > ui \ BaseClasses < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIHBox . cpp " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UILayout . cpp " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UILayoutManager . cpp " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UILayoutParameter . cpp " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIRelativeBox . cpp " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIVBox . cpp " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ CocosGUI . cpp " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIDeprecated . cpp " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIHelper . cpp " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIButton . 
cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UICheckBox . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIImageView . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UILoadingBar . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIRichText . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UISlider . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIText . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UITextAtlas . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UITextBMFont . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UITextField . cpp " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIListView . cpp " > <nl> + < Filter > ui \ UIWidgets \ ScrollWidget < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIPageView . cpp " > <nl> + < Filter > ui \ UIWidgets \ ScrollWidget < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ ui \ UIScrollView . cpp " > <nl> + < Filter > ui \ UIWidgets \ ScrollWidget < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ TriggerBase . cpp " > <nl> + < Filter > cocostudio \ trigger < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ TriggerMng . cpp " > <nl> + < Filter > cocostudio \ trigger < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ TriggerObj . cpp " > <nl> + < Filter > cocostudio \ trigger < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimeline . cpp " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCFrame . cpp " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCNodeReader . cpp " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimelineCache . cpp " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCTimeLine . cpp " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSGUIReader . cpp " > <nl> + < Filter > cocostudio \ reader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSSceneReader . cpp " > <nl> + < Filter > cocostudio \ reader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . 
\ editor - support \ cocostudio \ WidgetReader \ WidgetReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ButtonReader \ ButtonReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ButtonReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ CheckBoxReader \ CheckBoxReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ CheckBoxReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ImageViewReader \ ImageViewReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ImageViewReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LayoutReader \ LayoutReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ LayoutReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ListViewReader \ ListViewReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ListViewReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LoadingBarReader \ LoadingBarReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ LoadingBarReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ PageViewReader \ PageViewReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ PageViewReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ScrollViewReader \ ScrollViewReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ScrollViewReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ SliderReader \ SliderReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ SliderReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextAtlasReader \ TextAtlasReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextAtlasReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextBMFontReader \ TextBMFontReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextBMFontReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextFieldReader \ TextFieldReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextFieldReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextReader \ TextReader . cpp " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextReader < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CocoLoader . cpp " > <nl> + < Filter > cocostudio \ json < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ DictionaryHelper . cpp " > <nl> + < Filter > cocostudio \ json < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . 
\ editor - support \ cocostudio \ CCComAttribute . cpp " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComAudio . cpp " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComController . cpp " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCComRender . cpp " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCInputDelegate . cpp " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmature . cpp " > <nl> + < Filter > cocostudio \ armature < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCBone . cpp " > <nl> + < Filter > cocostudio \ armature < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmatureAnimation . cpp " > <nl> + < Filter > cocostudio \ armature \ animation < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCProcessBase . cpp " > <nl> + < Filter > cocostudio \ armature \ animation < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCTween . cpp " > <nl> + < Filter > cocostudio \ armature \ animation < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDatas . cpp " > <nl> + < Filter > cocostudio \ armature \ datas < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCBatchNode . cpp " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDecorativeDisplay . cpp " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDisplayFactory . cpp " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDisplayManager . cpp " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCSkin . cpp " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCColliderDetector . cpp " > <nl> + < Filter > cocostudio \ armature \ physics < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmatureDataManager . cpp " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCArmatureDefine . cpp " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCDataReaderHelper . cpp " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . 
\ editor - support \ cocostudio \ CCSpriteFrameCacheHelper . cpp " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCTransformHelp . cpp " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCUtilMath . cpp " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionFrame . cpp " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionFrameEasing . cpp " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionManagerEx . cpp " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionNode . cpp " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ editor - support \ cocostudio \ CCActionObject . cpp " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClCompile > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < ClInclude Include = " . . \ physics \ CCPhysicsBody . h " > <nl> <nl> < ClInclude Include = " . . \ base \ ccRandom . h " > <nl> < Filter > base < / Filter > <nl> < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ 3dExport . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCAABB . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCAnimate3D . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCAnimation3D . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCAnimationCurve . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCAttachNode . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCBundle3D . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCBundle3DData . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCBundleReader . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCMesh . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCMeshSkin . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCOBB . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCObjLoader . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCRay . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCSkeleton3D . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCSprite3D . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCSprite3DMaterial . 
h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCSubMesh . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ CCSubMeshState . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ 3d \ cocos3d . h " > <nl> + < Filter > 3d < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ assets - manager \ AssetsManager . h " > <nl> + < Filter > extension \ AssetsManager < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControl . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlButton . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlColourPicker . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlExtensions . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlHuePicker . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlPotentiometer . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSlider . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlStepper . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlSwitch . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCControlUtils . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCInvocation . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCControlExtension \ CCScale9Sprite . h " > <nl> + < Filter > extension \ GUI \ CCControlExtension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCScrollView . h " > <nl> + < Filter > extension \ GUI \ CCScrollView < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableView . 
h " > <nl> + < Filter > extension \ GUI \ CCScrollView < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCScrollView \ CCTableViewCell . h " > <nl> + < Filter > extension \ GUI \ CCScrollView < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBox . h " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBoxImpl . h " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ GUI \ CCEditBox \ CCEditBoxImplWin . h " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ proj . win32 \ Win32InputBox . h " > <nl> + < Filter > extension \ GUI \ CCEditBox < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsDebugNode . h " > <nl> + < Filter > extension \ physics_nodes < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ physics - nodes \ CCPhysicsSprite . h " > <nl> + < Filter > extension \ physics_nodes < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ cocos - ext . h " > <nl> + < Filter > extension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ ExtensionExport . h " > <nl> + < Filter > extension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ extensions \ ExtensionMacros . h " > <nl> + < Filter > extension < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ audio \ include \ Export . h " > <nl> + < Filter > cocosdenshion \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ audio \ win32 \ MciPlayer . h " > <nl> + < Filter > cocosdenshion \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ audio \ include \ SimpleAudioEngine . h " > <nl> + < Filter > cocosdenshion \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ network \ HttpClient . h " > <nl> + < Filter > network \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ network \ HttpRequest . h " > <nl> + < Filter > network \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ network \ HttpResponse . h " > <nl> + < Filter > network \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ network \ SocketIO . h " > <nl> + < Filter > network \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ network \ WebSocket . h " > <nl> + < Filter > network \ Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIScale9Sprite . h " > <nl> + < Filter > ui \ BaseClasses < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIWidget . h " > <nl> + < Filter > ui \ BaseClasses < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIHBox . h " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UILayout . h " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UILayoutManager . 
h " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UILayoutParameter . h " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIRelativeBox . h " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIVBox . h " > <nl> + < Filter > ui \ Layouts < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ CocosGUI . h " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ GUIExport . h " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIDeprecated . h " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIHelper . h " > <nl> + < Filter > ui \ System < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIButton . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UICheckBox . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIImageView . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UILoadingBar . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIRichText . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UISlider . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIText . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UITextAtlas . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UITextBMFont . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UITextField . h " > <nl> + < Filter > ui \ UIWidgets < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIListView . h " > <nl> + < Filter > ui \ UIWidgets \ ScrollWidget < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIPageView . h " > <nl> + < Filter > ui \ UIWidgets \ ScrollWidget < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ ui \ UIScrollView . h " > <nl> + < Filter > ui \ UIWidgets \ ScrollWidget < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CocosStudioExport . h " > <nl> + < Filter > cocostudio < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ TriggerBase . h " > <nl> + < Filter > cocostudio \ trigger < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ TriggerMng . h " > <nl> + < Filter > cocostudio \ trigger < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ TriggerObj . h " > <nl> + < Filter > cocostudio \ trigger < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimeline . 
h " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCFrame . h " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCNodeReader . h " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCActionTimelineCache . h " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCTimeLine . h " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ ActionTimeline \ CCTimelineMacro . h " > <nl> + < Filter > cocostudio \ TimelineAction < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSGUIReader . h " > <nl> + < Filter > cocostudio \ reader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSSceneReader . h " > <nl> + < Filter > cocostudio \ reader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ WidgetReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ WidgetReaderProtocol . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ButtonReader \ ButtonReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ButtonReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ CheckBoxReader \ CheckBoxReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ CheckBoxReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ImageViewReader \ ImageViewReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ImageViewReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LayoutReader \ LayoutReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ LayoutReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ListViewReader \ ListViewReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ListViewReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ LoadingBarReader \ LoadingBarReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ LoadingBarReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ PageViewReader \ PageViewReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ PageViewReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ ScrollViewReader \ ScrollViewReader . 
h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ ScrollViewReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ SliderReader \ SliderReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ SliderReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextAtlasReader \ TextAtlasReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextAtlasReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextBMFontReader \ TextBMFontReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextBMFontReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextFieldReader \ TextFieldReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextFieldReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ WidgetReader \ TextReader \ TextReader . h " > <nl> + < Filter > cocostudio \ reader \ WidgetReader \ TextReader < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CocoLoader . h " > <nl> + < Filter > cocostudio \ json < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CocoStudio . h " > <nl> + < Filter > cocostudio \ json < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ DictionaryHelper . h " > <nl> + < Filter > cocostudio \ json < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ document . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ filestream . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ prettywriter . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ rapidjson . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ reader . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ stringbuffer . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ writer . h " > <nl> + < Filter > cocostudio \ json \ rapidjson < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ internal \ pow10 . h " > <nl> + < Filter > cocostudio \ json \ rapidjson \ internal < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ internal \ stack . h " > <nl> + < Filter > cocostudio \ json \ rapidjson \ internal < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ . . \ external \ json \ internal \ strfunc . h " > <nl> + < Filter > cocostudio \ json \ rapidjson \ internal < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComAttribute . 
h " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComAudio . h " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComBase . h " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComController . h " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCComRender . h " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCInputDelegate . h " > <nl> + < Filter > cocostudio \ components < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmature . h " > <nl> + < Filter > cocostudio \ armature < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCBone . h " > <nl> + < Filter > cocostudio \ armature < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmatureAnimation . h " > <nl> + < Filter > cocostudio \ armature \ animation < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCProcessBase . h " > <nl> + < Filter > cocostudio \ armature \ animation < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCTween . h " > <nl> + < Filter > cocostudio \ armature \ animation < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDatas . h " > <nl> + < Filter > cocostudio \ armature \ datas < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCBatchNode . h " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDecorativeDisplay . h " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDisplayFactory . h " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDisplayManager . h " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSkin . h " > <nl> + < Filter > cocostudio \ armature \ display < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCColliderDetector . h " > <nl> + < Filter > cocostudio \ armature \ physics < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmatureDataManager . h " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCArmatureDefine . h " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCDataReaderHelper . 
h " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCSpriteFrameCacheHelper . h " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCTransformHelp . h " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCUtilMath . h " > <nl> + < Filter > cocostudio \ armature \ utils < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionFrame . h " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionFrameEasing . h " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionManagerEx . h " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionNode . h " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ editor - support \ cocostudio \ CCActionObject . h " > <nl> + < Filter > cocostudio \ action < / Filter > <nl> + < / ClInclude > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < None Include = " . . \ math \ Mat4 . inl " > <nl> mmm a / cocos / audio / proj . win32 / libcocosdenshion . vcxproj <nl> ppp b / cocos / audio / proj . win32 / libcocosdenshion . vcxproj <nl> <nl> < AdditionalLibraryDirectories > $ ( OutDir ) < / AdditionalLibraryDirectories > <nl> < / Link > <nl> < / ItemDefinitionGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ include \ Export . h " / > <nl> - < ClInclude Include = " . . \ win32 \ MciPlayer . h " / > <nl> - < ClInclude Include = " . . \ include \ SimpleAudioEngine . h " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ win32 \ MciPlayer . cpp " / > <nl> - < ClCompile Include = " . . \ win32 \ SimpleAudioEngine . cpp " / > <nl> - < / ItemGroup > <nl> < ItemGroup > <nl> < ProjectReference Include = " . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl> < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl> mmm a / cocos / audio / proj . win32 / libcocosdenshion . vcxproj . filters <nl> ppp b / cocos / audio / proj . win32 / libcocosdenshion . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . 
com / developer / msbuild / 2003 " > <nl> < ItemGroup > <nl> - < Filter Include = " Header Files " > <nl> - < UniqueIdentifier > { 93995380 - 89BD - 4b04 - 88EB - 625FBE52EBFB } < / UniqueIdentifier > <nl> - < Extensions > h ; hpp ; hxx ; hm ; inl ; inc ; xsd < / Extensions > <nl> - < / Filter > <nl> < Filter Include = " Resource Files " > <nl> < UniqueIdentifier > { 67DA6AB6 - F800 - 4c08 - 8B7A - 83BB121AAD01 } < / UniqueIdentifier > <nl> < Extensions > rc ; ico ; cur ; bmp ; dlg ; rc2 ; rct ; bin ; rgs ; gif ; jpg ; jpeg ; jpe ; resx < / Extensions > <nl> < / Filter > <nl> - < Filter Include = " Source Files " > <nl> - < UniqueIdentifier > { 4FC737F1 - C7A5 - 4376 - A066 - 2A32D752A2FF } < / UniqueIdentifier > <nl> - < Extensions > cpp ; c ; cc ; cxx ; def ; odl ; idl ; hpj ; bat ; asm ; asmx < / Extensions > <nl> - < / Filter > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ include \ Export . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ win32 \ MciPlayer . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ include \ SimpleAudioEngine . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ win32 \ MciPlayer . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ win32 \ SimpleAudioEngine . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> mmm a / cocos / editor - support / cocosbuilder / proj . win32 / libCocosBuilder . vcxproj . filters <nl> ppp b / cocos / editor - support / cocosbuilder / proj . win32 / libCocosBuilder . vcxproj . filters <nl> <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> < ItemGroup > <nl> < Filter Include = " Header Files " > <nl> - < UniqueIdentifier > { 67DA6AB6 - F800 - 4c08 - 8B7A - 83BB121AAD01 } < / UniqueIdentifier > <nl> - < Extensions > rc ; ico ; cur ; bmp ; dlg ; rc2 ; rct ; bin ; rgs ; gif ; jpg ; jpeg ; jpe ; resx ; tiff ; tif ; png ; wav ; mfcribbon - ms < / Extensions > <nl> + < UniqueIdentifier > { e4e3b3ff - 6d85 - 4408 - bbdf - 38f296363698 } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Source Files " > <nl> - < UniqueIdentifier > { 4FC737F1 - C7A5 - 4376 - A066 - 2A32D752A2FF } < / UniqueIdentifier > <nl> - < Extensions > cpp ; c ; cc ; cxx ; def ; odl ; idl ; hpj ; bat ; asm ; asmx < / Extensions > <nl> + < UniqueIdentifier > { e79db1b4 - c859 - 48c0 - ab1e - a31200c09923 } < / UniqueIdentifier > <nl> < / Filter > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> - < ClCompile Include = " . . \ CCBSequenceProperty . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCControlButtonLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCControlLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCLabelBMFontLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCLabelTTFLoader . 
cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCLayerColorLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCLayerGradientLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCLayerLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCMenuItemImageLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCMenuItemLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCNode + CCBRelativePositioning . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCNodeLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCNodeLoaderLibrary . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCParticleSystemQuadLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCScale9SpriteLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCScrollViewLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSpriteLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBAnimationManager . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBFileLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBKeyframe . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBReader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBSequence . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ CCParticleSystemQuadLoader . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCScale9SpriteLoader . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCScrollViewLoader . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSpriteLoader . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CocosBuilder . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> < ClInclude Include = " . . \ CCBAnimationManager . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> <nl> < ClInclude Include = " . . \ CCNodeLoaderListener . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> + < ClInclude Include = " . . \ CCParticleSystemQuadLoader . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ CCScale9SpriteLoader . 
h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ CCScrollViewLoader . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ CCSpriteLoader . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ CocosBuilder . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < / ItemGroup > <nl> + < ItemGroup > <nl> + < ClCompile Include = " . . \ CCBAnimationManager . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCBFileLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCBKeyframe . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCBReader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCBSequence . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCBSequenceProperty . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCControlButtonLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCControlLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCLabelBMFontLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCLabelTTFLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCLayerColorLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCLayerGradientLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCLayerLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCMenuItemImageLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCMenuItemLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCNode + CCBRelativePositioning . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCNodeLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCNodeLoaderLibrary . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCParticleSystemQuadLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCScale9SpriteLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCScrollViewLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCSpriteLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> mmm a / cocos / editor - support / cocosbuilder / proj . 
win32 / libcocosbuilder . vcxproj <nl>
ppp b / cocos / editor - support / cocosbuilder / proj . win32 / libcocosbuilder . vcxproj <nl>
<nl>
< / Link > <nl>
< / ItemDefinitionGroup > <nl>
< ItemGroup > <nl>
- < ClCompile Include = " . . \ CCBAnimationManager . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBFileLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBKeyframe . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBReader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBSequence . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBSequenceProperty . cpp " / > <nl>
- < ClCompile Include = " . . \ CCControlButtonLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCControlLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCLabelBMFontLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCLabelTTFLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCLayerColorLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCLayerGradientLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCLayerLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCMenuItemImageLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCMenuItemLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCNode + CCBRelativePositioning . cpp " / > <nl>
- < ClCompile Include = " . . \ CCNodeLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCNodeLoaderLibrary . cpp " / > <nl>
- < ClCompile Include = " . . \ CCParticleSystemQuadLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCScale9SpriteLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCScrollViewLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCSpriteLoader . cpp " / > <nl>
+ < ProjectReference Include = " . . \ . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl>
+ < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl>
+ < / ProjectReference > <nl>
< / ItemGroup > <nl>
< ItemGroup > <nl>
< ClInclude Include = " . . \ CCBAnimationManager . h " / > <nl>
<nl>
< ClInclude Include = " . . \ CocosBuilder . h " / > <nl>
< / ItemGroup > <nl>
< ItemGroup > <nl>
- < ProjectReference Include = " . . \ . . \ . . \ . . \ extensions \ proj . win32 \ libextension . vcxproj " > <nl>
- < Project > { 21b2c324 - 891f - 48ea - ad1a - 5ae13de12e28 } < / Project > <nl>
- < / ProjectReference > <nl>
- < ProjectReference Include = " . . \ . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl>
- < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl>
- < / ProjectReference > <nl>
+ < ClCompile Include = " . . \ CCBAnimationManager . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCBFileLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCBKeyframe . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCBReader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCBSequence . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCBSequenceProperty . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCControlButtonLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCControlLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCLabelBMFontLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCLabelTTFLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCLayerColorLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCLayerGradientLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCLayerLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCMenuItemImageLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCMenuItemLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCNode + CCBRelativePositioning . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCNodeLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCNodeLoaderLibrary . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCParticleSystemQuadLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCScale9SpriteLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCScrollViewLoader . cpp " / > <nl>
+ < ClCompile Include = " . . \ CCSpriteLoader . cpp " / > <nl>
< / ItemGroup > <nl>
< Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl>
< ImportGroup Label = " ExtensionTargets " > <nl>
mmm a / cocos / editor - support / cocostudio / proj . win32 / libcocostudio . vcxproj <nl>
ppp b / cocos / editor - support / cocostudio / proj . win32 / libcocostudio . vcxproj <nl>
<nl>
< Platform > Win32 < / Platform > <nl>
< / ProjectConfiguration > <nl>
< / ItemGroup > <nl>
- < ItemGroup > <nl>
- < ClCompile Include = " . . \ ActionTimeline \ CCActionTimeline . cpp " / > <nl>
- < ClCompile Include = " . . \ ActionTimeline \ CCFrame . cpp " / > <nl>
- < ClCompile Include = " . . \ ActionTimeline \ CCNodeReader . cpp " / > <nl>
- < ClCompile Include = " . . \ ActionTimeline \ CCActionTimelineCache . cpp " / > <nl>
- < ClCompile Include = " . . \ ActionTimeline \ CCTimeLine . cpp " / > <nl>
- < ClCompile Include = " . . \ CCActionFrame . cpp " / > <nl>
- < ClCompile Include = " . . \ CCActionFrameEasing . cpp " / > <nl>
- < ClCompile Include = " . . \ CCActionManagerEx . cpp " / > <nl>
- < ClCompile Include = " . . \ CCActionNode . cpp " / > <nl>
- < ClCompile Include = " . . \ CCActionObject . cpp " / > <nl>
- < ClCompile Include = " . . \ CCArmature . cpp " / > <nl>
- < ClCompile Include = " . . \ CCArmatureAnimation . cpp " / > <nl>
- < ClCompile Include = " . . \ CCArmatureDataManager . cpp " / > <nl>
- < ClCompile Include = " . . \ CCArmatureDefine . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBatchNode . cpp " / > <nl>
- < ClCompile Include = " . . \ CCBone . cpp " / > <nl>
- < ClCompile Include = " . . \ CCColliderDetector . cpp " / > <nl>
- < ClCompile Include = " . . \ CCComAttribute . cpp " / > <nl>
- < ClCompile Include = " . . \ CCComAudio . cpp " / > <nl>
- < ClCompile Include = " . . \ CCComController . cpp " / > <nl>
- < ClCompile Include = " . . \ CCComRender . cpp " / > <nl>
- < ClCompile Include = " . . \ CCDataReaderHelper . cpp " / > <nl>
- < ClCompile Include = " . . \ CCDatas . cpp " / > <nl>
- < ClCompile Include = " . . \ CCDecorativeDisplay . cpp " / > <nl>
- < ClCompile Include = " . . \ CCDisplayFactory . cpp " / > <nl>
- < ClCompile Include = " . . \ CCDisplayManager . cpp " / > <nl>
- < ClCompile Include = " . . \ CCInputDelegate . cpp " / > <nl>
- < ClCompile Include = " . . \ CCProcessBase . cpp " / > <nl>
- < ClCompile Include = " . . \ CCSGUIReader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCSkin . cpp " / > <nl>
- < ClCompile Include = " . . \ CCSpriteFrameCacheHelper . cpp " / > <nl>
- < ClCompile Include = " . . \ CCSSceneReader . cpp " / > <nl>
- < ClCompile Include = " . . \ CCTransformHelp . cpp " / > <nl>
- < ClCompile Include = " . . \ CCTween . cpp " / > <nl>
- < ClCompile Include = " . . \ CCUtilMath . cpp " / > <nl>
- < ClCompile Include = " . . \ CocoLoader . cpp " / > <nl>
- < ClCompile Include = " . . \ DictionaryHelper . cpp " / > <nl>
- < ClCompile Include = " . . \ TriggerBase . cpp " / > <nl>
- < ClCompile Include = " . . \ TriggerMng . cpp " / > <nl>
- < ClCompile Include = " . . \ TriggerObj . 
cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ ButtonReader \ ButtonReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ CheckBoxReader \ CheckBoxReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ ImageViewReader \ ImageViewReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ LayoutReader \ LayoutReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ ListViewReader \ ListViewReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ LoadingBarReader \ LoadingBarReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ PageViewReader \ PageViewReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ ScrollViewReader \ ScrollViewReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ SliderReader \ SliderReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextAtlasReader \ TextAtlasReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextBMFontReader \ TextBMFontReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextFieldReader \ TextFieldReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextReader \ TextReader . cpp " / > <nl> - < ClCompile Include = " . . \ WidgetReader \ WidgetReader . cpp " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ document . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ filestream . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ internal \ pow10 . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ internal \ stack . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ internal \ strfunc . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ prettywriter . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ rapidjson . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ reader . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ stringbuffer . h " / > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ writer . h " / > <nl> - < ClInclude Include = " . . \ ActionTimeline \ CCActionTimeline . h " / > <nl> - < ClInclude Include = " . . \ ActionTimeline \ CCFrame . h " / > <nl> - < ClInclude Include = " . . \ ActionTimeline \ CCNodeReader . h " / > <nl> - < ClInclude Include = " . . \ ActionTimeline \ CCActionTimelineCache . h " / > <nl> - < ClInclude Include = " . . \ ActionTimeline \ CCTimeLine . h " / > <nl> - < ClInclude Include = " . . \ ActionTimeline \ CCTimelineMacro . h " / > <nl> - < ClInclude Include = " . . \ CCActionFrame . h " / > <nl> - < ClInclude Include = " . . \ CCActionFrameEasing . h " / > <nl> - < ClInclude Include = " . . \ CCActionManagerEx . h " / > <nl> - < ClInclude Include = " . . \ CCActionNode . h " / > <nl> - < ClInclude Include = " . . \ CCActionObject . h " / > <nl> - < ClInclude Include = " . . \ CCArmature . h " / > <nl> - < ClInclude Include = " . . \ CCArmatureAnimation . h " / > <nl> - < ClInclude Include = " . . \ CCArmatureDataManager . h " / > <nl> - < ClInclude Include = " . . \ CCArmatureDefine . h " / > <nl> - < ClInclude Include = " . . \ CCBatchNode . h " / > <nl> - < ClInclude Include = " . . \ CCBone . h " / > <nl> - < ClInclude Include = " . . \ CCColliderDetector . 
h " / > <nl> - < ClInclude Include = " . . \ CCComAttribute . h " / > <nl> - < ClInclude Include = " . . \ CCComAudio . h " / > <nl> - < ClInclude Include = " . . \ CCComBase . h " / > <nl> - < ClInclude Include = " . . \ CCComController . h " / > <nl> - < ClInclude Include = " . . \ CCComRender . h " / > <nl> - < ClInclude Include = " . . \ CCDataReaderHelper . h " / > <nl> - < ClInclude Include = " . . \ CCDatas . h " / > <nl> - < ClInclude Include = " . . \ CCDecorativeDisplay . h " / > <nl> - < ClInclude Include = " . . \ CCDisplayFactory . h " / > <nl> - < ClInclude Include = " . . \ CCDisplayManager . h " / > <nl> - < ClInclude Include = " . . \ CCInputDelegate . h " / > <nl> - < ClInclude Include = " . . \ CCProcessBase . h " / > <nl> - < ClInclude Include = " . . \ CCSGUIReader . h " / > <nl> - < ClInclude Include = " . . \ CCSkin . h " / > <nl> - < ClInclude Include = " . . \ CCSpriteFrameCacheHelper . h " / > <nl> - < ClInclude Include = " . . \ CCSSceneReader . h " / > <nl> - < ClInclude Include = " . . \ CCTransformHelp . h " / > <nl> - < ClInclude Include = " . . \ CCTween . h " / > <nl> - < ClInclude Include = " . . \ CCUtilMath . h " / > <nl> - < ClInclude Include = " . . \ CocoLoader . h " / > <nl> - < ClInclude Include = " . . \ CocoStudio . h " / > <nl> - < ClInclude Include = " . . \ DictionaryHelper . h " / > <nl> - < ClInclude Include = " . . \ CocosStudioExport . h " / > <nl> - < ClInclude Include = " . . \ TriggerBase . h " / > <nl> - < ClInclude Include = " . . \ TriggerMng . h " / > <nl> - < ClInclude Include = " . . \ TriggerObj . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ ButtonReader \ ButtonReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ CheckBoxReader \ CheckBoxReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ ImageViewReader \ ImageViewReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ LayoutReader \ LayoutReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ ListViewReader \ ListViewReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ LoadingBarReader \ LoadingBarReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ PageViewReader \ PageViewReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ ScrollViewReader \ ScrollViewReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ SliderReader \ SliderReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ TextAtlasReader \ TextAtlasReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ TextBMFontReader \ TextBMFontReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ TextFieldReader \ TextFieldReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ TextReader \ TextReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ WidgetReader . h " / > <nl> - < ClInclude Include = " . . \ WidgetReader \ WidgetReaderProtocol . h " / > <nl> - < / ItemGroup > <nl> < ItemGroup > <nl> < ProjectReference Include = " . . \ . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl> < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl> < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ audio \ proj . win32 \ libcocosdenshion . vcxproj " > <nl> - < Project > { f8edd7fa - 9a51 - 4e80 - baeb - 860825d2eac6 } < / Project > <nl> - < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ . . \ ui \ proj . win32 \ libui . 
vcxproj " > <nl> < Project > { 7e06e92c - 537a - 442b - 9e4a - 4761c84f8a1a } < / Project > <nl> < / ProjectReference > <nl> mmm a / cocos / editor - support / cocostudio / proj . win32 / libcocostudio . vcxproj . filters <nl> ppp b / cocos / editor - support / cocostudio / proj . win32 / libcocostudio . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> - < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> - < ItemGroup > <nl> - < Filter Include = " action " > <nl> - < UniqueIdentifier > { d793b86c - 0905 - 4c9b - b6bc - 161e351c9eb2 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " armature " > <nl> - < UniqueIdentifier > { 855f2366 - 3429 - 4f77 - a080 - a41a39c99270 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " components " > <nl> - < UniqueIdentifier > { 1a8c6b14 - fb28 - 4485 - 8417 - 9b6838fbc4ef } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " json " > <nl> - < UniqueIdentifier > { 42f1ce3f - 46ab - 4a16 - a96e - 9eb076c873f7 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader " > <nl> - < UniqueIdentifier > { f8271f80 - 1663 - 4425 - 91c8 - 7365ec3af017 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " armature \ animation " > <nl> - < UniqueIdentifier > { 48f28446 - ffe4 - 4aa1 - a34c - 8968c3367ae6 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " armature \ datas " > <nl> - < UniqueIdentifier > { ad9b4fd1 - dc17 - 4704 - 9c60 - 7709eb916f13 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " armature \ display " > <nl> - < UniqueIdentifier > { 55c682b3 - 7a48 - 4fab - ad5a - eb979f3c305e } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " armature \ physics " > <nl> - < UniqueIdentifier > { c6464479 - e0ab - 4afc - 96fc - 1ffc73e40232 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " armature \ utils " > <nl> - < UniqueIdentifier > { 023e3440 - 1259 - 4981 - ba54 - 24390d1df447 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " json \ rapidjson " > <nl> - < UniqueIdentifier > { f1f20817 - 9232 - 4967 - a4dd - 77550e84e39b } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " json \ rapidjson \ internal " > <nl> - < UniqueIdentifier > { 5149f922 - cf29 - 4a4b - b604 - f15c6e8856b0 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " trigger " > <nl> - < UniqueIdentifier > { 59eee6d9 - bbfd - 4193 - be1d - 6c18fc38f896 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader " > <nl> - < UniqueIdentifier > { d4e605dc - d49d - 44ea - a020 - fc06445e7997 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ ButtonReader " > <nl> - < UniqueIdentifier > { 3a4d7a49 - fb6f - 4873 - b474 - 5a8e90b7fc4b } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ CheckBoxReader " > <nl> - < UniqueIdentifier > { e8fc7f81 - 1ca7 - 4501 - ba4d - 23b7b07d68d4 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ ImageViewReader " > <nl> - < UniqueIdentifier > { ab407657 - d557 - 4f8f - a983 - 770b55874863 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ LayoutReader " > <nl> - < UniqueIdentifier > { ad6968ff - 3327 
- 4c34 - b277 - 675bb417dcb5 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ ListViewReader " > <nl> - < UniqueIdentifier > { 26cc66bd - 6939 - 4983 - 851d - e75003f6d1cc } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ LoadingBarReader " > <nl> - < UniqueIdentifier > { 017a43a1 - 3837 - 4bc9 - 99f8 - 67711721e733 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ PageViewReader " > <nl> - < UniqueIdentifier > { 77596e85 - 4621 - 4601 - 89c2 - 91237f7acaff } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ ScrollViewReader " > <nl> - < UniqueIdentifier > { d5050413 - 04d0 - 492f - 9f86 - a5ef66e8b98e } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ SliderReader " > <nl> - < UniqueIdentifier > { f172d385 - 3589 - 4e88 - b4bc - 7b8dbc5829cb } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ TextAtlasReader " > <nl> - < UniqueIdentifier > { 4e608ffb - 0b02 - 44be - a032 - aac71fb3d82e } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ TextBMFontReader " > <nl> - < UniqueIdentifier > { 71a233f2 - c8a6 - 4b79 - b103 - e9b5ff2bfa09 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ TextFieldReader " > <nl> - < UniqueIdentifier > { 33f9d319 - f921 - 48f4 - 9c09 - 0a765ec782ca } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " reader \ WidgetReader \ TextReader " > <nl> - < UniqueIdentifier > { bc251d28 - 036e - 4272 - 852b - bd25fd110b33 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " TimelineAction " > <nl> - < UniqueIdentifier > { 2d371825 - 3c46 - 4901 - 850c - 3bccf6b49efc } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ CCComAttribute . cpp " > <nl> - < Filter > components < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCComAudio . cpp " > <nl> - < Filter > components < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCComController . cpp " > <nl> - < Filter > components < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCComRender . cpp " > <nl> - < Filter > components < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCInputDelegate . cpp " > <nl> - < Filter > components < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ DictionaryHelper . cpp " > <nl> - < Filter > json < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSSceneReader . cpp " > <nl> - < Filter > reader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSGUIReader . cpp " > <nl> - < Filter > reader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCTween . cpp " > <nl> - < Filter > armature \ animation < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCProcessBase . cpp " > <nl> - < Filter > armature \ animation < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCArmatureAnimation . cpp " > <nl> - < Filter > armature \ animation < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCDatas . 
cpp " > <nl> - < Filter > armature \ datas < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBatchNode . cpp " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCDecorativeDisplay . cpp " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCDisplayFactory . cpp " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCDisplayManager . cpp " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSkin . cpp " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCArmatureDataManager . cpp " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCArmatureDefine . cpp " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCDataReaderHelper . cpp " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSpriteFrameCacheHelper . cpp " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCTransformHelp . cpp " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCUtilMath . cpp " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCColliderDetector . cpp " > <nl> - < Filter > armature \ physics < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCArmature . cpp " > <nl> - < Filter > armature < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCBone . cpp " > <nl> - < Filter > armature < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCActionFrame . cpp " > <nl> - < Filter > action < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCActionFrameEasing . cpp " > <nl> - < Filter > action < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCActionManagerEx . cpp " > <nl> - < Filter > action < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCActionNode . cpp " > <nl> - < Filter > action < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCActionObject . cpp " > <nl> - < Filter > action < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ TriggerBase . cpp " > <nl> - < Filter > trigger < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ TriggerMng . cpp " > <nl> - < Filter > trigger < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ TriggerObj . cpp " > <nl> - < Filter > trigger < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ WidgetReader . cpp " > <nl> - < Filter > reader \ WidgetReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ ButtonReader \ ButtonReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ ButtonReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ CheckBoxReader \ CheckBoxReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ CheckBoxReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ ImageViewReader \ ImageViewReader . 
cpp " > <nl> - < Filter > reader \ WidgetReader \ ImageViewReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ LayoutReader \ LayoutReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ LayoutReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ ListViewReader \ ListViewReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ ListViewReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ LoadingBarReader \ LoadingBarReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ LoadingBarReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextReader \ TextReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ TextReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextFieldReader \ TextFieldReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ TextFieldReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextBMFontReader \ TextBMFontReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ TextBMFontReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ TextAtlasReader \ TextAtlasReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ TextAtlasReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ SliderReader \ SliderReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ SliderReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ ScrollViewReader \ ScrollViewReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ ScrollViewReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WidgetReader \ PageViewReader \ PageViewReader . cpp " > <nl> - < Filter > reader \ WidgetReader \ PageViewReader < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CocoLoader . cpp " > <nl> - < Filter > json < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ ActionTimeline \ CCNodeReader . cpp " > <nl> - < Filter > TimelineAction < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ ActionTimeline \ CCActionTimelineCache . cpp " > <nl> - < Filter > TimelineAction < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ ActionTimeline \ CCActionTimeline . cpp " > <nl> - < Filter > TimelineAction < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ ActionTimeline \ CCFrame . cpp " > <nl> - < Filter > TimelineAction < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ ActionTimeline \ CCTimeLine . cpp " > <nl> - < Filter > TimelineAction < / Filter > <nl> - < / ClCompile > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ CCComAttribute . h " > <nl> - < Filter > components < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCComAudio . h " > <nl> - < Filter > components < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCComController . h " > <nl> - < Filter > components < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCComRender . h " > <nl> - < Filter > components < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCInputDelegate . h " > <nl> - < Filter > components < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ DictionaryHelper . 
h " > <nl> - < Filter > json < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSSceneReader . h " > <nl> - < Filter > reader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSGUIReader . h " > <nl> - < Filter > reader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCTween . h " > <nl> - < Filter > armature \ animation < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCProcessBase . h " > <nl> - < Filter > armature \ animation < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCArmatureAnimation . h " > <nl> - < Filter > armature \ animation < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCDatas . h " > <nl> - < Filter > armature \ datas < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCBatchNode . h " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCDecorativeDisplay . h " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCDisplayFactory . h " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCDisplayManager . h " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSkin . h " > <nl> - < Filter > armature \ display < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCArmatureDataManager . h " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCArmatureDefine . h " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCDataReaderHelper . h " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSpriteFrameCacheHelper . h " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCTransformHelp . h " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCUtilMath . h " > <nl> - < Filter > armature \ utils < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCColliderDetector . h " > <nl> - < Filter > armature \ physics < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCArmature . h " > <nl> - < Filter > armature < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCBone . h " > <nl> - < Filter > armature < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ document . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ filestream . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ prettywriter . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ rapidjson . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ reader . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . 
\ external \ json \ stringbuffer . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ writer . h " > <nl> - < Filter > json \ rapidjson < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ internal \ pow10 . h " > <nl> - < Filter > json \ rapidjson \ internal < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ internal \ stack . h " > <nl> - < Filter > json \ rapidjson \ internal < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ . . \ . . \ . . \ external \ json \ internal \ strfunc . h " > <nl> - < Filter > json \ rapidjson \ internal < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCActionFrame . h " > <nl> - < Filter > action < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCActionFrameEasing . h " > <nl> - < Filter > action < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCActionManagerEx . h " > <nl> - < Filter > action < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCActionNode . h " > <nl> - < Filter > action < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCActionObject . h " > <nl> - < Filter > action < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ TriggerBase . h " > <nl> - < Filter > trigger < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ TriggerMng . h " > <nl> - < Filter > trigger < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ TriggerObj . h " > <nl> - < Filter > trigger < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CCComBase . h " > <nl> - < Filter > components < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ WidgetReader . h " > <nl> - < Filter > reader \ WidgetReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ WidgetReaderProtocol . h " > <nl> - < Filter > reader \ WidgetReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ ButtonReader \ ButtonReader . h " > <nl> - < Filter > reader \ WidgetReader \ ButtonReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ CheckBoxReader \ CheckBoxReader . h " > <nl> - < Filter > reader \ WidgetReader \ CheckBoxReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ ImageViewReader \ ImageViewReader . h " > <nl> - < Filter > reader \ WidgetReader \ ImageViewReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ LayoutReader \ LayoutReader . h " > <nl> - < Filter > reader \ WidgetReader \ LayoutReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ ListViewReader \ ListViewReader . h " > <nl> - < Filter > reader \ WidgetReader \ ListViewReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ LoadingBarReader \ LoadingBarReader . h " > <nl> - < Filter > reader \ WidgetReader \ LoadingBarReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WidgetReader \ TextReader \ TextReader . h " > <nl> - < Filter > reader \ WidgetReader \ TextReader < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . 
\ WidgetReader \ TextFieldReader \ TextFieldReader . h " > <nl>
- < Filter > reader \ WidgetReader \ TextFieldReader < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ WidgetReader \ TextBMFontReader \ TextBMFontReader . h " > <nl>
- < Filter > reader \ WidgetReader \ TextBMFontReader < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ WidgetReader \ TextAtlasReader \ TextAtlasReader . h " > <nl>
- < Filter > reader \ WidgetReader \ TextAtlasReader < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ WidgetReader \ SliderReader \ SliderReader . h " > <nl>
- < Filter > reader \ WidgetReader \ SliderReader < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ WidgetReader \ ScrollViewReader \ ScrollViewReader . h " > <nl>
- < Filter > reader \ WidgetReader \ ScrollViewReader < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ WidgetReader \ PageViewReader \ PageViewReader . h " > <nl>
- < Filter > reader \ WidgetReader \ PageViewReader < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ CocoLoader . h " > <nl>
- < Filter > json < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ CocoStudio . h " > <nl>
- < Filter > json < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ ActionTimeline \ CCNodeReader . h " > <nl>
- < Filter > TimelineAction < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ ActionTimeline \ CCActionTimelineCache . h " > <nl>
- < Filter > TimelineAction < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ ActionTimeline \ CCActionTimeline . h " > <nl>
- < Filter > TimelineAction < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ ActionTimeline \ CCFrame . h " > <nl>
- < Filter > TimelineAction < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ ActionTimeline \ CCTimeLine . h " > <nl>
- < Filter > TimelineAction < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ ActionTimeline \ CCTimelineMacro . h " > <nl>
- < Filter > TimelineAction < / Filter > <nl>
- < / ClInclude > <nl>
- < ClInclude Include = " . . \ CocosStudioExport . h " / > <nl>
- < / ItemGroup > <nl>
- < / Project > <nl>
\ No newline at end of file <nl>
+ < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " / > <nl>
\ No newline at end of file <nl>
mmm a / cocos / editor - support / spine / proj . win32 / libSpine . vcxproj <nl>
ppp b / cocos / editor - support / spine / proj . win32 / libSpine . vcxproj <nl>
<nl>
< / ProjectConfiguration > <nl>
< / ItemGroup > <nl>
< ItemGroup > <nl>
- < ClInclude Include = " . . \ Animation . h " / > <nl>
- < ClInclude Include = " . . \ AnimationState . h " / > <nl>
- < ClInclude Include = " . . \ AnimationStateData . h " / > <nl>
- < ClInclude Include = " . . \ Atlas . h " / > <nl>
- < ClInclude Include = " . . \ AtlasAttachmentLoader . h " / > <nl>
- < ClInclude Include = " . . \ Attachment . h " / > <nl>
- < ClInclude Include = " . . \ AttachmentLoader . h " / > <nl>
- < ClInclude Include = " . . \ Bone . h " / > <nl>
- < ClInclude Include = " . . \ BoneData . h " / > <nl>
- < ClInclude Include = " . . \ BoundingBoxAttachment . h " / > <nl>
- < ClInclude Include = " . . \ CCSkeleton . h " / > <nl>
- < ClInclude Include = " . . \ CCSkeletonAnimation . h " / > <nl>
- < ClInclude Include = " . . \ extension . h " / > <nl>
- < ClInclude Include = " . . \ Event . h " / > <nl>
- < ClInclude Include = " . . \ EventData . h " / > <nl>
- < ClInclude Include = " . . \ Json . h " / > <nl>
- < ClInclude Include = " . . \ RegionAttachment . h " / > <nl>
- < ClInclude Include = " . . \ Skeleton . h " / > <nl>
- < ClInclude Include = " . . \ SkeletonBounds . h " / > <nl>
- < ClInclude Include = " . . \ SkeletonData . h " / > <nl>
- < ClInclude Include = " . . \ SkeletonJson . h " / > <nl>
- < ClInclude Include = " . . \ Skin . h " / > <nl>
- < ClInclude Include = " . . \ Slot . h " / > <nl>
- < ClInclude Include = " . . \ SlotData . h " / > <nl>
- < ClInclude Include = " . . \ spine - cocos2dx . h " / > <nl>
- < ClInclude Include = " . . \ spine . h " / > <nl>
+ < ProjectReference Include = " . . \ . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl>
+ < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl>
+ < / ProjectReference > <nl>
< / ItemGroup > <nl>
< ItemGroup > <nl>
< ClCompile Include = " . . \ Animation . cpp " / > <nl>
<nl>
< ClCompile Include = " . . \ BoundingBoxAttachment . cpp " / > <nl>
< ClCompile Include = " . . \ CCSkeleton . cpp " / > <nl>
< ClCompile Include = " . . \ CCSkeletonAnimation . cpp " / > <nl>
- < ClCompile Include = " . . \ extension . cpp " / > <nl>
< ClCompile Include = " . . \ Event . cpp " / > <nl>
< ClCompile Include = " . . \ EventData . cpp " / > <nl>
+ < ClCompile Include = " . . \ extension . cpp " / > <nl>
< ClCompile Include = " . . \ Json . cpp " / > <nl>
< ClCompile Include = " . . \ RegionAttachment . cpp " / > <nl>
< ClCompile Include = " . . \ Skeleton . cpp " / > <nl>
<nl>
< ClCompile Include = " . . \ spine - cocos2dx . cpp " / > <nl>
< / ItemGroup > <nl>
< ItemGroup > <nl>
- < ProjectReference Include = " . . \ . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl>
- < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl>
- < / ProjectReference > <nl>
+ < ClInclude Include = " . . \ Animation . h " / > <nl>
+ < ClInclude Include = " . . \ AnimationState . h " / > <nl>
+ < ClInclude Include = " . . \ AnimationStateData . h " / > <nl>
+ < ClInclude Include = " . . \ Atlas . h " / > <nl>
+ < ClInclude Include = " . . \ AtlasAttachmentLoader . h " / > <nl>
+ < ClInclude Include = " . . \ Attachment . h " / > <nl>
+ < ClInclude Include = " . . \ AttachmentLoader . h " / > <nl>
+ < ClInclude Include = " . . \ Bone . h " / > <nl>
+ < ClInclude Include = " . . \ BoneData . h " / > <nl>
+ < ClInclude Include = " . . \ BoundingBoxAttachment . h " / > <nl>
+ < ClInclude Include = " . . \ CCSkeleton . h " / > <nl>
+ < ClInclude Include = " . . \ CCSkeletonAnimation . h " / > <nl>
+ < ClInclude Include = " . . \ Event . h " / > <nl>
+ < ClInclude Include = " . . \ EventData . h " / > <nl>
+ < ClInclude Include = " . . \ extension . h " / > <nl>
+ < ClInclude Include = " . . \ Json . h " / > <nl>
+ < ClInclude Include = " . . \ RegionAttachment . h " / > <nl>
+ < ClInclude Include = " . . \ Skeleton . h " / > <nl>
+ < ClInclude Include = " . . \ SkeletonBounds . h " / > <nl>
+ < ClInclude Include = " . . \ SkeletonData . h " / > <nl>
+ < ClInclude Include = " . . \ SkeletonJson . h " / > <nl>
+ < ClInclude Include = " . . \ Skin . h " / > <nl>
+ < ClInclude Include = " . . \ Slot . h " / > <nl>
+ < ClInclude Include = " . . \ SlotData . h " / > <nl>
+ < ClInclude Include = " . . \ spine - cocos2dx . h " / > <nl>
+ < ClInclude Include = " . . \ spine . 
h " / > <nl> < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectGuid > { B7C2A162 - DEC9 - 4418 - 972E - 240AB3CBFCAE } < / ProjectGuid > <nl> mmm a / cocos / editor - support / spine / proj . win32 / libSpine . vcxproj . filters <nl> ppp b / cocos / editor - support / spine / proj . win32 / libSpine . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> < ItemGroup > <nl> - < Filter Include = " Header Files " > <nl> - < UniqueIdentifier > { 67DA6AB6 - F800 - 4c08 - 8B7A - 83BB121AAD01 } < / UniqueIdentifier > <nl> - < Extensions > rc ; ico ; cur ; bmp ; dlg ; rc2 ; rct ; bin ; rgs ; gif ; jpg ; jpeg ; jpe ; resx ; tiff ; tif ; png ; wav ; mfcribbon - ms < / Extensions > <nl> - < / Filter > <nl> < Filter Include = " Source Files " > <nl> - < UniqueIdentifier > { 4FC737F1 - C7A5 - 4376 - A066 - 2A32D752A2FF } < / UniqueIdentifier > <nl> - < Extensions > cpp ; c ; cc ; cxx ; def ; odl ; idl ; hpj ; bat ; asm ; asmx < / Extensions > <nl> + < UniqueIdentifier > { 3e21d10b - 4a29 - 49c0 - b21a - fa573a4e65c1 } < / UniqueIdentifier > <nl> + < / Filter > <nl> + < Filter Include = " Header Files " > <nl> + < UniqueIdentifier > { ca7b23a8 - 6f11 - 4059 - b242 - ba5102033974 } < / UniqueIdentifier > <nl> < / Filter > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> - < ClInclude Include = " . . \ SlotData . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ spine . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ spine - cocos2dx . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> + < ClCompile Include = " . . \ Animation . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ AnimationState . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ AnimationStateData . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Atlas . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ AtlasAttachmentLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Attachment . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ AttachmentLoader . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Bone . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ BoneData . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ BoundingBoxAttachment . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCSkeleton . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ CCSkeletonAnimation . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Event . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ EventData . 
cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ extension . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Json . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ RegionAttachment . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Skeleton . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ SkeletonBounds . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ SkeletonData . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ SkeletonJson . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Skin . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ Slot . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ SlotData . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . \ spine - cocos2dx . cpp " > <nl> + < Filter > Source Files < / Filter > <nl> + < / ClCompile > <nl> + < / ItemGroup > <nl> + < ItemGroup > <nl> < ClInclude Include = " . . \ Animation . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> <nl> < ClInclude Include = " . . \ BoundingBoxAttachment . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < ClInclude Include = " . . \ Event . h " > <nl> + < ClInclude Include = " . . \ CCSkeleton . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < ClInclude Include = " . . \ EventData . h " > <nl> + < ClInclude Include = " . . \ CCSkeletonAnimation . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSkeleton . h " > <nl> + < ClInclude Include = " . . \ Event . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < ClInclude Include = " . . \ CCSkeletonAnimation . h " > <nl> + < ClInclude Include = " . . \ EventData . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> < ClInclude Include = " . . \ extension . h " > <nl> <nl> < ClInclude Include = " . . \ Skeleton . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < ClInclude Include = " . . \ SkeletonData . h " > <nl> + < ClInclude Include = " . . \ SkeletonBounds . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < ClInclude Include = " . . \ SkeletonBounds . h " > <nl> + < ClInclude Include = " . . \ SkeletonData . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> < ClInclude Include = " . . \ SkeletonJson . h " > <nl> <nl> < ClInclude Include = " . . \ Slot . h " > <nl> < Filter > Header Files < / Filter > <nl> < / ClInclude > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ Skeleton . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ SkeletonBounds . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ SkeletonData . 
cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ SkeletonJson . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Skin . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Slot . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ SlotData . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ spine - cocos2dx . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Animation . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ AnimationState . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ AnimationStateData . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Atlas . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ AtlasAttachmentLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Attachment . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ AttachmentLoader . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Bone . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ BoundingBoxAttachment . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Event . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ EventData . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSkeleton . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CCSkeletonAnimation . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ extension . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ Json . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ RegionAttachment . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ BoneData . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> + < ClInclude Include = " . . \ SlotData . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ spine - cocos2dx . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ spine . h " > <nl> + < Filter > Header Files < / Filter > <nl> + < / ClInclude > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> mmm a / cocos / network / proj . win32 / libnetwork . vcxproj <nl> ppp b / cocos / network / proj . win32 / libnetwork . 
vcxproj <nl> <nl> < Platform > Win32 < / Platform > <nl> < / ProjectConfiguration > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ HttpClient . h " / > <nl> - < ClInclude Include = " . . \ HttpRequest . h " / > <nl> - < ClInclude Include = " . . \ HttpResponse . h " / > <nl> - < ClInclude Include = " . . \ SocketIO . h " / > <nl> - < ClInclude Include = " . . \ WebSocket . h " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ HttpClient . cpp " / > <nl> - < ClCompile Include = " . . \ SocketIO . cpp " / > <nl> - < ClCompile Include = " . . \ WebSocket . cpp " / > <nl> - < / ItemGroup > <nl> < ItemGroup > <nl> < ProjectReference Include = " . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl> < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl> mmm a / cocos / network / proj . win32 / libnetwork . vcxproj . filters <nl> ppp b / cocos / network / proj . win32 / libnetwork . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> < ItemGroup > <nl> - < Filter Include = " Header Files " > <nl> - < UniqueIdentifier > { 93995380 - 89BD - 4b04 - 88EB - 625FBE52EBFB } < / UniqueIdentifier > <nl> - < Extensions > h ; hpp ; hxx ; hm ; inl ; inc ; xsd < / Extensions > <nl> - < / Filter > <nl> < Filter Include = " Resource Files " > <nl> < UniqueIdentifier > { 67DA6AB6 - F800 - 4c08 - 8B7A - 83BB121AAD01 } < / UniqueIdentifier > <nl> < Extensions > rc ; ico ; cur ; bmp ; dlg ; rc2 ; rct ; bin ; rgs ; gif ; jpg ; jpeg ; jpe ; resx < / Extensions > <nl> < / Filter > <nl> - < Filter Include = " Source Files " > <nl> - < UniqueIdentifier > { 4FC737F1 - C7A5 - 4376 - A066 - 2A32D752A2FF } < / UniqueIdentifier > <nl> - < Extensions > cpp ; c ; cc ; cxx ; def ; odl ; idl ; hpj ; bat ; asm ; asmx < / Extensions > <nl> - < / Filter > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ HttpClient . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ HttpRequest . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ HttpResponse . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ SocketIO . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ WebSocket . h " > <nl> - < Filter > Header Files < / Filter > <nl> - < / ClInclude > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ HttpClient . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ SocketIO . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ WebSocket . cpp " > <nl> - < Filter > Source Files < / Filter > <nl> - < / ClCompile > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> mmm a / cocos / scripting / lua - bindings / proj . win32 / libluacocos3d . vcxproj <nl> ppp b / cocos / scripting / lua - bindings / proj . win32 / libluacocos3d . vcxproj <nl> <nl> < ClInclude Include = " . . \ auto \ lua_cocos2dx_3d_auto . hpp " / > <nl> < ClInclude Include = " . . \ manual \ 3d \ lua_cocos2dx_3d_manual . h " / > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . 
\ . . \ . . \ 3d \ proj . win32 \ libcocos3d . vcxproj " > <nl> - < Project > { e24950fa - 5bc1 - 4aee - a900 - 4f0259354bf0 } < / Project > <nl> - < / ProjectReference > <nl> - < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectGuid > { 06840490 - 14A4 - 43D6 - 88BC - AAFA44D043EB } < / ProjectGuid > <nl> < RootNamespace > libluacocos3d < / RootNamespace > <nl> mmm a / cocos / scripting / lua - bindings / proj . win32 / libluacocosdenshion . vcxproj <nl> ppp b / cocos / scripting / lua - bindings / proj . win32 / libluacocosdenshion . vcxproj <nl> <nl> < None Include = " . . \ script \ DeprecatedCocosDenshionClass . lua " / > <nl> < None Include = " . . \ script \ DeprecatedCocosDenshionFunc . lua " / > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . \ . . \ . . \ audio \ proj . win32 \ libcocosdenshion . vcxproj " > <nl> - < Project > { f8edd7fa - 9a51 - 4e80 - baeb - 860825d2eac6 } < / Project > <nl> - < / ProjectReference > <nl> - < / ItemGroup > <nl> < ItemGroup > <nl> < ClCompile Include = " . . \ auto \ lua_cocos2dx_cocosdenshion_auto . cpp " / > <nl> < ClCompile Include = " . . \ manual \ cocosdenshion \ lua_cocos2dx_cocosdenshion_manual . cpp " / > <nl> mmm a / cocos / scripting / lua - bindings / proj . win32 / libluacocostudio . vcxproj <nl> ppp b / cocos / scripting / lua - bindings / proj . win32 / libluacocostudio . vcxproj <nl> <nl> < None Include = " . . \ script \ DeprecatedCocoStudioFunc . lua " / > <nl> < None Include = " . . \ script \ StudioConstants . lua " / > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . \ . . \ . . \ editor - support \ cocostudio \ proj . win32 \ libcocostudio . vcxproj " > <nl> - < Project > { b57cf53f - 2e49 - 4031 - 9822 - 047cc0e6bde2 } < / Project > <nl> - < / ProjectReference > <nl> - < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectGuid > { 9640951E - C5D0 - 47B5 - 9C42 - 0BCADA261C50 } < / ProjectGuid > <nl> < RootNamespace > libluacocostudio < / RootNamespace > <nl> mmm a / cocos / scripting / lua - bindings / proj . win32 / libluaextension . vcxproj <nl> ppp b / cocos / scripting / lua - bindings / proj . win32 / libluaextension . vcxproj <nl> <nl> < None Include = " . . \ script \ DeprecatedExtensionFunc . lua " / > <nl> < None Include = " . . \ script \ ExtensionConstants . lua " / > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . \ . . \ . . \ . . \ extensions \ proj . win32 \ libextension . vcxproj " > <nl> - < Project > { 21b2c324 - 891f - 48ea - ad1a - 5ae13de12e28 } < / Project > <nl> - < / ProjectReference > <nl> - < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectGuid > { 7068296E - 38A4 - 4BCA - 85DB - 3A09BD014847 } < / ProjectGuid > <nl> < RootNamespace > libluaextension < / RootNamespace > <nl> mmm a / cocos / scripting / lua - bindings / proj . win32 / libluanetwork . vcxproj <nl> ppp b / cocos / scripting / lua - bindings / proj . win32 / libluanetwork . vcxproj <nl> <nl> < ClInclude Include = " . . \ manual \ network \ Lua_web_socket . h " / > <nl> < ClInclude Include = " . . \ manual \ network \ lua_xml_http_request . h " / > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . \ . . \ . . \ network \ proj . win32 \ libNetwork . 
vcxproj " > <nl> - < Project > { df2638c0 - 8128 - 4847 - 867c - 6eafe3dee7b5 } < / Project > <nl> - < / ProjectReference > <nl> - < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectGuid > { 65E52F4B - 703F - 419E - AD67 - 926241A10042 } < / ProjectGuid > <nl> < RootNamespace > libluacocosdenshion < / RootNamespace > <nl> mmm a / cocos / scripting / lua - bindings / proj . win32 / libluaui . vcxproj <nl> ppp b / cocos / scripting / lua - bindings / proj . win32 / libluaui . vcxproj <nl> <nl> < None Include = " . . \ script \ DeprecatedUIFunc . lua " / > <nl> < None Include = " . . \ script \ GuiConstants . lua " / > <nl> < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . \ . . \ . . \ ui \ proj . win32 \ libui . vcxproj " > <nl> - < Project > { 7e06e92c - 537a - 442b - 9e4a - 4761c84f8a1a } < / Project > <nl> - < / ProjectReference > <nl> - < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectGuid > { FE78EEBB - 3DBB - 4713 - 8CBF - 63D742C5BD82 } < / ProjectGuid > <nl> < RootNamespace > libluaui < / RootNamespace > <nl> mmm a / cocos / ui / proj . win32 / libui . vcxproj <nl> ppp b / cocos / ui / proj . win32 / libui . vcxproj <nl> <nl> < / ProjectConfiguration > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> - < ClInclude Include = " . . \ CocosGUI . h " / > <nl> - < ClInclude Include = " . . \ GUIExport . h " / > <nl> - < ClInclude Include = " . . \ UIButton . h " / > <nl> - < ClInclude Include = " . . \ UICheckBox . h " / > <nl> - < ClInclude Include = " . . \ UIDeprecated . h " / > <nl> - < ClInclude Include = " . . \ UIHBox . h " / > <nl> - < ClInclude Include = " . . \ UIHelper . h " / > <nl> - < ClInclude Include = " . . \ UIImageView . h " / > <nl> - < ClInclude Include = " . . \ UILayout . h " / > <nl> - < ClInclude Include = " . . \ UILayoutManager . h " / > <nl> - < ClInclude Include = " . . \ UILayoutParameter . h " / > <nl> - < ClInclude Include = " . . \ UIListView . h " / > <nl> - < ClInclude Include = " . . \ UILoadingBar . h " / > <nl> - < ClInclude Include = " . . \ UIPageView . h " / > <nl> - < ClInclude Include = " . . \ UIRelativeBox . h " / > <nl> - < ClInclude Include = " . . \ UIRichText . h " / > <nl> - < ClInclude Include = " . . \ UIScale9Sprite . h " / > <nl> - < ClInclude Include = " . . \ UIScrollView . h " / > <nl> - < ClInclude Include = " . . \ UISlider . h " / > <nl> - < ClInclude Include = " . . \ UIText . h " / > <nl> - < ClInclude Include = " . . \ UITextAtlas . h " / > <nl> - < ClInclude Include = " . . \ UITextBMFont . h " / > <nl> - < ClInclude Include = " . . \ UITextField . h " / > <nl> - < ClInclude Include = " . . \ UIVBox . h " / > <nl> - < ClInclude Include = " . . \ UIWidget . h " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ CocosGUI . cpp " / > <nl> - < ClCompile Include = " . . \ UIButton . cpp " / > <nl> - < ClCompile Include = " . . \ UICheckBox . cpp " / > <nl> - < ClCompile Include = " . . \ UIDeprecated . cpp " / > <nl> - < ClCompile Include = " . . \ UIHBox . cpp " / > <nl> - < ClCompile Include = " . . \ UIHelper . cpp " / > <nl> - < ClCompile Include = " . . \ UIImageView . cpp " / > <nl> - < ClCompile Include = " . . \ UILayout . cpp " / > <nl> - < ClCompile Include = " . . \ UILayoutManager . cpp " / > <nl> - < ClCompile Include = " . . \ UILayoutParameter . cpp " / > <nl> - < ClCompile Include = " . . \ UIListView . cpp " / > <nl> - < ClCompile Include = " . . \ UILoadingBar . cpp " / > <nl> - < ClCompile Include = " . . 
\ UIPageView . cpp " / > <nl> - < ClCompile Include = " . . \ UIRelativeBox . cpp " / > <nl> - < ClCompile Include = " . . \ UIRichText . cpp " / > <nl> - < ClCompile Include = " . . \ UIScale9Sprite . cpp " / > <nl> - < ClCompile Include = " . . \ UIScrollView . cpp " / > <nl> - < ClCompile Include = " . . \ UISlider . cpp " / > <nl> - < ClCompile Include = " . . \ UIText . cpp " / > <nl> - < ClCompile Include = " . . \ UITextAtlas . cpp " / > <nl> - < ClCompile Include = " . . \ UITextBMFont . cpp " / > <nl> - < ClCompile Include = " . . \ UITextField . cpp " / > <nl> - < ClCompile Include = " . . \ UIVBox . cpp " / > <nl> - < ClCompile Include = " . . \ UIWidget . cpp " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ProjectReference Include = " . . \ . . \ . . \ extensions \ proj . win32 \ libextension . vcxproj " > <nl> - < Project > { 21b2c324 - 891f - 48ea - ad1a - 5ae13de12e28 } < / Project > <nl> - < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ 2d \ libcocos2d . vcxproj " > <nl> < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl> < / ProjectReference > <nl> mmm a / cocos / ui / proj . win32 / libui . vcxproj . filters <nl> ppp b / cocos / ui / proj . win32 / libui . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> - < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> - < ItemGroup > <nl> - < Filter Include = " BaseClasses " > <nl> - < UniqueIdentifier > { e31ab7d3 - b8b2 - 467f - 9e08 - fd5fe168b491 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " Layouts " > <nl> - < UniqueIdentifier > { f9d13563 - 9e5e - 4b35 - b0e7 - d41f587efa42 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " System " > <nl> - < UniqueIdentifier > { ed8a2ae0 - 5690 - 4d0d - 829b - 7c07164c0597 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " UIWidgets " > <nl> - < UniqueIdentifier > { 5f6e9e52 - fbe7 - 4073 - ac71 - 98632f9e6781 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " UIWidgets \ ScrollWidget " > <nl> - < UniqueIdentifier > { b59b178a - b7e0 - 4826 - ba07 - 44c46cd29a10 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ UIScrollView . h " > <nl> - < Filter > UIWidgets \ ScrollWidget < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIListView . h " > <nl> - < Filter > UIWidgets \ ScrollWidget < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIPageView . h " > <nl> - < Filter > UIWidgets \ ScrollWidget < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIButton . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UICheckBox . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIImageView . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UISlider . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UITextField . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UILoadingBar . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIHelper . 
h " > <nl> - < Filter > System < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ CocosGUI . h " > <nl> - < Filter > System < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIWidget . h " > <nl> - < Filter > BaseClasses < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UILayout . h " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UILayoutParameter . h " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UITextBMFont . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIText . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UITextAtlas . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIRichText . h " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIHBox . h " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIRelativeBox . h " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIVBox . h " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UILayoutManager . h " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIDeprecated . h " > <nl> - < Filter > System < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUIExport . h " > <nl> - < Filter > System < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ UIScale9Sprite . h " > <nl> - < Filter > BaseClasses < / Filter > <nl> - < / ClInclude > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ UIScrollView . cpp " > <nl> - < Filter > UIWidgets \ ScrollWidget < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIListView . cpp " > <nl> - < Filter > UIWidgets \ ScrollWidget < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIPageView . cpp " > <nl> - < Filter > UIWidgets \ ScrollWidget < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIButton . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UICheckBox . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIImageView . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UISlider . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UITextField . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UILoadingBar . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIHelper . cpp " > <nl> - < Filter > System < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ CocosGUI . cpp " > <nl> - < Filter > System < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIWidget . cpp " > <nl> - < Filter > BaseClasses < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UILayout . cpp " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . 
\ UILayoutParameter . cpp " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UITextBMFont . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIText . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UITextAtlas . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIRichText . cpp " > <nl> - < Filter > UIWidgets < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIHBox . cpp " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIRelativeBox . cpp " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIVBox . cpp " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UILayoutManager . cpp " > <nl> - < Filter > Layouts < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIDeprecated . cpp " > <nl> - < Filter > System < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ UIScale9Sprite . cpp " > <nl> - < Filter > BaseClasses < / Filter > <nl> - < / ClCompile > <nl> - < / ItemGroup > <nl> - < / Project > <nl> \ No newline at end of file <nl> + < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " / > <nl> \ No newline at end of file <nl> mmm a / extensions / proj . win32 / libextension . vcxproj <nl> ppp b / extensions / proj . win32 / libextension . vcxproj <nl> <nl> < AdditionalDependencies > libcocos2d . lib ; opengl32 . lib ; glew32 . lib ; libcurl_imp . lib ; libchipmunk . lib ; libBox2D . lib ; glfw3 . lib ; % ( AdditionalDependencies ) < / AdditionalDependencies > <nl> < / Link > <nl> < / ItemDefinitionGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ assets - manager \ AssetsManager . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControl . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlButton . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlColourPicker . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlHuePicker . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlPotentiometer . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlSlider . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlStepper . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlSwitch . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlUtils . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCInvocation . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCScale9Sprite . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCEditBox \ CCEditBox . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCEditBox \ CCEditBoxImplWin . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCScrollView \ CCScrollView . cpp " / > <nl> - < ClCompile Include = " . . \ GUI \ CCScrollView \ CCTableView . cpp " / > <nl> - < ClCompile Include = " . . 
\ GUI \ CCScrollView \ CCTableViewCell . cpp " / > <nl> - < ClCompile Include = " . . \ physics - nodes \ CCPhysicsDebugNode . cpp " / > <nl> - < ClCompile Include = " . . \ physics - nodes \ CCPhysicsSprite . cpp " / > <nl> - < ClCompile Include = " Win32InputBox . cpp " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ assets - manager \ AssetsManager . h " / > <nl> - < ClInclude Include = " . . \ ExtensionExport . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControl . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlButton . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlColourPicker . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlExtensions . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlHuePicker . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlPotentiometer . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlSlider . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlStepper . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlSwitch . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlUtils . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCInvocation . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCScale9Sprite . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCEditBox \ CCEditBox . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCEditBox \ CCEditBoxImpl . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCEditBox \ CCEditBoxImplWin . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCScrollView \ CCScrollView . h " / > <nl> - < ClInclude Include = " . . \ cocos - ext . h " / > <nl> - < ClInclude Include = " . . \ ExtensionMacros . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCScrollView \ CCTableView . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCScrollView \ CCTableViewCell . h " / > <nl> - < ClInclude Include = " . . \ physics - nodes \ CCPhysicsDebugNode . h " / > <nl> - < ClInclude Include = " . . \ physics - nodes \ CCPhysicsSprite . h " / > <nl> - < ClInclude Include = " Win32InputBox . h " / > <nl> - < / ItemGroup > <nl> < ItemGroup > <nl> < ProjectReference Include = " . . \ . . \ cocos \ 2d \ libcocos2d . vcxproj " > <nl> < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl> mmm a / extensions / proj . win32 / libextension . vcxproj . filters <nl> ppp b / extensions / proj . win32 / libextension . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> - < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . 
com / developer / msbuild / 2003 " > <nl> - < ItemGroup > <nl> - < Filter Include = " GUI " > <nl> - < UniqueIdentifier > { 202b519b - b5e0 - 499f - b3b8 - ed5da144b248 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " GUI \ CCControlExtension " > <nl> - < UniqueIdentifier > { c07abd14 - e9dd - 4e2d - 85c4 - a180070161b4 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " GUI \ CCScrollView " > <nl> - < UniqueIdentifier > { 46797895 - f71d - 4ddb - b381 - d0884e678d39 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " physics_nodes " > <nl> - < UniqueIdentifier > { d5806151 - 7ae1 - 4fef - af5a - 2fa1d1c7377b } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " GUI \ CCEditBox " > <nl> - < UniqueIdentifier > { 5d186e3d - 0aaf - 4904 - a5d8 - e5cb0f35f4cc } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " AssetsManager " > <nl> - < UniqueIdentifier > { 49487dbe - 5758 - 436a - b014 - 8e2edc6b33ae } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ GUI \ CCScrollView \ CCScrollView . cpp " > <nl> - < Filter > GUI \ CCScrollView < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCScrollView \ CCTableView . cpp " > <nl> - < Filter > GUI \ CCScrollView < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCScrollView \ CCTableViewCell . cpp " > <nl> - < Filter > GUI \ CCScrollView < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlColourPicker . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlHuePicker . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlPotentiometer . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlSlider . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlStepper . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlSwitch . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlUtils . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCInvocation . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCScale9Sprite . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControl . cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCControlExtension \ CCControlButton . 
cpp " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCEditBox \ CCEditBoxImplWin . cpp " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " Win32InputBox . cpp " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ GUI \ CCEditBox \ CCEditBox . cpp " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ assets - manager \ AssetsManager . cpp " > <nl> - < Filter > AssetsManager < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ physics - nodes \ CCPhysicsDebugNode . cpp " > <nl> - < Filter > physics_nodes < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " . . \ physics - nodes \ CCPhysicsSprite . cpp " > <nl> - < Filter > physics_nodes < / Filter > <nl> - < / ClCompile > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ GUI \ CCScrollView \ CCScrollView . h " > <nl> - < Filter > GUI \ CCScrollView < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ cocos - ext . h " / > <nl> - < ClInclude Include = " . . \ ExtensionMacros . h " / > <nl> - < ClInclude Include = " . . \ GUI \ CCScrollView \ CCTableView . h " > <nl> - < Filter > GUI \ CCScrollView < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCScrollView \ CCTableViewCell . h " > <nl> - < Filter > GUI \ CCScrollView < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlButton . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlColourPicker . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlExtensions . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlHuePicker . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlPotentiometer . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlSaturationBrightnessPicker . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlSlider . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlStepper . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlSwitch . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControlUtils . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCInvocation . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCScale9Sprite . 
h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCControlExtension \ CCControl . h " > <nl> - < Filter > GUI \ CCControlExtension < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCEditBox \ CCEditBoxImplWin . h " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " Win32InputBox . h " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCEditBox \ CCEditBox . h " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ GUI \ CCEditBox \ CCEditBoxImpl . h " > <nl> - < Filter > GUI \ CCEditBox < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ assets - manager \ AssetsManager . h " > <nl> - < Filter > AssetsManager < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ physics - nodes \ CCPhysicsDebugNode . h " > <nl> - < Filter > physics_nodes < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ physics - nodes \ CCPhysicsSprite . h " > <nl> - < Filter > physics_nodes < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " . . \ ExtensionExport . h " / > <nl> - < / ItemGroup > <nl> - < / Project > <nl> \ No newline at end of file <nl> + < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " / > <nl> \ No newline at end of file <nl> mmm a / external / Box2D / proj . win32 / libbox2d . vcxproj <nl> ppp b / external / Box2D / proj . win32 / libbox2d . vcxproj <nl> <nl> < Platform > Win32 < / Platform > <nl> < / ProjectConfiguration > <nl> < / ItemGroup > <nl> - < PropertyGroup Label = " Globals " > <nl> - < ProjectName > libbox2d < / ProjectName > <nl> - < ProjectGuid > { 929480E7 - 23C0 - 4DF6 - 8456 - 096D71547116 } < / ProjectGuid > <nl> - < RootNamespace > libbox2d . win32 < / RootNamespace > <nl> - < Keyword > Win32Proj < / Keyword > <nl> - < / PropertyGroup > <nl> - < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . Default . props " / > <nl> - < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " Label = " Configuration " > <nl> - < ConfigurationType > StaticLibrary < / ConfigurationType > <nl> - < CharacterSet > Unicode < / CharacterSet > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 10 . 0 ' " > v100 < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' " > v110 < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v110_xp < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' " > v120 < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v120_xp < / PlatformToolset > <nl> - < / PropertyGroup > <nl> - < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " Label = " Configuration " > <nl> - < ConfigurationType > StaticLibrary < / ConfigurationType > <nl> - < CharacterSet > Unicode < / CharacterSet > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 10 . 
0 ' " > v100 < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' " > v110 < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v110_xp < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' " > v120 < / PlatformToolset > <nl> - < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v120_xp < / PlatformToolset > <nl> - < / PropertyGroup > <nl> - < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . props " / > <nl> - < ImportGroup Label = " ExtensionSettings " > <nl> - < / ImportGroup > <nl> - < ImportGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " Label = " PropertySheets " > <nl> - < Import Project = " $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props " Condition = " exists ( ' $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props ' ) " Label = " LocalAppDataPlatform " / > <nl> - < / ImportGroup > <nl> - < ImportGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " Label = " PropertySheets " > <nl> - < Import Project = " $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props " Condition = " exists ( ' $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props ' ) " Label = " LocalAppDataPlatform " / > <nl> - < / ImportGroup > <nl> - < PropertyGroup Label = " UserMacros " / > <nl> - < PropertyGroup > <nl> - < _ProjectFileVersion > 10 . 0 . 40219 . 1 < / _ProjectFileVersion > <nl> - < OutDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > $ ( SolutionDir ) $ ( Configuration ) . win32 \ < / OutDir > <nl> - < IntDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > $ ( Configuration ) . win32 \ < / IntDir > <nl> - < OutDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > $ ( SolutionDir ) $ ( Configuration ) . win32 \ < / OutDir > <nl> - < IntDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > $ ( Configuration ) . win32 \ < / IntDir > <nl> - < CodeAnalysisRuleSet Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > AllRules . ruleset < / CodeAnalysisRuleSet > <nl> - < CodeAnalysisRules Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " / > <nl> - < CodeAnalysisRuleAssemblies Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " / > <nl> - < CodeAnalysisRuleSet Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > AllRules . ruleset < / CodeAnalysisRuleSet > <nl> - < CodeAnalysisRules Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " / > <nl> - < CodeAnalysisRuleAssemblies Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " / > <nl> - < / PropertyGroup > <nl> - < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > <nl> - < LibraryPath > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 
1A \ lib ; $ ( LibraryPath ) < / LibraryPath > <nl> - < / PropertyGroup > <nl> - < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > <nl> - < LibraryPath > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ lib ; $ ( LibraryPath ) < / LibraryPath > <nl> - < / PropertyGroup > <nl> - < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > <nl> - < ClCompile > <nl> - < Optimization > Disabled < / Optimization > <nl> - < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; . . / . . / ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> - < PreprocessorDefinitions > WIN32 ; _DEBUG ; _LIB ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> - < MinimalRebuild > false < / MinimalRebuild > <nl> - < BasicRuntimeChecks > EnableFastChecks < / BasicRuntimeChecks > <nl> - < RuntimeLibrary > MultiThreadedDebugDLL < / RuntimeLibrary > <nl> - < PrecompiledHeader > <nl> - < / PrecompiledHeader > <nl> - < WarningLevel > Level3 < / WarningLevel > <nl> - < DebugInformationFormat > EditAndContinue < / DebugInformationFormat > <nl> - < MultiProcessorCompilation > true < / MultiProcessorCompilation > <nl> - < / ClCompile > <nl> - < Lib > <nl> - < OutputFile > $ ( OutDir ) $ ( ProjectName ) . lib < / OutputFile > <nl> - < / Lib > <nl> - < / ItemDefinitionGroup > <nl> - < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > <nl> - < ClCompile > <nl> - < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; . . / . . / ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> - < PreprocessorDefinitions > WIN32 ; NDEBUG ; _LIB ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> - < RuntimeLibrary > MultiThreadedDLL < / RuntimeLibrary > <nl> - < PrecompiledHeader > <nl> - < / PrecompiledHeader > <nl> - < WarningLevel > Level3 < / WarningLevel > <nl> - < DebugInformationFormat > None < / DebugInformationFormat > <nl> - < MultiProcessorCompilation > true < / MultiProcessorCompilation > <nl> - < Optimization > MinSpace < / Optimization > <nl> - < WholeProgramOptimization > false < / WholeProgramOptimization > <nl> - < / ClCompile > <nl> - < Lib > <nl> - < OutputFile > $ ( OutDir ) $ ( ProjectName ) . lib < / OutputFile > <nl> - < / Lib > <nl> - < / ItemDefinitionGroup > <nl> < ItemGroup > <nl> < ClCompile Include = " . . \ Collision \ b2BroadPhase . cpp " / > <nl> < ClCompile Include = " . . \ Collision \ b2CollideCircle . cpp " / > <nl> <nl> < ClCompile Include = " . . \ Rope \ b2Rope . cpp " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < ClInclude Include = " . . \ Box2D . h " / > <nl> < ClInclude Include = " . . \ Collision \ b2BroadPhase . h " / > <nl> < ClInclude Include = " . . \ Collision \ b2Collision . h " / > <nl> < ClInclude Include = " . . \ Collision \ b2Distance . h " / > <nl> <nl> < ClInclude Include = " . . \ Dynamics \ Joints \ b2WeldJoint . h " / > <nl> < ClInclude Include = " . . \ Dynamics \ Joints \ b2WheelJoint . h " / > <nl> < ClInclude Include = " . . \ Rope \ b2Rope . h " / > <nl> - < ClInclude Include = " . . \ Box2D . 
h " / > <nl> < / ItemGroup > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectName > libbox2d < / ProjectName > <nl> + < ProjectGuid > { 929480E7 - 23C0 - 4DF6 - 8456 - 096D71547116 } < / ProjectGuid > <nl> + < RootNamespace > libbox2d . win32 < / RootNamespace > <nl> + < Keyword > Win32Proj < / Keyword > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . Default . props " / > <nl> + < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " Label = " Configuration " > <nl> + < ConfigurationType > StaticLibrary < / ConfigurationType > <nl> + < CharacterSet > Unicode < / CharacterSet > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 10 . 0 ' " > v100 < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' " > v110 < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v110_xp < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' " > v120 < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v120_xp < / PlatformToolset > <nl> + < / PropertyGroup > <nl> + < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " Label = " Configuration " > <nl> + < ConfigurationType > StaticLibrary < / ConfigurationType > <nl> + < CharacterSet > Unicode < / CharacterSet > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 10 . 0 ' " > v100 < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' " > v110 < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 11 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v110_xp < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' " > v120 < / PlatformToolset > <nl> + < PlatformToolset Condition = " ' $ ( VisualStudioVersion ) ' = = ' 12 . 0 ' and exists ( ' $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A ' ) " > v120_xp < / PlatformToolset > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . props " / > <nl> + < ImportGroup Label = " ExtensionSettings " > <nl> + < / ImportGroup > <nl> + < ImportGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " Label = " PropertySheets " > <nl> + < Import Project = " $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props " Condition = " exists ( ' $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props ' ) " Label = " LocalAppDataPlatform " / > <nl> + < / ImportGroup > <nl> + < ImportGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " Label = " PropertySheets " > <nl> + < Import Project = " $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props " Condition = " exists ( ' $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props ' ) " Label = " LocalAppDataPlatform " / > <nl> + < / ImportGroup > <nl> + < PropertyGroup Label = " UserMacros " / > <nl> + < PropertyGroup > <nl> + < _ProjectFileVersion > 10 . 0 . 40219 . 
1 < / _ProjectFileVersion > <nl> + < OutDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > $ ( SolutionDir ) $ ( Configuration ) . win32 \ < / OutDir > <nl> + < IntDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > $ ( Configuration ) . win32 \ < / IntDir > <nl> + < OutDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > $ ( SolutionDir ) $ ( Configuration ) . win32 \ < / OutDir > <nl> + < IntDir Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > $ ( Configuration ) . win32 \ < / IntDir > <nl> + < CodeAnalysisRuleSet Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > AllRules . ruleset < / CodeAnalysisRuleSet > <nl> + < CodeAnalysisRules Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " / > <nl> + < CodeAnalysisRuleAssemblies Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " / > <nl> + < CodeAnalysisRuleSet Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > AllRules . ruleset < / CodeAnalysisRuleSet > <nl> + < CodeAnalysisRules Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " / > <nl> + < CodeAnalysisRuleAssemblies Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " / > <nl> + < / PropertyGroup > <nl> + < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > <nl> + < LibraryPath > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ lib ; $ ( LibraryPath ) < / LibraryPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > <nl> + < LibraryPath > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ lib ; $ ( LibraryPath ) < / LibraryPath > <nl> + < / PropertyGroup > <nl> + < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > <nl> + < ClCompile > <nl> + < Optimization > Disabled < / Optimization > <nl> + < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; . . / . . / ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> + < PreprocessorDefinitions > WIN32 ; _DEBUG ; _LIB ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> + < MinimalRebuild > false < / MinimalRebuild > <nl> + < BasicRuntimeChecks > EnableFastChecks < / BasicRuntimeChecks > <nl> + < RuntimeLibrary > MultiThreadedDebugDLL < / RuntimeLibrary > <nl> + < PrecompiledHeader > <nl> + < / PrecompiledHeader > <nl> + < WarningLevel > Level3 < / WarningLevel > <nl> + < DebugInformationFormat > EditAndContinue < / DebugInformationFormat > <nl> + < MultiProcessorCompilation > true < / MultiProcessorCompilation > <nl> + < / ClCompile > <nl> + < Lib > <nl> + < OutputFile > $ ( OutDir ) $ ( ProjectName ) . lib < / OutputFile > <nl> + < / Lib > <nl> + < / ItemDefinitionGroup > <nl> + < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > <nl> + < ClCompile > <nl> + < AdditionalIncludeDirectories > $ ( MSBuildProgramFiles32 ) \ Microsoft SDKs \ Windows \ v7 . 1A \ include ; . . / . . 
/ ; % ( AdditionalIncludeDirectories ) < / AdditionalIncludeDirectories > <nl> + < PreprocessorDefinitions > WIN32 ; NDEBUG ; _LIB ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> + < RuntimeLibrary > MultiThreadedDLL < / RuntimeLibrary > <nl> + < PrecompiledHeader > <nl> + < / PrecompiledHeader > <nl> + < WarningLevel > Level3 < / WarningLevel > <nl> + < DebugInformationFormat > None < / DebugInformationFormat > <nl> + < MultiProcessorCompilation > true < / MultiProcessorCompilation > <nl> + < Optimization > MinSpace < / Optimization > <nl> + < WholeProgramOptimization > false < / WholeProgramOptimization > <nl> + < / ClCompile > <nl> + < Lib > <nl> + < OutputFile > $ ( OutDir ) $ ( ProjectName ) . lib < / OutputFile > <nl> + < / Lib > <nl> + < / ItemDefinitionGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl> < ImportGroup Label = " ExtensionTargets " > <nl> < / ImportGroup > <nl> mmm a / external / Box2D / proj . win32 / libbox2d . vcxproj . filters <nl> ppp b / external / Box2D / proj . win32 / libbox2d . vcxproj . filters <nl> <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> < ItemGroup > <nl> < Filter Include = " Collision " > <nl> - < UniqueIdentifier > { bdf099c5 - cc32 - 468b - b53f - ec010d7f823f } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 96e593a1 - 0dd7 - 4dae - b88d - ee4fc8a03ad3 } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Collision \ Shapes " > <nl> - < UniqueIdentifier > { 92792f88 - 4e5c - 46ee - 8d87 - 002f3b2297ae } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 65359ee0 - 026c - 4116 - 9235 - 72fbc83ad4cb } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Common " > <nl> - < UniqueIdentifier > { f41a6fa6 - 5ac1 - 4514 - 9ac0 - 6f3a38f35b4d } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 4682feab - 6913 - 410c - af1a - a6063c5a5c70 } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Dynamics " > <nl> - < UniqueIdentifier > { 0b56913a - 34b7 - 410a - b386 - 869d6f7a20be } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 82f2dcc2 - 92b3 - 41db - 8482 - 9de0b1012103 } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Dynamics \ Contacts " > <nl> - < UniqueIdentifier > { f0e7b230 - 79cc - 49b8 - 9ed7 - 9dbfa062f2a4 } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 85580c9a - 58ff - 4035 - b17f - a539b89be2bb } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Dynamics \ Joints " > <nl> - < UniqueIdentifier > { 932f2008 - afbe - 42f5 - 993d - a1df0ec67756 } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 4857faee - e51c - 4875 - a34d - bde79c74b51a } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " Rope " > <nl> - < UniqueIdentifier > { f8937e09 - 93a4 - 49fa - 8f3e - dfc36da764df } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { ac5c2bb8 - 243b - 4c0e - bc5b - 09299960f055 } < / UniqueIdentifier > <nl> < / Filter > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> <nl> < ClCompile Include = " . . \ Dynamics \ Joints \ b2Joint . cpp " > <nl> < Filter > Dynamics \ Joints < / Filter > <nl> < / ClCompile > <nl> + < ClCompile Include = " . . \ Dynamics \ Joints \ b2MotorJoint . cpp " > <nl> + < Filter > Dynamics \ Joints < / Filter > <nl> + < / ClCompile > <nl> < ClCompile Include = " . . \ Dynamics \ Joints \ b2MouseJoint . 
cpp " > <nl> < Filter > Dynamics \ Joints < / Filter > <nl> < / ClCompile > <nl> <nl> < ClCompile Include = " . . \ Rope \ b2Rope . cpp " > <nl> < Filter > Rope < / Filter > <nl> < / ClCompile > <nl> - < ClCompile Include = " . . \ Dynamics \ Joints \ b2MotorJoint . cpp " > <nl> - < Filter > Dynamics \ Joints < / Filter > <nl> - < / ClCompile > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < ClInclude Include = " . . \ Collision \ b2BroadPhase . h " > <nl> <nl> < ClInclude Include = " . . \ Dynamics \ Joints \ b2Joint . h " > <nl> < Filter > Dynamics \ Joints < / Filter > <nl> < / ClInclude > <nl> + < ClInclude Include = " . . \ Dynamics \ Joints \ b2MotorJoint . h " > <nl> + < Filter > Dynamics \ Joints < / Filter > <nl> + < / ClInclude > <nl> < ClInclude Include = " . . \ Dynamics \ Joints \ b2MouseJoint . h " > <nl> < Filter > Dynamics \ Joints < / Filter > <nl> < / ClInclude > <nl> <nl> < Filter > Rope < / Filter > <nl> < / ClInclude > <nl> < ClInclude Include = " . . \ Box2D . h " / > <nl> - < ClInclude Include = " . . \ Dynamics \ Joints \ b2MotorJoint . h " > <nl> - < Filter > Dynamics \ Joints < / Filter > <nl> - < / ClInclude > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> mmm a / external / chipmunk / proj . win32 / chipmunk . vcxproj <nl> ppp b / external / chipmunk / proj . win32 / chipmunk . vcxproj <nl> <nl> < Platform > Win32 < / Platform > <nl> < / ProjectConfiguration > <nl> < / ItemGroup > <nl> + < ItemGroup > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ chipmunk . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_ffi . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_private . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_types . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_unsafe . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpConstraint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpDampedRotarySpring . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpDampedSpring . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpGearJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpGrooveJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpPinJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpPivotJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpRatchetJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpRotaryLimitJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpSimpleMotor . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpSlideJoint . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ constraints \ util . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpArbiter . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpBB . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpBody . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpPolyShape . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpShape . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpSpace . h " / > <nl> + < ClInclude Include = " . . 
\ include \ chipmunk \ cpSpatialIndex . h " / > <nl> + < ClInclude Include = " . . \ include \ chipmunk \ cpVect . h " / > <nl> + < ClInclude Include = " . . \ src \ prime . h " / > <nl> + < / ItemGroup > <nl> + < ItemGroup > <nl> + < ClCompile Include = " . . \ src \ chipmunk . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpConstraint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpDampedRotarySpring . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpDampedSpring . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpGearJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpGrooveJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpPinJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpPivotJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpRatchetJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpRotaryLimitJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpSimpleMotor . c " / > <nl> + < ClCompile Include = " . . \ src \ constraints \ cpSlideJoint . c " / > <nl> + < ClCompile Include = " . . \ src \ cpArbiter . c " / > <nl> + < ClCompile Include = " . . \ src \ cpArray . c " / > <nl> + < ClCompile Include = " . . \ src \ cpBB . c " / > <nl> + < ClCompile Include = " . . \ src \ cpBBTree . c " / > <nl> + < ClCompile Include = " . . \ src \ cpBody . c " / > <nl> + < ClCompile Include = " . . \ src \ cpCollision . c " / > <nl> + < ClCompile Include = " . . \ src \ cpHashSet . c " / > <nl> + < ClCompile Include = " . . \ src \ cpPolyShape . c " / > <nl> + < ClCompile Include = " . . \ src \ cpShape . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSpace . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSpaceComponent . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSpaceHash . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSpaceQuery . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSpaceStep . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSpatialIndex . c " / > <nl> + < ClCompile Include = " . . \ src \ cpSweep1D . c " / > <nl> + < ClCompile Include = " . . \ src \ cpVect . c " / > <nl> + < / ItemGroup > <nl> < PropertyGroup Label = " Globals " > <nl> < ProjectName > libchipmunk < / ProjectName > <nl> < ProjectGuid > { 207BC7A9 - CCF1 - 4F2F - A04D - 45F72242AE25 } < / ProjectGuid > <nl> <nl> < OutputFile > $ ( OutDir ) $ ( ProjectName ) . lib < / OutputFile > <nl> < / Lib > <nl> < / ItemDefinitionGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ chipmunk . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_ffi . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_private . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_types . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ chipmunk_unsafe . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpArbiter . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpBB . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpBody . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpPolyShape . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpShape . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpSpace . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpSpatialIndex . 
h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ cpVect . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpConstraint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpDampedRotarySpring . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpDampedSpring . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpGearJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpGrooveJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpPinJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpPivotJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpRatchetJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpRotaryLimitJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpSimpleMotor . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ cpSlideJoint . h " / > <nl> - < ClInclude Include = " . . \ include \ chipmunk \ constraints \ util . h " / > <nl> - < ClInclude Include = " . . \ src \ prime . h " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " . . \ src \ chipmunk . c " / > <nl> - < ClCompile Include = " . . \ src \ cpArbiter . c " / > <nl> - < ClCompile Include = " . . \ src \ cpArray . c " / > <nl> - < ClCompile Include = " . . \ src \ cpBB . c " / > <nl> - < ClCompile Include = " . . \ src \ cpBBTree . c " / > <nl> - < ClCompile Include = " . . \ src \ cpBody . c " / > <nl> - < ClCompile Include = " . . \ src \ cpCollision . c " / > <nl> - < ClCompile Include = " . . \ src \ cpHashSet . c " / > <nl> - < ClCompile Include = " . . \ src \ cpPolyShape . c " / > <nl> - < ClCompile Include = " . . \ src \ cpShape . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSpace . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSpaceComponent . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSpaceHash . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSpaceQuery . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSpaceStep . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSpatialIndex . c " / > <nl> - < ClCompile Include = " . . \ src \ cpSweep1D . c " / > <nl> - < ClCompile Include = " . . \ src \ cpVect . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpConstraint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpDampedRotarySpring . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpDampedSpring . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpGearJoint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpGrooveJoint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpPinJoint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpPivotJoint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpRatchetJoint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpRotaryLimitJoint . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpSimpleMotor . c " / > <nl> - < ClCompile Include = " . . \ src \ constraints \ cpSlideJoint . c " / > <nl> - < / ItemGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . 
targets " / > <nl> < ImportGroup Label = " ExtensionTargets " > <nl> < / ImportGroup > <nl> mmm a / external / chipmunk / proj . win32 / chipmunk . vcxproj . filters <nl> ppp b / external / chipmunk / proj . win32 / chipmunk . vcxproj . filters <nl> <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> < ItemGroup > <nl> < Filter Include = " include " > <nl> - < UniqueIdentifier > { 93995380 - 89BD - 4b04 - 88EB - 625FBE52EBFB } < / UniqueIdentifier > <nl> - < Extensions > h ; hpp ; hxx ; hm ; inl ; inc ; xsd < / Extensions > <nl> + < UniqueIdentifier > { ffa91ca5 - f4b1 - 424f - 9342 - bd250042f423 } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " include \ constraints " > <nl> - < UniqueIdentifier > { 4dabab00 - 1a6f - 41ee - be09 - a30737358f86 } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 70e7e6e1 - 042e - 42b6 - a19d - 547b8de61bbc } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " src " > <nl> - < UniqueIdentifier > { 4FC737F1 - C7A5 - 4376 - A066 - 2A32D752A2FF } < / UniqueIdentifier > <nl> - < Extensions > cpp ; c ; cc ; cxx ; def ; odl ; idl ; hpj ; bat ; asm ; asmx < / Extensions > <nl> + < UniqueIdentifier > { 78b690a1 - 6061 - 49d6 - a8b7 - ad6a231ddca2 } < / UniqueIdentifier > <nl> < / Filter > <nl> < Filter Include = " src \ constraints " > <nl> - < UniqueIdentifier > { e573d383 - f016 - 40f6 - 999a - d804311ffc62 } < / UniqueIdentifier > <nl> + < UniqueIdentifier > { 179bc1e1 - ed4a - 4412 - 92b1 - f74fddef2877 } < / UniqueIdentifier > <nl> < / Filter > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> mmm a / tests / cpp - tests / proj . win32 / cpp - tests . vcxproj <nl> ppp b / tests / cpp - tests / proj . win32 / cpp - tests . vcxproj <nl> <nl> < ProjectReference Include = " . . \ . . \ . . \ cocos \ 2d \ libcocos2d . vcxproj " > <nl> < Project > { 98a51ba8 - fc3a - 415b - ac8f - 8c7bd464e93e } < / Project > <nl> < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ cocos \ 3d \ proj . win32 \ libcocos3d . vcxproj " > <nl> - < Project > { e24950fa - 5bc1 - 4aee - a900 - 4f0259354bf0 } < / Project > <nl> - < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ cocos \ audio \ proj . win32 \ libcocosdenshion . vcxproj " > <nl> - < Project > { f8edd7fa - 9a51 - 4e80 - baeb - 860825d2eac6 } < / Project > <nl> - < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ . . \ cocos \ editor - support \ cocosbuilder \ proj . win32 \ libcocosbuilder . vcxproj " > <nl> < Project > { 811c0dab - 7b96 - 4bd3 - a154 - b7572b58e4ab } < / Project > <nl> < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ cocos \ editor - support \ cocostudio \ proj . win32 \ libcocostudio . vcxproj " > <nl> - < Project > { b57cf53f - 2e49 - 4031 - 9822 - 047cc0e6bde2 } < / Project > <nl> - < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ . . \ cocos \ editor - support \ spine \ proj . win32 \ libspine . vcxproj " > <nl> < Project > { b7c2a162 - dec9 - 4418 - 972e - 240ab3cbfcae } < / Project > <nl> < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ cocos \ network \ proj . win32 \ libNetwork . vcxproj " > <nl> - < Project > { df2638c0 - 8128 - 4847 - 867c - 6eafe3dee7b5 } < / Project > <nl> - < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ cocos \ ui \ proj . win32 \ libui . 
vcxproj " > <nl> - < Project > { 7e06e92c - 537a - 442b - 9e4a - 4761c84f8a1a } < / Project > <nl> - < / ProjectReference > <nl> - < ProjectReference Include = " . . \ . . \ . . \ extensions \ proj . win32 \ libextension . vcxproj " > <nl> - < Project > { 21b2c324 - 891f - 48ea - ad1a - 5ae13de12e28 } < / Project > <nl> - < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ . . \ external \ Box2D \ proj . win32 \ libbox2d . vcxproj " > <nl> < Project > { 929480e7 - 23c0 - 4df6 - 8456 - 096d71547116 } < / Project > <nl> < / ProjectReference > <nl> < ProjectReference Include = " . . \ . . \ . . \ external \ chipmunk \ proj . win32 \ chipmunk . vcxproj " > <nl> < Project > { 207bc7a9 - ccf1 - 4f2f - a04d - 45f72242ae25 } < / Project > <nl> - < ReferenceOutputAssembly > false < / ReferenceOutputAssembly > <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl>
|
Integrate libcocosdenshion, libcocos3d, libnetwork, libcocostudio and libextension into cocos2d on the iOS/Mac platform
|
cocos2d/cocos2d-x
|
2f60e610a2bec57ebeaa5dfca96acdf0f871af5a
|
2014-08-22T05:49:21Z
|
new file mode 100644 <nl> index 000000000000 . . 6f6250a6b9ce <nl> mmm / dev / null <nl> ppp b / test / SourceKit / ExpressionType / basic . swift <nl> <nl> + func foo ( ) { return 1 } <nl> + <nl> + / / RUN : % sourcekitd - test - req = collect - type % s - - % s | % FileCheck % s <nl> + / / CHECK : ( 20 , 21 ) : Int <nl> mmm a / tools / SourceKit / docs / Protocol . md <nl> ppp b / tools / SourceKit / docs / Protocol . md <nl> Welcome to SourceKit . Type ' : help ' for assistance . <nl> } <nl> ` ` ` <nl> <nl> + # # Expression Type <nl> + This request collects the types of all expressions in a source file after type checking . <nl> + To fulfill this task , the client must provide the path to the Swift source file under <nl> + type checking and the necessary compiler arguments to help resolve all dependencies . <nl> <nl> + # # # Request <nl> + <nl> + ` ` ` <nl> + { <nl> + < key . request > : ( UID ) < source . request . expression . type > , <nl> + < key . sourcefile > : ( string ) / / Absolute path to the file . <nl> + < key . compilerargs > : [ string * ] / / Array of zero or more strings for the compiler arguments , <nl> + / / e . g [ " - sdk " , " / path / to / sdk " ] . If key . sourcefile is provided , <nl> + / / these must include the path to that file . <nl> + } <nl> + ` ` ` <nl> + <nl> + # # # Response <nl> + ` ` ` <nl> + { <nl> + < key . printedtypebuffer > : ( string ) / / A text buffer where all expression types are printed to . <nl> + < key . expression_type_list > : ( array ) [ expr - type - info * ] / / A list of expression and type <nl> + } <nl> + ` ` ` <nl> + <nl> + ` ` ` <nl> + expr - type - info : : = <nl> + { <nl> + < key . expression_offset > : ( int64 ) / / Offset of an expression in the source file <nl> + < key . expression_length > : ( int64 ) / / Length of an expression in the source file <nl> + < key . type_offset > : ( int64 ) / / Offset of the printed type of the expression in the printed type buffer <nl> + < key . type_length > : ( int64 ) / / Length of the printed type of the expression in the printed type buffer <nl> + } <nl> + ` ` ` <nl> + <nl> + # # # Testing <nl> + <nl> + ` ` ` <nl> + $ sourcekitd - test - req = collect - type / path / to / file . swift - - / path / to / file . swift <nl> + ` ` ` <nl> <nl> # UIDs <nl> <nl> mmm a / tools / SourceKit / include / SourceKit / Core / LangSupport . h <nl> ppp b / tools / SourceKit / include / SourceKit / Core / LangSupport . 
h <nl> struct CodeCompletionInfo { <nl> Optional < ArrayRef < ParameterStructure > > parametersStructure ; <nl> } ; <nl> <nl> + struct ExpressionType { <nl> + unsigned ExprOffset ; <nl> + unsigned ExprLength ; <nl> + unsigned TypeOffset ; <nl> + unsigned TypeLength ; <nl> + } ; <nl> + <nl> + struct ExpressionTypesInFile { <nl> + std : : vector < ExpressionType > Results ; <nl> + StringRef TypeBuffer ; <nl> + } ; <nl> + <nl> class CodeCompletionConsumer { <nl> virtual void anchor ( ) ; <nl> <nl> class LangSupport { <nl> ArrayRef < const char * > Args , <nl> CategorizedEditsReceiver Receiver ) = 0 ; <nl> <nl> + virtual void collectExpressionTypes ( StringRef FileName , <nl> + ArrayRef < const char * > Args , <nl> + std : : function < void ( const ExpressionTypesInFile & ) > Receiver ) = 0 ; <nl> + <nl> virtual void getDocInfo ( llvm : : MemoryBuffer * InputBuf , <nl> StringRef ModuleName , <nl> ArrayRef < const char * > Args , <nl> class LangSupport { <nl> <nl> virtual void getStatistics ( StatisticsReceiver ) = 0 ; <nl> } ; <nl> - <nl> } / / namespace SourceKit <nl> <nl> # endif <nl> mmm a / tools / SourceKit / lib / SwiftLang / SwiftLangSupport . h <nl> ppp b / tools / SourceKit / lib / SwiftLang / SwiftLangSupport . h <nl> class SwiftLangSupport : public LangSupport { <nl> unsigned Length , ArrayRef < const char * > Args , <nl> CategorizedRenameRangesReceiver Receiver ) override ; <nl> <nl> + void collectExpressionTypes ( StringRef FileName , ArrayRef < const char * > Args , <nl> + std : : function < void ( const ExpressionTypesInFile & ) > Receiver ) override ; <nl> + <nl> void semanticRefactoring ( StringRef Filename , SemanticRefactoringInfo Info , <nl> ArrayRef < const char * > Args , <nl> CategorizedEditsReceiver Receiver ) override ; <nl> mmm a / tools / SourceKit / lib / SwiftLang / SwiftSourceDocInfo . cpp <nl> ppp b / tools / SourceKit / lib / SwiftLang / SwiftSourceDocInfo . cpp <nl> semanticRefactoring ( StringRef Filename , SemanticRefactoringInfo Info , <nl> static const char OncePerASTToken = 0 ; <nl> getASTManager ( ) - > processASTAsync ( Invok , std : : move ( Consumer ) , & OncePerASTToken ) ; <nl> } <nl> + <nl> + void SwiftLangSupport : : collectExpressionTypes ( StringRef FileName , <nl> + ArrayRef < const char * > Args , <nl> + std : : function < void ( const ExpressionTypesInFile & ) > Receiver ) { <nl> + std : : string Error ; <nl> + SwiftInvocationRef Invok = ASTMgr - > getInvocation ( Args , FileName , Error ) ; <nl> + if ( ! Invok ) { <nl> + / / FIXME : Report it as failed request . <nl> + LOG_WARN_FUNC ( " failed to create an ASTInvocation : " < < Error ) ; <nl> + Receiver ( { } ) ; <nl> + return ; <nl> + } <nl> + assert ( Invok ) ; <nl> + class ExpressionTypeCollector : public SwiftASTConsumer { <nl> + std : : function < void ( const ExpressionTypesInFile & ) > Receiver ; <nl> + public : <nl> + ExpressionTypeCollector ( std : : function < void ( const ExpressionTypesInFile & ) > Receiver ) : <nl> + Receiver ( std : : move ( Receiver ) ) { } <nl> + void handlePrimaryAST ( ASTUnitRef AstUnit ) override { <nl> + auto * SF = AstUnit - > getCompilerInstance ( ) . getPrimarySourceFile ( ) ; <nl> + std : : vector < ExpressionTypeInfo > Scratch ; <nl> + llvm : : SmallString < 256 > TypeBuffer ; <nl> + llvm : : raw_svector_ostream OS ( TypeBuffer ) ; <nl> + ExpressionTypesInFile Result ; <nl> + for ( auto Item : collectExpressionType ( * SF , Scratch , OS ) ) { <nl> + Result . Results . push_back ( { Item . offset , Item . length , Item . typeOffset , <nl> + Item . 
typeLength } ) ; <nl> + } <nl> + Result . TypeBuffer = OS . str ( ) ; <nl> + Receiver ( Result ) ; <nl> + } <nl> + <nl> + void cancelled ( ) override { <nl> + Receiver ( { } ) ; <nl> + } <nl> + <nl> + void failed ( StringRef Error ) override { <nl> + Receiver ( { } ) ; <nl> + } <nl> + } ; <nl> + auto Collector = std : : make_shared < ExpressionTypeCollector > ( Receiver ) ; <nl> + / / / FIXME : When request cancellation is implemented and Xcode adopts it , <nl> + / / / don ' t use ' OncePerASTToken ' . <nl> + static const char OncePerASTToken = 0 ; <nl> + getASTManager ( ) - > processASTAsync ( Invok , std : : move ( Collector ) , & OncePerASTToken ) ; <nl> + } <nl> mmm a / tools / SourceKit / tools / sourcekitd - test / TestOptions . cpp <nl> ppp b / tools / SourceKit / tools / sourcekitd - test / TestOptions . cpp <nl> bool TestOptions : : parseArgs ( llvm : : ArrayRef < const char * > Args ) { <nl> . Case ( " markup - xml " , SourceKitRequest : : MarkupToXML ) <nl> . Case ( " stats " , SourceKitRequest : : Statistics ) <nl> . Case ( " track - compiles " , SourceKitRequest : : EnableCompileNotifications ) <nl> + . Case ( " collect - type " , SourceKitRequest : : CollectExpresstionType ) <nl> . Default ( SourceKitRequest : : None ) ; <nl> <nl> if ( Request = = SourceKitRequest : : None ) { <nl> bool TestOptions : : parseArgs ( llvm : : ArrayRef < const char * > Args ) { <nl> " doc - info / sema / interface - gen / interface - gen - openfind - usr / find - interface / " <nl> " open / close / edit / print - annotations / print - diags / extract - comment / module - groups / " <nl> " range / syntactic - rename / find - rename - ranges / translate / markup - xml / stats / " <nl> - " track - compiles \ n " ; <nl> + " track - compiles / collect - type \ n " ; <nl> return true ; <nl> } <nl> break ; <nl> mmm a / tools / SourceKit / tools / sourcekitd - test / TestOptions . h <nl> ppp b / tools / SourceKit / tools / sourcekitd - test / TestOptions . h <nl> enum class SourceKitRequest { <nl> Statistics , <nl> SyntaxTree , <nl> EnableCompileNotifications , <nl> + CollectExpresstionType , <nl> # define SEMANTIC_REFACTORING ( KIND , NAME , ID ) KIND , <nl> # include " swift / IDE / RefactoringKinds . def " <nl> } ; <nl> mmm a / tools / SourceKit / tools / sourcekitd - test / sourcekitd - test . cpp <nl> ppp b / tools / SourceKit / tools / sourcekitd - test / sourcekitd - test . 
cpp <nl> static void printCursorInfo ( sourcekitd_variant_t Info , StringRef Filename , <nl> static void printNameTranslationInfo ( sourcekitd_variant_t Info , llvm : : raw_ostream & OS ) ; <nl> static void printRangeInfo ( sourcekitd_variant_t Info , StringRef Filename , <nl> llvm : : raw_ostream & OS ) ; <nl> + static void printExpressionType ( sourcekitd_variant_t Info , llvm : : raw_ostream & OS ) ; <nl> static void printDocInfo ( sourcekitd_variant_t Info , StringRef Filename ) ; <nl> static void printInterfaceGen ( sourcekitd_variant_t Info , bool CheckASCII ) ; <nl> static void printSemanticInfo ( ) ; <nl> static int handleTestInvocation ( TestOptions Opts , TestOptions & InitOpts ) { <nl> break ; <nl> } <nl> <nl> + case SourceKitRequest : : CollectExpresstionType : { <nl> + sourcekitd_request_dictionary_set_uid ( Req , KeyRequest , RequestCollectExpressionType ) ; <nl> + break ; <nl> + } <nl> + <nl> # define SEMANTIC_REFACTORING ( KIND , NAME , ID ) case SourceKitRequest : : KIND : \ <nl> { \ <nl> sourcekitd_request_dictionary_set_uid ( Req , KeyRequest , RequestSemanticRefactoring ) ; \ <nl> static bool handleResponse ( sourcekitd_response_t Resp , const TestOptions & Opts , <nl> printRangeInfo ( Info , SourceFile , llvm : : outs ( ) ) ; <nl> break ; <nl> <nl> + case SourceKitRequest : : CollectExpresstionType : <nl> + printExpressionType ( Info , llvm : : outs ( ) ) ; <nl> + break ; <nl> + <nl> case SourceKitRequest : : DocInfo : <nl> printDocInfo ( Info , SourceFile ) ; <nl> break ; <nl> static void printRangeInfo ( sourcekitd_variant_t Info , StringRef FilenameIn , <nl> if ( Typename ) <nl> OS < < " < type > " < < Typename < < " < / type > \ n " ; <nl> } <nl> + <nl> + static void printExpressionType ( sourcekitd_variant_t Info , llvm : : raw_ostream & OS ) { <nl> + auto * TypeBuffer = sourcekitd_variant_dictionary_get_string ( Info , KeyTypeBuffer ) ; <nl> + sourcekitd_variant_t ExprList = sourcekitd_variant_dictionary_get_value ( Info , <nl> + KeyExpressionTypeList ) ; <nl> + unsigned Count = sourcekitd_variant_array_get_count ( ExprList ) ; <nl> + for ( unsigned i = 0 ; i ! = Count ; + + i ) { <nl> + sourcekitd_variant_t Item = sourcekitd_variant_array_get_value ( ExprList , i ) ; <nl> + unsigned Offset = sourcekitd_variant_dictionary_get_int64 ( Item , KeyExpressionOffset ) ; <nl> + unsigned Length = sourcekitd_variant_dictionary_get_int64 ( Item , KeyExpressionLength ) ; <nl> + StringRef PrintedType ( TypeBuffer + sourcekitd_variant_dictionary_get_int64 ( Item , <nl> + KeyTypeOffset ) , sourcekitd_variant_dictionary_get_int64 ( Item , KeyTypeLength ) ) ; <nl> + OS < < " ( " < < Offset < < " , " < < Offset + Length < < " ) : " < < PrintedType < < " \ n " ; <nl> + } <nl> + if ( ! Count ) { <nl> + OS < < " cannot find expression types in the file \ n " ; <nl> + } <nl> + } <nl> + <nl> static void printFoundInterface ( sourcekitd_variant_t Info , <nl> llvm : : raw_ostream & OS ) { <nl> const char * Name = sourcekitd_variant_dictionary_get_string ( Info , <nl> mmm a / tools / SourceKit / tools / sourcekitd / lib / API / Requests . cpp <nl> ppp b / tools / SourceKit / tools / sourcekitd / lib / API / Requests . 
cpp <nl> static sourcekitd_response_t reportDocInfo ( llvm : : MemoryBuffer * InputBuf , <nl> <nl> static void reportCursorInfo ( const CursorInfoData & Info , ResponseReceiver Rec ) ; <nl> <nl> + static void reportExpressionTypeInfo ( const ExpressionTypesInFile & Info , ResponseReceiver Rec ) ; <nl> + <nl> static void reportRangeInfo ( const RangeInfo & Info , ResponseReceiver Rec ) ; <nl> <nl> static void reportNameInfo ( const NameTranslatingInfo & Info , ResponseReceiver Rec ) ; <nl> handleSemanticRequest ( RequestDict Req , <nl> return Rec ( createErrorRequestInvalid ( " ' key . line ' or ' key . column ' are required " ) ) ; <nl> } <nl> <nl> + if ( ReqUID = = RequestCollectExpressionType ) { <nl> + LangSupport & Lang = getGlobalContext ( ) . getSwiftLangSupport ( ) ; <nl> + return Lang . collectExpressionTypes ( * SourceFile , Args , <nl> + [ Rec ] ( const ExpressionTypesInFile & Info ) { <nl> + reportExpressionTypeInfo ( Info , Rec ) ; <nl> + } ) ; <nl> + } <nl> + <nl> if ( ReqUID = = RequestFindLocalRenameRanges ) { <nl> int64_t Line = 0 , Column = 0 , Length = 0 ; <nl> if ( Req . getInt64 ( KeyLine , Line , / * isOptional = * / false ) ) <nl> static void reportNameInfo ( const NameTranslatingInfo & Info , ResponseReceiver Rec <nl> Rec ( RespBuilder . createResponse ( ) ) ; <nl> } <nl> <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / ReportExpressionTypeInfo <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + static void reportExpressionTypeInfo ( const ExpressionTypesInFile & Info , <nl> + ResponseReceiver Rec ) { <nl> + ResponseBuilder Builder ; <nl> + auto Dict = Builder . getDictionary ( ) ; <nl> + Dict . set ( KeyTypeBuffer , Info . TypeBuffer ) ; <nl> + ResponseBuilder : : Array Arr = Dict . setArray ( KeyExpressionTypeList ) ; <nl> + for ( auto Result : Info . Results ) { <nl> + auto Elem = Arr . appendDictionary ( ) ; <nl> + Elem . set ( KeyExpressionOffset , Result . ExprOffset ) ; <nl> + Elem . set ( KeyExpressionLength , Result . ExprLength ) ; <nl> + Elem . set ( KeyTypeOffset , Result . TypeOffset ) ; <nl> + Elem . set ( KeyTypeLength , Result . TypeLength ) ; <nl> + } <nl> + Rec ( Builder . createResponse ( ) ) ; <nl> + } <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / FindRelatedIdents <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> mmm a / utils / gyb_sourcekit_support / UIDs . py <nl> ppp b / utils / gyb_sourcekit_support / UIDs . py <nl> def __init__ ( self , internal_name , external_name ) : <nl> KEY ( ' ImplicitMembers ' , ' key . implicitmembers ' ) , <nl> KEY ( ' ExpectedTypes ' , ' key . expectedtypes ' ) , <nl> KEY ( ' Members ' , ' key . members ' ) , <nl> + KEY ( ' TypeBuffer ' , ' key . printedtypebuffer ' ) , <nl> + KEY ( ' ExpressionTypeList ' , ' key . expression_type_list ' ) , <nl> + KEY ( ' ExpressionOffset ' , ' key . expression_offset ' ) , <nl> + KEY ( ' ExpressionLength ' , ' key . expression_length ' ) , <nl> + KEY ( ' TypeOffset ' , ' key . type_offset ' ) , <nl> + KEY ( ' TypeLength ' , ' key . type_length ' ) , <nl> ] <nl> <nl> <nl> def __init__ ( self , internal_name , external_name ) : <nl> REQUEST ( ' EnableCompileNotifications ' , <nl> ' source . request . enable - compile - notifications ' ) , <nl> REQUEST ( ' TestNotification ' , ' source . request . 
test_notification ' ) , <nl> + REQUEST ( ' CollectExpressionType ' , ' source . request . expression . type ' ) , <nl> ] <nl> <nl> <nl>
|
Merge remote-tracking branch 'origin/master' into master-next
|
apple/swift
|
6eca658f90cca8c3ac10a6310966701451588d04
|
2019-02-20T22:49:04Z
|
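The Protocol.md addition in the row above specifies that a collect-type response packs every printed type into one shared `key.printedtypebuffer`, with each expr-type-info entry carrying only offset/length pairs into the source file and into that buffer. Below is a minimal sketch of decoding that layout; the struct and variable names are illustrative (not SourceKit's client API), and the sample entry mirrors the basic.swift test above, where the literal at bytes [20, 21) has type Int.

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical mirror of the documented expr-type-info entries.
struct ExprTypeEntry {
  unsigned exprOffset;  // key.expression_offset
  unsigned exprLength;  // key.expression_length
  unsigned typeOffset;  // key.type_offset
  unsigned typeLength;  // key.type_length
};

int main() {
  // Sample data mirroring basic.swift: in "func foo() { return 1 }",
  // the "1" at bytes [20, 21) has printed type "Int".
  std::string typeBuffer = "Int";                // key.printedtypebuffer
  std::vector<ExprTypeEntry> entries = {{20, 1, 0, 3}};

  for (const ExprTypeEntry &e : entries) {
    std::string type = typeBuffer.substr(e.typeOffset, e.typeLength);
    // Prints "(20, 21): Int", matching the CHECK line in the test.
    std::printf("(%u, %u): %s\n", e.exprOffset, e.exprOffset + e.exprLength,
                type.c_str());
  }
}
```

Sharing a single printed-type buffer is presumably why the response carries offset/length pairs rather than embedding a string per entry: a file with thousands of expressions then needs only one string allocation on the wire.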
mmm a / src / mongo / db / repl / data_replicator . cpp <nl> ppp b / src / mongo / db / repl / data_replicator . cpp <nl> <nl> # include " mongo / db / repl / database_cloner . h " <nl> # include " mongo / db / repl / member_state . h " <nl> # include " mongo / db / repl / optime . h " <nl> - # include " mongo / db / repl / reporter . h " <nl> # include " mongo / db / repl / sync_source_selector . h " <nl> # include " mongo / stdx / functional . h " <nl> # include " mongo / stdx / thread . h " <nl> DataReplicator : : DataReplicator ( DataReplicatorOptions opts , ReplicationExecutor * <nl> uassert ( ErrorCodes : : BadValue , " invalid applier function " , _opts . applierFn ) ; <nl> uassert ( ErrorCodes : : BadValue , " invalid rollback function " , _opts . rollbackFn ) ; <nl> uassert ( ErrorCodes : : BadValue , <nl> - " invalid replication progress manager " , <nl> - _opts . replicationProgressManager ) ; <nl> + " invalid replSetUpdatePosition command object creation function " , <nl> + _opts . prepareReplSetUpdatePositionCommandFn ) ; <nl> uassert ( ErrorCodes : : BadValue , " invalid getMyLastOptime function " , _opts . getMyLastOptime ) ; <nl> uassert ( ErrorCodes : : BadValue , " invalid setMyLastOptime function " , _opts . setMyLastOptime ) ; <nl> uassert ( ErrorCodes : : BadValue , " invalid setFollowerMode function " , _opts . setFollowerMode ) ; <nl> void DataReplicator : : _doNextActions_Steady_inlock ( ) { <nl> <nl> if ( ! _reporterPaused & & ( ! _reporter | | ! _reporter - > getStatus ( ) . isOK ( ) ) ) { <nl> / / TODO get reporter in good shape <nl> - _reporter . reset ( new Reporter ( _exec , _opts . replicationProgressManager , _syncSource ) ) ; <nl> + _reporter . reset ( <nl> + new Reporter ( _exec , _opts . prepareReplSetUpdatePositionCommandFn , _syncSource ) ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / db / repl / data_replicator . h <nl> ppp b / src / mongo / db / repl / data_replicator . h <nl> <nl> # include " mongo / db / repl / database_cloner . h " <nl> # include " mongo / db / repl / optime . h " <nl> # include " mongo / db / repl / replication_executor . h " <nl> + # include " mongo / db / repl / reporter . h " <nl> # include " mongo / db / repl / sync_source_selector . h " <nl> # include " mongo / stdx / condition_variable . h " <nl> # include " mongo / stdx / mutex . h " <nl> class OplogFetcher ; <nl> struct InitialSyncState ; <nl> struct MemberState ; <nl> class ReplicationProgressManager ; <nl> - class Reporter ; <nl> class SyncSourceSelector ; <nl> <nl> / * * State for decision tree * / <nl> struct DataReplicatorOptions { <nl> <nl> Applier : : ApplyOperationFn applierFn ; <nl> RollbackFn rollbackFn ; <nl> - ReplicationProgressManager * replicationProgressManager = nullptr ; <nl> + Reporter : : PrepareReplSetUpdatePositionCommandFn prepareReplSetUpdatePositionCommandFn ; <nl> GetMyLastOptimeFn getMyLastOptime ; <nl> SetMyLastOptimeFn setMyLastOptime ; <nl> SetFollowerModeFn setFollowerMode ; <nl> mmm a / src / mongo / db / repl / data_replicator_test . cpp <nl> ppp b / src / mongo / db / repl / data_replicator_test . 
cpp <nl> class SyncSourceSelectorMock : public SyncSourceSelector { <nl> HostAndPort _blacklistedSource ; <nl> } ; <nl> <nl> - class DataReplicatorTest : public ReplicationExecutorTest , <nl> - public ReplicationProgressManager , <nl> - public SyncSourceSelector { <nl> + class DataReplicatorTest : public ReplicationExecutorTest , public SyncSourceSelector { <nl> public : <nl> DataReplicatorTest ( ) { } <nl> <nl> class DataReplicatorTest : public ReplicationExecutorTest , <nl> _syncSourceSelector . reset ( new SyncSourceSelectorMock ( HostAndPort ( " localhost " , - 1 ) ) ) ; <nl> } <nl> <nl> - / / ReplicationProgressManager <nl> - bool prepareReplSetUpdatePositionCommand ( BSONObjBuilder * cmdBuilder ) override { <nl> - cmdBuilder - > append ( " replSetUpdatePosition " , 1 ) ; <nl> - return true ; <nl> - } <nl> - <nl> / / SyncSourceSelector <nl> void clearSyncSourceBlacklist ( ) override { <nl> _syncSourceSelector - > clearSyncSourceBlacklist ( ) ; <nl> class DataReplicatorTest : public ReplicationExecutorTest , <nl> return _rollbackFn ( txn , lastOpTimeWritten , syncSource ) ; <nl> } ; <nl> <nl> - options . replicationProgressManager = this ; <nl> + options . prepareReplSetUpdatePositionCommandFn = <nl> + [ ] ( ) - > StatusWith < BSONObj > { return BSON ( " replSetUpdatePosition " < < 1 ) ; } ; <nl> options . getMyLastOptime = [ this ] ( ) { return _myLastOpTime ; } ; <nl> options . setMyLastOptime = [ this ] ( const OpTime & opTime ) { _setMyLastOptime ( opTime ) ; } ; <nl> options . setFollowerMode = [ this ] ( const MemberState & state ) { <nl> mmm a / src / mongo / db / repl / replication_coordinator . h <nl> ppp b / src / mongo / db / repl / replication_coordinator . h <nl> <nl> # include " mongo / base / status . h " <nl> # include " mongo / db / repl / member_state . h " <nl> # include " mongo / db / repl / repl_settings . h " <nl> - # include " mongo / db / repl / reporter . h " <nl> # include " mongo / db / repl / sync_source_selector . h " <nl> # include " mongo / util / net / hostandport . h " <nl> # include " mongo / util / time_support . h " <nl> extern const char * replAllDead ; <nl> * with the rest of the system . The public methods on ReplicationCoordinator are the public <nl> * API that the replication subsystem presents to the rest of the codebase . <nl> * / <nl> - class ReplicationCoordinator : public ReplicationProgressManager , public SyncSourceSelector { <nl> + class ReplicationCoordinator : public SyncSourceSelector { <nl> MONGO_DISALLOW_COPYING ( ReplicationCoordinator ) ; <nl> <nl> public : <nl> mmm a / src / mongo / db / repl / replication_coordinator_impl . cpp <nl> ppp b / src / mongo / db / repl / replication_coordinator_impl . cpp <nl> DataReplicatorOptions createDataReplicatorOptions ( ReplicationCoordinator * replCo <nl> options . applierFn = [ ] ( OperationContext * , const BSONObj & ) - > Status { return Status : : OK ( ) ; } ; <nl> options . rollbackFn = <nl> [ ] ( OperationContext * , const OpTime & , const HostAndPort & ) { return Status : : OK ( ) ; } ; <nl> - options . replicationProgressManager = replCoord ; <nl> + options . prepareReplSetUpdatePositionCommandFn = [ replCoord ] ( ) - > StatusWith < BSONObj > { <nl> + BSONObjBuilder bob ; <nl> + if ( replCoord - > prepareReplSetUpdatePositionCommand ( & bob ) ) { <nl> + return bob . obj ( ) ; <nl> + } <nl> + return Status ( ErrorCodes : : OperationFailed , <nl> + " unable to prepare replSetUpdatePosition command object " ) ; <nl> + } ; <nl> options . 
getMyLastOptime = [ replCoord ] ( ) { return replCoord - > getMyLastOptime ( ) ; } ; <nl> options . setMyLastOptime = <nl> [ replCoord ] ( const OpTime & opTime ) { replCoord - > setMyLastOptime ( opTime ) ; } ; <nl> mmm a / src / mongo / db / repl / reporter . cpp <nl> ppp b / src / mongo / db / repl / reporter . cpp <nl> namespace mongo { <nl> namespace repl { <nl> <nl> Reporter : : Reporter ( ReplicationExecutor * executor , <nl> - ReplicationProgressManager * replicationProgressManager , <nl> + PrepareReplSetUpdatePositionCommandFn prepareReplSetUpdatePositionCommandFn , <nl> const HostAndPort & target ) <nl> : _executor ( executor ) , <nl> - _updatePositionSource ( replicationProgressManager ) , <nl> + _prepareReplSetUpdatePositionCommandFn ( prepareReplSetUpdatePositionCommandFn ) , <nl> _target ( target ) , <nl> _status ( Status : : OK ( ) ) , <nl> _willRunAgain ( false ) , <nl> _active ( false ) { <nl> uassert ( ErrorCodes : : BadValue , " null replication executor " , executor ) ; <nl> - uassert ( ErrorCodes : : BadValue , " null replication progress manager " , replicationProgressManager ) ; <nl> + uassert ( ErrorCodes : : BadValue , <nl> + " null function to create replSetUpdatePosition command object " , <nl> + prepareReplSetUpdatePositionCommandFn ) ; <nl> uassert ( ErrorCodes : : BadValue , " target name cannot be empty " , ! target . empty ( ) ) ; <nl> } <nl> <nl> Status Reporter : : _schedule_inlock ( ) { <nl> <nl> LOG ( 2 ) < < " Reporter scheduling report to : " < < _target ; <nl> <nl> - BSONObjBuilder cmd ; <nl> - if ( ! _updatePositionSource - > prepareReplSetUpdatePositionCommand ( & cmd ) ) { <nl> + auto prepareResult = _prepareReplSetUpdatePositionCommandFn ( ) ; <nl> + <nl> + if ( ! prepareResult . isOK ( ) ) { <nl> / / Returning NodeNotFound because currently this is the only way <nl> / / prepareReplSetUpdatePositionCommand ( ) can fail in production . <nl> return Status ( ErrorCodes : : NodeNotFound , <nl> " Reporter failed to create replSetUpdatePositionCommand command . " ) ; <nl> } <nl> - auto cmdObj = cmd . obj ( ) ; <nl> + auto cmdObj = prepareResult . getValue ( ) ; <nl> StatusWith < ReplicationExecutor : : CallbackHandle > scheduleResult = <nl> _executor - > scheduleRemoteCommand ( <nl> RemoteCommandRequest ( _target , " admin " , cmdObj ) , <nl> mmm a / src / mongo / db / repl / reporter . h <nl> ppp b / src / mongo / db / repl / reporter . h <nl> <nl> # pragma once <nl> <nl> # include " mongo / base / status . h " <nl> + # include " mongo / base / status_with . h " <nl> + # include " mongo / db / jsobj . h " <nl> # include " mongo / db / repl / replication_executor . h " <nl> # include " mongo / stdx / functional . h " <nl> <nl> namespace mongo { <nl> namespace repl { <nl> <nl> - class ReplicationProgressManager { <nl> - public : <nl> - virtual bool prepareReplSetUpdatePositionCommand ( BSONObjBuilder * cmdBuilder ) = 0 ; <nl> - virtual ~ ReplicationProgressManager ( ) = default ; <nl> - } ; <nl> - <nl> class Reporter { <nl> MONGO_DISALLOW_COPYING ( Reporter ) ; <nl> <nl> public : <nl> + / * * <nl> + * Prepares a BSONObj describing an invocation of the replSetUpdatePosition command that can <nl> + * be sent to this node ' s sync source to update it about our progress in replication . <nl> + * <nl> + * The returned status indicates whether or not the command was created . 
<nl> + * / <nl> + using PrepareReplSetUpdatePositionCommandFn = stdx : : function < StatusWith < BSONObj > ( ) > ; <nl> + <nl> Reporter ( ReplicationExecutor * executor , <nl> - ReplicationProgressManager * replicationProgressManager , <nl> + PrepareReplSetUpdatePositionCommandFn prepareReplSetUpdatePositionCommandFn , <nl> const HostAndPort & target ) ; <nl> virtual ~ Reporter ( ) ; <nl> <nl> class Reporter { <nl> <nl> / / Not owned by us . <nl> ReplicationExecutor * _executor ; <nl> - ReplicationProgressManager * _updatePositionSource ; <nl> + <nl> + / / Prepares update command object . <nl> + PrepareReplSetUpdatePositionCommandFn _prepareReplSetUpdatePositionCommandFn ; <nl> <nl> / / Host to whom the Reporter sends updates . <nl> HostAndPort _target ; <nl> mmm a / src / mongo / db / repl / reporter_test . cpp <nl> ppp b / src / mongo / db / repl / reporter_test . cpp <nl> using namespace mongo ; <nl> using namespace mongo : : repl ; <nl> using executor : : NetworkInterfaceMock ; <nl> <nl> - class MockProgressManager : public ReplicationProgressManager { <nl> + class MockProgressManager { <nl> public : <nl> void updateMap ( int memberId , const Timestamp & ts ) { <nl> progressMap [ memberId ] = ts ; <nl> class ReporterTest : public ReplicationExecutorTest { <nl> <nl> std : : unique_ptr < Reporter > reporter ; <nl> std : : unique_ptr < MockProgressManager > posUpdater ; <nl> + Reporter : : PrepareReplSetUpdatePositionCommandFn prepareReplSetUpdatePositionCommandFn ; <nl> } ; <nl> <nl> ReporterTest : : ReporterTest ( ) { } <nl> ReporterTest : : ReporterTest ( ) { } <nl> void ReporterTest : : setUp ( ) { <nl> ReplicationExecutorTest : : setUp ( ) ; <nl> posUpdater . reset ( new MockProgressManager ( ) ) ; <nl> - reporter . reset ( new Reporter ( & getExecutor ( ) , posUpdater . get ( ) , HostAndPort ( " h1 " ) ) ) ; <nl> + prepareReplSetUpdatePositionCommandFn = [ this ] ( ) - > StatusWith < BSONObj > { <nl> + BSONObjBuilder bob ; <nl> + if ( posUpdater - > prepareReplSetUpdatePositionCommand ( & bob ) ) { <nl> + return bob . obj ( ) ; <nl> + } <nl> + return Status ( ErrorCodes : : OperationFailed , <nl> + " unable to prepare replSetUpdatePosition command object " ) ; <nl> + } ; <nl> + reporter . reset ( new Reporter ( & getExecutor ( ) , <nl> + [ this ] ( ) { return prepareReplSetUpdatePositionCommandFn ( ) ; } , <nl> + HostAndPort ( " h1 " ) ) ) ; <nl> launchExecutorThread ( ) ; <nl> } <nl> <nl> void ReporterTest : : scheduleNetworkResponse ( ErrorCodes : : Error code , const std : : st <nl> } <nl> <nl> TEST_F ( ReporterTest , InvalidConstruction ) { <nl> - / / null ReplicationProgressManager <nl> - ASSERT_THROWS ( Reporter ( & getExecutor ( ) , nullptr , HostAndPort ( " h1 " ) ) , UserException ) ; <nl> + / / null PrepareReplSetUpdatePositionCommandFn <nl> + ASSERT_THROWS ( Reporter ( & getExecutor ( ) , <nl> + Reporter : : PrepareReplSetUpdatePositionCommandFn ( ) , <nl> + HostAndPort ( " h1 " ) ) , <nl> + UserException ) ; <nl> <nl> / / null ReplicationExecutor <nl> - ASSERT_THROWS ( Reporter ( nullptr , posUpdater . get ( ) , HostAndPort ( " h1 " ) ) , UserException ) ; <nl> + ASSERT_THROWS ( Reporter ( nullptr , prepareReplSetUpdatePositionCommandFn , HostAndPort ( " h1 " ) ) , <nl> + UserException ) ; <nl> <nl> / / empty HostAndPort <nl> - ASSERT_THROWS ( Reporter ( & getExecutor ( ) , posUpdater . 
get ( ) , HostAndPort ( ) ) , UserException ) ; <nl> + ASSERT_THROWS ( Reporter ( & getExecutor ( ) , prepareReplSetUpdatePositionCommandFn , HostAndPort ( ) ) , <nl> + UserException ) ; <nl> } <nl> <nl> TEST_F ( ReporterTest , IsActiveOnceScheduled ) { <nl>
|
SERVER-18042 replaced reporter ReplicationProgressManager with a stdx::function
|
mongodb/mongo
|
46a628daf135fd509aab21b6bf195bbf7a2ab27b
|
2015-06-29T20:27:55Z
|
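The SERVER-18042 diff above swaps an abstract `ReplicationProgressManager` interface for an injected function returning `StatusWith<BSONObj>`. Below is a minimal sketch of that callback-injection pattern using only standard-library types: `std::function` stands in for `stdx::function`, `std::optional<std::string>` for `StatusWith<BSONObj>`, and the class and variable names are illustrative rather than MongoDB's API.

```cpp
#include <functional>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <utility>

// The injected callback type; returns no value when the command
// object could not be prepared.
using PrepareCommandFn = std::function<std::optional<std::string>()>;

class Reporter {
public:
  explicit Reporter(PrepareCommandFn prepareFn)
      : _prepareFn(std::move(prepareFn)) {
    // Mirrors the uassert in Reporter's constructor: reject a null callback.
    if (!_prepareFn) throw std::invalid_argument("null prepare function");
  }

  // Mirrors Reporter::_schedule_inlock: ask the callback for the command
  // object and report failure if it could not be prepared.
  void trigger() {
    std::optional<std::string> cmd = _prepareFn();
    if (!cmd) {
      std::cout << "failed to prepare replSetUpdatePosition command\n";
      return;
    }
    std::cout << "sending: " << *cmd << '\n';
  }

private:
  PrepareCommandFn _prepareFn;  // injected instead of an interface pointer
};

int main() {
  // A caller injects a lambda instead of subclassing an interface.
  Reporter reporter(
      [] { return std::optional<std::string>("{ replSetUpdatePosition: 1 }"); });
  reporter.trigger();
}
```

As the reworked reporter_test.cpp in the diff shows, the payoff of this design is that tests can hand the Reporter a lambda directly instead of defining a mock subclass of a one-method interface.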
mmm a / src / ast - inl . h <nl> ppp b / src / ast - inl . h <nl> ForInStatement : : ForInStatement ( Isolate * isolate , ZoneStringList * labels ) <nl> } <nl> <nl> <nl> - int FunctionLiteral : : start_position ( ) const { <nl> - return scope ( ) - > start_position ( ) ; <nl> - } <nl> - <nl> - <nl> - int FunctionLiteral : : end_position ( ) const { <nl> - return scope ( ) - > end_position ( ) ; <nl> - } <nl> - <nl> - <nl> bool FunctionLiteral : : strict_mode ( ) const { <nl> return scope ( ) - > is_strict_mode ( ) ; <nl> } <nl> mmm a / src / ast . h <nl> ppp b / src / ast . h <nl> class FunctionLiteral : public Expression { <nl> bool has_only_simple_this_property_assignments , <nl> Handle < FixedArray > this_property_assignments , <nl> int num_parameters , <nl> + int start_position , <nl> + int end_position , <nl> Type type , <nl> bool has_duplicate_parameters ) <nl> : Expression ( isolate ) , <nl> class FunctionLiteral : public Expression { <nl> has_only_simple_this_property_assignments ) , <nl> this_property_assignments_ ( this_property_assignments ) , <nl> num_parameters_ ( num_parameters ) , <nl> + start_position_ ( start_position ) , <nl> + end_position_ ( end_position ) , <nl> function_token_position_ ( RelocInfo : : kNoPosition ) , <nl> inferred_name_ ( HEAP - > empty_string ( ) ) , <nl> is_expression_ ( type ! = DECLARATION ) , <nl> class FunctionLiteral : public Expression { <nl> ZoneList < Statement * > * body ( ) const { return body_ ; } <nl> void set_function_token_position ( int pos ) { function_token_position_ = pos ; } <nl> int function_token_position ( ) const { return function_token_position_ ; } <nl> - int start_position ( ) const ; <nl> - int end_position ( ) const ; <nl> + int start_position ( ) const { return start_position_ ; } <nl> + int end_position ( ) const { return end_position_ ; } <nl> bool is_expression ( ) const { return is_expression_ ; } <nl> bool is_anonymous ( ) const { return is_anonymous_ ; } <nl> bool strict_mode ( ) const ; <nl> mmm a / src / contexts . h <nl> ppp b / src / contexts . h <nl> class Context : public FixedArray { <nl> PREVIOUS_INDEX , <nl> / / The extension slot is used for either the global object ( in global <nl> / / contexts ) , eval extension object ( function contexts ) , subject of with <nl> - / / ( with contexts ) , or the variable name ( catch contexts ) , the serialized <nl> - / / scope info ( block contexts ) . <nl> + / / ( with contexts ) , or the variable name ( catch contexts ) . <nl> EXTENSION_INDEX , <nl> GLOBAL_INDEX , <nl> MIN_CONTEXT_SLOTS , <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class SerializedScopeInfo : public FixedArray { <nl> return reinterpret_cast < SerializedScopeInfo * > ( object ) ; <nl> } <nl> <nl> - / / Return the type of this scope . <nl> - ScopeType Type ( ) ; <nl> - <nl> / / Does this scope call eval ? <nl> bool CallsEval ( ) ; <nl> <nl> class SerializedScopeInfo : public FixedArray { <nl> / / Return if this has context slots besides MIN_CONTEXT_SLOTS ; <nl> bool HasHeapAllocatedLocals ( ) ; <nl> <nl> - / / Return if contexts are allocated for this scope . <nl> - bool HasContext ( ) ; <nl> - <nl> / / Lookup support for serialized scope info . Returns the <nl> / / the stack slot index for a given slot name if the slot is <nl> / / present ; otherwise returns a value < 0 . The name must be a symbol <nl> mmm a / src / parser . cc <nl> ppp b / src / parser . 
cc <nl> unsigned * ScriptDataImpl : : ReadAddress ( int position ) { <nl> } <nl> <nl> <nl> - Scope * Parser : : NewScope ( Scope * parent , ScopeType type ) { <nl> + Scope * Parser : : NewScope ( Scope * parent , Scope : : Type type ) { <nl> Scope * result = new ( zone ( ) ) Scope ( parent , type ) ; <nl> result - > Initialize ( ) ; <nl> return result ; <nl> FunctionLiteral * Parser : : DoParseProgram ( Handle < String > source , <nl> mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY ; <nl> if ( allow_natives_syntax_ | | extension_ ! = NULL ) mode_ = PARSE_EAGERLY ; <nl> <nl> - ScopeType type = in_global_context ? GLOBAL_SCOPE : EVAL_SCOPE ; <nl> + Scope : : Type type = <nl> + in_global_context <nl> + ? Scope : : GLOBAL_SCOPE <nl> + : Scope : : EVAL_SCOPE ; <nl> Handle < String > no_name = isolate ( ) - > factory ( ) - > empty_symbol ( ) ; <nl> <nl> FunctionLiteral * result = NULL ; <nl> { Scope * scope = NewScope ( top_scope_ , type ) ; <nl> - scope - > set_start_position ( 0 ) ; <nl> - scope - > set_end_position ( source - > length ( ) ) ; <nl> LexicalScope lexical_scope ( this , scope , isolate ( ) ) ; <nl> if ( strict_mode = = kStrictMode ) { <nl> top_scope_ - > EnableStrictMode ( ) ; <nl> FunctionLiteral * Parser : : DoParseProgram ( Handle < String > source , <nl> lexical_scope . only_simple_this_property_assignments ( ) , <nl> lexical_scope . this_property_assignments ( ) , <nl> 0 , <nl> + 0 , <nl> + source - > length ( ) , <nl> FunctionLiteral : : ANONYMOUS_EXPRESSION , <nl> false ) ; / / Does not have duplicate parameters . <nl> } else if ( stack_overflow_ ) { <nl> FunctionLiteral * Parser : : ParseLazy ( CompilationInfo * info , <nl> <nl> { <nl> / / Parse the function literal . <nl> - Scope * scope = NewScope ( top_scope_ , GLOBAL_SCOPE ) ; <nl> + Scope * scope = NewScope ( top_scope_ , Scope : : GLOBAL_SCOPE ) ; <nl> if ( ! info - > closure ( ) . is_null ( ) ) { <nl> scope = Scope : : DeserializeScopeChain ( info , scope ) ; <nl> } <nl> Block * Parser : : ParseScopedBlock ( ZoneStringList * labels , bool * ok ) { <nl> <nl> / / Construct block expecting 16 statements . <nl> Block * body = new ( zone ( ) ) Block ( isolate ( ) , labels , 16 , false ) ; <nl> - Scope * block_scope = NewScope ( top_scope_ , BLOCK_SCOPE ) ; <nl> + Scope * block_scope = NewScope ( top_scope_ , Scope : : BLOCK_SCOPE ) ; <nl> if ( top_scope_ - > is_strict_mode ( ) ) { <nl> block_scope - > EnableStrictMode ( ) ; <nl> } <nl> <nl> / / Parse the statements and collect escaping labels . <nl> Expect ( Token : : LBRACE , CHECK_OK ) ; <nl> - block_scope - > set_start_position ( scanner ( ) . location ( ) . beg_pos ) ; <nl> { SaveScope save_scope ( this , block_scope ) ; <nl> TargetCollector collector ; <nl> Target target ( & this - > target_stack_ , & collector ) ; <nl> Block * Parser : : ParseScopedBlock ( ZoneStringList * labels , bool * ok ) { <nl> } <nl> } <nl> Expect ( Token : : RBRACE , CHECK_OK ) ; <nl> - block_scope - > set_end_position ( scanner ( ) . location ( ) . 
end_pos ) ; <nl> + <nl> block_scope = block_scope - > FinalizeBlockScope ( ) ; <nl> body - > set_block_scope ( block_scope ) ; <nl> return body ; <nl> Statement * Parser : : ParseWithStatement ( ZoneStringList * labels , bool * ok ) { <nl> Expect ( Token : : RPAREN , CHECK_OK ) ; <nl> <nl> top_scope_ - > DeclarationScope ( ) - > RecordWithStatement ( ) ; <nl> - Scope * with_scope = NewScope ( top_scope_ , WITH_SCOPE ) ; <nl> + Scope * with_scope = NewScope ( top_scope_ , Scope : : WITH_SCOPE ) ; <nl> Statement * stmt ; <nl> { SaveScope save_scope ( this , with_scope ) ; <nl> - with_scope - > set_start_position ( scanner ( ) . peek_location ( ) . beg_pos ) ; <nl> stmt = ParseStatement ( labels , CHECK_OK ) ; <nl> - with_scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> } <nl> return new ( zone ( ) ) WithStatement ( expr , stmt ) ; <nl> } <nl> TryStatement * Parser : : ParseTryStatement ( bool * ok ) { <nl> Consume ( Token : : CATCH ) ; <nl> <nl> Expect ( Token : : LPAREN , CHECK_OK ) ; <nl> - catch_scope = NewScope ( top_scope_ , CATCH_SCOPE ) ; <nl> - if ( top_scope_ - > is_strict_mode ( ) ) { <nl> - catch_scope - > EnableStrictMode ( ) ; <nl> - } <nl> - catch_scope - > set_start_position ( scanner ( ) . location ( ) . beg_pos ) ; <nl> name = ParseIdentifier ( CHECK_OK ) ; <nl> <nl> if ( top_scope_ - > is_strict_mode ( ) & & IsEvalOrArguments ( name ) ) { <nl> TryStatement * Parser : : ParseTryStatement ( bool * ok ) { <nl> <nl> if ( peek ( ) = = Token : : LBRACE ) { <nl> Target target ( & this - > target_stack_ , & catch_collector ) ; <nl> + catch_scope = NewScope ( top_scope_ , Scope : : CATCH_SCOPE ) ; <nl> + if ( top_scope_ - > is_strict_mode ( ) ) { <nl> + catch_scope - > EnableStrictMode ( ) ; <nl> + } <nl> VariableMode mode = harmony_scoping_ ? LET : VAR ; <nl> catch_variable = catch_scope - > DeclareLocal ( name , mode ) ; <nl> <nl> TryStatement * Parser : : ParseTryStatement ( bool * ok ) { <nl> } else { <nl> Expect ( Token : : LBRACE , CHECK_OK ) ; <nl> } <nl> - catch_scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> + <nl> tok = peek ( ) ; <nl> } <nl> <nl> Statement * Parser : : ParseForStatement ( ZoneStringList * labels , bool * ok ) { <nl> <nl> / / Create an in - between scope for let - bound iteration variables . <nl> Scope * saved_scope = top_scope_ ; <nl> - Scope * for_scope = NewScope ( top_scope_ , BLOCK_SCOPE ) ; <nl> + Scope * for_scope = NewScope ( top_scope_ , Scope : : BLOCK_SCOPE ) ; <nl> if ( top_scope_ - > is_strict_mode ( ) ) { <nl> for_scope - > EnableStrictMode ( ) ; <nl> } <nl> Statement * Parser : : ParseForStatement ( ZoneStringList * labels , bool * ok ) { <nl> <nl> Expect ( Token : : FOR , CHECK_OK ) ; <nl> Expect ( Token : : LPAREN , CHECK_OK ) ; <nl> - for_scope - > set_start_position ( scanner ( ) . location ( ) . beg_pos ) ; <nl> if ( peek ( ) ! = Token : : SEMICOLON ) { <nl> if ( peek ( ) = = Token : : VAR | | peek ( ) = = Token : : CONST ) { <nl> Handle < String > name ; <nl> Statement * Parser : : ParseForStatement ( ZoneStringList * labels , bool * ok ) { <nl> result - > AddStatement ( variable_statement ) ; <nl> result - > AddStatement ( loop ) ; <nl> top_scope_ = saved_scope ; <nl> - for_scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> for_scope = for_scope - > FinalizeBlockScope ( ) ; <nl> ASSERT ( for_scope = = NULL ) ; <nl> / / Parsed for - in loop w / variable / const declaration . 
<nl> Statement * Parser : : ParseForStatement ( ZoneStringList * labels , bool * ok ) { <nl> body_block - > AddStatement ( body ) ; <nl> loop - > Initialize ( temp_proxy , enumerable , body_block ) ; <nl> top_scope_ = saved_scope ; <nl> - for_scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> for_scope = for_scope - > FinalizeBlockScope ( ) ; <nl> body_block - > set_block_scope ( for_scope ) ; <nl> / / Parsed for - in loop w / let declaration . <nl> Statement * Parser : : ParseForStatement ( ZoneStringList * labels , bool * ok ) { <nl> Statement * body = ParseStatement ( NULL , CHECK_OK ) ; <nl> if ( loop ) loop - > Initialize ( expression , enumerable , body ) ; <nl> top_scope_ = saved_scope ; <nl> - for_scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> for_scope = for_scope - > FinalizeBlockScope ( ) ; <nl> ASSERT ( for_scope = = NULL ) ; <nl> / / Parsed for - in loop . <nl> Statement * Parser : : ParseForStatement ( ZoneStringList * labels , bool * ok ) { <nl> <nl> Statement * body = ParseStatement ( NULL , CHECK_OK ) ; <nl> top_scope_ = saved_scope ; <nl> - for_scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> for_scope = for_scope - > FinalizeBlockScope ( ) ; <nl> if ( for_scope ! = NULL ) { <nl> / / Rewrite a for statement of the form <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> / / Function declarations are function scoped in normal mode , so they are <nl> / / hoisted . In harmony block scoping mode they are block scoped , so they <nl> / / are not hoisted . <nl> - Scope * scope = ( type = = FunctionLiteral : : DECLARATION & & ! harmony_scoping_ ) <nl> - ? NewScope ( top_scope_ - > DeclarationScope ( ) , FUNCTION_SCOPE ) <nl> - : NewScope ( top_scope_ , FUNCTION_SCOPE ) ; <nl> + Scope * scope = ( type = = FunctionLiteral : : DECLARATION & & <nl> + ! harmony_scoping_ ) <nl> + ? NewScope ( top_scope_ - > DeclarationScope ( ) , Scope : : FUNCTION_SCOPE ) <nl> + : NewScope ( top_scope_ , Scope : : FUNCTION_SCOPE ) ; <nl> ZoneList < Statement * > * body = new ( zone ( ) ) ZoneList < Statement * > ( 8 ) ; <nl> int materialized_literal_count ; <nl> int expected_property_count ; <nl> + int start_pos ; <nl> + int end_pos ; <nl> bool only_simple_this_property_assignments ; <nl> Handle < FixedArray > this_property_assignments ; <nl> bool has_duplicate_parameters = false ; <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> / / FormalParameterList : : <nl> / / ' ( ' ( Identifier ) * [ ' , ' ] ' ) ' <nl> Expect ( Token : : LPAREN , CHECK_OK ) ; <nl> - scope - > set_start_position ( scanner ( ) . location ( ) . beg_pos ) ; <nl> + start_pos = scanner ( ) . location ( ) . beg_pos ; <nl> Scanner : : Location name_loc = Scanner : : Location : : invalid ( ) ; <nl> Scanner : : Location dupe_loc = Scanner : : Location : : invalid ( ) ; <nl> Scanner : : Location reserved_loc = Scanner : : Location : : invalid ( ) ; <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> / / compile after all . <nl> is_lazily_compiled = false ; <nl> } else { <nl> - scope - > set_end_position ( entry . end_pos ( ) ) ; <nl> - if ( scope - > end_position ( ) < = function_block_pos ) { <nl> + end_pos = entry . end_pos ( ) ; <nl> + if ( end_pos < = function_block_pos ) { <nl> / / End position greater than end of stream is safe , and hard to check . 
<nl> ReportInvalidPreparseData ( function_name , CHECK_OK ) ; <nl> } <nl> isolate ( ) - > counters ( ) - > total_preparse_skipped ( ) - > Increment ( <nl> - scope - > end_position ( ) - function_block_pos ) ; <nl> + end_pos - function_block_pos ) ; <nl> / / Seek to position just before terminal ' } ' . <nl> - scanner ( ) . SeekForward ( scope - > end_position ( ) - 1 ) ; <nl> + scanner ( ) . SeekForward ( end_pos - 1 ) ; <nl> materialized_literal_count = entry . literal_count ( ) ; <nl> expected_property_count = entry . property_count ( ) ; <nl> if ( entry . strict_mode ( ) ) top_scope_ - > EnableStrictMode ( ) ; <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> this_property_assignments = lexical_scope . this_property_assignments ( ) ; <nl> <nl> Expect ( Token : : RBRACE , CHECK_OK ) ; <nl> - scope - > set_end_position ( scanner ( ) . location ( ) . end_pos ) ; <nl> + end_pos = scanner ( ) . location ( ) . end_pos ; <nl> } <nl> <nl> / / Validate strict mode . <nl> if ( top_scope_ - > is_strict_mode ( ) ) { <nl> if ( IsEvalOrArguments ( function_name ) ) { <nl> - int start_pos = scope - > start_position ( ) ; <nl> int position = function_token_position ! = RelocInfo : : kNoPosition <nl> ? function_token_position <nl> : ( start_pos > 0 ? start_pos - 1 : start_pos ) ; <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> return NULL ; <nl> } <nl> if ( name_is_strict_reserved ) { <nl> - int start_pos = scope - > start_position ( ) ; <nl> int position = function_token_position ! = RelocInfo : : kNoPosition <nl> ? function_token_position <nl> : ( start_pos > 0 ? start_pos - 1 : start_pos ) ; <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> * ok = false ; <nl> return NULL ; <nl> } <nl> - CheckOctalLiteral ( scope - > start_position ( ) , <nl> - scope - > end_position ( ) , <nl> - CHECK_OK ) ; <nl> + CheckOctalLiteral ( start_pos , end_pos , CHECK_OK ) ; <nl> } <nl> } <nl> <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( Handle < String > function_name , <nl> only_simple_this_property_assignments , <nl> this_property_assignments , <nl> num_parameters , <nl> + start_pos , <nl> + end_pos , <nl> type , <nl> has_duplicate_parameters ) ; <nl> function_literal - > set_function_token_position ( function_token_position ) ; <nl> mmm a / src / parser . h <nl> ppp b / src / parser . h <nl> class Parser { <nl> return & empty ; <nl> } <nl> <nl> - Scope * NewScope ( Scope * parent , ScopeType type ) ; <nl> + Scope * NewScope ( Scope * parent , Scope : : Type type ) ; <nl> <nl> Handle < String > LookupSymbol ( int symbol_id ) ; <nl> <nl> mmm a / src / runtime . cc <nl> ppp b / src / runtime . cc <nl> static Handle < JSObject > MaterializeBlockScope ( <nl> } <nl> <nl> <nl> - / / Iterate over the actual scopes visible from a stack frame . The iteration <nl> - / / proceeds from the innermost visible nested scope outwards . All scopes are <nl> + / / Iterate over the actual scopes visible from a stack frame . All scopes are <nl> / / backed by an actual context except the local scope , which is inserted <nl> - / / " artificially " in the context chain . <nl> + / / " artifically " in the context chain . 
<nl> class ScopeIterator { <nl> public : <nl> enum ScopeType { <nl> class ScopeIterator { <nl> inlined_frame_index_ ( inlined_frame_index ) , <nl> function_ ( JSFunction : : cast ( frame - > function ( ) ) ) , <nl> context_ ( Context : : cast ( frame - > context ( ) ) ) , <nl> - nested_scope_chain_ ( 4 ) { <nl> + local_done_ ( false ) , <nl> + at_local_ ( false ) { <nl> <nl> - / / Check whether we are in global code or function code . If there is a stack <nl> - / / slot for . result then this function has been created for evaluating <nl> - / / global code and it is not a real function . <nl> + / / Check whether the first scope is actually a local scope . <nl> + / / If there is a stack slot for . result then this local scope has been <nl> + / / created for evaluating top level code and it is not a real local scope . <nl> / / Checking for the existence of . result seems fragile , but the scope info <nl> / / saved with the code object does not otherwise have that information . <nl> int index = function_ - > shared ( ) - > scope_info ( ) - > <nl> StackSlotIndex ( isolate_ - > heap ( ) - > result_symbol ( ) ) ; <nl> - <nl> - / / Reparse the code and analyze the scopes . <nl> - ZoneScope zone_scope ( isolate , DELETE_ON_EXIT ) ; <nl> - Handle < SharedFunctionInfo > shared_info ( function_ - > shared ( ) ) ; <nl> - Handle < Script > script ( Script : : cast ( shared_info - > script ( ) ) ) ; <nl> - Scope * scope ; <nl> if ( index > = 0 ) { <nl> - / / Global code <nl> - CompilationInfo info ( script ) ; <nl> - info . MarkAsGlobal ( ) ; <nl> - bool result = ParserApi : : Parse ( & info ) ; <nl> - ASSERT ( result ) ; <nl> - result = Scope : : Analyze ( & info ) ; <nl> - ASSERT ( result ) ; <nl> - scope = info . function ( ) - > scope ( ) ; <nl> - } else { <nl> - / / Function code <nl> - CompilationInfo info ( shared_info ) ; <nl> - bool result = ParserApi : : Parse ( & info ) ; <nl> - ASSERT ( result ) ; <nl> - result = Scope : : Analyze ( & info ) ; <nl> - ASSERT ( result ) ; <nl> - scope = info . function ( ) - > scope ( ) ; <nl> + local_done_ = true ; <nl> + } else if ( context_ - > IsGlobalContext ( ) | | <nl> + context_ - > IsFunctionContext ( ) ) { <nl> + at_local_ = true ; <nl> + } else if ( context_ - > closure ( ) ! = * function_ ) { <nl> + / / The context_ is a block or with or catch block from the outer function . <nl> + ASSERT ( context_ - > IsWithContext ( ) | | <nl> + context_ - > IsCatchContext ( ) | | <nl> + context_ - > IsBlockContext ( ) ) ; <nl> + at_local_ = true ; <nl> } <nl> - <nl> - / / Retrieve the scope chain for the current position . <nl> - int statement_position = <nl> - shared_info - > code ( ) - > SourceStatementPosition ( frame_ - > pc ( ) ) ; <nl> - scope - > GetNestedScopeChain ( & nested_scope_chain_ , statement_position ) ; <nl> } <nl> <nl> / / More scopes ? <nl> class ScopeIterator { <nl> <nl> / / Move to the next scope . <nl> void Next ( ) { <nl> - ScopeType scope_type = Type ( ) ; <nl> - if ( scope_type = = ScopeTypeGlobal ) { <nl> - / / The global scope is always the last in the chain . <nl> - ASSERT ( context_ - > IsGlobalContext ( ) ) ; <nl> + / / If at a local scope mark the local scope as passed . <nl> + if ( at_local_ ) { <nl> + at_local_ = false ; <nl> + local_done_ = true ; <nl> + <nl> + / / If the current context is not associated with the local scope the <nl> + / / current context is the next real scope , so don ' t move to the next <nl> + / / context in this case . <nl> + if ( context_ - > closure ( ) ! 
= * function_ ) { <nl> + return ; <nl> + } <nl> + } <nl> + <nl> + / / The global scope is always the last in the chain . <nl> + if ( context_ - > IsGlobalContext ( ) ) { <nl> context_ = Handle < Context > ( ) ; <nl> return ; <nl> } <nl> - if ( nested_scope_chain_ . is_empty ( ) ) { <nl> - context_ = Handle < Context > ( context_ - > previous ( ) , isolate_ ) ; <nl> - } else { <nl> - if ( nested_scope_chain_ . last ( ) - > HasContext ( ) ) { <nl> - context_ = Handle < Context > ( context_ - > previous ( ) , isolate_ ) ; <nl> - } <nl> - nested_scope_chain_ . RemoveLast ( ) ; <nl> + <nl> + / / Move to the next context . <nl> + context_ = Handle < Context > ( context_ - > previous ( ) , isolate_ ) ; <nl> + <nl> + / / If passing the local scope indicate that the current scope is now the <nl> + / / local scope . <nl> + if ( ! local_done_ & & <nl> + ( context_ - > IsGlobalContext ( ) | | context_ - > IsFunctionContext ( ) ) ) { <nl> + at_local_ = true ; <nl> } <nl> } <nl> <nl> / / Return the type of the current scope . <nl> ScopeType Type ( ) { <nl> - if ( ! nested_scope_chain_ . is_empty ( ) ) { <nl> - Handle < SerializedScopeInfo > scope_info = nested_scope_chain_ . last ( ) ; <nl> - switch ( scope_info - > Type ( ) ) { <nl> - case FUNCTION_SCOPE : <nl> - ASSERT ( context_ - > IsFunctionContext ( ) | | <nl> - ! scope_info - > HasContext ( ) ) ; <nl> - return ScopeTypeLocal ; <nl> - case GLOBAL_SCOPE : <nl> - ASSERT ( context_ - > IsGlobalContext ( ) ) ; <nl> - return ScopeTypeGlobal ; <nl> - case WITH_SCOPE : <nl> - ASSERT ( context_ - > IsWithContext ( ) ) ; <nl> - return ScopeTypeWith ; <nl> - case CATCH_SCOPE : <nl> - ASSERT ( context_ - > IsCatchContext ( ) ) ; <nl> - return ScopeTypeCatch ; <nl> - case BLOCK_SCOPE : <nl> - ASSERT ( ! scope_info - > HasContext ( ) | | <nl> - context_ - > IsBlockContext ( ) ) ; <nl> - return ScopeTypeBlock ; <nl> - case EVAL_SCOPE : <nl> - UNREACHABLE ( ) ; <nl> - } <nl> + if ( at_local_ ) { <nl> + return ScopeTypeLocal ; <nl> } <nl> if ( context_ - > IsGlobalContext ( ) ) { <nl> ASSERT ( context_ - > global ( ) - > IsGlobalObject ( ) ) ; <nl> class ScopeIterator { <nl> return Handle < JSObject > ( CurrentContext ( ) - > global ( ) ) ; <nl> case ScopeIterator : : ScopeTypeLocal : <nl> / / Materialize the content of the local scope into a JSObject . <nl> - ASSERT ( nested_scope_chain_ . length ( ) = = 1 ) ; <nl> return MaterializeLocalScope ( isolate_ , frame_ , inlined_frame_index_ ) ; <nl> case ScopeIterator : : ScopeTypeWith : <nl> / / Return the with object . <nl> class ScopeIterator { <nl> return Handle < JSObject > ( ) ; <nl> } <nl> <nl> - Handle < SerializedScopeInfo > CurrentScopeInfo ( ) { <nl> - if ( ! nested_scope_chain_ . is_empty ( ) ) { <nl> - return nested_scope_chain_ . last ( ) ; <nl> - } else if ( context_ - > IsBlockContext ( ) ) { <nl> - return Handle < SerializedScopeInfo > ( <nl> - SerializedScopeInfo : : cast ( context_ - > extension ( ) ) ) ; <nl> - } else if ( context_ - > IsFunctionContext ( ) ) { <nl> - return Handle < SerializedScopeInfo > ( <nl> - context_ - > closure ( ) - > shared ( ) - > scope_info ( ) ) ; <nl> - } <nl> - return Handle < SerializedScopeInfo > : : null ( ) ; <nl> - } <nl> - <nl> / / Return the context for this scope . For the local context there might not <nl> / / be an actual context . <nl> Handle < Context > CurrentContext ( ) { <nl> - if ( Type ( ) = = ScopeTypeGlobal | | <nl> - nested_scope_chain_ . is_empty ( ) ) { <nl> - return context_ ; <nl> - } else if ( nested_scope_chain_ . 
last ( ) - > HasContext ( ) ) { <nl> - return context_ ; <nl> - } else { <nl> + if ( at_local_ & & context_ - > closure ( ) ! = * function_ ) { <nl> return Handle < Context > ( ) ; <nl> } <nl> + return context_ ; <nl> } <nl> <nl> # ifdef DEBUG <nl> class ScopeIterator { <nl> int inlined_frame_index_ ; <nl> Handle < JSFunction > function_ ; <nl> Handle < Context > context_ ; <nl> - List < Handle < SerializedScopeInfo > > nested_scope_chain_ ; <nl> + bool local_done_ ; <nl> + bool at_local_ ; <nl> <nl> DISALLOW_IMPLICIT_CONSTRUCTORS ( ScopeIterator ) ; <nl> } ; <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_ClearStepping ) { <nl> <nl> / / Creates a copy of the with context chain . The copy of the context chain is <nl> / / is linked to the function context supplied . <nl> - static Handle < Context > CopyNestedScopeContextChain ( Isolate * isolate , <nl> - Handle < JSFunction > function , <nl> - Handle < Context > base , <nl> - JavaScriptFrame * frame , <nl> - int inlined_frame_index ) { <nl> - HandleScope scope ( isolate ) ; <nl> - List < Handle < SerializedScopeInfo > > scope_chain ; <nl> - List < Handle < Context > > context_chain ; <nl> - <nl> - ScopeIterator it ( isolate , frame , inlined_frame_index ) ; <nl> - for ( ; it . Type ( ) ! = ScopeIterator : : ScopeTypeGlobal & & <nl> - it . Type ( ) ! = ScopeIterator : : ScopeTypeLocal ; it . Next ( ) ) { <nl> - ASSERT ( ! it . Done ( ) ) ; <nl> - scope_chain . Add ( it . CurrentScopeInfo ( ) ) ; <nl> - context_chain . Add ( it . CurrentContext ( ) ) ; <nl> + static Handle < Context > CopyWithContextChain ( Isolate * isolate , <nl> + Handle < JSFunction > function , <nl> + Handle < Context > current , <nl> + Handle < Context > base ) { <nl> + / / At the end of the chain . Return the base context to link to . <nl> + if ( current - > IsFunctionContext ( ) | | current - > IsGlobalContext ( ) ) { <nl> + return base ; <nl> } <nl> <nl> - / / At the end of the chain . Return the base context to link to . <nl> - Handle < Context > context = base ; <nl> - <nl> - / / Iteratively copy and or materialize the nested contexts . <nl> - while ( ! scope_chain . is_empty ( ) ) { <nl> - Handle < SerializedScopeInfo > scope_info = scope_chain . RemoveLast ( ) ; <nl> - Handle < Context > current = context_chain . RemoveLast ( ) ; <nl> - ASSERT ( ! ( scope_info - > HasContext ( ) & current . is_null ( ) ) ) ; <nl> - <nl> - if ( scope_info - > Type ( ) = = CATCH_SCOPE ) { <nl> - Handle < String > name ( String : : cast ( current - > extension ( ) ) ) ; <nl> - Handle < Object > thrown_object ( current - > get ( Context : : THROWN_OBJECT_INDEX ) ) ; <nl> - context = <nl> - isolate - > factory ( ) - > NewCatchContext ( function , <nl> - context , <nl> - name , <nl> - thrown_object ) ; <nl> - } else if ( scope_info - > Type ( ) = = BLOCK_SCOPE ) { <nl> - / / Materialize the contents of the block scope into a JSObject . <nl> - Handle < JSObject > block_scope_object = <nl> - MaterializeBlockScope ( isolate , current ) ; <nl> - if ( block_scope_object . is_null ( ) ) { <nl> - return Handle < Context > : : null ( ) ; <nl> - } <nl> - / / Allocate a new function context for the debug evaluation and set the <nl> - / / extension object . 
<nl> - Handle < Context > new_context = <nl> - isolate - > factory ( ) - > NewFunctionContext ( Context : : MIN_CONTEXT_SLOTS , <nl> - function ) ; <nl> - new_context - > set_extension ( * block_scope_object ) ; <nl> - new_context - > set_previous ( * context ) ; <nl> - context = new_context ; <nl> - } else { <nl> - ASSERT ( scope_info - > Type ( ) = = WITH_SCOPE ) ; <nl> - ASSERT ( current - > IsWithContext ( ) ) ; <nl> - Handle < JSObject > extension ( JSObject : : cast ( current - > extension ( ) ) ) ; <nl> - context = <nl> - isolate - > factory ( ) - > NewWithContext ( function , context , extension ) ; <nl> + / / Recursively copy the with and catch contexts . <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < Context > previous ( current - > previous ( ) ) ; <nl> + Handle < Context > new_previous = <nl> + CopyWithContextChain ( isolate , function , previous , base ) ; <nl> + Handle < Context > new_current ; <nl> + if ( current - > IsCatchContext ( ) ) { <nl> + Handle < String > name ( String : : cast ( current - > extension ( ) ) ) ; <nl> + Handle < Object > thrown_object ( current - > get ( Context : : THROWN_OBJECT_INDEX ) ) ; <nl> + new_current = <nl> + isolate - > factory ( ) - > NewCatchContext ( function , <nl> + new_previous , <nl> + name , <nl> + thrown_object ) ; <nl> + } else if ( current - > IsBlockContext ( ) ) { <nl> + Handle < SerializedScopeInfo > scope_info ( <nl> + SerializedScopeInfo : : cast ( current - > extension ( ) ) ) ; <nl> + new_current = <nl> + isolate - > factory ( ) - > NewBlockContext ( function , new_previous , scope_info ) ; <nl> + / / Copy context slots . <nl> + int num_context_slots = scope_info - > NumberOfContextSlots ( ) ; <nl> + for ( int i = Context : : MIN_CONTEXT_SLOTS ; i < num_context_slots ; + + i ) { <nl> + new_current - > set ( i , current - > get ( i ) ) ; <nl> } <nl> + } else { <nl> + ASSERT ( current - > IsWithContext ( ) ) ; <nl> + Handle < JSObject > extension ( JSObject : : cast ( current - > extension ( ) ) ) ; <nl> + new_current = <nl> + isolate - > factory ( ) - > NewWithContext ( function , new_previous , extension ) ; <nl> } <nl> - <nl> - return scope . CloseAndEscape ( context ) ; <nl> + return scope . CloseAndEscape ( new_current ) ; <nl> } <nl> <nl> <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_DebugEvaluate ) { <nl> if ( scope_info - > HasHeapAllocatedLocals ( ) ) { <nl> function_context = Handle < Context > ( frame_context - > declaration_context ( ) ) ; <nl> } <nl> - context = CopyNestedScopeContextChain ( isolate , <nl> - go_between , <nl> - context , <nl> - frame , <nl> - inlined_frame_index ) ; <nl> + context = CopyWithContextChain ( isolate , go_between , frame_context , context ) ; <nl> <nl> if ( additional_context - > IsJSObject ( ) ) { <nl> Handle < JSObject > extension = Handle < JSObject > : : cast ( additional_context ) ; <nl> mmm a / src / scopeinfo . cc <nl> ppp b / src / scopeinfo . 
cc <nl> ScopeInfo < Allocator > : : ScopeInfo ( Scope * scope ) <nl> : function_name_ ( FACTORY - > empty_symbol ( ) ) , <nl> calls_eval_ ( scope - > calls_eval ( ) ) , <nl> is_strict_mode_ ( scope - > is_strict_mode ( ) ) , <nl> - type_ ( scope - > type ( ) ) , <nl> parameters_ ( scope - > num_parameters ( ) ) , <nl> stack_slots_ ( scope - > num_stack_slots ( ) ) , <nl> context_slots_ ( scope - > num_heap_slots ( ) ) , <nl> ScopeInfo < Allocator > : : ScopeInfo ( Scope * scope ) <nl> / / <nl> / / - calls eval boolean flag <nl> / / <nl> - / / - is strict mode scope <nl> - / / <nl> - / / - scope type <nl> - / / <nl> / / - number of variables in the context object ( smi ) ( = function context <nl> / / slot index + 1 ) <nl> / / - list of pairs ( name , Var mode ) of context - allocated variables ( starting <nl> ScopeInfo < Allocator > : : ScopeInfo ( Scope * scope ) <nl> / / present ) <nl> <nl> <nl> - template < class T > <nl> - static inline Object * * ReadInt ( Object * * p , T * x ) { <nl> - * x = static_cast < T > ( ( reinterpret_cast < Smi * > ( * p + + ) ) - > value ( ) ) ; <nl> + static inline Object * * ReadInt ( Object * * p , int * x ) { <nl> + * x = ( reinterpret_cast < Smi * > ( * p + + ) ) - > value ( ) ; <nl> return p ; <nl> } <nl> <nl> static inline Object * * ReadBool ( Object * * p , bool * x ) { <nl> } <nl> <nl> <nl> - template < class T > <nl> - static inline Object * * ReadObject ( Object * * p , Handle < T > * s ) { <nl> - * s = Handle < T > : : cast ( Handle < Object > ( * p + + ) ) ; <nl> + static inline Object * * ReadSymbol ( Object * * p , Handle < String > * s ) { <nl> + * s = Handle < String > ( reinterpret_cast < String * > ( * p + + ) ) ; <nl> return p ; <nl> } <nl> <nl> <nl> - template < class Allocator , class T > <nl> - static Object * * ReadList ( Object * * p , List < Handle < T > , Allocator > * list ) { <nl> + template < class Allocator > <nl> + static Object * * ReadList ( Object * * p , List < Handle < String > , Allocator > * list ) { <nl> ASSERT ( list - > is_empty ( ) ) ; <nl> int n ; <nl> p = ReadInt ( p , & n ) ; <nl> while ( n - - > 0 ) { <nl> - Handle < T > s ; <nl> - p = ReadObject ( p , & s ) ; <nl> + Handle < String > s ; <nl> + p = ReadSymbol ( p , & s ) ; <nl> list - > Add ( s ) ; <nl> } <nl> return p ; <nl> static Object * * ReadList ( Object * * p , <nl> while ( n - - > 0 ) { <nl> Handle < String > s ; <nl> int m ; <nl> - p = ReadObject ( p , & s ) ; <nl> + p = ReadSymbol ( p , & s ) ; <nl> p = ReadInt ( p , & m ) ; <nl> list - > Add ( s ) ; <nl> modes - > Add ( static_cast < VariableMode > ( m ) ) ; <nl> ScopeInfo < Allocator > : : ScopeInfo ( SerializedScopeInfo * data ) <nl> if ( data - > length ( ) > 0 ) { <nl> Object * * p0 = data - > data_start ( ) ; <nl> Object * * p = p0 ; <nl> - p = ReadObject ( p , & function_name_ ) ; <nl> + p = ReadSymbol ( p , & function_name_ ) ; <nl> p = ReadBool ( p , & calls_eval_ ) ; <nl> p = ReadBool ( p , & is_strict_mode_ ) ; <nl> - p = ReadInt ( p , & type_ ) ; <nl> p = ReadList < Allocator > ( p , & context_slots_ , & context_modes_ ) ; <nl> p = ReadList < Allocator > ( p , & parameters_ ) ; <nl> p = ReadList < Allocator > ( p , & stack_slots_ ) ; <nl> static inline Object * * WriteBool ( Object * * p , bool b ) { <nl> } <nl> <nl> <nl> - template < class T > <nl> - static inline Object * * WriteObject ( Object * * p , Handle < T > s ) { <nl> + static inline Object * * WriteSymbol ( Object * * p , Handle < String > s ) { <nl> * p + + = * s ; <nl> return p ; <nl> } <nl> <nl> <nl> - template < class 
Allocator , class T > <nl> - static Object * * WriteList ( Object * * p , List < Handle < T > , Allocator > * list ) { <nl> + template < class Allocator > <nl> + static Object * * WriteList ( Object * * p , List < Handle < String > , Allocator > * list ) { <nl> const int n = list - > length ( ) ; <nl> p = WriteInt ( p , n ) ; <nl> for ( int i = 0 ; i < n ; i + + ) { <nl> - p = WriteObject ( p , list - > at ( i ) ) ; <nl> + p = WriteSymbol ( p , list - > at ( i ) ) ; <nl> } <nl> return p ; <nl> } <nl> static Object * * WriteList ( Object * * p , <nl> const int n = list - > length ( ) ; <nl> p = WriteInt ( p , n ) ; <nl> for ( int i = 0 ; i < n ; i + + ) { <nl> - p = WriteObject ( p , list - > at ( i ) ) ; <nl> + p = WriteSymbol ( p , list - > at ( i ) ) ; <nl> p = WriteInt ( p , modes - > at ( i ) ) ; <nl> } <nl> return p ; <nl> static Object * * WriteList ( Object * * p , <nl> <nl> template < class Allocator > <nl> Handle < SerializedScopeInfo > ScopeInfo < Allocator > : : Serialize ( ) { <nl> - / / function name , calls eval , is_strict_mode , scope type , <nl> - / / length for 3 tables : <nl> - const int extra_slots = 1 + 1 + 1 + 1 + 3 ; <nl> + / / function name , calls eval , is_strict_mode , length for 3 tables : <nl> + const int extra_slots = 1 + 1 + 1 + 3 ; <nl> int length = extra_slots + <nl> context_slots_ . length ( ) * 2 + <nl> parameters_ . length ( ) + <nl> Handle < SerializedScopeInfo > ScopeInfo < Allocator > : : Serialize ( ) { <nl> <nl> Object * * p0 = data - > data_start ( ) ; <nl> Object * * p = p0 ; <nl> - p = WriteObject ( p , function_name_ ) ; <nl> + p = WriteSymbol ( p , function_name_ ) ; <nl> p = WriteBool ( p , calls_eval_ ) ; <nl> p = WriteBool ( p , is_strict_mode_ ) ; <nl> - p = WriteInt ( p , type_ ) ; <nl> p = WriteList ( p , & context_slots_ , & context_modes_ ) ; <nl> p = WriteList ( p , & parameters_ ) ; <nl> p = WriteList ( p , & stack_slots_ ) ; <nl> SerializedScopeInfo * SerializedScopeInfo : : Empty ( ) { <nl> <nl> Object * * SerializedScopeInfo : : ContextEntriesAddr ( ) { <nl> ASSERT ( length ( ) > 0 ) ; <nl> - / / + 4 for function name , calls eval , strict mode , scope type . <nl> - return data_start ( ) + 4 ; <nl> + / / + 3 for function name , calls eval , strict mode . <nl> + return data_start ( ) + 3 ; <nl> } <nl> <nl> <nl> bool SerializedScopeInfo : : IsStrictMode ( ) { <nl> } <nl> <nl> <nl> - ScopeType SerializedScopeInfo : : Type ( ) { <nl> - ASSERT ( length ( ) > 0 ) ; <nl> - / / + 3 for function name , calls eval , strict mode . <nl> - Object * * p = data_start ( ) + 3 ; <nl> - ScopeType type ; <nl> - p = ReadInt ( p , & type ) ; <nl> - return type ; <nl> - } <nl> - <nl> - <nl> int SerializedScopeInfo : : NumberOfStackSlots ( ) { <nl> if ( length ( ) > 0 ) { <nl> Object * * p = StackSlotEntriesAddr ( ) ; <nl> bool SerializedScopeInfo : : HasHeapAllocatedLocals ( ) { <nl> } <nl> <nl> <nl> - bool SerializedScopeInfo : : HasContext ( ) { <nl> - return HasHeapAllocatedLocals ( ) | | <nl> - Type ( ) = = WITH_SCOPE ; <nl> - } <nl> - <nl> - <nl> int SerializedScopeInfo : : StackSlotIndex ( String * name ) { <nl> ASSERT ( name - > IsSymbol ( ) ) ; <nl> if ( length ( ) > 0 ) { <nl> mmm a / src / scopeinfo . h <nl> ppp b / src / scopeinfo . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - / / ScopeInfo represents information about different scopes of a source <nl> - / / program and the allocation of the scope ' s variables . 
Scope information <nl> - / / is stored in a compressed form in SerializedScopeInfo objects and is used <nl> + / / Scope information represents information about a functions ' s <nl> + / / scopes ( currently only one , because we don ' t do any inlining ) <nl> + / / and the allocation of the scope ' s variables . Scope information <nl> + / / is stored in a compressed form in FixedArray objects and is used <nl> / / at runtime ( stack dumps , deoptimization , etc . ) . <nl> + / / <nl> + / / Historical note : In other VMs built by this team , ScopeInfo was <nl> + / / usually called DebugInfo since the information was used ( among <nl> + / / other things ) for on - demand debugging ( Self , Smalltalk ) . However , <nl> + / / DebugInfo seems misleading , since this information is primarily used <nl> + / / in debugging - unrelated contexts . <nl> <nl> / / Forward defined as <nl> / / template < class Allocator = FreeStoreAllocationPolicy > class ScopeInfo ; <nl> class ScopeInfo BASE_EMBEDDED { <nl> Handle < String > LocalName ( int i ) const ; <nl> int NumberOfLocals ( ) const ; <nl> <nl> - ScopeType type ( ) const { return type_ ; } <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Debugging support <nl> <nl> class ScopeInfo BASE_EMBEDDED { <nl> Handle < String > function_name_ ; <nl> bool calls_eval_ ; <nl> bool is_strict_mode_ ; <nl> - ScopeType type_ ; <nl> List < Handle < String > , Allocator > parameters_ ; <nl> List < Handle < String > , Allocator > stack_slots_ ; <nl> List < Handle < String > , Allocator > context_slots_ ; <nl> mmm a / src / scopes . cc <nl> ppp b / src / scopes . cc <nl> Variable * VariableMap : : Lookup ( Handle < String > name ) { <nl> <nl> <nl> / / Dummy constructor <nl> - Scope : : Scope ( ScopeType type ) <nl> + Scope : : Scope ( Type type ) <nl> : isolate_ ( Isolate : : Current ( ) ) , <nl> inner_scopes_ ( 0 ) , <nl> variables_ ( false ) , <nl> Scope : : Scope ( ScopeType type ) <nl> } <nl> <nl> <nl> - Scope : : Scope ( Scope * outer_scope , ScopeType type ) <nl> + Scope : : Scope ( Scope * outer_scope , Type type ) <nl> : isolate_ ( Isolate : : Current ( ) ) , <nl> inner_scopes_ ( 4 ) , <nl> variables_ ( ) , <nl> Scope : : Scope ( Scope * outer_scope , ScopeType type ) <nl> <nl> <nl> Scope : : Scope ( Scope * inner_scope , <nl> - ScopeType type , <nl> + Type type , <nl> Handle < SerializedScopeInfo > scope_info ) <nl> : isolate_ ( Isolate : : Current ( ) ) , <nl> inner_scopes_ ( 4 ) , <nl> Scope : : Scope ( Scope * inner_scope , Handle < String > catch_variable_name ) <nl> } <nl> <nl> <nl> - void Scope : : SetDefaults ( ScopeType type , <nl> + void Scope : : SetDefaults ( Type type , <nl> Scope * outer_scope , <nl> Handle < SerializedScopeInfo > scope_info ) { <nl> outer_scope_ = outer_scope ; <nl> void Scope : : SetDefaults ( ScopeType type , <nl> num_stack_slots_ = 0 ; <nl> num_heap_slots_ = 0 ; <nl> scope_info_ = scope_info ; <nl> - start_position_ = RelocInfo : : kNoPosition ; <nl> - end_position_ = RelocInfo : : kNoPosition ; <nl> } <nl> <nl> <nl> Handle < SerializedScopeInfo > Scope : : GetSerializedScopeInfo ( ) { <nl> } <nl> <nl> <nl> - void Scope : : GetNestedScopeChain ( <nl> - List < Handle < SerializedScopeInfo > > * chain , <nl> - int position ) { <nl> - chain - > Add ( Handle < SerializedScopeInfo > ( GetSerializedScopeInfo ( ) ) ) ; <nl> - <nl> - for ( int i = 0 ; i < inner_scopes_ . 
length ( ) ; i + + ) { <nl> - Scope * scope = inner_scopes_ [ i ] ; <nl> - int beg_pos = scope - > start_position ( ) ; <nl> - int end_pos = scope - > end_position ( ) ; <nl> - ASSERT ( beg_pos > = 0 & & end_pos > = 0 ) ; <nl> - if ( beg_pos < = position & & position < = end_pos ) { <nl> - scope - > GetNestedScopeChain ( chain , position ) ; <nl> - return ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - <nl> # ifdef DEBUG <nl> - static const char * Header ( ScopeType type ) { <nl> + static const char * Header ( Scope : : Type type ) { <nl> switch ( type ) { <nl> - case EVAL_SCOPE : return " eval " ; <nl> - case FUNCTION_SCOPE : return " function " ; <nl> - case GLOBAL_SCOPE : return " global " ; <nl> - case CATCH_SCOPE : return " catch " ; <nl> - case BLOCK_SCOPE : return " block " ; <nl> - case WITH_SCOPE : return " with " ; <nl> + case Scope : : EVAL_SCOPE : return " eval " ; <nl> + case Scope : : FUNCTION_SCOPE : return " function " ; <nl> + case Scope : : GLOBAL_SCOPE : return " global " ; <nl> + case Scope : : CATCH_SCOPE : return " catch " ; <nl> + case Scope : : BLOCK_SCOPE : return " block " ; <nl> + case Scope : : WITH_SCOPE : return " with " ; <nl> } <nl> UNREACHABLE ( ) ; <nl> return NULL ; <nl> mmm a / src / scopes . h <nl> ppp b / src / scopes . h <nl> class Scope : public ZoneObject { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Construction <nl> <nl> - Scope ( Scope * outer_scope , ScopeType type ) ; <nl> + enum Type { <nl> + EVAL_SCOPE , / / The top - level scope for an eval source . <nl> + FUNCTION_SCOPE , / / The top - level scope for a function . <nl> + GLOBAL_SCOPE , / / The top - level scope for a program or a top - level eval . <nl> + CATCH_SCOPE , / / The scope introduced by catch . <nl> + BLOCK_SCOPE , / / The scope introduced by a new block . <nl> + WITH_SCOPE / / The scope introduced by with . <nl> + } ; <nl> + <nl> + Scope ( Scope * outer_scope , Type type ) ; <nl> <nl> / / Compute top scope and allocate variables . For lazy compilation the top <nl> / / scope only contains the single lazily compiled function , so this <nl> class Scope : public ZoneObject { <nl> strict_mode_ = FLAG_strict_mode ; <nl> } <nl> <nl> - / / Position in the source where this scope begins and ends . <nl> - / / <nl> - / / * For the scope of a with statement <nl> - / / with ( obj ) stmt <nl> - / / start position : start position of first token of ' stmt ' <nl> - / / end position : end position of last token of ' stmt ' <nl> - / / * For the scope of a block <nl> - / / { stmts } <nl> - / / start position : start position of ' { ' <nl> - / / end position : end position of ' } ' <nl> - / / * For the scope of a function literal or decalaration <nl> - / / function fun ( a , b ) { stmts } <nl> - / / start position : start position of ' ( ' <nl> - / / end position : end position of ' } ' <nl> - / / * For the scope of a catch block <nl> - / / try { stms } catch ( e ) { stmts } <nl> - / / start position : start position of ' ( ' <nl> - / / end position : end position of ' ) ' <nl> - / / * For the scope of a for - statement <nl> - / / for ( let x . . . 
) stmt <nl> - / / start position : start position of ' ( ' <nl> - / / end position : end position of last token of ' stmt ' <nl> - int start_position ( ) const { return start_position_ ; } <nl> - void set_start_position ( int statement_pos ) { <nl> - start_position_ = statement_pos ; <nl> - } <nl> - int end_position ( ) const { return end_position_ ; } <nl> - void set_end_position ( int statement_pos ) { <nl> - end_position_ = statement_pos ; <nl> - } <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Predicates . <nl> <nl> class Scope : public ZoneObject { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Accessors . <nl> <nl> - / / The type of this scope . <nl> - ScopeType type ( ) const { return type_ ; } <nl> - <nl> / / The variable corresponding the ' this ' value . <nl> Variable * receiver ( ) { return receiver_ ; } <nl> <nl> class Scope : public ZoneObject { <nl> / / Declarations list . <nl> ZoneList < Declaration * > * declarations ( ) { return & decls_ ; } <nl> <nl> - / / Inner scope list . <nl> - ZoneList < Scope * > * inner_scopes ( ) { return & inner_scopes_ ; } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Variable allocation . <nl> class Scope : public ZoneObject { <nl> <nl> Handle < SerializedScopeInfo > GetSerializedScopeInfo ( ) ; <nl> <nl> - / / Get the chain of nested scopes within this scope for the source statement <nl> - / / position . The scopes will be added to the list from the outermost scope to <nl> - / / the innermost scope . Only nested block , catch or with scopes are tracked <nl> - / / and will be returned , but no inner function scopes . <nl> - void GetNestedScopeChain ( List < Handle < SerializedScopeInfo > > * chain , <nl> - int statement_position ) ; <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Strict mode support . <nl> bool IsDeclared ( Handle < String > name ) { <nl> class Scope : public ZoneObject { <nl> protected : <nl> friend class ParserFactory ; <nl> <nl> - explicit Scope ( ScopeType type ) ; <nl> + explicit Scope ( Type type ) ; <nl> <nl> Isolate * const isolate_ ; <nl> <nl> class Scope : public ZoneObject { <nl> ZoneList < Scope * > inner_scopes_ ; / / the immediately enclosed inner scopes <nl> <nl> / / The scope type . <nl> - ScopeType type_ ; <nl> + Type type_ ; <nl> <nl> / / Debugging support . <nl> Handle < String > scope_name_ ; <nl> class Scope : public ZoneObject { <nl> bool scope_calls_eval_ ; <nl> / / This scope is a strict mode scope . <nl> bool strict_mode_ ; <nl> - / / Source positions . <nl> - int start_position_ ; <nl> - int end_position_ ; <nl> <nl> / / Computed via PropagateScopeInfo . <nl> bool outer_scope_calls_non_strict_eval_ ; <nl> class Scope : public ZoneObject { <nl> <nl> private : <nl> / / Construct a scope based on the scope info . <nl> - Scope ( Scope * inner_scope , <nl> - ScopeType type , <nl> - Handle < SerializedScopeInfo > scope_info ) ; <nl> + Scope ( Scope * inner_scope , Type type , Handle < SerializedScopeInfo > scope_info ) ; <nl> <nl> / / Construct a catch scope with a binding for the name . 
<nl> Scope ( Scope * inner_scope , Handle < String > catch_variable_name ) ; <nl> class Scope : public ZoneObject { <nl> } <nl> } <nl> <nl> - void SetDefaults ( ScopeType type , <nl> + void SetDefaults ( Type type , <nl> Scope * outer_scope , <nl> Handle < SerializedScopeInfo > scope_info ) ; <nl> } ; <nl> mmm a / src / v8globals . h <nl> ppp b / src / v8globals . h <nl> enum CallKind { <nl> } ; <nl> <nl> <nl> - enum ScopeType { <nl> - EVAL_SCOPE , / / The top - level scope for an eval source . <nl> - FUNCTION_SCOPE , / / The top - level scope for a function . <nl> - GLOBAL_SCOPE , / / The top - level scope for a program or a top - level eval . <nl> - CATCH_SCOPE , / / The scope introduced by catch . <nl> - BLOCK_SCOPE , / / The scope introduced by a new block . <nl> - WITH_SCOPE / / The scope introduced by with . <nl> - } ; <nl> - <nl> - <nl> static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF ; <nl> static const uint32_t kHoleNanLower32 = 0xFFFFFFFF ; <nl> static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000 ; <nl> mmm a / test / cctest / test - parsing . cc <nl> ppp b / test / cctest / test - parsing . cc <nl> TEST ( RegExpScanning ) { <nl> TestScanRegExp ( " / = / " , " = " ) ; <nl> TestScanRegExp ( " / = ? / " , " = ? " ) ; <nl> } <nl> - <nl> - <nl> - TEST ( ScopePositions ) { <nl> - / / Test the parser for correctly setting the start and end positions <nl> - / / of a scope . We check the scope positions of exactly one scope <nl> - / / nested in the global scope of a program . ' inner source ' is the <nl> - / / source code that determines the part of the source belonging <nl> - / / to the nested scope . ' outer_prefix ' and ' outer_suffix ' are <nl> - / / parts of the source that belong to the global scope . <nl> - struct SourceData { <nl> - const char * outer_prefix ; <nl> - const char * inner_source ; <nl> - const char * outer_suffix ; <nl> - i : : ScopeType scope_type ; <nl> - } ; <nl> - <nl> - const SourceData source_data [ ] = { <nl> - { " with ( { } ) " , " { block ; } " , " more ; " , i : : WITH_SCOPE } , <nl> - { " with ( { } ) " , " { block ; } " , " ; more ; " , i : : WITH_SCOPE } , <nl> - { " with ( { } ) " , " { \ n " <nl> - " block ; \ n " <nl> - " } " , " \ n " <nl> - " more ; " , i : : WITH_SCOPE } , <nl> - { " with ( { } ) " , " statement ; " , " more ; " , i : : WITH_SCOPE } , <nl> - { " with ( { } ) " , " statement " , " \ n " <nl> - " more ; " , i : : WITH_SCOPE } , <nl> - { " with ( { } ) \ n " <nl> - " " , " statement ; " , " \ n " <nl> - " more ; " , i : : WITH_SCOPE } , <nl> - { " try { } catch " , " ( e ) { block ; } " , " more ; " , i : : CATCH_SCOPE } , <nl> - { " try { } catch " , " ( e ) { block ; } " , " ; more ; " , i : : CATCH_SCOPE } , <nl> - { " try { } catch " , " ( e ) { \ n " <nl> - " block ; \ n " <nl> - " } " , " \ n " <nl> - " more ; " , i : : CATCH_SCOPE } , <nl> - { " try { } catch " , " ( e ) { block ; } " , " finally { block ; } more ; " , <nl> - i : : CATCH_SCOPE } , <nl> - { " start ; \ n " <nl> - " " , " { let block ; } " , " more ; " , i : : BLOCK_SCOPE } , <nl> - { " start ; \ n " <nl> - " " , " { let block ; } " , " ; more ; " , i : : BLOCK_SCOPE } , <nl> - { " start ; \ n " <nl> - " " , " { \ n " <nl> - " let block ; \ n " <nl> - " } " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { " start ; \ n " <nl> - " function fun " , " ( a , b ) { infunction ; } " , " more ; " , <nl> - i : : FUNCTION_SCOPE } , <nl> - { " start ; \ n " <nl> - " function fun " , " ( a , b ) { \ n " <nl> - " infunction ; \ n " <nl> - " } " 
, " \ n " <nl> - " more ; " , i : : FUNCTION_SCOPE } , <nl> - { " ( function fun " , " ( a , b ) { infunction ; } " , " ) ( ) ; " , <nl> - i : : FUNCTION_SCOPE } , <nl> - { " for " , " ( let x = 1 ; x < 10 ; + + x ) { block ; } " , " more ; " , <nl> - i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x = 1 ; x < 10 ; + + x ) { block ; } " , " ; more ; " , <nl> - i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x = 1 ; x < 10 ; + + x ) { \ n " <nl> - " block ; \ n " <nl> - " } " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x = 1 ; x < 10 ; + + x ) statement ; " , " more ; " , <nl> - i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x = 1 ; x < 10 ; + + x ) statement " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x = 1 ; x < 10 ; + + x ) \ n " <nl> - " statement ; " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x in { } ) { block ; } " , " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x in { } ) { block ; } " , " ; more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x in { } ) { \ n " <nl> - " block ; \ n " <nl> - " } " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x in { } ) statement ; " , " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x in { } ) statement " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { " for " , " ( let x in { } ) \ n " <nl> - " statement ; " , " \ n " <nl> - " more ; " , i : : BLOCK_SCOPE } , <nl> - { NULL , NULL , NULL , i : : EVAL_SCOPE } <nl> - } ; <nl> - <nl> - v8 : : HandleScope handles ; <nl> - v8 : : Persistent < v8 : : Context > context = v8 : : Context : : New ( ) ; <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - <nl> - int marker ; <nl> - i : : Isolate : : Current ( ) - > stack_guard ( ) - > SetStackLimit ( <nl> - reinterpret_cast < uintptr_t > ( & marker ) - 128 * 1024 ) ; <nl> - <nl> - for ( int i = 0 ; source_data [ i ] . outer_prefix ; i + + ) { <nl> - int kPrefixLen = i : : StrLength ( source_data [ i ] . outer_prefix ) ; <nl> - int kInnerLen = i : : StrLength ( source_data [ i ] . inner_source ) ; <nl> - int kSuffixLen = i : : StrLength ( source_data [ i ] . outer_suffix ) ; <nl> - int kProgramSize = kPrefixLen + kInnerLen + kSuffixLen ; <nl> - i : : Vector < char > program = i : : Vector < char > : : New ( kProgramSize + 1 ) ; <nl> - int length ; <nl> - length = i : : OS : : SNPrintF ( program , " % s % s % s " , <nl> - source_data [ i ] . outer_prefix , <nl> - source_data [ i ] . inner_source , <nl> - source_data [ i ] . outer_suffix ) ; <nl> - ASSERT ( length = = kProgramSize ) ; <nl> - <nl> - / / Parse program source . <nl> - i : : Handle < i : : String > source ( <nl> - FACTORY - > NewStringFromAscii ( i : : CStrVector ( program . start ( ) ) ) ) ; <nl> - i : : Handle < i : : Script > script = FACTORY - > NewScript ( source ) ; <nl> - i : : Parser parser ( script , false , NULL , NULL ) ; <nl> - parser . SetHarmonyScoping ( true ) ; <nl> - i : : FunctionLiteral * function = <nl> - parser . ParseProgram ( source , true , i : : kNonStrictMode ) ; <nl> - ASSERT ( function ! = NULL ) ; <nl> - <nl> - / / Check scope types and positions . 
<nl> - i : : Scope * scope = function - > scope ( ) ; <nl> - CHECK ( scope - > is_global_scope ( ) ) ; <nl> - CHECK_EQ ( scope - > start_position ( ) , 0 ) ; <nl> - CHECK_EQ ( scope - > end_position ( ) , kProgramSize ) ; <nl> - CHECK_EQ ( scope - > inner_scopes ( ) - > length ( ) , 1 ) ; <nl> - <nl> - i : : Scope * inner_scope = scope - > inner_scopes ( ) - > at ( 0 ) ; <nl> - CHECK_EQ ( inner_scope - > type ( ) , source_data [ i ] . scope_type ) ; <nl> - CHECK_EQ ( inner_scope - > start_position ( ) , kPrefixLen ) ; <nl> - / / The end position of a token is one position after the last <nl> - / / character belonging to that token . <nl> - CHECK_EQ ( inner_scope - > end_position ( ) , kPrefixLen + kInnerLen ) ; <nl> - } <nl> - } <nl> mmm a / test / mjsunit / debug - scopes . js <nl> ppp b / test / mjsunit / debug - scopes . js <nl> <nl> - / / Copyright 2011 the V8 project authors . All rights reserved . <nl> + / / Copyright 2008 the V8 project authors . All rights reserved . <nl> / / Redistribution and use in source and binary forms , with or without <nl> / / modification , are permitted provided that the following conditions are <nl> / / met : <nl> <nl> / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> <nl> - / / Flags : - - expose - debug - as debug - - allow - natives - syntax <nl> + / / Flags : - - expose - debug - as debug <nl> / / The functions used for testing backtraces . They are at the top to make the <nl> / / testing of source line / column easier . <nl> <nl> with ( with_object ) { <nl> EndTest ( ) ; <nl> <nl> <nl> - / / With block in function that is marked for optimization while being executed . <nl> - BeginTest ( " With 7 " ) ; <nl> - <nl> - function with_7 ( ) { <nl> - with ( { } ) { <nl> - % OptimizeFunctionOnNextCall ( with_7 ) ; <nl> - debugger ; <nl> - } <nl> - } <nl> - <nl> - listener_delegate = function ( exec_state ) { <nl> - CheckScopeChain ( [ debug . ScopeType . With , <nl> - debug . ScopeType . Local , <nl> - debug . ScopeType . Global ] , exec_state ) ; <nl> - CheckScopeContent ( { } , 0 , exec_state ) ; <nl> - } ; <nl> - with_7 ( ) ; <nl> - EndTest ( ) ; <nl> - <nl> - <nl> / / Simple closure formed by returning an inner function referering the outer <nl> / / functions arguments . <nl> BeginTest ( " Closure 1 " ) ; <nl> try { <nl> EndTest ( ) ; <nl> <nl> <nl> - / / Catch block in function that is marked for optimization while being executed . <nl> - BeginTest ( " Catch block 7 " ) ; <nl> - function catch_block_7 ( ) { <nl> - % OptimizeFunctionOnNextCall ( catch_block_7 ) ; <nl> - try { <nl> - throw ' Exception ' ; <nl> - } catch ( e ) { <nl> - debugger ; <nl> - } <nl> - } ; <nl> - <nl> - <nl> - listener_delegate = function ( exec_state ) { <nl> - CheckScopeChain ( [ debug . ScopeType . Catch , <nl> - debug . ScopeType . Local , <nl> - debug . ScopeType . Global ] , exec_state ) ; <nl> - CheckScopeContent ( { e : ' Exception ' } , 0 , exec_state ) ; <nl> - } ; <nl> - catch_block_7 ( ) ; <nl> - EndTest ( ) ; <nl> - <nl> - <nl> assertEquals ( begin_test_count , break_count , <nl> ' one or more tests did not enter the debugger ' ) ; <nl> assertEquals ( begin_test_count , end_test_count , <nl>
|
Revert 9673 , 9674 and 9675 because of failing webkit tests .
|
v8/v8
|
17cc6d313f1bc041d4199b02655959ecdd1abfe5
|
2011-10-19T12:15:02Z
|
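For context on the row above: the commit reverts V8 r9673-r9675, which had recorded source start/end positions on each Scope (Scope::set_start_position / set_end_position), serialized the scope type into SerializedScopeInfo, and let the debugger's ScopeIterator reparse a function and recover the chain of nested block/with/catch scopes enclosing the current statement position (the removed Scope::GetNestedScopeChain). The following is a minimal standalone C++ sketch of that reverted position-tracking idea; the names and layout here are invented for illustration and are not V8's actual classes.

// Sketch (not V8 code): each scope remembers the source range of its
// first and last token; the debugger walks from the outermost scope
// inward, keeping every nested scope whose range covers a position.
#include <iostream>
#include <string>
#include <vector>

struct Scope {
  std::string name;
  int start_position;             // offset of the scope's first character
  int end_position;               // offset just past the scope's last character
  std::vector<Scope*> inner_scopes;

  // Collect the scopes enclosing `position`, outermost first.
  void GetNestedScopeChain(int position,
                           std::vector<const Scope*>* chain) const {
    chain->push_back(this);
    for (const Scope* inner : inner_scopes) {
      if (inner->start_position <= position &&
          position <= inner->end_position) {
        inner->GetNestedScopeChain(position, chain);
        return;  // sibling scopes have non-overlapping source ranges
      }
    }
  }
};

int main() {
  // Source layout (offsets):  with ({}) { block; } more;
  //                           0         10      20
  Scope with_scope{"with", 10, 20, {}};
  Scope global{"global", 0, 26, {&with_scope}};

  std::vector<const Scope*> chain;
  global.GetNestedScopeChain(15, &chain);  // a position inside the with body
  for (const Scope* s : chain) std::cout << s->name << "\n";
  // prints: global, then with
}

In the reverted patch the chain was built from SerializedScopeInfo handles and consulted when materializing debugger scope objects; the revert itself returns to the older scheme visible in its `+` lines, where the iterator only tracks whether it has reached the function's own local context (the restored at_local_ / local_done_ flags).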
mmm a / tensorflow / c / eager / BUILD <nl> ppp b / tensorflow / c / eager / BUILD <nl> tf_cuda_library ( <nl> " / / conditions : default " : [ ] , <nl> } ) + [ <nl> " / / tensorflow / core / common_runtime / eager : eager_operation " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_client " , <nl> + " / / tensorflow / core / distributed_runtime / rpc / eager : grpc_eager_client " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_channel " , <nl> + " / / tensorflow / core / distributed_runtime / rpc / eager : eager_grpc_server_lib " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_server_lib " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_cache " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_service " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : rpc_rendezvous_mgr " , <nl> + " / / tensorflow / core / distributed_runtime : remote_device " , <nl> + " / / tensorflow / core / distributed_runtime : server_lib " , <nl> + " / / tensorflow / core / distributed_runtime : worker_env " , <nl> " / / tensorflow / core : gpu_runtime " , <nl> ] , <nl> ) <nl> tf_cuda_library ( <nl> " / / tensorflow / core / common_runtime / eager : eager_operation " , <nl> " / / tensorflow / core / common_runtime / eager : kernel_and_device " , <nl> " / / tensorflow / core / common_runtime / eager : tensor_handle " , <nl> + " / / tensorflow / core / distributed_runtime : remote_device " , <nl> + " / / tensorflow / core / distributed_runtime : server_lib " , <nl> + " / / tensorflow / core / distributed_runtime : worker_env " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_client " , <nl> + " / / tensorflow / core / distributed_runtime / eager : remote_tensor_handle " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_channel " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_cache " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_service " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : rpc_rendezvous_mgr " , <nl> + " / / tensorflow / core / distributed_runtime / rpc / eager : eager_grpc_server_lib " , <nl> + " / / tensorflow / core / distributed_runtime / rpc / eager : grpc_eager_client " , <nl> ] , <nl> ) <nl> <nl> tf_cuda_cc_test ( <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> + " / / tensorflow / core / distributed_runtime / rpc / eager : eager_grpc_server_lib " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / c / eager / c_api . cc <nl> ppp b / tensorflow / c / eager / c_api . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / eager / execute . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / common_runtime / rendezvous_mgr . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / eager_grpc_server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_client . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_channel . h " <nl> + # include " tensorflow / core / distributed_runtime / server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_env . h " <nl> # include " tensorflow / core / framework / node_def_util . 
h " <nl> # include " tensorflow / core / framework / rendezvous . h " <nl> # include " tensorflow / core / framework / tensor_shape . pb . h " <nl> # include " tensorflow / core / framework / types . h " <nl> # include " tensorflow / core / lib / core / refcount . h " <nl> + # include " tensorflow / core / lib / gtl / cleanup . h " <nl> # include " tensorflow / core / lib / gtl / flatmap . h " <nl> # include " tensorflow / core / lib / gtl / map_util . h " <nl> # include " tensorflow / core / lib / gtl / stl_util . h " <nl> string DeviceName ( const tensorflow : : Device * d ) { <nl> std : : atomic_int_fast64_t func_id_generator ( 0 ) ; <nl> # endif / / TENSORFLOW_EAGER_USE_XLA <nl> <nl> + tensorflow : : Status GetAllRemoteDevices ( <nl> + const std : : vector < string > & remote_workers , <nl> + tensorflow : : WorkerCacheInterface * worker_cache , <nl> + std : : unique_ptr < tensorflow : : DeviceMgr > * device_mgr ) { <nl> + std : : vector < tensorflow : : Device * > remote_devices ; <nl> + tensorflow : : Status status ; <nl> + / / TODO ( nareshmodi ) do this in parallel instead of serially . <nl> + for ( const string & remote_worker : remote_workers ) { <nl> + tensorflow : : Notification n ; <nl> + tensorflow : : NewRemoteDevices ( <nl> + tensorflow : : Env : : Default ( ) , worker_cache , remote_worker , <nl> + [ & status , & n , & remote_devices ] ( <nl> + const tensorflow : : Status & s , <nl> + std : : vector < tensorflow : : Device * > * devices ) { <nl> + status = s ; <nl> + if ( s . ok ( ) ) { <nl> + for ( tensorflow : : Device * d : * devices ) { <nl> + remote_devices . push_back ( d ) ; <nl> + } <nl> + } <nl> + n . Notify ( ) ; <nl> + } ) ; <nl> + n . WaitForNotification ( ) ; <nl> + } <nl> + std : : unique_ptr < tensorflow : : DeviceMgr > remote_device_mgr ( <nl> + new tensorflow : : DeviceMgr ( remote_devices ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( status ) ; <nl> + <nl> + * device_mgr = std : : move ( remote_device_mgr ) ; <nl> + return tensorflow : : Status : : OK ( ) ; <nl> + } <nl> + <nl> + tensorflow : : Status CreateRemoteContexts ( <nl> + const std : : vector < string > & remote_workers , <nl> + tensorflow : : eager : : EagerClientCache * remote_eager_workers , bool async , <nl> + tensorflow : : gtl : : FlatMap < string , tensorflow : : uint64 > * remote_contexts ) { <nl> + for ( int i = 0 ; i < remote_workers . size ( ) ; i + + ) { <nl> + const string & remote_worker = remote_workers [ i ] ; <nl> + <nl> + tensorflow : : eager : : CreateContextRequest request ; <nl> + tensorflow : : eager : : CreateContextResponse response ; <nl> + tensorflow : : DeviceNameUtils : : ParsedName parsed_name ; <nl> + if ( ! tensorflow : : DeviceNameUtils : : ParseFullName ( remote_worker , <nl> + & parsed_name ) ) { <nl> + return tensorflow : : errors : : InvalidArgument ( <nl> + " Unable to parse " , remote_worker , " as a device name " ) ; <nl> + } <nl> + request . mutable_server_def ( ) - > set_job_name ( parsed_name . job ) ; <nl> + request . mutable_server_def ( ) - > set_task_index ( parsed_name . task ) ; <nl> + request . set_async ( async ) ; <nl> + auto * eager_client = remote_eager_workers - > GetClient ( remote_worker ) ; <nl> + if ( eager_client = = nullptr ) { <nl> + return tensorflow : : errors : : Internal ( <nl> + " Cannot find a client for the given target : " , remote_worker ) ; <nl> + } <nl> + tensorflow : : Notification n ; <nl> + tensorflow : : Status status ; <nl> + / / TODO ( nareshmodi ) do this in parallel instead of serially . 
<nl> + eager_client - > CreateContextAsync ( <nl> + & request , & response , [ & status , & n ] ( const tensorflow : : Status & s ) { <nl> + status = s ; <nl> + n . Notify ( ) ; <nl> + } ) ; <nl> + n . WaitForNotification ( ) ; <nl> + TF_RETURN_IF_ERROR ( status ) ; <nl> + <nl> + remote_contexts - > emplace ( remote_worker , response . context_id ( ) ) ; <nl> + } <nl> + return tensorflow : : Status : : OK ( ) ; <nl> + } <nl> + <nl> + tensorflow : : Status NewRemoteAwareTFE_Context ( const TFE_ContextOptions * opts , <nl> + TFE_Context * * ctx ) { <nl> + string worker_name = tensorflow : : strings : : StrCat ( <nl> + " / job : " , opts - > server_def . job_name ( ) , <nl> + " / replica : 0 / task : " , opts - > server_def . task_index ( ) ) ; <nl> + std : : unique_ptr < tensorflow : : eager : : EagerGrpcServer > server ; <nl> + TF_RETURN_IF_ERROR ( <nl> + tensorflow : : eager : : EagerGrpcServer : : Create ( opts - > server_def , & server ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( server - > Start ( ) ) ; <nl> + <nl> + std : : vector < string > remote_workers ; <nl> + server - > master_env ( ) - > worker_cache - > ListWorkers ( & remote_workers ) ; <nl> + remote_workers . erase ( <nl> + std : : remove ( remote_workers . begin ( ) , remote_workers . end ( ) , worker_name ) , <nl> + remote_workers . end ( ) ) ; <nl> + <nl> + std : : unique_ptr < tensorflow : : DeviceMgr > remote_device_mgr ; <nl> + TF_RETURN_IF_ERROR ( GetAllRemoteDevices ( <nl> + remote_workers , server - > master_env ( ) - > worker_cache , & remote_device_mgr ) ) ; <nl> + <nl> + std : : shared_ptr < tensorflow : : GrpcChannelCache > channel_cache = <nl> + server - > channel_cache ( ) ; <nl> + std : : unique_ptr < tensorflow : : eager : : EagerClientCache > remote_eager_workers ( <nl> + tensorflow : : eager : : NewGrpcEagerClientCache ( channel_cache ) ) ; <nl> + <nl> + / / Initialize remote eager workers . <nl> + tensorflow : : gtl : : FlatMap < string , tensorflow : : uint64 > remote_contexts ; <nl> + TF_RETURN_IF_ERROR ( CreateRemoteContexts ( remote_workers , <nl> + remote_eager_workers . get ( ) , <nl> + opts - > async , & remote_contexts ) ) ; <nl> + <nl> + tensorflow : : RemoteRendezvous * r = <nl> + server - > worker_env ( ) - > rendezvous_mgr - > Find ( 0 ) ; <nl> + <nl> + auto * device_mgr = server - > worker_env ( ) - > device_mgr ; <nl> + * ctx = new TFE_Context ( opts - > session_options . options , opts - > policy , <nl> + opts - > async , device_mgr , r , std : : move ( server ) , <nl> + std : : move ( remote_eager_workers ) , <nl> + std : : move ( remote_device_mgr ) , remote_contexts ) ; <nl> + <nl> + return tensorflow : : Status : : OK ( ) ; <nl> + } <nl> } / / namespace <nl> <nl> extern " C " { <nl> void TFE_ContextOptionsSetDevicePlacementPolicy ( <nl> options - > policy = policy ; <nl> } <nl> <nl> + TF_CAPI_EXPORT extern void TFE_ContextOptionsSetServerDef ( <nl> + TFE_ContextOptions * options , const void * proto , size_t proto_len , <nl> + TF_Status * status ) { <nl> + if ( ! options - > server_def . ParseFromArray ( proto , proto_len ) ) { <nl> + status - > status = tensorflow : : errors : : InvalidArgument ( <nl> + " Invalid tensorflow . 
ServerDef protocol buffer " ) ; <nl> + } <nl> + } <nl> + <nl> TF_CAPI_EXPORT extern void TFE_ContextSetAsyncForThread ( TFE_Context * ctx , <nl> unsigned char async , <nl> TF_Status * status ) { <nl> TF_CAPI_EXPORT extern void TFE_ContextSetAsyncForThread ( TFE_Context * ctx , <nl> void TFE_DeleteContextOptions ( TFE_ContextOptions * options ) { delete options ; } <nl> <nl> TFE_Context * TFE_NewContext ( const TFE_ContextOptions * opts , TF_Status * status ) { <nl> + if ( ! opts - > server_def . job_name ( ) . empty ( ) ) { <nl> + TFE_Context * ctx = nullptr ; <nl> + status - > status = NewRemoteAwareTFE_Context ( opts , & ctx ) ; <nl> + return ctx ; <nl> + } <nl> + <nl> std : : vector < tensorflow : : Device * > devices ; <nl> status - > status = tensorflow : : DeviceFactory : : AddDevices ( <nl> opts - > session_options . options , " / job : localhost / replica : 0 / task : 0 " , <nl> & devices ) ; <nl> - if ( ! status - > status . ok ( ) ) { <nl> - return nullptr ; <nl> - } <nl> + if ( ! status - > status . ok ( ) ) return nullptr ; <nl> std : : unique_ptr < tensorflow : : DeviceMgr > device_mgr ( <nl> new tensorflow : : DeviceMgr ( devices ) ) ; <nl> + <nl> tensorflow : : Rendezvous * r = <nl> new tensorflow : : IntraProcessRendezvous ( device_mgr . get ( ) ) ; <nl> + <nl> return new TFE_Context ( opts - > session_options . options , opts - > policy , <nl> opts - > async , std : : move ( device_mgr ) , r ) ; <nl> } <nl> void TFE_DeleteContext ( TFE_Context * ctx , TF_Status * status ) { delete ctx ; } <nl> <nl> TF_DeviceList * TFE_ContextListDevices ( TFE_Context * ctx , TF_Status * status ) { <nl> TF_DeviceList * list = new TF_DeviceList ; <nl> - ctx - > context . device_mgr ( ) - > ListDeviceAttributes ( & list - > response ) ; <nl> + ctx - > context . local_device_mgr ( ) - > ListDeviceAttributes ( & list - > response ) ; <nl> + if ( ctx - > context . remote_device_mgr ( ) ) { <nl> + ctx - > context . remote_device_mgr ( ) - > ListDeviceAttributes ( & list - > response ) ; <nl> + } <nl> return list ; <nl> } <nl> <nl> mmm a / tensorflow / c / eager / c_api . h <nl> ppp b / tensorflow / c / eager / c_api . h <nl> TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync ( TFE_ContextOptions * , <nl> TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy ( <nl> TFE_ContextOptions * , TFE_ContextDevicePlacementPolicy ) ; <nl> <nl> + / / A tensorflow . ServerDef specifies remote workers ( in addition to the current <nl> + / / worker ' s name ) . Operations created on this context can then be executed on <nl> + / / any of these remote workers by setting an appropriate device . <nl> + / / <nl> + / / If this option is set , all servers identified by the <nl> + / / ServerDef must be up when the context is created . <nl> + TF_CAPI_EXPORT extern void TFE_ContextOptionsSetServerDef ( <nl> + TFE_ContextOptions * options , const void * proto , size_t proto_len , <nl> + TF_Status * status ) ; <nl> + <nl> / / Destroy an options object . <nl> TF_CAPI_EXPORT extern void TFE_DeleteContextOptions ( TFE_ContextOptions * ) ; <nl> <nl> mmm a / tensorflow / c / eager / c_api_internal . h <nl> ppp b / tensorflow / c / eager / c_api_internal . h <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / eager / tensor_handle . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / common_runtime / rendezvous_mgr . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_client . 
h " <nl> + # include " tensorflow / core / distributed_runtime / remote_device . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / eager_grpc_server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_worker_cache . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_worker_service . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / rpc_rendezvous_mgr . h " <nl> + # include " tensorflow / core / distributed_runtime / server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_env . h " <nl> # include " tensorflow / core / framework / rendezvous . h " <nl> # include " tensorflow / core / lib / core / stringpiece . h " <nl> # include " tensorflow / core / lib / gtl / inlined_vector . h " <nl> struct TFE_ContextOptions { <nl> / / true if async execution is enabled . <nl> bool async = false ; <nl> TFE_ContextDevicePlacementPolicy policy { TFE_DEVICE_PLACEMENT_SILENT } ; <nl> + tensorflow : : ServerDef server_def ; <nl> } ; <nl> <nl> struct TFE_Context { <nl> struct TFE_Context { <nl> default_policy ) , <nl> async , std : : move ( device_mgr ) , rendezvous ) { } <nl> <nl> + explicit TFE_Context ( <nl> + const tensorflow : : SessionOptions & opts , <nl> + TFE_ContextDevicePlacementPolicy default_policy , bool async , <nl> + tensorflow : : DeviceMgr * local_device_mgr , <nl> + tensorflow : : Rendezvous * rendezvous , <nl> + std : : unique_ptr < tensorflow : : GrpcServer > server , <nl> + std : : unique_ptr < tensorflow : : eager : : EagerClientCache > remote_eager_workers , <nl> + std : : unique_ptr < tensorflow : : DeviceMgr > remote_device_mgr , <nl> + const tensorflow : : gtl : : FlatMap < tensorflow : : string , tensorflow : : uint64 > & <nl> + remote_contexts ) <nl> + : context ( opts , <nl> + static_cast < tensorflow : : ContextDevicePlacementPolicy > ( <nl> + default_policy ) , <nl> + async , local_device_mgr , rendezvous , std : : move ( server ) , <nl> + std : : move ( remote_eager_workers ) , std : : move ( remote_device_mgr ) , <nl> + remote_contexts ) { } <nl> + <nl> tensorflow : : EagerContext context ; <nl> } ; <nl> <nl> mmm a / tensorflow / c / eager / c_api_test . cc <nl> ppp b / tensorflow / c / eager / c_api_test . cc <nl> limitations under the License . <nl> # include " tensorflow / c / eager / c_api . h " <nl> <nl> # include < string . h > <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / eager_grpc_server_lib . h " <nl> # include " tensorflow / core / framework / function . pb . h " <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> limitations under the License . <nl> # include " tensorflow / core / platform / protobuf . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / test_benchmark . h " <nl> + # include " tensorflow / core / protobuf / cluster . pb . h " <nl> # include " tensorflow / core / protobuf / config . pb . h " <nl> + # include " tensorflow / core / protobuf / tensorflow_server . pb . h " <nl> <nl> using tensorflow : : string ; <nl> <nl> TEST ( CAPI , Context ) { <nl> TF_DeleteStatus ( status ) ; <nl> } <nl> <nl> + tensorflow : : ServerDef GetServerDef ( int num_tasks ) { <nl> + tensorflow : : ServerDef server_def ; <nl> + server_def . set_protocol ( " grpc " ) ; <nl> + server_def . set_job_name ( " localhost " ) ; <nl> + server_def . 
set_task_index ( 0 ) ; <nl> + tensorflow : : ClusterDef * cluster_def = server_def . mutable_cluster ( ) ; <nl> + tensorflow : : JobDef * job_def = cluster_def - > add_job ( ) ; <nl> + job_def - > set_name ( " localhost " ) ; <nl> + for ( int i = 0 ; i < num_tasks ; i + + ) { <nl> + int port = tensorflow : : testing : : PickUnusedPortOrDie ( ) ; <nl> + job_def - > mutable_tasks ( ) - > insert ( <nl> + { i , tensorflow : : strings : : StrCat ( " localhost : " , port ) } ) ; <nl> + } <nl> + return server_def ; <nl> + } <nl> + <nl> + void TestRemoteExecute ( bool async ) { <nl> + tensorflow : : ServerDef server_def = GetServerDef ( 2 ) ; <nl> + <nl> + / / This server def has the task index set to 0 . <nl> + string serialized = server_def . SerializeAsString ( ) ; <nl> + <nl> + server_def . set_task_index ( 1 ) ; <nl> + <nl> + std : : unique_ptr < tensorflow : : eager : : EagerGrpcServer > worker_server ; <nl> + ASSERT_TRUE ( <nl> + tensorflow : : eager : : EagerGrpcServer : : Create ( server_def , & worker_server ) <nl> + . ok ( ) ) ; <nl> + ASSERT_TRUE ( worker_server - > Start ( ) . ok ( ) ) ; <nl> + <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + TFE_ContextOptions * opts = TFE_NewContextOptions ( ) ; <nl> + TFE_ContextOptionsSetServerDef ( opts , serialized . data ( ) , serialized . size ( ) , <nl> + status ) ; <nl> + TFE_ContextOptionsSetAsync ( opts , static_cast < unsigned char > ( async ) ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + TFE_Context * ctx = TFE_NewContext ( opts , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + TFE_DeleteContextOptions ( opts ) ; <nl> + <nl> + TFE_TensorHandle * h0_task0 = TestMatrixTensorHandle ( ) ; <nl> + TFE_TensorHandle * h1_task0 = TestMatrixTensorHandle ( ) ; <nl> + const char remote_device_name [ ] = <nl> + " / job : localhost / replica : 0 / task : 1 / device : CPU : 0 " ; <nl> + auto * h0_task1 = <nl> + TFE_TensorHandleCopyToDevice ( h0_task0 , ctx , remote_device_name , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + auto * h1_task1 = <nl> + TFE_TensorHandleCopyToDevice ( h1_task0 , ctx , remote_device_name , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + TFE_Op * matmul = MatMulOp ( ctx , h0_task1 , h1_task1 ) ; <nl> + TFE_OpSetDevice ( matmul , remote_device_name , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + TFE_TensorHandle * retvals [ 1 ] ; <nl> + int num_retvals = 1 ; <nl> + TFE_Execute ( matmul , & retvals [ 0 ] , & num_retvals , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + auto * retval_task0 = TFE_TensorHandleCopyToDevice ( <nl> + retvals [ 0 ] , ctx , " / job : localhost / replica : 0 / task : 0 / device : CPU : 0 " , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + TF_Tensor * t = TFE_TensorHandleResolve ( retval_task0 , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + TFE_DeleteTensorHandle ( retval_task0 ) ; <nl> + float product [ 4 ] = { 0 } ; <nl> + EXPECT_EQ ( sizeof ( product ) , TF_TensorByteSize ( t ) ) ; <nl> + memcpy ( & product [ 0 ] , TF_TensorData ( t ) , TF_TensorByteSize ( t ) ) ; <nl> + TF_DeleteTensor ( t ) ; <nl> + EXPECT_EQ ( 7 , product [ 0 ] ) ; <nl> + EXPECT_EQ ( 10 , product [ 1 ] ) ; <nl> + 
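/ / The expected values are the entries of the 2x2 matrix [ [ 1 , 2 ] , [ 3 , 4 ] ] <nl> + / / from TestMatrixTensorHandle multiplied by itself : [ [ 7 , 10 ] , [ 15 , 22 ] ] . <nl> + 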
EXPECT_EQ ( 15 , product [ 2 ] ) ; <nl> + EXPECT_EQ ( 22 , product [ 3 ] ) ; <nl> + <nl> + TFE_DeleteTensorHandle ( h0_task0 ) ; <nl> + TFE_DeleteTensorHandle ( h1_task0 ) ; <nl> + TFE_DeleteTensorHandle ( h0_task1 ) ; <nl> + TFE_DeleteTensorHandle ( h1_task1 ) ; <nl> + TFE_DeleteTensorHandle ( retvals [ 0 ] ) ; <nl> + <nl> + TFE_DeleteOp ( matmul ) ; <nl> + <nl> + TFE_ContextAsyncWait ( ctx , status ) ; <nl> + TFE_DeleteContext ( ctx , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + TF_DeleteStatus ( status ) ; <nl> + <nl> + / / TODO ( nareshmodi ) : Figure out how to correctly shut the server down . <nl> + worker_server . release ( ) ; <nl> + } <nl> + <nl> + TEST ( CAPI , RemoteExecute ) { TestRemoteExecute ( false ) ; } <nl> + TEST ( CAPI , RemoteExecuteAsync ) { TestRemoteExecute ( true ) ; } <nl> + <nl> TEST ( CAPI , TensorHandle ) { <nl> TFE_TensorHandle * h = TestMatrixTensorHandle ( ) ; <nl> EXPECT_EQ ( TF_FLOAT , TFE_TensorHandleDataType ( h ) ) ; <nl> mmm a / tensorflow / compiler / aot / tests / tfcompile_test . cc <nl> ppp b / tensorflow / compiler / aot / tests / tfcompile_test . cc <nl> TEST ( TFCompileTest , HloProfiling ) { <nl> auto header = HasSubstr ( " Execution profile for " ) ; <nl> auto total_cycles_profile_line = HasSubstr ( " [ total ] " ) ; <nl> auto dot_profile_line = HasSubstr ( <nl> - " % dot . 0 . 2 = f32 [ 2 , 2 ] { 1 , 0 } dot ( f32 [ 2 , 2 ] { 1 , 0 } % arg0 . 0 . 0 , f32 [ 2 , 2 ] { 1 , 0 } " <nl> + " % dot . 0 . 4 = f32 [ 2 , 2 ] { 1 , 0 } dot ( f32 [ 2 , 2 ] { 1 , 0 } % arg0 . 0 . 0 , f32 [ 2 , 2 ] { 1 , 0 } " <nl> " % arg1 . 0 . 1 ) " ) ; <nl> auto add_profile_line = HasSubstr ( <nl> - " % add . 0 . 5 = f32 [ 2 , 2 ] { 1 , 0 } add ( f32 [ 2 , 2 ] { 1 , 0 } % arg0 . 0 . 0 , f32 [ 2 , 2 ] { 1 , 0 } " <nl> + " % add . 0 . 6 = f32 [ 2 , 2 ] { 1 , 0 } add ( f32 [ 2 , 2 ] { 1 , 0 } % arg0 . 0 . 0 , f32 [ 2 , 2 ] { 1 , 0 } " <nl> " % arg1 . 0 . 1 ) " ) ; <nl> auto tuple_profile_line = HasSubstr ( <nl> " % tuple . 0 . 8 = ( f32 [ 2 , 2 ] { 1 , 0 } , f32 [ 2 , 2 ] { 1 , 0 } ) tuple ( f32 [ 2 , 2 ] { 1 , 0 } " <nl> - " % dot . 0 . 2 , f32 [ 2 , 2 ] { 1 , 0 } % add . 0 . 5 ) " ) ; <nl> + " % dot . 0 . 4 , f32 [ 2 , 2 ] { 1 , 0 } % add . 0 . 6 ) " ) ; <nl> + auto arg0_profile_line = HasSubstr ( " % arg0 . 0 . 0 = f32 [ 2 , 2 ] { 1 , 0 } parameter ( 0 ) " ) ; <nl> + auto arg1_profile_line = HasSubstr ( " % arg1 . 0 . 1 = f32 [ 2 , 2 ] { 1 , 0 } parameter ( 1 ) " ) ; <nl> <nl> EXPECT_THAT ( hlo_profile_lines , <nl> IsSupersetOf ( { header , total_cycles_profile_line , dot_profile_line , <nl> mmm a / tensorflow / compiler / jit / BUILD <nl> ppp b / tensorflow / compiler / jit / BUILD <nl> cc_library ( <nl> " / / tensorflow / core / kernels : no_op " , <nl> " / / tensorflow / core / kernels : sendrecv_ops " , <nl> " / / tensorflow / core / kernels : variable_ops " , <nl> - " @ com_google_absl / / absl / memory " , <nl> ] , <nl> ) <nl> <nl> cc_library ( <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> - " @ com_google_absl / / absl / memory " , <nl> ] , <nl> alwayslink = 1 , <nl> ) <nl> tf_cc_test ( <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> " / / tensorflow / core : testlib " , <nl> - " @ com_google_absl / / absl / memory " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / compiler / jit / create_xla_launch_op . cc <nl> ppp b / tensorflow / compiler / jit / create_xla_launch_op . 
cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> # include " tensorflow / compiler / jit / create_xla_launch_op . h " <nl> <nl> - # include " absl / memory / memory . h " <nl> # include " tensorflow / compiler / jit / defs . h " <nl> # include " tensorflow / compiler / jit / kernels / xla_launch_op . h " <nl> # include " tensorflow / compiler / jit / mark_for_compilation_pass . h " <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / util / ptr_util . h " <nl> <nl> namespace tensorflow { <nl> namespace { <nl> Status CreateXlaLaunchOp ( FunctionLibraryRuntime * flr , const NodeDef & node_def , <nl> & fbody - > fdef . signature ( ) , flr , fbody - > arg_types , input_memory_types , <nl> fbody - > ret_types , output_memory_types , flr - > graph_def_version ( ) , & s ) ; <nl> <nl> - * kernel = absl : : make_unique < XlaLocalLaunchBase > ( <nl> - & construction , constant_arg_indices , resource_arg_indices , function ) ; <nl> + * kernel = MakeUnique < XlaLocalLaunchBase > ( & construction , constant_arg_indices , <nl> + resource_arg_indices , function ) ; <nl> return s ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / jit / create_xla_launch_op_test . cc <nl> ppp b / tensorflow / compiler / jit / create_xla_launch_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / compiler / jit / create_xla_launch_op . h " <nl> <nl> - # include " absl / memory / memory . h " <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / framework / function_testlib . h " <nl> limitations under the License . <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / public / session_options . h " <nl> # include " tensorflow / core / public / version . h " <nl> + # include " tensorflow / core / util / ptr_util . h " <nl> <nl> namespace tensorflow { <nl> <nl> class CreateXlaLaunchOpTest : public : : testing : : Test { <nl> for ( const auto & fdef : flib ) { <nl> * ( proto . add_function ( ) ) = fdef ; <nl> } <nl> - lib_def_ = absl : : make_unique < FunctionLibraryDefinition > ( <nl> - OpRegistry : : Global ( ) , proto ) ; <nl> + lib_def_ = <nl> + MakeUnique < FunctionLibraryDefinition > ( OpRegistry : : Global ( ) , proto ) ; <nl> OptimizerOptions opts ; <nl> - device_mgr_ = absl : : make_unique < DeviceMgr > ( devices_ ) ; <nl> - pflr_ = absl : : make_unique < ProcessFunctionLibraryRuntime > ( <nl> + device_mgr_ = MakeUnique < DeviceMgr > ( devices_ ) ; <nl> + pflr_ = MakeUnique < ProcessFunctionLibraryRuntime > ( <nl> device_mgr_ . get ( ) , Env : : Default ( ) , TF_GRAPH_DEF_VERSION , lib_def_ . get ( ) , <nl> opts , / * default_thread_pool = * / nullptr , / * cluster_flr = * / nullptr ) ; <nl> flr_ = pflr_ - > GetFLR ( " / job : localhost / replica : 0 / task : 0 / cpu : 0 " ) ; <nl> mmm a / tensorflow / compiler / jit / kernels / xla_launch_op . cc <nl> ppp b / tensorflow / compiler / jit / kernels / xla_launch_op . cc <nl> void XlaLocalLaunchBase : : Compute ( OpKernelContext * ctx ) { <nl> / / this is more obviously correct . 
) <nl> core : : ScopedUnref cache_ref ( cache ) ; <nl> <nl> - const XlaDevice : : Metadata * metadata ; <nl> + const XlaDevice : : Metadata * metadata = nullptr ; <nl> Status s = XlaDevice : : GetMetadata ( ctx , & metadata ) ; <nl> bool allocate_xla_tensors = s . ok ( ) ; <nl> <nl> void XlaLocalLaunchBase : : Compute ( OpKernelContext * ctx ) { <nl> options . graph_def_version = ctx - > function_library ( ) - > graph_def_version ( ) ; <nl> options . allow_cpu_custom_calls = ( platform_id_ = = se : : host : : kHostPlatformId ) ; <nl> options . device_allocator = xla_allocator ; <nl> - / / TODO ( b / 77671268 ) : We don ' t set variable_representation_shape_fn here . This <nl> - / / is restricted to Variables , but we need something like this to apply to <nl> - / / normal Tensors too . <nl> + if ( metadata ) { <nl> + options . shape_representation_fn = metadata - > shape_representation_fn ( ) ; <nl> + } <nl> <nl> const XlaCompiler : : CompilationResult * kernel ; <nl> xla : : LocalExecutable * executable ; <nl> void XlaLocalLaunchBase : : Compute ( OpKernelContext * ctx ) { <nl> for ( int i : constants_ ) { <nl> constant_args . insert ( { i , ctx - > input ( i ) } ) ; <nl> } <nl> - OP_REQUIRES_OK ( ctx , cache - > Compile ( options , function_ , constant_args , <nl> - variables , ctx , & kernel , & executable , <nl> - / * compile_options = * / nullptr ) ) ; <nl> + XlaCompiler : : CompileOptions compile_options ; <nl> + compile_options . is_entry_computation = true ; <nl> + OP_REQUIRES_OK ( <nl> + ctx , cache - > Compile ( options , function_ , constant_args , variables , ctx , <nl> + & kernel , & executable , & compile_options ) ) ; <nl> <nl> VLOG ( 1 ) < < " Executing XLA Computation . . . " ; <nl> <nl> mmm a / tensorflow / compiler / jit / xla_compile_on_demand_op . cc <nl> ppp b / tensorflow / compiler / jit / xla_compile_on_demand_op . cc <nl> Status XlaCompileOnDemandOp : : Compile ( <nl> options . client = metadata . client ( ) ; <nl> options . flib_def = <nl> new FunctionLibraryDefinition ( OpRegistry : : Global ( ) , FunctionDefLibrary { } ) ; <nl> + options . shape_representation_fn = metadata . shape_representation_fn ( ) ; <nl> + <nl> + XlaCompiler : : CompileOptions compile_options ; <nl> + compile_options . is_entry_computation = true ; <nl> <nl> std : : map < int , OptionalTensor > variable_args = GetVariables ( ctx ) ; <nl> return cache - > CompileSingleOp ( options , constant_arguments , variable_args , ctx , <nl> - result , executable , <nl> - / * compile_options = * / nullptr ) ; <nl> + result , executable , & compile_options ) ; <nl> } <nl> <nl> void XlaCompileOnDemandOp : : Compute ( OpKernelContext * ctx ) { <nl> mmm a / tensorflow / compiler / jit / xla_cpu_device . cc <nl> ppp b / tensorflow / compiler / jit / xla_cpu_device . cc <nl> Status XlaCpuDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> ( void ) registrations ; <nl> <nl> std : : unique_ptr < XlaDevice > device ; <nl> - TF_RETURN_IF_ERROR ( XlaDevice : : Create ( " Host " , DEVICE_XLA_CPU , 0 , <nl> - DEVICE_CPU_XLA_JIT , options , name_prefix , <nl> - registration , <nl> - / * transfer_as_literal = * / false , & device ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + XlaDevice : : Create ( " Host " , DEVICE_XLA_CPU , 0 , DEVICE_CPU_XLA_JIT , options , <nl> + name_prefix , registration , <nl> + / * transfer_as_literal = * / false , <nl> + / * shape_representation_fn = * / { } , & device ) ) ; <nl> devices - > push_back ( device . 
release ( ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> mmm a / tensorflow / compiler / jit / xla_device . cc <nl> ppp b / tensorflow / compiler / jit / xla_device . cc <nl> limitations under the License . <nl> # include < stdlib . h > <nl> # include < unordered_set > <nl> <nl> - # include " absl / memory / memory . h " <nl> # include " tensorflow / compiler / jit / defs . h " <nl> # include " tensorflow / compiler / jit / xla_compile_on_demand_op . h " <nl> # include " tensorflow / compiler / jit / xla_device_context . h " <nl> limitations under the License . <nl> # include " tensorflow / core / public / session_options . h " <nl> # include " tensorflow / core / public / version . h " <nl> # include " tensorflow / core / util / device_name_utils . h " <nl> + # include " tensorflow / core / util / ptr_util . h " <nl> # include " tensorflow / core / util / stream_executor_util . h " <nl> <nl> namespace tensorflow { <nl> XlaDeviceAllocator * XlaDeviceAllocatorState : : GetOrCreateXlaDeviceAllocator ( <nl> const string & jit_device_name , const SessionOptions & options , <nl> const string & name_prefix , <nl> const XlaOpRegistry : : DeviceRegistration & registration , <nl> - bool transfer_as_literal , std : : unique_ptr < XlaDevice > * device ) { <nl> + bool transfer_as_literal , <nl> + const XlaCompiler : : ShapeRepresentationFn & shape_representation_fn , <nl> + std : : unique_ptr < XlaDevice > * device ) { <nl> VLOG ( 1 ) < < " XlaDevice : : Create " < < platform_name < < " " < < device_name < < " : " <nl> < < device_ordinal ; <nl> <nl> XlaDeviceAllocator * XlaDeviceAllocatorState : : GetOrCreateXlaDeviceAllocator ( <nl> DeviceType ( device_name ) , Bytes ( 16ULL < < 30 ) , DeviceLocality ( ) , <nl> strings : : StrCat ( " device : " , device_name , " device " ) ) ; <nl> <nl> - device - > reset ( new XlaDevice ( options , attrs , device_ordinal , <nl> - DeviceType ( jit_device_name ) , <nl> - platform . ValueOrDie ( ) , transfer_as_literal ) ) ; <nl> + device - > reset ( new XlaDevice ( <nl> + options , attrs , device_ordinal , DeviceType ( jit_device_name ) , <nl> + platform . 
ValueOrDie ( ) , transfer_as_literal , shape_representation_fn ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - XlaDevice : : Metadata : : Metadata ( int device_ordinal , se : : Platform * platform , <nl> - const DeviceType & device_type ) <nl> + XlaDevice : : Metadata : : Metadata ( <nl> + int device_ordinal , se : : Platform * platform , const DeviceType & device_type , <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn ) <nl> : device_ordinal_ ( device_ordinal ) , <nl> device_type_ ( device_type ) , <nl> - platform_ ( platform ) { } <nl> + platform_ ( platform ) , <nl> + shape_representation_fn_ ( std : : move ( shape_representation_fn ) ) { } <nl> <nl> int XlaDevice : : Metadata : : device_ordinal ( ) const { return device_ordinal_ ; } <nl> <nl> const DeviceType & XlaDevice : : Metadata : : jit_device_type ( ) const { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - XlaDevice : : XlaDevice ( const SessionOptions & options , <nl> - const DeviceAttributes & attrs , int device_ordinal , <nl> - const DeviceType & jit_device_name , se : : Platform * platform , <nl> - bool transfer_as_literal ) <nl> + XlaDevice : : XlaDevice ( <nl> + const SessionOptions & options , const DeviceAttributes & attrs , <nl> + int device_ordinal , const DeviceType & jit_device_name , <nl> + se : : Platform * platform , bool transfer_as_literal , <nl> + const XlaCompiler : : ShapeRepresentationFn & shape_representation_fn ) <nl> : LocalDevice ( options , attrs ) , <nl> - xla_metadata_ ( device_ordinal , platform , jit_device_name ) , <nl> + xla_metadata_ ( device_ordinal , platform , jit_device_name , <nl> + shape_representation_fn ) , <nl> device_ordinal_ ( device_ordinal ) , <nl> jit_device_name_ ( jit_device_name ) , <nl> xla_allocator_ ( nullptr ) , <nl> platform_ ( platform ) , <nl> - transfer_as_literal_ ( transfer_as_literal ) { <nl> + transfer_as_literal_ ( transfer_as_literal ) , <nl> + shape_representation_fn_ ( shape_representation_fn ) { <nl> VLOG ( 1 ) < < " Created XLA device " < < jit_device_name ; <nl> } <nl> <nl> Status XlaDevice : : CreateAndSetGpuDeviceInfo ( ) { <nl> GetAllocator ( { } ) ; <nl> / / XlaDevice owns both gpu_device_info_ and <nl> / / gpu_device_info_ - > default_context . <nl> - gpu_device_info_ = absl : : make_unique < GpuDeviceInfo > ( ) ; <nl> + gpu_device_info_ = MakeUnique < GpuDeviceInfo > ( ) ; <nl> gpu_device_info_ - > stream = stream ; <nl> - gpu_device_info_ - > default_context = <nl> - new XlaDeviceContext ( stream , client ( ) , transfer_as_literal_ ) ; <nl> + gpu_device_info_ - > default_context = new XlaDeviceContext ( <nl> + stream , client ( ) , transfer_as_literal_ , shape_representation_fn_ ) ; <nl> set_tensorflow_gpu_device_info ( gpu_device_info_ . get ( ) ) ; <nl> } <nl> <nl> Status XlaDevice : : FillContextMap ( const Graph * graph , <nl> TF_ASSIGN_OR_RETURN ( se : : Stream * stream , GetStream ( ) ) ; <nl> / / Call GetAllocator for the side - effect of ensuring the allocator is created . <nl> GetAllocator ( { } ) ; <nl> - auto ctx = new XlaDeviceContext ( stream , client ( ) , transfer_as_literal_ ) ; <nl> + auto ctx = new XlaDeviceContext ( stream , client ( ) , transfer_as_literal_ , <nl> + shape_representation_fn_ ) ; <nl> for ( Node * n : graph - > nodes ( ) ) { <nl> VLOG ( 2 ) < < n - > id ( ) < < " : " < < n - > type_string ( ) < < " : " < < n - > name ( ) ; <nl> ctx - > Ref ( ) ; <nl> Status XlaDevice : : MakeTensorFromProto ( const TensorProto & tensor_proto , <nl> Tensor copy ( GetAllocator ( alloc_attrs ) , parsed . 
dtype ( ) , parsed . shape ( ) ) ; <nl> Notification n ; <nl> TF_ASSIGN_OR_RETURN ( se : : Stream * stream , GetStream ( ) ) ; <nl> - XlaTransferManager manager ( stream , client ( ) , transfer_as_literal_ ) ; <nl> + XlaTransferManager manager ( stream , client ( ) , transfer_as_literal_ , <nl> + shape_representation_fn_ ) ; <nl> manager . CopyCPUTensorToDevice ( & parsed , this , & copy , <nl> [ & n , & status ] ( const Status & s ) { <nl> status = s ; <nl> mmm a / tensorflow / compiler / jit / xla_device . h <nl> ppp b / tensorflow / compiler / jit / xla_device . h <nl> limitations under the License . <nl> / / runtime . <nl> / / <nl> / / Operators assigned to an XlaDevice are compiled into XLA computations . <nl> - / / Tensors on an XlaDevice are thin wrappers around XLA GlobalDataHandles ; state <nl> - / / is managed by XLA . <nl> + / / Tensors on an XlaDevice are thin wrappers around XLA ScopedShapedBuffers . <nl> / / <nl> / / XlaDevice is instantiated separately for each XLA backend ( e . g . , CPU or GPU ) , <nl> / / under different names ( e . g . , XLA_CPU or XLA_GPU ) . <nl> limitations under the License . <nl> # define TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_ <nl> <nl> # include " tensorflow / compiler / jit / xla_tensor . h " <nl> + # include " tensorflow / compiler / tf2xla / xla_compiler . h " <nl> # include " tensorflow / compiler / tf2xla / xla_op_registry . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> class XlaDevice : public LocalDevice { <nl> class Metadata { <nl> public : <nl> Metadata ( int device_ordinal , se : : Platform * platform , <nl> - const DeviceType & device_type ) ; <nl> + const DeviceType & device_type , <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn ) ; <nl> <nl> / / The index of the device on this host . <nl> int device_ordinal ( ) const ; <nl> class XlaDevice : public LocalDevice { <nl> se : : Platform * platform ( ) const ; <nl> xla : : LocalClient * client ( ) const ; <nl> const DeviceType & jit_device_type ( ) const ; <nl> + const XlaCompiler : : ShapeRepresentationFn & shape_representation_fn ( ) const { <nl> + return shape_representation_fn_ ; <nl> + } <nl> <nl> private : <nl> const int device_ordinal_ ; <nl> const DeviceType device_type_ ; <nl> se : : Platform * platform_ ; / / Not owned . <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn_ ; <nl> <nl> TF_DISALLOW_COPY_AND_ASSIGN ( Metadata ) ; <nl> } ; <nl> class XlaDevice : public LocalDevice { <nl> / / ' transfer_as_literal ' is true if device < - > host transfers must be done using <nl> / / XLA ' s TransferLiteral { To , From } Device interface . If false , we can use <nl> / / ThenMemcpy instead . 
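<nl> + / / ' shape_representation_fn ' , when set , maps a host TensorShape and DataType <nl> + / / to the TensorShape that is actually materialized on the device ( e . g . , a <nl> + / / padded layout ) .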
<nl> - static Status Create ( const string & platform_name , const string & device_name , <nl> - int device_ordinal , const string & jit_device_name , <nl> - const SessionOptions & options , const string & name_prefix , <nl> - const XlaOpRegistry : : DeviceRegistration & registration , <nl> - bool transfer_as_literal , <nl> - std : : unique_ptr < XlaDevice > * device ) ; <nl> + static Status Create ( <nl> + const string & platform_name , const string & device_name , <nl> + int device_ordinal , const string & jit_device_name , <nl> + const SessionOptions & options , const string & name_prefix , <nl> + const XlaOpRegistry : : DeviceRegistration & registration , <nl> + bool transfer_as_literal , <nl> + const XlaCompiler : : ShapeRepresentationFn & shape_representation_fn , <nl> + std : : unique_ptr < XlaDevice > * device ) ; <nl> <nl> XlaDevice ( const SessionOptions & options , const DeviceAttributes & attrs , <nl> int device_ordinal , const DeviceType & jit_device_name , <nl> - se : : Platform * platform , bool transfer_as_literal ) ; <nl> + se : : Platform * platform , bool transfer_as_literal , <nl> + const XlaCompiler : : ShapeRepresentationFn & shape_representation_fn ) ; <nl> ~ XlaDevice ( ) override ; <nl> <nl> Allocator * GetAllocator ( AllocatorAttributes attr ) override ; <nl> class XlaDevice : public LocalDevice { <nl> / / The name of the device that is used to compile Ops for this XlaDevice . <nl> DeviceType jit_device_name_ ; <nl> / / Memory allocator associated with this device . <nl> - Allocator * xla_allocator_ ; / / Not owned . <nl> - se : : Platform * platform_ ; / / Not owned . <nl> + Allocator * xla_allocator_ ; / / Not owned . <nl> + se : : Platform * platform_ ; / / Not owned . <nl> / / Stream associated with this device . Operations enqueued on this <nl> / / stream are executed on the device . Operations include data <nl> / / copying back and forth between CPU and the device , and <nl> class XlaDevice : public LocalDevice { <nl> / / Must we use XLA ' s transfer manager for correct host < - > device transfers ? if <nl> / / false , we can use ThenMemcpy ( ) instead . <nl> bool transfer_as_literal_ ; <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn_ ; <nl> <nl> / / If set , holds default device context ( that we must Unref ) <nl> / / and its stream . <nl> mmm a / tensorflow / compiler / jit / xla_device_context . cc <nl> ppp b / tensorflow / compiler / jit / xla_device_context . cc <nl> void XlaDeviceAllocator : : DeallocateRaw ( void * ptr ) { <nl> <nl> void XlaDeviceAllocator : : GetStats ( AllocatorStats * stats ) { stats - > Clear ( ) ; } <nl> <nl> - XlaTransferManager : : XlaTransferManager ( se : : Stream * stream , <nl> - xla : : LocalClient * client , <nl> - bool transfer_as_literal ) <nl> + XlaTransferManager : : XlaTransferManager ( <nl> + se : : Stream * stream , xla : : LocalClient * client , bool transfer_as_literal , <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn ) <nl> : stream_ ( stream ) , <nl> client_ ( client ) , <nl> transfer_manager_ ( client - > backend ( ) . 
transfer_manager ( ) ) , <nl> - transfer_as_literal_ ( transfer_as_literal ) { } <nl> + transfer_as_literal_ ( transfer_as_literal ) , <nl> + shape_representation_fn_ ( std : : move ( shape_representation_fn ) ) { } <nl> <nl> Status XlaTransferManager : : TransferLiteralToDevice ( <nl> const Tensor & host_tensor , Tensor * device_tensor ) const { <nl> Status XlaTransferManager : : TransferLiteralFromDevice ( <nl> transfer_manager_ - > TransferLiteralFromDevice ( <nl> stream_ - > parent ( ) , shaped_buffer ) ) ; <nl> VLOG ( 1 ) < < " Transfer from device as literal : " < < literal - > ToString ( ) ; <nl> - return LiteralToHostTensor ( * literal , host_tensor - > dtype ( ) , host_tensor ) ; <nl> + Tensor tensor ; <nl> + TF_RETURN_IF_ERROR ( <nl> + LiteralToHostTensor ( * literal , host_tensor - > dtype ( ) , & tensor ) ) ; <nl> + / / Reshape the tensor back to its declared shape . <nl> + if ( ! host_tensor - > CopyFrom ( tensor , device_tensor . shape ( ) ) ) { <nl> + return errors : : Internal ( <nl> + " Tensor : : CopyFrom failed when copying from XLA device to CPU " ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> } <nl> <nl> void XlaTransferManager : : CopyCPUTensorToDevice ( const Tensor * cpu_tensor , <nl> void XlaTransferManager : : CopyCPUTensorToDevice ( const Tensor * cpu_tensor , <nl> <nl> XlaTensor * xla_tensor = XlaTensor : : FromTensor ( device_tensor ) ; <nl> CHECK ( xla_tensor ) ; <nl> + <nl> + TensorShape shape ; <nl> + if ( shape_representation_fn_ ) { <nl> + shape = shape_representation_fn_ ( device_tensor - > shape ( ) , <nl> + device_tensor - > dtype ( ) ) ; <nl> + } else { <nl> + shape = device_tensor - > shape ( ) ; <nl> + } <nl> if ( ! xla_tensor - > has_shaped_buffer ( ) ) { <nl> Status s = xla_tensor - > AllocateShapedBuffer ( <nl> - device_tensor - > dtype ( ) , device_tensor - > shape ( ) , client_ , <nl> + device_tensor - > dtype ( ) , shape , client_ , <nl> stream_ - > parent ( ) - > device_ordinal ( ) ) ; <nl> if ( ! s . ok ( ) ) { <nl> done ( s ) ; <nl> void XlaTransferManager : : CopyCPUTensorToDevice ( const Tensor * cpu_tensor , <nl> } <nl> } <nl> <nl> - se : : DeviceMemoryBase dev_dst_ptr = <nl> - XlaTensor : : DeviceMemoryFromTensor ( * device_tensor ) ; <nl> Status status ; <nl> if ( transfer_as_literal_ ) { <nl> - status = TransferLiteralToDevice ( * cpu_tensor , device_tensor ) ; <nl> + Tensor reshaped_cpu_tensor ; <nl> + if ( ! reshaped_cpu_tensor . CopyFrom ( * cpu_tensor , shape ) ) { <nl> + done ( errors : : Internal ( <nl> + " Tensor : : CopyFrom failed when copying from CPU to XLA device " ) ) ; <nl> + return ; <nl> + } <nl> + status = TransferLiteralToDevice ( reshaped_cpu_tensor , device_tensor ) ; <nl> } else { <nl> + se : : DeviceMemoryBase dev_dst_ptr = <nl> + XlaTensor : : DeviceMemoryFromTensor ( * device_tensor ) ; <nl> stream_ - > ThenMemcpy ( & dev_dst_ptr , src_ptr , total_bytes ) ; <nl> / / TODO ( hpucha ) : Make this asynchronous . 
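For now the copy is <nl> / / synchronous : the BlockHostUntilDone call below stalls the host until the <nl> / / device - side memcpy has finished .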
<nl> Status block_status = stream_ - > BlockHostUntilDone ( ) ; <nl> void XlaTransferManager : : CopyDeviceTensorToCPU ( const Tensor * device_tensor , <nl> done ( Status : : OK ( ) ) ; <nl> } <nl> <nl> - XlaDeviceContext : : XlaDeviceContext ( se : : Stream * stream , xla : : LocalClient * client , <nl> - bool transfer_as_literal ) <nl> - : manager_ ( stream , client , transfer_as_literal ) { } <nl> + XlaDeviceContext : : XlaDeviceContext ( <nl> + se : : Stream * stream , xla : : LocalClient * client , bool transfer_as_literal , <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn ) <nl> + : manager_ ( stream , client , transfer_as_literal , <nl> + std : : move ( shape_representation_fn ) ) { } <nl> <nl> void XlaDeviceContext : : CopyCPUTensorToDevice ( const Tensor * cpu_tensor , <nl> Device * device , <nl> mmm a / tensorflow / compiler / jit / xla_device_context . h <nl> ppp b / tensorflow / compiler / jit / xla_device_context . h <nl> limitations under the License . <nl> # include < memory > <nl> <nl> # include " tensorflow / compiler / jit / xla_tensor . h " <nl> + # include " tensorflow / compiler / tf2xla / xla_compiler . h " <nl> # include " tensorflow / compiler / xla / client / global_data . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> class XlaDeviceAllocator : public Allocator { <nl> / / Helper class for managing data transfers between host and XLA devices . <nl> class XlaTransferManager { <nl> public : <nl> - explicit XlaTransferManager ( se : : Stream * stream , xla : : LocalClient * client , <nl> - bool transfer_as_literal ) ; <nl> + explicit XlaTransferManager ( <nl> + se : : Stream * stream , xla : : LocalClient * client , bool transfer_as_literal , <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn ) ; <nl> <nl> void CopyCPUTensorToDevice ( const Tensor * cpu_tensor , Device * device , <nl> Tensor * device_tensor , StatusCallback done ) const ; <nl> class XlaTransferManager { <nl> / / Transfer manager , for marshalling data to and from the device . <nl> xla : : TransferManager * transfer_manager_ ; <nl> / / True if we must use XLA ' s TransferManager for correct device transfers . <nl> - bool transfer_as_literal_ ; <nl> + const bool transfer_as_literal_ ; <nl> + const XlaCompiler : : ShapeRepresentationFn shape_representation_fn_ ; <nl> } ; <nl> <nl> / / DeviceContext for operators assigned to XlaDevice devices . The <nl> class XlaTransferManager { <nl> / / wraps the methods in XlaTransferManager . <nl> class XlaDeviceContext : public DeviceContext { <nl> public : <nl> - explicit XlaDeviceContext ( se : : Stream * stream , xla : : LocalClient * client , <nl> - bool transfer_as_literal ) ; <nl> + explicit XlaDeviceContext ( <nl> + se : : Stream * stream , xla : : LocalClient * client , bool transfer_as_literal , <nl> + XlaCompiler : : ShapeRepresentationFn shape_representation_fn ) ; <nl> <nl> void CopyCPUTensorToDevice ( const Tensor * cpu_tensor , Device * device , <nl> Tensor * device_tensor , <nl> mmm a / tensorflow / compiler / jit / xla_gpu_device . cc <nl> ppp b / tensorflow / compiler / jit / xla_gpu_device . 
cc <nl> Status XlaGpuDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> Status status = <nl> XlaDevice : : Create ( " CUDA " , DEVICE_XLA_GPU , 0 , DEVICE_GPU_XLA_JIT , options , <nl> name_prefix , registration , <nl> - / * transfer_as_literal = * / false , & device ) ; <nl> + / * transfer_as_literal = * / false , <nl> + / * shape_representation_fn = * / { } , & device ) ; <nl> if ( ! status . ok ( ) ) { <nl> / / Treat failures as non - fatal ; there might not be a GPU in the machine . <nl> VLOG ( 1 ) < < " Failed to create XLA_GPU device : " < < status ; <nl> mmm a / tensorflow / compiler / jit / xla_launch_util . cc <nl> ppp b / tensorflow / compiler / jit / xla_launch_util . cc <nl> void XlaComputationLaunchContext : : PopulateOutputs ( <nl> <nl> OP_REQUIRES_OK ( <nl> ctx , ctx - > allocate_output ( i , const_tensor . shape ( ) , & output_tensor ) ) ; <nl> - if ( XlaTensor * xla_tensor = XlaTensor : : FromTensor ( output_tensor ) ) { <nl> - OP_REQUIRES_OK ( ctx , xla_tensor - > AllocateShapedBuffer ( <nl> - const_tensor . dtype ( ) , const_tensor . shape ( ) , <nl> - client_ , stream - > parent ( ) - > device_ordinal ( ) ) ) ; <nl> - } <nl> <nl> Device * device = dynamic_cast < Device * > ( ctx - > device ( ) ) ; <nl> OP_REQUIRES ( ctx , device ! = nullptr , <nl> mmm a / tensorflow / compiler / tests / BUILD <nl> ppp b / tensorflow / compiler / tests / BUILD <nl> py_library ( <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform " , <nl> " / / tensorflow / python : random_seed " , <nl> " / / tensorflow / python : session " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : bitwise_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : math_ops_gen " , <nl> " / / tensorflow / python : nn_ops " , <nl> tf_xla_py_test ( <nl> tags = [ " optonly " ] , <nl> deps = [ <nl> " : xla_test " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test 
" , <nl> " / / tensorflow / python : random_ops " , <nl> ] , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : array_ops_gen " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : gradient_checker " , <nl> " / / tensorflow / python : gradients " , <nl> " / / tensorflow / python : math_ops " , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : array_ops_gen " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : gradient_checker " , <nl> " / / tensorflow / python : gradients " , <nl> " / / tensorflow / python : math_ops " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : nn " , <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : nn " , <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : nn " , <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : data_flow_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / 
tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : layers " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : nn " , <nl> tf_xla_py_test ( <nl> " / / tensorflow / contrib / signal : signal_py " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : extra_py_tests_deps " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : spectral_ops " , <nl> ] , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : data_flow_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : image_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : nn " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> " / / tensorflow / python : platform_test " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] 
, <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> " / / tensorflow / python : platform_test " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> " / / tensorflow / python : platform_test " , <nl> tf_xla_py_test ( <nl> ] , <nl> deps = [ <nl> " : xla_test " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : random_ops " , <nl> ] , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : errors " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> " / / tensorflow / compiler / tf2xla / python : xla " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : errors " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> ] , <nl> ) <nl> <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : 
framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : math_ops_gen " , <nl> " / / tensorflow / python : platform_test " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : data_flow_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / contrib / stateless " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : math_ops_gen " , <nl> " / / tensorflow / python : nn_ops " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : nn_ops_gen " , <nl> tf_xla_py_test ( <nl> srcs = [ " fused_batchnorm_test . 
py " ] , <nl> deps = [ <nl> " : xla_test " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : math_ops_gen " , <nl> " / / tensorflow / python : nn " , <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : math_ops_gen " , <nl> " / / tensorflow / python : nn_ops " , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / compiler / tf2xla / python : xla " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> " / / tensorflow / python : training " , <nl> ] , <nl> tf_xla_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : data_flow_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> <nl> - cuda_py_test ( <nl> + tf_xla_py_test ( <nl> name = " xla_device_test " , <nl> size = " small " , <nl> srcs = [ " xla_device_test . py " ] , <nl> + tags = [ " optonly " ] , <nl> + deps = [ <nl> + " : xla_test " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : platform_test " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " xla_device_gpu_test " , <nl> + size = " small " , <nl> + srcs = [ " xla_device_gpu_test . py " ] , <nl> additional_deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> ] , <nl> ) <nl> cuda_py_test ( <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : framework " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> " / / tensorflow / python : gradients " , <nl> - " / / tensorflow / python : layers " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : nn_ops " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " dense_layer_test " , <nl> + size = " small " , <nl> + srcs = [ " dense_layer_test . 
py " ] , <nl> + additional_deps = [ <nl> + " / / tensorflow / contrib / compiler : compiler_py " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : layers " , <nl> " / / tensorflow / python : variables " , <nl> ] , <nl> ) <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : random_ops " , <nl> " / / tensorflow / python : variables " , <nl> cuda_py_test ( <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : gradients " , <nl> " / / tensorflow / python : init_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> tf_xla_py_test ( <nl> srcs = [ " fake_quant_ops_test . py " ] , <nl> deps = [ <nl> " : xla_test " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> tf_xla_py_test ( <nl> deps = [ <nl> " : xla_test " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : platform_test " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / compiler / tests / argminmax_test . py <nl> ppp b / tensorflow / compiler / tests / argminmax_test . py <nl> <nl> <nl> class ArgMinMaxTest ( xla_test . XLATestCase ) : <nl> <nl> - def _assertOpOutputMatchesExpected ( self , op , inp , expected ) : <nl> - " " " Verifies that ' op ' produces ' expected ' when fed input ' inp ' . <nl> + def _assertOpOutputMatchesExpected ( self , op , axis , output_type , op_input , <nl> + expected ) : <nl> + " " " Verifies that ' op ' produces ' expected ' when fed input ' op_input ' . <nl> <nl> Args : <nl> - op : operator to test <nl> - inp : numpy input array to use as input to ' op ' . <nl> + op : argmin or argmax operator to test . <nl> + axis : integer axis to reduce across . <nl> + output_type : numpy datatype of the output to produce . <nl> + op_input : numpy input array to use as input to ' op ' . <nl> expected : numpy array representing the expected output of ' op ' . <nl> " " " <nl> with self . test_session ( ) as session : <nl> with self . test_scope ( ) : <nl> pinp = array_ops . placeholder ( <nl> - dtypes . as_dtype ( inp . dtype ) , inp . shape , name = " a " ) <nl> - output = op ( pinp ) <nl> - result = session . run ( output , { pinp : inp } ) <nl> + dtypes . as_dtype ( op_input . dtype ) , op_input . shape , name = " a " ) <nl> + output = op ( pinp , axis = axis , output_type = output_type ) <nl> + result = session . run ( output , { pinp : op_input } ) <nl> self . assertAllEqual ( result , expected ) <nl> <nl> def testArgMinMax ( self ) : <nl> # Complex numbers do not support argmin / argmax . <nl> minmax_types = set ( self . numeric_types ) - set ( self . complex_types ) <nl> for dtype in minmax_types : <nl> - self . _assertOpOutputMatchesExpected ( <nl> - lambda x : math_ops . argmax ( x , axis = 0 , output_type = dtypes . 
int32 ) , <nl> - np . array ( [ 1 , 10 , 27 , 3 , 3 , 4 ] , dtype = dtype ) , <nl> - expected = np . int32 ( 2 ) ) <nl> - self . _assertOpOutputMatchesExpected ( <nl> - lambda x : math_ops . argmax ( x , axis = 0 , output_type = dtypes . int32 ) , <nl> - np . array ( [ [ 4 , 1 , 7 ] , [ 3 , 2 , 4 ] ] , dtype = dtype ) , <nl> - expected = np . array ( [ 0 , 1 , 0 ] , dtype = np . int32 ) ) <nl> - self . _assertOpOutputMatchesExpected ( <nl> - lambda x : math_ops . argmax ( x , axis = 1 , output_type = dtypes . int32 ) , <nl> - np . array ( [ [ 4 , 1 ] , [ 3 , 2 ] ] , dtype = dtype ) , <nl> - expected = np . array ( [ 0 , 0 ] , dtype = np . int32 ) ) <nl> + # output_type is a numpy data type that is used to specify the desired <nl> + # output type of the op as well as to convert the Python number to the <nl> + # array scalar of the type . <nl> + for output_type in self . int_types : <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . argmax , <nl> + axis = 0 , <nl> + output_type = output_type , <nl> + op_input = np . array ( [ 1 , 10 , 27 , 3 , 3 , 4 ] , dtype = dtype ) , <nl> + expected = output_type ( 2 ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . argmax , <nl> + axis = 0 , <nl> + output_type = output_type , <nl> + op_input = np . array ( [ [ 4 , 1 , 7 ] , [ 3 , 2 , 4 ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ 0 , 1 , 0 ] , dtype = output_type ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . argmax , <nl> + axis = 1 , <nl> + output_type = output_type , <nl> + op_input = np . array ( [ [ 4 , 1 ] , [ 3 , 2 ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ 0 , 0 ] , dtype = output_type ) ) <nl> <nl> - self . _assertOpOutputMatchesExpected ( <nl> - lambda x : math_ops . argmin ( x , axis = 0 , output_type = dtypes . int32 ) , <nl> - np . array ( [ 3 , 10 , 27 , 3 , 2 , 4 ] , dtype = dtype ) , <nl> - expected = np . int32 ( 4 ) ) <nl> - self . _assertOpOutputMatchesExpected ( <nl> - lambda x : math_ops . argmin ( x , axis = 0 , output_type = dtypes . int32 ) , <nl> - np . array ( [ [ 4 , 1 , 7 ] , [ 3 , 2 , 4 ] ] , dtype = dtype ) , <nl> - expected = np . array ( [ 1 , 0 , 1 ] , dtype = np . int32 ) ) <nl> - self . _assertOpOutputMatchesExpected ( <nl> - lambda x : math_ops . argmin ( x , axis = 1 , output_type = dtypes . int32 ) , <nl> - np . array ( [ [ 4 , 1 ] , [ 3 , 2 ] ] , dtype = dtype ) , <nl> - expected = np . array ( [ 1 , 1 ] , dtype = np . int32 ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . argmin , <nl> + axis = 0 , <nl> + output_type = output_type , <nl> + op_input = np . array ( [ 3 , 10 , 27 , 3 , 2 , 4 ] , dtype = dtype ) , <nl> + expected = output_type ( 4 ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . argmin , <nl> + axis = 0 , <nl> + output_type = output_type , <nl> + op_input = np . array ( [ [ 4 , 1 , 7 ] , [ 3 , 2 , 4 ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 , 0 , 1 ] , dtype = output_type ) ) <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . argmin , <nl> + axis = 1 , <nl> + output_type = output_type , <nl> + op_input = np . array ( [ [ 4 , 1 ] , [ 3 , 2 ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 , 1 ] , dtype = output_type ) ) <nl> <nl> <nl> if __name__ = = " __main__ " : <nl> new file mode 100644 <nl> index 0000000000000 . . b0bf1b79d6c8b <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / tests / dense_layer_test . py <nl> <nl> + # Copyright 2018 The TensorFlow Authors . All Rights Reserved . 
<nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for DenseLayer JIT compilation on the CPU and GPU devices . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import os <nl> + import numpy as np <nl> + <nl> + from tensorflow . contrib . compiler import jit <nl> + from tensorflow . core . protobuf import config_pb2 <nl> + from tensorflow . python . layers import layers <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import variables <nl> + from tensorflow . python . platform import test <nl> + <nl> + jit_scope = jit . experimental_jit_scope <nl> + <nl> + <nl> + def GetRunMetadataLabels ( run_metadata ) : <nl> + " " " Returns all labels in run_metadata . " " " <nl> + labels = [ ] <nl> + for dev_stats in run_metadata . step_stats . dev_stats : <nl> + for node_stats in dev_stats . node_stats : <nl> + labels . append ( node_stats . timeline_label ) <nl> + return labels <nl> + <nl> + <nl> + def InLabels ( labels , substr ) : <nl> + " " " Returns true iff one of the labels contains substr . " " " <nl> + return any ( [ substr in x for x in labels ] ) <nl> + <nl> + <nl> + def XlaLaunchOpCount ( labels ) : <nl> + " " " Count how many _XlaLaunch labels are present . " " " <nl> + return sum ( " _XlaLaunch ( " in x for x in labels ) <nl> + <nl> + <nl> + class DenseLayerTest ( test . TestCase ) : <nl> + <nl> + def testDenseLayerAutoJit ( self ) : <nl> + " " " Tests dense layer compilation in auto - jit mode . <nl> + <nl> + Dense layer should be compiled into a single _XlaLaunch op in auto - jit mode . <nl> + " " " <nl> + <nl> + os . environ [ " TF_XLA_FLAGS " ] = ( " - - tf_xla_cpu_global_jit " ) <nl> + config = config_pb2 . ConfigProto ( ) <nl> + config . graph_options . optimizer_options . global_jit_level = ( <nl> + config_pb2 . OptimizerOptions . ON_1 ) <nl> + <nl> + with self . test_session ( config = config ) as sess : <nl> + x = array_ops . placeholder ( shape = [ None , None , 3 ] , dtype = np . float32 ) <nl> + y = layers . dense ( x , 3 ) <nl> + <nl> + sess . run ( variables . initialize_all_variables ( ) ) <nl> + run_metadata = config_pb2 . RunMetadata ( ) <nl> + sess . run ( <nl> + y , { x : np . array ( [ [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] , [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] ] ) } , <nl> + run_metadata = run_metadata , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) ) <nl> + <nl> + labels = GetRunMetadataLabels ( run_metadata ) <nl> + self . assertEqual ( 1 , XlaLaunchOpCount ( labels ) ) <nl> + self . 
assertFalse ( InLabels ( labels , " ListDiff " ) ) <nl> + <nl> + def testDenseLayerJitScopeDefinedShape ( self ) : <nl> + " " " Tests that the dense layer node is properly compiled in jit scope . <nl> + <nl> + Dense layer with static shape input tensor should be compiled into a single <nl> + _XlaLaunch op by XLA . <nl> + " " " <nl> + <nl> + with self . test_session ( ) as sess : <nl> + x = array_ops . placeholder ( shape = [ 2 , 2 , 3 ] , dtype = np . float32 ) <nl> + with jit_scope ( ) : <nl> + y = layers . dense ( x , 3 ) <nl> + <nl> + sess . run ( variables . initialize_all_variables ( ) ) <nl> + run_metadata = config_pb2 . RunMetadata ( ) <nl> + sess . run ( <nl> + y , { x : np . array ( [ [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] , [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] ] ) } , <nl> + run_metadata = run_metadata , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) ) <nl> + <nl> + labels = GetRunMetadataLabels ( run_metadata ) <nl> + self . assertEqual ( 1 , XlaLaunchOpCount ( labels ) ) <nl> + # No need to check whether ListDiff is compiled or not because ListDiff op <nl> + # is not used when input tensor shape is fully defined . <nl> + <nl> + def testDenseLayerJitScopeUndefinedShape ( self ) : <nl> + " " " Tests that the dense layer node is properly compiled in jit scope . <nl> + <nl> + Dense layer uses shape op to get shape of input tensor if its shape is not <nl> + fully defined . XLA does not cluster shape op with other operators . But in <nl> + experimental_jit_scope , XLA is forced to compile shape op into its own <nl> + cluster , causing dense layer to be split into TWO _XlaLaunch ops . <nl> + " " " <nl> + <nl> + with self . test_session ( ) as sess : <nl> + x = array_ops . placeholder ( shape = [ None , None , 3 ] , dtype = np . float32 ) <nl> + with jit_scope ( ) : <nl> + y = layers . dense ( x , 3 ) <nl> + <nl> + sess . run ( variables . initialize_all_variables ( ) ) <nl> + run_metadata = config_pb2 . RunMetadata ( ) <nl> + sess . run ( <nl> + y , { x : np . array ( [ [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] , [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] ] ) } , <nl> + run_metadata = run_metadata , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) ) <nl> + <nl> + labels = GetRunMetadataLabels ( run_metadata ) <nl> + self . assertEqual ( 2 , XlaLaunchOpCount ( labels ) ) <nl> + self . assertFalse ( InLabels ( labels , " ListDiff " ) ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + test . main ( ) <nl> mmm a / tensorflow / compiler / tests / eager_test . py <nl> ppp b / tensorflow / compiler / tests / eager_test . py <nl> def foo ( c1 , r1 , v1 , c2 , v2 , r2 ) : <nl> self . assertAllEqual ( [ [ 1 . ] ] , c . numpy ( ) ) <nl> self . assertAllEqual ( [ [ 20 . , 40 . ] , [ 90 . , 120 . ] ] , d . numpy ( ) ) <nl> <nl> + def testDefunInGradientTape ( self ) : <nl> + with self . test_scope ( ) : <nl> + v0 = resource_variable_ops . ResourceVariable ( 5 . 0 ) <nl> + <nl> + @ function . defun ( compiled = True ) <nl> + def f ( x ) : <nl> + x = v0 * v0 * x <nl> + return x <nl> + <nl> + x = constant_op . constant ( 3 . 0 ) <nl> + with backprop . GradientTape ( ) as tape : <nl> + y = f ( x ) <nl> + dy = tape . gradient ( y , v0 ) <nl> + <nl> + self . assertEqual ( 75 , y . numpy ( ) ) <nl> + self . assertEqual ( 30 , dy . numpy ( ) ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> ops . enable_eager_execution ( <nl> mmm a / tensorflow / compiler / tests / function_test . 
py <nl> ppp b / tensorflow / compiler / tests / function_test . py <nl> <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import function <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . platform import googletest <nl> <nl> <nl> - @ test_util . with_c_api <nl> class FunctionTest ( XLATestCase ) : <nl> <nl> def testFunction ( self ) : <nl> mmm a / tensorflow / compiler / tests / jit_test . py <nl> ppp b / tensorflow / compiler / tests / jit_test . py <nl> <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import function <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . layers import layers <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import gradients_impl <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn_ops <nl> - from tensorflow . python . ops import variables <nl> from tensorflow . python . platform import test <nl> <nl> jit_scope = jit . experimental_jit_scope <nl> def Forward ( x ) : <nl> self . assertFalse ( InLabels ( labels , " Mul " ) ) <nl> self . assertTrue ( InLabels ( labels , " _XlaLaunch " ) ) <nl> <nl> - def testDenseLayer ( self ) : <nl> - " " " Tests that the dense layer node is properly compiled . " " " <nl> - <nl> - with self . test_session ( config = NoRewriteSessionConfig ( ) ) as sess : <nl> - x = array_ops . placeholder ( shape = [ 2 , 3 ] , dtype = np . float32 ) <nl> - with jit_scope ( ) : <nl> - y = layers . dense ( x , 3 ) <nl> - <nl> - sess . run ( variables . initialize_all_variables ( ) ) <nl> - run_metadata = config_pb2 . RunMetadata ( ) <nl> - sess . run ( y , { x : np . array ( [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] ) } , <nl> - run_metadata = run_metadata , <nl> - options = config_pb2 . RunOptions ( <nl> - trace_level = config_pb2 . RunOptions . FULL_TRACE ) ) <nl> - <nl> - self . assert_ ( MetadataHasXlaLaunch ( run_metadata ) ) <nl> - <nl> <nl> class ElementWiseFusionTest ( test . TestCase ) : <nl> <nl> mmm a / tensorflow / compiler / tests / unary_ops_test . py <nl> ppp b / tensorflow / compiler / tests / unary_ops_test . py <nl> def _assertSoftplusMatchesExpected ( self , features , dtype ) : <nl> zero = np . asarray ( 0 ) . astype ( dtype ) <nl> expected = np . logaddexp ( zero , features ) <nl> self . _assertOpOutputMatchesExpected ( <nl> - nn_ops . softplus , features , expected = expected ) <nl> + nn_ops . softplus , features , expected = expected , <nl> + rtol = 1e - 6 , <nl> + atol = 9 . 1e - 6 ) <nl> <nl> def testSoftplus ( self ) : <nl> for dtype in self . float_types : <nl> new file mode 100644 <nl> index 0000000000000 . . 1e30ebd55d09f <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / tests / xla_device_gpu_test . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Test cases for XLA devices . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . python . client import session as session_lib <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . platform import test <nl> + <nl> + <nl> + class XlaDeviceGpuTest ( test . TestCase ) : <nl> + <nl> + def testCopiesToAndFromGpuWork ( self ) : <nl> + " " " Tests that copies between GPU and XLA devices work . " " " <nl> + if not test . is_gpu_available ( ) : <nl> + return <nl> + <nl> + with session_lib . Session ( ) as sess : <nl> + x = array_ops . placeholder ( dtypes . float32 , [ 2 ] ) <nl> + with ops . device ( " GPU " ) : <nl> + y = x * 2 <nl> + with ops . device ( " device : XLA_CPU : 0 " ) : <nl> + z = y * y <nl> + with ops . device ( " GPU " ) : <nl> + w = y + z <nl> + result = sess . run ( w , { x : [ 1 . 5 , 0 . 5 ] } ) <nl> + self . assertAllClose ( result , [ 12 . , 2 . ] , rtol = 1e - 3 ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + test . main ( ) <nl> mmm a / tensorflow / compiler / tests / xla_device_test . py <nl> ppp b / tensorflow / compiler / tests / xla_device_test . py <nl> <nl> - # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> # <nl> # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> # you may not use this file except in compliance with the License . <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - from tensorflow . python . client import session as session_lib <nl> - from tensorflow . python . framework import dtypes <nl> + import numpy as np <nl> + <nl> + from tensorflow . compiler . tests . xla_test import XLATestCase <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - class XlaDeviceTest ( test . TestCase ) : <nl> + class XlaDeviceTest ( XLATestCase ) : <nl> <nl> def testCopies ( self ) : <nl> - " " " Tests that copies between GPU and XLA devices work . " " " <nl> - if not test . is_gpu_available ( ) : <nl> - return <nl> - <nl> - with session_lib . Session ( ) as sess : <nl> - x = array_ops . placeholder ( dtypes . float32 , [ 2 ] ) <nl> - with ops . device ( " GPU " ) : <nl> - y = x * 2 <nl> - with ops . device ( " device : XLA_CPU : 0 " ) : <nl> - z = y * y <nl> - with ops . device ( " GPU " ) : <nl> - w = y + z <nl> - result = sess . run ( w , { x : [ 1 . 5 , 0 . 5 ] } ) <nl> - self . assertAllClose ( result , [ 12 . , 2 . ] , rtol = 1e - 3 ) <nl> + " " " Tests that copies onto and off XLA devices work . 
" " " <nl> + shapes = [ [ 0 ] , [ 1 ] , [ 1 , 0 ] , [ 1024 , 0 ] , [ 1024 , 1 ] , [ 3 , 777 ] , [ 777 , 3 ] , <nl> + [ 16384 , 1 ] , [ 1 , 16384 ] , [ 1 , 20000 , 1 , 1 ] ] <nl> + for dtype in self . numeric_types : <nl> + for shape in shapes : <nl> + with self . test_session ( ) as sess : <nl> + with ops . device ( " CPU " ) : <nl> + x = array_ops . placeholder ( dtype , shape ) <nl> + with self . test_scope ( ) : <nl> + y = x + x <nl> + with ops . device ( " CPU " ) : <nl> + z = array_ops . identity ( y ) <nl> + <nl> + inputs = np . random . randint ( - 100 , 100 , shape ) . astype ( dtype ) <nl> + result = sess . run ( z , { x : inputs } ) <nl> + self . assertAllCloseAccordingToType ( result , inputs + inputs ) <nl> <nl> <nl> if __name__ = = " __main__ " : <nl> mmm a / tensorflow / compiler / tf2xla / BUILD <nl> ppp b / tensorflow / compiler / tf2xla / BUILD <nl> tf_cc_test ( <nl> " / / tensorflow / compiler / tf2xla / kernels : xla_ops " , <nl> " / / tensorflow / compiler / xla : literal_util " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> + " / / tensorflow / compiler / xla : status_macros " , <nl> " / / tensorflow / compiler / xla / client : client_library " , <nl> " / / tensorflow / compiler / xla / client : local_client " , <nl> " / / tensorflow / compiler / xla / service : cpu_plugin " , <nl> mmm a / tensorflow / compiler / tf2xla / functionalize_control_flow . cc <nl> ppp b / tensorflow / compiler / tf2xla / functionalize_control_flow . cc <nl> Status BuildLoopBody ( const Graph & graph , Frame * frame , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status FunctionalizeLoop ( Graph * graph , Frame * frame , <nl> + / / Copy the FunctionDef of given function from lookup_library to library , if <nl> + / / it can be found in lookup_library but is missing from library . <nl> + Status AddMissingFunctionByName ( const string & function_name , <nl> + const FunctionLibraryDefinition * lookup_library , <nl> + FunctionLibraryDefinition * library ) { <nl> + if ( ! library - > Find ( function_name ) & & lookup_library - > Find ( function_name ) ) { <nl> + return library - > AddFunctionDef ( * lookup_library - > Find ( function_name ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / / Iterate over all functions that the given fdef refers to . Copy the missing <nl> + / / FunctionDefs from lookup_library to library . <nl> + Status AddMissingFunctionDef ( const FunctionDef & fdef , <nl> + const FunctionLibraryDefinition * lookup_library , <nl> + FunctionLibraryDefinition * library ) { <nl> + TF_RET_CHECK ( lookup_library ) ; <nl> + for ( const NodeDef & node : fdef . node_def ( ) ) { <nl> + if ( library - > Find ( node . op ( ) ) ) { <nl> + continue ; <nl> + } <nl> + / / The function refered by ' SymbolicGradient ' node is specified in its <nl> + / / attribute ' f ' . <nl> + if ( node . op ( ) = = FunctionLibraryDefinition : : kGradientOp ) { <nl> + const AttrValue * attr = <nl> + AttrSlice ( & node . attr ( ) ) . Find ( FunctionLibraryDefinition : : kFuncAttr ) ; <nl> + if ( ! attr ) { <nl> + return errors : : InvalidArgument ( " SymbolicGradient is missing attr : f " ) ; <nl> + } <nl> + const string & func_name = attr - > func ( ) . name ( ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + AddMissingFunctionByName ( func_name , lookup_library , library ) ) ; <nl> + / / Copy the user - defined gradient function if it exists . <nl> + const string grad_name = lookup_library - > FindGradient ( func_name ) ; <nl> + if ( ! grad_name . 
empty ( ) & & library - > FindGradient ( func_name ) . empty ( ) ) { <nl> + TF_RETURN_IF_ERROR ( <nl> + AddMissingFunctionByName ( grad_name , lookup_library , library ) ) ; <nl> + GradientDef grad_def ; <nl> + grad_def . set_function_name ( func_name ) ; <nl> + grad_def . set_gradient_func ( grad_name ) ; <nl> + TF_RETURN_IF_ERROR ( library - > AddGradientDef ( grad_def ) ) ; <nl> + } <nl> + } else if ( lookup_library - > Find ( node . op ( ) ) ) { <nl> + TF_RETURN_IF_ERROR ( <nl> + library - > AddFunctionDef ( * lookup_library - > Find ( node . op ( ) ) ) ) ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status FunctionalizeLoop ( const FunctionLibraryDefinition * lookup_library , <nl> + Graph * graph , Frame * frame , <nl> FunctionLibraryDefinition * library ) { <nl> VLOG ( 2 ) < < " Frame " < < frame - > name < < " before : " <nl> < < dump_graph : : DumpGraphToFile ( " functionalize_before " , * graph , <nl> Status FunctionalizeLoop ( Graph * graph , Frame * frame , <nl> <nl> TF_RETURN_IF_ERROR ( library - > AddFunctionDef ( cond_fdef ) ) ; <nl> TF_RETURN_IF_ERROR ( library - > AddFunctionDef ( body_fdef ) ) ; <nl> + if ( lookup_library ) { <nl> + / / Copy missing FunctionDefs from lookup_library to library to make library <nl> + / / self - contained . <nl> + TF_RETURN_IF_ERROR ( <nl> + AddMissingFunctionDef ( cond_fdef , lookup_library , library ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + AddMissingFunctionDef ( body_fdef , lookup_library , library ) ) ; <nl> + } <nl> <nl> / / Builds a While operator . <nl> NodeDef while_def ; <nl> Status FunctionalizeCond : : Functionalize ( Graph * graph , <nl> / / functional equivalents . <nl> Status FunctionalizeControlFlow ( Graph * graph , <nl> FunctionLibraryDefinition * library ) { <nl> + return FunctionalizeControlFlow ( / * lookup_library = * / nullptr , graph , library ) ; <nl> + } <nl> + <nl> + Status FunctionalizeControlFlow ( const FunctionLibraryDefinition * lookup_library , <nl> + Graph * graph , <nl> + FunctionLibraryDefinition * library ) { <nl> VLOG ( 2 ) < < " FunctionalizeControlFlow ( initial ) : " <nl> < < dump_graph : : DumpGraphToFile ( " functionalize_initial " , * graph , <nl> library ) ; <nl> Status FunctionalizeControlFlow ( Graph * graph , <nl> continue ; <nl> } <nl> <nl> - TF_RETURN_IF_ERROR ( FunctionalizeLoop ( graph , frame , library ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + FunctionalizeLoop ( lookup_library , graph , frame , library ) ) ; <nl> <nl> / / If the parent has no remaining children , add it to the worklist . <nl> - - frame - > parent - > num_children ; <nl> mmm a / tensorflow / compiler / tf2xla / functionalize_control_flow . h <nl> ppp b / tensorflow / compiler / tf2xla / functionalize_control_flow . h <nl> limitations under the License . <nl> namespace tensorflow { <nl> <nl> / / Transformation that converts tf . while_loop ( ) loops into functional While <nl> - / / operators , suitable for XLA compilation . <nl> + / / operators , suitable for XLA compilation . If lookup_library is provided , use <nl> + / / it to make the library for control flow self - contained . <nl> Status FunctionalizeControlFlow ( Graph * graph , <nl> FunctionLibraryDefinition * library ) ; <nl> + Status FunctionalizeControlFlow ( const FunctionLibraryDefinition * lookup_library , <nl> + Graph * graph , <nl> + FunctionLibraryDefinition * library ) ; <nl> <nl> } / / namespace tensorflow <nl> <nl> mmm a / tensorflow / compiler / tf2xla / functionalize_control_flow_test . 
cc <nl> ppp b / tensorflow / compiler / tf2xla / functionalize_control_flow_test . cc <nl> TEST ( FunctionalizeControlFlow , OneLoopVar ) { <nl> } <nl> } <nl> <nl> + / / @ function . Defun ( noinline = True ) <nl> + / / def increment_fn ( x ) : <nl> + / / return [ x + 1 ] <nl> + / / Define the above function , and add it to the given graph . It ' s used as the <nl> + / / while loop body in NoinlineLoopBody test . <nl> + Status AddNoinlineFunctionToGraph ( const string & node_name , Graph * graph ) { <nl> + FunctionDef fdef = FunctionDefHelper : : Create ( <nl> + " increment_fn " , { " x : int32 " } , { " add : int32 " } , { } , <nl> + { <nl> + { { " add / y " } , " Const " , { } , { { " dtype " , DT_INT32 } } } , <nl> + { { " add_0 " } , " Add " , { " x " , " add / y : output : 0 " } , { { " T " , DT_INT32 } } } , <nl> + } , <nl> + { { " add " , " add_0 : z : 0 " } } ) ; <nl> + ( * fdef . mutable_attr ( ) ) [ " _noinline " ] . set_b ( true ) ; <nl> + FunctionDefLibrary fdef_lib ; <nl> + * ( fdef_lib . add_function ( ) ) = fdef ; <nl> + TF_RETURN_IF_ERROR ( graph - > AddFunctionLibrary ( fdef_lib ) ) ; <nl> + NodeDef increment_fn ; <nl> + increment_fn . set_name ( node_name ) ; <nl> + increment_fn . set_op ( " increment_fn " ) ; <nl> + * increment_fn . add_input ( ) = " while / Identity " ; <nl> + * increment_fn . add_input ( ) = " ^ while / Identity " ; <nl> + Status status ; <nl> + graph - > AddNode ( increment_fn , & status ) ; <nl> + return status ; <nl> + } <nl> + <nl> + / / Graph : <nl> + / / x = array_ops . placeholder ( dtypes . int32 ) <nl> + / / y = control_flow_ops . while_loop ( lambda i : i < 10 , increment_fn , [ x ] ) <nl> + TEST ( FunctionalizeControlFlow , NoinlineLoopBody ) { <nl> + const string & noinline_node_name = " while / increment_fn " ; <nl> + Graph graph ( OpRegistry : : Global ( ) ) ; <nl> + { <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + auto dummy = ops : : Placeholder ( scope . WithOpName ( " Dummy " ) , DT_INT32 ) ; <nl> + auto source = ops : : Placeholder ( scope . WithOpName ( " source " ) , DT_INT32 ) ; <nl> + auto enter = ops : : internal : : Enter ( scope . WithOpName ( " while / Enter " ) , source , <nl> + " while / while_context " ) ; <nl> + auto merge = ops : : Merge ( scope . WithOpName ( " while / Merge " ) , <nl> + std : : initializer_list < Input > { enter , dummy } ) ; <nl> + auto ten = ops : : Const < int32 > ( <nl> + scope . WithOpName ( " while / Less / y " ) . WithControlDependencies ( merge . output ) , <nl> + 10 ) ; <nl> + auto less = ops : : Less ( scope . WithOpName ( " while / Less " ) , merge . output , ten ) ; <nl> + auto loop_cond = ops : : LoopCond ( scope . WithOpName ( " while / LoopCond " ) , less ) ; <nl> + auto switch_ = <nl> + ops : : Switch ( scope . WithOpName ( " while / Switch " ) , merge . output , loop_cond ) ; <nl> + auto exit = ops : : internal : : Exit ( scope . WithOpName ( " while / Exit " ) , <nl> + switch_ . output_false ) ; <nl> + auto identity = <nl> + ops : : Identity ( scope . WithOpName ( " while / Identity " ) , switch_ . output_true ) ; <nl> + <nl> + TF_ASSERT_OK ( AddNoinlineFunctionToGraph ( noinline_node_name , scope . graph ( ) ) ) ; <nl> + <nl> + NodeDef next_iter ; <nl> + next_iter . set_name ( " while / NextIteration " ) ; <nl> + next_iter . set_op ( " NextIteration " ) ; <nl> + * next_iter . add_input ( ) = noinline_node_name ; <nl> + ( * next_iter . mutable_attr ( ) ) [ " T " ] . set_type ( DT_INT32 ) ; <nl> + <nl> + Status status ; <nl> + Node * n = scope . 
graph ( ) - > AddNode ( next_iter , & status ) ; <nl> + TF_ASSERT_OK ( status ) ; <nl> + <nl> + / / Remove the dummy node and add the loop backedge . <nl> + scope . graph ( ) - > RemoveNode ( dummy . node ( ) ) ; <nl> + scope . graph ( ) - > AddEdge ( n , 0 , merge . output . node ( ) , 1 ) ; <nl> + TF_ASSERT_OK ( scope . ToGraph ( & graph ) ) ; <nl> + } <nl> + <nl> + FunctionLibraryDefinition lookup_lib ( graph . flib_def ( ) ) ; <nl> + FunctionLibraryDefinition library ( OpRegistry : : Global ( ) , { } ) ; <nl> + / / Function increment_fn will be copied from lookup_lib to library . <nl> + TF_ASSERT_OK ( FunctionalizeControlFlow ( & lookup_lib , & graph , & library ) ) ; <nl> + <nl> + GraphDef graph_def ; <nl> + graph . ToGraphDef ( & graph_def ) ; <nl> + <nl> + NameAttrList cond_fn , body_fn ; <nl> + TF_ASSERT_OK ( FindWhileCondAndBody ( graph_def , & cond_fn , & body_fn ) ) ; <nl> + <nl> + / / Outer graph <nl> + { <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + auto source = ops : : Placeholder ( scope . WithOpName ( " source " ) , DT_INT32 ) ; <nl> + auto while_op = <nl> + ops : : XlaWhile ( scope . WithOpName ( " while / LoopCond " ) , <nl> + std : : initializer_list < Input > { source } , cond_fn , body_fn ) ; <nl> + GraphDef expected ; <nl> + TF_ASSERT_OK ( scope . ToGraphDef ( & expected ) ) ; <nl> + TF_EXPECT_GRAPH_EQ ( expected , graph_def ) ; <nl> + } <nl> + <nl> + / / Body graph . <nl> + { <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + auto arg = ops : : _Arg ( scope . WithOpName ( " _arg0 " ) , DT_INT32 , 0 ) ; <nl> + TF_ASSERT_OK ( AddNoinlineFunctionToGraph ( noinline_node_name , scope . graph ( ) ) ) ; <nl> + auto identity = ops : : Identity ( scope . WithOpName ( " while / Identity " ) , arg ) ; <nl> + NodeDef retval ; <nl> + retval . set_name ( " _retval0_RetVal " ) ; <nl> + retval . set_op ( FunctionLibraryDefinition : : kRetOp ) ; <nl> + * retval . add_input ( ) = noinline_node_name ; <nl> + ( * retval . mutable_attr ( ) ) [ " T " ] . set_type ( DT_INT32 ) ; <nl> + ( * retval . mutable_attr ( ) ) [ " index " ] . set_i ( 0 ) ; <nl> + Status status ; <nl> + scope . graph ( ) - > AddNode ( retval , & status ) ; <nl> + TF_ASSERT_OK ( status ) ; <nl> + <nl> + GraphDef expected ; <nl> + TF_ASSERT_OK ( scope . ToGraphDef ( & expected ) ) ; <nl> + <nl> + InstantiationResultForTest result ; <nl> + / / Verify that increment_fn has been copied to library . <nl> + TF_EXPECT_OK ( InstantiateFunctionForTest ( body_fn . name ( ) , library , & result ) ) ; <nl> + <nl> + EXPECT_EQ ( DataTypeVector { DT_INT32 } , result . arg_types ) ; <nl> + EXPECT_EQ ( DataTypeVector { DT_INT32 } , result . ret_types ) ; <nl> + / / Ignore the function library when comparing the graphs . <nl> + expected . clear_library ( ) ; <nl> + TF_EXPECT_GRAPH_EQ ( expected , result . gdef ) ; <nl> + } <nl> + } <nl> + <nl> / / Tests functionalizing OneLoopVar where the loop value is not used post the <nl> / / loop . <nl> / / Graph : <nl> mmm a / tensorflow / compiler / tf2xla / graph_compiler . cc <nl> ppp b / tensorflow / compiler / tf2xla / graph_compiler . cc <nl> Status GraphCompiler : : CompileFunctionalNode ( Node * n , <nl> TF_RETURN_IF_ERROR ( <nl> PrepareArguments ( & xla_op_context , graph . get ( ) , expressions , & arguments ) ) ; <nl> <nl> + XlaCompiler : : CompileOptions compile_options ; <nl> + compile_options . 
is_entry_computation = false ; <nl> XlaCompiler : : CompilationResult result ; <nl> - <nl> - TF_RETURN_IF_ERROR ( compiler - > CompileFunction ( XlaCompiler : : CompileOptions ( ) , <nl> - func , arguments , & result ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + compiler - > CompileFunction ( compile_options , func , arguments , & result ) ) ; <nl> <nl> TF_RET_CHECK ( arguments . size ( ) = = expressions . size ( ) ) ; <nl> <nl> Status GraphCompiler : : CompileFunctionalNode ( Node * n , <nl> auto output_handle = b - > Call ( * result . computation , handles ) ; <nl> / / The output handle of ` Call ` computation is a tuple type . Unzip it so <nl> / / that it can fit into future computations . <nl> + int computation_output = 0 ; <nl> for ( int64 i = 0 ; i < n - > num_outputs ( ) ; + + i ) { <nl> if ( result . outputs [ i ] . is_constant ) { <nl> xla_op_context . SetConstantOutput ( i , result . outputs [ i ] . constant_value ) ; <nl> } else { <nl> - xla_op_context . SetOutput ( i , b - > GetTupleElement ( output_handle , i ) ) ; <nl> + xla_op_context . SetOutput ( <nl> + i , b - > GetTupleElement ( output_handle , computation_output ) ) ; <nl> + + + computation_output ; <nl> } <nl> } <nl> return b - > first_error ( ) ; <nl> mmm a / tensorflow / compiler / tf2xla / kernels / retval_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / retval_op . cc <nl> class RetvalOp : public XlaOpKernel { <nl> } <nl> <nl> XlaContext & tc = XlaContext : : Get ( ctx ) ; <nl> - if ( input_shape . num_elements ( ) = = 0 | | is_constant . ValueOrDie ( ) ) { <nl> + if ( tc . resolve_compile_time_constants ( ) & & <nl> + ( input_shape . num_elements ( ) = = 0 | | is_constant . ValueOrDie ( ) ) ) { <nl> xla : : Literal literal ; <nl> OP_REQUIRES_OK ( ctx , ctx - > ConstantInput ( 0 , & literal ) ) ; <nl> OP_REQUIRES_OK ( ctx , tc . AddConstRetval ( index_ , dtype_ , literal ) ) ; <nl> } else { <nl> - / / The core from which a return value is returned depends on the core <nl> - / / assignment of the input to the retval . Since we can ' t change the core <nl> - / / assignment of < input > as this point , create a tuple / get - tuple - element <nl> - / / combination so that the core will be set on them . <nl> - auto tuple_elem = <nl> - ctx - > builder ( ) - > GetTupleElement ( ctx - > builder ( ) - > Tuple ( { input } ) , 0 ) ; <nl> - tc . AddRetval ( index_ , dtype_ , tuple_elem ) ; <nl> + TensorShape shape = ctx - > InputShape ( 0 ) ; <nl> + TensorShape representation_shape = <nl> + tc . is_entry_computation ( ) <nl> + ? tc . RepresentationShape ( shape , ctx - > input_type ( 0 ) ) <nl> + : shape ; <nl> + <nl> + xla : : XlaOp output = input ; <nl> + if ( tc . is_entry_computation ( ) ) { <nl> + output = <nl> + ctx - > builder ( ) - > Reshape ( input , representation_shape . dim_sizes ( ) ) ; <nl> + } else { <nl> + / / The core from which a return value is returned depends on the <nl> + / / device assignment of the input to the retval . Since we can ' t change <nl> + / / the device assignment of " input " at this point , we must always <nl> + / / introduce an operator here , even if the shape does not change . <nl> + / / TODO ( b / 76097077 ) : propagate device assignments onto arguments and <nl> + / / return values of functions , and then reshape unconditionally . <nl> + output = ctx - > builder ( ) - > GetTupleElement ( <nl> + ctx - > builder ( ) - > Tuple ( { output } ) , 0 ) ; <nl> + } <nl> + tc . 
AddRetval ( index_ , dtype_ , shape , output ) ; <nl> } <nl> } <nl> } <nl> mmm a / tensorflow / compiler / tf2xla / kernels / unary_ops . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / unary_ops . cc <nl> XLAJIT_MAKE_UNARY ( Sinh , <nl> b - > Mul ( b - > Sub ( b - > Exp ( x ) , b - > Exp ( b - > Neg ( x ) ) ) , <nl> XlaHelpers : : FloatLiteral ( b , input_type ( 0 ) , 0 . 5 ) ) ) ; <nl> <nl> - static xla : : XlaOp Softplus ( xla : : XlaBuilder * b , DataType dtype , <nl> - const xla : : XlaOp & features ) { <nl> - xla : : XlaOp threshold = b - > Add ( b - > Log ( XlaHelpers : : Epsilon ( b , dtype ) ) , <nl> - XlaHelpers : : FloatLiteral ( b , dtype , 2 . 0 ) ) ; <nl> - / / Value above which exp ( x ) may overflow , but softplus ( x ) = = x <nl> - / / is within machine epsilon . <nl> - xla : : XlaOp too_large = b - > Gt ( features , b - > Neg ( threshold ) ) ; <nl> - / / Value below which exp ( x ) may underflow , but softplus ( x ) = = exp ( x ) <nl> - / / is within machine epsilon . <nl> - xla : : XlaOp too_small = b - > Lt ( features , threshold ) ; <nl> - xla : : XlaOp features_exp = b - > Exp ( features ) ; <nl> - xla : : XlaOp output = b - > Select ( <nl> - too_large , features , <nl> - b - > Select ( too_small , features_exp , <nl> - b - > Log ( b - > Add ( features_exp , XlaHelpers : : One ( b , dtype ) ) ) ) ) ; <nl> - return output ; <nl> - } <nl> - XLAJIT_MAKE_UNARY ( Softplus , Softplus ( b , input_type ( 0 ) , x ) ) ; <nl> + / / softplus ( x ) = log ( 1 + exp ( x ) ) <nl> + / / <nl> + / / This is not numerically stable when x is large , it can easily overflow . <nl> + / / However , we can compute it as LogSumExp ( x , 0 ) : <nl> + / / max ( x , 0 ) + log ( exp ( x - max ( x , 0 ) ) + exp ( 0 - max ( x , 0 ) ) ) <nl> + / / <nl> + / / This is equivalent to : <nl> + / / max ( x , 0 ) + log1p ( exp ( - abs ( x ) ) ) <nl> + XLAJIT_MAKE_UNARY ( Softplus , <nl> + b - > Add ( b - > Max ( x , XlaHelpers : : Zero ( b , input_type ( 0 ) ) ) , <nl> + b - > Log1p ( b - > Exp ( b - > Neg ( b - > Abs ( x ) ) ) ) ) ) ; <nl> <nl> / / softsign ( x ) = x / ( abs ( x ) + 1 ) <nl> XLAJIT_MAKE_UNARY ( Softsign , <nl> mmm a / tensorflow / compiler / tf2xla / xla_compiler . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_compiler . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / compiler / tf2xla / xla_compiler . h " <nl> <nl> - # include < deque > <nl> # include < numeric > <nl> + # include < vector > <nl> <nl> - # include " tensorflow / compiler / tf2xla / const_analysis . h " <nl> # include " tensorflow / compiler / tf2xla / dump_graph . h " <nl> # include " tensorflow / compiler / tf2xla / functionalize_control_flow . h " <nl> # include " tensorflow / compiler / tf2xla / graph_compiler . h " <nl> limitations under the License . <nl> # include " tensorflow / compiler / tf2xla / type_util . h " <nl> # include " tensorflow / compiler / tf2xla / xla_compilation_device . h " <nl> # include " tensorflow / compiler / tf2xla / xla_context . h " <nl> - # include " tensorflow / compiler / tf2xla / xla_op_kernel . h " <nl> # include " tensorflow / compiler / xla / client / client_library . h " <nl> # include " tensorflow / core / common_runtime / device . h " <nl> # include " tensorflow / core / common_runtime / executor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / graph / node_builder . h " <nl> # include " tensorflow / core / lib / hash / hash . h " <nl> # include " tensorflow / core / platform / logging . 
h " <nl> - # include " tensorflow / core / public / version . h " <nl> <nl> namespace tensorflow { <nl> namespace { <nl> XlaCompiler : : XlaCompiler ( XlaCompiler : : Options options ) <nl> local_flib_runtime_ = local_pflr_ - > GetFLR ( device_ - > name ( ) ) ; <nl> flib_runtime_ = pflr_ - > GetFLR ( device_ - > name ( ) ) ; <nl> <nl> - / / The default variable representation shape is the identity function . <nl> - if ( ! options_ . variable_representation_shape_fn ) { <nl> - options_ . variable_representation_shape_fn = <nl> - [ ] ( const TensorShape & shape , DataType type ) { return shape ; } ; <nl> + / / The default shape representation function is the identity . <nl> + if ( ! options_ . shape_representation_fn ) { <nl> + options_ . shape_representation_fn = [ ] ( const TensorShape & shape , <nl> + DataType type ) { return shape ; } ; <nl> } <nl> } <nl> <nl> Status XlaCompiler : : CompileFunction ( const XlaCompiler : : CompileOptions & options , <nl> <nl> / / Computes the XLA shape for argument ' arg ' . <nl> Status XlaCompiler : : XLAShapeForArgument ( const XlaCompiler : : Argument & arg , <nl> + bool is_entry_computation , <nl> xla : : Shape * xla_shape ) { <nl> switch ( arg . kind ) { <nl> case XlaCompiler : : Argument : : kConstant : <nl> - return TensorShapeToXLAShape ( arg . type , arg . constant_value . shape ( ) , <nl> - xla_shape ) ; <nl> - case XlaCompiler : : Argument : : kParameter : <nl> - return TensorShapeToXLAShape ( arg . type , arg . shape , xla_shape ) ; <nl> + LOG ( FATAL ) < < " Unreachable case " ; <nl> + case XlaCompiler : : Argument : : kParameter : { <nl> + TensorShape shape = <nl> + is_entry_computation <nl> + ? options_ . shape_representation_fn ( arg . shape , arg . type ) <nl> + : arg . shape ; <nl> + return TensorShapeToXLAShape ( arg . type , shape , xla_shape ) ; <nl> + } <nl> case XlaCompiler : : Argument : : kResource : { <nl> TF_RET_CHECK ( arg . initialized ) ; <nl> <nl> switch ( arg . resource_kind ) { <nl> case XlaResource : : kVariable : { <nl> TensorShape representation_shape = <nl> - options_ . variable_representation_shape_fn ( arg . shape , arg . type ) ; <nl> + options_ . shape_representation_fn ( arg . shape , arg . type ) ; <nl> return TensorShapeToXLAShape ( arg . type , representation_shape , <nl> xla_shape ) ; <nl> } <nl> Status ExecuteGraph ( XlaContext * xla_context , std : : unique_ptr < Graph > graph , <nl> Status BuildComputation ( <nl> const std : : vector < XlaCompiler : : Argument > & args , <nl> const std : : vector < int > & arg_cores , <nl> - const std : : vector < XlaExpression > & retvals , <nl> + const std : : vector < XlaContext : : Retval > & retvals , <nl> const std : : vector < std : : unique_ptr < XlaResource > > & resources , <nl> bool return_updated_values_for_all_resources , xla : : XlaBuilder * builder , <nl> xla : : XlaComputation * computation , int * num_computation_outputs , <nl> int * num_nonconst_outputs , <nl> + std : : vector < XlaCompiler : : OutputDescription > * outputs , <nl> std : : vector < XlaCompiler : : ResourceUpdate > * resource_updates ) { <nl> std : : vector < xla : : XlaOp > elems ; <nl> elems . reserve ( retvals . size ( ) ) ; <nl> - for ( const XlaExpression & retval : retvals ) { <nl> - if ( ! retval . has_constant_value ( ) ) { <nl> + for ( int i = 0 ; i < retvals . size ( ) ; + + i ) { <nl> + XlaCompiler : : OutputDescription & output = ( * outputs ) [ i ] ; <nl> + output . type = retvals [ i ] . type ; <nl> + output . shape = retvals [ i ] . 
shape ; <nl> + const XlaExpression & retval = retvals [ i ] . expression ; <nl> + if ( retval . has_constant_value ( ) ) { <nl> + output . is_constant = true ; <nl> + output . constant_value = retval . constant_value ( ) ; <nl> + } else { <nl> + output . is_constant = false ; <nl> elems . push_back ( retval . handle ( ) ) ; <nl> } <nl> } <nl> Status XlaCompiler : : BuildArguments ( <nl> std : : vector < xla : : Shape > arg_shapes ( input_mapping - > size ( ) ) ; <nl> for ( std : : vector < int > : : size_type i = 0 ; i < input_mapping - > size ( ) ; + + i ) { <nl> / / Computes the shapes of non - constant arguments . <nl> - TF_RETURN_IF_ERROR ( <nl> - XLAShapeForArgument ( args [ ( * input_mapping ) [ i ] ] , & arg_shapes [ i ] ) ) ; <nl> + TF_RETURN_IF_ERROR ( XLAShapeForArgument ( <nl> + args [ ( * input_mapping ) [ i ] ] , is_entry_computation , & arg_shapes [ i ] ) ) ; <nl> } <nl> <nl> if ( use_tuple_arg ) { <nl> Status XlaCompiler : : BuildArguments ( <nl> <nl> builder - > ClearOpMetadata ( ) ; <nl> <nl> - / / Fill in the handles in non - constant arguments . <nl> + / / Fill in the handles in non - constant arguments , and reshape parameters <nl> + / / back to their correct shapes . <nl> VLOG ( 2 ) < < " XLA computation inputs : " ; <nl> for ( std : : vector < int > : : size_type i = 0 ; i < input_mapping - > size ( ) ; + + i ) { <nl> const XlaCompiler : : Argument & arg = args [ input_mapping - > at ( i ) ] ; <nl> Status XlaCompiler : : BuildArguments ( <nl> break ; <nl> } <nl> case XlaCompiler : : Argument : : kParameter : <nl> - arg_expression . set_handle ( arg_handles [ i ] ) ; <nl> + / / Reshape parameters back to their correct shapes . <nl> + / / TODO ( b / 76097077 ) : propagate device assignments onto arguments and <nl> + / / return values of functions , and then reshape unconditionally . <nl> + if ( is_entry_computation ) { <nl> + arg_expression . set_handle ( <nl> + builder - > Reshape ( arg_handles [ i ] , arg . shape . dim_sizes ( ) ) ) ; <nl> + } else { <nl> + arg_expression . set_handle ( arg_handles [ i ] ) ; <nl> + } <nl> break ; <nl> case XlaCompiler : : Argument : : kConstant : <nl> case XlaCompiler : : Argument : : kInvalid : <nl> Status XlaCompiler : : CompileGraph ( const XlaCompiler : : CompileOptions & options , <nl> / / Converts Tensorflow ' s graph control - flow constructs into functional <nl> / / control - flow that can be compiled into XLA code . <nl> TF_RETURN_IF_ERROR ( <nl> - FunctionalizeControlFlow ( graph . get ( ) , local_flib_def_ . get ( ) ) ) ; <nl> + FunctionalizeControlFlow ( flib_runtime_ - > GetFunctionLibraryDefinition ( ) , <nl> + graph . get ( ) , local_flib_def_ . get ( ) ) ) ; <nl> <nl> xla : : XlaBuilder builder ( name ) ; <nl> - XlaContext * context = <nl> - new XlaContext ( this , & builder , options_ . allow_cpu_custom_calls , <nl> - options . resolve_compile_time_constants , <nl> - & options_ . variable_representation_shape_fn ) ; <nl> + XlaContext * context = new XlaContext ( <nl> + this , & builder , options_ . allow_cpu_custom_calls , <nl> + options . resolve_compile_time_constants , options . is_entry_computation , <nl> + & options_ . 
shape_representation_fn ) ; <nl> core : : ScopedUnref context_unref ( context ) ; <nl> <nl> std : : vector < XlaExpression > arg_expressions ; <nl> Status XlaCompiler : : CompileGraph ( const XlaCompiler : : CompileOptions & options , <nl> int num_nonconst_outputs ; <nl> int num_computation_outputs ; <nl> result - > computation = std : : make_shared < xla : : XlaComputation > ( ) ; <nl> + result - > outputs . resize ( context - > retvals ( ) . size ( ) ) ; <nl> TF_RETURN_IF_ERROR ( BuildComputation ( <nl> args , arg_cores , context - > retvals ( ) , context - > resources ( ) , <nl> options . return_updated_values_for_all_resources , & builder , <nl> result - > computation . get ( ) , & num_computation_outputs , <nl> - & num_nonconst_outputs , & result - > resource_updates ) ) ; <nl> + & num_nonconst_outputs , & result - > outputs , & result - > resource_updates ) ) ; <nl> <nl> VLOG ( 2 ) < < " Outputs : total : " < < context - > retvals ( ) . size ( ) <nl> < < " nonconstant : " < < num_nonconst_outputs ; <nl> - result - > outputs . resize ( context - > retvals ( ) . size ( ) ) ; <nl> - for ( std : : vector < XlaExpression > : : size_type i = 0 ; <nl> - i < context - > retvals ( ) . size ( ) ; + + i ) { <nl> - const XlaExpression & retval = context - > retvals ( ) [ i ] ; <nl> - if ( retval . has_constant_value ( ) ) { <nl> - OutputDescription & output = result - > outputs [ i ] ; <nl> - output . shape = retval . constant_value ( ) . shape ( ) ; <nl> - output . is_constant = true ; <nl> - output . constant_value = retval . constant_value ( ) ; <nl> - } <nl> - } <nl> <nl> - / / Compute the output shapes , if there is a computation with non - constant <nl> + / / Compute the XLA output shape , if there is a computation with non - constant <nl> / / outputs . <nl> - auto computation_shape = client ( ) - > GetComputationShape ( * result - > computation ) ; <nl> - if ( ! computation_shape . ok ( ) ) { <nl> - return computation_shape . status ( ) ; <nl> - } <nl> + TF_ASSIGN_OR_RETURN ( std : : unique_ptr < xla : : ProgramShape > computation_shape , <nl> + client ( ) - > GetComputationShape ( * result - > computation ) ) ; <nl> <nl> - result - > xla_output_shape . Swap ( <nl> - computation_shape . ValueOrDie ( ) - > mutable_result ( ) ) ; <nl> + result - > xla_output_shape . Swap ( computation_shape - > mutable_result ( ) ) ; <nl> VLOG ( 2 ) < < " XLA output shape : " <nl> < < xla : : ShapeUtil : : HumanString ( result - > xla_output_shape ) ; <nl> <nl> Status XlaCompiler : : CompileGraph ( const XlaCompiler : : CompileOptions & options , <nl> / / Tensorflow expects a major - to - minor order of results . <nl> xla : : LayoutUtil : : SetToDefaultLayout ( & result - > xla_output_shape ) ; <nl> <nl> - / / Converts the output shapes to TensorShapes . <nl> - int computation_output = 0 ; <nl> - for ( std : : vector < XlaExpression > : : size_type i = 0 ; <nl> - i < context - > retvals ( ) . size ( ) ; + + i ) { <nl> - const XlaExpression & retval = context - > retvals ( ) [ i ] ; <nl> - if ( ! retval . has_constant_value ( ) ) { <nl> - TF_RET_CHECK ( computation_output < num_computation_outputs ) <nl> - < < " Computation has more outputs than expected " ; <nl> - OutputDescription & output = result - > outputs [ i ] ; <nl> - output . is_constant = false ; <nl> - TF_RETURN_IF_ERROR ( XLAShapeToTensorShape ( <nl> - xla : : ShapeUtil : : GetTupleElementShape ( result - > xla_output_shape , <nl> - computation_output ) , <nl> - & output . 
shape ) ) ; <nl> - + + computation_output ; <nl> - } <nl> - } <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / tf2xla / xla_compiler . h <nl> ppp b / tensorflow / compiler / tf2xla / xla_compiler . h <nl> class XlaContext ; <nl> / / _Retval values are ordered by _Retval index , whereas kResource values are <nl> / / ordered by the original _Arg position of the variable . <nl> / / <nl> + / / If a shape representation function is provided as part of <nl> + / / XlaCompiler : : CompileOptions , kParameter arguments and return values to an <nl> + / / entry computation will be reshaped in accordance with the shape function . <nl> + / / Arguments and return values to a non - entry computation are not reshaped . <nl> + / / Variable resource arguments are passed and returned in reshaped form , even <nl> + / / for non - entry computations . This feature allows TensorFlow to keep on - device <nl> + / / tensors with a different shape to their representation inside the XLA <nl> + / / computation . <nl> + / / <nl> / / In both inputs and outputs , kResource values are placed at the end . When <nl> / / emitting While loop bodies , we must ensure that the loop body has <nl> / / identical input and output signatures . By moving variable values <nl> class XlaCompiler { <nl> } ; <nl> <nl> struct OutputDescription { <nl> - / / Type and shape of the output . <nl> + / / Type and shape of the output . The shape is the unflattened shape . <nl> DataType type ; <nl> TensorShape shape ; <nl> <nl> class XlaCompiler { <nl> / / original arguments , and are not necessarily in the same order . ) <nl> std : : vector < int > input_mapping ; <nl> <nl> - / / Input shapes of the computation . <nl> + / / Input shapes of the computation . If we are flattening inputs , these are <nl> + / / the flattened shapes . <nl> std : : vector < xla : : Shape > xla_input_shapes ; <nl> <nl> - / / Output shape in XLA format . The output shape is always a tuple . <nl> + / / Output shape in XLA format . The output shape is always a tuple . If we <nl> + / / are flattening outputs , these are the flattened shapes . <nl> xla : : Shape xla_output_shape ; <nl> <nl> / / TensorFlow shapes of outputs , together with the values of any <nl> class XlaCompiler { <nl> std : : shared_ptr < xla : : XlaComputation > computation ; <nl> } ; <nl> <nl> + typedef std : : function < TensorShape ( const TensorShape & , DataType ) > <nl> + ShapeRepresentationFn ; <nl> struct Options { <nl> / / Name of the compilation device to use . Needs to be live only during <nl> / / XlaCompiler ' s constructor . <nl> class XlaCompiler { <nl> / / If set , variables are represented to XLA with the shape given by this <nl> / / shape function . Variables are reshaped to this shape <nl> / / on write , and reshaped to their original shape on read . <nl> - std : : function < TensorShape ( const TensorShape & , DataType ) > <nl> - variable_representation_shape_fn ; <nl> + ShapeRepresentationFn shape_representation_fn ; <nl> <nl> / / If not nullptr , populate_resource_manager is called with the <nl> / / compilation device ' s resource manager when the compilation <nl> class XlaCompiler { <nl> / / Returns the shape of the XLA parameter for an argument ' arg ' . <nl> / / See the class comment for more details about the argument passing <nl> / / convention . 
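The class comment above describes the shape representation function, but the diff shows no caller-side usage. A minimal sketch of how one might be supplied (illustrative only: MakeFlatteningOptions is a hypothetical helper, not part of this commit; it mirrors the flattening behavior exercised by the VariableRepresentationShapeFunction test further below):

#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"

namespace tensorflow {

// Hypothetical helper: builds Options whose shape_representation_fn
// flattens every tensor to a rank-1 vector, so a [2, 3] parameter is
// carried inside the XLA computation as a 6-element vector and reshaped
// back to [2, 3] at the entry-computation boundary, per the comment above.
XlaCompiler::Options MakeFlatteningOptions() {
  XlaCompiler::Options options;
  options.shape_representation_fn = [](const TensorShape& shape,
                                       DataType type) {
    return TensorShape({shape.num_elements()});
  };
  return options;
}

}  // namespace tensorflow
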
<nl> - Status XLAShapeForArgument ( const Argument & arg , xla : : Shape * xla_shape ) ; <nl> + Status XLAShapeForArgument ( const Argument & arg , bool is_entry_computation , <nl> + xla : : Shape * xla_shape ) ; <nl> <nl> / / Retrieves the channel handle associated with ` key ` . Allocates <nl> / / a new channel handle if none exists . <nl> mmm a / tensorflow / compiler / tf2xla / xla_compiler_test . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_compiler_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> + # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / framework / common_shape_fns . h " <nl> # include " tensorflow / core / framework / function . h " <nl> # include " tensorflow / core / framework / function_testlib . h " <nl> # include " tensorflow / core / framework / resource_mgr . h " <nl> + # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / graph / graph . h " <nl> # include " tensorflow / core / graph / graph_constructor . h " <nl> TEST_F ( XlaCompilerTest , ConstantOutputs ) { <nl> } <nl> } <nl> <nl> + TEST_F ( XlaCompilerTest , ConstantOutputsOfFunctionalNode ) { <nl> + / / Define a function with one compile - time constant output and one <nl> + / / data - dependent output . <nl> + / / @ function . Defun ( noinline = True ) <nl> + / / foo ( a ) { b = 7 ; return b , a ; } <nl> + const Tensor seven = test : : AsScalar < int > ( 7 ) ; <nl> + FunctionDef fdef = FunctionDefHelper : : Create ( <nl> + " foo " , { " a_0 : int32 " } , { " const : int32 " , " a : int32 " } , { } , <nl> + { <nl> + { { " Const " } , " Const " , { } , { { " dtype " , DT_INT32 } , { " value " , seven } } } , <nl> + } , <nl> + { { " a " , " a_0 " } , { " const " , " Const : output : 0 " } } ) ; <nl> + ( * fdef . mutable_attr ( ) ) [ " _noinline " ] . set_b ( true ) ; <nl> + FunctionDefLibrary fdef_lib ; <nl> + * ( fdef_lib . add_function ( ) ) = fdef ; <nl> + std : : unique_ptr < Graph > graph ( new Graph ( OpRegistry : : Global ( ) ) ) ; <nl> + { <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + TF_EXPECT_OK ( scope . graph ( ) - > AddFunctionLibrary ( fdef_lib ) ) ; <nl> + auto arg = ops : : _Arg ( scope . WithOpName ( " input_arg " ) , DT_INT32 , 0 ) ; <nl> + NodeDef foo ; <nl> + foo . set_name ( " foo " ) ; <nl> + foo . set_op ( " foo " ) ; <nl> + * foo . add_input ( ) = " input_arg " ; <nl> + Status status ; <nl> + scope . graph ( ) - > AddNode ( foo , & status ) ; <nl> + TF_ASSERT_OK ( status ) ; <nl> + NodeDef retval_1 ; <nl> + retval_1 . set_name ( " retval_0 " ) ; <nl> + retval_1 . set_op ( FunctionLibraryDefinition : : kRetOp ) ; <nl> + * retval_1 . add_input ( ) = " foo " ; <nl> + ( * retval_1 . mutable_attr ( ) ) [ " T " ] . set_type ( DT_INT32 ) ; <nl> + ( * retval_1 . mutable_attr ( ) ) [ " index " ] . set_i ( 0 ) ; <nl> + scope . graph ( ) - > AddNode ( retval_1 , & status ) ; <nl> + TF_ASSERT_OK ( status ) ; <nl> + NodeDef retval_2 ; <nl> + retval_2 . set_name ( " retval_1 " ) ; <nl> + retval_2 . 
set_op ( FunctionLibraryDefinition : : kRetOp ) ; <nl> + * retval_2 . add_input ( ) = " foo : 1 " ; <nl> + ( * retval_2 . mutable_attr ( ) ) [ " T " ] . set_type ( DT_INT32 ) ; <nl> + ( * retval_2 . mutable_attr ( ) ) [ " index " ] . set_i ( 1 ) ; <nl> + scope . graph ( ) - > AddNode ( retval_2 , & status ) ; <nl> + TF_ASSERT_OK ( status ) ; <nl> + TF_ASSERT_OK ( scope . ToGraph ( graph . get ( ) ) ) ; <nl> + } <nl> + <nl> + / / Builds a description of the arguments . <nl> + std : : vector < XlaCompiler : : Argument > args ( 1 ) ; <nl> + args [ 0 ] . kind = XlaCompiler : : Argument : : kParameter ; <nl> + args [ 0 ] . type = DT_INT32 ; <nl> + args [ 0 ] . shape = TensorShape ( { 1 } ) ; <nl> + <nl> + XlaCompiler : : Options options = DefaultOptions ( ) ; <nl> + FunctionLibraryDefinition flib_def ( OpRegistry : : Global ( ) , fdef_lib ) ; <nl> + options . flib_def = & flib_def ; <nl> + XlaCompiler compiler ( options ) ; <nl> + <nl> + XlaCompiler : : CompileOptions compile_options ; <nl> + compile_options . resolve_compile_time_constants = true ; <nl> + XlaCompiler : : CompilationResult result ; <nl> + TF_ASSERT_OK ( compiler . CompileGraph ( compile_options , " constants " , <nl> + std : : move ( graph ) , args , & result ) ) ; <nl> + <nl> + ASSERT_EQ ( 2 , result . outputs . size ( ) ) ; <nl> + EXPECT_TRUE ( result . outputs [ 0 ] . is_constant ) ; <nl> + test : : ExpectTensorEqual < int32 > ( result . outputs [ 0 ] . constant_value , <nl> + test : : AsScalar ( 7 ) ) ; <nl> + EXPECT_FALSE ( result . outputs [ 1 ] . is_constant ) ; <nl> + } <nl> + <nl> / / Tests compilation and execution of a graph that adds two tensors . <nl> TEST_F ( XlaCompilerTest , ResourceManager ) { <nl> / / Builds a graph that calls the dummy resource Op . <nl> TEST_F ( XlaCompilerTest , Variables ) { <nl> EXPECT_TRUE ( xla : : LiteralTestUtil : : Equal ( * expected_literal , * actual_literal ) ) ; <nl> } <nl> <nl> - / / Tests a simple graph that reads and writes a variable , with a <nl> - / / variable_representation_shape_fn passed to the compiler that flattens all <nl> - / / variable tensors to vectors . <nl> - TEST_F ( XlaCompilerTest , VariableRepresentationShapeFunction ) { <nl> + xla : : StatusOr < std : : unique_ptr < Graph > > BuildTestGraph ( ) { <nl> Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> auto a = ops : : _Arg ( scope . WithOpName ( " A " ) , DT_INT32 , 0 ) ; <nl> auto var = ops : : _Arg ( scope . WithOpName ( " V " ) , DT_RESOURCE , 1 ) ; <nl> TEST_F ( XlaCompilerTest , VariableRepresentationShapeFunction ) { <nl> auto read_plus_one = ops : : Add ( scope , read , ops : : Const < int32 > ( scope , 1 ) ) ; <nl> auto d = ops : : _Retval ( scope . WithOpName ( " D " ) , read_plus_one , 0 ) ; <nl> std : : unique_ptr < Graph > graph ( new Graph ( OpRegistry : : Global ( ) ) ) ; <nl> - TF_ASSERT_OK ( scope . ToGraph ( graph . get ( ) ) ) ; <nl> + TF_RETURN_IF_ERROR ( scope . ToGraph ( graph . get ( ) ) ) ; <nl> + return std : : move ( graph ) ; <nl> + } <nl> + <nl> + / / Tests a simple graph that reads and writes a variable , with a <nl> + / / shape_representation_fn passed to the compiler that flattens all <nl> + / / variable tensors to vectors . <nl> + TEST_F ( XlaCompilerTest , VariableRepresentationShapeFunction ) { <nl> + TF_ASSERT_OK_AND_ASSIGN ( std : : unique_ptr < Graph > graph , BuildTestGraph ( ) ) ; <nl> <nl> / / Builds a description of the arguments . 
<nl> std : : vector < XlaCompiler : : Argument > args ( 2 ) ; <nl> TEST_F ( XlaCompilerTest , VariableRepresentationShapeFunction ) { <nl> <nl> / / Compiles the graph . <nl> XlaCompiler : : Options options = DefaultOptions ( ) ; <nl> - options . variable_representation_shape_fn = [ ] ( const TensorShape & shape , <nl> - DataType type ) { <nl> + options . shape_representation_fn = [ ] ( const TensorShape & shape , <nl> + DataType type ) { <nl> return TensorShape ( { shape . num_elements ( ) } ) ; <nl> } ; <nl> XlaCompiler compiler ( options ) ; <nl> <nl> + XlaCompiler : : CompileOptions compile_options ; <nl> + compile_options . is_entry_computation = false ; / / Only reshape variables . <nl> + <nl> XlaCompiler : : CompilationResult result ; <nl> - TF_ASSERT_OK ( compiler . CompileGraph ( XlaCompiler : : CompileOptions ( ) , " add " , <nl> - std : : move ( graph ) , args , & result ) ) ; <nl> + TF_ASSERT_OK ( compiler . CompileGraph ( compile_options , " add " , std : : move ( graph ) , <nl> + args , & result ) ) ; <nl> + <nl> + TF_ASSERT_OK_AND_ASSIGN ( std : : unique_ptr < xla : : ProgramShape > program_shape , <nl> + client_ - > GetComputationShape ( * result . computation ) ) ; <nl> + <nl> + ASSERT_EQ ( program_shape - > parameters_size ( ) , 2 ) ; <nl> + EXPECT_TRUE ( <nl> + xla : : ShapeUtil : : Compatible ( program_shape - > parameters ( 0 ) , <nl> + xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 2 , 2 } ) ) ) ; <nl> + EXPECT_TRUE ( xla : : ShapeUtil : : Compatible ( <nl> + program_shape - > parameters ( 1 ) , xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 4 } ) ) ) ; <nl> + EXPECT_TRUE ( xla : : ShapeUtil : : Compatible ( <nl> + program_shape - > result ( ) , <nl> + xla : : ShapeUtil : : MakeTupleShape ( <nl> + { xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 2 , 2 } ) , <nl> + xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 4 } ) } ) ) ) ; <nl> <nl> / / Tests that the generated computation works . <nl> std : : unique_ptr < xla : : Literal > param0_literal = <nl> TEST_F ( XlaCompilerTest , VariableRepresentationShapeFunction ) { <nl> EXPECT_TRUE ( xla : : LiteralTestUtil : : Equal ( * expected_literal , * actual_literal ) ) ; <nl> } <nl> <nl> + TEST_F ( XlaCompilerTest , ArgRetvalShapeRepresentationFunction ) { <nl> + TF_ASSERT_OK_AND_ASSIGN ( std : : unique_ptr < Graph > graph , BuildTestGraph ( ) ) ; <nl> + <nl> + / / Builds a description of the arguments . <nl> + std : : vector < XlaCompiler : : Argument > args ( 2 ) ; <nl> + args [ 0 ] . kind = XlaCompiler : : Argument : : kParameter ; <nl> + args [ 0 ] . type = DT_INT32 ; <nl> + args [ 0 ] . shape = TensorShape ( { 2 , 2 } ) ; <nl> + args [ 1 ] . kind = XlaCompiler : : Argument : : kResource ; <nl> + args [ 1 ] . resource_kind = XlaResource : : kVariable ; <nl> + args [ 1 ] . initialized = true ; <nl> + args [ 1 ] . type = DT_INT32 ; <nl> + args [ 1 ] . shape = TensorShape ( { 2 , 2 } ) ; <nl> + <nl> + / / Compiles the graph . <nl> + XlaCompiler : : Options options = DefaultOptions ( ) ; <nl> + options . shape_representation_fn = [ ] ( const TensorShape & shape , <nl> + DataType type ) { <nl> + return TensorShape ( { shape . num_elements ( ) } ) ; <nl> + } ; <nl> + XlaCompiler compiler ( options ) ; <nl> + <nl> + XlaCompiler : : CompileOptions compile_options ; <nl> + compile_options . is_entry_computation = true ; / / Reshape args and retvals . <nl> + <nl> + XlaCompiler : : CompilationResult result ; <nl> + TF_ASSERT_OK ( compiler . 
CompileGraph ( compile_options , " add " , std : : move ( graph ) , <nl> + args , & result ) ) ; <nl> + <nl> + TF_ASSERT_OK_AND_ASSIGN ( std : : unique_ptr < xla : : ProgramShape > program_shape , <nl> + client_ - > GetComputationShape ( * result . computation ) ) ; <nl> + <nl> + ASSERT_EQ ( program_shape - > parameters_size ( ) , 2 ) ; <nl> + EXPECT_TRUE ( xla : : ShapeUtil : : Compatible ( <nl> + program_shape - > parameters ( 0 ) , xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 4 } ) ) ) ; <nl> + EXPECT_TRUE ( xla : : ShapeUtil : : Compatible ( <nl> + program_shape - > parameters ( 1 ) , xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 4 } ) ) ) ; <nl> + EXPECT_TRUE ( xla : : ShapeUtil : : Compatible ( <nl> + program_shape - > result ( ) , <nl> + xla : : ShapeUtil : : MakeTupleShape ( <nl> + { xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 4 } ) , <nl> + xla : : ShapeUtil : : MakeShape ( xla : : S32 , { 4 } ) } ) ) ) ; <nl> + <nl> + / / Tests that the generated computation works . <nl> + std : : unique_ptr < xla : : Literal > param0_literal = <nl> + xla : : Literal : : CreateR1 < int32 > ( { 4 , 55 , 1 , - 3 } ) ; <nl> + std : : unique_ptr < xla : : Literal > param1_literal = <nl> + xla : : Literal : : CreateR1 < int32 > ( { 22 , 11 , 33 , 404 } ) ; <nl> + std : : unique_ptr < xla : : GlobalData > param0_data = <nl> + client_ - > TransferToServer ( * param0_literal ) . ConsumeValueOrDie ( ) ; <nl> + std : : unique_ptr < xla : : GlobalData > param1_data = <nl> + client_ - > TransferToServer ( * param1_literal ) . ConsumeValueOrDie ( ) ; <nl> + <nl> + std : : unique_ptr < xla : : GlobalData > actual = <nl> + client_ <nl> + - > Execute ( * result . computation , { param0_data . get ( ) , param1_data . get ( ) } ) <nl> + . ConsumeValueOrDie ( ) ; <nl> + std : : unique_ptr < xla : : Literal > actual_literal = <nl> + client_ - > Transfer ( * actual ) . ConsumeValueOrDie ( ) ; <nl> + <nl> + std : : unique_ptr < xla : : Literal > expected0 = <nl> + xla : : Literal : : CreateR1 < int32 > ( { 27 , 67 , 35 , 402 } ) ; <nl> + std : : unique_ptr < xla : : Literal > expected1 = <nl> + xla : : Literal : : CreateR1 < int32 > ( { 26 , 66 , 34 , 401 } ) ; <nl> + std : : unique_ptr < xla : : Literal > expected_literal = <nl> + xla : : Literal : : MakeTuple ( { expected0 . get ( ) , expected1 . get ( ) } ) ; <nl> + EXPECT_TRUE ( xla : : LiteralTestUtil : : Equal ( * expected_literal , * actual_literal ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / tf2xla / xla_context . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_context . 
cc <nl> void XlaContext : : set_args ( std : : vector < XlaExpression > args ) { <nl> XlaContext : : XlaContext ( <nl> XlaCompiler * compiler , xla : : XlaBuilder * builder , <nl> bool allow_cpu_custom_calls , bool resolve_compile_time_constants , <nl> + bool is_entry_computation , <nl> const std : : function < TensorShape ( const TensorShape & , DataType ) > * <nl> - variable_representation_shape_fn ) <nl> + shape_representation_fn ) <nl> : compiler_ ( compiler ) , <nl> builder_ ( builder ) , <nl> allow_cpu_custom_calls_ ( allow_cpu_custom_calls ) , <nl> resolve_compile_time_constants_ ( resolve_compile_time_constants ) , <nl> - variable_representation_shape_fn_ ( variable_representation_shape_fn ) { } <nl> + is_entry_computation_ ( is_entry_computation ) , <nl> + shape_representation_fn_ ( shape_representation_fn ) { } <nl> <nl> string XlaContext : : DebugString ( ) { return " TLA JIT context " ; } <nl> <nl> / / This is called by the Retval Op to associate a computed value <nl> / / with a specific return value of the subgraph . <nl> void XlaContext : : AddRetval ( int retval_index , DataType type , <nl> - const xla : : XlaOp & handle ) { <nl> + const TensorShape & shape , const xla : : XlaOp & handle ) { <nl> VLOG ( 1 ) < < " Added retval index " < < retval_index < < " to XLA computation " ; <nl> / / Add the return value to the list being built up . <nl> if ( retvals_ . size ( ) < = retval_index ) { <nl> retvals_ . resize ( retval_index + 1 ) ; <nl> } <nl> - retvals_ [ retval_index ] . set_handle ( handle ) ; <nl> + XlaExpression e ; <nl> + e . set_handle ( handle ) ; <nl> + retvals_ [ retval_index ] = Retval { type , shape , e } ; <nl> } <nl> <nl> Status XlaContext : : AddConstRetval ( int retval_index , DataType dtype , <nl> Status XlaContext : : AddConstRetval ( int retval_index , DataType dtype , <nl> if ( retvals_ . size ( ) < = retval_index ) { <nl> retvals_ . resize ( retval_index + 1 ) ; <nl> } <nl> - if ( resolve_compile_time_constants_ ) { <nl> - Tensor value ; <nl> - TF_RETURN_IF_ERROR ( LiteralToHostTensor ( literal , dtype , & value ) ) ; <nl> - retvals_ [ retval_index ] . set_constant_value ( std : : move ( value ) ) ; <nl> - } else { <nl> - retvals_ [ retval_index ] . set_handle ( builder_ - > ConstantLiteral ( literal ) ) ; <nl> - } <nl> + Tensor value ; <nl> + TF_RETURN_IF_ERROR ( LiteralToHostTensor ( literal , dtype , & value ) ) ; <nl> + XlaExpression e ; <nl> + e . set_constant_value ( value ) ; <nl> + retvals_ [ retval_index ] = Retval { dtype , value . shape ( ) , e } ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status XlaContext : : CreateResource ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - TensorShape XlaContext : : VariableRepresentationShape ( const TensorShape & shape , <nl> - DataType type ) const { <nl> - return ( * variable_representation_shape_fn_ ) ( shape , type ) ; <nl> + TensorShape XlaContext : : RepresentationShape ( const TensorShape & shape , <nl> + DataType type ) const { <nl> + return ( * shape_representation_fn_ ) ( shape , type ) ; <nl> } <nl> <nl> const xla : : XlaComputation * XlaContext : : GetOrCreateMax ( const DataType type ) { <nl> mmm a / tensorflow / compiler / tf2xla / xla_context . h <nl> ppp b / tensorflow / compiler / tf2xla / xla_context . h <nl> class XlaContext : public ResourceBase { <nl> static XlaContext & Get ( const OpKernelContext * ctx ) ; <nl> static XlaContext & Get ( const XlaOpKernelContext * ctx ) ; <nl> <nl> - / / Creates a new XlaContext . <nl> + / / Creates a new XlaContext . 
See the documentation on the class data fields <nl> + / / for descriptions of the arguments . <nl> XlaContext ( XlaCompiler * compiler , xla : : XlaBuilder * builder , <nl> bool allow_cpu_custom_calls , bool resolve_compile_time_constants , <nl> + bool is_entry_computation , <nl> const std : : function < TensorShape ( const TensorShape & , DataType ) > * <nl> - variable_representation_shape_fn ) ; <nl> + shape_representation_fn ) ; <nl> <nl> / / Virtual method defined by ResourceBase . <nl> string DebugString ( ) override ; <nl> class XlaContext : public ResourceBase { <nl> <nl> bool allow_cpu_custom_calls ( ) const { return allow_cpu_custom_calls_ ; } <nl> <nl> + bool resolve_compile_time_constants ( ) const { <nl> + return resolve_compile_time_constants_ ; <nl> + } <nl> + bool is_entry_computation ( ) const { return is_entry_computation_ ; } <nl> + <nl> const std : : vector < XlaExpression > & args ( ) const { return args_ ; } <nl> void set_args ( std : : vector < XlaExpression > args ) ; <nl> <nl> - const std : : vector < XlaExpression > & retvals ( ) { return retvals_ ; } <nl> + struct Retval { <nl> + DataType type ; <nl> + TensorShape shape ; <nl> + / / An XlaExpression representing the Retval ' s value . <nl> + XlaExpression expression ; <nl> + } ; <nl> + const std : : vector < Retval > & retvals ( ) { return retvals_ ; } <nl> <nl> / / This is called by the Retval Op to associate a computed value <nl> / / with a specific return value of the subgraph . <nl> - void AddRetval ( int retval_index , DataType type , const xla : : XlaOp & handle ) ; <nl> + void AddRetval ( int retval_index , DataType type , const TensorShape & shape , <nl> + const xla : : XlaOp & handle ) ; <nl> <nl> / / As for Retval , but for return values that are compile - time constants . <nl> Status AddConstRetval ( int retval_index , DataType dtype , <nl> class XlaContext : public ResourceBase { <nl> } <nl> <nl> / / Returns the XLA shape to be used to represent a variable of TF ` shape ` <nl> - / / and ` type ` . <nl> - TensorShape VariableRepresentationShape ( const TensorShape & shape , <nl> - DataType type ) const ; <nl> + / / and ` type ` , or of an argument or return value of a top - level computation . <nl> + TensorShape RepresentationShape ( const TensorShape & shape , <nl> + DataType type ) const ; <nl> <nl> / / Get an XLA lambda to compute Max . This is cached in the <nl> / / XlaContext since it may be used by multiple Ops . There is a <nl> class XlaContext : public ResourceBase { <nl> std : : vector < XlaExpression > args_ ; <nl> <nl> / / Return values of the Tensorflow graph , indexed by _Retval index . <nl> - std : : vector < XlaExpression > retvals_ ; <nl> + std : : vector < Retval > retvals_ ; <nl> <nl> / / Holds ownership of resources . The resources are not ordered . <nl> std : : vector < std : : unique_ptr < XlaResource > > resources_ ; <nl> <nl> - / / A function that describes how variable shapes should be represented <nl> - / / in XLA . Variable values will be reshaped to this shape . Must be non - null . <nl> + / / Is this a top - level computation , or an inner computation ( e . g . , a while <nl> + / / body ) ? <nl> + const bool is_entry_computation_ ; <nl> + <nl> + / / A function that describes how the shapes of <nl> + / / a ) argument and return value , for entry computations <nl> + / / b ) variables , for all computations , <nl> + / / should be represented in XLA . 
Parameters / return values will be shaped <nl> + / / according to this function , and reshaped back to / from their declared shapes <nl> + / / for computations . Must be non - null . <nl> const std : : function < TensorShape ( const TensorShape & , DataType ) > * <nl> - variable_representation_shape_fn_ ; <nl> + shape_representation_fn_ ; <nl> <nl> / / Cache of prebuilt computations indexed by their type . <nl> using ComputationMap = std : : map < DataType , xla : : XlaComputation > ; <nl> mmm a / tensorflow / compiler / tf2xla / xla_op_kernel . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_op_kernel . cc <nl> Status XlaOpKernelContext : : ReadVariableInput ( int index , DataType type , <nl> } <nl> <nl> XlaContext & xla_context = XlaContext : : Get ( context_ ) ; <nl> - TensorShape representation_shape = xla_context . VariableRepresentationShape ( <nl> - variable - > shape ( ) , variable - > type ( ) ) ; <nl> + TensorShape representation_shape = <nl> + xla_context . RepresentationShape ( variable - > shape ( ) , variable - > type ( ) ) ; <nl> if ( representation_shape = = variable - > shape ( ) ) { <nl> * value = variable - > value ( ) ; <nl> } else { <nl> Status XlaOpKernelContext : : AssignVariable ( int input_index , DataType type , <nl> <nl> XlaContext & xla_context = XlaContext : : Get ( context_ ) ; <nl> TensorShape representation_shape = <nl> - xla_context . VariableRepresentationShape ( shape , type ) ; <nl> + xla_context . RepresentationShape ( shape , type ) ; <nl> if ( shape ! = representation_shape ) { <nl> handle = builder ( ) - > Reshape ( handle , representation_shape . dim_sizes ( ) ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / client / xla_client / xla_builder . cc <nl> ppp b / tensorflow / compiler / xla / client / xla_client / xla_builder . cc <nl> bool CanBeRoot ( HloOpcode opcode ) { <nl> } <nl> } <nl> <nl> - StatusOr < std : : vector < Shape > > GetOperandShapes ( <nl> - tensorflow : : gtl : : ArraySlice < XlaOp > operands ) { <nl> - std : : vector < Shape > operand_shapes ; <nl> - for ( const XlaOp & operand : operands ) { <nl> - TF_ASSIGN_OR_RETURN ( const Shape & shape , operand . GetShape ( ) ) ; <nl> - operand_shapes . push_back ( shape ) ; <nl> - } <nl> - return operand_shapes ; <nl> - } <nl> - <nl> } / / namespace <nl> <nl> StatusOr < Shape > XlaBuilder : : GetShape ( const XlaOp & op ) const { <nl> StatusOr < Shape > XlaBuilder : : GetShape ( const XlaOp & op ) const { <nl> return instr - > shape ( ) ; <nl> } <nl> <nl> - StatusOr < Shape > XlaOp : : GetShape ( ) const { <nl> - if ( builder_ = = nullptr ) { <nl> - return InvalidArgument ( <nl> - " cannot GetShape for an invalid XlaOp with handle % lld " , handle ( ) ) ; <nl> + StatusOr < std : : vector < Shape > > XlaBuilder : : GetOperandShapes ( <nl> + tensorflow : : gtl : : ArraySlice < XlaOp > operands ) const { <nl> + std : : vector < Shape > operand_shapes ; <nl> + for ( const XlaOp & operand : operands ) { <nl> + TF_ASSIGN_OR_RETURN ( const Shape & shape , GetShape ( operand ) ) ; <nl> + operand_shapes . push_back ( shape ) ; <nl> } <nl> - return builder_ - > GetShape ( * this ) ; <nl> + return operand_shapes ; <nl> } <nl> <nl> XlaBuilder : : XlaBuilder ( const string & computation_name ) <nl> StatusOr < XlaOp > XlaBuilder : : AddBroadcastSequence ( const Shape & output_shape , <nl> const XlaOp & operand ) { <nl> TF_RETURN_IF_ERROR ( first_error_ ) ; <nl> <nl> - TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , operand . 
GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , GetShape ( operand ) ) ; <nl> <nl> CHECK ( ShapeUtil : : IsScalar ( operand_shape ) | | <nl> ShapeUtil : : Rank ( operand_shape ) = = ShapeUtil : : Rank ( output_shape ) ) ; <nl> StatusOr < XlaOp > XlaBuilder : : AddBroadcastSequence ( const Shape & output_shape , <nl> XlaOp XlaBuilder : : UnaryOp ( HloOpcode unop , const XlaOp & operand ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> HloInstructionProto instr ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , operand . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , GetShape ( operand ) ) ; <nl> TF_ASSIGN_OR_RETURN ( * instr . mutable_shape ( ) , <nl> ShapeInference : : InferUnaryOpShape ( unop , operand_shape ) ) ; <nl> return AddInstruction ( std : : move ( instr ) , unop , { operand } ) ; <nl> XlaOp XlaBuilder : : BinaryOp ( <nl> tensorflow : : gtl : : ArraySlice < int64 > broadcast_dimensions ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> HloInstructionProto instr ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & lhs_shape , lhs . GetShape ( ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & rhs_shape , rhs . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & lhs_shape , GetShape ( lhs ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & rhs_shape , GetShape ( rhs ) ) ; <nl> TF_ASSIGN_OR_RETURN ( * instr . mutable_shape ( ) , <nl> ShapeInference : : InferBinaryOpShape ( <nl> binop , lhs_shape , rhs_shape , broadcast_dimensions ) ) ; <nl> XlaOp XlaBuilder : : BinaryOp ( <nl> updated_rhs = ! should_broadcast_lhs ? broadcasted_operand : rhs ; <nl> } <nl> <nl> - TF_ASSIGN_OR_RETURN ( Shape updated_lhs_shape , updated_lhs . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( Shape updated_lhs_shape , GetShape ( updated_lhs ) ) ; <nl> if ( ! ShapeUtil : : SameDimensions ( instr . shape ( ) , updated_lhs_shape ) ) { <nl> TF_ASSIGN_OR_RETURN ( updated_lhs , <nl> AddBroadcastSequence ( instr . shape ( ) , updated_lhs ) ) ; <nl> } <nl> - TF_ASSIGN_OR_RETURN ( Shape updated_rhs_shape , updated_rhs . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( Shape updated_rhs_shape , GetShape ( updated_rhs ) ) ; <nl> if ( ! ShapeUtil : : SameDimensions ( instr . shape ( ) , updated_rhs_shape ) ) { <nl> TF_ASSIGN_OR_RETURN ( updated_rhs , <nl> AddBroadcastSequence ( instr . shape ( ) , updated_rhs ) ) ; <nl> XlaOp XlaBuilder : : TernaryOp ( HloOpcode triop , const XlaOp & lhs , const XlaOp & rhs , <nl> const XlaOp & ehs ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> HloInstructionProto instr ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & lhs_shape , lhs . GetShape ( ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & rhs_shape , rhs . GetShape ( ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & ehs_shape , ehs . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & lhs_shape , GetShape ( lhs ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & rhs_shape , GetShape ( rhs ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & ehs_shape , GetShape ( ehs ) ) ; <nl> TF_ASSIGN_OR_RETURN ( * instr . 
mutable_shape ( ) , <nl> ShapeInference : : InferTernaryOpShape ( <nl> triop , lhs_shape , rhs_shape , ehs_shape ) ) ; <nl> XlaOp XlaBuilder : : Parameter ( int64 parameter_number , const Shape & shape , <nl> XlaOp XlaBuilder : : Broadcast ( <nl> const XlaOp & operand , tensorflow : : gtl : : ArraySlice < int64 > broadcast_sizes ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> - TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , operand . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , GetShape ( operand ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> const Shape & shape , <nl> ShapeInference : : InferBroadcastShape ( operand_shape , broadcast_sizes ) ) ; <nl> XlaOp XlaBuilder : : Reshape ( const XlaOp & operand , <nl> tensorflow : : gtl : : ArraySlice < int64 > dimensions , <nl> tensorflow : : gtl : : ArraySlice < int64 > new_sizes ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> - TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , operand . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , GetShape ( operand ) ) ; <nl> TF_ASSIGN_OR_RETURN ( const Shape & shape , <nl> ShapeInference : : InferReshapeShape ( <nl> operand_shape , dimensions , new_sizes ) ) ; <nl> XlaOp XlaBuilder : : Reshape ( const XlaOp & operand , <nl> XlaOp XlaBuilder : : Reshape ( const XlaOp & operand , <nl> tensorflow : : gtl : : ArraySlice < int64 > new_sizes ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> - TF_ASSIGN_OR_RETURN ( auto shape , operand . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto shape , GetShape ( operand ) ) ; <nl> std : : vector < int64 > dimensions ( shape . dimensions_size ( ) ) ; <nl> std : : iota ( dimensions . begin ( ) , dimensions . end ( ) , 0 ) ; <nl> return Reshape ( operand , dimensions , new_sizes ) ; <nl> XlaOp XlaBuilder : : Fft ( const XlaOp & operand , const FftType fft_type , <nl> const tensorflow : : gtl : : ArraySlice < int64 > fft_length ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> HloInstructionProto instr ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , operand . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , GetShape ( operand ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> * instr . mutable_shape ( ) , <nl> ShapeInference : : InferFftShape ( operand_shape , fft_type , fft_length ) ) ; <nl> XlaOp XlaBuilder : : Transpose ( const XlaOp & operand , <nl> tensorflow : : gtl : : ArraySlice < int64 > permutation ) { <nl> return NoteErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> HloInstructionProto instr ; <nl> - TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , operand . GetShape ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( const Shape & operand_shape , GetShape ( operand ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> * instr . mutable_shape ( ) , <nl> ShapeInference : : InferTransposeShape ( operand_shape , permutation ) ) ; <nl> StatusOr < const HloInstructionProto * > XlaBuilder : : LookUpInstruction ( <nl> const XlaOp & op ) const { <nl> TF_RETURN_IF_ERROR ( first_error_ ) ; <nl> <nl> + if ( op . builder_ = = nullptr ) { <nl> + return InvalidArgument ( <nl> + " invalid XlaOp with handle % lld ; the builder of this op is freed " , <nl> + op . handle ( ) ) ; <nl> + } <nl> if ( op . builder_ ! = this ) { <nl> - return InvalidArgument ( " invalid XlaOp with handle % lld " , op . 
handle ( ) ) ; <nl> + return InvalidArgument ( <nl> + " XlaOp with handle % lld is built by builder ' % s ' , but is trying to use " <nl> + " it in builder ' % s ' " , <nl> + op . handle ( ) , op . builder_ - > name ( ) . c_str ( ) , this - > name ( ) . c_str ( ) ) ; <nl> } <nl> <nl> - TF_RET_CHECK ( op . builder_ = = this ) ; <nl> if ( op . handle ( ) > = instructions_ . size ( ) | | op . handle ( ) < 0 ) { <nl> return InvalidArgument ( " no XlaOp value % lld " , op . handle ( ) ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / client / xla_client / xla_builder . h <nl> ppp b / tensorflow / compiler / xla / client / xla_client / xla_builder . h <nl> class XlaOp { <nl> XlaOp ( ) : handle_ ( 0 ) , builder_ ( nullptr ) { } <nl> ~ XlaOp ( ) { } <nl> <nl> - StatusOr < Shape > GetShape ( ) const ; <nl> - <nl> const XlaBuilder * builder ( ) const { return builder_ ; } <nl> <nl> bool operator = = ( const XlaOp & rhs ) const { <nl> class XlaBuilder { <nl> / / computation and fills the root_id in the pointer . <nl> StatusOr < ProgramShape > GetProgramShape ( int64 * root_id ) const ; <nl> <nl> + / / Returns shapes for the operands . <nl> + StatusOr < std : : vector < Shape > > GetOperandShapes ( <nl> + tensorflow : : gtl : : ArraySlice < XlaOp > operands ) const ; <nl> + <nl> / / A visitor which checks whether an operation is a compile - time constant , <nl> / / meaning that it doesn ' t depend on any parameters , or on any stateful <nl> / / operation such as ` RngNormal ` or ` Infeed ` . The visitor walks the <nl> mmm a / tensorflow / compiler / xla / client / xla_client / xla_builder_test . cc <nl> ppp b / tensorflow / compiler / xla / client / xla_client / xla_builder_test . cc <nl> TEST_F ( XlaBuilderTest , ParamPlusParamHasBroadcast ) { <nl> auto y = b . Parameter ( 1 , y_shape , " y " ) ; <nl> auto add = b . Add ( x , y , / * broadcast_dimensions = * / { 0 , 1 } ) ; <nl> <nl> - TF_ASSERT_OK_AND_ASSIGN ( auto add_shape , add . GetShape ( ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto add_shape , b . GetShape ( add ) ) ; <nl> EXPECT_TRUE ( ShapeUtil : : Equal ( add_shape , x_shape ) ) ; <nl> <nl> TF_ASSERT_OK_AND_ASSIGN ( auto module , BuildHloModule ( & b ) ) ; <nl> TEST_F ( XlaBuilderTest , OperandFromWrongBuilder ) { <nl> builder . Add ( p0 , p0 ) ; <nl> auto statusor = builder . Build ( ) ; <nl> ASSERT_FALSE ( statusor . ok ( ) ) ; <nl> - EXPECT_THAT ( statusor . status ( ) . error_message ( ) , <nl> - HasSubstr ( " Do not add XlaOp from builder b1 to builder main " ) ) ; <nl> + EXPECT_THAT ( <nl> + statusor . status ( ) . error_message ( ) , <nl> + HasSubstr ( <nl> + " built by builder ' b1 ' , but is trying to use it in builder ' main ' " ) ) ; <nl> } <nl> <nl> TEST_F ( XlaBuilderTest , ReshapeDefaultOrder ) { <nl> mmm a / tensorflow / compiler / xla / service / BUILD <nl> ppp b / tensorflow / compiler / xla / service / BUILD <nl> tf_cc_test ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " liveness_util " , <nl> - srcs = [ " liveness_util . cc " ] , <nl> - hdrs = [ " liveness_util . h " ] , <nl> - deps = [ <nl> - " : hlo " , <nl> - " : hlo_dataflow_analysis " , <nl> - " : logical_buffer " , <nl> - " : tuple_points_to_analysis " , <nl> - " / / tensorflow / compiler / xla : shape_util " , <nl> - " / / tensorflow / compiler / xla : types " , <nl> - " / / tensorflow / compiler / xla : util " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_cc_test ( <nl> - name = " liveness_util_test " , <nl> - srcs = [ " liveness_util_test . 
cc " ] , <nl> - deps = [ <nl> - " : hlo " , <nl> - " : liveness_util " , <nl> - " : tuple_points_to_analysis " , <nl> - " / / tensorflow / compiler / xla / tests : hlo_test_base " , <nl> - " / / tensorflow / compiler / xla / tests : xla_internal_test_main " , <nl> - ] , <nl> - ) <nl> - <nl> cc_library ( <nl> name = " buffer_liveness " , <nl> srcs = [ <nl> cc_library ( <nl> deps = [ <nl> " : hlo " , <nl> " : hlo_ordering " , <nl> - " : liveness_util " , <nl> " : logical_buffer " , <nl> " : tuple_points_to_analysis " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> cc_library ( <nl> " : hlo_dataflow_analysis " , <nl> " : hlo_proto " , <nl> " : hlo_value " , <nl> - " : liveness_util " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> " / / tensorflow / compiler / xla : status_macros " , <nl> " / / tensorflow / compiler / xla : statusor " , <nl> cc_library ( <nl> " : hlo " , <nl> " : hlo_ordering " , <nl> " : hlo_proto " , <nl> - " : liveness_util " , <nl> " : tuple_points_to_analysis " , <nl> " / / tensorflow / compiler / xla : statusor " , <nl> " / / tensorflow / compiler / xla : util " , <nl> tf_cc_test ( <nl> " : hlo_execution_profile " , <nl> " / / tensorflow / compiler / xla / tests : hlo_test_base " , <nl> " / / tensorflow / compiler / xla / tests : xla_internal_test_main " , <nl> + " / / tensorflow / compiler / xla / tools / parser : hlo_parser " , <nl> " / / tensorflow / core : lib " , <nl> ] , <nl> ) <nl> cc_library ( <nl> " : hlo_graph_dumper " , <nl> " : hlo_ordering " , <nl> " : hlo_pass " , <nl> - " : liveness_util " , <nl> " : logical_buffer " , <nl> " : tuple_simplifier " , <nl> " / / tensorflow / compiler / xla : status_macros " , <nl> cc_library ( <nl> " : hlo_dce " , <nl> " : hlo_ordering " , <nl> " : hlo_scheduling " , <nl> - " : liveness_util " , <nl> " : logical_buffer " , <nl> " : tuple_points_to_analysis " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> cc_library ( <nl> " / / tensorflow / core : lib " , <nl> " / / tensorflow / core : stream_executor_no_cuda " , <nl> " / / third_party / eigen3 " , <nl> - " @ com_google_absl / / absl / memory " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / compiler / xla / service / buffer_assignment . h <nl> ppp b / tensorflow / compiler / xla / service / buffer_assignment . h <nl> class BufferAssignment { <nl> / / Only BufferAssigner can build or modify BufferAssignments . <nl> friend class BufferAssigner ; <nl> <nl> - explicit BufferAssignment ( const HloModule * module , <nl> - std : : unique_ptr < BufferLiveness > liveness , <nl> - LogicalBuffer : : SizeFunction buffer_size , <nl> - LogicalBuffer : : AlignmentFunction color_alignment ) <nl> + BufferAssignment ( const HloModule * module , <nl> + std : : unique_ptr < BufferLiveness > liveness , <nl> + LogicalBuffer : : SizeFunction buffer_size , <nl> + LogicalBuffer : : AlignmentFunction color_alignment ) <nl> : module_ ( module ) , <nl> liveness_ ( std : : move ( liveness ) ) , <nl> buffer_size_ ( std : : move ( buffer_size ) ) , <nl> mmm a / tensorflow / compiler / xla / service / buffer_liveness . cc <nl> ppp b / tensorflow / compiler / xla / service / buffer_liveness . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / compiler / xla / service / hlo_computation . h " <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> # include " tensorflow / compiler / xla / service / logical_buffer . h " <nl> # include " tensorflow / compiler / xla / shape_util . 
h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> bool BufferLiveness : : live_range_strictly_before ( const LogicalBuffer & a , <nl> for ( const BufferAlias & alias : points_to_analysis_ - > GetBufferAliases ( a ) ) { <nl> / / Every user of ' a ' must be a predecessor of ' b ' or ' b ' itself . <nl> for ( auto user : alias . instruction ( ) - > users ( ) ) { <nl> - if ( DoesNotUseOperandBuffer ( alias . instruction ( ) , alias . index ( ) , user , <nl> - points_to_analysis ( ) ) ) { <nl> + if ( points_to_analysis ( ) . DoesNotUseOperandBuffer ( alias . instruction ( ) , <nl> + alias . index ( ) , user ) ) { <nl> continue ; <nl> } <nl> if ( user ! = b . instruction ( ) & & <nl> bool BufferLiveness : : live_range_strictly_before ( const LogicalBuffer & a , <nl> / / the qualifications specified in CanShareOperandBufferWithUser . <nl> for ( const BufferAlias & alias : points_to_analysis_ - > GetBufferAliases ( a ) ) { <nl> if ( b . instruction ( ) - > IsUserOf ( alias . instruction ( ) ) & & <nl> - ! CanShareOperandBufferWithUser ( alias . instruction ( ) , alias . index ( ) , <nl> - b . instruction ( ) , b . index ( ) , <nl> - points_to_analysis ( ) ) ) { <nl> + ! points_to_analysis ( ) . CanShareOperandBufferWithUser ( <nl> + alias . instruction ( ) , alias . index ( ) , b . instruction ( ) , b . index ( ) ) ) { <nl> return false ; <nl> } <nl> } <nl> mmm a / tensorflow / compiler / xla / service / copy_insertion . cc <nl> ppp b / tensorflow / compiler / xla / service / copy_insertion . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / hlo_module . h " <nl> # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> # include " tensorflow / compiler / xla / service / hlo_ordering . h " <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> # include " tensorflow / compiler / xla / service / logical_buffer . h " <nl> # include " tensorflow / compiler / xla / service / tuple_simplifier . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> mmm a / tensorflow / compiler / xla / service / cpu / dot_op_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / cpu / dot_op_emitter . h <nl> class DotOpEmitter { <nl> / / The number of columns on the RHS . <nl> int64 n ; <nl> <nl> - / / True if the LHS matrix column major . <nl> + / / True if the LHS matrix is column major . <nl> bool lhs_column_major ; <nl> <nl> / / True if the LHS contraction dimension is not 1 . <nl> bool lhs_non_canonical ; <nl> <nl> - / / True if the RHS matrix column major . <nl> + / / True if the RHS matrix is column major . <nl> bool rhs_column_major ; <nl> <nl> / / True if the RHS contraction dimension is not 0 . <nl> mmm a / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> Status IrEmitter : : HandleReduceWindow ( HloInstruction * reduce_window ) { <nl> HloComputation * function = reduce_window - > to_apply ( ) ; <nl> TF_RETURN_IF_ERROR ( ElementTypesSameAndSupported ( <nl> / * instruction = * / * reduce_window , / * operands = * / { operand } , <nl> - / * supported_types = * / { F32 , BF16 } ) ) ; <nl> + / * supported_types = * / { F32 , BF16 , S32 } ) ) ; <nl> <nl> / / TODO ( b / 31410564 ) : Implement dilation for reduce - window . 
<nl> if ( window_util : : HasDilation ( window ) ) { <nl> mmm a / tensorflow / compiler / xla / service / gpu / BUILD <nl> ppp b / tensorflow / compiler / xla / service / gpu / BUILD <nl> cc_library ( <nl> " / / tensorflow / compiler / xla / service : transfer_manager " , <nl> " / / tensorflow / compiler / xla / service : tuple_points_to_analysis " , <nl> " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : stream_executor_no_cuda " , <nl> " / / tensorflow / core / platform / default / build_config : cublas_plugin " , <nl> " / / tensorflow / core / platform / default / build_config : cudnn_plugin " , <nl> mmm a / tensorflow / compiler / xla / service / gpu / gpu_executable . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_executable . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / tracing . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> namespace xla { <nl> namespace gpu { <nl> namespace { <nl> <nl> + using tensorflow : : tracing : : ScopedAnnotation ; <nl> + <nl> / / A helper class for profiling HLO in the course of GPU program execution . <nl> / / All of the profiling is guarded internally , to avoid the caller needing to <nl> / / have lots of conditionals sprinkled around . <nl> Status GpuExecutable : : ExecuteThunks ( <nl> sub_streams , hlo_module_ - > entry_computation ( ) ) ; <nl> uint64 start_micros = tensorflow : : Env : : Default ( ) - > NowMicros ( ) ; <nl> <nl> + / / This top - level trace serves two purposes : <nl> + / / 1 ) It marks the scope of the whole XLA module . <nl> + / / 2 ) It tells us whether tracing is enabled . We use this to avoid the <nl> + / / expensive HloInstruction : : ToString ( ) calls inside the loop below if <nl> + / / tracing is disabled . <nl> + ScopedAnnotation top_level_annotation ( hlo_module_ - > name ( ) , " XLA GPU module " ) ; <nl> + <nl> std : : map < const Thunk * , std : : unique_ptr < se : : Event > > thunk_to_finish_event ; <nl> for ( Thunk * thunk : thunk_schedule_ - > TotalOrder ( ) ) { <nl> + / / Annotate execution of this op if tracing was enabled when we started <nl> + / / running this module . If tracing is enabled * while * we ' re running the <nl> + / / module , we won ' t get any data , but that ' s probably an OK trade - off . <nl> + / / <nl> + / / TODO ( jlebar ) : Should we cache the results of HloInstruction : : ToString ( ) , <nl> + / / since we expect it to be an expensive call ? <nl> + tensorflow : : gtl : : optional < ScopedAnnotation > op_annotation ; <nl> + if ( top_level_annotation . IsEnabled ( ) ) { <nl> + op_annotation . emplace ( <nl> + thunk - > hlo_instruction ( ) ! = nullptr <nl> + ? thunk - > hlo_instruction ( ) - > ToString ( HloPrintOptions : : Canonical ( ) ) <nl> + : " < unknown > " , <nl> + " XLA op " ) ; <nl> + } <nl> + <nl> TF_RETURN_IF_ERROR ( thunk - > Initialize ( * this , executor ) ) ; <nl> int32 stream_no = <nl> thunk_schedule_ - > StreamNumberForHlo ( * thunk - > hlo_instruction ( ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter_nested . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter_nested . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / hlo_instruction . 
h " <nl> # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> # include " tensorflow / compiler / xla / service / llvm_ir / llvm_util . h " <nl> + # include " tensorflow / compiler / xla / service / llvm_ir / tuple_ops . h " <nl> # include " tensorflow / compiler / xla / service / name_uniquer . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> <nl> Status IrEmitterNested : : HandleParameter ( HloInstruction * parameter ) { <nl> Status IrEmitterNested : : EmitTargetElementLoop ( <nl> const HloInstruction & hlo , <nl> const llvm_ir : : ElementGenerator & element_generator ) { <nl> + / / For MOF we give the loop emitter an array for every output it should <nl> + / / generate . <nl> + if ( hlo . IsMultiOutputFusion ( ) ) { <nl> + std : : vector < llvm_ir : : IrArray > target_arrays ; <nl> + for ( int64 i = 0 , e = ShapeUtil : : TupleElementCount ( hlo . shape ( ) ) ; i ! = e ; <nl> + + + i ) { <nl> + target_arrays . push_back ( GetIrArray ( hlo , hlo , { i } ) ) ; <nl> + } <nl> + TF_RETURN_IF_ERROR ( <nl> + llvm_ir : : LoopEmitter ( element_generator , target_arrays , & ir_builder_ ) <nl> + . EmitLoop ( ) ) ; <nl> + <nl> + std : : vector < llvm : : Value * > tuple_operand_ptrs ; <nl> + for ( const llvm_ir : : IrArray & array : target_arrays ) { <nl> + tuple_operand_ptrs . push_back ( array . GetBasePointer ( ) ) ; <nl> + } <nl> + llvm_ir : : EmitTuple ( GetIrArray ( hlo , hlo ) , tuple_operand_ptrs , & ir_builder_ , <nl> + module_ ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> return llvm_ir : : LoopEmitter ( element_generator , GetIrArray ( hlo , hlo ) , <nl> & ir_builder_ ) <nl> . EmitLoop ( ) ; <nl> mmm a / tensorflow / compiler / xla / service / heap_simulator . cc <nl> ppp b / tensorflow / compiler / xla / service / heap_simulator . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / compiler / xla / map_util . h " <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> <nl> namespace xla { <nl> Status HeapSimulator : : RunComputation ( <nl> for ( const BufferValue * operand_buffer : operand_buffers_to_free ) { <nl> if ( buffer - > instruction ( ) - > IsUserOf ( operand_buffer - > instruction ( ) ) & & <nl> buffer - > instruction ( ) - > opcode ( ) ! = HloOpcode : : kCopy & & <nl> - CanShareOperandBufferWithUser ( <nl> + points_to_analysis . CanShareOperandBufferWithUser ( <nl> operand_buffer - > instruction ( ) , operand_buffer - > index ( ) , <nl> - buffer - > instruction ( ) , buffer - > index ( ) , points_to_analysis ) ) { <nl> + buffer - > instruction ( ) , buffer - > index ( ) ) ) { <nl> VLOG ( 3 ) < < " Sharing : " < < buffer - > ToString ( ) < < " with " <nl> < < operand_buffer - > ToString ( ) ; <nl> ShareBuffer ( buffer , operand_buffer , instruction ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_computation . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_computation . h <nl> class HloModule ; <nl> <nl> / / Describes a computation at the HLO level . <nl> / / <nl> - / / An HloComputation contains a directed acyclic graph of HLO instructions . The <nl> - / / computation has a single root instruction which produces the output of the <nl> - / / computation . <nl> + / / You can think of an HloComputation like a function . It has some inputs <nl> + / / ( parameters ) and returns exactly one value ( the value of its root node ) . 
If <nl> + / / you want to return multiple values , you can return a tuple . <nl> + / / <nl> + / / The instructions inside of a computation do not have an explicit total order . <nl> + / / Instead , they have a partial order determined by their data and control <nl> + / / dependencies . <nl> + / / <nl> + / / An HloModule contains one " entry computation " - - this is like main ( ) in a C <nl> + / / program . Every other computation inside of a module is attached to one or <nl> + / / more HloInstructions , as a " nested computation " . For example , the kMap <nl> + / / instruction has a nested computation and " applies " it to every element of its <nl> + / / input , elementwise . ( That is , the input [ x , y , z ] is transformed to [ f ( x ) , <nl> + / / f ( y ) , f ( z ) ] . ) <nl> class HloComputation { <nl> public : <nl> / / Builder class for HloComputation . <nl> mmm a / tensorflow / compiler / xla / service / hlo_dataflow_analysis . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_dataflow_analysis . cc <nl> Status HloDataflowAnalysis : : Verify ( ) const { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + bool HloDataflowAnalysis : : DoesNotUseOperandBuffer ( <nl> + const HloInstruction * operand , const ShapeIndex & index , <nl> + const HloInstruction * user ) const { <nl> + CHECK ( user - > IsUserOf ( operand ) ) <nl> + < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> + if ( user - > opcode ( ) = = HloOpcode : : kFusion & & <nl> + user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop ) { <nl> + / / Find fusion parameter associated with ' operand ' . <nl> + HloInstruction * fusion_param = <nl> + user - > fused_parameter ( user - > operand_index ( operand ) ) ; <nl> + / / Iterate through all users of all uses of the fusion parameter value . <nl> + / / Return false if any uses are detected , returns true otherwise . <nl> + const HloValue & value = GetValueDefinedAt ( fusion_param , index ) ; <nl> + return value . uses ( ) . empty ( ) ; <nl> + } else { <nl> + / / Return false if no value at ' operand ' and ' index ' is used at ' user ' . <nl> + for ( const HloValue * value : GetValueSet ( operand , index ) . values ( ) ) { <nl> + for ( const HloUse & use : value - > uses ( ) ) { <nl> + if ( use . instruction = = user ) { <nl> + return false ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + bool HloDataflowAnalysis : : CanShareOperandBufferWithUser ( <nl> + HloInstruction * operand , const ShapeIndex & operand_index , <nl> + HloInstruction * user , const ShapeIndex & user_index ) const { <nl> + CHECK ( user - > IsUserOf ( operand ) ) <nl> + < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> + const Shape & operand_subshape = <nl> + ShapeUtil : : GetSubshape ( operand - > shape ( ) , operand_index ) ; <nl> + const Shape & user_subshape = <nl> + ShapeUtil : : GetSubshape ( user - > shape ( ) , user_index ) ; <nl> + / / Check that operand and user emit the same shape and layout . <nl> + if ( ! ShapeUtil : : Equal ( operand_subshape , user_subshape ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + if ( user - > opcode ( ) = = HloOpcode : : kFusion ) { <nl> + / / Get the parameter associated with ' operand ' ; <nl> + HloInstruction * fusion_param = <nl> + user - > fused_parameter ( user - > operand_index ( operand ) ) ; <nl> + <nl> + const HloValue & value = GetValueDefinedAt ( fusion_param , operand_index ) ; <nl> + if ( value . 
uses ( ) . size ( ) ! = 1 ) { <nl> + return false ; <nl> + } <nl> + const HloUse & use = value . uses ( ) [ 0 ] ; <nl> + <nl> + if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop & & <nl> + user - > fused_expression_root ( ) - > opcode ( ) = = <nl> + HloOpcode : : kDynamicUpdateSlice ) { <nl> + / / Loop fusion with kDynamicUpdateSlice fused root . <nl> + / / <nl> + / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> + / / ' operand_index ' , and this singleton use is the fused root at operand <nl> + / / index 0 . <nl> + return use . instruction = = user - > fused_expression_root ( ) & & <nl> + use . operand_number = = 0 ; <nl> + } else if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kOutput & & <nl> + user - > fused_expression_root ( ) - > opcode ( ) = = HloOpcode : : kAdd ) { <nl> + / / Output fusion with kAdd fused root . <nl> + <nl> + / / Check if one operand of kAdd fused root is kDot or kConvolution . <nl> + auto * add = user - > fused_expression_root ( ) ; <nl> + auto add_operand_it = <nl> + std : : find_if ( add - > operands ( ) . begin ( ) , add - > operands ( ) . end ( ) , <nl> + [ & ] ( HloInstruction * operand ) { <nl> + return operand - > opcode ( ) = = HloOpcode : : kConvolution | | <nl> + operand - > opcode ( ) = = HloOpcode : : kDot ; <nl> + } ) ; <nl> + if ( add_operand_it = = add - > operands ( ) . end ( ) ) { <nl> + return false ; <nl> + } <nl> + auto * matched_add_operand = * add_operand_it ; <nl> + / / Calculate operand index of ' add ' operand which was not matched above . <nl> + const int64 other_add_operand_index = <nl> + matched_add_operand = = add - > operand ( 0 ) ? 1 : 0 ; <nl> + / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> + / / ' operand_index ' , and this singleton use is the fused root ( at operand <nl> + / / index ' other_add_operand_index ' ) . <nl> + return use . instruction = = user - > fused_expression_root ( ) & & <nl> + use . operand_number = = other_add_operand_index ; <nl> + } <nl> + } <nl> + if ( user - > opcode ( ) = = HloOpcode : : kDynamicUpdateSlice | | <nl> + user - > opcode ( ) = = HloOpcode : : kWhile ) { <nl> + / / We eliminated other users in BufferLiveness : : live_range_strictly_before , <nl> + / / so here we just need to check that the use is at operand index 0 . <nl> + std : : vector < int64 > operand_indices = user - > OperandIndices ( operand ) ; <nl> + return operand_indices . size ( ) = = 1 & & operand_indices [ 0 ] = = 0 ; <nl> + } <nl> + if ( user - > opcode ( ) = = HloOpcode : : kCall ) { <nl> + / / Get all uses of value defined by ' operand ' at ' operand_index ' . <nl> + const auto & uses = GetValueDefinedAt ( operand , operand_index ) . uses ( ) ; <nl> + / / Return true iff : <nl> + / / * ) There exists two uses of ' operand ' . <nl> + / / * ) One use is by ' user ' ( caller ) . <nl> + / / * ) One use is by root instruction of called computation ( callee root ) . <nl> + / / ( Note : we check the root of the called computation , because the <nl> + / / root result buffer is required to alias with the Call result buffer ) . <nl> + / / * ) The root instruction of the called computation is element - wise on <nl> + / / ' operand ' . <nl> + const bool found_caller_use = <nl> + std : : find_if ( uses . begin ( ) , uses . end ( ) , [ user ] ( const HloUse & use ) { <nl> + return use . instruction = = user ; <nl> + } ) ! = uses . 
end ( ) ; <nl> + auto * callee_root = user - > to_apply ( ) - > root_instruction ( ) ; <nl> + const bool found_elementwise_callee_use = <nl> + std : : find_if ( <nl> + uses . begin ( ) , uses . end ( ) , [ callee_root ] ( const HloUse & use ) { <nl> + return use . instruction = = callee_root & & <nl> + callee_root - > IsElementwiseOnOperand ( use . operand_number ) ; <nl> + } ) ! = uses . end ( ) ; <nl> + return uses . size ( ) = = 2 & & found_caller_use & & found_elementwise_callee_use ; <nl> + } <nl> + / / Check if ' user ' is element - wise . <nl> + return user - > IsElementwise ( ) ; <nl> + } <nl> + <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / hlo_dataflow_analysis . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_dataflow_analysis . h <nl> class HloDataflowAnalysis { <nl> <nl> string ToString ( ) const ; <nl> <nl> + / / Returns true if ' user ' cannot possibly use the buffer at ' index ' in <nl> + / / ' operand ' . Returns false otherwise . <nl> + / / <nl> + / / REQUIRES : ' operand ' is an operand of ' user ' . <nl> + bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> + const ShapeIndex & index , <nl> + const HloInstruction * user ) const ; <nl> + <nl> + / / Returns true if ' user ' ( at ' user_index ' ) can share a buffer with its <nl> + / / operand ' operand ' ( at ' operand_index ' ) . Returns false otherwise . <nl> + / / <nl> + / / REQUIRES : ' operand ' is an operand of ' user ' . <nl> + bool CanShareOperandBufferWithUser ( HloInstruction * operand , <nl> + const ShapeIndex & operand_index , <nl> + HloInstruction * user , <nl> + const ShapeIndex & user_index ) const ; <nl> + <nl> protected : <nl> HloDataflowAnalysis ( const HloModule & module , bool ssa_form , <nl> bool bitcast_defines_value = false ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_dataflow_analysis_test . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_dataflow_analysis_test . cc <nl> INSTANTIATE_TEST_CASE_P ( HloDataflowAnalysisInstantiation , <nl> HloDataflowAnalysisTest , <nl> : : testing : : Values ( false , true ) ) ; <nl> <nl> + class HloDataflowAnalysisTestBase : public HloTestBase { <nl> + protected : <nl> + void BuildModule ( std : : unique_ptr < HloComputation > computation ) { <nl> + module_ = CreateNewModule ( ) ; <nl> + computation_ = module_ - > AddEntryComputation ( std : : move ( computation ) ) ; <nl> + } <nl> + <nl> + void RunAnalysis ( ) { <nl> + CHECK_NOTNULL ( module_ . get ( ) ) ; <nl> + dataflow_analysis_ = HloDataflowAnalysis : : Run ( * module_ ) . ConsumeValueOrDie ( ) ; <nl> + } <nl> + <nl> + void BuildModuleAndRunAnalysis ( std : : unique_ptr < HloComputation > computation ) { <nl> + BuildModule ( std : : move ( computation ) ) ; <nl> + RunAnalysis ( ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < HloModule > module_ ; <nl> + HloComputation * computation_ = nullptr ; <nl> + std : : unique_ptr < HloDataflowAnalysis > dataflow_analysis_ ; <nl> + } ; <nl> + <nl> + class DoesNotUseOperandBufferTest : public HloDataflowAnalysisTestBase { } ; <nl> + <nl> + TEST_F ( DoesNotUseOperandBufferTest , GetTupleElement ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape elem_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeTupleShape ( { elem_shape , elem_shape } ) , " tuple " ) ) ; <nl> + auto gte0 = builder . 
AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( elem_shape , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( elem_shape , tuple , 1 ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( elem_shape , HloOpcode : : kAdd , gte0 , gte1 ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + / / GetTupleElement instructions only access the top - level buffer of their <nl> + / / operand . <nl> + EXPECT_TRUE ( dataflow_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 0 } , gte0 ) ) ; <nl> + EXPECT_TRUE ( dataflow_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 1 } , gte1 ) ) ; <nl> + EXPECT_FALSE ( dataflow_analysis_ - > DoesNotUseOperandBuffer ( tuple , { } , gte0 ) ) ; <nl> + EXPECT_FALSE ( dataflow_analysis_ - > DoesNotUseOperandBuffer ( tuple , { } , gte1 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DoesNotUseOperandBufferTest , FusedDynamicUpdateSlice ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeTupleShape ( { data_shape , data_shape } ) , " tuple " ) ) ; <nl> + auto gte0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 1 ) ) ; <nl> + <nl> + / / Create a DynamicUpdateSlice instruction of tuple element 1 . <nl> + auto starts = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR1 < int32 > ( { 2 } ) ) ) ; <nl> + auto update = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR1 < float > ( { 2 . f , 2 . f , 2 . f } ) ) ) ; <nl> + auto dynamic_update_slice = <nl> + builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> + data_shape , gte1 , update , starts ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { gte0 , dynamic_update_slice } ) ) ; <nl> + <nl> + BuildModule ( builder . Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { dynamic_update_slice , starts , update , gte1 } , <nl> + HloInstruction : : FusionKind : : kLoop ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / The fusion instruction never uses tuple element 0 , but does use element 1 . <nl> + EXPECT_TRUE ( dataflow_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 0 } , fusion ) ) ; <nl> + EXPECT_FALSE ( dataflow_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 1 } , fusion ) ) ; <nl> + } <nl> + <nl> + class CanShareOperandBufferWithUserTest : public HloDataflowAnalysisTestBase { } ; <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , ElementWiseSameShape ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> + auto exp = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kExp , param ) ) ; <nl> + auto log = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kLog , exp ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . 
Build ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( param , { } , exp , { } ) ) ; <nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( exp , { } , log , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , ElementWiseDifferentShape ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape in_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + Shape out_shape = ShapeUtil : : MakeShape ( PRED , { 8 } ) ; <nl> + auto param0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , in_shape , " param0 " ) ) ; <nl> + auto param1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , in_shape , " param1 " ) ) ; <nl> + auto result = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( out_shape , HloOpcode : : kEq , param0 , param1 ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( dataflow_analysis_ - > CanShareOperandBufferWithUser ( param0 , { } , <nl> + result , { } ) ) ; <nl> + EXPECT_FALSE ( dataflow_analysis_ - > CanShareOperandBufferWithUser ( param1 , { } , <nl> + result , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , CopyShares ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> + auto exp = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kExp , param ) ) ; <nl> + auto copy = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kCopy , exp ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( param , { } , exp , { } ) ) ; <nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( exp , { } , copy , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , FusedDynamicUpdateSlice ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeTupleShape ( { data_shape , data_shape } ) , " tuple " ) ) ; <nl> + auto gte0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 1 ) ) ; <nl> + <nl> + / / Create a DynamicUpdateSlice instruction of tuple element 1 . <nl> + auto starts = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR1 < int32 > ( { 2 } ) ) ) ; <nl> + auto update = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR1 < float > ( { 2 . f , 2 . f , 2 . f } ) ) ) ; <nl> + auto dynamic_update_slice = <nl> + builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> + data_shape , gte1 , update , starts ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { gte0 , dynamic_update_slice } ) ) ; <nl> + <nl> + BuildModule ( builder . 
Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { dynamic_update_slice , starts , update , gte1 } , <nl> + HloInstruction : : FusionKind : : kLoop ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / The fusion instruction can share with tuple element 1 . <nl> + EXPECT_FALSE ( dataflow_analysis_ - > CanShareOperandBufferWithUser ( tuple , { 0 } , <nl> + fusion , { } ) ) ; <nl> + EXPECT_TRUE ( dataflow_analysis_ - > CanShareOperandBufferWithUser ( tuple , { 1 } , <nl> + fusion , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , DynamicUpdateSliceCanShare ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + Shape update_shape = ShapeUtil : : MakeShape ( F32 , { 4 } ) ; <nl> + Shape starts_shape = ShapeUtil : : MakeShape ( S32 , { 1 } ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + auto update = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , update_shape , " update " ) ) ; <nl> + auto starts = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 2 , starts_shape , " starts " ) ) ; <nl> + auto dus = builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> + data_shape , data , update , starts ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + / / The DynamicUpdateSlice instruction can share with the data operand , but not <nl> + / / with update or starts . <nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( data , { } , dus , { } ) ) ; <nl> + EXPECT_FALSE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( update , { } , dus , { } ) ) ; <nl> + EXPECT_FALSE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( starts , { } , dus , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , FusedDotAdd ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> + <nl> + auto a = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR2 < float > ( { { 1 . 0 , 0 . 0 } , { 0 . 0 , 1 . 0 } } ) ) ) ; <nl> + auto b = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR2 < float > ( { { 2 . 0 , 2 . 0 } , { 2 . 0 , 2 . 0 } } ) ) ) ; <nl> + <nl> + DotDimensionNumbers dot_dnums ; <nl> + dot_dnums . add_lhs_contracting_dimensions ( 1 ) ; <nl> + dot_dnums . add_rhs_contracting_dimensions ( 0 ) ; <nl> + auto dot = builder . AddInstruction ( <nl> + HloInstruction : : CreateDot ( data_shape , a , b , dot_dnums ) ) ; <nl> + <nl> + auto one = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto add_operand = builder . AddInstruction ( <nl> + HloInstruction : : CreateBroadcast ( data_shape , one , { 1 } ) ) ; <nl> + <nl> + auto add = builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + data_shape , HloOpcode : : kAdd , dot , add_operand ) ) ; <nl> + <nl> + BuildModule ( builder . Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { add , dot } , HloInstruction : : FusionKind : : kOutput ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / Output fused dot add should be able to share buffer with ' add_operand ' . 
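// (Editorial note: this expectation relies on the kOutput contract documented in hlo_instruction.h below: output fusion requires that one operand buffer of the fusion be able to alias the output buffer, and the elementwise kAdd root makes 'add_operand' that operand.)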
<nl> + EXPECT_TRUE ( dataflow_analysis_ - > CanShareOperandBufferWithUser ( add_operand , { } , <nl> + fusion , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , OutputFusionCantAliasOperandBuffer ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> + <nl> + auto one = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto operand = builder . AddInstruction ( <nl> + HloInstruction : : CreateBroadcast ( data_shape , one , { 1 } ) ) ; <nl> + <nl> + auto reverse = builder . AddInstruction ( <nl> + HloInstruction : : CreateReverse ( data_shape , operand , { 0 , 1 } ) ) ; <nl> + <nl> + auto two = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR2 < float > ( { { 2 . 0 , 2 . 0 } , { 2 . 0 , 2 . 0 } } ) ) ) ; <nl> + <nl> + auto add = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( data_shape , HloOpcode : : kAdd , reverse , two ) ) ; <nl> + <nl> + BuildModule ( builder . Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { add , two , reverse } , HloInstruction : : FusionKind : : kOutput ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / Output fused operand - > reverse - > add cannot alias operand buffer ' operand ' . <nl> + EXPECT_FALSE ( dataflow_analysis_ - > CanShareOperandBufferWithUser ( operand , { } , <nl> + fusion , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , WhileCanShare ) { <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + <nl> + auto make_cond = [ this , & data_shape ] ( ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) + " . Cond " ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + ShapeUtil : : MakeShape ( PRED , { } ) , HloOpcode : : kEq , data , data ) ) ; <nl> + return builder . Build ( ) ; <nl> + } ; <nl> + <nl> + auto make_body = [ this , & data_shape ] ( ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) + " . Body " ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( data_shape , HloOpcode : : kAdd , data , data ) ) ; <nl> + return builder . Build ( ) ; <nl> + } ; <nl> + <nl> + module_ = CreateNewModule ( ) ; <nl> + HloComputation * cond_computation = <nl> + module_ - > AddEmbeddedComputation ( make_cond ( ) ) ; <nl> + HloComputation * body_computation = <nl> + module_ - > AddEmbeddedComputation ( make_body ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + auto whil = builder . AddInstruction ( HloInstruction : : CreateWhile ( <nl> + data_shape , cond_computation , body_computation , data ) ) ; <nl> + computation_ = module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / The While instruction can share with the data operand . 
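// (Editorial note: a kWhile's loop state flows through operand index 0, so the analysis treats the input buffer as updatable in place across iterations; see the kWhile case in CanShareOperandBufferWithUser above.)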
<nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( data , { } , whil , { } ) ) ; <nl> + } <nl> + <nl> + / / Tests that Call can alias operand buffer if the only use of the operand <nl> + / / in the called computation is an elementwise instruction . <nl> + TEST_F ( CanShareOperandBufferWithUserTest , CallToComputationWithFusionRoot ) { <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + / / Build sub - computation with fusion root . <nl> + auto sub_builder = HloComputation : : Builder ( TestName ( ) + " _sub " ) ; <nl> + auto sub_param = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " sub_param " ) ) ; <nl> + auto one = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto ones = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateBroadcast ( shape , one , { 1 } ) ) ; <nl> + auto add = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , sub_param , ones ) ) ; <nl> + <nl> + module_ = CreateNewModule ( ) ; <nl> + auto sub_computation = module_ - > AddEmbeddedComputation ( sub_builder . Build ( ) ) ; <nl> + sub_computation - > CreateFusionInstruction ( { add , ones } , <nl> + HloInstruction : : FusionKind : : kLoop ) ; <nl> + <nl> + / / Build entry - computation with kCall which calls ' sub_computation ' . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + auto param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> + auto reverse = <nl> + builder . AddInstruction ( HloInstruction : : CreateReverse ( shape , param , { 0 } ) ) ; <nl> + auto call = builder . AddInstruction ( <nl> + HloInstruction : : CreateCall ( shape , { reverse } , sub_computation ) ) ; <nl> + computation_ = module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + RunAnalysis ( ) ; <nl> + <nl> + EXPECT_TRUE ( <nl> + dataflow_analysis_ - > CanShareOperandBufferWithUser ( reverse , { } , call , { } ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / hlo_evaluator_typed_visitor . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_evaluator_typed_visitor . h <nl> class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault { <nl> int64 rank = ShapeUtil : : Rank ( operand_literal . shape ( ) ) ; <nl> <nl> HloEvaluator embedded_evaluator ( parent_ - > max_loop_iterations_ ) ; <nl> - DimensionVector source_index ( rank ) ; <nl> - <nl> - std : : fill ( source_index . begin ( ) , source_index . end ( ) , 0 ) ; <nl> + DimensionVector source_index ( rank , 0 ) ; <nl> + <nl> + / / Used in the dual IterateThroughWindow lambdas below . Hoisted to avoid <nl> + / / dynamic memory allocations . <nl> + auto curr_val_literal = Literal : : CreateR0 < ReturnT > ( ReturnT ( ) ) ; <nl> + auto selected_val_literal = Literal : : CreateR0 < ReturnT > ( ReturnT ( ) ) ; <nl> + auto source_literal_scatter = Literal : : CreateR0 < ReturnT > ( ReturnT ( ) ) ; <nl> + auto scattered_literal = Literal : : CreateR0 < ReturnT > ( ReturnT ( ) ) ; <nl> do { <nl> / / For each element in ` source ` , we place a window in ` operand ` . 
For each <nl> / / window placement , we iterate inside the window twice : <nl> class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault { <nl> selected_val = curr_val ; <nl> selected_index = operand_index ; <nl> } <nl> - const auto curr_val_literal = Literal : : CreateR0 < ReturnT > ( curr_val ) ; <nl> - const auto selected_val_literal = <nl> - Literal : : CreateR0 < ReturnT > ( * selected_val ) ; <nl> - <nl> - const std : : vector < const Literal * > args = { <nl> - selected_val_literal . get ( ) , curr_val_literal . get ( ) } ; <nl> + curr_val_literal - > Set ( { } , curr_val ) ; <nl> + selected_val_literal - > Set ( { } , * selected_val ) ; <nl> std : : unique_ptr < Literal > computed_result = <nl> - embedded_evaluator . Evaluate < const Literal * > ( * select , args ) <nl> + embedded_evaluator <nl> + . Evaluate < const Literal * > ( <nl> + * select , <nl> + { selected_val_literal . get ( ) , curr_val_literal . get ( ) } ) <nl> . ConsumeValueOrDie ( ) ; <nl> bool selected = ! computed_result - > Get < bool > ( { } ) ; <nl> if ( selected ) { <nl> class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault { <nl> selected_index - > begin ( ) ) ) { <nl> auto source = source_literal . Get < ReturnT > ( source_index ) ; <nl> auto scattered = result - > Get < ReturnT > ( operand_index ) ; <nl> - const auto source_literal = Literal : : CreateR0 < ReturnT > ( source ) ; <nl> - const auto scattered_literal = <nl> - Literal : : CreateR0 < ReturnT > ( scattered ) ; <nl> - <nl> - const std : : vector < const Literal * > args = { <nl> - source_literal . get ( ) , scattered_literal . get ( ) } ; <nl> + source_literal_scatter - > Set ( { } , source ) ; <nl> + scattered_literal - > Set ( { } , scattered ) ; <nl> std : : unique_ptr < Literal > computed_result = <nl> - embedded_evaluator . Evaluate < const Literal * > ( * scatter , args ) <nl> + embedded_evaluator <nl> + . Evaluate < const Literal * > ( * scatter , <nl> + { source_literal_scatter . get ( ) , <nl> + scattered_literal . get ( ) } ) <nl> . ConsumeValueOrDie ( ) ; <nl> result - > Set ( operand_index , computed_result - > Get < ReturnT > ( { } ) ) ; <nl> / / Clear visit states so that we can use the evaluator again <nl> class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / Enable CLZ only for int32 and uint32 . <nl> + / / Enable CLZ only for int32 , uint32 , int64 and uint64 . <nl> template < <nl> typename NativeT , <nl> typename std : : enable_if < <nl> ( std : : is_floating_point < NativeT > : : value | | <nl> std : : is_integral < NativeT > : : value | | is_complex_t < NativeT > : : value ) & & <nl> !
( std : : is_same < NativeT , uint32 > : : value | | <nl> - std : : is_same < NativeT , int32 > : : value ) > : : type * = nullptr > <nl> + std : : is_same < NativeT , int32 > : : value | | <nl> + std : : is_same < NativeT , int64 > : : value | | <nl> + std : : is_same < NativeT , uint64 > : : value ) > : : type * = nullptr > <nl> Status HandleClz ( HloInstruction * clz ) { <nl> return InvalidArgument ( " Unsupported type for Clz " ) ; <nl> } <nl> class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + template < typename NativeT , <nl> + typename std : : enable_if < <nl> + std : : is_same < NativeT , uint64 > : : value | | <nl> + std : : is_same < NativeT , int64 > : : value > : : type * = nullptr > <nl> + Status HandleClz ( HloInstruction * clz ) { <nl> + TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ clz ] , <nl> + ElementWiseUnaryOp ( clz , [ ] ( ElementwiseT elem_operand ) { <nl> + return 63 - tensorflow : : Log2Floor64 ( elem_operand ) ; <nl> + } ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status HandleClz ( HloInstruction * clz ) override { <nl> return HandleClz < ElementwiseT > ( clz ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / hlo_execution_profile_test . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_execution_profile_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / hlo_execution_profile . h " <nl> # include " tensorflow / compiler / xla / service / hlo_cost_analysis . h " <nl> # include " tensorflow / compiler / xla / tests / hlo_test_base . h " <nl> + # include " tensorflow / compiler / xla / tools / parser / hlo_parser . h " <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> <nl> namespace xla { <nl> using : : testing : : ContainsRegex ; <nl> class HloExecutionProfileTest : public HloTestBase { } ; <nl> <nl> TEST_F ( HloExecutionProfileTest , Basic ) { <nl> - std : : unique_ptr < HloModule > hlo_module = CreateNewModule ( ) ; <nl> - <nl> - HloComputation : : Builder builder ( TestName ( ) ) ; <nl> + auto hlo_module = tools : : Parse ( R " ( <nl> + HloModule test_module <nl> + ENTRY entry_computation { <nl> + lhs = f32 [ 30 , 30 ] { 1 , 0 } parameter ( 0 ) <nl> + rhs = f32 [ 30 , 30 ] { 1 , 0 } parameter ( 1 ) <nl> + add = f32 [ 30 , 30 ] { 1 , 0 } add ( lhs , rhs ) <nl> + ROOT dot = f32 [ 30 , 30 ] { 1 , 0 } dot ( lhs , add ) , lhs_contracting_dims = { 1 } , rhs_contracting_dims = { 0 } <nl> + } ) " ) <nl> + . ValueOrDie ( ) ; <nl> + const HloInstruction * dot_instruction = <nl> + hlo_module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + const HloInstruction * add_instruction = dot_instruction - > operand ( 1 ) ; <nl> Shape shape = ShapeUtil : : MakeShape ( F32 , { 30 , 30 } ) ; <nl> - HloInstruction * param_lhs = <nl> - builder . AddInstruction ( HloInstruction : : CreateParameter ( 0 , shape , " lhs " ) ) ; <nl> - HloInstruction * param_rhs = <nl> - builder . AddInstruction ( HloInstruction : : CreateParameter ( 1 , shape , " rhs " ) ) ; <nl> - HloInstruction * add_instruction = <nl> - builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> - shape , HloOpcode : : kAdd , param_lhs , param_rhs ) ) ; <nl> - HloInstruction * dot_instruction = <nl> - builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> - shape , HloOpcode : : kDot , param_lhs , add_instruction ) ) ; <nl> - <nl> - hlo_module - > AddEntryComputation ( builder . 
Build ( ) ) ; <nl> <nl> auto shape_size_function = [ & ] ( const Shape & shape ) { <nl> const int64 pointer_size = 8 ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_instruction . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_instruction . h <nl> class CanonicalNameMap { <nl> tensorflow : : gtl : : FlatMap < string , string > canonical_name_map ; <nl> } ; <nl> <nl> - / / HLO instructions are the IR used by the high - level compiler . <nl> + / / HLO instructions are the atomic unit of the high - level compiler ' s IR . <nl> + / / <nl> + / / HloInstructions live inside of an HloComputation , which is analogous to a <nl> + / / function in other programming languages . Nodes have no total order within <nl> + / / their computation . Instead , they have a partial ordering determined by their <nl> + / / data and control dependencies . <nl> + / / <nl> + / / HLO does not have basic blocks or explicit " branch " instructions . Instead , <nl> + / / certain HloInstructions - - namely , kWhile , kConditional , and kCall - - encode <nl> + / / control flow . For example , the kConditional HLO executes one of two possible <nl> + / / computations , depending on the runtime value of a predicate . <nl> + / / <nl> + / / HLO is pure ( mostly ) . It has no concept of mutable state . Instead , data <nl> + / / values are produced by one HLO and flow into consumers across dependency <nl> + / / edges . <nl> class HloInstruction { <nl> public : <nl> + / / A fusion node computes the same value a call to its fusion computation <nl> + / / would compute . However , the choice of fusion kind dictates codegen <nl> + / / strategy for the backend . <nl> + / / <nl> + / / To generate code for a kFusion HloInstruction , most backends do something <nl> + / / like the following : <nl> + / / <nl> + / / 1 ) Identify the " primary " HloInstruction of the fused computation . <nl> + / / 2 ) Emit code that does the work of the primary node , creating its inputs <nl> + / / and transforming its outputs as specified by the fused computation . <nl> + / / <nl> + / / In step ( 2 ) , the code emitted is usually similar to the code that would be <nl> + / / emitted for an * unfused * version of the primary node , except that <nl> + / / <nl> + / / - when the primary node reads an element of one of its operands , instead <nl> + / / of loading the value from memory , it * computes * the value based on the <nl> + / / contents of the fused computation . <nl> + / / - when the primary node outputs a value , instead of storing it to memory , <nl> + / / it forwards the value to its users , which then perform additional <nl> + / / computations before the value is finally stored to memory at the root of <nl> + / / the fusion node . <nl> + / / <nl> + / / An HloInstruction ' s FusionKind helps us find the kFusion instruction ' s <nl> + / / primary node , and can also affect how we generate code in step ( 2 ) . <nl> + / / <nl> + / / - kInput : The primary node is the root of the fused instruction . <nl> + / / <nl> + / / - kOutput : The primary node is not the root of the fused instruction . <nl> + / / This fusion kind requires that one operand buffer of the fusion <nl> + / / instruction be able to alias the output buffer . This constraint is <nl> + / / usually enough to let backends find the primary node unambiguously . <nl> + / / <nl> + / / - kLoop : The primary node is the root of the fused computation , but , <nl> + / / unlike in input fusion , we prescribe a specific implementation for <nl> + / / codegen . 
Rather than generating code that looks like the code we ' d emit <nl> + / / for an unfused version of the primary / root node , we emit code that <nl> + / / generates one element of the root at a time . <nl> + / / <nl> + / / - kCustom : Custom category for backend - specific fusions that don ' t fit <nl> + / / into the above patterns . <nl> + / / <nl> + / / Not all backends support all fusion kinds , and given a particular fused <nl> + / / computation , it ' s not in general safe to change its fusion kind . Creation <nl> + / / of fusion nodes is always backend - specific . <nl> + / / <nl> + / / For elementwise ops ( e . g . kAdd ) , most backends would emit a <nl> + / / one - element - at - a - time implementation for the unfused version , so loop <nl> + / / fusion and input fusion are probably equivalent if the root node is <nl> + / / elementwise . They ' re not necessarily equivalent e . g . for kReduce , where an <nl> + / / implementation might emit something more sophisticated for an unfused or <nl> + / / input - fusion reduce , but will emit the naive code that reduces one element <nl> + / / at a time for loop fusion with a reduce as the root . <nl> + / / <nl> + / / Another way to think of loop fusion is that it ' s equivalent to input <nl> + / / fusion , but where the root node is an implicit identity node , whose <nl> + / / unfused implementation is " read one element , write one element " . <nl> + / / <nl> + / / TODO ( b / 79869434 ) : This categorization scheme is not great . For one thing , <nl> + / / input and loop fusion are basically the same thing : There is no reason for <nl> + / / the HLO to encode backend - specific decisions about how e . g . a reduce that ' s <nl> + / / the root of a fusion should be lowered . In addition , this scheme as <nl> + / / written doesn ' t work for multi - output fusion , where the primary node is <nl> + / / never actually the root ( which is a kTuple instruction that gathers the <nl> + / / multiple outputs of the fusion ) . <nl> enum class FusionKind { <nl> - kLoop , / / Fused into a loop . <nl> - kInput , / / Op ' s input is fused into the op itself . <nl> - kOutput , / / Op ' s output is fused into the op itself . <nl> - / / REQUIRES : At least one operand buffer must be able <nl> - / / to alias the output buffer . <nl> - kCustom , / / Custom category for backend - specific fusions that <nl> - / / do not match any of the more specific ones . <nl> + kLoop , <nl> + kInput , <nl> + kOutput , <nl> + kCustom , <nl> } ; <nl> <nl> ~ HloInstruction ( ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_module . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_module . h <nl> namespace xla { <nl> <nl> / / Describes a compilation unit at the HLO level . <nl> / / <nl> - / / A HLO module contains one or more HLO computations . The module contains one <nl> - / / " entry " computation which produces the result . The module also includes any <nl> - / / embedded computations used by instructions such as " map " and " reduce " . All <nl> - / / computations are owned by the module . <nl> + / / HloModule is the top - level unit in the HLO IR . It corresponds to a whole <nl> + / / " program " . Running a module , from beginning to end , is the only way to run <nl> + / / an XLA program . <nl> + / / <nl> + / / A module contains one " entry computation " ; this HloComputation is like main ( ) <nl> + / / in a C program . The result of running the module is the result of running <nl> + / / this computation . 
<nl> + / / <nl> + / / A module also contains some number of " nested computations " . Each nested <nl> + / / computation is attached to an HloInstruction within some other computation . <nl> + / / The meaning of the nested computation depends on the instruction it ' s <nl> + / / attached to . <nl> class HloModule { <nl> public : <nl> HloModule ( const string & name , <nl> mmm a / tensorflow / compiler / xla / service / hlo_module_group_metadata . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_module_group_metadata . cc <nl> string HloModuleGroupMetadata : : TrackedInstruction : : ToString ( ) const { <nl> <nl> / * static * / StatusOr < std : : unique_ptr < HloModuleGroupMetadata > > <nl> HloModuleGroupMetadata : : Build ( const std : : vector < HloModule * > & modules ) { <nl> - auto metadata = absl : : make_unique < HloModuleGroupMetadata > ( modules ) ; <nl> + auto metadata = MakeUnique < HloModuleGroupMetadata > ( modules ) ; <nl> TF_RETURN_IF_ERROR ( metadata - > Build ( ) ) ; <nl> return std : : move ( metadata ) ; <nl> } <nl> Status HloModuleGroupMetadata : : AddCompanion ( HloInstruction * instruction1 , <nl> if ( ! ContainsKey ( companion_set_index_ , instruction1 ) & & <nl> ! ContainsKey ( companion_set_index_ , instruction2 ) ) { <nl> companion_sets_ . push_back ( <nl> - absl : : make_unique < std : : unordered_set < HloInstruction * > > ( ) ) ; <nl> + tensorflow : : MakeUnique < std : : unordered_set < HloInstruction * > > ( ) ) ; <nl> auto companion_set = companion_sets_ . back ( ) . get ( ) ; <nl> companion_set - > insert ( instruction1 ) ; <nl> companion_set - > insert ( instruction2 ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_module_group_util . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_module_group_util . cc <nl> limitations under the License . <nl> # include < string > <nl> # include < utility > <nl> <nl> + # include " tensorflow / compiler / xla / ptr_util . h " <nl> # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> # include " tensorflow / compiler / xla / service / hlo_reachability . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> HloModuleGroupUtil : : ComputeReachability ( <nl> TF_RETURN_IF_ERROR ( <nl> VisitTopologicalOrder ( & visit_states , visit_function , root ) ) ; <nl> } <nl> - auto reachability = absl : : make_unique < HloReachabilityMap > ( post_order ) ; <nl> + auto reachability = MakeUnique < HloReachabilityMap > ( post_order ) ; <nl> for ( HloInstruction * hlo : post_order ) { <nl> reachability - > SetReachabilityToUnion ( GlobalPredecessors ( hlo ) , hlo ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / hlo_ordering . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_ordering . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / compiler / xla / service / hlo_computation . h " <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> bool HloOrdering : : UseIsBeforeValueDefinition ( <nl> / / is before the def if the instruction allows buffer sharing ( in place <nl> / / computation ) . <nl> if ( use . instruction = = value . defining_instruction ( ) & & <nl> - CanShareOperandBufferWithUser ( <nl> + dataflow . CanShareOperandBufferWithUser ( <nl> use . 
instruction - > mutable_operand ( use . operand_number ) , <nl> use . operand_index , value . defining_instruction ( ) , <nl> - value . defining_index ( ) , dataflow ) ) { <nl> + value . defining_index ( ) ) ) { <nl> VLOG ( 4 ) < < " use is value def , and instruction can share use buffer " ; <nl> return true ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / hlo_rematerialization . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_rematerialization . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> # include " tensorflow / compiler / xla / service / hlo_ordering . h " <nl> # include " tensorflow / compiler / xla / service / hlo_scheduling . h " <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> # include " tensorflow / compiler / xla / service / logical_buffer . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> ItemList GetUsers ( const InstructionList & instruction_list , <nl> for ( const BufferAlias & buffer_alias : <nl> points_to_analysis . GetBufferAliases ( * logical_buffer ) ) { <nl> for ( const HloInstruction * user : buffer_alias . instruction ( ) - > users ( ) ) { <nl> - if ( DoesNotUseOperandBuffer ( buffer_alias . instruction ( ) , <nl> - buffer_alias . index ( ) , user , <nl> - points_to_analysis ) ) { <nl> + if ( points_to_analysis . DoesNotUseOperandBuffer ( <nl> + buffer_alias . instruction ( ) , buffer_alias . index ( ) , user ) ) { <nl> / / The alias may be an operand of ' user ' , but the LogicalBuffer cannot <nl> / / possibly be used by the instruction so ignore ' user ' . This is the <nl> / / case , for example , for the tuple element buffers in a GetTupleElement <nl> mmm a / tensorflow / compiler / xla / service / hlo_runner . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_runner . cc <nl> limitations under the License . <nl> # include < string > <nl> # include < utility > <nl> <nl> - # include " absl / memory / memory . h " <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / compiler / xla / layout_util . h " <nl> # include " tensorflow / compiler / xla / ptr_util . h " <nl> StatusOr < std : : vector < std : : unique_ptr < Literal > > > HloRunner : : ExecuteReplicated ( <nl> int64 device = device_assignment ( i , 0 ) ; <nl> TF_ASSIGN_OR_RETURN ( se : : StreamExecutor * executor , <nl> backend ( ) . stream_executor ( device ) ) ; <nl> - streams . push_back ( absl : : make_unique < se : : Stream > ( executor ) ) ; <nl> + streams . push_back ( MakeUnique < se : : Stream > ( executor ) ) ; <nl> streams . back ( ) - > Init ( ) ; <nl> service_run_options . emplace_back ( GetServiceRunOptionsForDevice ( <nl> device , streams . back ( ) . get ( ) , & device_assignment ) ) ; <nl> StatusOr < std : : vector < std : : unique_ptr < Literal > > > HloRunner : : ExecuteReplicated ( <nl> num_threads + = options . 
num_replicas ; <nl> } <nl> if ( num_threads > 0 ) { <nl> - pool = absl : : make_unique < tensorflow : : thread : : ThreadPool > ( <nl> + pool = MakeUnique < tensorflow : : thread : : ThreadPool > ( <nl> tensorflow : : Env : : Default ( ) , " infeed_outfeed " , <nl> / * num_threads = * / num_threads ) ; <nl> } <nl> StatusOr < std : : vector < std : : unique_ptr < Literal > > > HloRunner : : ExecuteReplicated ( <nl> VLOG ( 1 ) < < " Starting outfeed on device " < < device ; <nl> for ( int64 step = 1 ; <nl> options . infeed_steps < 0 | | step < = options . infeed_steps ; + + step ) { <nl> - auto literal = absl : : make_unique < Literal > ( ) ; <nl> + auto literal = MakeUnique < Literal > ( ) ; <nl> TF_CHECK_OK ( backend ( ) . transfer_manager ( ) - > TransferLiteralFromOutfeed ( <nl> executor , options . outfeed_shape , literal . get ( ) ) ) ; <nl> if ( options . outfeed_values ! = nullptr ) { <nl> mmm a / tensorflow / compiler / xla / service / hlo_scheduling . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_scheduling . cc <nl> <nl> - <nl> - <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> namespace { <nl> / / since its output buffer is bigger than input . The sequence it creates will <nl> / / be : <nl> / / A D E F B C G <nl> - / / , which has a maximum memory usage of 5 ( at one point , B and F will be alive <nl> - / / together ) . <nl> + / / , which has a maximum memory usage of 6 ( B is alive while F is executing ) . <nl> / / <nl> - / / An optimal to shedule the previous graph will be : <nl> + / / An optimal way to schedule the previous graph is : <nl> / / A B C D E F G <nl> - / / , which has a maximum memory usage of 4 . <nl> + / / , which has a maximum memory usage of 5 ( when F is executing ) . <nl> / / <nl> class ListScheduler { <nl> public : <nl> class ListScheduler { <nl> static StatusOr < std : : vector < const HloInstruction * > > Run ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) { <nl> - ListScheduler scheduler ( computation , points_to_analysis , size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> + ListScheduler scheduler ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ; <nl> return scheduler . CreateSchedule ( ) ; <nl> } <nl> <nl> class ListScheduler { <nl> <nl> ListScheduler ( const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) <nl> : computation_ ( computation ) , <nl> points_to_analysis_ ( points_to_analysis ) , <nl> - size_function_ ( size_function ) { <nl> + size_function_ ( size_function ) , <nl> + memory_by_computation_ ( memory_by_computation ) { <nl> / / Create a map containing the LogicalBuffer uses for each HLO <nl> / / instruction . An HLO instruction " uses " a LogicalBuffer if the <nl> / / LogicalBuffer is in an operand of the instruction as indicated by <nl> class ListScheduler { <nl> } <nl> <nl> / / Returns the number of bytes freed if the HLO instruction is scheduled .
<nl> + / / If the instruction calls subcomputations , we count the memory used by the <nl> + / / subcomputations as memory " defined " by the instruction . This is not <nl> + / / entirely accurate , because subcomputation memory will be freed after the <nl> + / / instruction finishes . But it is more accurate than not taking <nl> + / / subcomputations into account at all . In the future , we may improve <nl> + / / accounting for subcomputation memory ( b / 65409243 ) . <nl> int64 BytesFreedIfScheduled ( const ReadyListEntry & entry ) { <nl> int64 freed_bytes = 0 ; <nl> for ( const auto & kv : entry . used_buffer_unscheduled_use_counts ) { <nl> class ListScheduler { <nl> freed_bytes + = size_function_ ( * buffer ) ; <nl> } <nl> } <nl> - return freed_bytes - entry . bytes_defined ; <nl> + / / We only count the memory usage of the largest subcomputation , instead of <nl> + / / adding them all , because subcomputations won ' t execute in parallel . <nl> + int64 max_subcomputation_bytes = 0 ; <nl> + for ( const auto * c : entry . instruction - > called_computations ( ) ) { <nl> + auto it = memory_by_computation_ . find ( c ) ; <nl> + if ( it ! = memory_by_computation_ . end ( ) ) { <nl> + int64 subcomputation_bytes = it - > second ; <nl> + if ( subcomputation_bytes > max_subcomputation_bytes ) { <nl> + max_subcomputation_bytes = subcomputation_bytes ; <nl> + } <nl> + } <nl> + } <nl> + return freed_bytes - entry . bytes_defined - max_subcomputation_bytes ; <nl> } <nl> <nl> / / Constructs the scheduling priority of the given instruction . <nl> class ListScheduler { <nl> const HloComputation & computation_ ; <nl> const TuplePointsToAnalysis & points_to_analysis_ ; <nl> const LogicalBuffer : : SizeFunction & size_function_ ; <nl> + / / Computations are analyzed in post - order . When scheduling an instruction <nl> + / / that includes subcomputations , such as a while loop , we use this map to <nl> + / / look up the memory needed by subcomputations . <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation_ ; <nl> <nl> / / A map containing the LogicalBuffers that each instruction uses . <nl> tensorflow : : gtl : : FlatMap < const HloInstruction * , <nl> int64 SumLogicalBufferSizes ( <nl> return size ; <nl> } <nl> <nl> + StatusOr < std : : vector < const HloInstruction * > > CreateMemoryMinimizingSequence ( <nl> + const HloComputation & computation , <nl> + const TuplePointsToAnalysis & points_to_analysis , <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const MemorySchedulerAlgorithm & algorithm , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> + VLOG ( 2 ) < < " Computation : " < < computation . name ( ) ; <nl> + if ( algorithm ) { <nl> + return algorithm ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ; <nl> + } <nl> + return DefaultMemoryScheduler ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> StatusOr < int64 > MinimumMemoryForComputation ( <nl> const HloComputation & computation , <nl> const std : : vector < const HloInstruction * > & sequence , <nl> StatusOr < int64 > MinimumMemoryForComputation ( <nl> return result . 
heap_size ; <nl> } <nl> <nl> - StatusOr < std : : vector < const HloInstruction * > > CreateMemoryMinimizingSequence ( <nl> - const HloComputation & computation , <nl> - const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function , <nl> - const MemorySchedulerAlgorithm & algorithm ) { <nl> - VLOG ( 2 ) < < " Computation : " < < computation . name ( ) ; <nl> - if ( algorithm ) { <nl> - return algorithm ( computation , points_to_analysis , size_function ) ; <nl> - } <nl> - return DefaultMemoryScheduler ( computation , points_to_analysis , size_function ) ; <nl> - } <nl> - <nl> - } / / namespace <nl> - <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemorySchedulerImpl ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemorySchedulerImpl ( <nl> StatusOr < std : : vector < const HloInstruction * > > ListMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) { <nl> - return ListScheduler : : Run ( computation , points_to_analysis , size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> + return ListScheduler : : Run ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ; <nl> } <nl> <nl> StatusOr < std : : vector < const HloInstruction * > > PostOrderMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) { <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> const auto & post_order = computation . MakeInstructionPostOrder ( ) ; <nl> return std : : vector < const HloInstruction * > { post_order . begin ( ) , <nl> post_order . 
end ( ) } ; <nl> StatusOr < std : : vector < const HloInstruction * > > PostOrderMemoryScheduler ( <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) { <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> return DFSMemorySchedulerImpl ( computation , points_to_analysis , size_function , <nl> / * reverse_heuristics = * / false ) ; <nl> } <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemoryScheduler ( <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemorySchedulerReverse ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) { <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> return DFSMemorySchedulerImpl ( computation , points_to_analysis , size_function , <nl> / * reverse_heuristics = * / true ) ; <nl> } <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemorySchedulerReverse ( <nl> StatusOr < std : : vector < const HloInstruction * > > DefaultMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) { <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) { <nl> / / We try both a list - scheduler based ordering and a DFS based ordering , and <nl> / / choose whichever returns a lower min - memory , not accounting for <nl> / / fragmentation . <nl> StatusOr < std : : vector < const HloInstruction * > > DefaultMemoryScheduler ( <nl> / / within the caller ' s context . But it ' s good enough for now . 
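// (Editorial note: each of the four candidate schedules computed below - list, DFS, post order, and reverse DFS - is costed with MinimumMemoryForComputation, and the cheapest sequence wins.)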
<nl> TF_ASSIGN_OR_RETURN ( <nl> std : : vector < const HloInstruction * > list_sequence , <nl> - ListMemoryScheduler ( computation , points_to_analysis , size_function ) ) ; <nl> + ListMemoryScheduler ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> const int64 list_memory , <nl> MinimumMemoryForComputation ( computation , list_sequence , <nl> points_to_analysis , size_function ) ) ; <nl> VLOG ( 2 ) < < " Min - memory list sequence : " < < HumanReadableNumBytes ( list_memory ) ; <nl> <nl> - TF_ASSIGN_OR_RETURN ( <nl> - std : : vector < const HloInstruction * > dfs_sequence , <nl> - DFSMemoryScheduler ( computation , points_to_analysis , size_function ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( std : : vector < const HloInstruction * > dfs_sequence , <nl> + DFSMemoryScheduler ( computation , points_to_analysis , <nl> + size_function , memory_by_computation ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> const int64 dfs_memory , <nl> MinimumMemoryForComputation ( computation , dfs_sequence , points_to_analysis , <nl> StatusOr < std : : vector < const HloInstruction * > > DefaultMemoryScheduler ( <nl> <nl> TF_ASSIGN_OR_RETURN ( <nl> std : : vector < const HloInstruction * > post_order_sequence , <nl> - PostOrderMemoryScheduler ( computation , points_to_analysis , size_function ) ) ; <nl> + PostOrderMemoryScheduler ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> const int64 post_order_memory , <nl> MinimumMemoryForComputation ( computation , post_order_sequence , <nl> StatusOr < std : : vector < const HloInstruction * > > DefaultMemoryScheduler ( <nl> VLOG ( 2 ) < < " Min - memory post order sequence : " <nl> < < HumanReadableNumBytes ( post_order_memory ) ; <nl> <nl> - TF_ASSIGN_OR_RETURN ( std : : vector < const HloInstruction * > reverse_dfs , <nl> - DFSMemorySchedulerReverse ( computation , points_to_analysis , <nl> - size_function ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + std : : vector < const HloInstruction * > reverse_dfs , <nl> + DFSMemorySchedulerReverse ( computation , points_to_analysis , size_function , <nl> + memory_by_computation ) ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> const int64 reverse_dfs_memory , <nl> MinimumMemoryForComputation ( computation , reverse_dfs , points_to_analysis , <nl> CreateMemoryMinimizingSequence ( const HloModule & module , <nl> SequentialHloOrdering : : HloModuleSequence sequence ; <nl> TF_ASSIGN_OR_RETURN ( std : : unique_ptr < TuplePointsToAnalysis > points_to_analysis , <nl> TuplePointsToAnalysis : : Run ( & module ) ) ; <nl> - for ( const auto * computation : module . MakeNonfusionComputations ( ) ) { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - sequence [ computation ] , <nl> - CreateMemoryMinimizingSequence ( * computation , * points_to_analysis , <nl> - size_function , algorithm ) ) ; <nl> + tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > memory_by_computation ; <nl> + for ( const auto * computation : module . MakeComputationPostOrder ( ) ) { <nl> + if ( ! computation - > IsFusionComputation ( ) ) { <nl> + TF_ASSIGN_OR_RETURN ( auto one_computation_sequence , <nl> + CreateMemoryMinimizingSequence ( <nl> + * computation , * points_to_analysis , size_function , <nl> + algorithm , memory_by_computation ) ) ; <nl> + memory_by_computation [ computation ] = <nl> + MinimumMemoryForComputation ( * computation , one_computation_sequence , <nl> + * points_to_analysis , size_function ) <nl> + . 
ValueOrDie ( ) ; <nl> + sequence [ computation ] = std : : move ( one_computation_sequence ) ; <nl> + } <nl> } <nl> return sequence ; <nl> } <nl> <nl> StatusOr < std : : vector < const HloInstruction * > > CreateMemoryMinimizingSequence ( <nl> const HloComputation & computation , <nl> - const LogicalBuffer : : SizeFunction & size_function , <nl> - const MemorySchedulerAlgorithm & algorithm ) { <nl> + const LogicalBuffer : : SizeFunction & size_function ) { <nl> CHECK ( ! computation . IsFusionComputation ( ) ) ; <nl> TF_ASSIGN_OR_RETURN ( std : : unique_ptr < TuplePointsToAnalysis > points_to_analysis , <nl> TuplePointsToAnalysis : : Run ( computation . parent ( ) ) ) ; <nl> + tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > empty_map ; <nl> return CreateMemoryMinimizingSequence ( computation , * points_to_analysis , <nl> - size_function , algorithm ) ; <nl> + size_function , nullptr , empty_map ) ; <nl> } <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / hlo_scheduling . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_scheduling . h <nl> StatusOr < int64 > MinimumMemoryForSequence ( <nl> const SequentialHloOrdering : : HloModuleSequence & module_sequence , <nl> const LogicalBuffer : : SizeFunction & size_function ) ; <nl> <nl> + / / Returns the minimum memory required to compute the given computation , <nl> + / / assuming no fragmentation . <nl> + StatusOr < int64 > MinimumMemoryForComputation ( <nl> + const HloComputation & computation , <nl> + const std : : vector < const HloInstruction * > & sequence , <nl> + const TuplePointsToAnalysis & points_to_analysis , <nl> + const LogicalBuffer : : SizeFunction & size_function ) ; <nl> + <nl> / / A memory scheduler computes an execution sequence for the HLO instructions in <nl> / / ' computation ' that minimizes peak memory , given a points - to analysis result <nl> / / that describes buffer aliasing , together with a target - specific size function <nl> / / that maps a tensor ' s logical size to its padded size . 
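[Editorial sketch] The typedef that follows widens the scheduler-algorithm signature so that every algorithm also receives the per-computation memory estimates gathered for callees. Assuming only the declarations in this header, a minimal conforming algorithm (hypothetical, mirroring PostOrderMemoryScheduler declared below) could look like:

StatusOr<std::vector<const HloInstruction*>> NaivePostOrderScheduler(
    const HloComputation& computation,
    const TuplePointsToAnalysis& points_to_analysis,
    const LogicalBuffer::SizeFunction& size_function,
    const tensorflow::gtl::FlatMap<const HloComputation*, int64>&
        memory_by_computation) {
  // Ignores the size function and the memory estimates entirely; returns a
  // plain post order of the computation's instructions.
  const auto& post_order = computation.MakeInstructionPostOrder();
  return std::vector<const HloInstruction*>{post_order.begin(),
                                            post_order.end()};
}

Such a function converts implicitly to MemorySchedulerAlgorithm and can be passed as the 'algorithm' argument of CreateMemoryMinimizingSequence.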
<nl> typedef std : : function < StatusOr < std : : vector < const HloInstruction * > > ( <nl> const HloComputation & , const TuplePointsToAnalysis & , <nl> - const LogicalBuffer : : SizeFunction & ) > <nl> + const LogicalBuffer : : SizeFunction & , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & ) > <nl> MemorySchedulerAlgorithm ; <nl> <nl> / / List scheduler <nl> StatusOr < std : : vector < const HloInstruction * > > ListMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) ; <nl> <nl> / / DFS - order scheduler <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) ; <nl> <nl> / / Naive Post Order scheduler <nl> StatusOr < std : : vector < const HloInstruction * > > PostOrderMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) ; <nl> <nl> / / DFS - order scheduler with reversed heuristics . This helps some cases ( see <nl> / / b / 78906799 ) . <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemorySchedulerReverse ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) ; <nl> <nl> / / The default scheduling algorithm . Runs both the list scheduler <nl> / / and the DFS scheduler , and chooses whichever returns a lower min - memory , <nl> StatusOr < std : : vector < const HloInstruction * > > DFSMemorySchedulerReverse ( <nl> StatusOr < std : : vector < const HloInstruction * > > DefaultMemoryScheduler ( <nl> const HloComputation & computation , <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> - const LogicalBuffer : : SizeFunction & size_function ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function , <nl> + const tensorflow : : gtl : : FlatMap < const HloComputation * , int64 > & <nl> + memory_by_computation ) ; <nl> <nl> / / Returns an HloModuleSequence which seeks to minimize the memory required for <nl> / / the computation . size_function is the function returning the number of bytes <nl> CreateMemoryMinimizingSequence ( const HloModule & module , <nl> const MemorySchedulerAlgorithm & algorithm = { } ) ; <nl> <nl> / / Overload of above that computes the sequence for a single computation . <nl> + / / Currently only used by the GPU backend . 
<nl> StatusOr < std : : vector < const HloInstruction * > > CreateMemoryMinimizingSequence ( <nl> const HloComputation & computation , <nl> - const LogicalBuffer : : SizeFunction & size_function , <nl> - const MemorySchedulerAlgorithm & algorithm = { } ) ; <nl> + const LogicalBuffer : : SizeFunction & size_function ) ; <nl> <nl> } / / namespace xla <nl> <nl> mmm a / tensorflow / compiler / xla / service / hlo_scheduling_test . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_scheduling_test . cc <nl> ENTRY root { <nl> instructions_by_name . at ( " e " ) ) ) ; <nl> } <nl> <nl> - / / The current scheduler is suboptimal , in that it does not account for the <nl> - / / memory used by subcomputations when choosing a schedule . <nl> - / / This test demonstrates the current behavior . <nl> - / / We are working on improving it ( b / 65409243 ) . <nl> - TEST_F ( HloSchedulingTest , SubcomputationsNotAccounted ) { <nl> + TEST_F ( HloSchedulingTest , ListAccountsForSubcomputations ) { <nl> / / % WhileCond ( cond_param : f32 [ 4 ] ) - > pred [ ] { <nl> / / % cond_param = f32 [ 4 ] { 0 } parameter ( 0 ) <nl> / / % constant = f32 [ 1 , 4 ] { 1 , 0 } constant ( f32 [ 1 , 4 ] { { 0 , 0 , 0 , 0 } } ) <nl> TEST_F ( HloSchedulingTest , SubcomputationsNotAccounted ) { <nl> <nl> module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> <nl> - TF_ASSERT_OK_AND_ASSIGN ( <nl> - SequentialHloOrdering : : HloModuleSequence sequence , <nl> - CreateMemoryMinimizingSequence ( * module , [ ] ( const BufferValue & buffer ) { <nl> - return ShapeUtil : : ByteSizeOf ( buffer . shape ( ) ) ; <nl> - } ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( SequentialHloOrdering : : HloModuleSequence sequence , <nl> + CreateMemoryMinimizingSequence ( <nl> + * module , <nl> + [ ] ( const BufferValue & buffer ) { <nl> + return ShapeUtil : : ByteSizeOf ( buffer . shape ( ) ) ; <nl> + } , <nl> + ListMemoryScheduler ) ) ; <nl> / / Verify that all instructions are in the sequence . <nl> EXPECT_EQ ( module - > entry_computation ( ) - > instruction_count ( ) , <nl> sequence . at ( module - > entry_computation ( ) ) . size ( ) ) ; <nl> SequentialHloOrdering ordering ( module . get ( ) , sequence ) ; <nl> - / / TODO ( b / 65409243 ) : while_loop is scheduled first by List ; it ' s thought to be <nl> - / / cheaper than transpose because the temporary memory needed for <nl> - / / subcomputations is ignored . If we count the temporary memory as part of <nl> - / / bytes_defined , then transpose would be scheduled first . Incidentally , <nl> - / / ignoring subcomputations results in a better schedule here . <nl> - EXPECT_TRUE ( ordering . ExecutesBefore ( while_loop , transpose ) ) ; <nl> - EXPECT_TRUE ( ordering . ExecutesBefore ( bcast , transpose ) ) ; <nl> + / / This schedule is an example of List ' s greedy heuristics being suboptimal . <nl> + / / The while_loop is more expensive than transpose , so it would have been <nl> + / / better to schedule it first , instead of during the busy time . <nl> + EXPECT_TRUE ( ordering . ExecutesBefore ( transpose , while_loop ) ) ; <nl> + EXPECT_TRUE ( ordering . ExecutesBefore ( transpose , bcast ) ) ; <nl> EXPECT_TRUE ( ordering . ExecutesBefore ( bcast , add ) ) ; <nl> EXPECT_TRUE ( ordering . ExecutesBefore ( transpose , add ) ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / instruction_fusion . cc <nl> ppp b / tensorflow / compiler / xla / service / instruction_fusion . 
cc <nl> StatusOr < bool > InstructionFusion : : Run ( HloModule * module ) { <nl> return changed ; <nl> } <nl> <nl> - HloInstruction * InstructionFusion : : Fuse ( HloInstruction * producer , <nl> - HloInstruction * consumer ) { <nl> + HloInstruction * InstructionFusion : : AddFusionInstruction ( <nl> + HloInstruction * producer , HloInstruction * consumer ) { <nl> HloInstruction * fusion_instruction ; <nl> - <nl> - VLOG ( 2 ) < < " Fusing " < < producer - > ToString ( ) < < " into " <nl> - < < consumer - > ToString ( ) ; <nl> auto kind = ChooseKind ( producer , consumer ) ; <nl> if ( consumer - > opcode ( ) = = HloOpcode : : kFusion ) { <nl> fusion_instruction = consumer ; <nl> HloInstruction * InstructionFusion : : Fuse ( HloInstruction * producer , <nl> HloInstruction : : CreateFusion ( consumer - > shape ( ) , kind , consumer ) ) ; <nl> TF_CHECK_OK ( computation_ - > ReplaceInstruction ( consumer , fusion_instruction ) ) ; <nl> } <nl> + return fusion_instruction ; <nl> + } <nl> <nl> + HloInstruction * InstructionFusion : : Fuse ( HloInstruction * producer , <nl> + HloInstruction * consumer ) { <nl> + VLOG ( 2 ) < < " Fusing " < < producer - > ToString ( ) < < " into " <nl> + < < consumer - > ToString ( ) ; <nl> + HloInstruction * fusion_instruction = AddFusionInstruction ( producer , consumer ) ; <nl> fusion_instruction - > FuseInstruction ( producer ) ; <nl> return fusion_instruction ; <nl> } <nl> <nl> + HloInstruction * InstructionFusion : : FuseIntoMultiOutput ( <nl> + HloInstruction * producer , HloInstruction * consumer ) { <nl> + VLOG ( 2 ) < < " Multi - output fusing " < < producer - > ToString ( ) < < " into " <nl> + < < consumer - > ToString ( ) ; <nl> + HloInstruction * fusion_instruction = AddFusionInstruction ( producer , consumer ) ; <nl> + fusion_instruction - > FuseInstructionIntoMultiOutput ( producer ) ; <nl> + return fusion_instruction ; <nl> + } <nl> + <nl> bool InstructionFusion : : ShouldFuse ( HloInstruction * consumer , <nl> int64 operand_index ) { <nl> HloInstruction * producer = consumer - > mutable_operand ( operand_index ) ; <nl> mmm a / tensorflow / compiler / xla / service / instruction_fusion . h <nl> ppp b / tensorflow / compiler / xla / service / instruction_fusion . h <nl> class InstructionFusion : public HloPassInterface { <nl> virtual HloInstruction * Fuse ( HloInstruction * producer , <nl> HloInstruction * consumer ) ; <nl> <nl> + / / Creates a new fusion instruction containing ` producer ` and ` consumer ` . A <nl> + / / tuple is added as the fusion instruction ' s root , which consumes from both , <nl> + / / ` producer ` and ` consumer ` . This style of fusion is referred to as <nl> + / / multi - output fusion . <nl> + virtual HloInstruction * FuseIntoMultiOutput ( HloInstruction * producer , <nl> + HloInstruction * consumer ) ; <nl> + <nl> / / An " effectively unary " operation is one that has at most one " large " <nl> / / input with the others being negligible in terms of memory usage . <nl> / / We use " has a smaller true rank than the output " as a heuristic <nl> class InstructionFusion : public HloPassInterface { <nl> / / The set of producers whose consumers we cannot fuse into . <nl> using DoNotFuseSet = std : : unordered_set < HloInstruction * > ; <nl> <nl> + HloInstruction * AddFusionInstruction ( HloInstruction * producer , <nl> + HloInstruction * consumer ) ; <nl> + <nl> / / Whether or not we can fuse producer into consumer on all paths <nl> / / from the producer to the consumer where nodes are HLOs and edges are uses . 
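To make the multi-output fusion shape concrete, here is an illustrative before/after for FuseIntoMultiOutput on a tiny hypothetical graph (the tuple element order follows the FuseInstructionsIntoMultiOutput test added in this change, whose fused root is tuple(tanh, abs)):

// before:
//   p0   = parameter(0)
//   abs  = abs(p0)
//   tanh = tanh(abs)
//   add  = add(abs, tanh)
//
// after FuseIntoMultiOutput(abs, tanh):
//   p0     = parameter(0)
//   fusion = fusion(p0)   // fused root: tuple(tanh(abs(p0)), abs(p0))
//   gte0   = get-tuple-element(fusion), index=0   // the tanh value
//   gte1   = get-tuple-element(fusion), index=1   // the abs value
//   add    = add(gte1, gte0)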
<nl> bool CanFuseOnAllPaths ( HloInstruction * producer , HloInstruction * consumer , <nl> mmm a / tensorflow / compiler / xla / service / instruction_fusion_test . cc <nl> ppp b / tensorflow / compiler / xla / service / instruction_fusion_test . cc <nl> namespace op = xla : : testing : : opcode_matchers ; <nl> <nl> using InstructionFusionTest = HloTestBase ; <nl> <nl> + / / Subclass of InstructionFusion exposing the protected methods Fuse and <nl> + / / FuseIntoMultiOutput for testing . <nl> + class InstructionFusionForTesting : public InstructionFusion { <nl> + public : <nl> + explicit InstructionFusionForTesting ( HloModule * module ) <nl> + : InstructionFusion ( InstructionFusion : : IsExpensive ) { <nl> + module_ = module ; <nl> + computation_ = module - > entry_computation ( ) ; <nl> + } <nl> + <nl> + HloInstruction * Fuse ( HloInstruction * producer , <nl> + HloInstruction * consumer ) override { <nl> + return InstructionFusion : : Fuse ( producer , consumer ) ; <nl> + } <nl> + <nl> + HloInstruction * FuseIntoMultiOutput ( HloInstruction * producer , <nl> + HloInstruction * consumer ) override { <nl> + return InstructionFusion : : FuseIntoMultiOutput ( producer , consumer ) ; <nl> + } <nl> + } ; <nl> + <nl> + TEST_F ( InstructionFusionTest , FuseInstructions ) { <nl> + auto module = tools : : Parse ( R " ( <nl> + HloModule test_module <nl> + ENTRY entry_computation { <nl> + p0 = f32 [ 4 , 3 ] { 1 , 0 } parameter ( 0 ) <nl> + add = f32 [ 4 , 3 ] { 1 , 0 } add ( p0 , p0 ) <nl> + ROOT sub = f32 [ 4 , 3 ] { 1 , 0 } subtract ( add , p0 ) <nl> + } ) " ) <nl> + . ValueOrDie ( ) ; <nl> + HloInstruction * sub = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + HloInstruction * add = sub - > mutable_operand ( 0 ) ; <nl> + HloInstruction * fusion = <nl> + InstructionFusionForTesting ( module . get ( ) ) . Fuse ( add , sub ) ; <nl> + <nl> + ASSERT_THAT ( fusion , op : : Fusion ( ) ) < < module - > ToString ( ) ; <nl> + EXPECT_THAT ( fusion - > fused_expression_root ( ) , <nl> + op : : Subtract ( op : : Add ( ) , op : : Parameter ( ) ) ) <nl> + < < module - > ToString ( ) ; <nl> + } <nl> + <nl> + TEST_F ( InstructionFusionTest , FuseIntoFusionInstruction ) { <nl> + auto module = tools : : Parse ( R " ( <nl> + HloModule test_module <nl> + fused_computation { <nl> + p1 = f32 [ 4 , 3 ] parameter ( 0 ) <nl> + add = f32 [ 4 , 3 ] add ( p1 , p1 ) <nl> + } <nl> + ENTRY entry_computation { <nl> + p0 = f32 [ 4 , 3 ] parameter ( 0 ) <nl> + abs = f32 [ 4 , 3 ] abs ( p0 ) <nl> + ROOT fusion = f32 [ 4 , 3 ] fusion ( abs ) , kind = kLoop , calls = fused_computation <nl> + } ) " ) <nl> + . ValueOrDie ( ) ; <nl> + HloInstruction * root = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + HloInstruction * abs = root - > mutable_operand ( 0 ) ; <nl> + HloInstruction * fusion = <nl> + InstructionFusionForTesting ( module . get ( ) ) . 
Fuse ( abs , root ) ; <nl> + <nl> + ASSERT_THAT ( fusion , op : : Fusion ( ) ) < < module - > ToString ( ) ; <nl> + EXPECT_THAT ( fusion - > fused_expression_root ( ) , op : : Add ( op : : Abs ( ) , op : : Abs ( ) ) ) <nl> + < < module - > ToString ( ) ; <nl> + } <nl> + <nl> + TEST_F ( InstructionFusionTest , FuseInstructionsIntoMultiOutput ) { <nl> + auto module = tools : : Parse ( R " ( <nl> + HloModule test_module <nl> + ENTRY entry_computation { <nl> + p0 = f32 [ 4 , 3 ] { 1 , 0 } parameter ( 0 ) <nl> + abs = f32 [ 4 , 3 ] { 1 , 0 } abs ( p0 ) <nl> + tanh = f32 [ 4 , 3 ] { 1 , 0 } tanh ( abs ) <nl> + ROOT add = f32 [ 4 , 3 ] { 1 , 0 } add ( abs , tanh ) <nl> + } ) " ) <nl> + . ValueOrDie ( ) ; <nl> + HloInstruction * root = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + HloInstruction * abs = root - > mutable_operand ( 0 ) ; <nl> + HloInstruction * tanh = root - > mutable_operand ( 1 ) ; <nl> + HloInstruction * fusion = <nl> + InstructionFusionForTesting ( module . get ( ) ) . FuseIntoMultiOutput ( abs , tanh ) ; <nl> + <nl> + ASSERT_THAT ( fusion , op : : Fusion ( ) ) < < module - > ToString ( ) ; <nl> + EXPECT_THAT ( fusion - > fused_expression_root ( ) , op : : Tuple ( op : : Tanh ( ) , op : : Abs ( ) ) ) <nl> + < < module - > ToString ( ) ; <nl> + } <nl> + <nl> TEST_F ( InstructionFusionTest , PotentialBitcastReshapeOfParameterUnfused ) { <nl> HloComputation : : Builder builder ( TestName ( ) ) ; <nl> auto param0 = builder . AddInstruction ( <nl> mmm a / tensorflow / compiler / xla / service / interpreter / compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / interpreter / compiler . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / reshape_mover . h " <nl> # include " tensorflow / compiler / xla / service / while_loop_simplifier . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> deleted file mode 100644 <nl> index 79dfd1e409f15 . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / service / liveness_util . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> - <nl> - # include < algorithm > <nl> - # include < utility > <nl> - # include < vector > <nl> - <nl> - # include " tensorflow / compiler / xla / service / hlo_instruction . h " <nl> - # include " tensorflow / compiler / xla / service / logical_buffer . 
h " <nl> - # include " tensorflow / compiler / xla / service / tuple_points_to_analysis . h " <nl> - # include " tensorflow / compiler / xla / shape_util . h " <nl> - # include " tensorflow / compiler / xla / types . h " <nl> - # include " tensorflow / compiler / xla / util . h " <nl> - <nl> - namespace xla { <nl> - <nl> - bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> - const ShapeIndex & index , <nl> - const HloInstruction * user , <nl> - const TuplePointsToAnalysis & points_to_analysis ) { <nl> - CHECK ( user - > IsUserOf ( operand ) ) <nl> - < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> - if ( user - > opcode ( ) = = HloOpcode : : kGetTupleElement & & ! index . empty ( ) ) { <nl> - / / GetTupleElement instructions only access the top - level buffer of their <nl> - / / operand . <nl> - return true ; <nl> - } else if ( user - > opcode ( ) = = HloOpcode : : kFusion & & <nl> - user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop ) { <nl> - / / Find fusion parameter associated with ' operand ' . <nl> - auto it = std : : find_if ( <nl> - user - > fused_parameters ( ) . begin ( ) , user - > fused_parameters ( ) . end ( ) , <nl> - [ = ] ( HloInstruction * fused_param ) { <nl> - return user - > operand ( fused_param - > parameter_number ( ) ) = = operand ; <nl> - } ) ; <nl> - CHECK ( it ! = user - > fused_parameters ( ) . end ( ) ) ; <nl> - / / Iterate through all users of all buffer aliases of the buffer in the <nl> - / / points - to set of fusion parameter at ' index ' . <nl> - / / Return false if any uses are detected at ' index ' , returns true otherwise . <nl> - const LogicalBuffer * buffer = <nl> - points_to_analysis . GetBufferDefinedAt ( * it , index ) . ValueOrDie ( ) ; <nl> - for ( const BufferAlias & alias : <nl> - points_to_analysis . GetBufferAliases ( * buffer ) ) { <nl> - for ( HloInstruction * alias_user : alias . instruction ( ) - > users ( ) ) { <nl> - if ( DoesNotUseOperandBuffer ( alias . instruction ( ) , alias . index ( ) , <nl> - alias_user , points_to_analysis ) ) { <nl> - continue ; <nl> - } <nl> - / / Return false : use detected at ' buffer ' - > ' alias ' - > ' alias_user ' . <nl> - return false ; <nl> - } <nl> - } <nl> - / / Return true : found no uses of ' operand ' at ' index ' in ' user ' . <nl> - return true ; <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> - const ShapeIndex & index , <nl> - const HloInstruction * user , <nl> - const HloDataflowAnalysis & dataflow ) { <nl> - CHECK ( user - > IsUserOf ( operand ) ) <nl> - < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> - if ( user - > opcode ( ) = = HloOpcode : : kFusion & & <nl> - user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop ) { <nl> - / / Find fusion parameter associated with ' operand ' . <nl> - HloInstruction * fusion_param = <nl> - user - > fused_parameter ( user - > operand_index ( operand ) ) ; <nl> - / / Iterate through all users of all uses of the fusion parameter value . <nl> - / / Return false if any uses are detected , returns true otherwise . <nl> - const HloValue & value = dataflow . GetValueDefinedAt ( fusion_param , index ) ; <nl> - return value . uses ( ) . empty ( ) ; <nl> - } else { <nl> - / / Return false if no value at ' operand ' and ' index ' is used at ' user ' . <nl> - for ( const HloValue * value : <nl> - dataflow . GetValueSet ( operand , index ) . 
values ( ) ) { <nl> - for ( const HloUse & use : value - > uses ( ) ) { <nl> - if ( use . instruction = = user ) { <nl> - return false ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - namespace { <nl> - <nl> - / / Returns all uses of all aliases of ' instruction ' at ' index ' in ' uses ' . <nl> - / / Each use in ' uses ' is a pair ( HloInstruction * user , int64 operand_index ) <nl> - / / where ' user ' is a user of an alias of ' instruction ' at ' index ' , and <nl> - / / ' operand_index ' is the operand index at which the alias appears in the <nl> - / / operand list of ' user ' . <nl> - std : : vector < std : : pair < HloInstruction * , int64 > > GetAllUsesOfInstructionAtIndex ( <nl> - HloInstruction * instruction , const ShapeIndex & index , <nl> - const TuplePointsToAnalysis & points_to_analysis ) { <nl> - std : : vector < std : : pair < HloInstruction * , int64 > > uses ; <nl> - const PointsToSet : : BufferList & points_to = <nl> - points_to_analysis . GetPointsToSet ( instruction ) . element ( index ) ; <nl> - for ( const LogicalBuffer * buffer : points_to ) { <nl> - for ( const BufferAlias & alias : <nl> - points_to_analysis . GetBufferAliases ( * buffer ) ) { <nl> - for ( HloInstruction * alias_user : alias . instruction ( ) - > users ( ) ) { <nl> - if ( DoesNotUseOperandBuffer ( alias . instruction ( ) , alias . index ( ) , <nl> - alias_user , points_to_analysis ) ) { <nl> - continue ; <nl> - } <nl> - for ( int64 op_idx : alias_user - > OperandIndices ( alias . instruction ( ) ) ) { <nl> - uses . emplace_back ( alias_user , op_idx ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - return uses ; <nl> - } <nl> - <nl> - / / Returns true if there is exactly one use of ' operand ' at ' operand_index ' <nl> - / / in ' fusion . fused_instructions ' , where the singleton use is the fused <nl> - / / root at operand index ' use_operand_index ' . Returns false otherwise . <nl> - / / <nl> - / / REQUIRES : ' fusion ' opcode is a kFusion instruction . <nl> - bool HasUniqueFusedUseOfOperandAt ( <nl> - HloInstruction * operand , const ShapeIndex & operand_index , <nl> - HloInstruction * fusion , const int64 use_operand_index , <nl> - const TuplePointsToAnalysis & points_to_analysis ) { <nl> - CHECK_EQ ( HloOpcode : : kFusion , fusion - > opcode ( ) ) ; <nl> - / / Check that ' operand ' is unique in the operand list of ' fusion ' . <nl> - if ( fusion - > OperandIndices ( operand ) . size ( ) > 1 ) { <nl> - return false ; <nl> - } <nl> - / / Find fusion parameter associated with ' operand ' . <nl> - const auto & fused_params = fusion - > fused_parameters ( ) ; <nl> - auto fused_param_it = std : : find_if ( <nl> - fused_params . begin ( ) , fused_params . end ( ) , <nl> - [ & ] ( HloInstruction * fused_param ) { <nl> - return fusion - > operand ( fused_param - > parameter_number ( ) ) = = operand ; <nl> - } ) ; <nl> - if ( fused_param_it = = fused_params . end ( ) ) { <nl> - return false ; <nl> - } <nl> - auto * fused_param = * fused_param_it ; <nl> - / / Get all uses of ' operand ' at ' index ' from ' fusion . fused_instructions ' . <nl> - auto fused_param_uses = GetAllUsesOfInstructionAtIndex ( <nl> - fused_param , operand_index , points_to_analysis ) ; <nl> - / / Return true iff there is exactly one use of ' operand ' at ' index ' , and <nl> - / / this singleton use is the fused root ( at index in ' use_operand_indices ' ) . <nl> - return fused_param_uses . size ( ) = = 1 & & <nl> - fused_param_uses [ 0 ] . 
first = = fusion - > fused_expression_root ( ) & & <nl> - fused_param_uses [ 0 ] . second = = use_operand_index ; <nl> - } <nl> - <nl> - } / / namespace <nl> - <nl> - / / User and operand can share buffers iff both instructions emit the same shape <nl> - / / and layout , and ' user ' meets one of the following qualifications : <nl> - / / <nl> - / / ( 1 ) Is element - wise . Or . . . <nl> - / / ( 2 ) Is a loop fusion instruction where the only use of ' operand ' at ' index ' <nl> - / / in the set ' user . fused_instructions ' is a DynamicUpdateSlice fused root <nl> - / / at operand 0 . Or . . . <nl> - / / ( 3 ) Is a kDot - > kAdd output fusion instruction where the only use of <nl> - / / ' operand ' at ' index ' in the set ' user . fused_instructions ' is a kAdd fused <nl> - / / root at operand 0 or 1 . Or . . . <nl> - / / ( 4 ) The ' user ' of ' operand ' is DynamicUpdateSlice or While at operand index <nl> - / / 0 . <nl> - / / <nl> - / / ( 2 ) and ( 3 ) can only be determined if points - to analysis is available . <nl> - bool CanShareOperandBufferWithUser ( <nl> - HloInstruction * operand , const ShapeIndex & operand_index , <nl> - HloInstruction * user , const ShapeIndex & user_index , <nl> - const TuplePointsToAnalysis & points_to_analysis ) { <nl> - CHECK ( user - > IsUserOf ( operand ) ) <nl> - < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> - const Shape & operand_subshape = <nl> - ShapeUtil : : GetSubshape ( operand - > shape ( ) , operand_index ) ; <nl> - const Shape & user_subshape = <nl> - ShapeUtil : : GetSubshape ( user - > shape ( ) , user_index ) ; <nl> - / / Check that operand and user emit the same shape and layout . <nl> - if ( ! ShapeUtil : : Equal ( operand_subshape , user_subshape ) ) { <nl> - return false ; <nl> - } <nl> - if ( user - > opcode ( ) = = HloOpcode : : kFusion ) { <nl> - if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop & & <nl> - user - > fused_expression_root ( ) - > opcode ( ) = = <nl> - HloOpcode : : kDynamicUpdateSlice ) { <nl> - / / Loop fusion with kDynamicUpdateSlice fused root . <nl> - / / <nl> - / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> - / / ' operand_index ' , and this singleton use is the fused root at operand <nl> - / / index 0 . <nl> - return HasUniqueFusedUseOfOperandAt ( operand , operand_index , user , 0 , <nl> - points_to_analysis ) ; <nl> - } else if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kOutput & & <nl> - user - > fused_expression_root ( ) - > opcode ( ) = = HloOpcode : : kAdd ) { <nl> - / / Output fusion with kAdd fused root . <nl> - <nl> - / / Check if one operand of kAdd fused root is kDot or kConvolution . <nl> - auto * add = user - > fused_expression_root ( ) ; <nl> - auto add_operand_it = <nl> - std : : find_if ( add - > operands ( ) . begin ( ) , add - > operands ( ) . end ( ) , <nl> - [ & ] ( HloInstruction * operand ) { <nl> - return operand - > opcode ( ) = = HloOpcode : : kConvolution | | <nl> - operand - > opcode ( ) = = HloOpcode : : kDot ; <nl> - } ) ; <nl> - if ( add_operand_it = = add - > operands ( ) . end ( ) ) { <nl> - return false ; <nl> - } <nl> - auto * matched_add_operand = * add_operand_it ; <nl> - / / Calculate operand index of ' add ' operand which was not matched above . <nl> - const int64 other_add_operand_index = <nl> - matched_add_operand = = add - > operand ( 0 ) ? 
1 : 0 ; <nl> - / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> - / / ' operand_index ' , and this singleton use is the fused root ( at operand <nl> - / / index ' other_add_operand_index ' ) . <nl> - return HasUniqueFusedUseOfOperandAt ( operand , operand_index , user , <nl> - other_add_operand_index , <nl> - points_to_analysis ) ; <nl> - } <nl> - } <nl> - if ( user - > opcode ( ) = = HloOpcode : : kDynamicUpdateSlice | | <nl> - user - > opcode ( ) = = HloOpcode : : kWhile ) { <nl> - / / We eliminated other users in BufferLiveness : : live_range_strictly_before , <nl> - / / so here we just need to check that the use is at operand index 0 . <nl> - std : : vector < int64 > operand_indices = user - > OperandIndices ( operand ) ; <nl> - return operand_indices . size ( ) = = 1 & & operand_indices [ 0 ] = = 0 ; <nl> - } <nl> - if ( user - > opcode ( ) = = HloOpcode : : kCall ) { <nl> - / / TODO ( b / 62548313 ) : Remove when buffer assignment is module scoped and <nl> - / / does not assign buffers to calls . <nl> - / / Find called computation parameter associated with ' operand ' . <nl> - const std : : vector < int64 > operand_indices = user - > OperandIndices ( operand ) ; <nl> - if ( operand_indices . size ( ) > 1 ) { <nl> - return false ; <nl> - } <nl> - CHECK_EQ ( 1 , operand_indices . size ( ) ) ; <nl> - auto * param = user - > to_apply ( ) - > parameter_instruction ( operand_indices [ 0 ] ) ; <nl> - / / Get all uses of ' operand ' at ' index ' in called computation . <nl> - auto param_uses = GetAllUsesOfInstructionAtIndex ( param , operand_index , <nl> - points_to_analysis ) ; <nl> - <nl> - / / Return true iff : <nl> - / / * ) There exists exactly one use of ' operand ' in called computation . <nl> - / / * ) The unique use is by the root instruction of called computation . <nl> - / / ( Note : we check the root of the called computation , because the <nl> - / / root result buffer is required to alias with the Call result buffer ) . <nl> - / / * ) The root instruction of the called computation is element - wise on <nl> - / / ' operand ' . <nl> - auto * callee_root = user - > to_apply ( ) - > root_instruction ( ) ; <nl> - return param_uses . size ( ) = = 1 & & param_uses [ 0 ] . first = = callee_root & & <nl> - callee_root - > IsElementwiseOnOperand ( param_uses [ 0 ] . second ) ; <nl> - } <nl> - / / Check if ' user ' is element - wise . <nl> - return user - > IsElementwise ( ) ; <nl> - } <nl> - <nl> - bool CanShareOperandBufferWithUser ( HloInstruction * operand , <nl> - const ShapeIndex & operand_index , <nl> - HloInstruction * user , <nl> - const ShapeIndex & user_index , <nl> - const HloDataflowAnalysis & dataflow ) { <nl> - CHECK ( user - > IsUserOf ( operand ) ) <nl> - < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> - const Shape & operand_subshape = <nl> - ShapeUtil : : GetSubshape ( operand - > shape ( ) , operand_index ) ; <nl> - const Shape & user_subshape = <nl> - ShapeUtil : : GetSubshape ( user - > shape ( ) , user_index ) ; <nl> - / / Check that operand and user emit the same shape and layout . <nl> - if ( ! ShapeUtil : : Equal ( operand_subshape , user_subshape ) ) { <nl> - return false ; <nl> - } <nl> - <nl> - if ( user - > opcode ( ) = = HloOpcode : : kFusion ) { <nl> - / / Get the parameter associated with ' operand ' ; <nl> - HloInstruction * fusion_param = <nl> - user - > fused_parameter ( user - > operand_index ( operand ) ) ; <nl> - <nl> - const HloValue & value = <nl> - dataflow . 
GetValueDefinedAt ( fusion_param , operand_index ) ; <nl> - if ( value . uses ( ) . size ( ) ! = 1 ) { <nl> - return false ; <nl> - } <nl> - const HloUse & use = value . uses ( ) [ 0 ] ; <nl> - <nl> - if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop & & <nl> - user - > fused_expression_root ( ) - > opcode ( ) = = <nl> - HloOpcode : : kDynamicUpdateSlice ) { <nl> - / / Loop fusion with kDynamicUpdateSlice fused root . <nl> - / / <nl> - / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> - / / ' operand_index ' , and this singleton use is the fused root at operand <nl> - / / index 0 . <nl> - return use . instruction = = user - > fused_expression_root ( ) & & <nl> - use . operand_number = = 0 ; <nl> - } else if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kOutput & & <nl> - user - > fused_expression_root ( ) - > opcode ( ) = = HloOpcode : : kAdd ) { <nl> - / / Output fusion with kAdd fused root . <nl> - <nl> - / / Check if one operand of kAdd fused root is kDot , or kConvolution . <nl> - auto * add = user - > fused_expression_root ( ) ; <nl> - auto add_operand_it = <nl> - std : : find_if ( add - > operands ( ) . begin ( ) , add - > operands ( ) . end ( ) , <nl> - [ & ] ( HloInstruction * operand ) { <nl> - return operand - > opcode ( ) = = HloOpcode : : kConvolution | | <nl> - operand - > opcode ( ) = = HloOpcode : : kDot ; <nl> - } ) ; <nl> - if ( add_operand_it = = add - > operands ( ) . end ( ) ) { <nl> - return false ; <nl> - } <nl> - auto * matched_add_operand = * add_operand_it ; <nl> - / / Calculate operand index of ' add ' operand which was not matched above . <nl> - const int64 other_add_operand_index = <nl> - matched_add_operand = = add - > operand ( 0 ) ? 1 : 0 ; <nl> - / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> - / / ' operand_index ' , and this singleton use is the fused root ( at operand <nl> - / / index ' other_add_operand_index ' ) . <nl> - return use . instruction = = user - > fused_expression_root ( ) & & <nl> - use . operand_number = = other_add_operand_index ; <nl> - } <nl> - } <nl> - if ( user - > opcode ( ) = = HloOpcode : : kDynamicUpdateSlice | | <nl> - user - > opcode ( ) = = HloOpcode : : kWhile ) { <nl> - / / We eliminated other users in BufferLiveness : : live_range_strictly_before , <nl> - / / so here we just need to check that the use is at operand index 0 . <nl> - std : : vector < int64 > operand_indices = user - > OperandIndices ( operand ) ; <nl> - return operand_indices . size ( ) = = 1 & & operand_indices [ 0 ] = = 0 ; <nl> - } <nl> - if ( user - > opcode ( ) = = HloOpcode : : kCall ) { <nl> - / / Get all uses of value defined by ' operand ' at ' operand_index ' . <nl> - const auto & uses = <nl> - dataflow . GetValueDefinedAt ( operand , operand_index ) . uses ( ) ; <nl> - / / Return true iff : <nl> - / / * ) There exists two uses of ' operand ' . <nl> - / / * ) One use is by ' user ' ( caller ) . <nl> - / / * ) One use is by root instruction of called computation ( callee root ) . <nl> - / / ( Note : we check the root of the called computation , because the <nl> - / / root result buffer is required to alias with the Call result buffer ) . <nl> - / / * ) The root instruction of the called computation is element - wise on <nl> - / / ' operand ' . <nl> - const bool found_caller_use = <nl> - std : : find_if ( uses . begin ( ) , uses . end ( ) , [ user ] ( const HloUse & use ) { <nl> - return use . instruction = = user ; <nl> - } ) ! = uses . 
end ( ) ; <nl> - auto * callee_root = user - > to_apply ( ) - > root_instruction ( ) ; <nl> - const bool found_elementwise_callee_use = <nl> - std : : find_if ( <nl> - uses . begin ( ) , uses . end ( ) , [ callee_root ] ( const HloUse & use ) { <nl> - return use . instruction = = callee_root & & <nl> - callee_root - > IsElementwiseOnOperand ( use . operand_number ) ; <nl> - } ) ! = uses . end ( ) ; <nl> - return uses . size ( ) = = 2 & & found_caller_use & & found_elementwise_callee_use ; <nl> - } <nl> - / / Check if ' user ' is element - wise . <nl> - return user - > IsElementwise ( ) ; <nl> - } <nl> - <nl> - } / / namespace xla <nl> deleted file mode 100644 <nl> index 28ef991880039 . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / service / liveness_util . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - / / A collection of utilities on the HLO graph . <nl> - <nl> - # ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LIVENESS_UTIL_H_ <nl> - # define TENSORFLOW_COMPILER_XLA_SERVICE_LIVENESS_UTIL_H_ <nl> - <nl> - # include " tensorflow / compiler / xla / service / hlo_dataflow_analysis . h " <nl> - # include " tensorflow / compiler / xla / service / hlo_instruction . h " <nl> - # include " tensorflow / compiler / xla / service / tuple_points_to_analysis . h " <nl> - # include " tensorflow / compiler / xla / shape_util . h " <nl> - # include " tensorflow / compiler / xla / types . h " <nl> - <nl> - namespace xla { <nl> - <nl> - / / Returns true if ' user ' cannot possibly use the buffer at ' index ' in <nl> - / / ' operand ' . Returns false otherwise . <nl> - / / <nl> - / / REQUIRES : ' operand ' is an operand of ' user ' . <nl> - / / <nl> - / / TODO ( b / 65835246 ) : Remove TuplePointsToAnalysis overload when all users have <nl> - / / moved over to the dataflow overload . <nl> - bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> - const ShapeIndex & index , <nl> - const HloInstruction * user , <nl> - const TuplePointsToAnalysis & points_to_analysis ) ; <nl> - bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> - const ShapeIndex & index , <nl> - const HloInstruction * user , <nl> - const HloDataflowAnalysis & dataflow ) ; <nl> - <nl> - / / Returns true if ' user ' ( at ' user_index ' ) can share a buffer with its operand <nl> - / / ' operand ' ( at ' operand_index ' ) . Returns false otherwise . <nl> - / / <nl> - / / REQUIRES : ' operand ' is an operand of ' user ' . <nl> - / / <nl> - / / TODO ( b / 65835246 ) : Remove TuplePointsToAnalysis overload when all users have <nl> - / / moved over to the dataflow overload . 
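Because liveness_util.{h,cc} is deleted here and its points-to variants become member functions of TuplePointsToAnalysis (see the additions to tuple_points_to_analysis.cc below), call sites migrate roughly as in this sketch (illustrative caller code, assuming an operand, index, user, and points_to_analysis pointer already in scope; not taken from this commit):

// before: free function from the now-deleted liveness_util.h
bool no_use = DoesNotUseOperandBuffer(operand, index, user,
                                      *points_to_analysis);
// after: member function on the analysis object, as added in this change
bool no_use_now =
    points_to_analysis->DoesNotUseOperandBuffer(operand, index, user);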
<nl> - bool CanShareOperandBufferWithUser ( <nl> - HloInstruction * operand , const ShapeIndex & operand_index , <nl> - HloInstruction * user , const ShapeIndex & user_index , <nl> - const TuplePointsToAnalysis & points_to_analysis ) ; <nl> - bool CanShareOperandBufferWithUser ( HloInstruction * operand , <nl> - const ShapeIndex & operand_index , <nl> - HloInstruction * user , <nl> - const ShapeIndex & user_index , <nl> - const HloDataflowAnalysis & dataflow ) ; <nl> - <nl> - } / / namespace xla <nl> - <nl> - # endif / / TENSORFLOW_COMPILER_XLA_SERVICE_LIVENESS_UTIL_H_ <nl> deleted file mode 100644 <nl> index c01b52df62ee6 . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / service / liveness_util_test . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> - <nl> - # include < memory > <nl> - <nl> - # include " tensorflow / compiler / xla / service / hlo_module . h " <nl> - # include " tensorflow / compiler / xla / service / tuple_points_to_analysis . h " <nl> - # include " tensorflow / compiler / xla / tests / hlo_test_base . h " <nl> - <nl> - namespace xla { <nl> - namespace { <nl> - <nl> - class PointsToAnalysisTestBase : public HloTestBase { <nl> - protected : <nl> - void BuildModule ( std : : unique_ptr < HloComputation > computation ) { <nl> - module_ = CreateNewModule ( ) ; <nl> - computation_ = module_ - > AddEntryComputation ( std : : move ( computation ) ) ; <nl> - } <nl> - <nl> - void RunAnalysis ( ) { <nl> - CHECK_NOTNULL ( module_ . get ( ) ) ; <nl> - points_to_analysis_ = <nl> - TuplePointsToAnalysis : : Run ( module_ . get ( ) ) . ConsumeValueOrDie ( ) ; <nl> - dataflow_analysis_ = HloDataflowAnalysis : : Run ( * module_ ) . ConsumeValueOrDie ( ) ; <nl> - } <nl> - <nl> - void BuildModuleAndRunAnalysis ( std : : unique_ptr < HloComputation > computation ) { <nl> - BuildModule ( std : : move ( computation ) ) ; <nl> - RunAnalysis ( ) ; <nl> - } <nl> - <nl> - std : : unique_ptr < HloModule > module_ ; <nl> - HloComputation * computation_ = nullptr ; <nl> - std : : unique_ptr < TuplePointsToAnalysis > points_to_analysis_ ; <nl> - std : : unique_ptr < HloDataflowAnalysis > dataflow_analysis_ ; <nl> - } ; <nl> - <nl> - class DoesNotUseOperandBufferTest : public PointsToAnalysisTestBase { } ; <nl> - <nl> - TEST_F ( DoesNotUseOperandBufferTest , GetTupleElement ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape elem_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - auto tuple = builder . 
AddInstruction ( HloInstruction : : CreateParameter ( <nl> - 0 , ShapeUtil : : MakeTupleShape ( { elem_shape , elem_shape } ) , " tuple " ) ) ; <nl> - auto gte0 = builder . AddInstruction ( <nl> - HloInstruction : : CreateGetTupleElement ( elem_shape , tuple , 0 ) ) ; <nl> - auto gte1 = builder . AddInstruction ( <nl> - HloInstruction : : CreateGetTupleElement ( elem_shape , tuple , 1 ) ) ; <nl> - builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( elem_shape , HloOpcode : : kAdd , gte0 , gte1 ) ) ; <nl> - <nl> - BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> - <nl> - / / GetTupleElement instructions only access the top - level buffer of their <nl> - / / operand . <nl> - EXPECT_TRUE ( DoesNotUseOperandBuffer ( tuple , { 0 } , gte0 , * points_to_analysis_ ) ) ; <nl> - EXPECT_TRUE ( DoesNotUseOperandBuffer ( tuple , { 1 } , gte1 , * points_to_analysis_ ) ) ; <nl> - EXPECT_FALSE ( DoesNotUseOperandBuffer ( tuple , { } , gte0 , * points_to_analysis_ ) ) ; <nl> - EXPECT_FALSE ( DoesNotUseOperandBuffer ( tuple , { } , gte1 , * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( DoesNotUseOperandBuffer ( tuple , { 0 } , gte0 , * dataflow_analysis_ ) ) ; <nl> - EXPECT_TRUE ( DoesNotUseOperandBuffer ( tuple , { 1 } , gte1 , * dataflow_analysis_ ) ) ; <nl> - EXPECT_FALSE ( DoesNotUseOperandBuffer ( tuple , { } , gte0 , * dataflow_analysis_ ) ) ; <nl> - EXPECT_FALSE ( DoesNotUseOperandBuffer ( tuple , { } , gte1 , * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( DoesNotUseOperandBufferTest , FusedDynamicUpdateSlice ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> - 0 , ShapeUtil : : MakeTupleShape ( { data_shape , data_shape } ) , " tuple " ) ) ; <nl> - auto gte0 = builder . AddInstruction ( <nl> - HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 0 ) ) ; <nl> - auto gte1 = builder . AddInstruction ( <nl> - HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 1 ) ) ; <nl> - <nl> - / / Create a DynamicUpdateSlice instruction of tuple element 1 . <nl> - auto starts = builder . AddInstruction ( <nl> - HloInstruction : : CreateConstant ( Literal : : CreateR1 < int32 > ( { 2 } ) ) ) ; <nl> - auto update = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> - Literal : : CreateR1 < float > ( { 2 . f , 2 . f , 2 . f } ) ) ) ; <nl> - auto dynamic_update_slice = <nl> - builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> - data_shape , gte1 , update , starts ) ) ; <nl> - builder . AddInstruction ( <nl> - HloInstruction : : CreateTuple ( { gte0 , dynamic_update_slice } ) ) ; <nl> - <nl> - BuildModule ( builder . Build ( ) ) ; <nl> - auto fusion = computation_ - > CreateFusionInstruction ( <nl> - { dynamic_update_slice , starts , update , gte1 } , <nl> - HloInstruction : : FusionKind : : kLoop ) ; <nl> - RunAnalysis ( ) ; <nl> - <nl> - / / The fusion instruction never uses tuple element 0 , but does use element 1 . 
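// (Concretely: inside the fusion only gte1 reaches the fused
// dynamic-update-slice, so both analyses can prove the fusion never reads the
// buffer behind tuple element 0; DoesNotUseOperandBuffer is therefore true at
// index {0} and false at index {1} in the assertions below.)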
<nl> - EXPECT_TRUE ( <nl> - DoesNotUseOperandBuffer ( tuple , { 0 } , fusion , * points_to_analysis_ ) ) ; <nl> - EXPECT_FALSE ( <nl> - DoesNotUseOperandBuffer ( tuple , { 1 } , fusion , * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( DoesNotUseOperandBuffer ( tuple , { 0 } , fusion , * dataflow_analysis_ ) ) ; <nl> - EXPECT_FALSE ( <nl> - DoesNotUseOperandBuffer ( tuple , { 1 } , fusion , * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - class CanShareOperandBufferWithUserTest : public PointsToAnalysisTestBase { } ; <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , ElementWiseSameShape ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - auto param = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> - auto exp = builder . AddInstruction ( <nl> - HloInstruction : : CreateUnary ( shape , HloOpcode : : kExp , param ) ) ; <nl> - auto log = builder . AddInstruction ( <nl> - HloInstruction : : CreateUnary ( shape , HloOpcode : : kLog , exp ) ) ; <nl> - <nl> - BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> - <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( param , { } , exp , { } , * points_to_analysis_ ) ) ; <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( exp , { } , log , { } , * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( param , { } , exp , { } , * dataflow_analysis_ ) ) ; <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( exp , { } , log , { } , * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , ElementWiseDifferentShape ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape in_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - Shape out_shape = ShapeUtil : : MakeShape ( PRED , { 8 } ) ; <nl> - auto param0 = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , in_shape , " param0 " ) ) ; <nl> - auto param1 = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 1 , in_shape , " param1 " ) ) ; <nl> - auto result = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( out_shape , HloOpcode : : kEq , param0 , param1 ) ) ; <nl> - <nl> - BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> - <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( param0 , { } , result , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( param1 , { } , result , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( param0 , { } , result , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( param1 , { } , result , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , CopyShares ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - auto param = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> - auto exp = builder . AddInstruction ( <nl> - HloInstruction : : CreateUnary ( shape , HloOpcode : : kExp , param ) ) ; <nl> - auto copy = builder . AddInstruction ( <nl> - HloInstruction : : CreateUnary ( shape , HloOpcode : : kCopy , exp ) ) ; <nl> - <nl> - BuildModuleAndRunAnalysis ( builder . 
Build ( ) ) ; <nl> - <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( param , { } , exp , { } , * points_to_analysis_ ) ) ; <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( exp , { } , copy , { } , * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( param , { } , exp , { } , * dataflow_analysis_ ) ) ; <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( exp , { } , copy , { } , * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , FusedDynamicUpdateSlice ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> - 0 , ShapeUtil : : MakeTupleShape ( { data_shape , data_shape } ) , " tuple " ) ) ; <nl> - auto gte0 = builder . AddInstruction ( <nl> - HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 0 ) ) ; <nl> - auto gte1 = builder . AddInstruction ( <nl> - HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 1 ) ) ; <nl> - <nl> - / / Create a DynamicUpdateSlice instruction of tuple element 1 . <nl> - auto starts = builder . AddInstruction ( <nl> - HloInstruction : : CreateConstant ( Literal : : CreateR1 < int32 > ( { 2 } ) ) ) ; <nl> - auto update = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> - Literal : : CreateR1 < float > ( { 2 . f , 2 . f , 2 . f } ) ) ) ; <nl> - auto dynamic_update_slice = <nl> - builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> - data_shape , gte1 , update , starts ) ) ; <nl> - builder . AddInstruction ( <nl> - HloInstruction : : CreateTuple ( { gte0 , dynamic_update_slice } ) ) ; <nl> - <nl> - BuildModule ( builder . Build ( ) ) ; <nl> - auto fusion = computation_ - > CreateFusionInstruction ( <nl> - { dynamic_update_slice , starts , update , gte1 } , <nl> - HloInstruction : : FusionKind : : kLoop ) ; <nl> - RunAnalysis ( ) ; <nl> - <nl> - / / The fusion instruction can share with tuple element 1 . <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( tuple , { 0 } , fusion , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - EXPECT_TRUE ( CanShareOperandBufferWithUser ( tuple , { 1 } , fusion , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( tuple , { 0 } , fusion , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - EXPECT_TRUE ( CanShareOperandBufferWithUser ( tuple , { 1 } , fusion , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , DynamicUpdateSliceCanShare ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - Shape update_shape = ShapeUtil : : MakeShape ( F32 , { 4 } ) ; <nl> - Shape starts_shape = ShapeUtil : : MakeShape ( S32 , { 1 } ) ; <nl> - auto data = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> - auto update = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 1 , update_shape , " update " ) ) ; <nl> - auto starts = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 2 , starts_shape , " starts " ) ) ; <nl> - auto dus = builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> - data_shape , data , update , starts ) ) ; <nl> - <nl> - BuildModuleAndRunAnalysis ( builder . 
Build ( ) ) ; <nl> - <nl> - / / The DynamicUpdateSlice instruction can share with the data operand , but not <nl> - / / with update or starts . <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( data , { } , dus , { } , * points_to_analysis_ ) ) ; <nl> - EXPECT_FALSE ( <nl> - CanShareOperandBufferWithUser ( update , { } , dus , { } , * points_to_analysis_ ) ) ; <nl> - EXPECT_FALSE ( <nl> - CanShareOperandBufferWithUser ( starts , { } , dus , { } , * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( data , { } , dus , { } , * dataflow_analysis_ ) ) ; <nl> - EXPECT_FALSE ( <nl> - CanShareOperandBufferWithUser ( update , { } , dus , { } , * dataflow_analysis_ ) ) ; <nl> - EXPECT_FALSE ( <nl> - CanShareOperandBufferWithUser ( starts , { } , dus , { } , * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , FusedDotAdd ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> - <nl> - auto a = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> - Literal : : CreateR2 < float > ( { { 1 . 0 , 0 . 0 } , { 0 . 0 , 1 . 0 } } ) ) ) ; <nl> - auto b = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> - Literal : : CreateR2 < float > ( { { 2 . 0 , 2 . 0 } , { 2 . 0 , 2 . 0 } } ) ) ) ; <nl> - <nl> - DotDimensionNumbers dot_dnums ; <nl> - dot_dnums . add_lhs_contracting_dimensions ( 1 ) ; <nl> - dot_dnums . add_rhs_contracting_dimensions ( 0 ) ; <nl> - auto dot = builder . AddInstruction ( <nl> - HloInstruction : : CreateDot ( data_shape , a , b , dot_dnums ) ) ; <nl> - <nl> - auto one = builder . AddInstruction ( <nl> - HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> - auto add_operand = builder . AddInstruction ( <nl> - HloInstruction : : CreateBroadcast ( data_shape , one , { 1 } ) ) ; <nl> - <nl> - auto add = builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> - data_shape , HloOpcode : : kAdd , dot , add_operand ) ) ; <nl> - <nl> - BuildModule ( builder . Build ( ) ) ; <nl> - auto fusion = computation_ - > CreateFusionInstruction ( <nl> - { add , dot } , HloInstruction : : FusionKind : : kOutput ) ; <nl> - RunAnalysis ( ) ; <nl> - <nl> - / / Output fused dot add should be able to share buffer with ' add_operand ' . <nl> - EXPECT_TRUE ( CanShareOperandBufferWithUser ( add_operand , { } , fusion , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( CanShareOperandBufferWithUser ( add_operand , { } , fusion , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , OutputFusionCantAliasOperandBuffer ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> - <nl> - auto one = builder . AddInstruction ( <nl> - HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> - auto operand = builder . AddInstruction ( <nl> - HloInstruction : : CreateBroadcast ( data_shape , one , { 1 } ) ) ; <nl> - <nl> - auto reverse = builder . AddInstruction ( <nl> - HloInstruction : : CreateReverse ( data_shape , operand , { 0 , 1 } ) ) ; <nl> - <nl> - auto two = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> - Literal : : CreateR2 < float > ( { { 2 . 0 , 2 . 0 } , { 2 . 0 , 2 . 0 } } ) ) ) ; <nl> - <nl> - auto add = builder . 
AddInstruction ( <nl> - HloInstruction : : CreateBinary ( data_shape , HloOpcode : : kAdd , reverse , two ) ) ; <nl> - <nl> - BuildModule ( builder . Build ( ) ) ; <nl> - auto fusion = computation_ - > CreateFusionInstruction ( <nl> - { add , two , reverse } , HloInstruction : : FusionKind : : kOutput ) ; <nl> - RunAnalysis ( ) ; <nl> - <nl> - / / Output fused operand - > reverse - > add cannot alias operand buffer ' operand ' . <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( operand , { } , fusion , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_FALSE ( CanShareOperandBufferWithUser ( operand , { } , fusion , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - TEST_F ( CanShareOperandBufferWithUserTest , WhileCanShare ) { <nl> - Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - <nl> - auto make_cond = [ this , & data_shape ] ( ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) + " . Cond " ) ; <nl> - auto data = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> - builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> - ShapeUtil : : MakeShape ( PRED , { } ) , HloOpcode : : kEq , data , data ) ) ; <nl> - return builder . Build ( ) ; <nl> - } ; <nl> - <nl> - auto make_body = [ this , & data_shape ] ( ) { <nl> - auto builder = HloComputation : : Builder ( TestName ( ) + " . Body " ) ; <nl> - auto data = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> - builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( data_shape , HloOpcode : : kAdd , data , data ) ) ; <nl> - return builder . Build ( ) ; <nl> - } ; <nl> - <nl> - module_ = CreateNewModule ( ) ; <nl> - HloComputation * cond_computation = <nl> - module_ - > AddEmbeddedComputation ( make_cond ( ) ) ; <nl> - HloComputation * body_computation = <nl> - module_ - > AddEmbeddedComputation ( make_body ( ) ) ; <nl> - <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - auto data = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> - auto whil = builder . AddInstruction ( HloInstruction : : CreateWhile ( <nl> - data_shape , cond_computation , body_computation , data ) ) ; <nl> - computation_ = module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> - <nl> - RunAnalysis ( ) ; <nl> - <nl> - / / The While instruction can share with the data operand . <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( data , { } , whil , { } , * points_to_analysis_ ) ) ; <nl> - <nl> - EXPECT_TRUE ( <nl> - CanShareOperandBufferWithUser ( data , { } , whil , { } , * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - / / Tests that Call can alias operand buffer if the only use of the operand <nl> - / / in the called computation is an elementwise instruction . <nl> - TEST_F ( CanShareOperandBufferWithUserTest , CallToComputationWithFusionRoot ) { <nl> - Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> - / / Build sub - computation with fusion root . <nl> - auto sub_builder = HloComputation : : Builder ( TestName ( ) + " _sub " ) ; <nl> - auto sub_param = sub_builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , shape , " sub_param " ) ) ; <nl> - auto one = sub_builder . AddInstruction ( <nl> - HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> - auto ones = sub_builder . 
AddInstruction ( <nl> - HloInstruction : : CreateBroadcast ( shape , one , { 1 } ) ) ; <nl> - auto add = sub_builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , sub_param , ones ) ) ; <nl> - <nl> - module_ = CreateNewModule ( ) ; <nl> - auto sub_computation = module_ - > AddEmbeddedComputation ( sub_builder . Build ( ) ) ; <nl> - sub_computation - > CreateFusionInstruction ( { add , ones } , <nl> - HloInstruction : : FusionKind : : kLoop ) ; <nl> - <nl> - / / Build entry - computation with kCall which calls ' sub_computation ' . <nl> - auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> - <nl> - auto param = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> - auto reverse = <nl> - builder . AddInstruction ( HloInstruction : : CreateReverse ( shape , param , { 0 } ) ) ; <nl> - auto call = builder . AddInstruction ( <nl> - HloInstruction : : CreateCall ( shape , { reverse } , sub_computation ) ) ; <nl> - computation_ = module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> - <nl> - RunAnalysis ( ) ; <nl> - <nl> - EXPECT_TRUE ( CanShareOperandBufferWithUser ( reverse , { } , call , { } , <nl> - * points_to_analysis_ ) ) ; <nl> - EXPECT_TRUE ( CanShareOperandBufferWithUser ( reverse , { } , call , { } , <nl> - * dataflow_analysis_ ) ) ; <nl> - } <nl> - <nl> - } / / namespace <nl> - } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / tuple_points_to_analysis . cc <nl> ppp b / tensorflow / compiler / xla / service / tuple_points_to_analysis . cc <nl> void TuplePointsToAnalysis : : InstructionToString ( <nl> } ) ; <nl> } <nl> <nl> + bool TuplePointsToAnalysis : : DoesNotUseOperandBuffer ( <nl> + const HloInstruction * operand , const ShapeIndex & index , <nl> + const HloInstruction * user ) const { <nl> + CHECK ( user - > IsUserOf ( operand ) ) <nl> + < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> + if ( user - > opcode ( ) = = HloOpcode : : kGetTupleElement & & ! index . empty ( ) ) { <nl> + / / GetTupleElement instructions only access the top - level buffer of their <nl> + / / operand . <nl> + return true ; <nl> + } else if ( user - > opcode ( ) = = HloOpcode : : kFusion & & <nl> + user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop ) { <nl> + / / Find fusion parameter associated with ' operand ' . <nl> + auto it = std : : find_if ( <nl> + user - > fused_parameters ( ) . begin ( ) , user - > fused_parameters ( ) . end ( ) , <nl> + [ = ] ( HloInstruction * fused_param ) { <nl> + return user - > operand ( fused_param - > parameter_number ( ) ) = = operand ; <nl> + } ) ; <nl> + CHECK ( it ! = user - > fused_parameters ( ) . end ( ) ) ; <nl> + / / Iterate through all users of all buffer aliases of the buffer in the <nl> + / / points - to set of fusion parameter at ' index ' . <nl> + / / Return false if any uses are detected at ' index ' , returns true otherwise . <nl> + const LogicalBuffer * buffer = GetBufferDefinedAt ( * it , index ) . ValueOrDie ( ) ; <nl> + for ( const BufferAlias & alias : GetBufferAliases ( * buffer ) ) { <nl> + for ( HloInstruction * alias_user : alias . instruction ( ) - > users ( ) ) { <nl> + if ( DoesNotUseOperandBuffer ( alias . instruction ( ) , alias . index ( ) , <nl> + alias_user ) ) { <nl> + continue ; <nl> + } <nl> + / / Return false : use detected at ' buffer ' - > ' alias ' - > ' alias_user ' . 
<nl> + return false ; <nl> + } <nl> + } <nl> + / / Return true : found no uses of ' operand ' at ' index ' in ' user ' . <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + / / Returns all uses of all aliases of ' instruction ' at ' index ' in ' uses ' . <nl> + / / Each use in ' uses ' is a pair ( HloInstruction * user , int64 operand_index ) <nl> + / / where ' user ' is a user of an alias of ' instruction ' at ' index ' , and <nl> + / / ' operand_index ' is the operand index at which the alias appears in the <nl> + / / operand list of ' user ' . <nl> + std : : vector < std : : pair < HloInstruction * , int64 > > <nl> + TuplePointsToAnalysis : : GetAllUsesOfInstructionAtIndex ( <nl> + HloInstruction * instruction , const ShapeIndex & index ) const { <nl> + std : : vector < std : : pair < HloInstruction * , int64 > > uses ; <nl> + const PointsToSet : : BufferList & points_to = <nl> + GetPointsToSet ( instruction ) . element ( index ) ; <nl> + for ( const LogicalBuffer * buffer : points_to ) { <nl> + for ( const BufferAlias & alias : GetBufferAliases ( * buffer ) ) { <nl> + for ( HloInstruction * alias_user : alias . instruction ( ) - > users ( ) ) { <nl> + if ( DoesNotUseOperandBuffer ( alias . instruction ( ) , alias . index ( ) , <nl> + alias_user ) ) { <nl> + continue ; <nl> + } <nl> + for ( int64 op_idx : alias_user - > OperandIndices ( alias . instruction ( ) ) ) { <nl> + uses . emplace_back ( alias_user , op_idx ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + return uses ; <nl> + } <nl> + <nl> + / / Returns true if there is exactly one use of ' operand ' at ' operand_index ' <nl> + / / in ' fusion . fused_instructions ' , where the singleton use is the fused <nl> + / / root at operand index ' use_operand_index ' . Returns false otherwise . <nl> + / / <nl> + / / REQUIRES : ' fusion ' opcode is a kFusion instruction . <nl> + bool TuplePointsToAnalysis : : HasUniqueFusedUseOfOperandAt ( <nl> + HloInstruction * operand , const ShapeIndex & operand_index , <nl> + HloInstruction * fusion , const int64 use_operand_index ) const { <nl> + CHECK_EQ ( HloOpcode : : kFusion , fusion - > opcode ( ) ) ; <nl> + / / Check that ' operand ' is unique in the operand list of ' fusion ' . <nl> + if ( fusion - > OperandIndices ( operand ) . size ( ) > 1 ) { <nl> + return false ; <nl> + } <nl> + / / Find fusion parameter associated with ' operand ' . <nl> + const auto & fused_params = fusion - > fused_parameters ( ) ; <nl> + auto fused_param_it = std : : find_if ( <nl> + fused_params . begin ( ) , fused_params . end ( ) , <nl> + [ & ] ( HloInstruction * fused_param ) { <nl> + return fusion - > operand ( fused_param - > parameter_number ( ) ) = = operand ; <nl> + } ) ; <nl> + if ( fused_param_it = = fused_params . end ( ) ) { <nl> + return false ; <nl> + } <nl> + auto * fused_param = * fused_param_it ; <nl> + / / Get all uses of ' operand ' at ' index ' from ' fusion . fused_instructions ' . <nl> + auto fused_param_uses = <nl> + GetAllUsesOfInstructionAtIndex ( fused_param , operand_index ) ; <nl> + / / Return true iff there is exactly one use of ' operand ' at ' index ' , and <nl> + / / this singleton use is the fused root ( at index in ' use_operand_indices ' ) . <nl> + return fused_param_uses . size ( ) = = 1 & & <nl> + fused_param_uses [ 0 ] . first = = fusion - > fused_expression_root ( ) & & <nl> + fused_param_uses [ 0 ] . 
second = = use_operand_index ; <nl> + } <nl> + <nl> + / / User and operand can share buffers iff both instructions emit the same shape <nl> + / / and layout , and ' user ' meets one of the following qualifications : <nl> + / / <nl> + / / ( 1 ) Is element - wise . Or . . . <nl> + / / ( 2 ) Is a loop fusion instruction where the only use of ' operand ' at ' index ' <nl> + / / in the set ' user . fused_instructions ' is a DynamicUpdateSlice fused root <nl> + / / at operand 0 . Or . . . <nl> + / / ( 3 ) Is a kDot - > kAdd output fusion instruction where the only use of <nl> + / / ' operand ' at ' index ' in the set ' user . fused_instructions ' is a kAdd fused <nl> + / / root at operand 0 or 1 . Or . . . <nl> + / / ( 4 ) The ' user ' of ' operand ' is DynamicUpdateSlice or While at operand index <nl> + / / 0 . <nl> + / / <nl> + / / ( 2 ) and ( 3 ) can only be determined if points - to analysis is available . <nl> + bool TuplePointsToAnalysis : : CanShareOperandBufferWithUser ( <nl> + HloInstruction * operand , const ShapeIndex & operand_index , <nl> + HloInstruction * user , const ShapeIndex & user_index ) const { <nl> + CHECK ( user - > IsUserOf ( operand ) ) <nl> + < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> + const Shape & operand_subshape = <nl> + ShapeUtil : : GetSubshape ( operand - > shape ( ) , operand_index ) ; <nl> + const Shape & user_subshape = <nl> + ShapeUtil : : GetSubshape ( user - > shape ( ) , user_index ) ; <nl> + / / Check that operand and user emit the same shape and layout . <nl> + if ( ! ShapeUtil : : Equal ( operand_subshape , user_subshape ) ) { <nl> + return false ; <nl> + } <nl> + if ( user - > opcode ( ) = = HloOpcode : : kFusion ) { <nl> + if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop & & <nl> + user - > fused_expression_root ( ) - > opcode ( ) = = <nl> + HloOpcode : : kDynamicUpdateSlice ) { <nl> + / / Loop fusion with kDynamicUpdateSlice fused root . <nl> + / / <nl> + / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> + / / ' operand_index ' , and this singleton use is the fused root at operand <nl> + / / index 0 . <nl> + return HasUniqueFusedUseOfOperandAt ( operand , operand_index , user , 0 ) ; <nl> + } else if ( user - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kOutput & & <nl> + user - > fused_expression_root ( ) - > opcode ( ) = = HloOpcode : : kAdd ) { <nl> + / / Output fusion with kAdd fused root . <nl> + <nl> + / / Check if one operand of kAdd fused root is kDot or kConvolution . <nl> + auto * add = user - > fused_expression_root ( ) ; <nl> + auto add_operand_it = <nl> + std : : find_if ( add - > operands ( ) . begin ( ) , add - > operands ( ) . end ( ) , <nl> + [ & ] ( HloInstruction * operand ) { <nl> + return operand - > opcode ( ) = = HloOpcode : : kConvolution | | <nl> + operand - > opcode ( ) = = HloOpcode : : kDot ; <nl> + } ) ; <nl> + if ( add_operand_it = = add - > operands ( ) . end ( ) ) { <nl> + return false ; <nl> + } <nl> + auto * matched_add_operand = * add_operand_it ; <nl> + / / Calculate operand index of ' add ' operand which was not matched above . <nl> + const int64 other_add_operand_index = <nl> + matched_add_operand = = add - > operand ( 0 ) ? 1 : 0 ; <nl> + / / Returns true iff there is exactly one use of ' operand ' at shape index <nl> + / / ' operand_index ' , and this singleton use is the fused root ( at operand <nl> + / / index ' other_add_operand_index ' ) . 
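<nl> + / / Illustrative shape of the matched pattern ( a hypothetical HLO <nl> + / / sketch , using assumed names ' a ' , ' b ' , ' bias ' ) : <nl> + / / fused root : add = f32 [ 2 , 2 ] add ( dot ( a , b ) , bias ) <nl> + / / Sharing is legal only when the tested operand ' s single fused use is <nl> + / / the ' bias ' slot , so the dot may accumulate into that buffer in place .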
<nl> + return HasUniqueFusedUseOfOperandAt ( operand , operand_index , user , <nl> + other_add_operand_index ) ; <nl> + } <nl> + } <nl> + if ( user - > opcode ( ) = = HloOpcode : : kDynamicUpdateSlice | | <nl> + user - > opcode ( ) = = HloOpcode : : kWhile ) { <nl> + / / We eliminated other users in BufferLiveness : : live_range_strictly_before , <nl> + / / so here we just need to check that the use is at operand index 0 . <nl> + std : : vector < int64 > operand_indices = user - > OperandIndices ( operand ) ; <nl> + return operand_indices . size ( ) = = 1 & & operand_indices [ 0 ] = = 0 ; <nl> + } <nl> + if ( user - > opcode ( ) = = HloOpcode : : kCall ) { <nl> + / / TODO ( b / 62548313 ) : Remove when buffer assignment is module scoped and <nl> + / / does not assign buffers to calls . <nl> + / / Find called computation parameter associated with ' operand ' . <nl> + const std : : vector < int64 > operand_indices = user - > OperandIndices ( operand ) ; <nl> + if ( operand_indices . size ( ) > 1 ) { <nl> + return false ; <nl> + } <nl> + CHECK_EQ ( 1 , operand_indices . size ( ) ) ; <nl> + auto * param = user - > to_apply ( ) - > parameter_instruction ( operand_indices [ 0 ] ) ; <nl> + / / Get all uses of ' operand ' at ' index ' in called computation . <nl> + auto param_uses = GetAllUsesOfInstructionAtIndex ( param , operand_index ) ; <nl> + <nl> + / / Return true iff : <nl> + / / * ) There exists exactly one use of ' operand ' in called computation . <nl> + / / * ) The unique use is by the root instruction of called computation . <nl> + / / ( Note : we check the root of the called computation , because the <nl> + / / root result buffer is required to alias with the Call result buffer ) . <nl> + / / * ) The root instruction of the called computation is element - wise on <nl> + / / ' operand ' . <nl> + auto * callee_root = user - > to_apply ( ) - > root_instruction ( ) ; <nl> + return param_uses . size ( ) = = 1 & & param_uses [ 0 ] . first = = callee_root & & <nl> + callee_root - > IsElementwiseOnOperand ( param_uses [ 0 ] . second ) ; <nl> + } <nl> + / / Check if ' user ' is element - wise . <nl> + return user - > IsElementwise ( ) ; <nl> + } <nl> + <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / tuple_points_to_analysis . h <nl> ppp b / tensorflow / compiler / xla / service / tuple_points_to_analysis . h <nl> class TuplePointsToAnalysis : public DfsHloVisitorWithDefault { <nl> <nl> string ToString ( ) const ; <nl> <nl> + / / Returns true if ' user ' cannot possibly use the buffer at ' index ' in <nl> + / / ' operand ' . Returns false otherwise . <nl> + / / <nl> + / / REQUIRES : ' operand ' is an operand of ' user ' . <nl> + bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> + const ShapeIndex & index , <nl> + const HloInstruction * user ) const ; <nl> + <nl> + / / Returns true if ' user ' ( at ' user_index ' ) can share a buffer with its <nl> + / / operand ' operand ' ( at ' operand_index ' ) . Returns false otherwise . <nl> + / / <nl> + / / REQUIRES : ' operand ' is an operand of ' user ' . 
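<nl> + / / <nl> + / / A minimal usage sketch ( hypothetical names ' analysis ' , ' param ' and <nl> + / / ' exp ' ; assumes ' exp ' is an elementwise user with matching shape ) : <nl> + / / bool ok = analysis . CanShareOperandBufferWithUser ( param , { } , exp , { } ) ; <nl> + / / / / true when ' exp ' is elementwise on ' param ' and shapes match . <nl>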
<nl> + bool CanShareOperandBufferWithUser ( HloInstruction * operand , <nl> + const ShapeIndex & operand_index , <nl> + HloInstruction * user , <nl> + const ShapeIndex & user_index ) const ; <nl> + <nl> private : <nl> explicit TuplePointsToAnalysis ( <nl> const HloModule * module , <nl> class TuplePointsToAnalysis : public DfsHloVisitorWithDefault { <nl> return & per_instruction_ [ id ] ; <nl> } <nl> <nl> + std : : vector < std : : pair < HloInstruction * , int64 > > GetAllUsesOfInstructionAtIndex ( <nl> + HloInstruction * instruction , const ShapeIndex & index ) const ; <nl> + bool HasUniqueFusedUseOfOperandAt ( HloInstruction * operand , <nl> + const ShapeIndex & operand_index , <nl> + HloInstruction * fusion , <nl> + const int64 use_operand_index ) const ; <nl> + <nl> / / The module this analysis is performed on . <nl> const HloModule * module_ ; <nl> <nl> mmm a / tensorflow / compiler / xla / service / tuple_points_to_analysis_test . cc <nl> ppp b / tensorflow / compiler / xla / service / tuple_points_to_analysis_test . cc <nl> TEST_F ( FusionPointsToAnalysisTest , FusionParam0TwoUsers ) { <nl> Run ( / * add_additional_gte0_user = * / true ) ; <nl> } <nl> <nl> + class PointsToAnalysisTestBase : public HloTestBase { <nl> + protected : <nl> + void BuildModule ( std : : unique_ptr < HloComputation > computation ) { <nl> + module_ = CreateNewModule ( ) ; <nl> + computation_ = module_ - > AddEntryComputation ( std : : move ( computation ) ) ; <nl> + } <nl> + <nl> + void RunAnalysis ( ) { <nl> + CHECK_NOTNULL ( module_ . get ( ) ) ; <nl> + points_to_analysis_ = <nl> + TuplePointsToAnalysis : : Run ( module_ . get ( ) ) . ConsumeValueOrDie ( ) ; <nl> + } <nl> + <nl> + void BuildModuleAndRunAnalysis ( std : : unique_ptr < HloComputation > computation ) { <nl> + BuildModule ( std : : move ( computation ) ) ; <nl> + RunAnalysis ( ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < HloModule > module_ ; <nl> + HloComputation * computation_ = nullptr ; <nl> + std : : unique_ptr < TuplePointsToAnalysis > points_to_analysis_ ; <nl> + } ; <nl> + <nl> + class DoesNotUseOperandBufferTest : public PointsToAnalysisTestBase { } ; <nl> + <nl> + TEST_F ( DoesNotUseOperandBufferTest , GetTupleElement ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape elem_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeTupleShape ( { elem_shape , elem_shape } ) , " tuple " ) ) ; <nl> + auto gte0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( elem_shape , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( elem_shape , tuple , 1 ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( elem_shape , HloOpcode : : kAdd , gte0 , gte1 ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + / / GetTupleElement instructions only access the top - level buffer of their <nl> + / / operand . 
<nl> + EXPECT_TRUE ( points_to_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 0 } , gte0 ) ) ; <nl> + EXPECT_TRUE ( points_to_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 1 } , gte1 ) ) ; <nl> + EXPECT_FALSE ( points_to_analysis_ - > DoesNotUseOperandBuffer ( tuple , { } , gte0 ) ) ; <nl> + EXPECT_FALSE ( points_to_analysis_ - > DoesNotUseOperandBuffer ( tuple , { } , gte1 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DoesNotUseOperandBufferTest , FusedDynamicUpdateSlice ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeTupleShape ( { data_shape , data_shape } ) , " tuple " ) ) ; <nl> + auto gte0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 1 ) ) ; <nl> + <nl> + / / Create a DynamicUpdateSlice instruction of tuple element 1 . <nl> + auto starts = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR1 < int32 > ( { 2 } ) ) ) ; <nl> + auto update = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR1 < float > ( { 2 . f , 2 . f , 2 . f } ) ) ) ; <nl> + auto dynamic_update_slice = <nl> + builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> + data_shape , gte1 , update , starts ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { gte0 , dynamic_update_slice } ) ) ; <nl> + <nl> + BuildModule ( builder . Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { dynamic_update_slice , starts , update , gte1 } , <nl> + HloInstruction : : FusionKind : : kLoop ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / The fusion instruction never uses tuple element 0 , but does use element 1 . <nl> + EXPECT_TRUE ( points_to_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 0 } , fusion ) ) ; <nl> + EXPECT_FALSE ( <nl> + points_to_analysis_ - > DoesNotUseOperandBuffer ( tuple , { 1 } , fusion ) ) ; <nl> + } <nl> + <nl> + class CanShareOperandBufferWithUserTest : public PointsToAnalysisTestBase { } ; <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , ElementWiseSameShape ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> + auto exp = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kExp , param ) ) ; <nl> + auto log = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kLog , exp ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . 
Build ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( param , { } , exp , { } ) ) ; <nl> + EXPECT_TRUE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( exp , { } , log , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , ElementWiseDifferentShape ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape in_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + Shape out_shape = ShapeUtil : : MakeShape ( PRED , { 8 } ) ; <nl> + auto param0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , in_shape , " param0 " ) ) ; <nl> + auto param1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , in_shape , " param1 " ) ) ; <nl> + auto result = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( out_shape , HloOpcode : : kEq , param0 , param1 ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( param0 , { } , <nl> + result , { } ) ) ; <nl> + EXPECT_FALSE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( param1 , { } , <nl> + result , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , CopyShares ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> + auto exp = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kExp , param ) ) ; <nl> + auto copy = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kCopy , exp ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( param , { } , exp , { } ) ) ; <nl> + EXPECT_TRUE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( exp , { } , copy , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , FusedDynamicUpdateSlice ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + auto tuple = builder . AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeTupleShape ( { data_shape , data_shape } ) , " tuple " ) ) ; <nl> + auto gte0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( data_shape , tuple , 1 ) ) ; <nl> + <nl> + / / Create a DynamicUpdateSlice instruction of tuple element 1 . <nl> + auto starts = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR1 < int32 > ( { 2 } ) ) ) ; <nl> + auto update = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR1 < float > ( { 2 . f , 2 . f , 2 . f } ) ) ) ; <nl> + auto dynamic_update_slice = <nl> + builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> + data_shape , gte1 , update , starts ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { gte0 , dynamic_update_slice } ) ) ; <nl> + <nl> + BuildModule ( builder . 
Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { dynamic_update_slice , starts , update , gte1 } , <nl> + HloInstruction : : FusionKind : : kLoop ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / The fusion instruction can share with tuple element 1 . <nl> + EXPECT_FALSE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( tuple , { 0 } , <nl> + fusion , { } ) ) ; <nl> + EXPECT_TRUE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( tuple , { 1 } , <nl> + fusion , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , DynamicUpdateSliceCanShare ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + Shape update_shape = ShapeUtil : : MakeShape ( F32 , { 4 } ) ; <nl> + Shape starts_shape = ShapeUtil : : MakeShape ( S32 , { 1 } ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + auto update = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , update_shape , " update " ) ) ; <nl> + auto starts = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 2 , starts_shape , " starts " ) ) ; <nl> + auto dus = builder . AddInstruction ( HloInstruction : : CreateDynamicUpdateSlice ( <nl> + data_shape , data , update , starts ) ) ; <nl> + <nl> + BuildModuleAndRunAnalysis ( builder . Build ( ) ) ; <nl> + <nl> + / / The DynamicUpdateSlice instruction can share with the data operand , but not <nl> + / / with update or starts . <nl> + EXPECT_TRUE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( data , { } , dus , { } ) ) ; <nl> + EXPECT_FALSE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( update , { } , dus , { } ) ) ; <nl> + EXPECT_FALSE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( starts , { } , dus , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , FusedDotAdd ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> + <nl> + auto a = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR2 < float > ( { { 1 . 0 , 0 . 0 } , { 0 . 0 , 1 . 0 } } ) ) ) ; <nl> + auto b = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR2 < float > ( { { 2 . 0 , 2 . 0 } , { 2 . 0 , 2 . 0 } } ) ) ) ; <nl> + <nl> + DotDimensionNumbers dot_dnums ; <nl> + dot_dnums . add_lhs_contracting_dimensions ( 1 ) ; <nl> + dot_dnums . add_rhs_contracting_dimensions ( 0 ) ; <nl> + auto dot = builder . AddInstruction ( <nl> + HloInstruction : : CreateDot ( data_shape , a , b , dot_dnums ) ) ; <nl> + <nl> + auto one = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto add_operand = builder . AddInstruction ( <nl> + HloInstruction : : CreateBroadcast ( data_shape , one , { 1 } ) ) ; <nl> + <nl> + auto add = builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + data_shape , HloOpcode : : kAdd , dot , add_operand ) ) ; <nl> + <nl> + BuildModule ( builder . Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { add , dot } , HloInstruction : : FusionKind : : kOutput ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / Output fused dot add should be able to share buffer with ' add_operand ' . 
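<nl> + / / Illustrative reasoning : ' add_operand ' has exactly one fused use , as <nl> + / / the non - dot input of the kAdd root , so the output fusion may <nl> + / / accumulate the dot result into its buffer in place .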
<nl> + EXPECT_TRUE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( <nl> + add_operand , { } , fusion , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , OutputFusionCantAliasOperandBuffer ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> + <nl> + auto one = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto operand = builder . AddInstruction ( <nl> + HloInstruction : : CreateBroadcast ( data_shape , one , { 1 } ) ) ; <nl> + <nl> + auto reverse = builder . AddInstruction ( <nl> + HloInstruction : : CreateReverse ( data_shape , operand , { 0 , 1 } ) ) ; <nl> + <nl> + auto two = builder . AddInstruction ( HloInstruction : : CreateConstant ( <nl> + Literal : : CreateR2 < float > ( { { 2 . 0 , 2 . 0 } , { 2 . 0 , 2 . 0 } } ) ) ) ; <nl> + <nl> + auto add = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( data_shape , HloOpcode : : kAdd , reverse , two ) ) ; <nl> + <nl> + BuildModule ( builder . Build ( ) ) ; <nl> + auto fusion = computation_ - > CreateFusionInstruction ( <nl> + { add , two , reverse } , HloInstruction : : FusionKind : : kOutput ) ; <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / Output fused operand - > reverse - > add cannot alias operand buffer ' operand ' . <nl> + EXPECT_FALSE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( operand , { } , <nl> + fusion , { } ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CanShareOperandBufferWithUserTest , WhileCanShare ) { <nl> + Shape data_shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + <nl> + auto make_cond = [ this , & data_shape ] ( ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) + " . Cond " ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + ShapeUtil : : MakeShape ( PRED , { } ) , HloOpcode : : kEq , data , data ) ) ; <nl> + return builder . Build ( ) ; <nl> + } ; <nl> + <nl> + auto make_body = [ this , & data_shape ] ( ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) + " . Body " ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( data_shape , HloOpcode : : kAdd , data , data ) ) ; <nl> + return builder . Build ( ) ; <nl> + } ; <nl> + <nl> + module_ = CreateNewModule ( ) ; <nl> + HloComputation * cond_computation = <nl> + module_ - > AddEmbeddedComputation ( make_cond ( ) ) ; <nl> + HloComputation * body_computation = <nl> + module_ - > AddEmbeddedComputation ( make_body ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto data = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , data_shape , " data " ) ) ; <nl> + auto whil = builder . AddInstruction ( HloInstruction : : CreateWhile ( <nl> + data_shape , cond_computation , body_computation , data ) ) ; <nl> + computation_ = module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + RunAnalysis ( ) ; <nl> + <nl> + / / The While instruction can share with the data operand . 
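<nl> + / / Illustrative reasoning : kWhile threads its state through operand 0 , <nl> + / / so the body may update ' data ' in place ; other conflicting users are <nl> + / / ruled out separately by buffer liveness .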
<nl> + EXPECT_TRUE ( <nl> + points_to_analysis_ - > CanShareOperandBufferWithUser ( data , { } , whil , { } ) ) ; <nl> + } <nl> + <nl> + / / Tests that Call can alias operand buffer if the only use of the operand <nl> + / / in the called computation is an elementwise instruction . <nl> + TEST_F ( CanShareOperandBufferWithUserTest , CallToComputationWithFusionRoot ) { <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 8 } ) ; <nl> + / / Build sub - computation with fusion root . <nl> + auto sub_builder = HloComputation : : Builder ( TestName ( ) + " _sub " ) ; <nl> + auto sub_param = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " sub_param " ) ) ; <nl> + auto one = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( Literal : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto ones = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateBroadcast ( shape , one , { 1 } ) ) ; <nl> + auto add = sub_builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , sub_param , ones ) ) ; <nl> + <nl> + module_ = CreateNewModule ( ) ; <nl> + auto sub_computation = module_ - > AddEmbeddedComputation ( sub_builder . Build ( ) ) ; <nl> + sub_computation - > CreateFusionInstruction ( { add , ones } , <nl> + HloInstruction : : FusionKind : : kLoop ) ; <nl> + <nl> + / / Build entry - computation with kCall which calls ' sub_computation ' . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + auto param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , shape , " param " ) ) ; <nl> + auto reverse = <nl> + builder . AddInstruction ( HloInstruction : : CreateReverse ( shape , param , { 0 } ) ) ; <nl> + auto call = builder . AddInstruction ( <nl> + HloInstruction : : CreateCall ( shape , { reverse } , sub_computation ) ) ; <nl> + computation_ = module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + RunAnalysis ( ) ; <nl> + <nl> + EXPECT_TRUE ( points_to_analysis_ - > CanShareOperandBufferWithUser ( reverse , { } , <nl> + call , { } ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / tests / array_elementwise_ops_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / array_elementwise_ops_test . cc <nl> XLA_TEST_F ( ArrayElementwiseOpTest , ClzU32s ) { <nl> ComputeAndCompareR1 < uint32 > ( & builder , { 32 , 31 , 27 , 15 , 9 , 3 , 0 } , { } ) ; <nl> } <nl> <nl> + XLA_TEST_F ( ArrayElementwiseOpTest , ClzS64s ) { <nl> + XlaBuilder builder ( TestName ( ) ) ; <nl> + auto a = <nl> + builder . ConstantR1 < int64 > ( { 0 , 1 , 0x80000000 , 0x7FFFFFFFF2345678ul , - 1 } ) ; <nl> + builder . Clz ( a ) ; <nl> + <nl> + ComputeAndCompareR1 < int64 > ( & builder , { 64 , 63 , 32 , 1 , 0 } , { } ) ; <nl> + } <nl> + <nl> XLA_TEST_F ( ArrayElementwiseOpTest , AddChainFoldLeft ) { <nl> / / a mmmmmm ( add ) mmmmmmmmm ( add ) <nl> / / / / <nl> mmm a / tensorflow / compiler / xla / tests / bfloat16_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / bfloat16_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / tests / test_macros . h " <nl> # include " tensorflow / compiler / xla / tests / test_utils . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / platform / logging . 
h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / types . h " <nl> mmm a / tensorflow / compiler / xla / tests / constants_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / constants_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / tests / client_library_test_base . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> mmm a / tensorflow / compiler / xla / tests / multioutput_fusion_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / multioutput_fusion_test . cc <nl> XLA_TEST_F ( MultiOutputFusionTest , FusionNodeIsRoot ) { <nl> * result , * Literal : : MakeTupleOwned ( Literal : : CreateR0 < int32 > ( 42 ) ) ) ) ; <nl> } <nl> <nl> + XLA_TEST_F ( MultiOutputFusionTest , MultiOutputLoopFusion ) { <nl> + const char * testcase = R " ( <nl> + HloModule m <nl> + <nl> + fused_computation { <nl> + p = f32 [ ] parameter ( 0 ) <nl> + multiply = f32 [ ] multiply ( p , p ) <nl> + less - than = pred [ ] less - than ( p , multiply ) <nl> + ROOT tuple = ( pred [ ] , f32 [ ] ) tuple ( less - than , multiply ) <nl> + } <nl> + <nl> + ENTRY PredFloatMOF { <nl> + p0 = f32 [ ] parameter ( 0 ) <nl> + fusion = ( pred [ ] , f32 [ ] ) fusion ( p0 ) , kind = kLoop , calls = fused_computation <nl> + gte0 = pred [ ] get - tuple - element ( fusion ) , index = 0 <nl> + gte1 = f32 [ ] get - tuple - element ( fusion ) , index = 1 <nl> + const = f32 [ ] constant ( 0 ) <nl> + ROOT select = f32 [ ] select ( gte0 , gte1 , const ) <nl> + } ) " ; <nl> + auto module = <nl> + HloRunner : : CreateModuleFromString ( testcase , GetDebugOptionsForTest ( ) ) <nl> + . ValueOrDie ( ) ; <nl> + auto param = Literal : : CreateR0 < float > ( 2 . 0 ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto result , <nl> + Execute ( std : : move ( module ) , { param . get ( ) } ) ) ; <nl> + EXPECT_TRUE ( LiteralTestUtil : : Equal ( * result , * Literal : : CreateR0 < float > ( 4 . 0 ) ) ) ; <nl> + } <nl> + <nl> + XLA_TEST_F ( MultiOutputFusionTest , MultiOutputLoopFeedingMap ) { <nl> + const char * testcase = R " ( <nl> + HloModule m <nl> + <nl> + fused_computation { <nl> + p = f32 [ ] parameter ( 0 ) <nl> + multiply = f32 [ ] multiply ( p , p ) <nl> + less - than = pred [ ] less - than ( p , multiply ) <nl> + ROOT tuple = ( pred [ ] , f32 [ ] ) tuple ( less - than , multiply ) <nl> + } <nl> + <nl> + map_computation { <nl> + p0 = f32 [ ] parameter ( 0 ) <nl> + fusion = ( pred [ ] , f32 [ ] ) fusion ( p0 ) , kind = kLoop , calls = fused_computation <nl> + gte0 = pred [ ] get - tuple - element ( fusion ) , index = 0 <nl> + gte1 = f32 [ ] get - tuple - element ( fusion ) , index = 1 <nl> + const = f32 [ ] constant ( 0 ) <nl> + ROOT select = f32 [ ] select ( gte0 , gte1 , const ) <nl> + } <nl> + <nl> + ENTRY MapMOF { <nl> + p1 = f32 [ 3 ] parameter ( 0 ) <nl> + ROOT map = f32 [ 3 ] map ( p1 ) , to_apply = map_computation <nl> + } ) " ; <nl> + auto module = <nl> + HloRunner : : CreateModuleFromString ( testcase , GetDebugOptionsForTest ( ) ) <nl> + . ValueOrDie ( ) ; <nl> + auto param = Literal : : CreateR1 < float > ( { 1 . 0 , 2 . 0 , 3 . 0 } ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto result , <nl> + Execute ( std : : move ( module ) , { param . 
get ( ) } ) ) ; <nl> + EXPECT_TRUE ( LiteralTestUtil : : Equal ( <nl> + * result , * Literal : : CreateR1 < float > ( { 0 . 0 , 4 . 0 , 9 . 0 } ) ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / tests / reduce_window_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / reduce_window_test . cc <nl> INSTANTIATE_TEST_CASE_P ( <nl> class ReduceWindowTextTest : public HloTestBase { } ; <nl> <nl> TEST_F ( ReduceWindowTextTest , R2General256x384 ) { <nl> - const string & hlo_string = R " ( <nl> + const string hlo_string = R " ( <nl> HloModule R2Window <nl> mul { <nl> lhs = f32 [ ] parameter ( 0 ) <nl> ENTRY R2Window { <nl> } <nl> <nl> TEST_F ( ReduceWindowTextTest , R2General256x384Layout01 ) { <nl> - const string & hlo_string = R " ( <nl> + const string hlo_string = R " ( <nl> HloModule R2Window <nl> mul { <nl> lhs = f32 [ ] parameter ( 0 ) <nl> ROOT reduce - window = f32 [ 256 , 384 ] { 0 , 1 } reduce - window ( operand , constant ) , window = <nl> } <nl> <nl> TEST_F ( ReduceWindowTextTest , R2General2x5 ) { <nl> - const string & hlo_string = R " ( <nl> + const string hlo_string = R " ( <nl> HloModule R2Window <nl> mul { <nl> lhs = f32 [ ] parameter ( 0 ) <nl> ENTRY R2Window { <nl> } <nl> <nl> TEST_F ( ReduceWindowTextTest , R2EffectiveScalar ) { <nl> - const string & hlo_string = R " ( <nl> + const string hlo_string = R " ( <nl> HloModule R2Window <nl> mul { <nl> lhs = f32 [ ] parameter ( 0 ) <nl> ENTRY R2Window { <nl> } <nl> <nl> TEST_F ( ReduceWindowTextTest , R3EffectiveScalar ) { <nl> - const string & hlo_string = R " ( <nl> + const string hlo_string = R " ( <nl> HloModule R3Window <nl> mul { <nl> lhs = f32 [ ] parameter ( 0 ) <nl> ENTRY R3Window { <nl> } <nl> <nl> TEST_F ( HloTestBase , ReduceWindowIdentity ) { <nl> - const string & hlo_string = R " ( <nl> + const string hlo_string = R " ( <nl> HloModule ReduceWindowIdentity <nl> identity . pad_to_reduce_window { <nl> param0 = f32 [ ] parameter ( 0 ) <nl> identity . pad_to_reduce_window { <nl> ENTRY reduce - window - identity { <nl> operand = f32 [ 1 , 32 , 64 ] { 2 , 1 , 0 } parameter ( 0 ) <nl> constant . 4466 = f32 [ ] constant ( 0 ) <nl> - ROOT reduce - window = f32 [ 1 , 33 , 64 ] { 2 , 1 , 0 } reduce - window ( operand , constant . 4466 ) , window = { size = 1x1x1 pad = 0_0x1_0x0_0 } , to_apply = identity . pad_to_reduce_window <nl> + ROOT reduce - window = f32 [ 1 , 33 , 64 ] { 2 , 1 , 0 } reduce - window ( operand , constant . 4466 ) , window = { size = 1x1x1 pad = 0_0x1_0x0_0 } , to_apply = identity . pad_to_reduce_window <nl> + } <nl> + <nl> + ) " ; <nl> + EXPECT_TRUE ( RunAndCompare ( hlo_string , tensorflow : : gtl : : nullopt ) ) ; <nl> + } <nl> + <nl> + TEST_F ( HloTestBase , ReduceWindowS32 ) { <nl> + const string hlo_string = R " ( <nl> + HloModule reduce - window <nl> + <nl> + % identity . pad_to_reduce_window ( param0 : s32 [ ] , param1 : s32 [ ] ) - > s32 [ ] { <nl> + % param0 = s32 [ ] parameter ( 0 ) <nl> + ROOT % param1 = s32 [ ] parameter ( 1 ) <nl> + } <nl> + <nl> + ENTRY % reduce - window ( parameter . 0 : s32 [ 81 , 8 ] , parameter . 1 : s32 [ ] ) - > s32 [ 82 , 8 ] { <nl> + % parameter . 0 = s32 [ 81 , 8 ] { 1 , 0 } parameter ( 0 ) <nl> + % parameter . 1 = s32 [ ] parameter ( 1 ) <nl> + ROOT % reduce - window = s32 [ 82 , 8 ] { 1 , 0 } reduce - window ( s32 [ 81 , 8 ] { 1 , 0 } % parameter . 0 , s32 [ ] % parameter . 1 ) , window = { size = 1x1 pad = 0_1x0_0 } , to_apply = % identity . 
pad_to_reduce_window <nl> } <nl> <nl> ) " ; <nl> mmm a / tensorflow / compiler / xla / tests / reshape_motion_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / reshape_motion_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / tests / client_library_test_base . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> # include " tensorflow / compiler / xla / tests / test_macros . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / lib / gtl / array_slice . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / types . h " <nl> mmm a / tensorflow / compiler / xla / tests / select_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / select_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / tests / client_library_test_base . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> # include " tensorflow / compiler / xla / tests / test_macros . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> mmm a / tensorflow / compiler / xla / tests / transpose_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / transpose_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / tests / hlo_test_base . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> # include " tensorflow / compiler / xla / tests / test_macros . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> namespace xla { <nl> mmm a / tensorflow / compiler / xla / tests / unary_op_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / unary_op_test . cc <nl> int UnaryOpTest : : inf < int > ( ) { <nl> return 2147483647 ; <nl> } <nl> <nl> + template < > <nl> + int64 UnaryOpTest : : inf < int64 > ( ) { <nl> + return 0x7FFFFFFFFFFFFFFFl ; <nl> + } <nl> + <nl> template < > <nl> void UnaryOpTest : : AbsTestHelper < complex64 > ( ) { <nl> XlaBuilder builder ( TestName ( ) ) ; <nl> XLA_TEST_F ( UnaryOpTest , SignTestR0 ) { <nl> <nl> XLA_TEST_F ( UnaryOpTest , SignTestR1 ) { <nl> SignTestHelper < int > ( ) ; <nl> + SignTestHelper < int64 > ( ) ; <nl> SignTestHelper < float > ( ) ; <nl> SignTestHelper < complex64 > ( ) ; <nl> } <nl> mmm a / tensorflow / contrib / android / BUILD <nl> ppp b / tensorflow / contrib / android / BUILD <nl> cc_binary ( <nl> " - s " , <nl> " - Wl , - - gc - sections " , <nl> " - Wl , - - version - script " , # This line must be directly followed by LINKER_SCRIPT . <nl> - LINKER_SCRIPT , <nl> + " $ ( location { } ) " . format ( LINKER_SCRIPT ) , <nl> ] ) , <nl> linkshared = 1 , <nl> linkstatic = 1 , <nl> mmm a / tensorflow / contrib / checkpoint / __init__ . py <nl> ppp b / tensorflow / contrib / checkpoint / __init__ . py <nl> <nl> from tensorflow . contrib . checkpoint . python . split_dependency import split_dependency <nl> from tensorflow . contrib . checkpoint . python . visualize import dot_graph_from_checkpoint <nl> from tensorflow . core . protobuf . checkpointable_object_graph_pb2 import CheckpointableObjectGraph <nl> - from tensorflow . python . training . checkpointable import Checkpointable <nl> - from tensorflow . 
python . training . checkpointable import NoDependency <nl> - from tensorflow . python . training . checkpointable_utils import object_metadata <nl> + from tensorflow . python . training . checkpointable . base import Checkpointable <nl> + from tensorflow . python . training . checkpointable . base import NoDependency <nl> + from tensorflow . python . training . checkpointable . util import object_metadata <nl> <nl> from tensorflow . python . util . all_util import remove_undocumented <nl> <nl> mmm a / tensorflow / contrib / checkpoint / python / BUILD <nl> ppp b / tensorflow / contrib / checkpoint / python / BUILD <nl> py_library ( <nl> srcs = [ " containers . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> visibility = [ " / / tensorflow : internal " ] , <nl> - deps = [ " / / tensorflow / python : checkpointable " ] , <nl> + deps = [ " / / tensorflow / python / training / checkpointable : base " ] , <nl> ) <nl> <nl> py_test ( <nl> py_test ( <nl> srcs = [ " containers_test . py " ] , <nl> deps = [ <nl> " : containers " , <nl> - " / / tensorflow / python : checkpointable " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_test_lib " , <nl> " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python : training " , <nl> + " / / tensorflow / python / training / checkpointable : base " , <nl> " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / contrib / checkpoint / python / containers . py <nl> ppp b / tensorflow / contrib / checkpoint / python / containers . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - from tensorflow . python . training import checkpointable as checkpointable_lib <nl> + from tensorflow . python . training . checkpointable import base as checkpointable_lib <nl> <nl> <nl> class UniqueNameTracker ( checkpointable_lib . CheckpointableBase ) : <nl> mmm a / tensorflow / contrib / checkpoint / python / containers_test . py <nl> ppp b / tensorflow / contrib / checkpoint / python / containers_test . py <nl> <nl> from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . platform import test <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> - from tensorflow . python . training . checkpointable_utils import object_metadata <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> class UniqueNameTrackerTests ( test . TestCase ) : <nl> def __init__ ( self ) : <nl> checkpoint_directory = self . get_temp_dir ( ) <nl> checkpoint_prefix = os . path . join ( checkpoint_directory , " ckpt " ) <nl> save_path = checkpoint . save ( checkpoint_prefix ) <nl> - metadata = object_metadata ( save_path ) <nl> + metadata = checkpointable_utils . object_metadata ( save_path ) <nl> dependency_names = [ ] <nl> for node in metadata . nodes : <nl> for child in node . children : <nl> mmm a / tensorflow / contrib / checkpoint / python / split_dependency . py <nl> ppp b / tensorflow / contrib / checkpoint / python / split_dependency . py <nl> <nl> import functools <nl> <nl> from tensorflow . python . ops import control_flow_ops <nl> - from tensorflow . python . training import checkpointable as checkpointable <nl> from tensorflow . python . 
training import saver as saver_lib <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> <nl> <nl> class _CallbackSaveable ( saver_lib . BaseSaverBuilder . SaveableObject ) : <nl> mmm a / tensorflow / contrib / checkpoint / python / split_dependency_test . py <nl> ppp b / tensorflow / contrib / checkpoint / python / split_dependency_test . py <nl> <nl> from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> def _split_variable_closure ( variable ) : <nl> mmm a / tensorflow / contrib / checkpoint / python / visualize . py <nl> ppp b / tensorflow / contrib / checkpoint / python / visualize . py <nl> <nl> from __future__ import print_function <nl> <nl> from tensorflow . python import pywrap_tensorflow <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> def dot_graph_from_checkpoint ( save_path ) : <nl> mmm a / tensorflow / contrib / checkpoint / python / visualize_test . py <nl> ppp b / tensorflow / contrib / checkpoint / python / visualize_test . py <nl> <nl> from tensorflow . python . keras . _impl . keras . layers import core <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . training import adam <nl> - from tensorflow . python . training import checkpointable_utils <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> try : <nl> import pydot # pylint : disable = g - import - not - at - top <nl> mmm a / tensorflow / contrib / cmake / python_modules . txt <nl> ppp b / tensorflow / contrib / cmake / python_modules . txt <nl> tensorflow / python / summary <nl> tensorflow / python / summary / writer <nl> tensorflow / python / tools <nl> tensorflow / python / training <nl> + tensorflow / python / training / checkpointable <nl> tensorflow / python / user_ops <nl> tensorflow / python / util <nl> tensorflow / python / util / protobuf <nl> mmm a / tensorflow / contrib / coder / kernels / range_coder_ops_test . cc <nl> ppp b / tensorflow / contrib / coder / kernels / range_coder_ops_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / common_runtime / shape_refiner . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> mmm a / tensorflow / contrib / compiler / jit_test . py <nl> ppp b / tensorflow / contrib / compiler / jit_test . py <nl> <nl> from tensorflow . python . framework import op_def_registry <nl> from tensorflow . python . 
framework import ops <nl> from tensorflow . python . framework import random_seed <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import gradients <nl> from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import math_ops <nl> def mulop ( x1 , x2 ) : <nl> self . assertEqual ( b " jit_scope_0 " , func_attrs [ " _XlaScope " ] . s ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CompilationEnabledInGradientTest ( test . TestCase ) : <nl> <nl> def testCompilationInGradient ( self ) : <nl> mmm a / tensorflow / contrib / cudnn_rnn / python / kernel_tests / cudnn_rnn_test . py <nl> ppp b / tensorflow / contrib / cudnn_rnn / python / kernel_tests / cudnn_rnn_test . py <nl> <nl> from tensorflow . python . platform import tf_logging as logging <nl> from tensorflow . python . training import adagrad <nl> from tensorflow . python . training import adam <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import gradient_descent <nl> from tensorflow . python . training import momentum <nl> from tensorflow . python . training import rmsprop <nl> from tensorflow . python . training import saver as saver_lib <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> CUDNN_LSTM = cudnn_rnn_ops . CUDNN_LSTM <nl> mmm a / tensorflow / contrib / cudnn_rnn / python / ops / cudnn_rnn_ops . py <nl> ppp b / tensorflow / contrib / cudnn_rnn / python / ops / cudnn_rnn_ops . py <nl> <nl> from tensorflow . python . ops import rnn_cell_impl <nl> from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import variable_scope as vs <nl> - from tensorflow . python . training import checkpointable as checkpointable_lib <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import base as checkpointable_lib <nl> <nl> CUDNN_RNN_UNIDIRECTION = " unidirectional " <nl> CUDNN_RNN_BIDIRECTION = " bidirectional " <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / BUILD <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / BUILD <nl> py_test ( <nl> srcs = [ " batch_dataset_op_test . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> tags = [ <nl> - " no_oss " , <nl> " no_pip " , <nl> ] , <nl> deps = [ <nl> py_test ( <nl> name = " reader_dataset_ops_test " , <nl> size = " medium " , <nl> srcs = [ " reader_dataset_ops_test . py " ] , <nl> + shard_count = 4 , <nl> srcs_version = " PY2AND3 " , <nl> tags = [ " no_pip " ] , <nl> deps = [ <nl> py_test ( <nl> " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : lib " , <nl> " / / tensorflow / python : parsing_ops " , <nl> + " / / tensorflow / python : string_ops " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python / data / ops : iterator_ops " , <nl> " / / third_party / py / numpy " , <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> <nl> from tensorflow . python . lib . io import python_io <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import parsing_ops <nl> + from tensorflow . python . ops import string_ops <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . 
util import compat <nl> <nl> def testTFRecordWithCompressionCore ( self ) : <nl> lambda : self . _build_iterator_graph ( num_epochs * 2 ) , num_outputs ) <nl> <nl> <nl> + def _interleave ( iterators , cycle_length ) : <nl> + pending_iterators = iterators <nl> + open_iterators = [ ] <nl> + num_open = 0 <nl> + for i in range ( cycle_length ) : <nl> + if pending_iterators : <nl> + open_iterators . append ( pending_iterators . pop ( 0 ) ) <nl> + num_open + = 1 <nl> + <nl> + while num_open : <nl> + for i in range ( min ( cycle_length , len ( open_iterators ) ) ) : <nl> + if open_iterators [ i ] is None : <nl> + continue <nl> + try : <nl> + yield next ( open_iterators [ i ] ) <nl> + except StopIteration : <nl> + if pending_iterators : <nl> + open_iterators [ i ] = pending_iterators . pop ( 0 ) <nl> + else : <nl> + open_iterators [ i ] = None <nl> + num_open - = 1 <nl> + <nl> + <nl> class ReadBatchFeaturesTest ( test . TestCase ) : <nl> <nl> def setUp ( self ) : <nl> def _next_record ( file_indices ) : <nl> yield j , i <nl> <nl> def _next_record_interleaved ( file_indices , cycle_length ) : <nl> - return self . _interleave ( [ _next_record ( [ i ] ) for i in file_indices ] , <nl> - cycle_length ) <nl> + return _interleave ( [ _next_record ( [ i ] ) for i in file_indices ] , <nl> + cycle_length ) <nl> <nl> file_batch = [ ] <nl> keywords_batch_indices = [ ] <nl> def _next_record_interleaved ( file_indices , cycle_length ) : <nl> [ len ( file_batch ) , keywords_batch_max_len ] , record_batch <nl> ] <nl> <nl> - def _interleave ( self , iterators , cycle_length ) : <nl> - pending_iterators = iterators <nl> - open_iterators = [ ] <nl> - num_open = 0 <nl> - for i in range ( cycle_length ) : <nl> - if pending_iterators : <nl> - open_iterators . append ( pending_iterators . pop ( 0 ) ) <nl> - num_open + = 1 <nl> - <nl> - while num_open : <nl> - for i in range ( min ( cycle_length , len ( open_iterators ) ) ) : <nl> - if open_iterators [ i ] is None : <nl> - continue <nl> - try : <nl> - yield next ( open_iterators [ i ] ) <nl> - except StopIteration : <nl> - if pending_iterators : <nl> - open_iterators [ i ] = pending_iterators . pop ( 0 ) <nl> - else : <nl> - open_iterators [ i ] = None <nl> - num_open - = 1 <nl> - <nl> def _verify_records ( self , <nl> sess , <nl> batch_size , <nl> def testMakeCSVDataset_withShuffle ( self ) : <nl> self . assertFalse ( all_equal ) <nl> <nl> <nl> + class MakeTFRecordDatasetTest ( TFRecordDatasetTestBase ) : <nl> + <nl> + def _next_expected_batch ( self , <nl> + file_indices , <nl> + batch_size , <nl> + num_epochs , <nl> + cycle_length , <nl> + drop_final_batch , <nl> + use_parser_fn ) : <nl> + <nl> + def _next_record ( file_indices ) : <nl> + for j in file_indices : <nl> + for i in range ( self . _num_records ) : <nl> + yield j , i <nl> + <nl> + def _next_record_interleaved ( file_indices , cycle_length ) : <nl> + return _interleave ( [ _next_record ( [ i ] ) for i in file_indices ] , <nl> + cycle_length ) <nl> + <nl> + record_batch = [ ] <nl> + batch_index = 0 <nl> + for _ in range ( num_epochs ) : <nl> + if cycle_length = = 1 : <nl> + next_records = _next_record ( file_indices ) <nl> + else : <nl> + next_records = _next_record_interleaved ( file_indices , cycle_length ) <nl> + for f , r in next_records : <nl> + record = self . _record ( f , r ) <nl> + if use_parser_fn : <nl> + record = record [ 1 : ] <nl> + record_batch . 
append ( record ) <nl> + batch_index + = 1 <nl> + if len ( record_batch ) = = batch_size : <nl> + yield record_batch <nl> + record_batch = [ ] <nl> + batch_index = 0 <nl> + if record_batch and not drop_final_batch : <nl> + yield record_batch <nl> + <nl> + def _verify_records ( self , <nl> + sess , <nl> + outputs , <nl> + batch_size , <nl> + file_index , <nl> + num_epochs , <nl> + interleave_cycle_length , <nl> + drop_final_batch , <nl> + use_parser_fn ) : <nl> + if file_index is not None : <nl> + file_indices = [ file_index ] <nl> + else : <nl> + file_indices = range ( self . _num_files ) <nl> + <nl> + for expected_batch in self . _next_expected_batch ( <nl> + file_indices , batch_size , num_epochs , interleave_cycle_length , <nl> + drop_final_batch , use_parser_fn ) : <nl> + actual_batch = sess . run ( outputs ) <nl> + self . assertAllEqual ( expected_batch , actual_batch ) <nl> + <nl> + def _read_test ( self , batch_size , num_epochs , file_index = None , <nl> + num_parallel_reads = 1 , drop_final_batch = False , parser_fn = False ) : <nl> + if file_index is None : <nl> + file_pattern = self . test_filenames <nl> + else : <nl> + file_pattern = self . test_filenames [ file_index ] <nl> + <nl> + if parser_fn : <nl> + fn = lambda x : string_ops . substr ( x , 1 , 999 ) <nl> + else : <nl> + fn = None <nl> + <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + outputs = readers . make_tf_record_dataset ( <nl> + file_pattern = file_pattern , <nl> + num_epochs = num_epochs , <nl> + batch_size = batch_size , <nl> + parser_fn = fn , <nl> + num_parallel_reads = num_parallel_reads , <nl> + drop_final_batch = drop_final_batch , <nl> + shuffle = False ) . make_one_shot_iterator ( ) . get_next ( ) <nl> + self . _verify_records ( <nl> + sess , outputs , batch_size , file_index , num_epochs = num_epochs , <nl> + interleave_cycle_length = num_parallel_reads , <nl> + drop_final_batch = drop_final_batch , use_parser_fn = parser_fn ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( outputs ) <nl> + <nl> + def testRead ( self ) : <nl> + for batch_size in [ 1 , 2 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + # Basic test : read from file 0 . <nl> + self . _read_test ( batch_size , num_epochs , 0 ) <nl> + <nl> + # Basic test : read from file 1 . <nl> + self . _read_test ( batch_size , num_epochs , 1 ) <nl> + <nl> + # Basic test : read from both files . <nl> + self . _read_test ( batch_size , num_epochs ) <nl> + <nl> + # Basic test : read from both files , with parallel reads . <nl> + self . _read_test ( batch_size , num_epochs , num_parallel_reads = 8 ) <nl> + <nl> + def testDropFinalBatch ( self ) : <nl> + for batch_size in [ 1 , 2 , 10 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + # Read from file 0 . <nl> + self . _read_test ( batch_size , num_epochs , 0 , drop_final_batch = True ) <nl> + <nl> + # Read from both files . <nl> + self . _read_test ( batch_size , num_epochs , drop_final_batch = True ) <nl> + <nl> + # Read from both files , with parallel reads . <nl> + self . _read_test ( batch_size , num_epochs , num_parallel_reads = 8 , <nl> + drop_final_batch = True ) <nl> + <nl> + def testParserFn ( self ) : <nl> + for batch_size in [ 1 , 2 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + for drop_final_batch in [ False , True ] : <nl> + self . _read_test ( batch_size , num_epochs , parser_fn = True , <nl> + drop_final_batch = drop_final_batch ) <nl> + self . 
_read_test ( batch_size , num_epochs , num_parallel_reads = 8 , <nl> + parser_fn = True , drop_final_batch = drop_final_batch ) <nl> + <nl> + def _shuffle_test ( self , batch_size , num_epochs , num_parallel_reads = 1 , <nl> + seed = None ) : <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + dataset = readers . make_tf_record_dataset ( <nl> + file_pattern = self . test_filenames , <nl> + num_epochs = num_epochs , <nl> + batch_size = batch_size , <nl> + num_parallel_reads = num_parallel_reads , <nl> + shuffle = True , <nl> + shuffle_seed = seed ) <nl> + iterator = dataset . make_initializable_iterator ( ) <nl> + next_element = iterator . get_next ( ) <nl> + <nl> + sess . run ( iterator . initializer ) <nl> + first_batches = [ ] <nl> + try : <nl> + while True : <nl> + first_batches . append ( sess . run ( next_element ) ) <nl> + except errors . OutOfRangeError : <nl> + pass <nl> + <nl> + sess . run ( iterator . initializer ) <nl> + second_batches = [ ] <nl> + try : <nl> + while True : <nl> + second_batches . append ( sess . run ( next_element ) ) <nl> + except errors . OutOfRangeError : <nl> + pass <nl> + <nl> + self . assertEqual ( len ( first_batches ) , len ( second_batches ) ) <nl> + if seed is not None : <nl> + # if you set a seed , should get the same results <nl> + for i in range ( len ( first_batches ) ) : <nl> + self . assertAllEqual ( first_batches [ i ] , second_batches [ i ] ) <nl> + <nl> + expected = [ ] <nl> + for f in range ( self . _num_files ) : <nl> + for r in range ( self . _num_records ) : <nl> + expected . extend ( [ self . _record ( f , r ) ] * num_epochs ) <nl> + <nl> + for batches in ( first_batches , second_batches ) : <nl> + actual = [ ] <nl> + for b in batches : <nl> + actual . extend ( b ) <nl> + self . assertAllEqual ( sorted ( expected ) , sorted ( actual ) ) <nl> + <nl> + def testShuffle ( self ) : <nl> + for batch_size in [ 1 , 2 ] : <nl> + for num_epochs in [ 1 , 3 ] : <nl> + for num_parallel_reads in [ 1 , 2 ] : <nl> + # Test that all expected elements are produced <nl> + self . _shuffle_test ( batch_size , num_epochs , num_parallel_reads ) <nl> + # Test that elements are produced in a consistent order if <nl> + # you specify a seed . <nl> + self . _shuffle_test ( batch_size , num_epochs , num_parallel_reads , <nl> + seed = 21345 ) <nl> + <nl> + <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl> mmm a / tensorflow / contrib / data / python / ops / readers . py <nl> ppp b / tensorflow / contrib / data / python / ops / readers . py <nl> def _get_sorted_col_indices ( select_columns , column_names ) : <nl> return result <nl> <nl> <nl> + def _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) : <nl> + " " " Optionally shuffle and repeat dataset , as requested . " " " <nl> + if num_epochs ! = 1 and shuffle : <nl> + # Use shuffle_and_repeat for perf <nl> + return dataset . apply ( <nl> + shuffle_ops . shuffle_and_repeat ( shuffle_buffer_size , num_epochs , <nl> + shuffle_seed ) ) <nl> + elif shuffle : <nl> + return dataset . shuffle ( shuffle_buffer_size , shuffle_seed ) <nl> + elif num_epochs ! = 1 : <nl> + return dataset . 
repeat ( num_epochs ) <nl>
+ return dataset <nl>
+ <nl>
+ <nl>
+ def make_tf_record_dataset ( <nl>
+ file_pattern , <nl>
+ batch_size , <nl>
+ parser_fn = None , <nl>
+ num_epochs = None , <nl>
+ shuffle = True , <nl>
+ shuffle_buffer_size = None , <nl>
+ shuffle_seed = None , <nl>
+ prefetch_buffer_size = None , <nl>
+ num_parallel_reads = None , <nl>
+ num_parallel_parser_calls = None , <nl>
+ drop_final_batch = False ) : <nl>
+ " " " Reads and optionally parses TFRecord files into a dataset . <nl>
+ <nl>
+ Provides common functionality such as batching , optional parsing , shuffling , <nl>
+ and performant defaults . <nl>
+ <nl>
+ Args : <nl>
+ file_pattern : List of files or patterns of TFRecord file paths . <nl>
+ See @ { tf . gfile . Glob } for pattern rules . <nl>
+ batch_size : An int representing the number of records to combine <nl>
+ in a single batch . <nl>
+ parser_fn : ( Optional . ) A function accepting string input to parse <nl>
+ and process the record contents . This function must map records <nl>
+ to components of a fixed shape , so they may be batched . By <nl>
+ default , uses the record contents unmodified . <nl>
+ num_epochs : ( Optional . ) An int specifying the number of times this <nl>
+ dataset is repeated . If None ( the default ) , cycles through the <nl>
+ dataset forever . <nl>
+ shuffle : ( Optional . ) A bool that indicates whether the input <nl>
+ should be shuffled . Defaults to ` True ` . <nl>
+ shuffle_buffer_size : ( Optional . ) Buffer size to use for <nl>
+ shuffling . A large buffer size ensures better shuffling , but <nl>
+ increases memory usage and startup time . <nl>
+ shuffle_seed : ( Optional . ) Randomization seed to use for shuffling . <nl>
+ prefetch_buffer_size : ( Optional . ) An int specifying the number of <nl>
+ feature batches to prefetch for performance improvement . <nl>
+ Defaults to auto - tune . Set to 0 to disable prefetching . <nl>
+ num_parallel_reads : ( Optional . ) Number of threads used to read <nl>
+ records from files . By default or if set to a value > 1 , the <nl>
+ results will be interleaved . <nl>
+ num_parallel_parser_calls : ( Optional . ) Number of records to <nl>
+ parse in parallel . Defaults to an automatic selection . <nl>
+ drop_final_batch : ( Optional . ) Whether the last batch should be <nl>
+ dropped in case its size is smaller than ` batch_size ` ; the <nl>
+ default behavior is not to drop the smaller batch . <nl>
+ <nl>
+ Returns : <nl>
+ A dataset , where each element matches the output of ` parser_fn ` <nl>
+ except it will have an additional leading ` batch - size ` dimension , <nl>
+ or a ` batch_size ` - length 1 - D tensor of strings if ` parser_fn ` is <nl>
+ unspecified . <nl>
+ " " " <nl>
+ files = dataset_ops . Dataset . list_files ( <nl>
+ file_pattern , shuffle = shuffle , seed = shuffle_seed ) <nl>
+ <nl>
+ if num_parallel_reads is None : <nl>
+ # Note : We considered auto - tuning this value , but there is a concern <nl>
+ # that this affects the mixing of records from different files , which <nl>
+ # could affect training convergence / accuracy , so we are defaulting to <nl>
+ # a constant for now . <nl>
+ num_parallel_reads = 24 <nl>
+ dataset = core_readers . 
TFRecordDataset ( <nl> + files , num_parallel_reads = num_parallel_reads ) <nl> + <nl> + if shuffle_buffer_size is None : <nl> + # TODO ( josh11b ) : Auto - tune this value when not specified <nl> + shuffle_buffer_size = 10000 <nl> + dataset = _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) <nl> + <nl> + if parser_fn is None : <nl> + if drop_final_batch : <nl> + dataset = dataset . apply ( batching . batch_and_drop_remainder ( batch_size ) ) <nl> + else : <nl> + dataset = dataset . batch ( batch_size ) <nl> + else : <nl> + # TODO ( josh11b ) : if num_parallel_parser_calls is None , use some function <nl> + # of num cores instead of map_and_batch ' s default behavior of one batch . <nl> + dataset = dataset . apply ( batching . map_and_batch ( <nl> + parser_fn , batch_size , num_parallel_calls = num_parallel_parser_calls , <nl> + drop_remainder = drop_final_batch ) ) <nl> + <nl> + if prefetch_buffer_size is None : <nl> + prefetch_buffer_size = - 1 # tf . config . data . AUTOTUNE <nl> + if prefetch_buffer_size = = 0 : <nl> + return dataset <nl> + else : <nl> + return dataset . prefetch ( buffer_size = prefetch_buffer_size ) <nl> + <nl> + <nl> def make_csv_dataset ( <nl> file_pattern , <nl> batch_size , <nl> def make_csv_dataset ( <nl> Args : <nl> file_pattern : List of files or patterns of file paths containing CSV <nl> records . See @ { tf . gfile . Glob } for pattern rules . <nl> - batch_size : An int representing the number of consecutive elements of this <nl> - dataset to combine in a single batch . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> column_names : An optional list of strings that corresponds to the CSV <nl> columns , in order . One per column of the input record . If this is not <nl> provided , infers the column names from the first row of the records . <nl> def make_csv_dataset ( <nl> If None , cycles through the dataset forever . <nl> shuffle : A bool that indicates whether the input should be shuffled . <nl> shuffle_buffer_size : Buffer size to use for shuffling . A large buffer size <nl> - ensures better shuffling , but would increase memory usage and startup <nl> - time . <nl> + ensures better shuffling , but increases memory usage and startup time . <nl> shuffle_seed : Randomization seed to use for shuffling . <nl> prefetch_buffer_size : An int specifying the number of feature batches to <nl> prefetch for performance improvement . Recommended value is the number of <nl> def decode_csv ( line ) : <nl> interleave_ops . parallel_interleave ( <nl> filename_to_dataset , cycle_length = num_parallel_reads , sloppy = sloppy ) ) <nl> <nl> - if num_epochs ! = 1 and shuffle : <nl> - # Use shuffle_and_repeat for perf <nl> - dataset = dataset . apply ( <nl> - shuffle_ops . shuffle_and_repeat ( shuffle_buffer_size , num_epochs , <nl> - shuffle_seed ) ) <nl> - elif shuffle : <nl> - dataset = dataset . shuffle ( shuffle_buffer_size , shuffle_seed ) <nl> - elif num_epochs ! = 1 : <nl> - dataset = dataset . repeat ( num_epochs ) <nl> + dataset = _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) <nl> <nl> # Use map_and_batch for perf <nl> # TODO ( b / 76425672 ) : use num_parallel_calls for better performance tuning when <nl> def make_batched_features_dataset ( file_pattern , <nl> Args : <nl> file_pattern : List of files or patterns of file paths containing <nl> ` Example ` records . See ` tf . gfile . 
Glob ` for pattern rules . <nl> - batch_size : An int representing the number of consecutive elements of this <nl> - dataset to combine in a single batch . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> features : A ` dict ` mapping feature keys to ` FixedLenFeature ` or <nl> ` VarLenFeature ` values . See ` tf . parse_example ` . <nl> reader : A function or class that can be <nl> def make_batched_features_dataset ( file_pattern , <nl> dataset = dataset . map ( lambda _ , v : v ) <nl> <nl> # Apply dataset repeat and shuffle transformations . <nl> - repeat_dataset = ( num_epochs ! = 1 ) <nl> - if repeat_dataset and shuffle : <nl> - # Used fused shuffle_and_repeat operation for better performance <nl> - dataset = dataset . apply ( <nl> - shuffle_ops . shuffle_and_repeat ( shuffle_buffer_size , num_epochs , <nl> - shuffle_seed ) ) <nl> - elif repeat_dataset : <nl> - dataset = dataset . repeat ( num_epochs ) <nl> - elif shuffle : <nl> - dataset = dataset . shuffle ( shuffle_buffer_size , shuffle_seed ) <nl> + dataset = _maybe_shuffle_and_repeat ( <nl> + dataset , num_epochs , shuffle , shuffle_buffer_size , shuffle_seed ) <nl> <nl> if drop_final_batch : <nl> dataset = dataset . apply ( batching . batch_and_drop_remainder ( batch_size ) ) <nl> def read_batch_features ( file_pattern , <nl> Args : <nl> file_pattern : List of files or patterns of file paths containing <nl> ` Example ` records . See ` tf . gfile . Glob ` for pattern rules . <nl> - batch_size : An int representing the number of consecutive elements of this <nl> - dataset to combine in a single batch . <nl> + batch_size : An int representing the number of records to combine <nl> + in a single batch . <nl> features : A ` dict ` mapping feature keys to ` FixedLenFeature ` or <nl> ` VarLenFeature ` values . See ` tf . parse_example ` . <nl> reader : A function or class that can be <nl> mmm a / tensorflow / contrib / distribute / python / BUILD <nl> ppp b / tensorflow / contrib / distribute / python / BUILD <nl> py_library ( <nl> " / / tensorflow / contrib / data / python / ops : batching " , <nl> " / / tensorflow / contrib / eager / python : datasets " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : checkpointable " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : device_util " , <nl> " / / tensorflow / python : distribute " , <nl> py_library ( <nl> " / / tensorflow / python : training " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / training / checkpointable : base " , <nl> " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> py_library ( <nl> " : one_device_strategy " , <nl> " : tpu_strategy " , <nl> " / / tensorflow / contrib / optimizer_v2 : training " , <nl> + " / / tensorflow / python : distribute " , <nl> " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : training " , <nl> " / / tensorflow / python : util " , <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> + cuda_py_test ( <nl> name = " cross_tower_ops_test " , <nl> srcs = [ " cross_tower_ops_test . 
py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - tags = [ <nl> - " no_pip " , <nl> - ] , <nl> - deps = [ <nl> + additional_deps = [ <nl> " : combinations " , <nl> " : cross_tower_ops " , <nl> " : values " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : constant_op " , <nl> " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python / eager : context " , <nl> " / / tensorflow / python / eager : test " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + tags = [ <nl> + " multi_and_single_gpu " , <nl> + " no_pip " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / distribute / python / combinations . py <nl> ppp b / tensorflow / contrib / distribute / python / combinations . py <nl> def testOptimizer ( self , optimizer ) : <nl> from tensorflow . python . eager import context <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . training import adam <nl> + from tensorflow . python . training import distribute as distribute_lib <nl> from tensorflow . python . training import gradient_descent <nl> from tensorflow . python . util import tf_inspect <nl> <nl> def required_tpu ( self ) : <nl> return self . _required_tpu <nl> <nl> <nl> + default_strategy = NamedDistribution ( <nl> + " Default " , <nl> + distribute_lib . _default_distribution_strategy , # pylint : disable = protected - access <nl> + required_gpus = None ) <nl> one_device_strategy = NamedDistribution ( <nl> " OneDeviceCPU " , one_device_strategy . OneDeviceStrategy ( " / cpu : 0 " ) , <nl> - None ) <nl> + required_gpus = None ) <nl> tpu_strategy_single_iteration = NamedDistribution ( <nl> " TPUSingleIteration " , <nl> tpu_strategy . TPUStrategy ( iterations_per_step = 1 ) , <nl> required_tpu = True ) <nl> tpu_strategy = NamedDistribution ( <nl> " TPU " , tpu_strategy . TPUStrategy ( ) , required_tpu = True ) <nl> + # Note that we disable prefetching for testing since prefetching makes <nl> + # the input non - deterministic . <nl> mirrored_strategy_with_gpu_and_cpu = NamedDistribution ( <nl> " MirroredCPUAndGPU " , <nl> - mirrored_strategy . MirroredStrategy ( [ " / gpu : 0 " , " / cpu : 0 " ] ) , 1 ) <nl> - mirrored_strategy_without_prefetch = NamedDistribution ( <nl> - " MirroredCPUAndGPUNoPrefetch " , <nl> mirrored_strategy . MirroredStrategy ( <nl> - [ " / gpu : 0 " , " / cpu : 0 " ] , prefetch_on_device = False ) , 1 ) <nl> + [ " / gpu : 0 " , " / cpu : 0 " ] , prefetch_on_device = False ) , <nl> + required_gpus = 1 ) <nl> mirrored_strategy_with_two_gpus = NamedDistribution ( <nl> " Mirrored2GPUs " , <nl> - mirrored_strategy . MirroredStrategy ( [ " / gpu : 0 " , " / gpu : 1 " ] ) , 2 ) <nl> + mirrored_strategy . MirroredStrategy ( <nl> + [ " / gpu : 0 " , " / gpu : 1 " ] , prefetch_on_device = False ) , <nl> + required_gpus = 2 ) <nl> <nl> adam_optimizer_v1_fn = NamedObject ( <nl> " AdamV1 " , lambda : adam . AdamOptimizer ( 0 . 2 , epsilon = 1 ) ) <nl> mmm a / tensorflow / contrib / distribute / python / minimize_loss_test . py <nl> ppp b / tensorflow / contrib / distribute / python / minimize_loss_test . 
py <nl> def testTrainNetworkWithBatchNorm ( self , distribution , optimizer_fn , momentum , <nl> renorm = renorm , <nl> update_ops_in_tower_mode = not update_ops_in_cross_tower_mode ) <nl> <nl> - # Disable prefetching since that makes the specific input on each device <nl> - # to be non deterministic , and this test relies on specific input being <nl> - # on each device . <nl> + # Make sure prefetching is disabled since that makes the <nl> + # specific input on each device to be non deterministic , and <nl> + # this test relies on specific input being on each device . <nl> if isinstance ( distribution , mirrored_strategy . MirroredStrategy ) : <nl> - distribution . _prefetch_on_device = False <nl> + self . assertFalse ( distribution . _prefetch_on_device ) <nl> iterator = distribution . distribute_dataset ( <nl> dataset_fn ) . make_one_shot_iterator ( ) <nl> <nl> mmm a / tensorflow / contrib / distribute / python / mirrored_strategy_multigpu_test . py <nl> ppp b / tensorflow / contrib / distribute / python / mirrored_strategy_multigpu_test . py <nl> def run_fn ( device_id ) : <nl> self . assertEqual ( expected , self . evaluate ( unwrapped [ 0 ] ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MirroredStrategyVariableCreationTest ( test . TestCase ) : <nl> <nl> config = config_pb2 . ConfigProto ( ) <nl> mmm a / tensorflow / contrib / distribute / python / mirrored_strategy_test . py <nl> ppp b / tensorflow / contrib / distribute / python / mirrored_strategy_test . py <nl> <nl> from tensorflow . python . training import distribute as distribute_lib <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MirroredOneCPUDistributionTest ( strategy_test_lib . DistributionTestBase ) : <nl> <nl> def _get_distribution_strategy ( self ) : <nl> def testCallAndMergeExceptions ( self ) : <nl> self . _test_call_and_merge_exceptions ( self . _get_distribution_strategy ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class VariableCreatorStackTest ( test . TestCase ) : <nl> <nl> def testCreatorStacksAreThreadLocal ( self ) : <nl> mmm a / tensorflow / contrib / distribute / python / multi_worker_strategy_test . py <nl> ppp b / tensorflow / contrib / distribute / python / multi_worker_strategy_test . py <nl> <nl> from tensorflow . python . eager import test <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . training import server_lib <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MultiWorkerStrategyTest ( multi_worker_test_base . MultiWorkerTestBase , <nl> strategy_test_lib . DistributionTestBase ) : <nl> <nl> mmm a / tensorflow / contrib / distribute / python / one_device_strategy_test . py <nl> ppp b / tensorflow / contrib / distribute / python / one_device_strategy_test . py <nl> <nl> from tensorflow . python . framework import test_util <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OneDeviceStrategyTest ( strategy_test_lib . DistributionTestBase ) : <nl> <nl> def _get_distribution_strategy ( self ) : <nl> mmm a / tensorflow / contrib / distribute / python / shared_variable_creator_test . py <nl> ppp b / tensorflow / contrib / distribute / python / shared_variable_creator_test . py <nl> def testWrongPatterns ( self ) : <nl> self . assertEquals ( " foo_a " , self . _canonicalize ( " foo_a " ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SharedVariableCreatorTest ( test . TestCase ) : <nl> <nl> @ test_util . 
run_in_graph_and_eager_modes ( ) <nl> mmm a / tensorflow / contrib / distribute / python / values . py <nl> ppp b / tensorflow / contrib / distribute / python / values . py <nl> <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import math_ops <nl> - from tensorflow . python . training import checkpointable <nl> from tensorflow . python . training import device_util <nl> from tensorflow . python . training import distribute as distribute_lib <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import nest <nl> <nl> <nl> mmm a / tensorflow / contrib / distribute / python / values_test . py <nl> ppp b / tensorflow / contrib / distribute / python / values_test . py <nl> <nl> from tensorflow . python . util import nest <nl> <nl> <nl> - @ test_util . with_c_api <nl> class DistributedValuesTest ( test . TestCase ) : <nl> <nl> def testGetEager ( self ) : <nl> def testCanonicalization ( self ) : <nl> v = values . DistributedValues ( { " / device : cpu : 0 " : 42 } ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class DistributedDelegateTest ( test . TestCase ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def _make_mirrored ( ) : <nl> return v , devices , mirrored <nl> <nl> <nl> - @ test_util . with_c_api <nl> class RegroupAndSelectDeviceTest ( test . TestCase ) : <nl> <nl> def _is_per_device ( self , result , expected , klass = values . PerDevice ) : <nl> def testNamedTupleEstimatorSpec ( self ) : <nl> merged_estimator_spec ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class PerDeviceDatasetTest ( test . TestCase ) : <nl> <nl> config = config_pb2 . ConfigProto ( ) <nl> def testValueErrorForIterator ( self ) : <nl> multi_worker_iterator . get_next ( ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MirroredVariableTest ( test . TestCase ) : <nl> <nl> config = config_pb2 . ConfigProto ( ) <nl> def _make_tower_local ( method ) : <nl> return v , tower_local <nl> <nl> <nl> - @ test_util . with_c_api <nl> class TowerLocalVariableTest ( test . TestCase ) : <nl> <nl> config = config_pb2 . ConfigProto ( ) <nl> mmm a / tensorflow / contrib / distributions / python / kernel_tests / batch_reshape_test . py <nl> ppp b / tensorflow / contrib / distributions / python / kernel_tests / batch_reshape_test . py <nl> def test_bad_reshape_size ( self ) : <nl> <nl> else : <nl> with self . test_session ( ) : <nl> - with self . assertRaisesOpError ( r " ` batch_shape ` size must match " <nl> - r " ` distributions . batch_shape ` size " ) : <nl> + with self . assertRaisesOpError ( r " Shape sizes do not match . " ) : <nl> batch_reshape_lib . BatchReshape ( <nl> distribution = mvn , <nl> batch_shape = new_batch_shape_ph , <nl> def test_bad_reshape_size ( self ) : <nl> <nl> def test_non_positive_shape ( self ) : <nl> dims = 2 <nl> - new_batch_shape = [ - 1 , - 2 ] # - 1 * - 2 = 2 so will pass size check . <nl> - old_batch_shape = [ 2 ] <nl> + old_batch_shape = [ 4 ] <nl> + if self . is_static_shape : <nl> + # Unknown first dimension does not trigger size check . Note that <nl> + # any dimension < 0 is treated statically as unknown . <nl> + new_batch_shape = [ - 1 , 0 ] <nl> + else : <nl> + new_batch_shape = [ - 2 , - 2 ] # - 2 * - 2 = 4 , same size as the old shape . <nl> <nl> new_batch_shape_ph = ( <nl> constant_op . constant ( np . int32 ( new_batch_shape ) ) if self . 
is_static_shape <nl> def test_non_positive_shape ( self ) : <nl> mvn = mvn_lib . MultivariateNormalDiag ( scale_diag = scale_ph ) <nl> <nl> if self . is_static_shape : <nl> - with self . assertRaisesRegexp ( ValueError , r " . * must be positive . * " ) : <nl> + with self . assertRaisesRegexp ( ValueError , r " . * must be > = - 1 . * " ) : <nl> batch_reshape_lib . BatchReshape ( <nl> distribution = mvn , <nl> batch_shape = new_batch_shape_ph , <nl> def test_non_positive_shape ( self ) : <nl> <nl> else : <nl> with self . test_session ( ) : <nl> - with self . assertRaisesOpError ( r " . * must be positive . * " ) : <nl> + with self . assertRaisesOpError ( r " . * must be > = - 1 . * " ) : <nl> batch_reshape_lib . BatchReshape ( <nl> distribution = mvn , <nl> batch_shape = new_batch_shape_ph , <nl> mmm a / tensorflow / contrib / distributions / python / kernel_tests / bijectors / reshape_test . py <nl> ppp b / tensorflow / contrib / distributions / python / kernel_tests / bijectors / reshape_test . py <nl> <nl> <nl> from tensorflow . contrib . distributions . python . ops . bijectors . reshape import Reshape <nl> from tensorflow . python . framework import dtypes <nl> - from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops . distributions . bijector_test_util import assert_bijective_and_finite <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - @ test_util . with_c_api <nl> class _ReshapeBijectorTest ( object ) : <nl> " " " Base class for testing the reshape transformation . <nl> <nl> def build_shapes ( self , * args , * * kwargs ) : <nl> raise NotImplementedError ( " Subclass failed to implement ` build_shapes ` . " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ReshapeBijectorTestStatic ( test . TestCase , _ReshapeBijectorTest ) : <nl> <nl> def build_shapes ( self , shape_in , shape_out ) : <nl> def testBijectiveAndFinite ( self ) : <nl> bijector , x , y , event_ndims = 2 , rtol = 1e - 6 , atol = 0 ) <nl> <nl> def testInvalidDimensionsOpError ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_message = " Invalid value in tensor used for shape : - 2 " <nl> - else : <nl> - error_message = " elements must be either positive integers or ` - 1 ` . " <nl> - self . _testInvalidDimensionsOpError ( error_message ) <nl> + self . _testInvalidDimensionsOpError ( <nl> + " Invalid value in tensor used for shape : - 2 " ) <nl> <nl> def testInputOutputMismatchOpError ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_message = " Cannot reshape a tensor with " <nl> - else : <nl> - error_message = " Input to reshape is a tensor with " <nl> - self . _testInputOutputMismatchOpError ( error_message ) <nl> + self . _testInputOutputMismatchOpError ( " Cannot reshape a tensor with " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ReshapeBijectorTestDynamic ( test . TestCase , _ReshapeBijectorTest ) : <nl> <nl> def build_shapes ( self , shape_in , shape_out ) : <nl> def testInputOutputMismatchOpError ( self ) : <nl> self . _testInputOutputMismatchOpError ( " Input to reshape is a tensor with " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ReshapeBijectorTestDynamicNdims ( test . TestCase , _ReshapeBijectorTest ) : <nl> <nl> def build_shapes ( self , shape_in , shape_out ) : <nl> mmm a / tensorflow / contrib / distributions / python / ops / batch_reshape . 
py <nl> ppp b / tensorflow / contrib / distributions / python / ops / batch_reshape . py <nl> class BatchReshape ( distribution_lib . Distribution ) : <nl> This " meta - distribution " reshapes the batch dimensions of another <nl> distribution . <nl> <nl> - Note : Unlike ` tf . reshape ` , the ` BatchReshape ` distribution does not support <nl> - ` - 1 ` for flattening . <nl> - <nl> # # # # Examples <nl> <nl> ` ` ` python <nl> class BatchReshape ( distribution_lib . Distribution ) : <nl> <nl> dtype = np . float32 <nl> dims = 2 <nl> - new_batch_shape = [ 1 , 2 , 3 ] <nl> + new_batch_shape = [ 1 , 2 , - 1 ] <nl> old_batch_shape = [ 6 ] <nl> <nl> scale = np . ones ( old_batch_shape + [ dims ] , dtype ) <nl> def __init__ ( self , <nl> Args : <nl> distribution : The base distribution instance to reshape . Typically an <nl> instance of ` Distribution ` . <nl> - batch_shape : Positive ` int ` - like vector - shaped ` Tensor ` representing the <nl> - new shape of the batch dimensions . <nl> + batch_shape : Positive ` int ` - like vector - shaped ` Tensor ` representing <nl> + the new shape of the batch dimensions . Up to one dimension may contain <nl> + ` - 1 ` , meaning the remainder of the batch size . <nl> validate_args : Python ` bool ` , default ` False ` . When ` True ` distribution <nl> parameters are checked for validity despite possibly degrading runtime <nl> performance . When ` False ` invalid inputs may silently render incorrect <nl> def __init__ ( self , <nl> " " " <nl> parameters = distribution_util . parent_frame_arguments ( ) <nl> name = name or " BatchReshape " + distribution . name <nl> - self . _distribution = distribution <nl> with ops . name_scope ( name , values = [ batch_shape ] ) as name : <nl> - self . _batch_shape_ = ops . convert_to_tensor ( <nl> - batch_shape , <nl> - dtype = dtypes . int32 , <nl> - name = " batch_shape " ) <nl> - self . _batch_shape_static = tensor_util . constant_value ( self . _batch_shape_ ) <nl> - if self . _batch_shape_static is not None : <nl> - self . _batch_shape_static = np . int32 ( self . _batch_shape_static ) <nl> - self . _runtime_assertions = validate_init_args ( <nl> - self . _distribution , <nl> - self . _batch_shape_ , <nl> - validate_args , <nl> - self . _batch_shape_static ) <nl> + # The unexpanded batch shape may contain up to one dimension of - 1 . <nl> + self . _batch_shape_unexpanded = ops . convert_to_tensor ( <nl> + batch_shape , dtype = dtypes . int32 , name = " batch_shape " ) <nl> + validate_init_args_statically ( distribution , self . _batch_shape_unexpanded ) <nl> + batch_shape , batch_shape_static , runtime_assertions = calculate_reshape ( <nl> + distribution . batch_shape_tensor ( ) , self . _batch_shape_unexpanded , <nl> + validate_args ) <nl> + self . _distribution = distribution <nl> + self . _batch_shape_ = batch_shape <nl> + self . _batch_shape_static = batch_shape_static <nl> + self . _runtime_assertions = runtime_assertions <nl> super ( BatchReshape , self ) . __init__ ( <nl> - dtype = self . _distribution . dtype , <nl> - reparameterization_type = self . _distribution . reparameterization_type , <nl> + dtype = distribution . dtype , <nl> + reparameterization_type = distribution . reparameterization_type , <nl> validate_args = validate_args , <nl> allow_nan_stats = allow_nan_stats , <nl> parameters = parameters , <nl> graph_parents = ( <nl> - [ self . _batch_shape_ ] + <nl> - self . _distribution . _graph_parents ) , # pylint : disable = protected - access <nl> + [ self . _batch_shape_unexpanded ] + distribution . 
_graph_parents ) , # pylint : disable = protected - access <nl> name = name ) <nl> <nl> @ property <nl> def _batch_shape_tensor ( self ) : <nl> return array_ops . identity ( self . _batch_shape_ ) <nl> <nl> def _batch_shape ( self ) : <nl> - return tensor_shape . TensorShape ( self . _batch_shape_static ) <nl> + return self . _batch_shape_static <nl> <nl> def _event_shape_tensor ( self ) : <nl> with ops . control_dependencies ( self . _runtime_assertions ) : <nl> def _event_shape ( self ) : <nl> def _sample_n ( self , n , seed = None ) : <nl> with ops . control_dependencies ( self . _runtime_assertions ) : <nl> x = self . distribution . sample ( sample_shape = n , seed = seed ) <nl> - new_shape = array_ops . concat ( [ <nl> - [ n ] , <nl> - self . batch_shape_tensor ( ) , <nl> - self . event_shape_tensor ( ) , <nl> - ] , axis = 0 ) <nl> + new_shape = array_ops . concat ( <nl> + [ <nl> + [ n ] , <nl> + self . _batch_shape_unexpanded , <nl> + self . event_shape_tensor ( ) , <nl> + ] , <nl> + axis = 0 ) <nl> return array_ops . reshape ( x , new_shape ) <nl> <nl> def _log_prob ( self , x ) : <nl> def _sample_shape ( self , x ) : <nl> event_ndims = ( array_ops . size ( self . event_shape_tensor ( ) ) <nl> if self . event_shape . ndims is None <nl> else self . event_shape . ndims ) <nl> - batch_ndims = ( array_ops . size ( self . batch_shape_tensor ( ) ) <nl> - if self . batch_shape . ndims is None <nl> - else self . batch_shape . ndims ) <nl> + batch_ndims = ( <nl> + array_ops . size ( self . _batch_shape_unexpanded ) <nl> + if self . batch_shape . ndims is None else self . batch_shape . ndims ) <nl> sample_ndims = x_ndims - batch_ndims - event_ndims <nl> if isinstance ( sample_ndims , int ) : <nl> static_sample_shape = x . shape [ : sample_ndims ] <nl> def _call_reshape_input_output ( self , fn , x ) : <nl> self . event_shape_tensor ( ) , <nl> ] , axis = 0 ) <nl> result = fn ( array_ops . reshape ( x , old_shape ) ) <nl> - new_shape = array_ops . concat ( [ <nl> - sample_shape , <nl> - self . batch_shape_tensor ( ) , <nl> - ] , axis = 0 ) <nl> + new_shape = array_ops . concat ( <nl> + [ <nl> + sample_shape , <nl> + self . _batch_shape_unexpanded , <nl> + ] , axis = 0 ) <nl> result = array_ops . reshape ( result , new_shape ) <nl> if ( static_sample_shape . ndims is not None and <nl> self . batch_shape . ndims is not None ) : <nl> def _call_and_reshape_output ( <nl> if static_event_shape_list is None : <nl> static_event_shape_list = [ self . event_shape ] <nl> new_shape = array_ops . concat ( <nl> - [ self . batch_shape_tensor ( ) ] + event_shape_list , <nl> - axis = 0 ) <nl> + [ self . _batch_shape_unexpanded ] + event_shape_list , axis = 0 ) <nl> result = array_ops . reshape ( fn ( ) , new_shape ) <nl> if ( self . batch_shape . ndims is not None and <nl> self . event_shape . ndims is not None ) : <nl> def _validate_sample_arg ( self , x ) : <nl> event_ndims = ( array_ops . size ( self . event_shape_tensor ( ) ) <nl> if self . event_shape . ndims is None <nl> else self . event_shape . ndims ) <nl> - batch_ndims = ( array_ops . size ( self . batch_shape_tensor ( ) ) <nl> - if self . batch_shape . ndims is None <nl> - else self . batch_shape . ndims ) <nl> + batch_ndims = ( <nl> + array_ops . size ( self . _batch_shape_unexpanded ) <nl> + if self . batch_shape . ndims is None else self . batch_shape . 
ndims ) <nl> expected_batch_event_ndims = batch_ndims + event_ndims <nl> <nl> if ( isinstance ( x_ndims , int ) and <nl> def _validate_sample_arg ( self , x ) : <nl> return runtime_assertions <nl> <nl> <nl> - def validate_init_args ( <nl> - distribution , <nl> - batch_shape , <nl> - validate_args , <nl> - batch_shape_static ) : <nl> + def calculate_reshape ( original_shape , new_shape , validate = False , name = None ) : <nl> + " " " Calculates the reshaped dimensions ( replacing up to one - 1 in reshape ) . " " " <nl> + batch_shape_static = tensor_util . constant_value_as_shape ( new_shape ) <nl> + if batch_shape_static . is_fully_defined ( ) : <nl> + return np . int32 ( batch_shape_static . as_list ( ) ) , batch_shape_static , [ ] <nl> + with ops . name_scope ( name , " calculate_reshape " , [ original_shape , new_shape ] ) : <nl> + original_size = math_ops . reduce_prod ( original_shape ) <nl> + implicit_dim = math_ops . equal ( new_shape , - 1 ) <nl> + size_implicit_dim = ( <nl> + original_size / / math_ops . maximum ( 1 , - math_ops . reduce_prod ( new_shape ) ) ) <nl> + new_ndims = array_ops . shape ( new_shape ) <nl> + expanded_new_shape = array_ops . where ( # Assumes exactly one ` - 1 ` . <nl> + implicit_dim , array_ops . fill ( new_ndims , size_implicit_dim ) , new_shape ) <nl> + validations = [ ] if not validate else [ <nl> + check_ops . assert_rank ( <nl> + original_shape , 1 , message = " Original shape must be a vector . " ) , <nl> + check_ops . assert_rank ( <nl> + new_shape , 1 , message = " New shape must be a vector . " ) , <nl> + check_ops . assert_less_equal ( <nl> + math_ops . count_nonzero ( implicit_dim , dtype = dtypes . int32 ) , <nl> + 1 , <nl> + message = " At most one dimension can be unknown . " ) , <nl> + check_ops . assert_positive ( <nl> + expanded_new_shape , message = " Shape elements must be > = - 1 . " ) , <nl> + check_ops . assert_equal ( <nl> + math_ops . reduce_prod ( expanded_new_shape ) , <nl> + original_size , <nl> + message = " Shape sizes do not match . " ) , <nl> + ] <nl> + return expanded_new_shape , batch_shape_static , validations <nl> + <nl> + <nl> + def validate_init_args_statically ( distribution , batch_shape ) : <nl> " " " Helper to __init__ which makes or raises assertions . " " " <nl> - with ops . name_scope ( name = " validate_init_args " , <nl> - values = [ batch_shape ] + distribution . _graph_parents ) : # pylint : disable = protected - access <nl> - runtime_assertions = [ ] <nl> - <nl> - if batch_shape . shape . ndims is not None : <nl> - if batch_shape . shape . ndims ! = 1 : <nl> - raise ValueError ( " ` batch_shape ` must be a vector " <nl> - " ( saw rank : { } ) . " . format ( <nl> - batch_shape . shape . ndims ) ) <nl> - elif validate_args : <nl> - runtime_assertions + = [ <nl> - check_ops . assert_rank ( <nl> - batch_shape , <nl> - 1 , <nl> - message = " ` batch_shape ` must be a vector . " , <nl> - name = " assert_batch_shape_is_vector " ) , <nl> - ] <nl> - <nl> - batch_size_static = np . prod ( batch_shape_static ) <nl> - dist_batch_size_static = ( <nl> - None if not distribution . batch_shape . is_fully_defined ( ) <nl> - else np . prod ( distribution . batch_shape ) . value ) <nl> - <nl> - if batch_size_static is not None and dist_batch_size_static is not None : <nl> - if batch_size_static ! = dist_batch_size_static : <nl> - raise ValueError ( " ` batch_shape ` size ( { } ) must match " <nl> - " ` distribution . batch_shape ` size ( { } ) . " . 
format ( <nl> - batch_size_static , <nl> - dist_batch_size_static ) ) <nl> - elif validate_args : <nl> - runtime_assertions + = [ <nl> - check_ops . assert_equal ( <nl> - math_ops . reduce_prod ( batch_shape ) , <nl> - math_ops . reduce_prod ( distribution . batch_shape_tensor ( ) ) , <nl> - message = ( " ` batch_shape ` size must match " <nl> - " ` distributions . batch_shape ` size . " ) , <nl> - name = " assert_batch_size " ) , <nl> - ] <nl> - <nl> - if batch_shape_static is not None : <nl> - if np . any ( batch_shape_static < 1 ) : <nl> - raise ValueError ( " ` batch_shape ` elements must be positive " <nl> - " ( i . e . , larger than zero ) . " ) <nl> - elif validate_args : <nl> - runtime_assertions + = [ <nl> - check_ops . assert_positive ( <nl> - batch_shape , <nl> - message = ( " ` batch_shape ` elements must be positive " <nl> - " ( i . e . , larger than zero ) . " ) , <nl> - name = " assert_batch_shape_positive " ) <nl> - ] <nl> - <nl> - return runtime_assertions <nl> + if batch_shape . shape . ndims is not None : <nl> + if batch_shape . shape . ndims ! = 1 : <nl> + raise ValueError ( " ` batch_shape ` must be a vector " <nl> + " ( saw rank : { } ) . " . format ( batch_shape . shape . ndims ) ) <nl> + <nl> + batch_shape_static = tensor_util . constant_value_as_shape ( batch_shape ) <nl> + batch_size_static = batch_shape_static . num_elements ( ) <nl> + dist_batch_size_static = distribution . batch_shape . num_elements ( ) <nl> + <nl> + if batch_size_static is not None and dist_batch_size_static is not None : <nl> + if batch_size_static ! = dist_batch_size_static : <nl> + raise ValueError ( " ` batch_shape ` size ( { } ) must match " <nl> + " ` distribution . batch_shape ` size ( { } ) . " . format ( <nl> + batch_size_static , dist_batch_size_static ) ) <nl> + <nl> + if batch_shape_static . dims is not None : <nl> + if any ( <nl> + dim . value is not None and dim . value < 1 for dim in batch_shape_static ) : <nl> + raise ValueError ( " ` batch_shape ` elements must be > = - 1 . " ) <nl> mmm a / tensorflow / contrib / eager / python / BUILD <nl> ppp b / tensorflow / contrib / eager / python / BUILD <nl> py_library ( <nl> visibility = [ " / / tensorflow : internal " ] , <nl> deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : checkpointable " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : dtypes " , <nl> " / / tensorflow / python : framework_ops " , <nl> py_library ( <nl> " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python / eager : context " , <nl> " / / tensorflow / python / eager : function " , <nl> + " / / tensorflow / python / training / checkpointable : base " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / eager / python / datasets . py <nl> ppp b / tensorflow / contrib / eager / python / datasets . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import gen_dataset_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . training . saver import BaseSaverBuilder <nl> <nl> _uid_counter = 0 <nl> mmm a / tensorflow / contrib / eager / python / datasets_test . py <nl> ppp b / tensorflow / contrib / eager / python / datasets_test . py <nl> <nl> from tensorflow . python . 
framework import sparse_tensor <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import script_ops <nl> - from tensorflow . python . training import checkpointable_utils <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> class IteratorTest ( test . TestCase ) : <nl> mmm a / tensorflow / contrib / eager / python / examples / spinn / spinn_test . py <nl> ppp b / tensorflow / contrib / eager / python / examples / spinn / spinn_test . py <nl> <nl> from tensorflow . contrib . summary import summary_test_util <nl> from tensorflow . python . eager import test <nl> from tensorflow . python . framework import test_util <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> # pylint : enable = g - bad - import - order <nl> <nl> <nl> mmm a / tensorflow / contrib / eager / python / metrics_impl . py <nl> ppp b / tensorflow / contrib / eager / python / metrics_impl . py <nl> <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import summary_ops_v2 as summary_ops <nl> from tensorflow . python . ops import variable_scope <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> <nl> _to_replace = re . compile ( " [ ^ A - Za - z0 - 9 . ] " ) <nl> <nl> mmm a / tensorflow / contrib / eager / python / metrics_test . py <nl> ppp b / tensorflow / contrib / eager / python / metrics_test . py <nl> <nl> from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import summary_ops_v2 as summary_ops <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import training_util <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> class MetricsTest ( test . TestCase ) : <nl> mmm a / tensorflow / contrib / eager / python / network_test . py <nl> ppp b / tensorflow / contrib / eager / python / network_test . py <nl> <nl> from tensorflow . python . ops import nn_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . ops import variable_scope <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import training_util <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> # pylint : disable = not - callable <nl> mmm a / tensorflow / contrib / eager / python / tfe . py <nl> ppp b / tensorflow / contrib / eager / python / tfe . py <nl> <nl> from tensorflow . python . ops . variable_scope import EagerVariableStore <nl> from tensorflow . python . ops import script_ops <nl> from tensorflow . python . ops import template <nl> - from tensorflow . python . training . checkpointable import Checkpointable <nl> - from tensorflow . python . training . checkpointable_utils import CheckpointableSaver <nl> - from tensorflow . python . training . checkpointable_utils import Checkpoint <nl> + from tensorflow . python . training . checkpointable . base import Checkpointable <nl> + from tensorflow . python . training . checkpointable . util import CheckpointableSaver <nl> + from tensorflow . 
python . training . checkpointable . util import Checkpoint <nl> from tensorflow . python . util . all_util import remove_undocumented <nl> <nl> py_func = script_ops . eager_py_func <nl> mmm a / tensorflow / contrib / estimator / python / estimator / rnn . py <nl> ppp b / tensorflow / contrib / estimator / python / estimator / rnn . py <nl> def rnn_logit_fn ( features , mode ) : <nl> rnn_outputs , _ = rnn . dynamic_rnn ( <nl> cell = cell , <nl> inputs = sequence_input , <nl> + sequence_length = sequence_length , <nl> dtype = dtypes . float32 , <nl> time_major = False ) <nl> last_activations = _select_last_activations ( rnn_outputs , sequence_length ) <nl> mmm a / tensorflow / contrib / framework / python / framework / tensor_util_test . py <nl> ppp b / tensorflow / contrib / framework / python / framework / tensor_util_test . py <nl> def test_assert_scalar_int ( self ) : <nl> [ 3 , 4 ] , dtype = dtypes . int32 ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class WithShapeTest ( test . TestCase ) : <nl> <nl> def _assert_with_shape ( self , tensor , expected_value , expected_shape , <nl> def test_with_shape_partial ( self ) : <nl> tensor_partial_shape . set_shape ( [ None , 2 ] ) <nl> <nl> for incompatible_shape in [ [ 0 ] , [ 1 ] ] : <nl> - if ops . _USE_C_API : <nl> - error_message = " Shapes must be equal rank , but are 2 and 1 " <nl> - else : <nl> - error_message = r " Shapes \ ( \ ? , 2 \ ) and \ ( [ 01 ] , \ ) are not compatible " <nl> self . assertRaisesRegexp ( <nl> - ValueError , error_message , <nl> + ValueError , " Shapes must be equal rank , but are 2 and 1 " , <nl> tensor_util . with_shape , incompatible_shape , tensor_partial_shape ) <nl> for incompatible_shape in [ [ 1 , 2 , 1 ] ] : <nl> self . assertRaisesRegexp ( ValueError , " Dimensions must be equal " , <nl> tensor_util . with_shape , incompatible_shape , <nl> tensor_partial_shape ) <nl> for incompatible_shape in [ [ 2 , 1 ] ] : <nl> - if ops . _USE_C_API : <nl> - error_message = ( r " Dimension 1 in both shapes must be equal , but are " <nl> - r " 2 and 1 . Shapes are \ [ \ ? , 2 \ ] and \ [ 2 , 1 \ ] . " ) <nl> - else : <nl> - error_message = r " Shapes \ ( \ ? , 2 \ ) and \ ( 2 , 1 \ ) are not compatible " <nl> self . assertRaisesRegexp ( <nl> - ValueError , error_message , <nl> + ValueError , <nl> + r " Dimension 1 in both shapes must be equal , but are 2 and 1 . " <nl> + r " Shapes are \ [ \ ? , 2 \ ] and \ [ 2 , 1 \ ] . " , <nl> tensor_util . with_shape , incompatible_shape , tensor_partial_shape ) <nl> <nl> compatible_shape = [ 2 , 2 ] <nl> mmm a / tensorflow / contrib / lite / Makefile <nl> ppp b / tensorflow / contrib / lite / Makefile <nl> GENDIR : = $ ( MAKEFILE_DIR ) / gen / obj / <nl> CXX : = $ ( CC_PREFIX ) gcc <nl> CXXFLAGS : = - - std = c + + 11 - O3 - DNDEBUG <nl> CC : = $ ( CC_PREFIX ) gcc <nl> - CFLAGS : = - O3 - DNDEBUG <nl> + CCFLAGS : = - O3 - DNDEBUG <nl> LDOPTS : = <nl> LDOPTS + = - L / usr / local / lib <nl> ARFLAGS : = - r <nl> mmm a / tensorflow / contrib / lite / builtin_op_data . h <nl> ppp b / tensorflow / contrib / lite / builtin_op_data . h <nl> typedef struct { <nl> TfLiteType output_type ; <nl> } TfLiteArgMaxParams ; <nl> <nl> + typedef struct { <nl> + TfLitePadding padding ; <nl> + int stride_width ; <nl> + int stride_height ; <nl> + } TfLiteTransposeConvParams ; <nl> + <nl> # ifdef __cplusplus <nl> } / / extern " C " <nl> # endif / / __cplusplus <nl> mmm a / tensorflow / contrib / lite / builtin_ops . h <nl> ppp b / tensorflow / contrib / lite / builtin_ops . 
h <nl> typedef enum { <nl> kTfLiteBuiltinSelect = 64 , <nl> kTfLiteBuiltinSlice = 65 , <nl> kTfLiteBuiltinSin = 66 , <nl> + kTfLiteBuiltinTransposeConv = 67 , <nl> } TfLiteBuiltinOperator ; <nl> <nl> # ifdef __cplusplus <nl> mmm a / tensorflow / contrib / lite / g3doc / tf_ops_compatibility . md <nl> ppp b / tensorflow / contrib / lite / g3doc / tf_ops_compatibility . md <nl> Options { <nl> } <nl> ` ` ` <nl> <nl> + * * CONV_2D_TRANSPOSE * * <nl> + <nl> + ` ` ` <nl> + Inputs { <nl> + 0 : output_shape <nl> + 1 : filter <nl> + 2 : 4D tensor <nl> + } <nl> + Outputs { <nl> + 0 : the transpose ( gradient ) of conv2d <nl> + } <nl> + Options { <nl> + padding : SAME | VALID <nl> + stride_w , stride_h : stride of the filter window <nl> + } <nl> + ` ` ` <nl> + <nl> * * DEPTHWISE_CONV_2D * * <nl> <nl> ` ` ` <nl> mmm a / tensorflow / contrib / lite / kernels / BUILD <nl> ppp b / tensorflow / contrib / lite / kernels / BUILD <nl> cc_library ( <nl> " svdf . cc " , <nl> " topk_v2 . cc " , <nl> " transpose . cc " , <nl> + " transpose_conv . cc " , <nl> " unidirectional_sequence_lstm . cc " , <nl> " unidirectional_sequence_rnn . cc " , <nl> ] , <nl> tf_cc_test ( <nl> ] , <nl> ) <nl> <nl> + tf_cc_test ( <nl> + name = " transpose_conv_test " , <nl> + size = " small " , <nl> + srcs = [ " transpose_conv_test . cc " ] , <nl> + tags = [ " tflite_not_portable_ios " ] , <nl> + deps = [ <nl> + " : builtin_ops " , <nl> + " / / tensorflow / contrib / lite : framework " , <nl> + " / / tensorflow / contrib / lite / kernels : test_util " , <nl> + " @ com_google_googletest / / : gtest " , <nl> + ] , <nl> + ) <nl> + <nl> filegroup ( <nl> name = " all_files " , <nl> srcs = glob ( <nl> mmm a / tensorflow / contrib / lite / kernels / bidirectional_sequence_lstm . cc <nl> ppp b / tensorflow / contrib / lite / kernels / bidirectional_sequence_lstm . 
cc <nl> TfLiteStatus CheckLstmTensorDimensions ( <nl> TF_LITE_ENSURE ( context , params - > cell_clip > = 0 ) ; <nl> TF_LITE_ENSURE ( context , params - > proj_clip > = 0 ) ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , input_to_input_weights_tensor ) ; <nl> if ( input_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , input_to_input_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckLstmTensorDimensions ( <nl> TF_LITE_ENSURE_EQ ( context , input_to_cell_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> TF_LITE_ENSURE_EQ ( context , input_to_cell_weights - > dims - > data [ 1 ] , n_input ) ; <nl> <nl> - TfLiteTensor * recurrent_to_input_weights = <nl> + const TfLiteTensor * recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , recurrent_to_input_weights_tensor ) ; <nl> if ( recurrent_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , recurrent_to_input_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckLstmTensorDimensions ( <nl> ( recurrent_to_input_weights = = nullptr ) ) ; <nl> TF_LITE_ENSURE ( context , cifg_weights_all_or_none = = true ) ; <nl> <nl> - TfLiteTensor * cell_to_input_weights = <nl> + const TfLiteTensor * cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , cell_to_input_weights_tensor ) ; <nl> if ( cell_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_input_weights - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , cell_to_input_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * cell_to_forget_weights = <nl> + const TfLiteTensor * cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , cell_to_forget_weights_tensor ) ; <nl> if ( cell_to_forget_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_forget_weights - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , cell_to_forget_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * cell_to_output_weights = <nl> + const TfLiteTensor * cell_to_output_weights = <nl> GetOptionalInputTensor ( context , node , cell_to_output_weights_tensor ) ; <nl> if ( cell_to_output_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_output_weights - > dims - > size , 1 ) ; <nl> TfLiteStatus CheckLstmTensorDimensions ( <nl> TF_LITE_ENSURE ( context , peephole_weights_all_or_none = = true ) ; <nl> <nl> / / Make sure the input gate bias is present only when not a CIFG - LSTM . 
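<nl>
 <nl>
/ / Note : GetOptionalInputTensor returns nullptr when an optional input is <nl>
/ / absent , so the ( now const ) pointers double as presence flags for the <nl>
/ / all - or - none CIFG and peephole checks in this function . 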
<nl> - TfLiteTensor * input_gate_bias = <nl> + const TfLiteTensor * input_gate_bias = <nl> GetOptionalInputTensor ( context , node , input_gate_bias_tensor ) ; <nl> if ( use_cifg ) { <nl> TF_LITE_ENSURE_EQ ( context , input_gate_bias , nullptr ) ; <nl> TfLiteStatus CheckLstmTensorDimensions ( <nl> TF_LITE_ENSURE_EQ ( context , output_gate_bias - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , output_gate_bias - > dims - > data [ 0 ] , n_cell ) ; <nl> <nl> - TfLiteTensor * projection_weights = <nl> + const TfLiteTensor * projection_weights = <nl> GetOptionalInputTensor ( context , node , projection_weights_tensor ) ; <nl> if ( projection_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , projection_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckLstmTensorDimensions ( <nl> TF_LITE_ENSURE_EQ ( context , projection_weights - > dims - > data [ 1 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * projection_bias = <nl> + const TfLiteTensor * projection_bias = <nl> GetOptionalInputTensor ( context , node , projection_bias_tensor ) ; <nl> if ( projection_bias ) { <nl> TF_LITE_ENSURE_EQ ( context , projection_bias - > dims - > size , 1 ) ; <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> fw_output_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> fw_cell_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> <nl> - TfLiteTensor * fw_input_to_input_weights = <nl> + const TfLiteTensor * fw_input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kFwInputToInputWeightsTensor ) ; <nl> const bool fw_use_cifg = ( fw_input_to_input_weights = = nullptr ) ; <nl> TfLiteIntArray * fw_scratch_buffer_size = TfLiteIntArrayCreate ( 2 ) ; <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> bw_output_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> bw_cell_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> <nl> - TfLiteTensor * bw_input_to_input_weights = <nl> + const TfLiteTensor * bw_input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kBwInputToInputWeightsTensor ) ; <nl> const bool bw_use_cifg = ( bw_input_to_input_weights = = nullptr ) ; <nl> TfLiteIntArray * bw_scratch_buffer_size = TfLiteIntArrayCreate ( 2 ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const int n_input = input - > dims - > data [ 2 ] ; <nl> <nl> / / Tensors for the forward cell . 
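<nl>
 <nl>
/ / The forward cell gathers its full parameter set ( input , recurrent and <nl>
/ / peephole weights , gate biases , optional projection ) through the <nl>
/ / kFw - prefixed tensor indices ; absent optional tensors come back as <nl>
/ / nullptr and are forwarded as such . 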
<nl> - TfLiteTensor * fw_input_to_input_weights = <nl> + const TfLiteTensor * fw_input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kFwInputToInputWeightsTensor ) ; <nl> const TfLiteTensor * fw_input_to_forget_weights = <nl> GetInput ( context , node , kFwInputToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * fw_input_to_output_weights = <nl> GetInput ( context , node , kFwInputToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * fw_recurrent_to_input_weights = <nl> + const TfLiteTensor * fw_recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kFwRecurrentToInputWeightsTensor ) ; <nl> const TfLiteTensor * fw_recurrent_to_forget_weights = <nl> GetInput ( context , node , kFwRecurrentToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * fw_recurrent_to_output_weights = <nl> GetInput ( context , node , kFwRecurrentToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * fw_cell_to_input_weights = <nl> + const TfLiteTensor * fw_cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kFwCellToInputWeightsTensor ) ; <nl> - TfLiteTensor * fw_cell_to_forget_weights = <nl> + const TfLiteTensor * fw_cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , kFwCellToForgetWeightsTensor ) ; <nl> - TfLiteTensor * fw_cell_to_output_weights = <nl> + const TfLiteTensor * fw_cell_to_output_weights = <nl> GetOptionalInputTensor ( context , node , kFwCellToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * fw_input_gate_bias = <nl> + const TfLiteTensor * fw_input_gate_bias = <nl> GetOptionalInputTensor ( context , node , kFwInputGateBiasTensor ) ; <nl> const TfLiteTensor * fw_forget_gate_bias = <nl> GetInput ( context , node , kFwForgetGateBiasTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * fw_output_gate_bias = <nl> GetInput ( context , node , kFwOutputGateBiasTensor ) ; <nl> <nl> - TfLiteTensor * fw_projection_weights = <nl> + const TfLiteTensor * fw_projection_weights = <nl> GetOptionalInputTensor ( context , node , kFwProjectionWeightsTensor ) ; <nl> - TfLiteTensor * fw_projection_bias = <nl> + const TfLiteTensor * fw_projection_bias = <nl> GetOptionalInputTensor ( context , node , kFwProjectionBiasTensor ) ; <nl> <nl> TfLiteTensor * fw_output_state = <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> TfLiteTensor * fw_output = GetOutput ( context , node , kFwOutputTensor ) ; <nl> <nl> / / Tensors for the backward cell . 
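<nl>
 <nl>
/ / The backward cell mirrors the forward parameter set one - for - one <nl>
/ / through the corresponding kBw - prefixed tensor indices . 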
<nl> - TfLiteTensor * bw_input_to_input_weights = <nl> + const TfLiteTensor * bw_input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kBwInputToInputWeightsTensor ) ; <nl> const TfLiteTensor * bw_input_to_forget_weights = <nl> GetInput ( context , node , kBwInputToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * bw_input_to_output_weights = <nl> GetInput ( context , node , kBwInputToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * bw_recurrent_to_input_weights = <nl> + const TfLiteTensor * bw_recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kBwRecurrentToInputWeightsTensor ) ; <nl> const TfLiteTensor * bw_recurrent_to_forget_weights = <nl> GetInput ( context , node , kBwRecurrentToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * bw_recurrent_to_output_weights = <nl> GetInput ( context , node , kBwRecurrentToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * bw_cell_to_input_weights = <nl> + const TfLiteTensor * bw_cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kBwCellToInputWeightsTensor ) ; <nl> - TfLiteTensor * bw_cell_to_forget_weights = <nl> + const TfLiteTensor * bw_cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , kBwCellToForgetWeightsTensor ) ; <nl> - TfLiteTensor * bw_cell_to_output_weights = <nl> + const TfLiteTensor * bw_cell_to_output_weights = <nl> GetOptionalInputTensor ( context , node , kBwCellToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * bw_input_gate_bias = <nl> + const TfLiteTensor * bw_input_gate_bias = <nl> GetOptionalInputTensor ( context , node , kBwInputGateBiasTensor ) ; <nl> const TfLiteTensor * bw_forget_gate_bias = <nl> GetInput ( context , node , kBwForgetGateBiasTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * bw_output_gate_bias = <nl> GetInput ( context , node , kBwOutputGateBiasTensor ) ; <nl> <nl> - TfLiteTensor * bw_projection_weights = <nl> + const TfLiteTensor * bw_projection_weights = <nl> GetOptionalInputTensor ( context , node , kBwProjectionWeightsTensor ) ; <nl> - TfLiteTensor * bw_projection_bias = <nl> + const TfLiteTensor * bw_projection_bias = <nl> GetOptionalInputTensor ( context , node , kBwProjectionBiasTensor ) ; <nl> <nl> TfLiteTensor * bw_output_state = <nl> mmm a / tensorflow / contrib / lite / kernels / fully_connected . cc <nl> ppp b / tensorflow / contrib / lite / kernels / fully_connected . 
cc <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> <nl> const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> const TfLiteTensor * filter = GetInput ( context , node , kWeightsTensor ) ; <nl> - TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> + const TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> <nl> / / Check all the parameters of tensor match within themselves and match the <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> <nl> const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> const TfLiteTensor * filter = GetInput ( context , node , kWeightsTensor ) ; <nl> - TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> + const TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> <nl> switch ( filter - > type ) { / / Already know in / out types are same . <nl> mmm a / tensorflow / contrib / lite / kernels / internal / optimized / optimized_ops . h <nl> ppp b / tensorflow / contrib / lite / kernels / internal / optimized / optimized_ops . h <nl> using VectorMap = typename std : : conditional < <nl> <nl> template < typename Scalar , int N > <nl> VectorMap < Scalar > MapAsVector ( Scalar * data , const Dims < N > & dims ) { <nl> - const int size = RequiredBufferSizeForDims ( dims ) ; <nl> + const int size = FlatSize ( dims ) ; <nl> return VectorMap < Scalar > ( data , size , 1 ) ; <nl> } <nl> <nl> inline void AddBiasAndEvalActivationFunction ( const float * bias_data , <nl> float output_activation_max ) { <nl> # ifdef USE_NEON <nl> gemmlowp : : ScopedProfilingLabel label ( " AddBiasAndEvalActivationFunction " ) ; <nl> - const int bias_size = bias_dims . sizes [ 3 ] * bias_dims . strides [ 3 ] ; <nl> - const int array_size = array_dims . sizes [ 3 ] * array_dims . strides [ 3 ] ; <nl> + const int bias_size = FlatSize ( bias_dims ) ; <nl> + const int array_size = FlatSize ( array_dims ) ; <nl> TFLITE_DCHECK_EQ ( ( array_size % bias_size ) , 0 ) ; <nl> float * array_ptr = array_data ; <nl> float * array_end_ptr = array_ptr + array_size ; <nl> inline void AddBiasAndEvalActivationFunction ( const float * bias_data , <nl> } <nl> # else / / not NEON <nl> gemmlowp : : ScopedProfilingLabel label ( " AddBiasAndEvalActivationFunction " ) ; <nl> - const int bias_size = bias_dims . sizes [ 3 ] * bias_dims . strides [ 3 ] ; <nl> - const int array_size = array_dims . sizes [ 3 ] * array_dims . strides [ 3 ] ; <nl> + const int bias_size = FlatSize ( bias_dims ) ; <nl> + const int array_size = FlatSize ( array_dims ) ; <nl> TFLITE_DCHECK_EQ ( ( array_size % bias_size ) , 0 ) ; <nl> for ( int array_offset = 0 ; array_offset < array_size ; <nl> array_offset + = bias_size ) { <nl> inline void GEMVForLstmCell ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( weights_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( bias_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( output_dims ) ) ; <nl> - TFLITE_DCHECK_EQ ( ArraySize ( output_dims , 1 ) * ArraySize ( output_dims , 2 ) * <nl> - ArraySize ( output_dims , 3 ) , <nl> - 1 ) ; <nl> - const int input_size = input_dims . 
strides [ 3 ] ; <nl> + TFLITE_DCHECK_EQ ( FlatSizeSkipDim ( output_dims , 0 ) , 1 ) ; <nl> + const int input_size = FlatSizeSkipDim ( input_dims , 3 ) ; <nl> const int output_size = MatchingArraySize ( weights_dims , 1 , output_dims , 0 ) ; <nl> / / This special fast path for quantized LSTM cells does not try to support <nl> / / odd sizes that we haven ' t encountered in any LSTM cell , that would <nl> inline void GEMVForLstmCellWithSymmetricRange ( <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( weights_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( bias_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( output_dims ) ) ; <nl> - TFLITE_DCHECK_EQ ( ArraySize ( output_dims , 1 ) * ArraySize ( output_dims , 2 ) * <nl> - ArraySize ( output_dims , 3 ) , <nl> - 1 ) ; <nl> - const int input_size = input_dims . strides [ 3 ] ; <nl> + TFLITE_DCHECK_EQ ( FlatSizeSkipDim ( output_dims , 0 ) , 1 ) ; <nl> + const int input_size = FlatSizeSkipDim ( input_dims , 3 ) ; <nl> const int output_size = MatchingArraySize ( weights_dims , 1 , output_dims , 0 ) ; <nl> / / This special fast path for quantized LSTM cells does not try to support <nl> / / odd sizes that we haven ' t encountered in any LSTM cell , that would <nl> inline void FullyConnectedAsGEMV ( <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( filter_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( bias_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( output_dims ) ) ; <nl> - TFLITE_DCHECK_EQ ( ArraySize ( output_dims , 1 ) * ArraySize ( output_dims , 2 ) * <nl> - ArraySize ( output_dims , 3 ) , <nl> - 1 ) ; <nl> - const int input_size = input_dims . strides [ 3 ] ; <nl> + TFLITE_DCHECK_EQ ( FlatSizeSkipDim ( output_dims , 0 ) , 1 ) ; <nl> + const int input_size = FlatSizeSkipDim ( input_dims , 3 ) ; <nl> const int output_size = MatchingArraySize ( filter_dims , 1 , output_dims , 0 ) ; <nl> static constexpr int kPeel = 4 ; <nl> for ( int k = 0 ; k < input_size ; k + = 64 ) { <nl> inline void FullyConnected ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> / / but the current - - variable_batch hack consists in overwriting the 3rd <nl> / / dimension with the runtime batch size , as we don ' t keep track for each <nl> / / array of which dimension is the batch dimension in it . <nl> - const int batches = ArraySize ( output_dims , 1 ) * ArraySize ( output_dims , 2 ) * <nl> - ArraySize ( output_dims , 3 ) ; <nl> + const int batches = FlatSizeSkipDim ( output_dims , 0 ) ; <nl> # ifdef USE_NEON <nl> const int output_size = MatchingArraySize ( filter_dims , 1 , output_dims , 0 ) ; <nl> if ( batches = = 1 & & ! ( output_size % 4 ) ) { <nl> inline void FullyConnected ( <nl> / / but the current - - variable_batch hack consists in overwriting the 3rd <nl> / / dimension with the runtime batch size , as we don ' t keep track for each <nl> / / array of which dimension is the batch dimension in it . 
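<nl> / / Illustrative note only ( not part of this diff ) : for packed ,
<nl> / / stride - contiguous arrays the new form reproduces the old products , e . g .
<nl> / / with output sizes { 8 , 1 , 1 , 3 } , FlatSizeSkipDim ( output_dims , 0 ) is
<nl> / / 1 * 1 * 3 = 3 batches , the same value the removed ArraySize products gave ,
<nl> / / and input_dims . strides [ 3 ] equals FlatSizeSkipDim ( input_dims , 3 )
<nl> / / whenever IsPackedWithoutStrides ( input_dims ) holds , which the DCHECKs in
<nl> / / these kernels already assert .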
<nl> - const int batches = ArraySize ( output_dims , 1 ) * ArraySize ( output_dims , 2 ) * <nl> - ArraySize ( output_dims , 3 ) ; <nl> + const int batches = FlatSizeSkipDim ( output_dims , 0 ) ; <nl> const int output_depth = MatchingArraySize ( filter_dims , 1 , output_dims , 0 ) ; <nl> const int accum_depth = ArraySize ( filter_dims , 0 ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input_dims ) ) ; <nl> inline void ExperimentalShuffledFullyConnected ( <nl> / / but the current - - variable_batch hack consists in overwriting the 3rd <nl> / / dimension with the runtime batch size , as we don ' t keep track for each <nl> / / array of which dimension is the batch dimension in it . <nl> - const int batches = ArraySize ( output_dims , 1 ) * ArraySize ( output_dims , 2 ) * <nl> - ArraySize ( output_dims , 3 ) ; <nl> + const int batches = FlatSizeSkipDim ( output_dims , 0 ) ; <nl> const int output_depth = MatchingArraySize ( weights_dims , 1 , output_dims , 0 ) ; <nl> const int accum_depth = ArraySize ( weights_dims , 0 ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input_dims ) ) ; <nl> inline void Conv ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> } <nl> <nl> const int gemm_input_rows = gemm_input_dims - > sizes [ 0 ] ; <nl> - const int gemm_input_cols = gemm_input_dims - > sizes [ 1 ] * <nl> - gemm_input_dims - > sizes [ 2 ] * <nl> - gemm_input_dims - > sizes [ 3 ] ; <nl> + const int gemm_input_cols = FlatSizeSkipDim ( * gemm_input_dims , 0 ) ; <nl> const int filter_rows = filter_dims . sizes [ 3 ] ; <nl> - const int filter_cols = <nl> - filter_dims . sizes [ 0 ] * filter_dims . sizes [ 1 ] * filter_dims . sizes [ 2 ] ; <nl> + const int filter_cols = FlatSizeSkipDim ( filter_dims , 3 ) ; <nl> const int output_rows = output_dims . sizes [ 0 ] ; <nl> - const int output_cols = <nl> - output_dims . sizes [ 1 ] * output_dims . sizes [ 2 ] * output_dims . sizes [ 3 ] ; <nl> + const int output_cols = FlatSizeSkipDim ( output_dims , 0 ) ; <nl> TFLITE_DCHECK_EQ ( output_rows , filter_rows ) ; <nl> TFLITE_DCHECK_EQ ( output_cols , gemm_input_cols ) ; <nl> TFLITE_DCHECK_EQ ( filter_cols , gemm_input_rows ) ; <nl> void ConvAsGemm ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> Ac = = FusedActivationFunctionType : : kRelu1 , <nl> " " ) ; <nl> const int input_rows = input_dims . sizes [ 0 ] ; <nl> - const int input_cols = <nl> - input_dims . sizes [ 1 ] * input_dims . sizes [ 2 ] * input_dims . sizes [ 3 ] ; <nl> + const int input_cols = FlatSizeSkipDim ( input_dims , 0 ) ; <nl> const int filter_rows = filter_dims . sizes [ 3 ] ; <nl> - const int filter_cols = <nl> - filter_dims . sizes [ 0 ] * filter_dims . sizes [ 1 ] * filter_dims . sizes [ 2 ] ; <nl> + const int filter_cols = FlatSizeSkipDim ( filter_dims , 3 ) ; <nl> const int output_rows = output_dims . sizes [ 0 ] ; <nl> - const int output_cols = <nl> - output_dims . sizes [ 1 ] * output_dims . sizes [ 2 ] * output_dims . 
sizes [ 3 ] ; <nl> + const int output_cols = FlatSizeSkipDim ( output_dims , 0 ) ; <nl> TFLITE_DCHECK_EQ ( output_rows , filter_rows ) ; <nl> TFLITE_DCHECK_EQ ( output_cols , input_cols ) ; <nl> TFLITE_DCHECK_EQ ( filter_cols , input_rows ) ; <nl> void NonGlobalBatchNormalization ( <nl> const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " NonGlobalBatchNormalization " ) ; <nl> const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = <nl> - MatchingArraySize ( input_dims , 2 , mean_dims , 2 , multiplier_dims , 2 , <nl> - offset_dims , 2 , output_dims , 2 ) ; <nl> - const int width = <nl> - MatchingArraySize ( input_dims , 1 , mean_dims , 1 , multiplier_dims , 1 , <nl> - offset_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = <nl> - MatchingArraySize ( input_dims , 0 , mean_dims , 0 , multiplier_dims , 0 , <nl> - offset_dims , 0 , output_dims , 0 ) ; <nl> + const int inner_size = MatchingFlatSizeSkipDim ( <nl> + input_dims , 3 , mean_dims , multiplier_dims , offset_dims , output_dims ) ; <nl> <nl> for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = ActivationFunction < Ac > ( <nl> - ( input_data [ Offset ( input_dims , c , x , y , b ) ] - <nl> - mean_data [ Offset ( mean_dims , c , x , y , 0 ) ] ) * <nl> - multiplier_data [ Offset ( multiplier_dims , c , x , y , 0 ) ] + <nl> - offset_data [ Offset ( offset_dims , c , x , y , 0 ) ] ) ; <nl> - } <nl> - } <nl> + for ( int i = 0 ; i < inner_size ; + + i ) { <nl> + * output_data = ActivationFunction < Ac > ( <nl> + ( * input_data - mean_data [ i ] ) * multiplier_data [ i ] + offset_data [ i ] ) ; <nl> + + + output_data ; <nl> + + + input_data ; <nl> } <nl> } <nl> } <nl> void GlobalBatchNormalization ( const float * input_data , <nl> const Dims < 4 > & offset_dims , float * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " GlobalBatchNormalization " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> + const int outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> const int depth = <nl> MatchingArraySize ( input_dims , 0 , mean_dims , 0 , multiplier_dims , 0 , <nl> offset_dims , 0 , output_dims , 0 ) ; <nl> <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = ActivationFunction < Ac > ( <nl> - ( input_data [ Offset ( input_dims , c , x , y , b ) ] - <nl> - mean_data [ Offset ( mean_dims , c , 0 , 0 , 0 ) ] ) * <nl> - multiplier_data [ Offset ( multiplier_dims , c , 0 , 0 , 0 ) ] + <nl> - offset_data [ Offset ( offset_dims , c , 0 , 0 , 0 ) ] ) ; <nl> - } <nl> - } <nl> + for ( int i = 0 ; i < outer_size ; + + i ) { <nl> + for ( int c = 0 ; c < depth ; + + c ) { <nl> + * output_data = ActivationFunction < Ac > ( <nl> + ( * input_data - mean_data [ c ] ) * multiplier_data [ c ] + offset_data [ c ] ) ; <nl> + + + output_data ; <nl> + + + input_data ; <nl> } <nl> } <nl> } <nl> inline void Relu ( const 
float * input_data , const Dims < 4 > & input_dims , <nl> inline void Relu1 ( const float * input_data , const Dims < 4 > & input_dims , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Relu1 ( not fused ) " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - float val = input_data [ Offset ( input_dims , c , x , y , b ) ] ; <nl> - const float upper = 1 ; <nl> - const float lower = - 1 ; <nl> - float clamped = val > upper ? upper : val < lower ? lower : val ; <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = clamped ; <nl> - } <nl> - } <nl> - } <nl> + const int flat_size = MatchingFlatSize ( input_dims , output_dims ) ; <nl> + for ( int i = 0 ; i < flat_size ; + + i ) { <nl> + const float val = input_data [ i ] ; <nl> + const float upper = 1 ; <nl> + const float lower = - 1 ; <nl> + const float clamped = val > upper ? upper : val < lower ? lower : val ; <nl> + output_data [ i ] = clamped ; <nl> } <nl> } <nl> <nl> inline void Relu6 ( const float * input_data , const Dims < 4 > & input_dims , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Relu6 ( not fused ) " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - float val = input_data [ Offset ( input_dims , c , x , y , b ) ] ; <nl> - const float upper = 6 ; <nl> - const float lower = 0 ; <nl> - float clamped = val > upper ? upper : val < lower ? lower : val ; <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = clamped ; <nl> - } <nl> - } <nl> - } <nl> + const int flat_size = MatchingFlatSize ( input_dims , output_dims ) ; <nl> + for ( int i = 0 ; i < flat_size ; + + i ) { <nl> + const float val = input_data [ i ] ; <nl> + const float upper = 6 ; <nl> + const float lower = 0 ; <nl> + const float clamped = val > upper ? upper : val < lower ? 
lower : val ; <nl> + output_data [ i ] = clamped ; <nl> } <nl> } <nl> <nl> void L2Normalization ( const float * input_data , const Dims < 4 > & input_dims , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " L2Normalization " ) ; <nl> static_assert ( Ac = = FusedActivationFunctionType : : kNone , " " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> + const int outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - float squared_l2_norm = 0 ; <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - float val = input_data [ Offset ( input_dims , c , x , y , b ) ] ; <nl> - squared_l2_norm + = val * val ; <nl> - } <nl> - float inverse_l2_norm = 1 . 0f / std : : sqrt ( squared_l2_norm ) ; <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = <nl> - input_data [ Offset ( input_dims , c , x , y , b ) ] * inverse_l2_norm ; <nl> - } <nl> - } <nl> + for ( int i = 0 ; i < outer_size ; + + i ) { <nl> + float squared_l2_norm = 0 ; <nl> + for ( int c = 0 ; c < depth ; + + c ) { <nl> + const float val = input_data [ depth * i + c ] ; <nl> + squared_l2_norm + = val * val ; <nl> + } <nl> + const float l2_norm = std : : sqrt ( squared_l2_norm ) ; <nl> + for ( int c = 0 ; c < depth ; + + c ) { <nl> + * output_data = * input_data / l2_norm ; <nl> + + + output_data ; <nl> + + + input_data ; <nl> } <nl> } <nl> } <nl> inline void L2Normalization ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> int32 input_zero_point , uint8 * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " L2Normalization / 8bit " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( output_dims ) ) ; <nl> - TFLITE_DCHECK_EQ ( batches , 1 ) ; <nl> - TFLITE_DCHECK_EQ ( height , 1 ) ; <nl> - TFLITE_DCHECK_EQ ( width , 1 ) ; <nl> + const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> + const int outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> + TFLITE_DCHECK_EQ ( outer_size , 1 ) ; <nl> int32 square_l2_norm = 0 ; <nl> for ( int i = 0 ; i < depth ; i + + ) { <nl> int32 diff = input_data [ i ] - input_zero_point ; <nl> inline void Add ( const float * input1_data , const Dims < 4 > & input1_dims , <nl> float output_activation_min , float output_activation_max , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Add " ) ; <nl> - / * const int batches = * / MatchingArraySize ( input1_dims , 3 , input2_dims , 3 , <nl> - output_dims , 3 ) ; <nl> - / * const int height = * / MatchingArraySize ( input1_dims , 2 , input2_dims , 2 , 
<nl> - output_dims , 2 ) ; <nl> - / * const int width = * / MatchingArraySize ( input1_dims , 1 , input2_dims , 1 , <nl> - output_dims , 1 ) ; <nl> - / * const int depth = * / MatchingArraySize ( input1_dims , 0 , input2_dims , 0 , <nl> - output_dims , 0 ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input1_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input2_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( output_dims ) ) ; <nl> <nl> int i = 0 ; <nl> - const int size = input1_dims . sizes [ 3 ] * input1_dims . strides [ 3 ] ; <nl> + const int size = MatchingFlatSize ( input1_dims , input2_dims , output_dims ) ; <nl> # ifdef USE_NEON <nl> const auto activation_min = vdupq_n_f32 ( output_activation_min ) ; <nl> const auto activation_max = vdupq_n_f32 ( output_activation_max ) ; <nl> inline void Add ( const int16 * input1_data , const Dims < 4 > & input1_dims , <nl> TFLITE_DCHECK_EQ ( output_activation_max , 32767 ) ; <nl> } <nl> <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input1_dims ) , flat_size ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input2_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> <nl> TFLITE_DCHECK ( input1_shift = = 0 | | input2_shift = = 0 ) ; <nl> TFLITE_DCHECK_GE ( input1_shift , 0 ) ; <nl> void Add ( const int32 * input1_data , const Dims < 4 > & input1_dims , <nl> auto output_map = MapAsVector ( output_data , output_dims ) ; <nl> if ( AreSameDims ( input1_dims , input2_dims ) ) { <nl> output_map . array ( ) = input1_map . array ( ) + input2_map . array ( ) ; <nl> - } else if ( RequiredBufferSizeForDims ( input2_dims ) = = 1 ) { <nl> + } else if ( FlatSize ( input2_dims ) = = 1 ) { <nl> auto scalar = input2_data [ 0 ] ; <nl> output_map . array ( ) = input1_map . array ( ) + scalar ; <nl> - } else if ( RequiredBufferSizeForDims ( input1_dims ) = = 1 ) { <nl> + } else if ( FlatSize ( input1_dims ) = = 1 ) { <nl> auto scalar = input1_data [ 0 ] ; <nl> output_map . array ( ) = scalar + input2_map . array ( ) ; <nl> } else { <nl> inline void Mul ( const float * input1_data , const Dims < 4 > & input1_dims , <nl> float output_activation_min , float output_activation_max , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Mul " ) ; <nl> - / * const int batches = * / MatchingArraySize ( input1_dims , 3 , input2_dims , 3 , <nl> - output_dims , 3 ) ; <nl> - / * const int height = * / MatchingArraySize ( input1_dims , 2 , input2_dims , 2 , <nl> - output_dims , 2 ) ; <nl> - / * const int width = * / MatchingArraySize ( input1_dims , 1 , input2_dims , 1 , <nl> - output_dims , 1 ) ; <nl> - / * const int depth = * / MatchingArraySize ( input1_dims , 0 , input2_dims , 0 , <nl> - output_dims , 0 ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input1_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input2_dims ) ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( output_dims ) ) ; <nl> <nl> int i = 0 ; <nl> - const int size = input1_dims . sizes [ 3 ] * input1_dims . 
strides [ 3 ] ; <nl> + const int size = MatchingFlatSize ( input1_dims , input2_dims , output_dims ) ; <nl> # ifdef USE_NEON <nl> const auto activation_min = vdupq_n_f32 ( output_activation_min ) ; <nl> const auto activation_max = vdupq_n_f32 ( output_activation_max ) ; <nl> void Mul ( const int32 * input1_data , const Dims < 4 > & input1_dims , <nl> auto output_map = MapAsVector ( output_data , output_dims ) ; <nl> if ( AreSameDims ( input1_dims , input2_dims ) ) { <nl> output_map . array ( ) = input1_map . array ( ) * input2_map . array ( ) ; <nl> - } else if ( RequiredBufferSizeForDims ( input2_dims ) = = 1 ) { <nl> + } else if ( FlatSize ( input2_dims ) = = 1 ) { <nl> auto scalar = input2_data [ 0 ] ; <nl> output_map . array ( ) = input1_map . array ( ) * scalar ; <nl> - } else if ( RequiredBufferSizeForDims ( input1_dims ) = = 1 ) { <nl> + } else if ( FlatSize ( input1_dims ) = = 1 ) { <nl> auto scalar = input1_data [ 0 ] ; <nl> output_map . array ( ) = scalar * input2_map . array ( ) ; <nl> } else { <nl> inline void Mul ( const int16 * input1_data , const Dims < 4 > & input1_dims , <nl> / / This is a copy of the reference implementation . We do not currently have a <nl> / / properly optimized version . <nl> <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input1_dims ) , flat_size ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input2_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> <nl> for ( int i = 0 ; i < flat_size ; i + + ) { <nl> / / F0 uses 0 integer bits , range [ - 1 , 1 ] . <nl> inline void Mul ( const int16 * input1_data , const Dims < 4 > & input1_dims , <nl> / / properly optimized version . <nl> TFLITE_DCHECK_LE ( output_activation_min , output_activation_max ) ; <nl> <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input1_dims ) , flat_size ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input2_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> <nl> for ( int i = 0 ; i < flat_size ; i + + ) { <nl> / / F0 uses 0 integer bits , range [ - 1 , 1 ] . 
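<nl> / / Illustrative sketch only ( not part of this diff ) : MatchingFlatSize
<nl> / / validates and counts in a single call , roughly
<nl> / / for ( int i = 0 ; i < 4 ; + + i ) {
<nl> / / TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims , i ) ) ;
<nl> / / }
<nl> / / return dims . sizes [ 0 ] * dims . sizes [ 1 ] * dims . sizes [ 2 ] * dims . sizes [ 3 ] ;
<nl> / / which is what replaces the per - dimension MatchingArraySize boilerplate
<nl> / / removed above .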
<nl> inline void Div ( const float * input1_data , const Dims < 4 > & input1_dims , <nl> const float * input2_data , const Dims < 4 > & input2_dims , <nl> float output_activation_min , float output_activation_max , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> - const int batches = <nl> - MatchingArraySize ( input1_dims , 3 , input2_dims , 3 , output_dims , 3 ) ; <nl> - const int height = <nl> - MatchingArraySize ( input1_dims , 2 , input2_dims , 2 , output_dims , 2 ) ; <nl> - const int width = <nl> - MatchingArraySize ( input1_dims , 1 , input2_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = <nl> - MatchingArraySize ( input1_dims , 0 , input2_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = <nl> - ActivationFunctionWithMinMax ( <nl> - input1_data [ Offset ( input1_dims , c , x , y , b ) ] / <nl> - input2_data [ Offset ( input2_dims , c , x , y , b ) ] , <nl> - output_activation_min , output_activation_max ) ; <nl> - } <nl> - } <nl> - } <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> + for ( int i = 0 ; i < flat_size ; i + + ) { <nl> + output_data [ i ] = ActivationFunctionWithMinMax ( <nl> + input1_data [ i ] / input2_data [ i ] , output_activation_min , <nl> + output_activation_max ) ; <nl> } <nl> } <nl> <nl> inline void Sub ( const float * input1_data , const Dims < 4 > & input1_dims , <nl> const float * input2_data , const Dims < 4 > & input2_dims , <nl> float output_activation_min , float output_activation_max , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> - const int batches = <nl> - MatchingArraySize ( input1_dims , 3 , input2_dims , 3 , output_dims , 3 ) ; <nl> - const int height = <nl> - MatchingArraySize ( input1_dims , 2 , input2_dims , 2 , output_dims , 2 ) ; <nl> - const int width = <nl> - MatchingArraySize ( input1_dims , 1 , input2_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = <nl> - MatchingArraySize ( input1_dims , 0 , input2_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = <nl> - ActivationFunctionWithMinMax ( <nl> - input1_data [ Offset ( input1_dims , c , x , y , b ) ] - <nl> - input2_data [ Offset ( input2_dims , c , x , y , b ) ] , <nl> - output_activation_min , output_activation_max ) ; <nl> - } <nl> - } <nl> - } <nl> + gemmlowp : : ScopedProfilingLabel label ( " Sub " ) ; <nl> + const int flat_size = MatchingFlatSize ( input1_dims , input2_dims , output_dims ) ; <nl> + for ( int i = 0 ; i < flat_size ; + + i ) { <nl> + output_data [ i ] = ActivationFunctionWithMinMax ( <nl> + input1_data [ i ] - input2_data [ i ] , output_activation_min , <nl> + output_activation_max ) ; <nl> } <nl> } <nl> <nl> void LstmCell ( const uint8 * input_data_uint8 , const Dims < 4 > & input_dims , <nl> gemmlowp : : ScopedProfilingLabel label ( <nl> " LstmCell / quantized ( 8bit external , 16bit internal ) " ) ; <nl> / / Gather dimensions information , and perform consistency checks . 
<nl> - const int batches = <nl> - MatchingArraySize ( input_dims , 3 , prev_activ_dims , 3 , prev_state_dims , 3 , <nl> - output_state_dims , 3 , output_activ_dims , 3 ) ; <nl> - const int height = <nl> - MatchingArraySize ( input_dims , 2 , prev_activ_dims , 2 , prev_state_dims , 2 , <nl> - output_state_dims , 2 , output_activ_dims , 2 ) ; <nl> - const int width = <nl> - MatchingArraySize ( input_dims , 1 , prev_activ_dims , 1 , prev_state_dims , 1 , <nl> - output_state_dims , 1 , output_activ_dims , 1 ) ; <nl> + const int outer_size = <nl> + MatchingFlatSizeSkipDim ( input_dims , 0 , prev_activ_dims , prev_state_dims , <nl> + output_state_dims , output_activ_dims ) ; <nl> TFLITE_CHECK_EQ ( ArraySize ( weights_dims , 2 ) , 1 ) ; <nl> TFLITE_CHECK_EQ ( ArraySize ( weights_dims , 3 ) , 1 ) ; <nl> const int input_depth = ArraySize ( input_dims , 0 ) ; <nl> void LstmCell ( const uint8 * input_data_uint8 , const Dims < 4 > & input_dims , <nl> MatchingArraySize ( prev_state_dims , 0 , prev_activ_dims , 0 , <nl> output_state_dims , 0 , output_activ_dims , 0 ) ; <nl> TFLITE_CHECK_EQ ( output_depth , intern_activ_depth / 4 ) ; <nl> - const int fc_batches = ArraySize ( activ_temp_dims , 1 ) * <nl> - ArraySize ( activ_temp_dims , 2 ) * <nl> - ArraySize ( activ_temp_dims , 3 ) ; <nl> + const int fc_batches = FlatSizeSkipDim ( activ_temp_dims , 0 ) ; <nl> const int fc_output_depth = <nl> MatchingArraySize ( weights_dims , 1 , activ_temp_dims , 0 ) ; <nl> const int fc_accum_depth = ArraySize ( weights_dims , 0 ) ; <nl> void LstmCell ( const uint8 * input_data_uint8 , const Dims < 4 > & input_dims , <nl> <nl> / / Rest of the LSTM cell : tanh and logistic math functions , and some adds <nl> / / and muls , all done in 16 - bit fixed - point . <nl> - const int outer_size = batches * width * height ; <nl> const int16 * input_gate_input_ptr = activ_temp_data_int16 ; <nl> const int16 * input_modulation_gate_input_ptr = <nl> activ_temp_data_int16 + output_depth ; <nl> void TensorFlowSplit ( const Scalar * input_data , const Dims < 4 > & input_dims , <nl> gemmlowp : : ScopedProfilingLabel label ( " TensorFlowSplit " ) ; <nl> TFLITE_DCHECK_GE ( outputs_count , 1 ) ; <nl> for ( int i = 0 ; i < outputs_count ; i + + ) { <nl> - / * batches = * / MatchingArraySize ( * output_dims [ i ] , 3 , input_dims , 3 ) ; <nl> - / * height = * / MatchingArraySize ( * output_dims [ i ] , 2 , input_dims , 2 ) ; <nl> - / * width = * / MatchingArraySize ( * output_dims [ i ] , 1 , input_dims , 1 ) ; <nl> + MatchingFlatSizeSkipDim ( * output_dims [ i ] , 0 , input_dims ) ; <nl> } <nl> - const int batches = MatchingArraySize ( * output_dims [ 0 ] , 3 , input_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( * output_dims [ 0 ] , 2 , input_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( * output_dims [ 0 ] , 1 , input_dims , 1 ) ; <nl> + const int outer_size = FlatSizeSkipDim ( input_dims , 0 ) ; <nl> TFLITE_DCHECK ( IsPackedWithoutStrides ( input_dims ) ) ; <nl> - / / for now we dont have a model with a TensorFlowSplit <nl> + / / For now we don ' t have a model with a TensorFlowSplit <nl> / / with fused activation function . 
<nl> TFLITE_DCHECK ( Ac = = FusedActivationFunctionType : : kNone ) ; <nl> - const int whb = width * height * batches ; <nl> const Scalar * input_ptr = input_data ; <nl> - for ( int k = 0 ; k < whb ; k + + ) { <nl> + for ( int k = 0 ; k < outer_size ; k + + ) { <nl> for ( int i = 0 ; i < outputs_count ; + + i ) { <nl> memcpy ( output_data [ i ] + k * output_dims [ i ] - > sizes [ 0 ] , input_ptr , <nl> output_dims [ i ] - > sizes [ 0 ] * sizeof ( Scalar ) ) ; <nl> inline void LocalResponseNormalization ( const float * input_data , <nl> float * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " LocalResponseNormalization " ) ; <nl> - / * const int batches = * / MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - / * const int height = * / MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - / * const int width = * / MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - / * const int depth = * / MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> + MatchingFlatSize ( input_dims , output_dims ) ; <nl> <nl> const auto data_in = MapAsMatrixWithFirstDimAsRows ( input_data , input_dims ) ; <nl> auto data_out = MapAsMatrixWithFirstDimAsRows ( output_data , output_dims ) ; <nl> inline void Softmax ( const float * input_data , const Dims < 4 > & input_dims , <nl> float beta , float * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Softmax " ) ; <nl> - / * const int batches = * / MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - / * const int height = * / MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - / * const int width = * / MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - / * const int depth = * / MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> + MatchingFlatSize ( input_dims , output_dims ) ; <nl> <nl> const auto in_mat = MapAsMatrixWithFirstDimAsRows ( input_data , input_dims ) ; <nl> auto out_mat = MapAsMatrixWithFirstDimAsRows ( output_data , output_dims ) ; <nl> inline void Softmax ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> using FixedPoint0 = gemmlowp : : FixedPoint < int32 , 0 > ; <nl> <nl> gemmlowp : : ScopedProfilingLabel label ( " Softmax / 8bit " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> + const int outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> <nl> - const int outer_size = batches * height * width ; <nl> - <nl> for ( int b = 0 ; b < outer_size ; + + b ) { <nl> const uint8 * input_data_ptr = input_data + b * depth ; <nl> uint8 * output_data_ptr = output_data + b * depth ; <nl> inline void Softmax ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> inline void LogSoftmax ( const float * input_data , const Dims < 4 > & input_dims , <nl> float * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " LogSoftmax " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> + const int 
outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - / / Find max element value which we ' ll use to ensure numerical stability <nl> - / / taking advantage of the following equality : <nl> - / / log ( exp ( x [ i ] ) / sum ( exp ( x [ i ] ) ) ) = = log ( exp ( x [ i ] + C ) / sum ( exp ( x [ i ] + C ) ) ) <nl> - float max = std : : numeric_limits < float > : : lowest ( ) ; <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - max = std : : max ( max , input_data [ Offset ( input_dims , c , x , y , b ) ] ) ; <nl> - } <nl> + for ( int i = 0 ; i < outer_size ; + + i ) { <nl> + const float * block_input_data = input_data + i * depth ; <nl> + float * block_output_data = output_data + i * depth ; <nl> + / / Find max element value which we ' ll use to ensure numerical stability <nl> + / / taking advantage of the following equality : <nl> + / / log ( exp ( x [ i ] ) / sum ( exp ( x [ i ] ) ) ) = = log ( exp ( x [ i ] + C ) / sum ( exp ( x [ i ] + C ) ) ) <nl> + float max = std : : numeric_limits < float > : : lowest ( ) ; <nl> + for ( int c = 0 ; c < depth ; + + c ) { <nl> + max = std : : max ( max , block_input_data [ c ] ) ; <nl> + } <nl> <nl> - / / Compute sum . <nl> - float sum = 0 . f ; <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - sum + = std : : exp ( input_data [ Offset ( input_dims , c , x , y , b ) ] - max ) ; <nl> - } <nl> + / / Compute sum . <nl> + float sum = 0 . f ; <nl> + for ( int c = 0 ; c < depth ; + + c ) { <nl> + sum + = std : : exp ( block_input_data [ c ] - max ) ; <nl> + } <nl> <nl> - / / Compute result . <nl> - const float log_sum = std : : log ( sum ) ; <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = <nl> - input_data [ Offset ( input_dims , c , x , y , b ) ] - max - log_sum ; <nl> - } <nl> - } <nl> + / / Compute result . 
<nl> + const float log_sum = std : : log ( sum ) ; <nl> + for ( int c = 0 ; c < depth ; + + c ) { <nl> + block_output_data [ c ] = block_input_data [ c ] - max - log_sum ; <nl> } <nl> } <nl> } <nl> inline void LogSoftmax ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> <nl> for ( int i = 0 ; i < outer_size ; + + i ) { <nl> + const uint8 * block_input_data = input_data + i * depth ; <nl> + uint8 * block_output_data = output_data + i * depth ; <nl> uint8 max_in_row = 0 ; <nl> for ( int c = 0 ; c < depth ; + + c ) { <nl> - max_in_row = std : : max ( max_in_row , input_data [ i * depth + c ] ) ; <nl> + max_in_row = std : : max ( max_in_row , block_input_data [ c ] ) ; <nl> } <nl> <nl> FixedPointAccum sum_of_exps = FixedPointAccum : : Zero ( ) ; <nl> for ( int c = 0 ; c < depth ; + + c ) { <nl> - int32 input_diff = <nl> - static_cast < int32 > ( input_data [ i * depth + c ] ) - max_in_row ; <nl> + int32 input_diff = static_cast < int32 > ( block_input_data [ c ] ) - max_in_row ; <nl> if ( input_diff > = diff_min ) { <nl> const int32 input_diff_rescaled = <nl> MultiplyByQuantizedMultiplierGreaterThanOne ( <nl> inline void LogSoftmax ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> reverse_scaling_right_shift ) ) ; <nl> <nl> for ( int c = 0 ; c < depth ; + + c ) { <nl> - int32 input_diff = <nl> - static_cast < int32 > ( input_data [ i * depth + c ] ) - max_in_row ; <nl> + int32 input_diff = static_cast < int32 > ( block_input_data [ c ] ) - max_in_row ; <nl> if ( input_diff > adjusted_diff_min ) { <nl> const int32 input_diff_rescaled = <nl> MultiplyByQuantizedMultiplierGreaterThanOne ( <nl> inline void LogSoftmax ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> 31 - kScaledDiffIntegerBits - kOutputIntegerBits ) + <nl> 255 ; <nl> <nl> - output_data [ i * depth + c ] = static_cast < uint8 > ( <nl> + block_output_data [ c ] = static_cast < uint8 > ( <nl> std : : max ( std : : min ( unsat_output , static_cast < int32 > ( 255 ) ) , 0 ) ) ; <nl> } else { <nl> / / Set output to smallest value . 
<nl> - output_data [ i * depth + c ] = 0 ; <nl> + block_output_data [ c ] = 0 ; <nl> } <nl> } <nl> } <nl> inline void Logistic ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> int32 input_multiplier , int input_left_shift , <nl> uint8 * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Logistic / Uint8 " ) ; <nl> - / * batches * / MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - / * height * / MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - / * width * / MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - / * depth * / MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - const int size = RequiredBufferSizeForDims ( input_dims ) ; <nl> + const int size = MatchingFlatSize ( input_dims , output_dims ) ; <nl> <nl> int c = 0 ; <nl> # ifdef USE_NEON <nl> inline void Logistic ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> inline void Logistic ( const int16 * input_data , const Dims < 4 > & input_dims , <nl> int16 * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Logistic / Int16 " ) ; <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input_dims ) ; <nl> <nl> for ( int i = 0 ; i < flat_size ; i + + ) { <nl> } <nl> inline void Tanh ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> uint8 * output_data , const Dims < 4 > & output_dims ) { <nl> / / Note that this is almost the exact same code as in Logistic ( ) . <nl> gemmlowp : : ScopedProfilingLabel label ( " Tanh " ) ; <nl> - / * batches * / MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - / * height * / MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - / * width * / MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - / * depth * / MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - const int size = RequiredBufferSizeForDims ( input_dims ) ; <nl> + const int size = MatchingFlatSize ( input_dims , output_dims ) ; <nl> <nl> int c = 0 ; <nl> int32_t output_zero_point = 128 ; <nl> inline void Tanh ( const int16 * input_data , const Dims < 4 > & input_dims , <nl> TFLITE_DCHECK_GE ( input_left_shift , 0 ) ; <nl> TFLITE_DCHECK_LE ( input_left_shift , 1 ) ; <nl> <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input_dims ) ; <nl> <nl> int c = 0 ; <nl> const int16 * input_data_ptr = input_data ; <nl> inline void Dequantize ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> int32 zero_point , double scale , float * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Dequantize " ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - int32 val 
= input_data [ Offset ( input_dims , c , x , y , b ) ] ; <nl> - float result = static_cast < float > ( scale * ( val - zero_point ) ) ; <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = result ; <nl> - } <nl> - } <nl> - } <nl> + const int flat_size = MatchingFlatSize ( output_dims , input_dims ) ; <nl> + for ( int i = 0 ; i < flat_size ; + + i ) { <nl> + int32 val = input_data [ i ] ; <nl> + float result = static_cast < float > ( scale * ( val - zero_point ) ) ; <nl> + output_data [ i ] = result ; <nl> } <nl> } <nl> <nl> inline void FakeQuant ( const float * input_data , const Dims < 4 > & input_dims , <nl> & nudged_max , & nudged_scale ) ; <nl> const float inv_nudged_scale = 1 . 0f / nudged_scale ; <nl> <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> - const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - for ( int c = 0 ; c < depth ; + + c ) { <nl> - const float src_val = input_data [ Offset ( input_dims , c , x , y , b ) ] ; <nl> - const float clamped = <nl> - std : : min ( nudged_max , std : : max ( nudged_min , src_val ) ) ; <nl> - const float clamped_shifted = clamped - nudged_min ; <nl> - const float dst_val = <nl> - TfLiteRound ( clamped_shifted * inv_nudged_scale ) * nudged_scale + <nl> - nudged_min ; <nl> - output_data [ Offset ( output_dims , c , x , y , b ) ] = dst_val ; <nl> - } <nl> - } <nl> - } <nl> + const int flat_size = MatchingFlatSize ( output_dims , input_dims ) ; <nl> + for ( int i = 0 ; i < flat_size ; + + i ) { <nl> + const float src_val = input_data [ i ] ; <nl> + const float clamped = std : : min ( nudged_max , std : : max ( nudged_min , src_val ) ) ; <nl> + const float clamped_shifted = clamped - nudged_min ; <nl> + const float dst_val = <nl> + TfLiteRound ( clamped_shifted * inv_nudged_scale ) * nudged_scale + <nl> + nudged_min ; <nl> + output_data [ i ] = dst_val ; <nl> } <nl> } <nl> <nl> void Sub ( const T * input1_data , const Dims < 4 > & input1_dims , const T * input2_data , <nl> auto output_map = MapAsVector ( output_data , output_dims ) ; <nl> if ( AreSameDims ( input1_dims , input2_dims ) ) { <nl> output_map . array ( ) = input1_map . array ( ) - input2_map . array ( ) ; <nl> - } else if ( RequiredBufferSizeForDims ( input1_dims ) = = 1 ) { <nl> + } else if ( FlatSize ( input1_dims ) = = 1 ) { <nl> auto scalar = input1_data [ 0 ] ; <nl> output_map . array ( ) = scalar - input2_map . array ( ) ; <nl> - } else if ( RequiredBufferSizeForDims ( input2_dims ) = = 1 ) { <nl> + } else if ( FlatSize ( input2_dims ) = = 1 ) { <nl> auto scalar = input2_data [ 0 ] ; <nl> output_map . array ( ) = input1_map . array ( ) - scalar ; <nl> } else { <nl> void ArgMax ( const T3 * axis , const T1 * input_data , const Dims < 4 > & input_dims , <nl> / / input dimensions here . We enforce the constraint that the last dimension <nl> / / must always be 1 . 
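<nl> / / Illustrative shapes only ( not part of this diff ) : with input sizes
<nl> / / { depth , width , height , batches } and output sizes
<nl> / / { 1 , width , height , batches } , MatchingFlatSizeSkipDim ( input_dims , 0 ,
<nl> / / output_dims ) is width * height * batches , so the rewritten loop below
<nl> / / writes exactly one arg - max index per depth column .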
<nl> TFLITE_DCHECK_EQ ( ArraySize ( output_dims , 0 ) , 1 ) ; <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> + const int outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> const int depth = ArraySize ( input_dims , 0 ) ; <nl> - for ( int b = 0 ; b < batches ; + + b ) { <nl> - for ( int y = 0 ; y < height ; + + y ) { <nl> - for ( int x = 0 ; x < width ; + + x ) { <nl> - auto max_value = input_data [ Offset ( input_dims , 0 , x , y , b ) ] ; <nl> - int max_index = 0 ; <nl> - for ( int d = 1 ; d < depth ; + + d ) { <nl> - const auto & curr_value = input_data [ Offset ( input_dims , d , x , y , b ) ] ; <nl> - if ( curr_value > max_value ) { <nl> - max_value = curr_value ; <nl> - max_index = d ; <nl> - } <nl> - } <nl> - output_data [ Offset ( output_dims , 0 , x , y , b ) ] = max_index ; <nl> + for ( int i = 0 ; i < outer_size ; + + i ) { <nl> + auto max_value = * input_data ; <nl> + + + input_data ; <nl> + int max_index = 0 ; <nl> + for ( int d = 1 ; d < depth ; + + d ) { <nl> + const auto & curr_value = * input_data ; <nl> + if ( curr_value > max_value ) { <nl> + max_value = curr_value ; <nl> + max_index = d ; <nl> } <nl> + + + input_data ; <nl> } <nl> + * output_data = max_index ; <nl> + + + output_data ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / contrib / lite / kernels / internal / reference / reference_ops . h <nl> ppp b / tensorflow / contrib / lite / kernels / internal / reference / reference_ops . h <nl> inline void GetInvSqrtQuantizedMultiplier ( int32 input , int32 * output_inv_sqrt , <nl> inline void L2Normalization ( const uint8 * input_data , const Dims < 4 > & input_dims , <nl> int32 input_zero_point , uint8 * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> - const int batches = MatchingArraySize ( input_dims , 3 , output_dims , 3 ) ; <nl> - const int height = MatchingArraySize ( input_dims , 2 , output_dims , 2 ) ; <nl> - const int width = MatchingArraySize ( input_dims , 1 , output_dims , 1 ) ; <nl> const int depth = MatchingArraySize ( input_dims , 0 , output_dims , 0 ) ; <nl> - TFLITE_DCHECK_EQ ( batches , 1 ) ; <nl> - TFLITE_DCHECK_EQ ( height , 1 ) ; <nl> - TFLITE_DCHECK_EQ ( width , 1 ) ; <nl> + const int outer_size = MatchingFlatSizeSkipDim ( input_dims , 0 , output_dims ) ; <nl> + TFLITE_DCHECK_EQ ( outer_size , 1 ) ; <nl> int32 square_l2_norm = 0 ; <nl> for ( int i = 0 ; i < depth ; i + + ) { <nl> int32 diff = input_data [ Offset ( input_dims , i , 0 , 0 , 0 ) ] - input_zero_point ; <nl> inline void Add ( const int16 * input1_data , const Dims < 4 > & input1_dims , <nl> TFLITE_DCHECK_EQ ( output_activation_max , 32767 ) ; <nl> } <nl> <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input1_dims ) , flat_size ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input2_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> <nl> TFLITE_DCHECK ( input1_shift = = 0 | | input2_shift = = 0 ) ; <nl> TFLITE_DCHECK_GE ( input1_shift , 0 ) ; <nl> inline void Mul ( const int16 * input1_data , const Dims < 4 > & input1_dims , <nl> int16 * output_data , const Dims < 4 > & output_dims ) { <nl> gemmlowp : : ScopedProfilingLabel label ( " Mul / Int16 " ) ; <nl> <nl> - const int flat_size = 
RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input1_dims ) , flat_size ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input2_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> <nl> for ( int i = 0 ; i < flat_size ; i + + ) { <nl> / / F0 uses 0 integer bits , range [ - 1 , 1 ] . <nl> inline void Mul ( const int16 * input1_data , const Dims < 4 > & input1_dims , <nl> gemmlowp : : ScopedProfilingLabel label ( " Mul / Int16Uint8 " ) ; <nl> TFLITE_DCHECK_LE ( output_activation_min , output_activation_max ) ; <nl> <nl> - const int flat_size = RequiredBufferSizeForDims ( output_dims ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input1_dims ) , flat_size ) ; <nl> - TFLITE_DCHECK_EQ ( RequiredBufferSizeForDims ( input2_dims ) , flat_size ) ; <nl> + const int flat_size = MatchingFlatSize ( output_dims , input1_dims , input2_dims ) ; <nl> <nl> for ( int i = 0 ; i < flat_size ; i + + ) { <nl> / / F0 uses 0 integer bits , range [ - 1 , 1 ] . <nl> inline void TransposeConv ( const float * input_data , const Dims < 4 > & input_dims , <nl> / / computing their influence on the output , rather than looping through the <nl> / / output elements in the typical " gather " access pattern of a conv . We <nl> / / therefore must initialize the output array to zero . <nl> - for ( int i = 0 ; i < RequiredBufferSizeForDims ( output_dims ) ; i + + ) { <nl> + for ( int i = 0 ; i < FlatSize ( output_dims ) ; i + + ) { <nl> output_data [ i ] = 0 . 0f ; <nl> } <nl> <nl> template < typename T , ComparisonFn < T > F > <nl> inline void Comparison ( const T * input1_data , const Dims < 4 > & input1_dims , <nl> const T * input2_data , const Dims < 4 > & input2_dims , <nl> bool * output_data , const Dims < 4 > & output_dims ) { <nl> - const int64_t batches = <nl> - MatchingArraySize ( input1_dims , 3 , input2_dims , 3 , output_dims , 3 ) ; <nl> - const int64_t height = <nl> - MatchingArraySize ( input1_dims , 2 , input2_dims , 2 , output_dims , 2 ) ; <nl> - const int64_t width = <nl> - MatchingArraySize ( input1_dims , 1 , input2_dims , 1 , output_dims , 1 ) ; <nl> - const int64_t depth = <nl> - MatchingArraySize ( input1_dims , 0 , input2_dims , 0 , output_dims , 0 ) ; <nl> - for ( int64_t i = 0 ; i < batches * height * width * depth ; + + i ) { <nl> + const int64_t flatsize = <nl> + MatchingFlatSize ( input1_dims , input2_dims , output_dims ) ; <nl> + for ( int64_t i = 0 ; i < flatsize ; + + i ) { <nl> output_data [ i ] = F ( input1_data [ i ] , input2_data [ i ] ) ; <nl> } <nl> } <nl> inline void Comparison ( int left_shift , const T * input1_data , <nl> int32 input2_offset , int32 input2_multiplier , <nl> int input2_shift , bool * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> - const int64_t batches = <nl> - MatchingArraySize ( input1_dims , 3 , input2_dims , 3 , output_dims , 3 ) ; <nl> - const int64_t height = <nl> - MatchingArraySize ( input1_dims , 2 , input2_dims , 2 , output_dims , 2 ) ; <nl> - const int64_t width = <nl> - MatchingArraySize ( input1_dims , 1 , input2_dims , 1 , output_dims , 1 ) ; <nl> - const int64_t depth = <nl> - MatchingArraySize ( input1_dims , 0 , input2_dims , 0 , output_dims , 0 ) ; <nl> - for ( int64_t i = 0 ; i < batches * height * width * depth ; + + i ) { <nl> + const int64_t flatsize = <nl> + MatchingFlatSize ( input1_dims , input2_dims , output_dims ) ; <nl> + for ( int64_t i = 0 ; i < flatsize ; + + i ) { <nl> const int32 input1_val 
= input1_offset + input1_data [ i ] ; <nl> const int32 input2_val = input2_offset + input2_data [ i ] ; <nl> const int32 shifted_input1_val = input1_val * ( 1 < < left_shift ) ; <nl> inline void Select ( const D * input_condition_data , <nl> const Dims < 4 > & input_x_dims , const T * input_y_data , <nl> const Dims < 4 > & input_y_dims , T * output_data , <nl> const Dims < 4 > & output_dims ) { <nl> - const int64_t batches = <nl> - MatchingArraySize ( input_condition_dims , 3 , input_x_dims , 3 , input_y_dims , <nl> - 3 , output_dims , 3 ) ; <nl> - const int64_t height = <nl> - MatchingArraySize ( input_condition_dims , 2 , input_x_dims , 2 , input_y_dims , <nl> - 2 , output_dims , 2 ) ; <nl> - const int64_t width = MatchingArraySize ( input_condition_dims , 1 , input_x_dims , <nl> - 1 , input_y_dims , 1 , output_dims , 1 ) ; <nl> - const int64_t depth = MatchingArraySize ( input_condition_dims , 0 , input_x_dims , <nl> - 0 , input_y_dims , 0 , output_dims , 0 ) ; <nl> - <nl> - const int64_t num_elements = batches * height * width * depth ; <nl> - for ( int64_t i = 0 ; i < num_elements ; + + i ) { <nl> + const int64_t flatsize = <nl> + MatchingFlatSize ( input_x_dims , input_y_dims , output_dims ) ; <nl> + for ( int64_t i = 0 ; i < flatsize ; + + i ) { <nl> output_data [ i ] = <nl> input_condition_data [ i ] ? input_x_data [ i ] : input_y_data [ i ] ; <nl> } <nl> inline void RankOneSelect ( const D * input_condition_data , <nl> const T * input_x_data , const Dims < 4 > & input_x_dims , <nl> const T * input_y_data , const Dims < 4 > & input_y_dims , <nl> T * output_data , const Dims < 4 > & output_dims ) { <nl> - const int64_t rank = ArraySize ( input_condition_dims , 0 ) ; <nl> - <nl> - const int64_t batches = <nl> - MatchingArraySize ( input_x_dims , 3 , input_y_dims , 3 , output_dims , 3 ) ; <nl> - const int64_t height = <nl> - MatchingArraySize ( input_x_dims , 2 , input_y_dims , 2 , output_dims , 2 ) ; <nl> - const int64_t width = <nl> - MatchingArraySize ( input_x_dims , 1 , input_y_dims , 1 , output_dims , 1 ) ; <nl> - const int64_t depth = <nl> - MatchingArraySize ( input_x_dims , 0 , input_y_dims , 0 , output_dims , 0 ) ; <nl> - <nl> - TFLITE_DCHECK_EQ ( rank , batches ) ; <nl> + const int64_t rank = MatchingArraySize ( input_condition_dims , 0 , input_x_dims , <nl> + 3 , input_y_dims , 3 , output_dims , 3 ) ; <nl> + const int64_t inner_size = <nl> + MatchingFlatSizeSkipDim ( input_x_dims , 3 , input_y_dims , output_dims ) ; <nl> <nl> int64_t offset = 0 ; <nl> - int64_t size = depth * height * width ; <nl> for ( int64_t i = 0 ; i < rank ; i + + ) { <nl> const T * input_data = input_condition_data [ i ] ? input_x_data : input_y_data ; <nl> - memcpy ( output_data + offset , input_data + offset , size * sizeof ( T ) ) ; <nl> - offset + = size ; <nl> + memcpy ( output_data + offset , input_data + offset , inner_size * sizeof ( T ) ) ; <nl> + offset + = inner_size ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / contrib / lite / kernels / internal / types . h <nl> ppp b / tensorflow / contrib / lite / kernels / internal / types . h <nl> int MatchingArraySize ( const ArrayType1 & array1 , int index1 , <nl> <nl> template < int N > <nl> inline int FlatSize ( const Dims < N > & dims ) { <nl> - int max_offset = 0 ; <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> - max_offset + = ( dims . sizes [ i ] - 1 ) * dims . strides [ i ] ; <nl> + int flat_size = 1 ; <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> + flat_size * = dims . 
sizes [ i ] ; <nl> } <nl> - return max_offset + 1 ; <nl> + return flat_size ; <nl> } <nl> <nl> / / Deprecated . Prefer FlatSize . <nl> inline int RequiredBufferSizeForDims ( const Dims < 4 > & dims ) { <nl> / / arrays . <nl> template < int N > <nl> inline int MatchingFlatSize ( const Dims < N > & dims , const Dims < N > & check_dims_0 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> return FlatSize ( dims ) ; <nl> inline int MatchingFlatSize ( const Dims < N > & dims , const Dims < N > & check_dims_0 ) { <nl> template < int N > <nl> inline int MatchingFlatSize ( const Dims < N > & dims , const Dims < N > & check_dims_0 , <nl> const Dims < N > & check_dims_1 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> return MatchingFlatSize ( dims , check_dims_1 ) ; <nl> template < int N > <nl> inline int MatchingFlatSize ( const Dims < N > & dims , const Dims < N > & check_dims_0 , <nl> const Dims < N > & check_dims_1 , <nl> const Dims < N > & check_dims_2 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> return FlatSize ( dims , check_dims_1 , check_dims_2 ) ; <nl> inline int MatchingFlatSize ( const Dims < N > & dims , const Dims < N > & check_dims_0 , <nl> const Dims < N > & check_dims_1 , <nl> const Dims < N > & check_dims_2 , <nl> const Dims < N > & check_dims_3 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> return FlatSize ( dims , check_dims_1 , check_dims_2 , check_dims_3 ) ; <nl> template < int N > <nl> inline int FlatSizeSkipDim ( const Dims < N > & dims , int skip_dim ) { <nl> TFLITE_DCHECK ( skip_dim > = 0 & & skip_dim < N ) ; <nl> int flat_size = 1 ; <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> flat_size * = ( i = = skip_dim ) ? 1 : dims . sizes [ i ] ; <nl> } <nl> return flat_size ; <nl> inline int FlatSizeSkipDim ( const Dims < N > & dims , int skip_dim ) { <nl> template < int N > <nl> inline int MatchingFlatSizeSkipDim ( const Dims < N > & dims , int skip_dim , <nl> const Dims < N > & check_dims_0 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> if ( i ! = skip_dim ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> template < int N > <nl> inline int MatchingFlatSizeSkipDim ( const Dims < N > & dims , int skip_dim , <nl> const Dims < N > & check_dims_0 , <nl> const Dims < N > & check_dims_1 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> if ( i ! = skip_dim ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> inline int MatchingFlatSizeSkipDim ( const Dims < N > & dims , int skip_dim , <nl> const Dims < N > & check_dims_0 , <nl> const Dims < N > & check_dims_1 , <nl> const Dims < N > & check_dims_2 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> if ( i ! 
= skip_dim ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> inline int MatchingFlatSizeSkipDim ( const Dims < N > & dims , int skip_dim , <nl> const Dims < N > & check_dims_1 , <nl> const Dims < N > & check_dims_2 , <nl> const Dims < N > & check_dims_3 ) { <nl> - for ( int i = 0 ; i < N ; i + + ) { <nl> + for ( int i = 0 ; i < N ; + + i ) { <nl> if ( i ! = skip_dim ) { <nl> TFLITE_DCHECK_EQ ( ArraySize ( dims , i ) , ArraySize ( check_dims_0 , i ) ) ; <nl> } <nl> mmm a / tensorflow / contrib / lite / kernels / kernel_util . h <nl> ppp b / tensorflow / contrib / lite / kernels / kernel_util . h <nl> inline int64_t NumElements ( const TfLiteTensor * t ) { <nl> return count ; <nl> } <nl> <nl> - inline TfLiteTensor * GetOptionalInputTensor ( TfLiteContext * context , <nl> - const TfLiteNode * node , int index ) { <nl> + inline const TfLiteTensor * GetOptionalInputTensor ( TfLiteContext * context , <nl> + const TfLiteNode * node , <nl> + int index ) { <nl> const bool use_tensor = node - > inputs - > data [ index ] ! = kOptionalTensor ; <nl> if ( use_tensor ) { <nl> return & context - > tensors [ node - > inputs - > data [ index ] ] ; <nl> mmm a / tensorflow / contrib / lite / kernels / lstm . cc <nl> ppp b / tensorflow / contrib / lite / kernels / lstm . cc <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE ( context , params - > cell_clip > = 0 ) ; <nl> TF_LITE_ENSURE ( context , params - > proj_clip > = 0 ) ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kInputToInputWeightsTensor ) ; <nl> if ( input_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , input_to_input_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE_EQ ( context , input_to_cell_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> TF_LITE_ENSURE_EQ ( context , input_to_cell_weights - > dims - > data [ 1 ] , n_input ) ; <nl> <nl> - TfLiteTensor * recurrent_to_input_weights = <nl> + const TfLiteTensor * recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kRecurrentToInputWeightsTensor ) ; <nl> if ( recurrent_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , recurrent_to_input_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> ( recurrent_to_input_weights = = nullptr ) ) ; <nl> TF_LITE_ENSURE ( context , cifg_weights_all_or_none = = true ) ; <nl> <nl> - TfLiteTensor * cell_to_input_weights = <nl> + const TfLiteTensor * cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kCellToInputWeightsTensor ) ; <nl> if ( cell_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_input_weights - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , cell_to_input_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * cell_to_forget_weights = <nl> + const TfLiteTensor * cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , kCellToForgetWeightsTensor ) ; <nl> if ( cell_to_forget_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_forget_weights - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , cell_to_forget_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * cell_to_output_weights = <nl> + const TfLiteTensor * cell_to_output_weights = <nl> GetOptionalInputTensor ( 
context , node , kCellToOutputWeightsTensor ) ; <nl> if ( cell_to_output_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_output_weights - > dims - > size , 1 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE ( context , peephole_weights_all_or_none = = true ) ; <nl> <nl> / / Make sure the input gate bias is present only when not a CIFG - LSTM . <nl> - TfLiteTensor * input_gate_bias = <nl> + const TfLiteTensor * input_gate_bias = <nl> GetOptionalInputTensor ( context , node , kInputGateBiasTensor ) ; <nl> if ( use_cifg ) { <nl> TF_LITE_ENSURE_EQ ( context , input_gate_bias , nullptr ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE_EQ ( context , output_gate_bias - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , output_gate_bias - > dims - > data [ 0 ] , n_cell ) ; <nl> <nl> - TfLiteTensor * projection_weights = <nl> + const TfLiteTensor * projection_weights = <nl> GetOptionalInputTensor ( context , node , kProjectionWeightsTensor ) ; <nl> if ( projection_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , projection_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE_EQ ( context , projection_weights - > dims - > data [ 1 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * projection_bias = <nl> + const TfLiteTensor * projection_bias = <nl> GetOptionalInputTensor ( context , node , kProjectionBiasTensor ) ; <nl> if ( projection_bias ) { <nl> TF_LITE_ENSURE_EQ ( context , projection_bias - > dims - > size , 1 ) ; <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> output_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> cell_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kInputToInputWeightsTensor ) ; <nl> const bool use_cifg = ( input_to_input_weights = = nullptr ) ; <nl> if ( use_cifg ) { <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> auto * params = reinterpret_cast < TfLiteLSTMParams * > ( node - > builtin_data ) ; <nl> const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kInputToInputWeightsTensor ) ; <nl> const TfLiteTensor * input_to_forget_weights = <nl> GetInput ( context , node , kInputToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * input_to_output_weights = <nl> GetInput ( context , node , kInputToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * recurrent_to_input_weights = <nl> + const TfLiteTensor * recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kRecurrentToInputWeightsTensor ) ; <nl> const TfLiteTensor * recurrent_to_forget_weights = <nl> GetInput ( context , node , kRecurrentToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * recurrent_to_output_weights = <nl> GetInput ( context , node , kRecurrentToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * cell_to_input_weights = <nl> + const TfLiteTensor * cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kCellToInputWeightsTensor ) ; <nl> - TfLiteTensor * 
cell_to_forget_weights = <nl> + const TfLiteTensor * cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , kCellToForgetWeightsTensor ) ; <nl> - TfLiteTensor * cell_to_output_weights = <nl> + const TfLiteTensor * cell_to_output_weights = <nl> GetOptionalInputTensor ( context , node , kCellToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * input_gate_bias = <nl> + const TfLiteTensor * input_gate_bias = <nl> GetOptionalInputTensor ( context , node , kInputGateBiasTensor ) ; <nl> const TfLiteTensor * forget_gate_bias = <nl> GetInput ( context , node , kForgetGateBiasTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * output_gate_bias = <nl> GetInput ( context , node , kOutputGateBiasTensor ) ; <nl> <nl> - TfLiteTensor * projection_weights = <nl> + const TfLiteTensor * projection_weights = <nl> GetOptionalInputTensor ( context , node , kProjectionWeightsTensor ) ; <nl> - TfLiteTensor * projection_bias = <nl> + const TfLiteTensor * projection_bias = <nl> GetOptionalInputTensor ( context , node , kProjectionBiasTensor ) ; <nl> <nl> TfLiteTensor * output_state = GetOutput ( context , node , kOutputStateTensor ) ; <nl> mmm a / tensorflow / contrib / lite / kernels / pad . cc <nl> ppp b / tensorflow / contrib / lite / kernels / pad . cc <nl> struct PadContext { <nl> output = GetOutput ( context , node , 0 ) ; <nl> dims = NumDimensions ( input ) ; <nl> } <nl> - TfLiteTensor * constant_values ; <nl> + const TfLiteTensor * constant_values ; <nl> const TfLiteTensor * input ; <nl> const TfLiteTensor * paddings ; <nl> TfLiteTensor * output ; <nl> mmm a / tensorflow / contrib / lite / kernels / padding . h <nl> ppp b / tensorflow / contrib / lite / kernels / padding . h <nl> limitations under the License . <nl> # ifndef TENSORFLOW_CONTRIB_LITE_KERNELS_PADDING_H_ <nl> # define TENSORFLOW_CONTRIB_LITE_KERNELS_PADDING_H_ <nl> <nl> + # include " tensorflow / contrib / lite / builtin_op_data . h " <nl> + <nl> namespace tflite { <nl> <nl> inline int ComputePadding ( int stride , int dilation_rate , int in_size , <nl> inline int ComputePadding ( int stride , int dilation_rate , int in_size , <nl> return padding > 0 ? padding : 0 ; <nl> } <nl> <nl> + / / Matching GetWindowedOutputSize in TensorFlow . <nl> + inline int ComputeOutSize ( TfLitePadding padding , int image_size , <nl> + int filter_size , int stride ) { <nl> + switch ( padding ) { <nl> + case kTfLitePaddingSame : <nl> + return ( image_size + stride - 1 ) / stride ; <nl> + case kTfLitePaddingValid : <nl> + return ( image_size + stride - filter_size ) / stride ; <nl> + default : <nl> + return 0 ; <nl> + } <nl> + } <nl> + <nl> + inline TfLitePaddingValues ComputePaddingHeightWidth ( <nl> + int stride_height , int stride_width , int dilation_rate , int in_height , <nl> + int in_width , int filter_height , int filter_width , TfLitePadding padding ) { <nl> + int out_width = ComputeOutSize ( padding , in_width , filter_width , stride_width ) ; <nl> + int out_height = <nl> + ComputeOutSize ( padding , in_height , filter_height , stride_height ) ; <nl> + <nl> + TfLitePaddingValues padding_values ; <nl> + padding_values . height = <nl> + ComputePadding ( stride_height , 1 , in_height , filter_height , out_height ) ; <nl> + padding_values . 
width = <nl> + ComputePadding ( stride_width , 1 , in_width , filter_width , out_width ) ; <nl> + return padding_values ; <nl> + } <nl> } / / namespace tflite <nl> <nl> # endif / / TENSORFLOW_CONTRIB_LITE_KERNELS_PADDING_H_ <nl> mmm a / tensorflow / contrib / lite / kernels / register . cc <nl> ppp b / tensorflow / contrib / lite / kernels / register . cc <nl> TfLiteRegistration * Register_NEG ( ) ; <nl> TfLiteRegistration * Register_SELECT ( ) ; <nl> TfLiteRegistration * Register_SLICE ( ) ; <nl> TfLiteRegistration * Register_SIN ( ) ; <nl> + TfLiteRegistration * Register_TRANSPOSE_CONV ( ) ; <nl> <nl> BuiltinOpResolver : : BuiltinOpResolver ( ) { <nl> AddBuiltin ( BuiltinOperator_RELU , Register_RELU ( ) ) ; <nl> BuiltinOpResolver : : BuiltinOpResolver ( ) { <nl> AddBuiltin ( BuiltinOperator_SELECT , Register_SELECT ( ) ) ; <nl> AddBuiltin ( BuiltinOperator_SLICE , Register_SLICE ( ) ) ; <nl> AddBuiltin ( BuiltinOperator_SIN , Register_SIN ( ) ) ; <nl> + AddBuiltin ( BuiltinOperator_TRANSPOSE_CONV , Register_TRANSPOSE_CONV ( ) ) ; <nl> <nl> / / TODO ( andrewharp , ahentz ) : Move these somewhere more appropriate so that <nl> / / custom ops aren ' t always included by default . <nl> mmm a / tensorflow / contrib / lite / kernels / svdf . cc <nl> ppp b / tensorflow / contrib / lite / kernels / svdf . cc <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> TF_LITE_ASSERT_EQ ( input - > dims - > data [ 1 ] , weights_feature - > dims - > data [ 1 ] ) ; <nl> TF_LITE_ASSERT_EQ ( weights_time - > dims - > data [ 0 ] , num_filters ) ; <nl> <nl> - TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> + const TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> if ( bias ) { <nl> TF_LITE_ASSERT_EQ ( bias - > dims - > data [ 0 ] , num_units ) ; <nl> } <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> TfLiteTensor * scratch = GetTemporary ( context , node , / * index = * / 0 ) ; <nl> <nl> - TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> + const TfLiteTensor * bias = GetOptionalInputTensor ( context , node , kBiasTensor ) ; <nl> <nl> const int rank = params - > rank ; <nl> const int batch_size = input - > dims - > data [ 0 ] ; <nl> mmm a / tensorflow / contrib / lite / kernels / topk_v2 . cc <nl> ppp b / tensorflow / contrib / lite / kernels / topk_v2 . cc <nl> TfLiteStatus ResizeOutput ( TfLiteContext * context , TfLiteNode * node ) { <nl> / / INT32 number of top results is supported . <nl> TF_LITE_ENSURE_EQ ( context , top_k - > type , kTfLiteInt32 ) ; <nl> / / Check that the tensor contains only one value . <nl> - TF_LITE_ENSURE_EQ ( context , NumDimensions ( top_k ) , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , NumElements ( top_k ) , 1 ) ; <nl> - const int32 k = top_k - > data . i32 [ 0 ] ; <nl> + const int32 k = * GetTensorData < int32_t > ( top_k ) ; <nl> <nl> const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> const int num_dimensions = NumDimensions ( input ) ; <nl> new file mode 100644 <nl> index 0000000000000 . . 3c99661029ed1 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / lite / kernels / transpose_conv . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 
0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include < unistd . h > <nl> + # include < cassert > <nl> + # include < cmath > <nl> + # include < cstdio > <nl> + # include < cstdlib > <nl> + # include < iostream > <nl> + # include < limits > <nl> + <nl> + # include " tensorflow / contrib / lite / builtin_op_data . h " <nl> + # include " tensorflow / contrib / lite / context . h " <nl> + # include " tensorflow / contrib / lite / kernels / internal / optimized / optimized_ops . h " <nl> + # include " tensorflow / contrib / lite / kernels / internal / tensor . h " <nl> + # include " tensorflow / contrib / lite / kernels / kernel_util . h " <nl> + # include " tensorflow / contrib / lite / kernels / op_macros . h " <nl> + # include " tensorflow / contrib / lite / kernels / padding . h " <nl> + <nl> + namespace tflite { <nl> + namespace ops { <nl> + namespace builtin { <nl> + namespace transpose_conv { <nl> + <nl> + constexpr int kOutputShapeTensor = 0 ; <nl> + constexpr int kWeightsTensor = 1 ; <nl> + constexpr int kDataInputTensor = 2 ; <nl> + constexpr int kOutputTensor = 0 ; <nl> + <nl> + TfLiteStatus ResizeOutputShape ( TfLiteContext * context , <nl> + const TfLiteTensor * output_shape , <nl> + TfLiteTensor * output ) { <nl> + / / Currently only support int32 for output shape . <nl> + if ( output_shape - > type ! = kTfLiteInt32 ) { <nl> + context - > ReportError ( context , " Output shape is % d , not int32 . " , <nl> + output_shape - > type ) ; <nl> + return kTfLiteError ; <nl> + } <nl> + const int output_dimensions = NumElements ( output_shape ) ; <nl> + TfLiteIntArray * output_shape_array = TfLiteIntArrayCreate ( output_dimensions ) ; <nl> + for ( int i = 0 ; i < output_dimensions ; + + i ) { <nl> + output_shape_array - > data [ i ] = GetTensorData < int32_t > ( output_shape ) [ i ] ; <nl> + } <nl> + <nl> + return context - > ResizeTensor ( context , output , output_shape_array ) ; <nl> + } <nl> + <nl> + TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> + TF_LITE_ENSURE_EQ ( context , NumInputs ( node ) , 3 ) ; <nl> + TF_LITE_ENSURE_EQ ( context , NumOutputs ( node ) , 1 ) ; <nl> + <nl> + const TfLiteTensor * output_shape = <nl> + GetInput ( context , node , kOutputShapeTensor ) ; <nl> + const TfLiteTensor * weights = GetInput ( context , node , kWeightsTensor ) ; <nl> + const TfLiteTensor * input = GetInput ( context , node , kDataInputTensor ) ; <nl> + TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> + <nl> + TF_LITE_ENSURE_EQ ( context , NumDimensions ( output_shape ) , 1 ) ; <nl> + TF_LITE_ENSURE_EQ ( context , NumDimensions ( input ) , 4 ) ; <nl> + TF_LITE_ENSURE_EQ ( context , NumDimensions ( weights ) , 4 ) ; <nl> + <nl> + / / Currently only supports float32 .
<nl> + const TfLiteType data_type = input - > type ; <nl> + TF_LITE_ENSURE ( context , data_type = = kTfLiteFloat32 ) ; <nl> + TF_LITE_ENSURE_EQ ( context , output - > type , data_type ) ; <nl> + TF_LITE_ENSURE_EQ ( context , weights - > type , data_type ) ; <nl> + <nl> + / / Ensure that weights and inputs have the same channel dimension . <nl> + / / Note : TOCO will reorder weights in the following format : OHWI . <nl> + TF_LITE_ENSURE_EQ ( context , SizeOfDimension ( input , 3 ) , <nl> + SizeOfDimension ( weights , 0 ) ) ; <nl> + <nl> + if ( ! IsConstantTensor ( output_shape ) ) { <nl> + SetTensorToDynamic ( output ) ; <nl> + return kTfLiteOk ; <nl> + } <nl> + return ResizeOutputShape ( context , output_shape , output ) ; <nl> + } <nl> + <nl> + TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> + const TfLiteTensor * output_shape = <nl> + GetInput ( context , node , kOutputShapeTensor ) ; <nl> + const TfLiteTensor * weights = GetInput ( context , node , kWeightsTensor ) ; <nl> + const TfLiteTensor * input = GetInput ( context , node , kDataInputTensor ) ; <nl> + TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> + <nl> + const auto * params = <nl> + reinterpret_cast < TfLiteTransposeConvParams * > ( node - > builtin_data ) ; <nl> + <nl> + if ( IsDynamicTensor ( output ) ) { <nl> + TF_LITE_ENSURE_OK ( context , <nl> + ResizeOutputShape ( context , output_shape , output ) ) ; <nl> + } <nl> + <nl> + / / Get height and width of the output image . <nl> + const int width = SizeOfDimension ( output , 2 ) ; <nl> + const int height = SizeOfDimension ( output , 1 ) ; <nl> + const int filter_width = SizeOfDimension ( weights , 1 ) ; <nl> + const int filter_height = SizeOfDimension ( weights , 2 ) ; <nl> + <nl> + const int stride_width = params - > stride_width ; <nl> + const int stride_height = params - > stride_height ; <nl> + <nl> + const TfLitePaddingValues & padding_size = <nl> + ComputePaddingHeightWidth ( stride_height , stride_width , 1 , height , width , <nl> + filter_height , filter_width , params - > padding ) ; <nl> + <nl> + / / Currently only support float32 . <nl> + switch ( input - > type ) { <nl> + case kTfLiteFloat32 : <nl> + optimized_ops : : TransposeConv ( <nl> + GetTensorData < float > ( input ) , GetTensorDims ( input ) , <nl> + GetTensorData < float > ( weights ) , GetTensorDims ( weights ) , stride_width , <nl> + stride_height , padding_size . width , padding_size . height , <nl> + GetTensorData < float > ( output ) , GetTensorDims ( output ) ) ; <nl> + break ; <nl> + default : <nl> + context - > ReportError ( context , " Type % d , not currently supported . " , <nl> + input - > type ) ; <nl> + return kTfLiteError ; <nl> + } <nl> + return kTfLiteOk ; <nl> + } <nl> + <nl> + } / / namespace transpose_conv <nl> + <nl> + TfLiteRegistration * Register_TRANSPOSE_CONV ( ) { <nl> + static TfLiteRegistration r = { nullptr , nullptr , transpose_conv : : Prepare , <nl> + transpose_conv : : Eval } ; <nl> + return & r ; <nl> + } <nl> + <nl> + } / / namespace builtin <nl> + } / / namespace ops <nl> + } / / namespace tflite <nl> new file mode 100644 <nl> index 0000000000000 . . 52be08934997f <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / lite / kernels / transpose_conv_test . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . 
<nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include < cstdarg > <nl> + # include < gtest / gtest . h > <nl> + # include " tensorflow / contrib / lite / interpreter . h " <nl> + # include " tensorflow / contrib / lite / kernels / register . h " <nl> + # include " tensorflow / contrib / lite / kernels / test_util . h " <nl> + # include " tensorflow / contrib / lite / model . h " <nl> + <nl> + namespace tflite { <nl> + namespace { <nl> + <nl> + using : : testing : : ElementsAreArray ; <nl> + <nl> + class TransposeConvOpModel : public SingleOpModel { <nl> + public : <nl> + TransposeConvOpModel ( std : : initializer_list < int > input_shape , <nl> + std : : initializer_list < int > filter_shape , Padding padding , <nl> + int stride_w , int stride_h ) { <nl> + output_shape_ = AddInput ( TensorType_INT32 ) ; <nl> + filter_ = AddInput ( TensorType_FLOAT32 ) ; <nl> + input_ = AddInput ( TensorType_FLOAT32 ) ; <nl> + output_ = AddOutput ( TensorType_FLOAT32 ) ; <nl> + SetBuiltinOp ( <nl> + BuiltinOperator_TRANSPOSE_CONV , BuiltinOptions_TransposeConvOptions , <nl> + CreateTransposeConvOptions ( builder_ , padding , stride_w , stride_h ) <nl> + . Union ( ) ) ; <nl> + BuildInterpreter ( { { 4 } , filter_shape , input_shape } ) ; <nl> + } <nl> + <nl> + int output_shape ( ) { return output_shape_ ; } <nl> + int filter ( ) { return filter_ ; } <nl> + int input ( ) { return input_ ; } <nl> + <nl> + std : : vector < float > GetOutput ( ) { return ExtractVector < float > ( output_ ) ; } <nl> + std : : vector < int > GetOutputShape ( ) { return GetTensorShape ( output_ ) ; } <nl> + <nl> + private : <nl> + int output_shape_ ; <nl> + int filter_ ; <nl> + int input_ ; <nl> + int output_ ; <nl> + } ; <nl> + <nl> + / / Test case : <nl> + / / output = tf . nn . conv2d_backprop_input ( <nl> + / / tf . constant ( [ 1 , 4 , 4 , 1 ] ) , <nl> + / / tf . constant ( np . arange ( 1 , 10 ) , shape = [ 3 , 3 , 1 , 1 ] , dtype = tf . float32 ) , <nl> + / / tf . constant ( np . arange ( 1 , 17 ) , shape = [ 1 , 4 , 4 , 1 ] , dtype = tf . float32 ) , <nl> + / / [ 1 , 1 , 1 , 1 ] , <nl> + / / " SAME " ) <nl> + TEST ( TransposeConvOpModelTest , SimpleTest ) { <nl> + TransposeConvOpModel m ( { 1 , 4 , 4 , 1 } , { 1 , 3 , 3 , 1 } , Padding_SAME , 1 , 1 ) ; <nl> + m . PopulateTensor < int > ( m . output_shape ( ) , { 1 , 4 , 4 , 1 } ) ; <nl> + m . PopulateTensor < float > ( m . filter ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) ; <nl> + m . PopulateTensor < float > ( <nl> + m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 } ) ; <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + ElementsAreArray ( { 29 , 62 , 83 , 75 , 99 , 192 , 237 , 198 , 207 , 372 , <nl> + 417 , 330 , 263 , 446 , 485 , 365 } ) ) ; <nl> + EXPECT_THAT ( m . GetOutputShape ( ) , ElementsAreArray ( { 1 , 4 , 4 , 1 } ) ) ; <nl> + } <nl> + <nl> + / / Test case : <nl> + / / filter = tf . constant ( np . 
arange ( 1 , 19 ) , <nl> + / / shape = [ 3 , 3 , 1 , 2 ] , <nl> + / / dtype = tf . float32 ) <nl> + / / output = tf . nn . conv2d_backprop_input ( <nl> + / / tf . constant ( [ 1 , 4 , 4 , 1 ] ) , <nl> + / / filter , <nl> + / / tf . constant ( np . arange ( 1 , 33 ) , shape = [ 1 , 4 , 4 , 2 ] , dtype = tf . float32 ) , <nl> + / / [ 1 , 1 , 1 , 1 ] , <nl> + / / " SAME " ) <nl> + / / And filter value is derived by : <nl> + / / filter = tf . reshape ( tf . transpose ( filter , perm = [ 3 , 0 , 1 , 2 ] ) , shape = [ 18 , 1 ] ) <nl> + TEST ( TransposeConvOpModelTest , TwoFiltersTest ) { <nl> + TransposeConvOpModel m ( { 1 , 4 , 4 , 2 } , { 2 , 3 , 3 , 1 } , Padding_SAME , 1 , 1 ) ; <nl> + m . PopulateTensor < int > ( m . output_shape ( ) , { 1 , 4 , 4 , 1 } ) ; <nl> + m . PopulateTensor < float > ( m . filter ( ) , { 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 , 17 , 2 , 4 , 6 , <nl> + 8 , 10 , 12 , 14 , 16 , 18 } ) ; <nl> + m . PopulateTensor < float > ( <nl> + m . input ( ) , <nl> + { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , <nl> + 17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 } ) ; <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + ElementsAreArray ( { 184 , 412 , 568 , 528 , 678 , 1347 , 1689 , 1434 , 1494 , <nl> + 2715 , 3057 , 2442 , 1968 , 3352 , 3652 , 2760 } ) ) ; <nl> + EXPECT_THAT ( m . GetOutputShape ( ) , ElementsAreArray ( { 1 , 4 , 4 , 1 } ) ) ; <nl> + } <nl> + <nl> + / / Test case : <nl> + / / filter = tf . constant ( np . arange ( 1 , 19 ) , <nl> + / / shape = [ 3 , 3 , 1 , 2 ] , <nl> + / / dtype = tf . float32 ) <nl> + / / output = tf . nn . conv2d_backprop_input ( <nl> + / / tf . constant ( [ 1 , 6 , 6 , 1 ] ) , <nl> + / / filter , <nl> + / / tf . constant ( np . arange ( 1 , 33 ) , shape = [ 1 , 4 , 4 , 2 ] , dtype = tf . float32 ) , <nl> + / / [ 1 , 1 , 1 , 1 ] , <nl> + / / " VALID " ) <nl> + / / And filter value is derived by : <nl> + / / filter = tf . reshape ( tf . transpose ( filter , perm = [ 3 , 0 , 1 , 2 ] ) , shape = [ 1 , 18 ] ) <nl> + TEST ( TransposeConvOpModelTest , PaddingValidTest ) { <nl> + TransposeConvOpModel m ( { 1 , 4 , 4 , 2 } , { 2 , 3 , 3 , 1 } , Padding_VALID , 1 , 1 ) ; <nl> + m . PopulateTensor < int > ( m . output_shape ( ) , { 1 , 6 , 6 , 1 } ) ; <nl> + m . PopulateTensor < float > ( m . filter ( ) , { 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 , 17 , 2 , 4 , 6 , <nl> + 8 , 10 , 12 , 14 , 16 , 18 } ) ; <nl> + m . PopulateTensor < float > ( <nl> + m . input ( ) , <nl> + { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , <nl> + 17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 } ) ; <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( <nl> + m . GetOutput ( ) , <nl> + ElementsAreArray ( { 5 , 22 , 59 , 101 , 114 , 83 , 52 , 184 , 412 , <nl> + 568 , 528 , 344 , 237 , 678 , 1347 , 1689 , 1434 , 879 , <nl> + 597 , 1494 , 2715 , 3057 , 2442 , 1431 , 856 , 1968 , 3352 , <nl> + 3652 , 2760 , 1548 , 689 , 1534 , 2543 , 2729 , 2010 , 1103 } ) ) ; <nl> + EXPECT_THAT ( m . GetOutputShape ( ) , ElementsAreArray ( { 1 , 6 , 6 , 1 } ) ) ; <nl> + } <nl> + <nl> + / / Test case : <nl> + / / filter = tf . constant ( np . arange ( 1 , 10 ) , <nl> + / / shape = [ 3 , 3 , 1 , 1 ] , <nl> + / / dtype = tf . float32 ) <nl> + / / output = tf . nn . conv2d_backprop_input ( <nl> + / / tf . constant ( [ 1 , 5 , 5 , 1 ] ) , <nl> + / / filter , <nl> + / / tf . constant ( np . arange ( 1 , 5 ) , shape = [ 1 , 2 , 2 , 1 ] , dtype = tf . 
float32 ) , <nl> + / / [ 1 , 2 , 2 , 1 ] , <nl> + / / " VALID " ) <nl> + TEST ( TransposeConvOpModelTest , StrideValidTest ) { <nl> + TransposeConvOpModel m ( { 1 , 2 , 2 , 1 } , { 1 , 3 , 3 , 1 } , Padding_VALID , 2 , 2 ) ; <nl> + m . PopulateTensor < int > ( m . output_shape ( ) , { 1 , 5 , 5 , 1 } ) ; <nl> + m . PopulateTensor < float > ( m . filter ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) ; <nl> + m . PopulateTensor < float > ( m . input ( ) , { 1 , 2 , 3 , 4 } ) ; <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( <nl> + m . GetOutput ( ) , <nl> + ElementsAreArray ( { 1 , 2 , 5 , 4 , 6 , 4 , 5 , 14 , 10 , 12 , 10 , 14 , 36 , <nl> + 24 , 30 , 12 , 15 , 34 , 20 , 24 , 21 , 24 , 55 , 32 , 36 } ) ) ; <nl> + EXPECT_THAT ( m . GetOutputShape ( ) , ElementsAreArray ( { 1 , 5 , 5 , 1 } ) ) ; <nl> + } <nl> + <nl> + / / Test case : <nl> + / / filter = tf . constant ( np . arange ( 1 , 19 ) , <nl> + / / shape = [ 3 , 3 , 2 , 1 ] , <nl> + / / dtype = tf . float32 ) <nl> + / / output = tf . nn . conv2d_backprop_input ( <nl> + / / tf . constant ( [ 1 , 5 , 5 , 2 ] ) , <nl> + / / filter , <nl> + / / tf . constant ( np . arange ( 1 , 5 ) , shape = [ 1 , 2 , 2 , 1 ] , dtype = tf . float32 ) , <nl> + / / [ 1 , 2 , 2 , 1 ] , <nl> + / / " VALID " ) <nl> + TEST ( TransposeConvOpModelTest , MultiChannelTest ) { <nl> + TransposeConvOpModel m ( { 1 , 2 , 2 , 1 } , { 1 , 3 , 3 , 2 } , Padding_VALID , 2 , 2 ) ; <nl> + m . PopulateTensor < int > ( m . output_shape ( ) , { 1 , 5 , 5 , 2 } ) ; <nl> + m . PopulateTensor < float > ( m . filter ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , <nl> + 13 , 14 , 15 , 16 , 17 , 18 } ) ; <nl> + m . PopulateTensor < float > ( m . input ( ) , { 1 , 2 , 3 , 4 } ) ; <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( <nl> + m . GetOutput ( ) , <nl> + ElementsAreArray ( { 1 , 2 , 3 , 4 , 7 , 10 , 6 , 8 , 10 , 12 , 7 , 8 , 9 , <nl> + 10 , 25 , 28 , 18 , 20 , 22 , 24 , 16 , 20 , 24 , 28 , 62 , 72 , <nl> + 42 , 48 , 54 , 60 , 21 , 24 , 27 , 30 , 61 , 68 , 36 , 40 , 44 , <nl> + 48 , 39 , 42 , 45 , 48 , 103 , 110 , 60 , 64 , 68 , 72 } ) ) ; <nl> + EXPECT_THAT ( m . GetOutputShape ( ) , ElementsAreArray ( { 1 , 5 , 5 , 2 } ) ) ; <nl> + } <nl> + <nl> + / / Test case : <nl> + / / filter = tf . constant ( np . random . randint ( 1 , 10 , size = 9 ) , <nl> + / / shape = [ 3 , 3 , 1 , 1 ] , <nl> + / / dtype = tf . float32 ) <nl> + / / output = tf . nn . conv2d_backprop_input ( <nl> + / / tf . constant ( [ 1 , 3 , 4 , 1 ] ) , <nl> + / / filter , <nl> + / / tf . constant ( [ 323 , 521 ] , shape = [ 1 , 1 , 2 , 1 ] , dtype = tf . float32 ) , <nl> + / / [ 1 , 3 , 3 , 1 ] , <nl> + / / " SAME " ) <nl> + / / And filter value is derived by : <nl> + / / filter = tf . reshape ( tf . transpose ( filter , perm = [ 3 , 0 , 1 , 2 ] ) , shape = [ - 1 ] ) <nl> + TEST ( TransposeConvOpModelTest , AccuracyTest ) { <nl> + TransposeConvOpModel m ( { 1 , 1 , 2 , 1 } , { 1 , 3 , 3 , 1 } , Padding_SAME , 3 , 3 ) ; <nl> + m . PopulateTensor < int > ( m . output_shape ( ) , { 1 , 3 , 4 , 1 } ) ; <nl> + m . PopulateTensor < float > ( m . filter ( ) , { 9 , 5 , 6 , 9 , 8 , 5 , 3 , 1 , 4 } ) ; <nl> + m . PopulateTensor < float > ( m . input ( ) , { 323 , 521 } ) ; <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , ElementsAreArray ( ArrayFloatNear ( <nl> + { 1615 . , 1938 . , 4689 . , 2605 . , 2584 . , 1615 . , <nl> + 4689 . , 4168 . , 323 . , 1292 . , 1563 . , 521 . } ) ) ) ; <nl> + EXPECT_THAT ( m . 
GetOutputShape ( ) , ElementsAreArray ( { 1 , 3 , 4 , 1 } ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace tflite <nl> + <nl> + int main ( int argc , char * * argv ) { <nl> + : : tflite : : LogToStderr ( ) ; <nl> + : : testing : : InitGoogleTest ( & argc , argv ) ; <nl> + return RUN_ALL_TESTS ( ) ; <nl> + } <nl> mmm a / tensorflow / contrib / lite / kernels / unidirectional_sequence_lstm . cc <nl> ppp b / tensorflow / contrib / lite / kernels / unidirectional_sequence_lstm . cc <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE ( context , params - > cell_clip > = 0 ) ; <nl> TF_LITE_ENSURE ( context , params - > proj_clip > = 0 ) ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kInputToInputWeightsTensor ) ; <nl> if ( input_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , input_to_input_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE_EQ ( context , input_to_cell_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> TF_LITE_ENSURE_EQ ( context , input_to_cell_weights - > dims - > data [ 1 ] , n_input ) ; <nl> <nl> - TfLiteTensor * recurrent_to_input_weights = <nl> + const TfLiteTensor * recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kRecurrentToInputWeightsTensor ) ; <nl> if ( recurrent_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , recurrent_to_input_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> ( recurrent_to_input_weights = = nullptr ) ) ; <nl> TF_LITE_ENSURE ( context , cifg_weights_all_or_none = = true ) ; <nl> <nl> - TfLiteTensor * cell_to_input_weights = <nl> + const TfLiteTensor * cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kCellToInputWeightsTensor ) ; <nl> if ( cell_to_input_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_input_weights - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , cell_to_input_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * cell_to_forget_weights = <nl> + const TfLiteTensor * cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , kCellToForgetWeightsTensor ) ; <nl> if ( cell_to_forget_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_forget_weights - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , cell_to_forget_weights - > dims - > data [ 0 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * cell_to_output_weights = <nl> + const TfLiteTensor * cell_to_output_weights = <nl> GetOptionalInputTensor ( context , node , kCellToOutputWeightsTensor ) ; <nl> if ( cell_to_output_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , cell_to_output_weights - > dims - > size , 1 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE ( context , peephole_weights_all_or_none = = true ) ; <nl> <nl> / / Make sure the input gate bias is present only when not a CIFG - LSTM . 
<nl> - TfLiteTensor * input_gate_bias = <nl> + const TfLiteTensor * input_gate_bias = <nl> GetOptionalInputTensor ( context , node , kInputGateBiasTensor ) ; <nl> if ( use_cifg ) { <nl> TF_LITE_ENSURE_EQ ( context , input_gate_bias , nullptr ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE_EQ ( context , output_gate_bias - > dims - > size , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , output_gate_bias - > dims - > data [ 0 ] , n_cell ) ; <nl> <nl> - TfLiteTensor * projection_weights = <nl> + const TfLiteTensor * projection_weights = <nl> GetOptionalInputTensor ( context , node , kProjectionWeightsTensor ) ; <nl> if ( projection_weights ) { <nl> TF_LITE_ENSURE_EQ ( context , projection_weights - > dims - > size , 2 ) ; <nl> TfLiteStatus CheckInputTensorDimensions ( TfLiteContext * context , <nl> TF_LITE_ENSURE_EQ ( context , projection_weights - > dims - > data [ 1 ] , n_cell ) ; <nl> } <nl> <nl> - TfLiteTensor * projection_bias = <nl> + const TfLiteTensor * projection_bias = <nl> GetOptionalInputTensor ( context , node , kProjectionBiasTensor ) ; <nl> if ( projection_bias ) { <nl> TF_LITE_ENSURE_EQ ( context , projection_bias - > dims - > size , 1 ) ; <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> output_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> cell_state - > allocation_type = kTfLiteArenaRwPersistent ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kInputToInputWeightsTensor ) ; <nl> const bool use_cifg = ( input_to_input_weights = = nullptr ) ; <nl> if ( use_cifg ) { <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> auto * params = reinterpret_cast < TfLiteLSTMParams * > ( node - > builtin_data ) ; <nl> const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> <nl> - TfLiteTensor * input_to_input_weights = <nl> + const TfLiteTensor * input_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kInputToInputWeightsTensor ) ; <nl> const TfLiteTensor * input_to_forget_weights = <nl> GetInput ( context , node , kInputToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * input_to_output_weights = <nl> GetInput ( context , node , kInputToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * recurrent_to_input_weights = <nl> + const TfLiteTensor * recurrent_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kRecurrentToInputWeightsTensor ) ; <nl> const TfLiteTensor * recurrent_to_forget_weights = <nl> GetInput ( context , node , kRecurrentToForgetWeightsTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * recurrent_to_output_weights = <nl> GetInput ( context , node , kRecurrentToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * cell_to_input_weights = <nl> + const TfLiteTensor * cell_to_input_weights = <nl> GetOptionalInputTensor ( context , node , kCellToInputWeightsTensor ) ; <nl> - TfLiteTensor * cell_to_forget_weights = <nl> + const TfLiteTensor * cell_to_forget_weights = <nl> GetOptionalInputTensor ( context , node , kCellToForgetWeightsTensor ) ; <nl> - TfLiteTensor * cell_to_output_weights = <nl> + const TfLiteTensor * cell_to_output_weights = <nl> GetOptionalInputTensor ( context , node , kCellToOutputWeightsTensor ) ; <nl> <nl> - TfLiteTensor * input_gate_bias = <nl> + const TfLiteTensor * 
input_gate_bias = <nl> GetOptionalInputTensor ( context , node , kInputGateBiasTensor ) ; <nl> const TfLiteTensor * forget_gate_bias = <nl> GetInput ( context , node , kForgetGateBiasTensor ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> const TfLiteTensor * output_gate_bias = <nl> GetInput ( context , node , kOutputGateBiasTensor ) ; <nl> <nl> - TfLiteTensor * projection_weights = <nl> + const TfLiteTensor * projection_weights = <nl> GetOptionalInputTensor ( context , node , kProjectionWeightsTensor ) ; <nl> - TfLiteTensor * projection_bias = <nl> + const TfLiteTensor * projection_bias = <nl> GetOptionalInputTensor ( context , node , kProjectionBiasTensor ) ; <nl> <nl> TfLiteTensor * output_state = GetOutput ( context , node , kOutputStateTensor ) ; <nl> mmm a / tensorflow / contrib / lite / model . cc <nl> ppp b / tensorflow / contrib / lite / model . cc <nl> TfLiteStatus ParseOpData ( const Operator * op , BuiltinOperator op_type , <nl> case BuiltinOperator_SLICE : { <nl> break ; <nl> } <nl> + case BuiltinOperator_TRANSPOSE_CONV : { <nl> + TfLiteTransposeConvParams * params = <nl> + MallocPOD < TfLiteTransposeConvParams > ( ) ; <nl> + if ( auto * transpose_conv_params = <nl> + op - > builtin_options_as_TransposeConvOptions ( ) ) { <nl> + params - > padding = parse_padding ( transpose_conv_params - > padding ( ) ) ; <nl> + params - > stride_width = transpose_conv_params - > stride_w ( ) ; <nl> + params - > stride_height = transpose_conv_params - > stride_h ( ) ; <nl> + } <nl> + * builtin_data = reinterpret_cast < void * > ( params ) ; <nl> + break ; <nl> + } <nl> case BuiltinOperator_DELEGATE : { <nl> / / TODO ( ycling ) : Revisit when supporting saving delegated models . <nl> error_reporter - > Report ( " DELEGATE op shouldn ' t exist in model . " ) ; <nl> mmm a / tensorflow / contrib / lite / nnapi_delegate . cc <nl> ppp b / tensorflow / contrib / lite / nnapi_delegate . cc <nl> void AddOpsAndParams ( tflite : : Interpreter * interpreter , <nl> case tflite : : BuiltinOperator_SELECT : <nl> case tflite : : BuiltinOperator_SLICE : <nl> case tflite : : BuiltinOperator_SIN : <nl> + case tflite : : BuiltinOperator_TRANSPOSE_CONV : <nl> FATAL ( " Op code % d is currently not delegated to NNAPI " , builtin ) ; <nl> nn_op_type = - 1 ; / / set to invalid <nl> break ; <nl> mmm a / tensorflow / contrib / lite / schema / schema . fbs <nl> ppp b / tensorflow / contrib / lite / schema / schema . fbs <nl> enum BuiltinOperator : byte { <nl> SELECT = 64 , <nl> SLICE = 65 , <nl> SIN = 66 , <nl> + TRANSPOSE_CONV = 67 , <nl> } <nl> <nl> / / Options for the builtin operators . <nl> union BuiltinOptions { <nl> LessEqualOptions , <nl> SelectOptions , <nl> SliceOptions , <nl> + TransposeConvOptions , <nl> } <nl> <nl> enum Padding : byte { SAME , VALID } <nl> table SelectOptions { <nl> table SliceOptions { <nl> } <nl> <nl> + table TransposeConvOptions { <nl> + padding : Padding ; <nl> + stride_w : int ; <nl> + stride_h : int ; <nl> + } <nl> + <nl> / / An OperatorCode can be an enum value ( BuiltinOperator ) if the operator is a <nl> / / builtin , or a string if the operator is custom . <nl> table OperatorCode { <nl> mmm a / tensorflow / contrib / lite / schema / schema_generated . h <nl> ppp b / tensorflow / contrib / lite / schema / schema_generated . 
h <nl> struct SelectOptionsT ; <nl> struct SliceOptions ; <nl> struct SliceOptionsT ; <nl> <nl> + struct TransposeConvOptions ; <nl> + struct TransposeConvOptionsT ; <nl> + <nl> struct OperatorCode ; <nl> struct OperatorCodeT ; <nl> <nl> enum BuiltinOperator { <nl> BuiltinOperator_SELECT = 64 , <nl> BuiltinOperator_SLICE = 65 , <nl> BuiltinOperator_SIN = 66 , <nl> + BuiltinOperator_TRANSPOSE_CONV = 67 , <nl> BuiltinOperator_MIN = BuiltinOperator_ADD , <nl> - BuiltinOperator_MAX = BuiltinOperator_SIN <nl> + BuiltinOperator_MAX = BuiltinOperator_TRANSPOSE_CONV <nl> } ; <nl> <nl> - inline BuiltinOperator ( & EnumValuesBuiltinOperator ( ) ) [ 66 ] { <nl> + inline BuiltinOperator ( & EnumValuesBuiltinOperator ( ) ) [ 67 ] { <nl> static BuiltinOperator values [ ] = { <nl> BuiltinOperator_ADD , <nl> BuiltinOperator_AVERAGE_POOL_2D , <nl> inline BuiltinOperator ( & EnumValuesBuiltinOperator ( ) ) [ 66 ] { <nl> BuiltinOperator_LESS_EQUAL , <nl> BuiltinOperator_SELECT , <nl> BuiltinOperator_SLICE , <nl> - BuiltinOperator_SIN <nl> + BuiltinOperator_SIN , <nl> + BuiltinOperator_TRANSPOSE_CONV <nl> } ; <nl> return values ; <nl> } <nl> inline const char * * EnumNamesBuiltinOperator ( ) { <nl> " SELECT " , <nl> " SLICE " , <nl> " SIN " , <nl> + " TRANSPOSE_CONV " , <nl> nullptr <nl> } ; <nl> return names ; <nl> enum BuiltinOptions { <nl> BuiltinOptions_LessEqualOptions = 46 , <nl> BuiltinOptions_SelectOptions = 47 , <nl> BuiltinOptions_SliceOptions = 48 , <nl> + BuiltinOptions_TransposeConvOptions = 49 , <nl> BuiltinOptions_MIN = BuiltinOptions_NONE , <nl> - BuiltinOptions_MAX = BuiltinOptions_SliceOptions <nl> + BuiltinOptions_MAX = BuiltinOptions_TransposeConvOptions <nl> } ; <nl> <nl> - inline BuiltinOptions ( & EnumValuesBuiltinOptions ( ) ) [ 49 ] { <nl> + inline BuiltinOptions ( & EnumValuesBuiltinOptions ( ) ) [ 50 ] { <nl> static BuiltinOptions values [ ] = { <nl> BuiltinOptions_NONE , <nl> BuiltinOptions_Conv2DOptions , <nl> inline BuiltinOptions ( & EnumValuesBuiltinOptions ( ) ) [ 49 ] { <nl> BuiltinOptions_GreaterEqualOptions , <nl> BuiltinOptions_LessEqualOptions , <nl> BuiltinOptions_SelectOptions , <nl> - BuiltinOptions_SliceOptions <nl> + BuiltinOptions_SliceOptions , <nl> + BuiltinOptions_TransposeConvOptions <nl> } ; <nl> return values ; <nl> } <nl> inline const char * * EnumNamesBuiltinOptions ( ) { <nl> " LessEqualOptions " , <nl> " SelectOptions " , <nl> " SliceOptions " , <nl> + " TransposeConvOptions " , <nl> nullptr <nl> } ; <nl> return names ; <nl> template < > struct BuiltinOptionsTraits < SliceOptions > { <nl> static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions ; <nl> } ; <nl> <nl> + template < > struct BuiltinOptionsTraits < TransposeConvOptions > { <nl> + static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions ; <nl> + } ; <nl> + <nl> struct BuiltinOptionsUnion { <nl> BuiltinOptions type ; <nl> void * value ; <nl> struct BuiltinOptionsUnion { <nl> return type = = BuiltinOptions_SliceOptions ? <nl> reinterpret_cast < const SliceOptionsT * > ( value ) : nullptr ; <nl> } <nl> + TransposeConvOptionsT * AsTransposeConvOptions ( ) { <nl> + return type = = BuiltinOptions_TransposeConvOptions ? <nl> + reinterpret_cast < TransposeConvOptionsT * > ( value ) : nullptr ; <nl> + } <nl> + const TransposeConvOptionsT * AsTransposeConvOptions ( ) const { <nl> + return type = = BuiltinOptions_TransposeConvOptions ? 
<nl> + reinterpret_cast < const TransposeConvOptionsT * > ( value ) : nullptr ; <nl> + } <nl> } ; <nl> <nl> bool VerifyBuiltinOptions ( flatbuffers : : Verifier & verifier , const void * obj , BuiltinOptions type ) ; <nl> inline flatbuffers : : Offset < SliceOptions > CreateSliceOptions ( <nl> <nl> flatbuffers : : Offset < SliceOptions > CreateSliceOptions ( flatbuffers : : FlatBufferBuilder & _fbb , const SliceOptionsT * _o , const flatbuffers : : rehasher_function_t * _rehasher = nullptr ) ; <nl> <nl> + struct TransposeConvOptionsT : public flatbuffers : : NativeTable { <nl> + typedef TransposeConvOptions TableType ; <nl> + Padding padding ; <nl> + int32_t stride_w ; <nl> + int32_t stride_h ; <nl> + TransposeConvOptionsT ( ) <nl> + : padding ( Padding_SAME ) , <nl> + stride_w ( 0 ) , <nl> + stride_h ( 0 ) { <nl> + } <nl> + } ; <nl> + <nl> + struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers : : Table { <nl> + typedef TransposeConvOptionsT NativeTableType ; <nl> + enum { <nl> + VT_PADDING = 4 , <nl> + VT_STRIDE_W = 6 , <nl> + VT_STRIDE_H = 8 <nl> + } ; <nl> + Padding padding ( ) const { <nl> + return static_cast < Padding > ( GetField < int8_t > ( VT_PADDING , 0 ) ) ; <nl> + } <nl> + int32_t stride_w ( ) const { <nl> + return GetField < int32_t > ( VT_STRIDE_W , 0 ) ; <nl> + } <nl> + int32_t stride_h ( ) const { <nl> + return GetField < int32_t > ( VT_STRIDE_H , 0 ) ; <nl> + } <nl> + bool Verify ( flatbuffers : : Verifier & verifier ) const { <nl> + return VerifyTableStart ( verifier ) & & <nl> + VerifyField < int8_t > ( verifier , VT_PADDING ) & & <nl> + VerifyField < int32_t > ( verifier , VT_STRIDE_W ) & & <nl> + VerifyField < int32_t > ( verifier , VT_STRIDE_H ) & & <nl> + verifier . EndTable ( ) ; <nl> + } <nl> + TransposeConvOptionsT * UnPack ( const flatbuffers : : resolver_function_t * _resolver = nullptr ) const ; <nl> + void UnPackTo ( TransposeConvOptionsT * _o , const flatbuffers : : resolver_function_t * _resolver = nullptr ) const ; <nl> + static flatbuffers : : Offset < TransposeConvOptions > Pack ( flatbuffers : : FlatBufferBuilder & _fbb , const TransposeConvOptionsT * _o , const flatbuffers : : rehasher_function_t * _rehasher = nullptr ) ; <nl> + } ; <nl> + <nl> + struct TransposeConvOptionsBuilder { <nl> + flatbuffers : : FlatBufferBuilder & fbb_ ; <nl> + flatbuffers : : uoffset_t start_ ; <nl> + void add_padding ( Padding padding ) { <nl> + fbb_ . AddElement < int8_t > ( TransposeConvOptions : : VT_PADDING , static_cast < int8_t > ( padding ) , 0 ) ; <nl> + } <nl> + void add_stride_w ( int32_t stride_w ) { <nl> + fbb_ . AddElement < int32_t > ( TransposeConvOptions : : VT_STRIDE_W , stride_w , 0 ) ; <nl> + } <nl> + void add_stride_h ( int32_t stride_h ) { <nl> + fbb_ . AddElement < int32_t > ( TransposeConvOptions : : VT_STRIDE_H , stride_h , 0 ) ; <nl> + } <nl> + explicit TransposeConvOptionsBuilder ( flatbuffers : : FlatBufferBuilder & _fbb ) <nl> + : fbb_ ( _fbb ) { <nl> + start_ = fbb_ . StartTable ( ) ; <nl> + } <nl> + TransposeConvOptionsBuilder & operator = ( const TransposeConvOptionsBuilder & ) ; <nl> + flatbuffers : : Offset < TransposeConvOptions > Finish ( ) { <nl> + const auto end = fbb_ . 
EndTable ( start_ ) ; <nl> + auto o = flatbuffers : : Offset < TransposeConvOptions > ( end ) ; <nl> + return o ; <nl> + } <nl> + } ; <nl> + <nl> + inline flatbuffers : : Offset < TransposeConvOptions > CreateTransposeConvOptions ( <nl> + flatbuffers : : FlatBufferBuilder & _fbb , <nl> + Padding padding = Padding_SAME , <nl> + int32_t stride_w = 0 , <nl> + int32_t stride_h = 0 ) { <nl> + TransposeConvOptionsBuilder builder_ ( _fbb ) ; <nl> + builder_ . add_stride_h ( stride_h ) ; <nl> + builder_ . add_stride_w ( stride_w ) ; <nl> + builder_ . add_padding ( padding ) ; <nl> + return builder_ . Finish ( ) ; <nl> + } <nl> + <nl> + flatbuffers : : Offset < TransposeConvOptions > CreateTransposeConvOptions ( flatbuffers : : FlatBufferBuilder & _fbb , const TransposeConvOptionsT * _o , const flatbuffers : : rehasher_function_t * _rehasher = nullptr ) ; <nl> + <nl> struct OperatorCodeT : public flatbuffers : : NativeTable { <nl> typedef OperatorCode TableType ; <nl> BuiltinOperator builtin_code ; <nl> struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers : : Table { <nl> const SliceOptions * builtin_options_as_SliceOptions ( ) const { <nl> return builtin_options_type ( ) = = BuiltinOptions_SliceOptions ? static_cast < const SliceOptions * > ( builtin_options ( ) ) : nullptr ; <nl> } <nl> + const TransposeConvOptions * builtin_options_as_TransposeConvOptions ( ) const { <nl> + return builtin_options_type ( ) = = BuiltinOptions_TransposeConvOptions ? static_cast < const TransposeConvOptions * > ( builtin_options ( ) ) : nullptr ; <nl> + } <nl> const flatbuffers : : Vector < uint8_t > * custom_options ( ) const { <nl> return GetPointer < const flatbuffers : : Vector < uint8_t > * > ( VT_CUSTOM_OPTIONS ) ; <nl> } <nl> template < > inline const SliceOptions * Operator : : builtin_options_as < SliceOptions > <nl> return builtin_options_as_SliceOptions ( ) ; <nl> } <nl> <nl> + template < > inline const TransposeConvOptions * Operator : : builtin_options_as < TransposeConvOptions > ( ) const { <nl> + return builtin_options_as_TransposeConvOptions ( ) ; <nl> + } <nl> + <nl> struct OperatorBuilder { <nl> flatbuffers : : FlatBufferBuilder & fbb_ ; <nl> flatbuffers : : uoffset_t start_ ; <nl> inline flatbuffers : : Offset < SliceOptions > CreateSliceOptions ( flatbuffers : : FlatBuf <nl> _fbb ) ; <nl> } <nl> <nl> + inline TransposeConvOptionsT * TransposeConvOptions : : UnPack ( const flatbuffers : : resolver_function_t * _resolver ) const { <nl> + auto _o = new TransposeConvOptionsT ( ) ; <nl> + UnPackTo ( _o , _resolver ) ; <nl> + return _o ; <nl> + } <nl> + <nl> + inline void TransposeConvOptions : : UnPackTo ( TransposeConvOptionsT * _o , const flatbuffers : : resolver_function_t * _resolver ) const { <nl> + ( void ) _o ; <nl> + ( void ) _resolver ; <nl> + { auto _e = padding ( ) ; _o - > padding = _e ; } ; <nl> + { auto _e = stride_w ( ) ; _o - > stride_w = _e ; } ; <nl> + { auto _e = stride_h ( ) ; _o - > stride_h = _e ; } ; <nl> + } <nl> + <nl> + inline flatbuffers : : Offset < TransposeConvOptions > TransposeConvOptions : : Pack ( flatbuffers : : FlatBufferBuilder & _fbb , const TransposeConvOptionsT * _o , const flatbuffers : : rehasher_function_t * _rehasher ) { <nl> + return CreateTransposeConvOptions ( _fbb , _o , _rehasher ) ; <nl> + } <nl> + <nl> + inline flatbuffers : : Offset < TransposeConvOptions > CreateTransposeConvOptions ( flatbuffers : : FlatBufferBuilder & _fbb , const TransposeConvOptionsT * _o , const flatbuffers : : rehasher_function_t * _rehasher ) { <nl> + ( void ) 
_rehasher ; <nl> + ( void ) _o ; <nl> + struct _VectorArgs { flatbuffers : : FlatBufferBuilder * __fbb ; const TransposeConvOptionsT * __o ; const flatbuffers : : rehasher_function_t * __rehasher ; } _va = { & _fbb , _o , _rehasher } ; ( void ) _va ; <nl> + auto _padding = _o - > padding ; <nl> + auto _stride_w = _o - > stride_w ; <nl> + auto _stride_h = _o - > stride_h ; <nl> + return tflite : : CreateTransposeConvOptions ( <nl> + _fbb , <nl> + _padding , <nl> + _stride_w , <nl> + _stride_h ) ; <nl> + } <nl> + <nl> inline OperatorCodeT * OperatorCode : : UnPack ( const flatbuffers : : resolver_function_t * _resolver ) const { <nl> auto _o = new OperatorCodeT ( ) ; <nl> UnPackTo ( _o , _resolver ) ; <nl> inline bool VerifyBuiltinOptions ( flatbuffers : : Verifier & verifier , const void * ob <nl> auto ptr = reinterpret_cast < const SliceOptions * > ( obj ) ; <nl> return verifier . VerifyTable ( ptr ) ; <nl> } <nl> + case BuiltinOptions_TransposeConvOptions : { <nl> + auto ptr = reinterpret_cast < const TransposeConvOptions * > ( obj ) ; <nl> + return verifier . VerifyTable ( ptr ) ; <nl> + } <nl> default : return false ; <nl> } <nl> } <nl> inline void * BuiltinOptionsUnion : : UnPack ( const void * obj , BuiltinOptions type , c <nl> auto ptr = reinterpret_cast < const SliceOptions * > ( obj ) ; <nl> return ptr - > UnPack ( resolver ) ; <nl> } <nl> + case BuiltinOptions_TransposeConvOptions : { <nl> + auto ptr = reinterpret_cast < const TransposeConvOptions * > ( obj ) ; <nl> + return ptr - > UnPack ( resolver ) ; <nl> + } <nl> default : return nullptr ; <nl> } <nl> } <nl> inline flatbuffers : : Offset < void > BuiltinOptionsUnion : : Pack ( flatbuffers : : FlatBuff <nl> auto ptr = reinterpret_cast < const SliceOptionsT * > ( value ) ; <nl> return CreateSliceOptions ( _fbb , ptr , _rehasher ) . Union ( ) ; <nl> } <nl> + case BuiltinOptions_TransposeConvOptions : { <nl> + auto ptr = reinterpret_cast < const TransposeConvOptionsT * > ( value ) ; <nl> + return CreateTransposeConvOptions ( _fbb , ptr , _rehasher ) . Union ( ) ; <nl> + } <nl> default : return 0 ; <nl> } <nl> } <nl> inline BuiltinOptionsUnion : : BuiltinOptionsUnion ( const BuiltinOptionsUnion & u ) FL <nl> value = new SliceOptionsT ( * reinterpret_cast < SliceOptionsT * > ( u . value ) ) ; <nl> break ; <nl> } <nl> + case BuiltinOptions_TransposeConvOptions : { <nl> + value = new TransposeConvOptionsT ( * reinterpret_cast < TransposeConvOptionsT * > ( u . value ) ) ; <nl> + break ; <nl> + } <nl> default : <nl> break ; <nl> } <nl> inline void BuiltinOptionsUnion : : Reset ( ) { <nl> delete ptr ; <nl> break ; <nl> } <nl> + case BuiltinOptions_TransposeConvOptions : { <nl> + auto ptr = reinterpret_cast < TransposeConvOptionsT * > ( value ) ; <nl> + delete ptr ; <nl> + break ; <nl> + } <nl> default : break ; <nl> } <nl> value = nullptr ; <nl> mmm a / tensorflow / contrib / lite / testing / BUILD <nl> ppp b / tensorflow / contrib / lite / testing / BUILD <nl> gen_zipped_test_files ( <nl> " sub . zip " , <nl> " topk . zip " , <nl> " transpose . zip " , <nl> + " transpose_conv . zip " , <nl> " where . 
zip " , <nl> ] , <nl> ) <nl> cc_library ( <nl> deps = [ <nl> " : generate_testspec " , <nl> " : parse_testdata_lib " , <nl> - " : split " , <nl> " : tflite_driver " , <nl> - " : util " , <nl> - " / / tensorflow / contrib / lite : builtin_op_data " , <nl> " / / tensorflow / contrib / lite : framework " , <nl> " / / tensorflow / contrib / lite : string " , <nl> - " / / tensorflow / contrib / lite / kernels : builtin_ops " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / lite / testing / generate_examples . py <nl> ppp b / tensorflow / contrib / lite / testing / generate_examples . py <nl> def build_inputs ( parameters , sess , inputs , outputs ) : <nl> <nl> make_zip_of_tests ( zip_path , test_parameters , build_graph , build_inputs ) <nl> <nl> + <nl> + # Since compute output_shape is fairly complicated for <nl> + # tf . nn . conv2d_backprop_input input_sizes argument , so we here first perform a <nl> + # " conv2d " operation to get the output , then we use the output to feed in <nl> + # tf . nn . conv2d_backprop_input . <nl> + # This test will depend on the " conv2d " operation ' s correctness . <nl> + def make_transpose_conv_tests ( zip_path ) : <nl> + " " " Make a set of tests to do transpose_conv . " " " <nl> + <nl> + # Tensorflow only supports equal strides <nl> + test_parameters = [ { <nl> + " input_shape " : [ [ 1 , 3 , 4 , 1 ] , [ 1 , 10 , 10 , 3 ] , [ 3 , 20 , 20 , 1 ] ] , <nl> + " filter_size " : [ [ 1 , 1 ] , [ 1 , 2 ] , [ 3 , 3 ] ] , <nl> + " strides " : [ [ 1 , 1 , 1 , 1 ] , [ 1 , 3 , 3 , 1 ] ] , <nl> + " padding " : [ " SAME " , " VALID " ] , <nl> + " data_format " : [ " NHWC " ] , <nl> + " channel_multiplier " : [ 1 , 2 ] , <nl> + } ] <nl> + <nl> + def get_tensor_shapes ( parameters ) : <nl> + input_shape = parameters [ " input_shape " ] <nl> + filter_size = parameters [ " filter_size " ] <nl> + filter_shape = filter_size + [ <nl> + input_shape [ 3 ] , parameters [ " channel_multiplier " ] <nl> + ] <nl> + return [ input_shape , filter_shape ] <nl> + <nl> + def build_graph ( parameters ) : <nl> + " " " Build a transpose_conv graph given ` parameters ` . " " " <nl> + input_shape , filter_shape = get_tensor_shapes ( parameters ) <nl> + input_tensor = tf . placeholder ( <nl> + dtype = tf . float32 , name = " input " , shape = input_shape ) <nl> + <nl> + filter_input = tf . placeholder ( <nl> + dtype = tf . float32 , name = " filter " , shape = filter_shape ) <nl> + <nl> + conv_outputs = tf . nn . conv2d ( <nl> + input_tensor , <nl> + filter_input , <nl> + strides = parameters [ " strides " ] , <nl> + padding = parameters [ " padding " ] , <nl> + data_format = parameters [ " data_format " ] ) <nl> + out = tf . nn . conv2d_backprop_input ( <nl> + input_shape , <nl> + filter_input , <nl> + conv_outputs , <nl> + strides = parameters [ " strides " ] , <nl> + padding = parameters [ " padding " ] , <nl> + data_format = parameters [ " data_format " ] ) <nl> + input_tensors = [ input_tensor , filter_input ] <nl> + return input_tensors , [ out ] <nl> + <nl> + def build_inputs ( parameters , sess , inputs , outputs ) : <nl> + input_shape , filter_shape = get_tensor_shapes ( parameters ) <nl> + values = [ <nl> + create_tensor_data ( np . float32 , input_shape ) , <nl> + create_tensor_data ( np . float32 , filter_shape ) <nl> + ] <nl> + return values , sess . run ( outputs , feed_dict = dict ( zip ( inputs , values ) ) ) <nl> + <nl> + make_zip_of_tests ( zip_path , test_parameters , build_graph , build_inputs ) <nl> + <nl> + <nl> # Toco binary path provided by the generate rule . 
<nl> bin_path = None <nl> <nl> + <nl> def main ( unused_args ) : <nl> global bin_path <nl> def mkdir_if_not_exist ( x ) : <nl> mmm a / tensorflow / contrib / lite / testing / generate_testspec . cc <nl> ppp b / tensorflow / contrib / lite / testing / generate_testspec . cc <nl> bool GenerateTestSpecFromTensorflowModel ( <nl> / / Invoke tensorflow model . <nl> TfDriver runner ( input_layer , input_layer_type , input_layer_shape , <nl> output_layer ) ; <nl> + if ( ! runner . IsValid ( ) ) { <nl> + cerr < < runner . GetErrorMessage ( ) < < endl ; <nl> + return false ; <nl> + } <nl> + <nl> runner . LoadModel ( tensorflow_model_path ) ; <nl> + if ( ! runner . IsValid ( ) ) { <nl> + cerr < < runner . GetErrorMessage ( ) < < endl ; <nl> + return false ; <nl> + } <nl> + <nl> for ( int i = 0 ; i < input_values . size ( ) ; i + + ) { <nl> runner . SetInput ( i , input_values [ i ] ) ; <nl> + if ( ! runner . IsValid ( ) ) { <nl> + cerr < < runner . GetErrorMessage ( ) < < endl ; <nl> + return false ; <nl> + } <nl> } <nl> + <nl> runner . Invoke ( ) ; <nl> + if ( ! runner . IsValid ( ) ) { <nl> + cerr < < runner . GetErrorMessage ( ) < < endl ; <nl> + return false ; <nl> + } <nl> <nl> / / Write test spec . <nl> stream < < " load_model : " < < tflite_model_path < < " \ n " ; <nl> bool GenerateTestSpecFromTensorflowModel ( <nl> } <nl> for ( int i = 0 ; i < output_layer . size ( ) ; i + + ) { <nl> stream < < " output : \ " " < < runner . ReadOutput ( i ) < < " \ " \ n " ; <nl> + if ( ! runner . IsValid ( ) ) { <nl> + cerr < < runner . GetErrorMessage ( ) < < endl ; <nl> + return false ; <nl> + } <nl> } <nl> stream < < " } \ n " ; <nl> <nl> mmm a / tensorflow / contrib / lite / testing / generated_examples_zip_test . cc <nl> ppp b / tensorflow / contrib / lite / testing / generated_examples_zip_test . cc <nl> INSTANTIATE_TESTS ( split ) <nl> INSTANTIATE_TESTS ( squeeze ) <nl> INSTANTIATE_TESTS ( strided_slice ) <nl> INSTANTIATE_TESTS ( sub ) <nl> + INSTANTIATE_TESTS ( topk ) <nl> INSTANTIATE_TESTS ( transpose ) <nl> + INSTANTIATE_TESTS ( transpose_conv ) <nl> INSTANTIATE_TESTS ( where ) <nl> <nl> } / / namespace testing <nl> mmm a / tensorflow / contrib / lite / testing / join . h <nl> ppp b / tensorflow / contrib / lite / testing / join . h <nl> limitations under the License . <nl> namespace tflite { <nl> namespace testing { <nl> <nl> - / / Join a list of data separated by delimieter . <nl> + / / Join a list of data separated by delimiter . <nl> template < typename T > <nl> string Join ( T * data , size_t len , const string & delimiter ) { <nl> if ( len = = 0 | | data = = nullptr ) { <nl> string Join ( T * data , size_t len , const string & delimiter ) { <nl> return result . str ( ) ; <nl> } <nl> <nl> + / / Join a list of uint8 data separated by a delimiter . Cast data to int before <nl> + / / placing it in the string to prevent values from being treated like chars . <nl> + template < > <nl> + inline string Join < uint8_t > ( uint8_t * data , size_t len , <nl> + const string & delimiter ) { <nl> + if ( len = = 0 | | data = = nullptr ) { <nl> + return " " ; <nl> + } <nl> + std : : stringstream result ; <nl> + result < < static_cast < int > ( data [ 0 ] ) ; <nl> + for ( int i = 1 ; i < len ; i + + ) { <nl> + result < < delimiter < < static_cast < int > ( data [ i ] ) ; <nl> + } <nl> + return result . str ( ) ; <nl> + } <nl> + <nl> } / / namespace testing <nl> } / / namespace tflite <nl> <nl> mmm a / tensorflow / contrib / lite / testing / test_runner . 
h <nl> ppp b / tensorflow / contrib / lite / testing / test_runner . h <nl> limitations under the License . <nl> # ifndef TENSORFLOW_CONTRIB_LITE_TESTING_TEST_RUNNER_H_ <nl> # define TENSORFLOW_CONTRIB_LITE_TESTING_TEST_RUNNER_H_ <nl> <nl> + # include < iostream > <nl> # include < memory > <nl> # include < string > <nl> # include < vector > <nl> class TestRunner { <nl> <nl> / / Invalidate the test runner , preventing it from executing any further . <nl> void Invalidate ( const string & error_message ) { <nl> + cerr < < error_message < < std : : endl ; <nl> error_message_ = error_message ; <nl> } <nl> bool IsValid ( ) const { return error_message_ . empty ( ) ; } <nl> mmm a / tensorflow / contrib / lite / testing / tf_driver . cc <nl> ppp b / tensorflow / contrib / lite / testing / tf_driver . cc <nl> void TfDriver : : LoadModel ( const string & bin_file_path ) { <nl> session_ . reset ( tensorflow : : NewSession ( options ) ) ; <nl> auto status = session_ - > Create ( graphdef ) ; <nl> if ( ! status . ok ( ) ) { <nl> - Invalidate ( " Failed to create session " ) ; <nl> + Invalidate ( " Failed to create session . " + status . error_message ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / contrib / lite / toco / tflite / operator . cc <nl> ppp b / tensorflow / contrib / lite / toco / tflite / operator . cc <nl> class ArgMax : public BuiltinOperator < ArgMaxOperator , : : tflite : : ArgMaxOptions , <nl> } <nl> } ; <nl> <nl> + class TransposeConv <nl> + : public BuiltinOperator < TransposeConvOperator , <nl> + : : tflite : : TransposeConvOptions , <nl> + : : tflite : : BuiltinOptions_TransposeConvOptions > { <nl> + public : <nl> + using BuiltinOperator : : BuiltinOperator ; <nl> + <nl> + flatbuffers : : Offset < TfLiteOptions > WriteOptions ( <nl> + const TocoOperator & op , <nl> + flatbuffers : : FlatBufferBuilder * builder ) const override { <nl> + auto padding = Padding : : Serialize ( op . padding . type ) ; <nl> + return : : tflite : : CreateTransposeConvOptions ( <nl> + * builder , padding , op . stride_width , op . stride_height ) ; <nl> + } <nl> + <nl> + void ReadOptions ( const TfLiteOptions & options , <nl> + TocoOperator * op ) const override { <nl> + op - > padding . type = Padding : : Deserialize ( options . padding ( ) ) ; <nl> + op - > stride_width = options . stride_w ( ) ; <nl> + op - > stride_height = options . stride_h ( ) ; <nl> + } <nl> + } ; <nl> + <nl> class TensorFlowUnsupported : public BaseOperator { <nl> public : <nl> using BaseOperator : : BaseOperator ; <nl> std : : vector < std : : unique_ptr < BaseOperator > > BuildOperatorList ( ) { <nl> new Cast ( : : tflite : : BuiltinOperator_CAST , OperatorType : : kCast ) ) ; <nl> ops . emplace_back ( <nl> new ArgMax ( : : tflite : : BuiltinOperator_ARG_MAX , OperatorType : : kArgMax ) ) ; <nl> + ops . emplace_back ( new TransposeConv ( : : tflite : : BuiltinOperator_TRANSPOSE_CONV , <nl> + OperatorType : : kTransposeConv ) ) ; <nl> <nl> / / Custom Operators . <nl> ops . emplace_back ( <nl> mmm a / tensorflow / contrib / lite / toco / tflite / operator_test . cc <nl> ppp b / tensorflow / contrib / lite / toco / tflite / operator_test . cc <nl> TEST_F ( OperatorTest , BuiltinArgMax ) { <nl> EXPECT_EQ ( op . output_data_type , output_toco_op - > output_data_type ) ; <nl> } <nl> <nl> + TEST_F ( OperatorTest , BuiltinTransposeConv ) { <nl> + TransposeConvOperator op ; <nl> + op . stride_width = 123 ; <nl> + op . stride_height = 124 ; <nl> + op . padding . 
type = PaddingType : : kValid ; <nl> + auto output_toco_op = SerializeAndDeserialize ( <nl> + GetOperator ( " TRANSPOSE_CONV " , OperatorType : : kTransposeConv ) , op ) ; <nl> + EXPECT_EQ ( op . stride_width , output_toco_op - > stride_width ) ; <nl> + EXPECT_EQ ( op . stride_height , output_toco_op - > stride_height ) ; <nl> + EXPECT_EQ ( op . padding . type , output_toco_op - > padding . type ) ; <nl> + } <nl> + <nl> TEST_F ( OperatorTest , TensorFlowUnsupported ) { <nl> TensorFlowUnsupportedOperator op ; <nl> op . tensorflow_op = " MyCustomUnsupportedOp " ; <nl> mmm a / tensorflow / contrib / lite / tools / BUILD <nl> ppp b / tensorflow / contrib / lite / tools / BUILD <nl> cc_library ( <nl> " / / tensorflow / contrib / lite : schema_fbs_version " , <nl> " / / tensorflow / contrib / lite : string_util " , <nl> " / / tensorflow / contrib / lite / schema : schema_fbs " , <nl> - " @ com_google_absl / / absl / base : core_headers " , <nl> ] , <nl> ) <nl> <nl> cc_test ( <nl> " : verifier " , <nl> " / / tensorflow / contrib / lite : framework " , <nl> " / / tensorflow / contrib / lite : schema_fbs_version " , <nl> - " / / tensorflow / contrib / lite : string_util " , <nl> " / / tensorflow / contrib / lite / schema : schema_fbs " , <nl> " / / tensorflow / contrib / lite / testing : util " , <nl> " / / tensorflow / core : framework_lite " , <nl> mmm a / tensorflow / contrib / losses / python / losses / loss_ops_test . py <nl> ppp b / tensorflow / contrib / losses / python / losses / loss_ops_test . py <nl> <nl> from tensorflow . contrib . losses . python . losses import loss_ops <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> - from tensorflow . python . framework import errors_impl <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import random_seed <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import math_ops <nl> def testLossWithDynamicallyShapedWeights2D ( self ) : <nl> self . assertAlmostEqual ( np . average ( weights ) * 10 . 0 , loss , 3 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SparseSoftmaxCrossEntropyLossTest ( test . TestCase ) : <nl> <nl> def testNoneWeightRaisesValueError ( self ) : <nl> def testInconsistentLabelShapeRaisesException ( self ) : <nl> labels = constant_op . constant ( [ [ 0 , 1 ] , [ 2 , 3 ] ] ) <nl> weights = constant_op . constant ( [ 1 . 2 , 3 . 4 , 5 . 6 , 7 . 8 ] ) <nl> <nl> - if ops . _USE_C_API : <nl> - error_type = ValueError <nl> - else : <nl> - error_type = errors_impl . InvalidArgumentError <nl> - with self . assertRaises ( error_type ) : <nl> + with self . assertRaises ( ValueError ) : <nl> loss_ops . sparse_softmax_cross_entropy ( <nl> logits , labels , weights = weights ) . eval ( ) <nl> <nl> mmm a / tensorflow / contrib / optimizer_v2 / checkpointable_utils_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / checkpointable_utils_test . py <nl> <nl> from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import template <nl> from tensorflow . python . ops import variable_scope <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import saver as core_saver <nl> from tensorflow . python . 
training import training_util <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> class NonLayerCheckpointable ( checkpointable . Checkpointable ) : <nl> mmm a / tensorflow / contrib / optimizer_v2 / optimizer_v2 . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / optimizer_v2 . py <nl> <nl> from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables <nl> - from tensorflow . python . training import checkpointable <nl> from tensorflow . python . training import distribute as distribute_lib <nl> from tensorflow . python . training import optimizer as optimizer_v1 <nl> from tensorflow . python . training import slot_creator <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import nest <nl> <nl> <nl> mmm a / tensorflow / contrib / tensorrt / segment / segment_test . cc <nl> ppp b / tensorflow / contrib / tensorrt / segment / segment_test . cc <nl> limitations under the License . <nl> # include " tensorflow / contrib / tensorrt / segment / segment . h " <nl> # include " tensorflow / c / c_api . h " <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> - # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / platform / test . h " <nl> mmm a / tensorflow / contrib / tensorrt / test / tf_trt_integration_test . py <nl> ppp b / tensorflow / contrib / tensorrt / test / tf_trt_integration_test . py <nl> <nl> from tensorflow . python . platform import googletest <nl> <nl> <nl> - @ test_util . with_c_api <nl> class IntegrationTest ( test_util . TensorFlowTestCase ) : <nl> " " " Class to test Tensorflow - TensorRT integration . " " " <nl> <nl> mmm a / tensorflow / contrib / tpu / profiler / dump_tpu_profile . cc <nl> ppp b / tensorflow / contrib / tpu / profiler / dump_tpu_profile . cc <nl> limitations under the License . <nl> # include " tensorflow / contrib / tpu / profiler / op_profile . pb . h " <nl> # include " tensorflow / contrib / tpu / profiler / trace_events . pb . h " <nl> # include " tensorflow / contrib / tpu / profiler / trace_events_to_json . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / io / compression . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> limitations under the License . <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / protobuf . h " <nl> - # include " tensorflow / core / protobuf / config . pb . h " <nl> - # include " tensorflow / core / util / event . pb . h " <nl> # include " tensorflow / core / util / events_writer . h " <nl> <nl> namespace tensorflow { <nl> mmm a / tensorflow / contrib / tpu / python / tpu / keras_support . py <nl> ppp b / tensorflow / contrib / tpu / python / tpu / keras_support . py <nl> <nl> from tensorflow . contrib . tpu . proto import compilation_result_pb2 as tpu_compilation_result <nl> from tensorflow . contrib . tpu . python . 
ops import tpu_ops <nl>
 from tensorflow . contrib . tpu . python . tpu import tpu <nl>
 + from tensorflow . contrib . tpu . python . tpu import tpu_optimizer <nl>
 from tensorflow . core . protobuf import config_pb2 <nl>
 from tensorflow . python . client import session as tf_session <nl>
 from tensorflow . python . estimator import model_fn as model_fn_lib <nl>
 def _valid_name ( tensor_name ) : <nl>
 return re . sub ( ' [ ^ a - zA - Z0 - 9_ - ] + ' , ' ' , tensor_name ) <nl>
 <nl>
 <nl>
 + def _replicated_optimizer ( opt , num_replicas ) : <nl>
 + " " " Wrap the optimizer ` opt ` with CrossShardOptimizer if applicable . " " " <nl>
 + if num_replicas = = 1 : <nl>
 + return opt <nl>
 + return keras_optimizers . TFOptimizer ( <nl>
 + optimizer = tpu_optimizer . CrossShardOptimizer ( opt . optimizer ) <nl>
 + ) <nl>
 + <nl>
 + <nl>
 class TPUFunction ( object ) : <nl>
 " " " K . function compatible interface for invoking a TPU compiled function . <nl>
 <nl>
 class TPUFunction ( object ) : <nl>
 instead of being injected as ` feed_dict ` items or fetches . <nl>
 " " " <nl>
 <nl>
 - def __init__ ( self , model , execution_mode ) : <nl>
 + def __init__ ( self , model , execution_mode , num_replicas = 1 ) : <nl>
 self . model = model <nl>
 self . execution_mode = execution_mode <nl>
 self . _compilation_cache = { } <nl>
 + self . num_replicas = num_replicas <nl>
 <nl>
 def _specialize_model ( self , input_specs ) : <nl>
 " " " Specialize ` self . model ` ( a Keras model ) for the given input shapes . " " " <nl>
 def _model_fn ( ) : <nl>
 # Call our model with our infeed inputs ( re - using the weights ) . <nl>
 model_outputs = self . model ( tpu_inputs ) <nl>
 child_model = models . Model ( inputs = tpu_inputs , outputs = model_outputs ) <nl>
 + <nl>
 if is_training or is_test : <nl>
 child_model . compile ( <nl>
 - optimizer = self . model . optimizer , <nl>
 + optimizer = _replicated_optimizer ( self . model . optimizer , <nl>
 + self . num_replicas ) , <nl>
 loss = self . model . loss , <nl>
 loss_weights = self . model . loss_weights , <nl>
 metrics = self . model . metrics , <nl>
 def _model_fn ( ) : <nl>
 return [ <nl>
 child_model . train_function . updates_op , <nl>
 tpu_ops . outfeed_enqueue_tuple ( <nl>
 - child_model . train_function . outputs , name = ' oufeed - enqueue - train ' ) <nl>
 + child_model . train_function . outputs , <nl>
 + name = ' outfeed - enqueue - train ' ) <nl>
 ] <nl>
 elif is_test : <nl>
 child_model . _make_test_function ( ) <nl>
 def _model_fn ( ) : <nl>
 ] <nl>
 return [ <nl>
 tpu_ops . outfeed_enqueue_tuple ( <nl>
 - child_model . test_function . outputs , name = ' outfeed - enqueue - test ' ) <nl>
 + child_model . test_function . outputs , <nl>
 + name = ' outfeed - enqueue - test ' ) <nl>
 ] <nl>
 elif is_predict : <nl>
 child_model . _make_predict_function ( ) <nl>
 def _model_fn ( ) : <nl>
 # Capture outfeed metadata computed during the rewrite . <nl>
 self . _outfeed_spec = None <nl>
 <nl>
 + # Generate our TPU operations using ` tpu . split_compile_and_replicate ` . <nl>
 + # ` compile_op ` can be used to test the TPU model compiles before execution . <nl>
 + # ` execute_op ` replicates ` _model_fn ` ` num_replicas ` times , with each shard <nl>
 + # running on a different logical core . <nl>
 compile_op , execute_op = tpu . split_compile_and_replicate ( <nl>
 - _model_fn , inputs = [ [ ] ] ) <nl>
 + _model_fn , inputs = [ [ ] ] * self . num_replicas ) <nl>
 <nl>
 # Generate CPU side operations to enqueue features / labels and dequeue <nl>
 # outputs from the model call . <nl>
 - with ops . 
device ( ' / device : TPU : 0 ' ) : <nl> - infeed_tensors = [ ] <nl> - for spec in input_specs : <nl> - infeed_tensors . append ( <nl> - array_ops . placeholder ( <nl> - dtype = spec . dtype , <nl> - shape = spec . shape , <nl> - name = ' infeed - enqueue - % s ' % spec . name ) ) <nl> - <nl> - infeed_op = tpu_ops . infeed_enqueue_tuple ( <nl> - infeed_tensors , [ spec . shape for spec in input_specs ] , <nl> - name = ' infeed - enqueue - % s ' % self . execution_mode ) <nl> - <nl> - outfeed_op = tpu_ops . outfeed_dequeue_tuple ( <nl> - dtypes = [ spec . dtype for spec in self . _outfeed_spec ] , <nl> - shapes = [ spec . shape for spec in self . _outfeed_spec ] , <nl> - name = ' outfeed - dequeue - % s ' % self . execution_mode ) <nl> + infeed_op = [ ] <nl> + outfeed_op = [ ] <nl> + shard_infeed_tensors = [ ] <nl> + <nl> + for shard_id in range ( self . num_replicas ) : <nl> + with ops . device ( ' / device : TPU : % d ' % shard_id ) : <nl> + infeed_tensors = [ ] <nl> + for spec in input_specs : <nl> + infeed_tensors . append ( <nl> + array_ops . placeholder ( <nl> + dtype = spec . dtype , <nl> + shape = spec . shape , <nl> + name = ' infeed - enqueue - % s - % d ' % ( spec . name , shard_id ) ) ) <nl> + shard_infeed_tensors . append ( infeed_tensors ) <nl> + <nl> + infeed_op . append ( tpu_ops . infeed_enqueue_tuple ( <nl> + infeed_tensors , [ spec . shape for spec in input_specs ] , <nl> + name = ' infeed - enqueue - % s - % d ' % ( self . execution_mode , shard_id ) ) ) <nl> + <nl> + outfeed_op . extend ( tpu_ops . outfeed_dequeue_tuple ( <nl> + dtypes = [ spec . dtype for spec in self . _outfeed_spec ] , <nl> + shapes = [ spec . shape for spec in self . _outfeed_spec ] , <nl> + name = ' outfeed - dequeue - % s - % d ' % ( self . execution_mode , shard_id ) ) ) <nl> <nl> return TPUModelOp ( <nl> - compile_op , execute_op , infeed_tensors , infeed_op , outfeed_op ) <nl> + compile_op , execute_op , infeed_tensors = shard_infeed_tensors , <nl> + infeed_op = infeed_op , outfeed_op = outfeed_op ) <nl> <nl> def _test_model_compiles ( self , tpu_model_ops ) : <nl> " " " Verifies that the given TPUModelOp can be compiled via XLA . " " " <nl> def _test_model_compiles ( self , tpu_model_ops ) : <nl> logging . info ( ' Finished compiling . Time elapsed : % s secs ' , <nl> end_time - start_time ) <nl> <nl> + def _split_tensors ( self , inputs ) : <nl> + " " " Split input data across shards . <nl> + <nl> + Each input is sliced along the batch axis . <nl> + <nl> + Args : <nl> + inputs : List of Numpy arrays to run on the TPU . <nl> + <nl> + Returns : <nl> + List of lists containing the input to feed to each TPU shard . <nl> + " " " <nl> + if self . num_replicas = = 1 : <nl> + return [ inputs ] <nl> + <nl> + batch_size = inputs [ 0 ] . shape [ 0 ] <nl> + assert batch_size % self . num_replicas = = 0 , ( <nl> + ' batch_size must be divisible by num_replicas ' ) <nl> + shard_size = batch_size / / self . num_replicas <nl> + input_list = [ ] <nl> + for index in range ( self . num_replicas ) : <nl> + shard_inputs = [ x [ index * shard_size : ( index + 1 ) * shard_size ] <nl> + for x in inputs ] <nl> + input_list . append ( shard_inputs ) <nl> + return input_list <nl> + <nl> def __call__ ( self , inputs ) : <nl> assert isinstance ( inputs , list ) <nl> <nl> def __call__ ( self , inputs ) : <nl> else : <nl> input_tensors = self . model . _feed_inputs <nl> <nl> + shard_inputs = self . _split_tensors ( inputs ) <nl> + del inputs # To avoid accident usage . 
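As a reference for the slicing performed by `_split_tensors` above, here is a simplified, self-contained sketch of the same batch-axis sharding; the array shapes and the standalone function name are illustrative assumptions, not the class method itself:

```python
# Simplified sketch of _split_tensors: every input array is sliced along the
# batch axis (axis 0) into num_replicas equal shards, one per TPU core.
import numpy as np

def split_tensors(inputs, num_replicas):
    """Slice each input along the batch axis into one shard per replica."""
    if num_replicas == 1:
        return [inputs]
    batch_size = inputs[0].shape[0]
    assert batch_size % num_replicas == 0, (
        'batch_size must be divisible by num_replicas')
    shard_size = batch_size // num_replicas
    return [[x[i * shard_size:(i + 1) * shard_size] for x in inputs]
            for i in range(num_replicas)]

features = np.arange(8, dtype=np.float32).reshape(8, 1)
labels = np.ones((8, 1), dtype=np.float32)
shards = split_tensors([features, labels], num_replicas=2)
print(len(shards))         # 2: one input list per replica
print(shards[0][0].shape)  # (4, 1): half of the original batch
```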
<nl> + <nl> # Compute an input specification ( used to generate infeed enqueue and <nl> # dequeue operations ) . We use the shape from our input array and the <nl> # dtype from our model . A user may pass in a float64 for a float32 <nl> # input : for model compatibility we still must generate a float32 infeed . <nl> input_specs = [ ] <nl> - for tensor , ary in zip ( input_tensors , inputs ) : <nl> + <nl> + # We use the shape and dtype from the first shard to compute the input <nl> + # metadata ( ` input_specs ` ) ; all replicas have the same type and shape . <nl> + for tensor , ary in zip ( input_tensors , shard_inputs [ 0 ] ) : <nl> input_specs . append ( <nl> tensor_spec . TensorSpec ( ary . shape , tensor . dtype , <nl> _valid_name ( tensor . name ) ) ) <nl> def __call__ ( self , inputs ) : <nl> tpu_model_ops = self . _compilation_cache [ shape_key ] <nl> <nl> infeed_dict = { } <nl> - for tensor , value in zip ( tpu_model_ops . infeed_tensors , inputs ) : <nl> - infeed_dict [ tensor ] = value <nl> + for infeed_tensors , inputs in zip ( tpu_model_ops . infeed_tensors , <nl> + shard_inputs ) : <nl> + for tensor , value in zip ( infeed_tensors , inputs ) : <nl> + infeed_dict [ tensor ] = value <nl> <nl> session = K . get_session ( ) <nl> _ , _ , outfeed_outputs = session . run ( [ <nl> def __call__ ( self , inputs ) : <nl> tpu_model_ops . outfeed_op <nl> ] , infeed_dict ) <nl> <nl> - return outfeed_outputs <nl> + # TODO ( xiejw ) : Decide how to reduce outputs , or just discard all but first . <nl> + return outfeed_outputs [ : len ( outfeed_outputs ) / / self . num_replicas ] <nl> <nl> <nl> @ experimental <nl> def shutdown_tpu_session ( session = None ) : <nl> class KerasTPUModel ( models . Model ) : <nl> " " " TPU compatible Keras model wrapper . " " " <nl> <nl> - def __init__ ( self , inputs , outputs , name = None ) : <nl> + def __init__ ( self , inputs , outputs , name , replicas = 1 ) : <nl> super ( models . Model , self ) . __init__ ( <nl> inputs = inputs , <nl> outputs = outputs , <nl> def __init__ ( self , inputs , outputs , name = None ) : <nl> self . predict_function = None <nl> self . test_function = None <nl> self . train_function = None <nl> + self . replicas = replicas <nl> <nl> def compile ( self , <nl> optimizer , <nl> def compile ( self , <nl> <nl> def _make_train_function ( self ) : <nl> if not self . train_function : <nl> - self . train_function = TPUFunction ( self , model_fn_lib . ModeKeys . TRAIN ) <nl> + self . train_function = TPUFunction ( self , model_fn_lib . ModeKeys . TRAIN , <nl> + num_replicas = self . replicas ) <nl> <nl> return self . train_function <nl> <nl> def _validate_shapes ( model ) : <nl> <nl> <nl> @ experimental <nl> - def tpu_model ( model ) : <nl> + def tpu_model ( model , replicas = None ) : <nl> + " " " Runs a model on TPU ( s ) . <nl> + <nl> + Usage : <nl> + ` ` ` <nl> + a = Input ( shape = ( 32 , ) ) <nl> + b = Dense ( 32 ) ( a ) <nl> + model = Model ( inputs = a , outputs = b ) <nl> + <nl> + model = keras_support . tpu_model ( model ) <nl> + model . compile ( <nl> + optimizer = tf . train . GradientDescentOptimizer ( learning_rate = 1 . 0 ) , <nl> + . . . ) <nl> + ` ` ` <nl> + <nl> + If ` replicas ` is set , replicates the model computation on all TPU cores . The <nl> + model computation is replicated ` num_replicas ` times ; each shard will run on a <nl> + different TPU core . <nl> + <nl> + Limitation : Currently , replication is only supported for training . 
<nl> + <nl> + Usage : <nl> + ` ` ` <nl> + a = Input ( shape = ( 32 , ) ) <nl> + b = Dense ( 32 ) ( a ) <nl> + model = Model ( inputs = a , outputs = b ) <nl> + <nl> + model = keras_support . tpu_model ( model , replicas = 2 ) <nl> + model . compile ( <nl> + optimizer = tf . train . GradientDescentOptimizer ( learning_rate = 1 . 0 ) , <nl> + . . . ) <nl> + ` ` ` <nl> + <nl> + Args : <nl> + model : A ` KerasTPUModel ` . <nl> + replicas : ( Optional ) Int , number of TPU cores which to create model <nl> + replicas . If ` None ` , the model runs on single core only , i . e . , no <nl> + replication . <nl> + <nl> + Returns : <nl> + A new ` KerasTPUModel ` instance . <nl> + " " " <nl> _validate_shapes ( model ) <nl> + # TODO ( xiejw ) : Validate TPU model . TPUModel only ? <nl> + # TODO ( xiejw ) : Validate replicas . Full or 1 . Shall we allow subset ? <nl> + # TODO ( xiejw ) : Adds reduction option . <nl> + replicas = 1 if replicas is None else replicas <nl> return KerasTPUModel ( <nl> - inputs = model . inputs , outputs = model . outputs , name = model . name ) <nl> + inputs = model . inputs , outputs = model . outputs , name = model . name , <nl> + replicas = replicas ) <nl> mmm a / tensorflow / contrib / tpu / python / tpu / tpu_context . py <nl> ppp b / tensorflow / contrib / tpu / python / tpu / tpu_context . py <nl> <nl> _LOCAL_MASTERS = ( ' ' , ' local ' ) <nl> <nl> <nl> - class _TPUContext ( object ) : <nl> + class TPUContext ( object ) : <nl> + " " " The context of current input_fn invocation . " " " <nl> + <nl> + def __init__ ( self , internal_ctx , input_device = None , invocation_index = None ) : <nl> + self . _internal_ctx = internal_ctx <nl> + self . _input_device = input_device <nl> + self . _invocation_index = invocation_index <nl> + <nl> + def current_input_fn_deployment ( self ) : <nl> + " " " The configuration of the current input_fn invocation . <nl> + <nl> + The configuration depends on ` TPUConfig . per_host_input_for_training ` . See <nl> + ` TPUConfig ` for details . <nl> + <nl> + Only set in params dict of input_fn <nl> + <nl> + Returns : <nl> + A tuple of <nl> + 1 . Device spec string : String , is the current CPU host where the <nl> + input_fn is invoked . <nl> + 2 . Current invocation index : Int , 0 - based index of the input_fn <nl> + invocation . See next item for details . <nl> + 3 . Total invocation count : Int , the total number of times to invoke the <nl> + input_fn on all CPU hosts . Each invocation will be passed with a new <nl> + ` TPUContext ` instance with current invocation index set properly . <nl> + 4 . Total number of replicas consumed by current_invocation : Int , the <nl> + number of replicas fed by the data returned by current input_fn . For <nl> + example , for per_core input pipeline deployment <nl> + and non - model - parallelism , total invocation count is equal to <nl> + the number of cores in the system and num replicas consumed by <nl> + current invocation is 1 . For per - host v2 input pipeline deployment , <nl> + total invocation count is equal to the number of hosts in the system <nl> + and num replicas consumed by current invocation is equal to number of <nl> + cores per host . <nl> + " " " <nl> + if self . _internal_ctx . is_input_sharded_per_core ( ) : <nl> + total_invocation_count = ( self . _internal_ctx . num_hosts <nl> + * self . _internal_ctx . num_of_replicas_per_host ) <nl> + replicas_consumed = 1 <nl> + else : <nl> + total_invocation_count = self . _internal_ctx . num_hosts <nl> + replicas_consumed = self . _internal_ctx . 
num_of_replicas_per_host <nl> + return ( self . _input_device , self . _invocation_index , <nl> + total_invocation_count , replicas_consumed ) <nl> + <nl> + @ property <nl> + def num_replicas ( self ) : <nl> + " " " The total number of replicas . <nl> + <nl> + For non - model - parallelism , num_replicas should be the total num of TPU <nl> + cores in the system . <nl> + <nl> + Returns : <nl> + The number of replicas . <nl> + " " " <nl> + return self . _internal_ctx . num_replicas <nl> + <nl> + def device_for_replica ( self , replica_id ) : <nl> + " " " Returns the tuple of ( CPU device and device ordinal ) for replica . <nl> + <nl> + This should be used for full replicate for non - model - parallelism . <nl> + <nl> + Args : <nl> + replica_id : Int , the replica index . <nl> + <nl> + Returns : <nl> + A tuple of device spec for CPU device and int device ordinal . <nl> + " " " <nl> + # Note that : For the non - model parallelism , the mapping could be <nl> + # a random permutation . The order should not matter in most cases <nl> + # as far as model is replicated to all cores in the system . <nl> + <nl> + # If the precise replica_id to device mapping is required , please <nl> + # set the computation_shape as [ 1 , 1 , 1 ] in TPUConfig to enable <nl> + # the model parallelism . <nl> + if self . _internal_ctx . model_parallelism_enabled : <nl> + return RuntimeError ( <nl> + ' device_for_replica is not yet implemented for model parallelism . ' <nl> + ' b / 79689078 . ' ) <nl> + <nl> + master = self . _internal_ctx . master_job <nl> + job_device = ' ' if master is None else ( ' / job : % s ' % master ) <nl> + <nl> + num_of_replicas_per_host = self . _internal_ctx . num_of_replicas_per_host <nl> + host_id = replica_id / num_of_replicas_per_host <nl> + ordinal_id = replica_id % num_of_replicas_per_host <nl> + <nl> + host_device = ' % s / task : % d / device : CPU : 0 ' % ( job_device , host_id ) <nl> + return ( host_device , ordinal_id ) <nl> + <nl> + <nl> + class _InternalTPUContext ( object ) : <nl> " " " A context holds immutable states of TPU computation . <nl> <nl> This immutable object holds TPUEstimator config , train / eval batch size , and <nl> class _TPUContext ( object ) : <nl> <nl> N . B . As ` mode ` is not immutable state in Estimator , but essential to <nl> distinguish between TPU training and evaluation , a common usage for <nl> - _TPUContext with ` mode ` is as follows : <nl> + _InternalTPUContext with ` mode ` is as follows : <nl> ` ` ` <nl> with _ctx . with_mode ( mode ) as ctx : <nl> if ctx . is_running_on_cpu ( ) : <nl> def _validate_tpu_configuration ( self ) : <nl> self . _lazy_validation_dict [ mode ] = True <nl> <nl> <nl> - class _OneCoreTPUContext ( _TPUContext ) : <nl> - " " " Special _TPUContext for one core usage . " " " <nl> + class _OneCoreTPUContext ( _InternalTPUContext ) : <nl> + " " " Special _InternalTPUContext for one core usage . " " " <nl> <nl> def __init__ ( self , config , train_batch_size , eval_batch_size , <nl> predict_batch_size , use_tpu ) : <nl> def _get_tpu_system_metadata ( self ) : <nl> <nl> def _get_tpu_context ( config , train_batch_size , eval_batch_size , <nl> predict_batch_size , use_tpu , eval_on_tpu ) : <nl> - " " " Returns an instance of ` _TPUContext ` . " " " <nl> + " " " Returns an instance of ` _InternalTPUContext ` . " " " <nl> <nl> if ( config . tpu_config . num_shards = = 1 and <nl> config . tpu_config . 
computation_shape is None ) : <nl> def _get_tpu_context ( config , train_batch_size , eval_batch_size , <nl> return _OneCoreTPUContext ( config , train_batch_size , eval_batch_size , <nl> predict_batch_size , use_tpu ) <nl> <nl> - return _TPUContext ( config , train_batch_size , eval_batch_size , <nl> - predict_batch_size , use_tpu , eval_on_tpu ) <nl> + return _InternalTPUContext ( config , train_batch_size , eval_batch_size , <nl> + predict_batch_size , use_tpu , eval_on_tpu ) <nl> mmm a / tensorflow / contrib / tpu / python / tpu / tpu_estimator . py <nl> ppp b / tensorflow / contrib / tpu / python / tpu / tpu_estimator . py <nl> def after_run ( self , run_context , run_values ) : <nl> raise errors . OutOfRangeError ( None , None , ' Stopped by stopping signal . ' ) <nl> <nl> <nl> - def generate_per_core_enqueue_ops_fn_for_host ( ctx , input_fn , <nl> - inputs_structure_recorder ) : <nl> + def generate_per_core_enqueue_ops_fn_for_host ( <nl> + ctx , input_fn , inputs_structure_recorder , host_device , host_id ) : <nl> " " " Generates infeed enqueue ops for per - core input_fn on a single host . " " " <nl> captured_infeed_queue = _CapturedObject ( ) <nl> <nl> def enqueue_ops_fn ( ) : <nl> per_host_sharded_inputs = [ ] <nl> for core_ordinal in range ( num_cores_per_host ) : <nl> with ops . name_scope ( ' ordinal_ % d ' % ( core_ordinal ) ) : <nl> - inputs = _Inputs . from_input_fn ( input_fn ( ) ) <nl> + user_context = tpu_context . TPUContext ( <nl> + internal_ctx = ctx , <nl> + input_device = host_device , <nl> + invocation_index = host_id * ctx . num_of_cores_per_host + core_ordinal <nl> + ) <nl> + inputs = _Inputs . from_input_fn ( input_fn ( user_context ) ) <nl> if inputs . is_dataset : <nl> raise TypeError ( <nl> ' ` input_fn ` returning ` Dataset ` is not yet supported in ' <nl> def generate_per_host_enqueue_ops_fn_for_host ( <nl> hooks = [ ] <nl> <nl> with ops . device ( device ) : <nl> - inputs = _Inputs . from_input_fn ( input_fn ( ) ) <nl> + user_context = tpu_context . TPUContext ( <nl> + internal_ctx = ctx , <nl> + input_device = device , <nl> + invocation_index = host_id ) <nl> + inputs = _Inputs . from_input_fn ( input_fn ( user_context ) ) <nl> <nl> is_dataset = inputs . is_dataset <nl> if ctx . mode = = model_fn_lib . ModeKeys . PREDICT : <nl> def generate_per_host_enqueue_ops_fn_for_host ( <nl> hooks . append ( inputs . dataset_initializer_hook ( ) ) <nl> <nl> # TODO ( ylc ) : Refactoring the code to merge the tpu ordinal logic here and the <nl> - # _TPUContext . tpu_ordinal_function . We should either introduce another <nl> + # _InternalTPUContext . tpu_ordinal_function . We should either introduce another <nl> # abstraction or a different helper method . <nl> def _tpu_ordinal_function_impl ( shard_index_in_host ) : <nl> # We put both enqueue / dequeue op at tpu . core ( 0 ) in each replica . <nl> def enqueue_ops_fn ( ) : <nl> def generate_per_host_v2_enqueue_ops_fn_for_host ( <nl> ctx , input_fn , inputs_structure_recorder , device , host_id ) : <nl> " " " Generates infeed enqueue ops for per - host input_fn on a single host . " " " <nl> - del host_id # unused <nl> captured_infeed_queue = _CapturedObject ( ) <nl> hooks = [ ] <nl> <nl> with ops . device ( device ) : <nl> - inputs = _Inputs . from_input_fn ( input_fn ( ) ) <nl> + user_context = tpu_context . TPUContext ( <nl> + internal_ctx = ctx , <nl> + input_device = device , <nl> + invocation_index = host_id ) <nl> + inputs = _Inputs . from_input_fn ( input_fn ( user_context ) ) <nl> <nl> is_dataset = inputs . 
is_dataset <nl> if not is_dataset : <nl> class _InputPipeline ( object ) : <nl> " " " ` _InputPipeline ` handles invoking ` input_fn ` and piping to infeed queue . <nl> <nl> ` _InputPipeline ` abstracts the per - core / per - host ` input_fn ` invocation from <nl> - call site . To be precise , based on the configuration in ` _TPUContext ` , it <nl> - invokes ` input_fn ` for all cores ( usually multi - host TPU training ) or for one <nl> - host ( usually for single - host TPU evaluation ) , and sends all ` features ` and <nl> - ` labels ` returned by ` input_fn ` to TPU infeed . For per - core invocation , <nl> - ` features ` and ` labels ` are piped to infeed directly , one tuple for each <nl> - core . For per - host invocation , ` features ` and ` labels ` are split at host <nl> - ( with respect to ` batch_axis ` ) and piped to all cores accordingly . <nl> + call site . To be precise , based on the configuration in <nl> + ` _InternalTPUContext ` , it invokes ` input_fn ` for all cores ( usually <nl> + multi - host TPU training ) or for one host ( usually for single - host TPU <nl> + evaluation ) , and sends all ` features ` and ` labels ` returned by ` input_fn ` to <nl> + TPU infeed . For per - core invocation , ` features ` and ` labels ` are piped to <nl> + infeed directly , one tuple for each core . For per - host invocation , ` features ` <nl> + and ` labels ` are split at host ( with respect to ` batch_axis ` ) and piped to all <nl> + cores accordingly . <nl> <nl> In addition , flatten / unflatten are handled by ` _InputPipeline ` also . Model <nl> inputs returned by the ` input_fn ` can have one of the following forms : <nl> def __init__ ( self , input_fn , batch_axis , ctx ) : <nl> batch_axis : A python tuple of int values describing how each tensor <nl> produced by the Estimator ` input_fn ` should be split across the TPU <nl> compute shards . <nl> - ctx : A ` _TPUContext ` instance with mode . <nl> + ctx : A ` _InternalTPUContext ` instance with mode . <nl> <nl> Raises : <nl> ValueError : If both ` sharded_features ` and ` num_cores ` are ` None ` . <nl> def _invoke_input_fn_and_record_structure ( self ) : <nl> with ops . name_scope ( ' input_pipeline_task % d ' % ( host_id ) ) : <nl> enqueue_ops_fn , captured_infeed_queue = ( <nl> generate_per_core_enqueue_ops_fn_for_host ( <nl> - self . _ctx , self . _input_fn , self . _inputs_structure_recorder ) ) <nl> + self . _ctx , self . _input_fn , self . _inputs_structure_recorder , <nl> + host_device , host_id ) ) <nl> <nl> if _WRAP_INPUT_FN_INTO_WHILE_LOOP : <nl> run_infeed_loop_on_coordinator = False <nl> def __init__ ( self , <nl> <nl> if use_tpu : <nl> # Perform some very basic validations . More validations will be found in <nl> - # _TPUContext . <nl> + # _InternalTPUContext . <nl> if train_batch_size is None : <nl> raise ValueError ( ' ` train_batch_size ` cannot be ` None ` ' ) <nl> util_lib . check_positive_integer ( train_batch_size , ' train_batch_size ' ) <nl> def __init__ ( self , <nl> self . _iterations_per_training_loop = ( <nl> self . _config . tpu_config . iterations_per_loop ) <nl> <nl> - # All properties passed to _TPUContext are immutable . <nl> + # All properties passed to _InternalTPUContext are immutable . <nl> # pylint : disable = protected - access <nl> self . _ctx = tpu_context . _get_tpu_context ( <nl> self . _config , train_batch_size , <nl> def _call_input_fn ( self , input_fn , mode ) : <nl> # tf . while_loop also . So , we either pass input_fn to model_fn or pass <nl> # dequeue_fn to model_fn . 
Here , ` input_fn ` is passed directly as <nl> # ` features ` in ` model_fn ` signature . <nl> - def _input_fn ( ) : <nl> + def _input_fn ( ctx ) : <nl> + kwargs [ ' params ' ] [ _CTX_KEY ] = ctx <nl> return input_fn ( * * kwargs ) <nl> <nl> return _input_fn <nl> mmm a / tensorflow / contrib / training / python / training / batch_sequences_with_states_test . py <nl> ppp b / tensorflow / contrib / training / python / training / batch_sequences_with_states_test . py <nl> <nl> from tensorflow . python . framework import errors_impl <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import random_ops <nl> def testAdvancedPadding ( self ) : <nl> expected_seq4_batch2 = expected_seq4_batch2 ) <nl> <nl> <nl> - class BatchSequencesWithStatesTestWithCApi ( BatchSequencesWithStatesTest ) : <nl> - <nl> - def setUp ( self ) : <nl> - self . _prev_value = ops . _USE_C_API <nl> - ops . _USE_C_API = True <nl> - super ( BatchSequencesWithStatesTestWithCApi , self ) . setUp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - super ( BatchSequencesWithStatesTestWithCApi , self ) . tearDown ( ) <nl> - ops . _USE_C_API = self . _prev_value <nl> - <nl> - <nl> - @ test_util . with_c_api <nl> class PaddingTest ( test . TestCase ) : <nl> <nl> def testPaddingInvalidLengths ( self ) : <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> proto_library ( <nl> visibility = [ " / / visibility : public " ] , <nl> ) <nl> <nl> + closure_proto_library ( <nl> + name = " example_protos_closure " , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ " : example_protos " ] , <nl> + ) <nl> + <nl> exports_files ( [ <nl> " framework / types . proto " , <nl> ] ) <nl> cc_library ( <nl> ] , <nl> hdrs = PLATFORM_BASE_HDRS , <nl> copts = tf_copts ( ) , <nl> + # TODO ( ahentz ) : remove use of this library so we can move it into ' platform ' <nl> + tags = [ " avoid_dep " ] , <nl> deps = [ <nl> " : lib_platform " , <nl> " / / tensorflow / core / platform / default / build_config : base " , <nl> LIB_INTERNAL_PRIVATE_HEADERS = [ " framework / resource_handle . h " ] + glob ( <nl> " platform / * * / cuda . h " , <nl> " platform / * * / stream_executor . h " , <nl> ] , <nl> - ) + tf_additional_lib_srcs ( <nl> - exclude = [ <nl> - " * * / * . cc " , <nl> - " * * / * test * " , <nl> - " platform / * * / cuda . h " , <nl> - " platform / * * / stream_executor . h " , <nl> - ] , <nl> ) <nl> <nl> LIB_INTERNAL_PUBLIC_HEADERS = tf_additional_lib_hdrs ( ) + [ <nl> alias ( <nl> actual = " : mobile_srcs " , <nl> visibility = [ " / / visibility : public " ] , <nl> ) <nl> - <nl> - closure_proto_library ( <nl> - name = " example_protos_closure " , <nl> - visibility = [ " / / visibility : public " ] , <nl> - deps = [ " : example_protos " ] , <nl> - ) <nl> mmm a / tensorflow / core / common_runtime / collective_param_resolver_local . cc <nl> ppp b / tensorflow / core / common_runtime / collective_param_resolver_local . cc <nl> void SortDevicesAndTasks ( CollectiveParams * cp ) { <nl> / / ring order implicit in the device order . <nl> void GenerateSubdivPerms ( const string & device , int source_rank , <nl> CollectiveParams * cp ) { <nl> - CHECK_GT ( cp - > instance . impl_details . subdiv_offsets . size ( ) , 0 ) ; <nl> - cp - > instance . 
impl_details . subdiv_permutations . resize ( <nl> - cp - > instance . impl_details . subdiv_offsets . size ( ) ) ; <nl> / / Each subdiv permutation is a ring formed by rotating each <nl> / / single - task subsequence of devices by an offset . This makes most <nl> / / sense when each task has the same number of devices but we can ' t <nl> mmm a / tensorflow / core / common_runtime / eager / BUILD <nl> ppp b / tensorflow / core / common_runtime / eager / BUILD <nl> tf_cuda_library ( <nl> " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core : session_options " , <nl> + " / / tensorflow / core / distributed_runtime : worker_session " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_client " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_server_lib " , <nl> ] , <nl> ) <nl> <nl> cc_library ( <nl> " / / tensorflow / core : core_cpu_lib " , <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_client " , <nl> + " / / tensorflow / core / distributed_runtime / eager : remote_execute_node " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / core / common_runtime / eager / context . cc <nl> ppp b / tensorflow / core / common_runtime / eager / context . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / eager / context . h " <nl> <nl> # include " tensorflow / core / common_runtime / process_util . h " <nl> + # include " tensorflow / core / lib / core / blocking_counter . h " <nl> <nl> namespace tensorflow { <nl> <nl> EagerContext : : EagerContext ( const SessionOptions & opts , <nl> bool async , std : : unique_ptr < DeviceMgr > device_mgr , <nl> Rendezvous * rendezvous ) <nl> : policy_ ( default_policy ) , <nl> - device_manager_ ( std : : move ( device_mgr ) ) , <nl> - devices_ ( device_manager_ - > ListDevices ( ) ) , <nl> + local_device_manager_ ( std : : move ( device_mgr ) ) , <nl> + local_unowned_device_manager_ ( nullptr ) , <nl> + devices_ ( local_device_manager_ - > ListDevices ( ) ) , <nl> rendezvous_ ( rendezvous ) , <nl> thread_pool_ ( NewThreadPoolFromSessionOptions ( opts ) ) , <nl> pflr_ ( new ProcessFunctionLibraryRuntime ( <nl> - device_manager_ . get ( ) , opts . env , TF_GRAPH_DEF_VERSION , & func_lib_def_ , <nl> - { } , thread_pool_ . get ( ) ) ) , <nl> + local_device_manager_ . get ( ) , opts . env , TF_GRAPH_DEF_VERSION , <nl> + & func_lib_def_ , { } , thread_pool_ . get ( ) ) ) , <nl> log_device_placement_ ( opts . config . 
log_device_placement ( ) ) , <nl> async_default_ ( async ) { <nl> + InitDeviceMapAndAsync ( ) ; <nl> + } <nl> + <nl> + EagerContext : : EagerContext ( <nl> + const SessionOptions & opts , ContextDevicePlacementPolicy default_policy , <nl> + bool async , DeviceMgr * local_device_mgr , Rendezvous * rendezvous , <nl> + std : : unique_ptr < GrpcServer > server , <nl> + std : : unique_ptr < eager : : EagerClientCache > remote_eager_workers , <nl> + std : : unique_ptr < DeviceMgr > remote_device_manager , <nl> + const gtl : : FlatMap < string , uint64 > & remote_contexts ) <nl> + : policy_ ( default_policy ) , <nl> + local_unowned_device_manager_ ( local_device_mgr ) , <nl> + devices_ ( local_unowned_device_manager_ - > ListDevices ( ) ) , <nl> + rendezvous_ ( rendezvous ) , <nl> + thread_pool_ ( NewThreadPoolFromSessionOptions ( opts ) ) , <nl> + pflr_ ( new ProcessFunctionLibraryRuntime ( <nl> + local_unowned_device_manager_ , opts . env , TF_GRAPH_DEF_VERSION , <nl> + & func_lib_def_ , { } , thread_pool_ . get ( ) ) ) , <nl> + log_device_placement_ ( opts . config . log_device_placement ( ) ) , <nl> + async_default_ ( async ) , <nl> + server_ ( std : : move ( server ) ) , <nl> + remote_eager_workers_ ( std : : move ( remote_eager_workers ) ) , <nl> + remote_device_manager_ ( std : : move ( remote_device_manager ) ) , <nl> + remote_contexts_ ( remote_contexts ) { <nl> + InitDeviceMapAndAsync ( ) ; <nl> + } <nl> + <nl> + void EagerContext : : InitDeviceMapAndAsync ( ) { <nl> if ( async_default_ ) { <nl> executor_ . EnableAsync ( ) ; <nl> } <nl> EagerContext : : EagerContext ( const SessionOptions & opts , <nl> for ( auto * device : devices_ ) { <nl> devices_map_ [ device - > name ( ) ] = device ; <nl> } <nl> + <nl> + if ( remote_device_manager_ ! = nullptr ) { <nl> + for ( auto * device : remote_device_manager_ - > ListDevices ( ) ) { <nl> + if ( devices_map_ . find ( device - > name ( ) ) = = devices_map_ . end ( ) ) { <nl> + devices_map_ [ device - > name ( ) ] = device ; <nl> + devices_ . push_back ( device ) ; <nl> + } <nl> + } <nl> + } <nl> } <nl> <nl> bool EagerContext : : Async ( ) const { <nl> ContextDevicePlacementPolicy EagerContext : : GetDevicePlacementPolicy ( ) { <nl> } <nl> <nl> EagerContext : : ~ EagerContext ( ) { <nl> + if ( server_ ) { <nl> + / / TODO ( nareshmodi ) : Fix this . <nl> + LOG ( WARNING ) < < " Unable to destroy server_ object , so releasing instead . " <nl> + " GrpcServer doesn ' t support clean shutdown . " ; <nl> + server_ . release ( ) ; <nl> + } <nl> + <nl> + / / Close all remote contexts . <nl> + std : : vector < eager : : CloseContextRequest > requests ( remote_contexts_ . size ( ) ) ; <nl> + std : : vector < eager : : CloseContextResponse > responses ( remote_contexts_ . size ( ) ) ; <nl> + BlockingCounter counter ( static_cast < int > ( remote_contexts_ . size ( ) ) ) ; <nl> + <nl> + int i = 0 ; <nl> + for ( const auto & worker_and_context_id : remote_contexts_ ) { <nl> + auto * client = <nl> + remote_eager_workers_ - > GetClient ( worker_and_context_id . first ) ; <nl> + <nl> + requests [ i ] . set_context_id ( worker_and_context_id . second ) ; <nl> + client - > CloseContextAsync ( <nl> + & requests [ i ] , & responses [ i ] , <nl> + [ & worker_and_context_id , & counter ] ( const Status & s ) { <nl> + if ( ! s . ok ( ) ) { <nl> + LOG ( ERROR ) < < " Unable to close remote context with ID " <nl> + < < worker_and_context_id . second <nl> + < < " for worker : " < < worker_and_context_id . first <nl> + < < " due to " < < s . 
error_message ( ) ; <nl> + } <nl> + counter . DecrementCount ( ) ; <nl> + } ) ; <nl> + i + + ; <nl> + } <nl> + <nl> + counter . Wait ( ) ; <nl> + <nl> executor_ . WaitForAllPendingNodes ( ) . IgnoreError ( ) ; <nl> ClearCaches ( ) ; <nl> rendezvous_ - > Unref ( ) ; <nl> void EagerContext : : SetShouldStoreMetadata ( bool value ) { <nl> } <nl> } <nl> <nl> + namespace { <nl> + Status GetTaskName ( Device * d , string * task_name ) { <nl> + string ignored ; <nl> + if ( ! DeviceNameUtils : : SplitDeviceName ( d - > name ( ) , task_name , & ignored ) ) { <nl> + return errors : : InvalidArgument ( " Unable to parse device name : " , d - > name ( ) ) ; <nl> + } <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + } / / namespace <nl> + <nl> + Status EagerContext : : GetClientAndContextID ( Device * device , <nl> + eager : : EagerClient * * client , <nl> + uint64 * context_id ) { <nl> + auto it = device_to_client_cache_ . find ( device ) ; <nl> + if ( it ! = device_to_client_cache_ . end ( ) ) { <nl> + * client = it - > second . first ; <nl> + * context_id = it - > second . second ; <nl> + } <nl> + string device_task_name ; <nl> + TF_RETURN_IF_ERROR ( GetTaskName ( device , & device_task_name ) ) ; <nl> + <nl> + * client = remote_eager_workers_ - > GetClient ( device_task_name ) ; <nl> + <nl> + if ( * client = = nullptr ) { <nl> + return errors : : InvalidArgument ( <nl> + " Unable to find eager client corresponding to device " , device - > name ( ) ) ; <nl> + } <nl> + <nl> + auto context_iterator = remote_contexts_ . find ( device_task_name ) ; <nl> + if ( context_iterator = = remote_contexts_ . end ( ) ) { <nl> + return errors : : Internal ( " Unable to find a context for handle on task : " , <nl> + device_task_name , " . This should not be possible " ) ; <nl> + } <nl> + * context_id = context_iterator - > second ; <nl> + <nl> + device_to_client_cache_ . insert ( { device , { * client , * context_id } } ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / eager / context . h <nl> ppp b / tensorflow / core / common_runtime / eager / context . h <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> + # include " tensorflow / core / common_runtime / device_mgr . h " <nl> # include " tensorflow / core / common_runtime / eager / eager_executor . h " <nl> # include " tensorflow / core / common_runtime / eager / kernel_and_device . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / common_runtime / rendezvous_mgr . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_client . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_server_lib . h " <nl> # include " tensorflow / core / framework / rendezvous . h " <nl> # include " tensorflow / core / lib / core / stringpiece . h " <nl> # include " tensorflow / core / lib / core / threadpool . h " <nl> + # include " tensorflow / core / lib / gtl / flatmap . h " <nl> # include " tensorflow / core / lib / gtl / inlined_vector . h " <nl> # include " tensorflow / core / lib / gtl / map_util . h " <nl> # include " tensorflow / core / lib / gtl / stl_util . 
h " <nl> class EagerContext { <nl> std : : unique_ptr < DeviceMgr > device_mgr , <nl> Rendezvous * rendezvous ) ; <nl> <nl> + / / TODO ( nareshmodi ) : Split this into 2 classes and hide functionality behind <nl> + / / an interface . Alternatively , encapsulate remote state into a separate <nl> + / / class / struct . <nl> + / / <nl> + / / Constructs an eager context that is able to communicate with remote <nl> + / / workers . <nl> + / / <nl> + / / Additional remote - specific args are : <nl> + / / - server : A GrpcServer that exports the tensorflow . WorkerService . Note <nl> + / / that this class expects the server to already have been started . <nl> + / / - remote_eager_workers : A cache from which we can get " EagerClient " s to <nl> + / / communicate with remote eager services . <nl> + / / - remote_device_mgr : A DeviceMgr * which contains all remote devices <nl> + / / ( should contain no local devices ) . <nl> + / / - remote_contexts : A map containing task name to remote context ID . <nl> + explicit EagerContext ( <nl> + const SessionOptions & opts , ContextDevicePlacementPolicy default_policy , <nl> + bool async , DeviceMgr * local_device_mgr , Rendezvous * rendezvous , <nl> + std : : unique_ptr < GrpcServer > server , <nl> + std : : unique_ptr < eager : : EagerClientCache > remote_eager_workers , <nl> + std : : unique_ptr < DeviceMgr > remote_device_manager , <nl> + const gtl : : FlatMap < string , uint64 > & remote_contexts ) ; <nl> + <nl> ~ EagerContext ( ) ; <nl> <nl> / / Returns the function library runtime for the given device . <nl> class EagerContext { <nl> <nl> mutex * FunctionsMu ( ) { return & functions_mu_ ; } <nl> <nl> - tensorflow : : DeviceMgr * device_mgr ( ) { return device_manager_ . get ( ) ; } <nl> + const tensorflow : : DeviceMgr * local_device_mgr ( ) const { <nl> + return ( local_device_manager_ ! = nullptr ) ? local_device_manager_ . get ( ) <nl> + : local_unowned_device_manager_ ; <nl> + } <nl> + const tensorflow : : DeviceMgr * remote_device_mgr ( ) { <nl> + return remote_device_manager_ . get ( ) ; <nl> + } <nl> <nl> / / TODO ( apassos ) remove the need for this <nl> - void ReleaseDeviceMgr ( ) { device_manager_ . release ( ) ; } <nl> + void ReleaseDeviceMgr ( ) { local_device_manager_ . release ( ) ; } <nl> <nl> / / TODO ( apassos ) clean up RunMetadata storage . <nl> mutex * MetadataMu ( ) { return & metadata_mu_ ; } <nl> class EagerContext { <nl> <nl> FunctionLibraryDefinition * FuncLibDef ( ) { return & func_lib_def_ ; } <nl> <nl> + Status GetClientAndContextID ( Device * device , eager : : EagerClient * * client , <nl> + uint64 * context_id ) ; <nl> + <nl> private : <nl> + void InitDeviceMapAndAsync ( ) ; <nl> + <nl> const ContextDevicePlacementPolicy policy_ ; <nl> <nl> / / Note : we cannot use C + + 11 thread_local here as there is no concept of a <nl> class EagerContext { <nl> std : : unordered_map < std : : thread : : id , ContextDevicePlacementPolicy > <nl> thread_local_policies_ GUARDED_BY ( policy_map_mu_ ) ; <nl> <nl> - std : : unique_ptr < DeviceMgr > device_manager_ ; <nl> + / / Only one of the below is set . <nl> + std : : unique_ptr < DeviceMgr > local_device_manager_ ; <nl> + const DeviceMgr * local_unowned_device_manager_ ; <nl> + <nl> / / Devices owned by device_manager <nl> std : : vector < Device * > devices_ ; <nl> / / All devices are not owned . 
<nl> class EagerContext { <nl> mutable mutex async_map_mu_ ; <nl> std : : unordered_map < std : : thread : : id , bool > thread_local_async_ <nl> GUARDED_BY ( async_map_mu_ ) ; <nl> + <nl> + / / server_ is released when the context is destroyed , so it cannot be marked <nl> + / / const even though it is conceptually immutable . <nl> + std : : unique_ptr < GrpcServer > server_ ; <nl> + const std : : unique_ptr < eager : : EagerClientCache > remote_eager_workers_ ; <nl> + const std : : unique_ptr < DeviceMgr > remote_device_manager_ ; <nl> + <nl> + const gtl : : FlatMap < string , uint64 > remote_contexts_ ; <nl> + gtl : : FlatMap < Device * , std : : pair < eager : : EagerClient * , uint64 > > <nl> + device_to_client_cache_ ; <nl> } ; <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / eager / execute . cc <nl> ppp b / tensorflow / core / common_runtime / eager / execute . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / eager / execute_node . h " <nl> # include " tensorflow / core / common_runtime / eager / kernel_and_device . h " <nl> # include " tensorflow / core / common_runtime / eager / tensor_handle . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_client . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / remote_execute_node . h " <nl> # include " tensorflow / core / framework / step_stats . pb . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / types . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / lib / gtl / inlined_vector . h " <nl> + # include " tensorflow / core / lib / random / random . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> <nl> std : : unique_ptr < TFE_Op > BuildXlaLaunch ( TFE_Op * op , TF_Status * status ) { <nl> } <nl> # endif / / TENSORFLOW_EAGER_USE_XLA <nl> <nl> + Status GetOutputDTypes ( EagerOperation * op , DataTypeVector * output_dtypes ) { <nl> + const auto & node_def = op - > MutableAttrs ( ) - > BuildNodeDef ( ) ; <nl> + const OpDef * op_def = nullptr ; <nl> + <nl> + TF_RETURN_IF_ERROR ( OpDefForOp ( op - > Name ( ) . c_str ( ) , & op_def ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( OutputTypesForNode ( node_def , * op_def , output_dtypes ) ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> } / / namespace <nl> <nl> - Status EagerExecute ( EagerOperation * op , <nl> - gtl : : InlinedVector < TensorHandle * , 2 > * retvals , <nl> - int * num_retvals ) { <nl> + namespace { <nl> + bool IsLocal ( EagerContext * ctx , tensorflow : : Device * d ) { <nl> + if ( d = = nullptr | | ctx - > remote_device_mgr ( ) = = nullptr ) return true ; <nl> + tensorflow : : Device * tmp ; <nl> + return ctx - > local_device_mgr ( ) - > LookupDevice ( d - > name ( ) , & tmp ) . ok ( ) ; <nl> + } <nl> + <nl> + Status EagerLocalExecute ( EagerOperation * op , <nl> + gtl : : InlinedVector < TensorHandle * , 2 > * retvals , <nl> + int * num_retvals ) { <nl> EagerContext * ctx = op - > EagerContext ( ) ; <nl> auto status = ctx - > GetStatus ( ) ; <nl> if ( ! status . 
ok ( ) ) return status ; <nl> Status EagerExecute ( EagerOperation * op , <nl> return status ; <nl> } <nl> <nl> + Status EagerRemoteExecute ( EagerOperation * op , eager : : EagerClient * eager_client , <nl> + uint64 context_id , TensorHandle * * retvals , <nl> + int * num_retvals ) { <nl> + / / All tensors must be on the same device . <nl> + / / TODO ( nareshmodi ) : handle silent copies <nl> + eager : : EnqueueRequest request ; <nl> + eager : : EnqueueResponse response ; <nl> + <nl> + auto * remote_op = request . add_queue ( ) - > mutable_operation ( ) ; <nl> + <nl> + for ( auto * input : op - > Inputs ( ) ) { <nl> + tensorflow : : Device * input_device ; <nl> + TF_RETURN_IF_ERROR ( input - > Device ( & input_device ) ) ; <nl> + if ( op - > Device ( ) ! = input_device ) { <nl> + return tensorflow : : errors : : InvalidArgument ( <nl> + " Ops and inputs are not on the same device . Use " <nl> + " TFE_TensorHandleCopyToDevice to get ops on the same " <nl> + " device . Expected device : " , <nl> + op - > Device ( ) - > name ( ) , " , Actual device : " , input_device - > name ( ) ) ; <nl> + } <nl> + <nl> + tensorflow : : uint64 op_id ; <nl> + int32 output_num ; <nl> + TF_RETURN_IF_ERROR ( input - > RemoteAddress ( & op_id , & output_num ) ) ; <nl> + <nl> + auto * remote_op_input = remote_op - > add_inputs ( ) ; <nl> + remote_op_input - > set_op_id ( op_id ) ; <nl> + remote_op_input - > set_output_num ( output_num ) ; <nl> + } <nl> + <nl> + remote_op - > set_id ( op - > EagerContext ( ) - > NextId ( ) ) ; <nl> + remote_op - > set_name ( op - > Name ( ) ) ; <nl> + / / Inputs set above . <nl> + op - > Attrs ( ) . FillAttrValueMap ( remote_op - > mutable_attrs ( ) ) ; <nl> + remote_op - > set_device ( op - > Device ( ) - > name ( ) ) ; <nl> + <nl> + request . set_context_id ( context_id ) ; <nl> + <nl> + if ( op - > EagerContext ( ) - > Async ( ) ) { <nl> + tensorflow : : uint64 id = op - > EagerContext ( ) - > NextId ( ) ; <nl> + auto * node = new eager : : RemoteExecuteNode ( id , request , eager_client ) ; <nl> + op - > EagerContext ( ) - > ExecutorAdd ( node ) ; <nl> + } else { <nl> + Notification n ; <nl> + Status status ; <nl> + eager_client - > EnqueueAsync ( & request , & response , <nl> + [ & n , & status ] ( const Status & s ) { <nl> + status = s ; <nl> + n . Notify ( ) ; <nl> + } ) ; <nl> + n . WaitForNotification ( ) ; <nl> + if ( ! status . ok ( ) ) return status ; <nl> + } <nl> + <nl> + DataTypeVector output_dtypes ; <nl> + TF_RETURN_IF_ERROR ( GetOutputDTypes ( op , & output_dtypes ) ) ; <nl> + <nl> + if ( * num_retvals ! = output_dtypes . size ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " num_retvals does not match expected output dtypes " ) ; <nl> + } <nl> + <nl> + tensorflow : : Device * op_device = op - > Device ( ) ; <nl> + EagerContext * ctx = op - > EagerContext ( ) ; <nl> + <nl> + const tensorflow : : uint64 id = remote_op - > id ( ) ; <nl> + for ( int i = 0 ; i < * num_retvals ; i + + ) { <nl> + / / TODO ( nareshmodi ) : Change the callback to instead add the decref to a list <nl> + / / of pending decrefs that we can send as a batch with the next execute . <nl> + std : : function < void ( ) > callback = [ ctx , eager_client , context_id , id , i ] ( ) { <nl> + eager : : EnqueueRequest request ; <nl> + request . set_context_id ( context_id ) ; <nl> + <nl> + auto * handle_to_decref = request . 
add_queue ( ) - > mutable_handle_to_decref ( ) ; <nl> + handle_to_decref - > set_op_id ( id ) ; <nl> + handle_to_decref - > set_output_num ( i ) ; <nl> + <nl> + if ( ctx - > Async ( ) ) { <nl> + tensorflow : : uint64 id = ctx - > NextId ( ) ; <nl> + auto * node = new eager : : RemoteExecuteNode ( id , request , eager_client ) ; <nl> + ctx - > ExecutorAdd ( node ) ; <nl> + } else { <nl> + Notification n ; <nl> + eager : : EnqueueResponse response ; <nl> + eager_client - > EnqueueAsync ( <nl> + & request , & response , <nl> + [ & n ] ( const tensorflow : : Status & s ) { n . Notify ( ) ; } ) ; <nl> + n . WaitForNotification ( ) ; <nl> + } <nl> + <nl> + return tensorflow : : Status : : OK ( ) ; <nl> + } ; <nl> + retvals [ i ] = new TensorHandle ( remote_op - > id ( ) , i , output_dtypes [ i ] , <nl> + std : : move ( callback ) , op_device , op_device , <nl> + op - > EagerContext ( ) ) ; <nl> + } <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + } / / namespace <nl> + <nl> + Status EagerExecute ( EagerOperation * op , <nl> + gtl : : InlinedVector < TensorHandle * , 2 > * retvals , <nl> + int * num_retvals ) { <nl> + bool op_is_local = IsLocal ( op - > EagerContext ( ) , op - > Device ( ) ) ; <nl> + <nl> + if ( op_is_local ) { <nl> + return EagerLocalExecute ( op , retvals , num_retvals ) ; <nl> + } <nl> + <nl> + auto * ctx = op - > EagerContext ( ) ; <nl> + <nl> + tensorflow : : eager : : EagerClient * eager_client ; <nl> + tensorflow : : uint64 context_id ; <nl> + TF_RETURN_IF_ERROR ( <nl> + ctx - > GetClientAndContextID ( op - > Device ( ) , & eager_client , & context_id ) ) ; <nl> + <nl> + return EagerRemoteExecute ( op , eager_client , context_id , retvals - > data ( ) , <nl> + num_retvals ) ; <nl> + } <nl> + <nl> Status EagerExecute ( EagerContext * ctx , Device * device , <nl> const gtl : : InlinedVector < TensorHandle * , 4 > & op_inputs , <nl> KernelAndDevice * kernel , NodeExecStats * maybe_stats , <nl> Status EagerExecute ( EagerContext * ctx , Device * device , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status EagerCopyToDevice ( TensorHandle * h , EagerContext * ctx , <nl> - const char * device_name , TensorHandle * * result ) { <nl> + namespace { <nl> + <nl> + Status LocalEagerCopyToDevice ( TensorHandle * h , EagerContext * ctx , Device * dstd , <nl> + TensorHandle * * result ) { <nl> TF_RETURN_IF_ERROR ( ctx - > GetStatus ( ) ) ; <nl> - Device * dstd = ctx - > HostCPU ( ) ; <nl> - if ( device_name ! = nullptr & & strlen ( device_name ) > 0 ) { <nl> - TF_RETURN_IF_ERROR ( ctx - > device_mgr ( ) - > LookupDevice ( device_name , & dstd ) ) ; <nl> - } <nl> if ( ctx - > Async ( ) ) { <nl> / / Note that ` h ` may not be currently ready . However execution order will <nl> / / make sure that ` h ` is ready before the copy is actually done . <nl> Status EagerCopyToDevice ( TensorHandle * h , EagerContext * ctx , <nl> } <nl> } <nl> <nl> + Status FindDeviceFromName ( EagerContext * ctx , const char * device_name , <nl> + Device * * device ) { <nl> + * device = ctx - > HostCPU ( ) ; <nl> + if ( device_name = = nullptr | | strlen ( device_name ) = = 0 ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + auto status = ctx - > local_device_mgr ( ) - > LookupDevice ( device_name , device ) ; <nl> + if ( status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + <nl> + if ( ctx - > remote_device_mgr ( ) ! 
= nullptr ) { <nl> + return ctx - > remote_device_mgr ( ) - > LookupDevice ( device_name , device ) ; <nl> + } <nl> + <nl> + return status ; <nl> + } <nl> + <nl> + Status ExecuteSend ( EagerContext * ctx , tensorflow : : Device * device , <nl> + TensorHandle * h , StringPiece wire_id , <nl> + const string & recv_device ) { <nl> + const tensorflow : : AttrTypeMap * types ; <nl> + TF_RETURN_IF_ERROR ( tensorflow : : AttrTypeMapForOp ( " _Send " , & types ) ) ; <nl> + tensorflow : : EagerOperation op ( ctx , " _Send " , types ) ; <nl> + <nl> + op . AddInput ( h ) ; <nl> + <nl> + op . SetDevice ( device ) ; <nl> + <nl> + op . MutableAttrs ( ) - > Set ( " tensor_name " , wire_id ) ; <nl> + op . MutableAttrs ( ) - > Set ( " send_device " , device - > name ( ) ) ; <nl> + op . MutableAttrs ( ) - > Set ( <nl> + " send_device_incarnation " , <nl> + static_cast < int64 > ( device - > attributes ( ) . incarnation ( ) ) ) ; <nl> + op . MutableAttrs ( ) - > Set ( " recv_device " , recv_device ) ; <nl> + op . MutableAttrs ( ) - > Set ( " client_terminated " , false ) ; <nl> + <nl> + op . MutableAttrs ( ) - > Set ( " T " , h - > dtype ) ; <nl> + <nl> + int num_outputs = 0 ; <nl> + gtl : : InlinedVector < TensorHandle * , 2 > retvals ; <nl> + <nl> + return EagerExecute ( & op , & retvals , & num_outputs ) ; <nl> + } <nl> + <nl> + Status ExecuteRecv ( EagerContext * ctx , tensorflow : : Device * device , <nl> + DataType dtype , StringPiece wire_id , <nl> + const string & send_device , int64 send_device_incarnation , <nl> + TensorHandle * * result ) { <nl> + const tensorflow : : AttrTypeMap * types ; <nl> + TF_RETURN_IF_ERROR ( tensorflow : : AttrTypeMapForOp ( " _Recv " , & types ) ) ; <nl> + tensorflow : : EagerOperation op ( ctx , " _Recv " , types ) ; <nl> + <nl> + op . SetDevice ( device ) ; <nl> + <nl> + op . MutableAttrs ( ) - > Set ( " tensor_name " , wire_id ) ; <nl> + op . MutableAttrs ( ) - > Set ( " send_device " , send_device ) ; <nl> + op . MutableAttrs ( ) - > Set ( " send_device_incarnation " , send_device_incarnation ) ; <nl> + op . MutableAttrs ( ) - > Set ( " recv_device " , device - > name ( ) ) ; <nl> + op . MutableAttrs ( ) - > Set ( " client_terminated " , false ) ; <nl> + <nl> + op . MutableAttrs ( ) - > Set ( " tensor_type " , dtype ) ; <nl> + <nl> + int num_outputs = 1 ; <nl> + gtl : : InlinedVector < TensorHandle * , 2 > retvals ( num_outputs ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( EagerExecute ( & op , & retvals , & num_outputs ) ) ; <nl> + <nl> + * result = retvals . at ( 0 ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / / This gets a unique wire ID . We add a random identifier so that if the worker <nl> + / / has other clients that it is servicing , we don ' t have any collision . 
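Concretely (values illustrative): with a per-process random_seed of 8419522921549, consecutive calls to the helper below return "8419522921549_0", "8419522921549_1", and so on, while a second client of the same worker draws its own seed and therefore its own id namespace. The wire id becomes the tensor_name shared by the paired _Send/_Recv built by ExecuteSend and ExecuteRecv above, which is the only thing connecting the producer task to the consumer task. A sketch of one copy under assumed devices:

// Sketch only: one cross-task copy expressed with the helpers in this file.
// This mirrors the remote branch of EagerCopyToDevice() below.
tensorflow::Status CopySketch(tensorflow::EagerContext* ctx,
                              tensorflow::Device* send_device,
                              tensorflow::Device* recv_device,
                              tensorflow::TensorHandle* h,
                              tensorflow::TensorHandle** result) {
  string wire_id = GetUniqueWireID();  // e.g. "8419522921549_0"
  // _Send publishes the tensor on the source device under this name...
  TF_RETURN_IF_ERROR(
      ExecuteSend(ctx, send_device, h, wire_id, recv_device->name()));
  // ...and _Recv on the destination waits on the identical key, which also
  // carries the sender incarnation so a restarted worker cannot be mistaken
  // for the original one.
  return ExecuteRecv(ctx, recv_device, h->dtype, wire_id,
                     send_device->name(),
                     send_device->attributes().incarnation(), result);
}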
<nl> + string GetUniqueWireID ( ) { <nl> + static tensorflow : : uint64 random_seed = random : : New64 ( ) ; <nl> + static tensorflow : : mutex wireid_mutex ( tensorflow : : LINKER_INITIALIZED ) ; <nl> + static tensorflow : : int64 wireid GUARDED_BY ( wireid_mutex ) = 0 ; <nl> + tensorflow : : mutex_lock l ( wireid_mutex ) ; <nl> + return strings : : StrCat ( random_seed , " _ " , wireid + + ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + Status EagerCopyToDevice ( TensorHandle * h , EagerContext * ctx , <nl> + const char * device_name , TensorHandle * * result ) { <nl> + tensorflow : : Device * send_device ; <nl> + TF_RETURN_IF_ERROR ( h - > Device ( & send_device ) ) ; <nl> + <nl> + if ( send_device = = nullptr ) { <nl> + send_device = ctx - > HostCPU ( ) ; <nl> + } <nl> + <nl> + bool sender_is_local = IsLocal ( ctx , send_device ) ; <nl> + <nl> + tensorflow : : Device * recv_device ; <nl> + TF_RETURN_IF_ERROR ( FindDeviceFromName ( ctx , device_name , & recv_device ) ) ; <nl> + <nl> + bool recver_is_local = IsLocal ( ctx , recv_device ) ; <nl> + <nl> + if ( sender_is_local & & recver_is_local ) { <nl> + return LocalEagerCopyToDevice ( h , ctx , recv_device , result ) ; <nl> + } else { <nl> + string wire_id = GetUniqueWireID ( ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( <nl> + ExecuteSend ( ctx , send_device , h , wire_id , recv_device - > name ( ) ) ) ; <nl> + <nl> + return ExecuteRecv ( ctx , recv_device , h - > dtype , wire_id , send_device - > name ( ) , <nl> + send_device - > attributes ( ) . incarnation ( ) , result ) ; <nl> + } <nl> + } <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / eager / execute . h <nl> ppp b / tensorflow / core / common_runtime / eager / execute . h <nl> limitations under the License . <nl> namespace tensorflow { <nl> <nl> / / Utility function that executes a fully constructed EagerOperation . <nl> + / / There are a few possible different combinations of how things can be <nl> + / / executed : <nl> + / / - Async ( the op context is configured to schedule asynchronously ) <nl> + / / Eager execute should return quickly after scheduling this operation to <nl> + / / execute . <nl> + / / - Remote ( the op device is on a remote task ) <nl> + / / Eager execute will send an RPC to execute the op on a remote device . <nl> + / / Note that in the Async + Remote case , EagerExecute should still return <nl> + / / quickly , but it will schedule the op to be executed remotely . <nl> Status EagerExecute ( <nl> EagerOperation * op , <nl> tensorflow : : gtl : : InlinedVector < tensorflow : : TensorHandle * , 2 > * retvals , <nl> mmm a / tensorflow / core / common_runtime / eager / tensor_handle . cc <nl> ppp b / tensorflow / core / common_runtime / eager / tensor_handle . cc <nl> bool TensorHandle : : IsReady ( ) { <nl> return is_ready_ ; <nl> } <nl> <nl> + bool TensorHandle : : IsRemote ( ) { <nl> + return remote_op_id_ > = 0 & & remote_output_num_ > = 0 ; <nl> + } <nl> + <nl> Status TensorHandle : : WaitReady ( ) { <nl> if ( node_id = = 0 ) return Status : : OK ( ) ; <nl> EagerExecutor * executor = nullptr ; <nl> Status TensorHandle : : WaitReady ( ) { <nl> } <nl> <nl> Status TensorHandle : : Tensor ( const tensorflow : : Tensor * * t ) { <nl> + if ( IsRemote ( ) ) { <nl> + return errors : : Unavailable ( <nl> + " Unable to get a tensor for a remote device . 
Please copy the tensor " <nl> + " handle to a local device using TFE_TensorHandleCopyToDevice " ) ; <nl> + } <nl> TF_RETURN_IF_ERROR ( WaitReady ( ) ) ; <nl> DCHECK ( IsReady ( ) ) ; <nl> * t = & tensor_ ; <nl> Status TensorHandle : : OpDevice ( tensorflow : : Device * * d ) { <nl> Status TensorHandle : : TensorAndDevice ( const tensorflow : : Tensor * * tensor , <nl> tensorflow : : Device * * device , <nl> tensorflow : : Device * * op_device ) { <nl> + if ( IsRemote ( ) ) { <nl> + return errors : : Unavailable ( <nl> + " Unable to get a tensor for a remote device . Please copy the tensor " <nl> + " handle to a local device using TFE_TensorHandleCopyToDevice " ) ; <nl> + } <nl> TF_RETURN_IF_ERROR ( WaitReady ( ) ) ; <nl> DCHECK ( IsReady ( ) ) ; <nl> * tensor = & tensor_ ; <nl> Status TensorHandle : : TensorAndDevice ( const tensorflow : : Tensor * * tensor , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status TensorHandle : : RemoteAddress ( uint64 * op_id , int32 * output_num ) { <nl> + if ( ! IsRemote ( ) ) { <nl> + return errors : : FailedPrecondition ( <nl> + " This TensorHandle refers to a local tensor handle " ) ; <nl> + } <nl> + * op_id = remote_op_id_ ; <nl> + * output_num = remote_output_num_ ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> void TensorHandle : : SetTensorAndDevice ( const tensorflow : : Tensor & tensor , <nl> tensorflow : : Device * device , <nl> tensorflow : : Device * op_device ) { <nl> mmm a / tensorflow / core / common_runtime / eager / tensor_handle . h <nl> ppp b / tensorflow / core / common_runtime / eager / tensor_handle . h <nl> limitations under the License . <nl> namespace tensorflow { <nl> <nl> / / Associates a Tensor and a Device , used in the eager runtime . Internal version <nl> - / / executor_of the TFE_TensorHandle struct and the python EagerTensor class <nl> + / / of the TFE_TensorHandle struct and the python EagerTensor class <nl> / / ( unrelated to python TensorHandle ) . <nl> class TensorHandle : public core : : RefCounted { <nl> public : <nl> class TensorHandle : public core : : RefCounted { <nl> tensor_ ( t ) , <nl> device_ ( d ) , <nl> op_device_ ( op_device ) , <nl> + remote_op_id_ ( - 1 ) , <nl> + remote_output_num_ ( - 1 ) , <nl> ctx_ ( ctx ) , <nl> is_ready_ ( true ) { } <nl> <nl> class TensorHandle : public core : : RefCounted { <nl> tensor_ ( dtype ) , <nl> device_ ( nullptr ) , <nl> op_device_ ( nullptr ) , <nl> + remote_op_id_ ( - 1 ) , <nl> + remote_output_num_ ( - 1 ) , <nl> ctx_ ( ctx ) , <nl> is_ready_ ( ctx = = nullptr ) { <nl> DCHECK_GT ( node_id , 0 ) ; <nl> } <nl> <nl> - ~ TensorHandle ( ) override { } <nl> + / / Remote tensor handle constructor . <nl> + TensorHandle ( uint64 op_id , int32 output_num , DataType dtype , <nl> + std : : function < void ( ) > call_on_destroy , Device * d , <nl> + Device * op_device , EagerContext * ctx ) <nl> + : dtype ( dtype ) , <nl> + node_id ( 0 ) , <nl> + device_ ( d ) , <nl> + op_device_ ( op_device ) , <nl> + remote_op_id_ ( op_id ) , <nl> + remote_output_num_ ( output_num ) , <nl> + call_on_destroy_ ( std : : move ( call_on_destroy ) ) , <nl> + ctx_ ( ctx ) , <nl> + is_ready_ ( true ) { <nl> + DCHECK ( IsRemote ( ) ) < < " Op ID and output num should be > = 0 . 
Op ID : " <nl> + < < op_id < < " , Output num : " < < output_num ; <nl> + } <nl> + <nl> + ~ TensorHandle ( ) override { <nl> + if ( call_on_destroy_ ) { <nl> + call_on_destroy_ ( ) ; <nl> + } <nl> + } <nl> <nl> Status Tensor ( const tensorflow : : Tensor * * t ) ; <nl> <nl> class TensorHandle : public core : : RefCounted { <nl> tensorflow : : Device * * device , <nl> tensorflow : : Device * * op_device ) ; <nl> <nl> + / / Return the op_id and output num if the handle refers to a remote tensor . <nl> + Status RemoteAddress ( uint64 * op_id , int32 * output_num ) ; <nl> + <nl> / / Note that this can be called at most once , and only on non - ready handles , <nl> / / and makes them ready . <nl> void SetTensorAndDevice ( const tensorflow : : Tensor & tensor , <nl> class TensorHandle : public core : : RefCounted { <nl> <nl> bool IsReady ( ) ; <nl> <nl> + bool IsRemote ( ) ; <nl> + <nl> / / Id for the EagerNode that will compute the value pointed to by this handle . <nl> / / If the value is 0 , the handle is already ready , but not vice - versa . <nl> const uint64 node_id ; <nl> class TensorHandle : public core : : RefCounted { <nl> / / device_ for constant tensors . <nl> tensorflow : : Device * op_device_ ; <nl> <nl> + / / IDs required when this class is representing a remote tensor handle . <nl> + const uint64 remote_op_id_ ; <nl> + const int32 remote_output_num_ ; <nl> + <nl> + / / A callback that is executed when the class is destroyed . <nl> + / / <nl> + / / This is currently used for remote tensor handles . <nl> + const std : : function < void ( ) > call_on_destroy_ ; <nl> + <nl> mutex ctx_mutex_ ; <nl> <nl> / / ` ctx ` is only guaranteed to be set if the handle is not " ready " . This is <nl> mmm a / tensorflow / core / common_runtime / executor . cc <nl> ppp b / tensorflow / core / common_runtime / executor . cc <nl> void ExecutorState : : Process ( TaggedNode tagged_node , int64 scheduled_usec ) { <nl> <nl> if ( vlog_ ) { <nl> VLOG ( 1 ) < < " Process node : " < < id < < " step " < < params . step_id < < " " <nl> - < < SummarizeNode ( * node ) < < " is dead : " < < tagged_node . is_dead ; <nl> + < < SummarizeNode ( * node ) < < " is dead : " < < tagged_node . is_dead <nl> + < < " device : " < < device - > name ( ) ; <nl> } <nl> <nl> Entry * input_tensors = GetInputTensors ( input_frame , input_iter ) ; <nl> void ExecutorState : : Process ( TaggedNode tagged_node , int64 scheduled_usec ) { <nl> VLOG ( 2 ) < < " Async kernel done : " < < state - > item - > node - > id ( ) <nl> < < " step " < < step_id_ < < " " <nl> < < SummarizeNode ( * state - > item - > node ) <nl> - < < " is dead : " < < state - > tagged_node . is_dead ; <nl> + < < " is dead : " < < state - > tagged_node . is_dead <nl> + < < " device : " < < device - > name ( ) ; <nl> } <nl> <nl> / / Clears inputs . <nl> void ExecutorState : : Process ( TaggedNode tagged_node , int64 scheduled_usec ) { <nl> if ( vlog_ ) { <nl> VLOG ( 2 ) < < " Synchronous kernel done : " < < id < < " step " <nl> < < params . step_id < < " " < < SummarizeNode ( * node ) <nl> - < < " is dead : " < < tagged_node . is_dead ; <nl> + < < " is dead : " < < tagged_node . is_dead <nl> + < < " device : " < < device - > name ( ) ; <nl> } <nl> <nl> / / Clears inputs . <nl> mmm a / tensorflow / core / common_runtime / function_threadpool_test . cc <nl> ppp b / tensorflow / core / common_runtime / function_threadpool_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / framework / op . 
h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> - # include " tensorflow / core / framework / versions . pb . h " <nl> # include " tensorflow / core / graph / graph_constructor . h " <nl> # include " tensorflow / core / lib / core / notification . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device . cc <nl> void BaseGPUDevice : : Compute ( OpKernel * op_kernel , OpKernelContext * context ) { <nl> } <nl> } <nl> <nl> + string BaseGPUDevice : : ComputeOpKernelDebugString ( const OpKernel & op_kernel , <nl> + const int & stream_id ) { <nl> + return strings : : StrCat ( op_kernel . name ( ) , " op " , op_kernel . type_string ( ) , <nl> + " on GPU " , tf_gpu_id_ . value ( ) , " stream [ " , stream_id , <nl> + " ] " ) ; <nl> + } <nl> + <nl> void BaseGPUDevice : : ComputeHelper ( OpKernel * op_kernel , <nl> OpKernelContext * context ) { <nl> GPUDeviceContext * gpu_device_context = device_contexts_ [ 0 ] ; <nl> void BaseGPUDevice : : ComputeHelper ( OpKernel * op_kernel , <nl> const bool vlog_2 = vlog_1 & & VLOG_IS_ON ( 2 ) ; <nl> <nl> if ( vlog_1 ) { <nl> - VLOG ( 1 ) < < " GpuDevice : : Compute " < < op_kernel - > name ( ) < < " op " <nl> - < < op_kernel - > type_string ( ) < < " on GPU " < < tf_gpu_id_ < < " stream [ " <nl> - < < stream_id < < " ] " ; <nl> + VLOG ( 1 ) < < " GpuDevice : : ComputeHelper " <nl> + < < ComputeOpKernelDebugString ( * op_kernel , stream_id ) ; <nl> } <nl> <nl> const auto num_streams = streams_ . size ( ) ; <nl> void BaseGPUDevice : : ComputeHelper ( OpKernel * op_kernel , <nl> / / all streams . Given that this flag is typically used for <nl> / / debugging it makes more sense to sync all GPU activity . <nl> context - > SetStatus ( GPUUtil : : SyncAll ( this ) ) ; <nl> + if ( vlog_1 ) { <nl> + VLOG ( 1 ) < < " GpuDevice : : ComputeHelper finished " <nl> + < < ComputeOpKernelDebugString ( * op_kernel , stream_id ) ; <nl> + } <nl> + } else if ( vlog_1 ) { <nl> + VLOG ( 1 ) < < " GpuDevice : : ComputeHelper scheduled " <nl> + < < ComputeOpKernelDebugString ( * op_kernel , stream_id ) ; <nl> + } <nl> + } else { <nl> + if ( vlog_1 ) { <nl> + VLOG ( 1 ) < < " GpuDevice : : ComputeHelper failed to schedule " <nl> + < < ComputeOpKernelDebugString ( * op_kernel , stream_id ) ; <nl> } <nl> } <nl> } <nl> Status BaseGPUDeviceFactory : : CreateDevices ( const SessionOptions & options , <nl> if ( num_gpus_to_use > valid_cuda_gpu_ids . size ( ) ) { <nl> num_gpus_to_use = valid_cuda_gpu_ids . size ( ) ; <nl> } <nl> - if ( ! valid_cuda_gpu_ids . empty ( ) ) { <nl> + / / If we aren ' t going to use any GPUs , don ' t initialize them . <nl> + if ( num_gpus_to_use > 0 & & ! valid_cuda_gpu_ids . empty ( ) ) { <nl> / / Save the original device . <nl> int original_device = 0 ; <nl> cudaError_t err = cudaGetDevice ( & original_device ) ; <nl> mmm a / tensorflow / core / common_runtime / gpu / gpu_device . h <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_device . 
h <nl> class BaseGPUDevice : public LocalDevice { <nl> <nl> void ComputeHelper ( OpKernel * op_kernel , OpKernelContext * context ) ; <nl> <nl> + string ComputeOpKernelDebugString ( const OpKernel & op_kernel , <nl> + const int & stream_id ) ; <nl> + <nl> / / This method returns an initialization status , in addition to <nl> / / calling the " done " StatusCallback , if there is a failure to <nl> / / allocate memory or if the tensor " from " is not DMA - copyable . <nl> new file mode 100644 <nl> index 0000000000000 . . f3922dde74ae2 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / BUILD <nl> <nl> + package ( default_visibility = [ <nl> + " / / tensorflow : internal " , <nl> + ] ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + exports_files ( [ " LICENSE " ] ) <nl> + <nl> + load ( <nl> + " / / tensorflow : tensorflow . bzl " , <nl> + " tf_cc_test " , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " remote_tensor_handle " , <nl> + hdrs = [ " remote_tensor_handle . h " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " / / tensorflow / core : lib " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " eager_client " , <nl> + hdrs = [ " eager_client . h " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " / / tensorflow / core : lib " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " remote_execute_node " , <nl> + hdrs = [ " remote_execute_node . h " ] , <nl> + deps = [ <nl> + " : eager_client " , <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core / common_runtime / eager : eager_executor " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " eager_service_impl " , <nl> + srcs = [ " eager_service_impl . cc " ] , <nl> + hdrs = [ <nl> + " eager_service_impl . h " , <nl> + ] , <nl> + deps = [ <nl> + " / / tensorflow / c : c_api_internal " , <nl> + " / / tensorflow / c : tf_status_helper " , <nl> + " / / tensorflow / core : core_cpu_internal " , <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " / / tensorflow / core : framework_internal " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : lib_internal " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> + " / / tensorflow / core / common_runtime / eager : context " , <nl> + " / / tensorflow / core / common_runtime / eager : eager_operation " , <nl> + " / / tensorflow / core / common_runtime / eager : execute " , <nl> + " / / tensorflow / core / common_runtime / eager : tensor_handle " , <nl> + " / / tensorflow / core / distributed_runtime : server_lib " , <nl> + " / / tensorflow / core / distributed_runtime : worker_cache " , <nl> + " / / tensorflow / core / distributed_runtime : worker_cache_wrapper " , <nl> + " / / tensorflow / core / distributed_runtime : worker_env " , <nl> + " / / tensorflow / core / distributed_runtime / eager : remote_tensor_handle " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : rpc_rendezvous_mgr " , <nl> + " @ grpc / / : grpc + + _unsecure " , <nl> + " @ grpc / / : grpc_unsecure " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_cc_test ( <nl> + name = " eager_service_impl_test " , <nl> + srcs = [ " eager_service_impl_test . 
cc " ] , <nl> + deps = [ <nl> + " : eager_service_impl " , <nl> + " / / tensorflow / c : c_api " , <nl> + " / / tensorflow / c : c_api_internal " , <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> + " / / tensorflow / core : test " , <nl> + " / / tensorflow / core : test_main " , <nl> + " / / tensorflow / core / common_runtime / eager : tensor_handle " , <nl> + " / / tensorflow / core / distributed_runtime : worker_env " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : rpc_rendezvous_mgr " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 9ba8c8d80cb0d <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / eager_client . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_CLIENT_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_CLIENT_H_ <nl> + <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> + # include " tensorflow / core / protobuf / eager_service . pb . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + / / This is a base class that can be implemented by a variety of <nl> + / / transports ( e . g . gRPC which for each of the client methods makes an RPC ) . <nl> + class EagerClient { <nl> + public : <nl> + virtual ~ EagerClient ( ) { } <nl> + # define CLIENT_METHOD ( method ) \ <nl> + virtual void method # # Async ( const method # # Request * request , \ <nl> + method # # Response * response , \ <nl> + StatusCallback done ) = 0 ; <nl> + <nl> + CLIENT_METHOD ( CreateContext ) ; <nl> + CLIENT_METHOD ( Enqueue ) ; <nl> + CLIENT_METHOD ( WaitQueueDone ) ; <nl> + CLIENT_METHOD ( KeepAlive ) ; <nl> + CLIENT_METHOD ( CloseContext ) ; <nl> + CLIENT_METHOD ( RegisterFunction ) ; <nl> + <nl> + # undef CLIENT_METHOD <nl> + } ; <nl> + <nl> + / / Simple wrapper class that can be used to retrieve EagerClients . <nl> + class EagerClientCache { <nl> + public : <nl> + virtual ~ EagerClientCache ( ) { } <nl> + virtual EagerClient * GetClient ( const string & target ) = 0 ; <nl> + } ; <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_CLIENT_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 4bd74b81a7c43 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / eager_service_impl . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . 
<nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_service_impl . h " <nl> + <nl> + # include " tensorflow / c / c_api_internal . h " <nl> + # include " tensorflow / c / tf_status_helper . h " <nl> + # include " tensorflow / core / common_runtime / device_mgr . h " <nl> + # include " tensorflow / core / common_runtime / eager / eager_operation . h " <nl> + # include " tensorflow / core / common_runtime / eager / execute . h " <nl> + # include " tensorflow / core / common_runtime / process_util . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / rpc_rendezvous_mgr . h " <nl> + # include " tensorflow / core / distributed_runtime / server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_cache . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_cache_wrapper . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_env . h " <nl> + # include " tensorflow / core / framework / rendezvous . h " <nl> + # include " tensorflow / core / lib / core / error_codes . pb . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / core / lib / gtl / cleanup . h " <nl> + # include " tensorflow / core / lib / random / random . h " <nl> + # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include " tensorflow / core / lib / strings / stringprintf . h " <nl> + # include " tensorflow / core / platform / cpu_info . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + namespace { <nl> + Status GetNumRetvals ( tensorflow : : EagerContext * context , const string & op_name , <nl> + const google : : protobuf : : Map < string , tensorflow : : AttrValue > & attrs , <nl> + int * num_retvals ) { <nl> + const tensorflow : : OpRegistrationData * op_reg_data = nullptr ; <nl> + auto status = tensorflow : : OpRegistry : : Global ( ) - > LookUp ( op_name , & op_reg_data ) ; <nl> + if ( errors : : IsNotFound ( status ) ) { <nl> + status = context - > FindFunctionOpData ( op_name , & op_reg_data ) ; <nl> + } <nl> + TF_RETURN_IF_ERROR ( status ) ; <nl> + <nl> + const tensorflow : : OpDef & op_def = op_reg_data - > op_def ; <nl> + <nl> + for ( const auto & output_arg : op_def . output_arg ( ) ) { <nl> + if ( ! output_arg . number_attr ( ) . empty ( ) ) { <nl> + auto iter = attrs . find ( output_arg . number_attr ( ) ) ; <nl> + if ( iter = = attrs . end ( ) ) { <nl> + return errors : : InvalidArgument ( " Unable to find number_attr " , <nl> + output_arg . number_attr ( ) , <nl> + " for Op : " , op_name ) ; <nl> + } <nl> + * num_retvals + = iter - > second . 
i ( ) ; <nl> + } else if ( ! output_arg . type_list_attr ( ) . empty ( ) ) { <nl> + auto iter = attrs . find ( output_arg . type_list_attr ( ) ) ; <nl> + if ( iter = = attrs . end ( ) ) { <nl> + return errors : : InvalidArgument ( " Unable to find type_list_attr " , <nl> + output_arg . type_list_attr ( ) , <nl> + " for Op : " , op_name ) ; <nl> + } <nl> + * num_retvals + = iter - > second . list ( ) . type_size ( ) ; <nl> + } else { <nl> + * num_retvals + = 1 ; <nl> + } <nl> + } <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + } / / namespace <nl> + <nl> + Status EagerServiceImpl : : CreateContext ( const CreateContextRequest * request , <nl> + CreateContextResponse * response ) { <nl> + tensorflow : : RemoteRendezvous * r = env_ - > rendezvous_mgr - > Find ( 0 ) ; <nl> + std : : vector < tensorflow : : Device * > devices ; <nl> + TF_RETURN_IF_ERROR ( tensorflow : : DeviceFactory : : AddDevices ( <nl> + / / TODO ( nareshmodi ) : Correctly set the SessionOptions . <nl> + SessionOptions ( ) , <nl> + strings : : Printf ( " / job : % s / replica : 0 / task : % d " , <nl> + request - > server_def ( ) . job_name ( ) . data ( ) , <nl> + request - > server_def ( ) . task_index ( ) ) , <nl> + & devices ) ) ; <nl> + <nl> + response - > mutable_device_attributes ( ) - > Reserve ( devices . size ( ) ) ; <nl> + for ( auto & d : devices ) { <nl> + * response - > add_device_attributes ( ) = d - > attributes ( ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < tensorflow : : DeviceMgr > device_mgr ( <nl> + new tensorflow : : DeviceMgr ( devices ) ) ; <nl> + std : : unique_ptr < tensorflow : : EagerContext > ctx ( new tensorflow : : EagerContext ( <nl> + SessionOptions ( ) , <nl> + tensorflow : : ContextDevicePlacementPolicy : : DEVICE_PLACEMENT_SILENT , <nl> + request - > async ( ) , std : : move ( device_mgr ) , r ) ) ; <nl> + <nl> + uint64 context_id ; <nl> + { <nl> + mutex_lock l ( contexts_mu_ ) ; <nl> + do { <nl> + context_id = random : : New64 ( ) ; <nl> + } while ( contexts_ . find ( context_id ) ! = contexts_ . end ( ) ) ; <nl> + contexts_ . emplace ( context_id , new ServerContext ( std : : move ( ctx ) ) ) ; <nl> + } <nl> + response - > set_context_id ( context_id ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status EagerServiceImpl : : ExecuteOp ( const Operation & operation , <nl> + ServerContext * server_context ) { <nl> + std : : unique_ptr < tensorflow : : EagerOperation > op ; <nl> + const char * name = operation . name ( ) . c_str ( ) ; / / Shorthand <nl> + const tensorflow : : AttrTypeMap * types ; <nl> + auto status = tensorflow : : AttrTypeMapForOp ( name , & types ) ; <nl> + if ( status . ok ( ) ) { <nl> + op . reset ( <nl> + new tensorflow : : EagerOperation ( server_context - > Context ( ) , name , types ) ) ; <nl> + } else if ( errors : : IsNotFound ( status ) ) { <nl> + if ( server_context - > Context ( ) - > FindFunctionByName ( name ) ) { <nl> + op . reset ( new tensorflow : : EagerOperation ( server_context - > Context ( ) , name , <nl> + nullptr ) ) ; <nl> + } else { <nl> + return status ; <nl> + } <nl> + } else { <nl> + return status ; <nl> + } <nl> + <nl> + TF_RETURN_IF_ERROR ( op - > SetDevice ( operation . device ( ) . c_str ( ) ) ) ; <nl> + <nl> + for ( const auto & remote_handle : operation . 
inputs ( ) ) { <nl> + tensorflow : : TensorHandle * handle ; <nl> + TF_RETURN_IF_ERROR ( server_context - > GetTensorHandle ( <nl> + RemoteTensorHandleInternal ( remote_handle ) , & handle ) ) ; <nl> + <nl> + op - > AddInput ( handle ) ; <nl> + } <nl> + <nl> + for ( const auto & attr : operation . attrs ( ) ) { <nl> + op - > MutableAttrs ( ) - > Set ( attr . first , attr . second ) ; <nl> + } <nl> + <nl> + int num_retvals = 0 ; <nl> + / / TODO ( nareshmodi ) : Consider caching this . <nl> + TF_RETURN_IF_ERROR ( GetNumRetvals ( server_context - > Context ( ) , operation . name ( ) , <nl> + operation . attrs ( ) , & num_retvals ) ) ; <nl> + <nl> + tensorflow : : gtl : : InlinedVector < tensorflow : : TensorHandle * , 2 > retvals ; <nl> + TF_RETURN_IF_ERROR ( EagerExecute ( op . get ( ) , & retvals , & num_retvals ) ) ; <nl> + <nl> + server_context - > AddOperationOutputs ( retvals , operation . id ( ) ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status EagerServiceImpl : : Enqueue ( const EnqueueRequest * request , <nl> + EnqueueResponse * response ) { <nl> + ServerContext * context = nullptr ; <nl> + TF_RETURN_IF_ERROR ( GetServerContext ( request - > context_id ( ) , & context ) ) ; <nl> + core : : ScopedUnref context_unref ( context ) ; <nl> + <nl> + for ( const auto & item : request - > queue ( ) ) { <nl> + if ( item . has_operation ( ) ) { <nl> + TF_RETURN_IF_ERROR ( ExecuteOp ( item . operation ( ) , context ) ) ; <nl> + } else { <nl> + TF_RETURN_IF_ERROR ( context - > DeleteTensorHandle ( <nl> + RemoteTensorHandleInternal ( item . handle_to_decref ( ) ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status EagerServiceImpl : : WaitQueueDone ( const WaitQueueDoneRequest * request , <nl> + WaitQueueDoneResponse * response ) { <nl> + ServerContext * context = nullptr ; <nl> + TF_RETURN_IF_ERROR ( GetServerContext ( request - > context_id ( ) , & context ) ) ; <nl> + core : : ScopedUnref context_unref ( context ) ; <nl> + <nl> + if ( request - > op_id_size ( ) > 0 ) { <nl> + return errors : : Unimplemented ( <nl> + " EagerServiceImpl : : WaitQueueDone is not " <nl> + " implemented for particular op IDs . " ) ; <nl> + } <nl> + return context - > Context ( ) - > AsyncWait ( ) ; <nl> + } <nl> + <nl> + Status EagerServiceImpl : : KeepAlive ( const KeepAliveRequest * request , <nl> + KeepAliveResponse * response ) { <nl> + / / TODO ( nareshmodi ) : Automated context_id cleaning is not implemented <nl> + return errors : : Unimplemented ( <nl> + " EagerServiceImpl : : KeepAlive is not implemented . " ) ; <nl> + } <nl> + <nl> + Status EagerServiceImpl : : CloseContext ( const CloseContextRequest * request , <nl> + CloseContextResponse * response ) { <nl> + ServerContext * context = nullptr ; <nl> + if ( ! GetServerContext ( request - > context_id ( ) , & context ) . ok ( ) ) { <nl> + / / Swallow the error here . <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + core : : ScopedUnref context_unref ( context ) ; <nl> + <nl> + mutex_lock l ( contexts_mu_ ) ; <nl> + contexts_ . erase ( request - > context_id ( ) ) ; <nl> + <nl> + / / GetServerContext returns a newly Reffed copy of ServerContext , which is <nl> + / / unreffed by context_unref . Additionally , we need to unref it one time since <nl> + / / we are releasing it from the map . 
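<nl> + / / Worked count ( illustrative ) : a ServerContext is created with one ref , <nl> + / / held by the contexts_ map , and GetServerContext above added a second for <nl> + / / this handler . The Unref below drops the ref held by the map ; the <nl> + / / ScopedUnref drops the ref held by this handler , so the ServerContext is <nl> + / / destroyed once any concurrent handlers release theirs as well .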
<nl> + context - > Unref ( ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status EagerServiceImpl : : RegisterFunction ( <nl> + const RegisterFunctionRequest * request , <nl> + RegisterFunctionResponse * response ) { <nl> + ServerContext * context = nullptr ; <nl> + TF_RETURN_IF_ERROR ( GetServerContext ( request - > context_id ( ) , & context ) ) ; <nl> + core : : ScopedUnref context_unref ( context ) ; <nl> + <nl> + return context - > Context ( ) - > AddFunctionDef ( request - > function_def ( ) ) ; <nl> + } <nl> + <nl> + tensorflow : : Status EagerServiceImpl : : GetServerContext ( <nl> + uint64 context_id , ServerContext * * server_context ) { <nl> + mutex_lock l ( contexts_mu_ ) ; <nl> + auto iter = contexts_ . find ( context_id ) ; <nl> + if ( iter = = contexts_ . end ( ) ) { <nl> + * server_context = nullptr ; <nl> + return errors : : InvalidArgument ( strings : : Printf ( <nl> + " Unable to find a context_id matching the specified one " <nl> + " ( % llu ) . Perhaps the worker was restarted ? " , <nl> + context_id ) ) ; <nl> + } <nl> + <nl> + * server_context = iter - > second ; <nl> + ( * server_context ) - > Ref ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . ebd5269a57aa7 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / eager_service_impl . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_SERVICE_IMPL_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_SERVICE_IMPL_H_ <nl> + <nl> + # include < unordered_map > <nl> + <nl> + # include " tensorflow / core / common_runtime / eager / context . h " <nl> + # include " tensorflow / core / common_runtime / eager / tensor_handle . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / remote_tensor_handle . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_env . h " <nl> + # include " tensorflow / core / lib / core / refcount . h " <nl> + # include " tensorflow / core / lib / gtl / array_slice . h " <nl> + # include " tensorflow / core / lib / strings / stringprintf . h " <nl> + # include " tensorflow / core / protobuf / eager_service . pb . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + / / A TensorFlow Eager Worker runs ops and supports worker to worker <nl> + / / Tensor transfer . <nl> + / / <nl> + / / See eager_service . proto for more details about each method . <nl> + / / This class can be wrapped by specific classes that implement rpc transports <nl> + / / over this ( e . g . 
gRPC ) . <nl> + class EagerServiceImpl { <nl> + public : <nl> + explicit EagerServiceImpl ( const WorkerEnv * env ) : env_ ( env ) { } <nl> + virtual ~ EagerServiceImpl ( ) { <nl> + for ( auto & entry : contexts_ ) { <nl> + entry . second - > Unref ( ) ; <nl> + } <nl> + } <nl> + <nl> + Status CreateContext ( const CreateContextRequest * request , <nl> + CreateContextResponse * response ) ; <nl> + <nl> + Status Enqueue ( const EnqueueRequest * request , EnqueueResponse * response ) ; <nl> + <nl> + Status WaitQueueDone ( const WaitQueueDoneRequest * request , <nl> + WaitQueueDoneResponse * response ) ; <nl> + <nl> + Status KeepAlive ( const KeepAliveRequest * request , <nl> + KeepAliveResponse * response ) ; <nl> + <nl> + Status CloseContext ( const CloseContextRequest * request , <nl> + CloseContextResponse * response ) ; <nl> + <nl> + Status RegisterFunction ( const RegisterFunctionRequest * request , <nl> + RegisterFunctionResponse * response ) ; <nl> + <nl> + protected : <nl> + / / This is the server - side execution context . All state regarding execution of <nl> + / / a client ' s ops is held in this server - side context ( all generated tensors , <nl> + / / and the EagerContext ) . <nl> + class ServerContext : public core : : RefCounted { <nl> + public : <nl> + explicit ServerContext ( std : : unique_ptr < tensorflow : : EagerContext > ctx ) <nl> + : ctx_ ( std : : move ( ctx ) ) { } <nl> + ~ ServerContext ( ) { <nl> + for ( const auto & entry : tensors_ ) { <nl> + entry . second - > Unref ( ) ; <nl> + } <nl> + } <nl> + <nl> + tensorflow : : EagerContext * Context ( ) const { return ctx_ . get ( ) ; } <nl> + <nl> + void AddOperationOutputs ( <nl> + const gtl : : ArraySlice < tensorflow : : TensorHandle * > & handles , <nl> + int64 operation_id ) { <nl> + mutex_lock l ( tensors_mu_ ) ; <nl> + for ( int i = 0 ; i < handles . size ( ) ; i + + ) { <nl> + / / TODO ( nareshmodi ) : Correctly handle operation_id not being unique . <nl> + tensors_ . emplace ( RemoteTensorHandleInternal ( operation_id , i ) , <nl> + handles [ i ] ) ; <nl> + } <nl> + } <nl> + <nl> + Status GetTensorHandle ( const RemoteTensorHandleInternal & remote_handle , <nl> + tensorflow : : TensorHandle * * handle ) { <nl> + mutex_lock l ( tensors_mu_ ) ; <nl> + auto iter = tensors_ . find ( remote_handle ) ; <nl> + if ( iter = = tensors_ . end ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Unable to find the relevant tensor remote_handle : Op ID : " , <nl> + remote_handle . op_id , " , Output num : " , remote_handle . output_num ) ; <nl> + } <nl> + <nl> + * handle = iter - > second ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status DeleteTensorHandle ( const RemoteTensorHandleInternal & remote_handle ) { <nl> + mutex_lock l ( tensors_mu_ ) ; <nl> + auto iter = tensors_ . find ( remote_handle ) ; <nl> + if ( iter = = tensors_ . end ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Unable to find the relevant tensor remote_handle : Op ID : " , <nl> + remote_handle . op_id , " , Output num : " , remote_handle . output_num ) ; <nl> + } <nl> + <nl> + iter - > second - > Unref ( ) ; <nl> + tensors_ . erase ( iter ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + private : <nl> + using RemoteTensorHandleMap = <nl> + gtl : : FlatMap < RemoteTensorHandleInternal , tensorflow : : TensorHandle * , <nl> + RemoteTensorHandleInternalHash , <nl> + RemoteTensorHandleInternalEquals > ; <nl> + <nl> + / / The context for this execution . 
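<nl> + / / Every handle in tensors_ below was produced by an op run against this <nl> + / / context , so the handles and the context share the lifetime of the <nl> + / / enclosing ServerContext .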
<nl> + std : : unique_ptr < tensorflow : : EagerContext > ctx_ ; <nl> + <nl> + mutex tensors_mu_ ; <nl> + RemoteTensorHandleMap tensors_ GUARDED_BY ( tensors_mu_ ) ; <nl> + } ; <nl> + / / The returned ServerContext will need to be Unrefed . <nl> + tensorflow : : Status GetServerContext ( uint64 , ServerContext * * ) ; <nl> + <nl> + private : <nl> + Status ExecuteOp ( const Operation & operation , ServerContext * server_context ) ; <nl> + const WorkerEnv * const env_ ; / / Not owned . <nl> + <nl> + mutex contexts_mu_ ; <nl> + std : : unordered_map < uint64 , ServerContext * > contexts_ GUARDED_BY ( contexts_mu_ ) ; <nl> + <nl> + TF_DISALLOW_COPY_AND_ASSIGN ( EagerServiceImpl ) ; <nl> + } ; <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_SERVICE_IMPL_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . f865ebe1be9c0 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / eager_service_impl_test . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_service_impl . h " <nl> + <nl> + # include < string . h > <nl> + <nl> + # include " tensorflow / c / c_api_internal . h " <nl> + # include " tensorflow / core / common_runtime / eager / tensor_handle . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / rpc_rendezvous_mgr . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_env . h " <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> + # include " tensorflow / core / lib / core / status_test_util . h " <nl> + # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / macros . h " <nl> + # include " tensorflow / core / platform / protobuf . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + # include " tensorflow / core / platform / test_benchmark . h " <nl> + # include " tensorflow / core / protobuf / eager_service . pb . h " <nl> + # include " tensorflow / core / protobuf / tensorflow_server . pb . 
h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + namespace { <nl> + <nl> + class TestEagerServiceImpl : public EagerServiceImpl { <nl> + public : <nl> + explicit TestEagerServiceImpl ( const WorkerEnv * env ) : EagerServiceImpl ( env ) { } <nl> + Status GetTensorHandle ( const uint64 context_id , <nl> + const RemoteTensorHandleInternal & remote_handle , <nl> + tensorflow : : TensorHandle * * handle ) { <nl> + ServerContext * context = nullptr ; <nl> + TF_RETURN_IF_ERROR ( GetServerContext ( context_id , & context ) ) ; <nl> + core : : ScopedUnref context_unref ( context ) ; <nl> + <nl> + return context - > GetTensorHandle ( remote_handle , handle ) ; <nl> + } <nl> + } ; <nl> + <nl> + void SetTensorProto ( AttrValue * val ) { <nl> + int64_t dims [ ] = { 2 , 2 } ; <nl> + float data [ ] = { 1 . 0f , 2 . 0f , 3 . 0f , 4 . 0f } ; <nl> + TF_Tensor * t = TF_AllocateTensor ( <nl> + TF_FLOAT , & dims [ 0 ] , sizeof ( dims ) / sizeof ( int64_t ) , sizeof ( data ) ) ; <nl> + memcpy ( TF_TensorData ( t ) , & data [ 0 ] , TF_TensorByteSize ( t ) ) ; <nl> + tensorflow : : Tensor tensor ; <nl> + TF_ASSERT_OK ( tensorflow : : TF_TensorToTensor ( t , & tensor ) ) ; <nl> + tensor . AsProtoTensorContent ( val - > mutable_tensor ( ) ) ; <nl> + TF_DeleteTensor ( t ) ; <nl> + } <nl> + <nl> + void AddOperationToEnqueueRequest ( <nl> + int64 id , const string & name , <nl> + const std : : vector < std : : pair < int64 , int32 > > & inputs , <nl> + const std : : unordered_map < string , AttrValue > & attrs , const string & device , <nl> + EnqueueRequest * request ) { <nl> + auto * operation = request - > add_queue ( ) - > mutable_operation ( ) ; <nl> + <nl> + operation - > set_id ( id ) ; <nl> + operation - > set_name ( name ) ; <nl> + operation - > set_device ( device ) ; <nl> + <nl> + for ( const auto & tensor_handle_pair : inputs ) { <nl> + auto * input = operation - > add_inputs ( ) ; <nl> + input - > set_op_id ( tensor_handle_pair . first ) ; <nl> + input - > set_output_num ( tensor_handle_pair . second ) ; <nl> + } <nl> + <nl> + for ( const auto & attr_entry : attrs ) { <nl> + ( * operation - > mutable_attrs ( ) ) [ attr_entry . first ] = attr_entry . second ; <nl> + } <nl> + } <nl> + <nl> + tensorflow : : FunctionDef MatMulFunction ( ) { <nl> + tensorflow : : FunctionDef def ; <nl> + CHECK ( tensorflow : : protobuf : : TextFormat : : ParseFromString ( <nl> + " signature { " <nl> + " name : ' MatMulFunction ' " <nl> + " input_arg { " <nl> + " name : ' a ' " <nl> + " type : DT_FLOAT " <nl> + " } " <nl> + " output_arg { " <nl> + " name : ' m ' " <nl> + " type : DT_FLOAT " <nl> + " } " <nl> + " } " <nl> + " node_def { " <nl> + " name : ' matmul ' " <nl> + " op : ' MatMul ' " <nl> + " input : ' a ' " <nl> + " input : ' a ' " <nl> + " attr { " <nl> + " key : ' T ' " <nl> + " value { " <nl> + " type : DT_FLOAT " <nl> + " } " <nl> + " } " <nl> + " } " <nl> + " ret { " <nl> + " key : ' m ' " <nl> + " value : ' matmul : product ' " <nl> + " } " , <nl> + & def ) ) ; <nl> + return def ; <nl> + } <nl> + <nl> + / / Test creates a context and attempts to execute some ops . <nl> + TEST ( EagerServiceImplTest , BasicTest ) { <nl> + WorkerEnv worker_env ; <nl> + worker_env . env = Env : : Default ( ) ; <nl> + tensorflow : : RpcRendezvousMgr rm ( & worker_env ) ; <nl> + worker_env . rendezvous_mgr = & rm ; <nl> + <nl> + TestEagerServiceImpl eager_service_impl ( & worker_env ) ; <nl> + <nl> + CreateContextRequest request ; <nl> + request . 
mutable_server_def ( ) - > set_job_name ( " localhost " ) ; <nl> + request . mutable_server_def ( ) - > set_task_index ( 0 ) ; <nl> + CreateContextResponse response ; <nl> + <nl> + TF_ASSERT_OK ( eager_service_impl . CreateContext ( & request , & response ) ) ; <nl> + <nl> + uint64 context_id = response . context_id ( ) ; <nl> + <nl> + EnqueueRequest remote_enqueue_request ; <nl> + remote_enqueue_request . set_context_id ( context_id ) ; <nl> + EnqueueResponse remote_enqueue_response ; <nl> + <nl> + std : : unordered_map < string , AttrValue > const_attrs ; <nl> + AttrValue val ; <nl> + val . set_type ( tensorflow : : DataType : : DT_FLOAT ) ; <nl> + const_attrs . insert ( { " dtype " , val } ) ; <nl> + val . Clear ( ) ; <nl> + SetTensorProto ( & val ) ; <nl> + const_attrs . insert ( { " value " , val } ) ; <nl> + <nl> + AddOperationToEnqueueRequest ( 1 , " Const " , { } , const_attrs , <nl> + " / job : localhost / replica : 0 / task : 0 / device : CPU : 0 " , <nl> + & remote_enqueue_request ) ; <nl> + <nl> + std : : unordered_map < string , AttrValue > attrs ; <nl> + val . Clear ( ) ; <nl> + val . set_type ( tensorflow : : DataType : : DT_FLOAT ) ; <nl> + attrs . insert ( { " T " , val } ) ; <nl> + val . Clear ( ) ; <nl> + val . set_b ( false ) ; <nl> + attrs . insert ( { " transpose_a " , val } ) ; <nl> + attrs . insert ( { " transpose_b " , val } ) ; <nl> + <nl> + AddOperationToEnqueueRequest ( 2 , " MatMul " , { { 1 , 0 } , { 1 , 0 } } , attrs , <nl> + " / job : localhost / replica : 0 / task : 0 / device : CPU : 0 " , <nl> + & remote_enqueue_request ) ; <nl> + <nl> + TF_ASSERT_OK ( eager_service_impl . Enqueue ( & remote_enqueue_request , <nl> + & remote_enqueue_response ) ) ; <nl> + <nl> + tensorflow : : TensorHandle * tensor_handle ; <nl> + TF_ASSERT_OK ( eager_service_impl . GetTensorHandle ( <nl> + response . context_id ( ) , RemoteTensorHandleInternal ( 2 , 0 ) , & tensor_handle ) ) ; <nl> + <nl> + / / This should be OK to do since we ' ve placed all computation on the CPU <nl> + / / device . <nl> + const tensorflow : : Tensor * t = nullptr ; <nl> + TF_ASSERT_OK ( tensor_handle - > Tensor ( & t ) ) ; <nl> + <nl> + auto actual = t - > flat < float > ( ) ; <nl> + <nl> + EXPECT_EQ ( 4 , actual . size ( ) ) ; <nl> + <nl> + EXPECT_EQ ( 7 , actual ( 0 ) ) ; <nl> + EXPECT_EQ ( 10 , actual ( 1 ) ) ; <nl> + EXPECT_EQ ( 15 , actual ( 2 ) ) ; <nl> + EXPECT_EQ ( 22 , actual ( 3 ) ) ; <nl> + <nl> + CloseContextRequest close_context_request ; <nl> + close_context_request . set_context_id ( context_id ) ; <nl> + CloseContextResponse close_context_response ; <nl> + TF_ASSERT_OK ( eager_service_impl . CloseContext ( & close_context_request , <nl> + & close_context_response ) ) ; <nl> + } <nl> + <nl> + / / Test creates a context and attempts to execute a function . <nl> + TEST ( EagerServiceImplTest , BasicFunctionTest ) { <nl> + WorkerEnv worker_env ; <nl> + worker_env . env = Env : : Default ( ) ; <nl> + tensorflow : : RpcRendezvousMgr rm ( & worker_env ) ; <nl> + worker_env . rendezvous_mgr = & rm ; <nl> + <nl> + TestEagerServiceImpl eager_service_impl ( & worker_env ) ; <nl> + <nl> + CreateContextRequest request ; <nl> + request . mutable_server_def ( ) - > set_job_name ( " localhost " ) ; <nl> + request . mutable_server_def ( ) - > set_task_index ( 0 ) ; <nl> + CreateContextResponse response ; <nl> + <nl> + TF_ASSERT_OK ( eager_service_impl . CreateContext ( & request , & response ) ) ; <nl> + <nl> + uint64 context_id = response . 
context_id ( ) ; <nl> + <nl> + RegisterFunctionRequest register_function_request ; <nl> + register_function_request . set_context_id ( context_id ) ; <nl> + * register_function_request . mutable_function_def ( ) = MatMulFunction ( ) ; <nl> + RegisterFunctionResponse register_function_response ; <nl> + <nl> + TF_ASSERT_OK ( eager_service_impl . RegisterFunction ( <nl> + & register_function_request , & register_function_response ) ) ; <nl> + <nl> + EnqueueRequest remote_enqueue_request ; <nl> + remote_enqueue_request . set_context_id ( context_id ) ; <nl> + EnqueueResponse remote_enqueue_response ; <nl> + <nl> + std : : unordered_map < string , AttrValue > const_attrs ; <nl> + AttrValue val ; <nl> + val . set_type ( tensorflow : : DataType : : DT_FLOAT ) ; <nl> + const_attrs . insert ( { " dtype " , val } ) ; <nl> + val . Clear ( ) ; <nl> + <nl> + SetTensorProto ( & val ) ; <nl> + const_attrs . insert ( { " value " , val } ) ; <nl> + <nl> + AddOperationToEnqueueRequest ( 1 , " Const " , { } , const_attrs , <nl> + " / job : localhost / replica : 0 / task : 0 / device : CPU : 0 " , <nl> + & remote_enqueue_request ) ; <nl> + AddOperationToEnqueueRequest ( <nl> + 2 , " MatMulFunction " , { { 1 , 0 } } , std : : unordered_map < string , AttrValue > ( ) , <nl> + " / job : localhost / replica : 0 / task : 0 / device : CPU : 0 " , & remote_enqueue_request ) ; <nl> + <nl> + TF_ASSERT_OK ( eager_service_impl . Enqueue ( & remote_enqueue_request , <nl> + & remote_enqueue_response ) ) ; <nl> + <nl> + const tensorflow : : Tensor * t = nullptr ; <nl> + tensorflow : : TensorHandle * tensor_handle ; <nl> + TF_ASSERT_OK ( eager_service_impl . GetTensorHandle ( <nl> + response . context_id ( ) , RemoteTensorHandleInternal ( 2 , 0 ) , & tensor_handle ) ) ; <nl> + TF_ASSERT_OK ( tensor_handle - > Tensor ( & t ) ) ; <nl> + <nl> + auto actual = t - > flat < float > ( ) ; <nl> + EXPECT_EQ ( 4 , actual . size ( ) ) ; <nl> + <nl> + EXPECT_EQ ( 7 , actual ( 0 ) ) ; <nl> + EXPECT_EQ ( 10 , actual ( 1 ) ) ; <nl> + EXPECT_EQ ( 15 , actual ( 2 ) ) ; <nl> + EXPECT_EQ ( 22 , actual ( 3 ) ) ; <nl> + <nl> + CloseContextRequest close_context_request ; <nl> + close_context_request . set_context_id ( context_id ) ; <nl> + CloseContextResponse close_context_response ; <nl> + TF_ASSERT_OK ( eager_service_impl . CloseContext ( & close_context_request , <nl> + & close_context_response ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . c4bd67aaedbec <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / remote_execute_node . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_EXECUTE_NODE_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_EXECUTE_NODE_H_ <nl> + <nl> + # include " tensorflow / core / common_runtime / eager / eager_executor . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_client . h " <nl> + # include " tensorflow / core / protobuf / eager_service . pb . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + / / RemoteExecuteNode is an implementation of EagerNode which enqueues an <nl> + / / operation via RPC in a remote EagerService . <nl> + class RemoteExecuteNode : public tensorflow : : EagerNode { <nl> + public : <nl> + RemoteExecuteNode ( tensorflow : : uint64 id , <nl> + const tensorflow : : eager : : EnqueueRequest & request , <nl> + tensorflow : : eager : : EagerClient * eager_client ) <nl> + : tensorflow : : EagerNode ( id ) , <nl> + request_ ( request ) , <nl> + eager_client_ ( eager_client ) { } <nl> + <nl> + tensorflow : : Status Run ( ) override { <nl> + tensorflow : : eager : : EnqueueResponse response ; <nl> + tensorflow : : Status status ; <nl> + Notification n ; <nl> + eager_client_ - > EnqueueAsync ( & request_ , & response , <nl> + [ & n , & status ] ( const tensorflow : : Status & s ) { <nl> + status . Update ( s ) ; <nl> + n . Notify ( ) ; <nl> + } ) ; <nl> + n . WaitForNotification ( ) ; <nl> + <nl> + return status ; <nl> + } <nl> + <nl> + private : <nl> + EnqueueRequest request_ ; <nl> + tensorflow : : eager : : EagerClient * <nl> + eager_client_ ; / / Not owned , and must outlive the RemoteExecuteNode . <nl> + } ; <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_EXECUTE_NODE_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 25ec062c03553 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / eager / remote_tensor_handle . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_TENSOR_HANDLE_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_TENSOR_HANDLE_H_ <nl> + <nl> + # include " tensorflow / core / platform / fingerprint . h " <nl> + # include " tensorflow / core / protobuf / eager_service . pb . 
h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + struct RemoteTensorHandleInternal { <nl> + explicit RemoteTensorHandleInternal ( const RemoteTensorHandle & tensor_handle ) <nl> + : op_id ( tensor_handle . op_id ( ) ) , output_num ( tensor_handle . output_num ( ) ) { } <nl> + RemoteTensorHandleInternal ( int64 op_id , int32 output_num ) <nl> + : op_id ( op_id ) , output_num ( output_num ) { } <nl> + int64 op_id ; <nl> + int32 output_num ; <nl> + } ; <nl> + <nl> + struct RemoteTensorHandleInternalHash { <nl> + std : : size_t operator ( ) ( const RemoteTensorHandleInternal & handle ) const { <nl> + return FingerprintCat64 ( handle . op_id , handle . output_num ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct RemoteTensorHandleInternalEquals { <nl> + bool operator ( ) ( const RemoteTensorHandleInternal & first , <nl> + const RemoteTensorHandleInternal & second ) const { <nl> + return first . op_id = = second . op_id & & first . output_num = = second . output_num ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_TENSOR_HANDLE_H_ <nl> mmm a / tensorflow / core / distributed_runtime / master_session . cc <nl> ppp b / tensorflow / core / distributed_runtime / master_session . cc <nl> class MasterSession : : ReffedClientGraph : public core : : RefCounted { <nl> / / it on / off and don ' t make use of the responses . <nl> for ( auto & p : partitions_ ) { <nl> LoggingRequest * req = new LoggingRequest ; <nl> - req - > set_rpc_logging ( active ) ; <nl> + if ( active ) { <nl> + req - > set_enable_rpc_logging ( true ) ; <nl> + } else { <nl> + req - > set_disable_rpc_logging ( true ) ; <nl> + } <nl> LoggingResponse * resp = new LoggingResponse ; <nl> Ref ( ) ; <nl> p . worker - > LoggingAsync ( req , resp , [ this , req , resp ] ( const Status & s ) { <nl> new file mode 100644 <nl> index 0000000000000 . . 1a3bd9d6bf0e4 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / BUILD <nl> <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + exports_files ( [ " LICENSE " ] ) <nl> + <nl> + package ( default_visibility = [ <nl> + " / / tensorflow : internal " , <nl> + ] ) <nl> + <nl> + cc_library ( <nl> + name = " grpc_eager_service " , <nl> + srcs = [ " grpc_eager_service . cc " ] , <nl> + hdrs = [ " grpc_eager_service . h " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " @ grpc / / : grpc + + _unsecure " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " grpc_eager_client " , <nl> + srcs = [ " grpc_eager_client . cc " ] , <nl> + hdrs = [ " grpc_eager_client . h " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : eager_service_proto_cc " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_client " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_channel " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_client_cq_tag " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_state " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_util " , <nl> + " / / tensorflow / core / distributed_runtime / rpc / eager : grpc_eager_service " , <nl> + " @ grpc / / : grpc + + _unsecure " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " grpc_eager_service_impl " , <nl> + srcs = [ " grpc_eager_service_impl . 
cc " ] , <nl> + hdrs = [ " grpc_eager_service_impl . h " ] , <nl> + deps = [ <nl> + " : grpc_eager_service " , <nl> + " / / tensorflow / core : framework " , <nl> + " / / tensorflow / core : ptr_util " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_service_impl " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_call " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_channel " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_server_lib " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_util " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_cache " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_service " , <nl> + " @ grpc / / : grpc + + _unsecure " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " eager_grpc_server_lib " , <nl> + hdrs = [ " eager_grpc_server_lib . h " ] , <nl> + deps = [ <nl> + " : grpc_eager_service_impl " , <nl> + " / / tensorflow / core : core_cpu " , <nl> + " / / tensorflow / core / distributed_runtime : rendezvous_mgr_interface " , <nl> + " / / tensorflow / core / distributed_runtime : worker_cache_wrapper " , <nl> + " / / tensorflow / core / distributed_runtime / eager : eager_service_impl " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_server_lib " , <nl> + " / / tensorflow / core / distributed_runtime / rpc : grpc_worker_service " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . f5dc4c831d04a <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / eager_grpc_server_lib . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_EAGER_GRPC_SERVER_LIB_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_EAGER_GRPC_SERVER_LIB_H_ <nl> + <nl> + # include " tensorflow / core / common_runtime / device_factory . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_service_impl . h " <nl> + # include " tensorflow / core / distributed_runtime / rendezvous_mgr_interface . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service_impl . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_worker_service . h " <nl> + # include " tensorflow / core / distributed_runtime / worker_cache_wrapper . 
h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + class EagerGrpcServer : public GrpcServer { <nl> + public : <nl> + static Status Create ( const ServerDef & server_def , <nl> + std : : unique_ptr < EagerGrpcServer > * server ) { <nl> + std : : unique_ptr < EagerGrpcServer > ret ( new EagerGrpcServer ( server_def ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( ret - > InitEager ( ) ) ; <nl> + <nl> + * server = std : : move ( ret ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Start ( ) override { <nl> + TF_RETURN_IF_ERROR ( GrpcServer : : Start ( ) ) ; <nl> + <nl> + eager_service_ - > Start ( ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Stop ( ) override { <nl> + TF_RETURN_IF_ERROR ( GrpcServer : : Stop ( ) ) ; <nl> + <nl> + eager_service_ - > Stop ( ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + using GrpcServer : : channel_cache ; <nl> + using GrpcServer : : master_env ; <nl> + using GrpcServer : : worker_env ; <nl> + <nl> + private : <nl> + EagerGrpcServer ( const ServerDef & server_def ) <nl> + : GrpcServer ( server_def , Env : : Default ( ) ) , <nl> + worker_name_ ( <nl> + strings : : StrCat ( " / job : " , server_def . job_name ( ) , <nl> + " / replica : 0 / task : " , server_def . task_index ( ) ) ) { } <nl> + <nl> + Status InitEager ( ) { <nl> + TF_RETURN_IF_ERROR ( this - > Init ( <nl> + [ this ] ( const WorkerEnv * worker_env , <nl> + : : grpc : : ServerBuilder * server_builder ) { <nl> + this - > eager_service_ . reset ( <nl> + new eager : : GrpcEagerServiceImpl ( worker_env , server_builder ) ) ; <nl> + } , <nl> + nullptr ) ) ; <nl> + <nl> + worker_session_ = WorkerSession : : CreateWithBorrowedDeviceMgr ( <nl> + " " , worker_name_ , <nl> + std : : unique_ptr < WorkerCacheInterface > ( <nl> + new WorkerCacheWrapper ( master_env ( ) - > worker_cache ) ) , <nl> + worker_env ( ) - > device_mgr , { } ) ; <nl> + <nl> + auto * r = worker_env ( ) - > rendezvous_mgr - > Find ( 0 ) ; <nl> + return r - > Initialize ( worker_session_ . get ( ) ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < GrpcEagerServiceImpl > eager_service_ ; <nl> + std : : shared_ptr < WorkerSession > worker_session_ ; <nl> + const string worker_name_ ; <nl> + } ; / / namespace eager <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_EAGER_GRPC_SERVER_LIB_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 4786c43ee2c51 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_client . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_client . h " <nl> + <nl> + # include " grpc + + / generic / generic_stub . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_client_cq_tag . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_state . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_util . h " <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> + # include " tensorflow / core / protobuf / eager_service . pb . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + namespace { <nl> + class GrpcEagerClient : public EagerClient { <nl> + public : <nl> + GrpcEagerClient ( const tensorflow : : SharedGrpcChannelPtr & channel , <nl> + : : grpc : : CompletionQueue * cq ) <nl> + : stub_ ( channel ) , cq_ ( cq ) { } <nl> + ~ GrpcEagerClient ( ) override { } <nl> + <nl> + # define CLIENT_METHOD ( method ) \ <nl> + void method # # Async ( const method # # Request * request , \ <nl> + method # # Response * response , StatusCallback done ) \ <nl> + override { \ <nl> + new RPCState < protobuf : : Message > ( \ <nl> + & stub_ , cq_ , " / tensorflow . eager . EagerService / " # method , * request , \ <nl> + response , std : : move ( done ) , nullptr ) ; \ <nl> + } <nl> + <nl> + CLIENT_METHOD ( CreateContext ) ; <nl> + CLIENT_METHOD ( Enqueue ) ; <nl> + CLIENT_METHOD ( WaitQueueDone ) ; <nl> + CLIENT_METHOD ( KeepAlive ) ; <nl> + CLIENT_METHOD ( CloseContext ) ; <nl> + CLIENT_METHOD ( RegisterFunction ) ; <nl> + <nl> + # undef CLIENT_METHOD <nl> + <nl> + private : <nl> + : : grpc : : GenericStub stub_ ; <nl> + : : grpc : : CompletionQueue * cq_ ; <nl> + } ; <nl> + <nl> + class GrpcEagerClientCache : public EagerClientCache { <nl> + public : <nl> + explicit GrpcEagerClientCache ( <nl> + std : : shared_ptr < tensorflow : : GrpcChannelCache > cache ) <nl> + : next_round_robin_assignment_ ( 0 ) , cache_ ( cache ) , threads_ ( 4 ) { } <nl> + <nl> + ~ GrpcEagerClientCache ( ) override { threads_ . clear ( ) ; } <nl> + <nl> + EagerClient * GetClient ( const string & target ) override { <nl> + auto it = clients_ . find ( target ) ; <nl> + if ( it = = clients_ . end ( ) ) { <nl> + tensorflow : : SharedGrpcChannelPtr shared = <nl> + cache_ - > FindWorkerChannel ( target ) ; <nl> + auto worker = std : : unique_ptr < EagerClient > ( new GrpcEagerClient ( <nl> + shared , threads_ [ AssignClientToThread ( target ) ] . completion_queue ( ) ) ) ; <nl> + <nl> + it = clients_ . emplace ( target , std : : move ( worker ) ) . first ; <nl> + } <nl> + <nl> + return it - > second . 
get ( ) ; <nl> + } <nl> + <nl> + private : <nl> + mutex assignment_mu_ ; <nl> + std : : unordered_map < std : : string , size_t > target_assignments_ <nl> + GUARDED_BY ( assignment_mu_ ) ; <nl> + size_t next_round_robin_assignment_ GUARDED_BY ( assignment_mu_ ) ; <nl> + <nl> + size_t AssignClientToThread ( const string & target ) { <nl> + / / Round - robin target assignment , but always keeps the same target on the <nl> + / / same polling thread , as this is important for gRPC performance . <nl> + mutex_lock lock ( assignment_mu_ ) ; <nl> + auto it = target_assignments_ . find ( target ) ; <nl> + if ( it = = target_assignments_ . end ( ) ) { <nl> + it = target_assignments_ <nl> + . insert ( std : : make_pair ( <nl> + target , ( next_round_robin_assignment_ + + ) % threads_ . size ( ) ) ) <nl> + . first ; <nl> + } <nl> + return it - > second ; <nl> + } <nl> + <nl> + class GrpcEagerClientThread { <nl> + public : <nl> + GrpcEagerClientThread ( ) { <nl> + thread_ . reset ( Env : : Default ( ) - > StartThread ( <nl> + ThreadOptions ( ) , " eager_client_thread " , [ this ] ( ) { <nl> + void * tag ; <nl> + bool ok ; <nl> + while ( completion_queue_ . Next ( & tag , & ok ) ) { <nl> + GrpcClientCQTag * callback_tag = <nl> + static_cast < GrpcClientCQTag * > ( tag ) ; <nl> + callback_tag - > OnCompleted ( ok ) ; <nl> + } <nl> + } ) ) ; <nl> + } <nl> + <nl> + ~ GrpcEagerClientThread ( ) { <nl> + completion_queue_ . Shutdown ( ) ; <nl> + thread_ . reset ( ) ; <nl> + } <nl> + <nl> + : : grpc : : CompletionQueue * completion_queue ( ) { return & completion_queue_ ; } <nl> + <nl> + private : <nl> + : : grpc : : CompletionQueue completion_queue_ ; <nl> + std : : unique_ptr < Thread > thread_ ; <nl> + } ; / / GrpcEagerClientThread <nl> + <nl> + std : : shared_ptr < tensorflow : : GrpcChannelCache > cache_ ; <nl> + std : : unordered_map < string , std : : unique_ptr < EagerClient > > clients_ ; <nl> + std : : vector < GrpcEagerClientThread > threads_ ; <nl> + } ; <nl> + <nl> + } / / namespace <nl> + <nl> + EagerClientCache * NewGrpcEagerClientCache ( <nl> + std : : shared_ptr < tensorflow : : GrpcChannelCache > channel ) { <nl> + return new GrpcEagerClientCache ( channel ) ; <nl> + } <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 8a926da488477 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_client . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_CLIENT_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_CLIENT_H_ <nl> + <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_client . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_channel . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + / / The GrpcChannelCache is not owned . <nl> + EagerClientCache * NewGrpcEagerClientCache ( <nl> + std : : shared_ptr < tensorflow : : GrpcChannelCache > channel ) ; <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_CLIENT_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 3fd7deaa868a9 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service . h " <nl> + <nl> + # include " grpc + + / impl / codegen / async_stream . h " <nl> + # include " grpc + + / impl / codegen / async_unary_call . h " <nl> + # include " grpc + + / impl / codegen / channel_interface . h " <nl> + # include " grpc + + / impl / codegen / client_unary_call . h " <nl> + # include " grpc + + / impl / codegen / method_handler_impl . h " <nl> + # include " grpc + + / impl / codegen / rpc_service_method . h " <nl> + # include " grpc + + / impl / codegen / service_type . h " <nl> + # include " grpc + + / impl / codegen / sync_stream . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + namespace grpc { <nl> + <nl> + static const char * grpcEagerService_method_names [ ] = { <nl> + " / tensorflow . eager . EagerService / CreateContext " , <nl> + " / tensorflow . eager . EagerService / Enqueue " , <nl> + " / tensorflow . eager . EagerService / WaitQueueDone " , <nl> + " / tensorflow . eager . EagerService / KeepAlive " , <nl> + " / tensorflow . eager . EagerService / CloseContext " , <nl> + " / tensorflow . eager . 
EagerService / RegisterFunction " , <nl> + } ; <nl> + <nl> + std : : unique_ptr < EagerService : : Stub > EagerService : : NewStub ( <nl> + const std : : shared_ptr < : : grpc : : ChannelInterface > & channel , <nl> + const : : grpc : : StubOptions & options ) { <nl> + std : : unique_ptr < EagerService : : Stub > stub ( new EagerService : : Stub ( channel ) ) ; <nl> + return stub ; <nl> + } <nl> + <nl> + EagerService : : Stub : : Stub ( <nl> + const std : : shared_ptr < : : grpc : : ChannelInterface > & channel ) <nl> + : channel_ ( channel ) , <nl> + rpcmethod_CreateContext_ ( grpcEagerService_method_names [ 0 ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , <nl> + channel ) , <nl> + rpcmethod_Enqueue_ ( grpcEagerService_method_names [ 1 ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , channel ) , <nl> + rpcmethod_WaitQueueDone_ ( grpcEagerService_method_names [ 2 ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , <nl> + channel ) , <nl> + rpcmethod_KeepAlive_ ( grpcEagerService_method_names [ 3 ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , channel ) , <nl> + rpcmethod_CloseContext_ ( grpcEagerService_method_names [ 4 ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , channel ) , <nl> + rpcmethod_RegisterFunction_ ( grpcEagerService_method_names [ 5 ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , <nl> + channel ) { } <nl> + <nl> + : : grpc : : Status EagerService : : Stub : : CreateContext ( <nl> + : : grpc : : ClientContext * context , const CreateContextRequest & request , <nl> + CreateContextResponse * response ) { <nl> + return : : grpc : : internal : : BlockingUnaryCall ( <nl> + channel_ . get ( ) , rpcmethod_CreateContext_ , context , request , response ) ; <nl> + } <nl> + <nl> + : : grpc : : Status EagerService : : Stub : : Enqueue ( : : grpc : : ClientContext * context , <nl> + const EnqueueRequest & request , <nl> + EnqueueResponse * response ) { <nl> + return : : grpc : : internal : : BlockingUnaryCall ( channel_ . get ( ) , rpcmethod_Enqueue_ , <nl> + context , request , response ) ; <nl> + } <nl> + <nl> + : : grpc : : Status EagerService : : Stub : : WaitQueueDone ( <nl> + : : grpc : : ClientContext * context , const WaitQueueDoneRequest & request , <nl> + WaitQueueDoneResponse * response ) { <nl> + return : : grpc : : internal : : BlockingUnaryCall ( <nl> + channel_ . get ( ) , rpcmethod_WaitQueueDone_ , context , request , response ) ; <nl> + } <nl> + <nl> + : : grpc : : Status EagerService : : Stub : : KeepAlive ( : : grpc : : ClientContext * context , <nl> + const KeepAliveRequest & request , <nl> + KeepAliveResponse * response ) { <nl> + return : : grpc : : internal : : BlockingUnaryCall ( <nl> + channel_ . get ( ) , rpcmethod_KeepAlive_ , context , request , response ) ; <nl> + } <nl> + <nl> + : : grpc : : Status EagerService : : Stub : : CloseContext ( <nl> + : : grpc : : ClientContext * context , const CloseContextRequest & request , <nl> + CloseContextResponse * response ) { <nl> + return : : grpc : : internal : : BlockingUnaryCall ( <nl> + channel_ . get ( ) , rpcmethod_CloseContext_ , context , request , response ) ; <nl> + } <nl> + <nl> + : : grpc : : Status EagerService : : Stub : : RegisterFunction ( <nl> + : : grpc : : ClientContext * context , const RegisterFunctionRequest & request , <nl> + RegisterFunctionResponse * response ) { <nl> + return : : grpc : : internal : : BlockingUnaryCall ( <nl> + channel_ . 
get ( ) , rpcmethod_RegisterFunction_ , context , request , response ) ; <nl> + } <nl> + <nl> + EagerService : : AsyncService : : AsyncService ( ) { <nl> + for ( int i = 0 ; i < 6 ; + + i ) { <nl> + AddMethod ( new : : grpc : : internal : : RpcServiceMethod ( <nl> + grpcEagerService_method_names [ i ] , <nl> + : : grpc : : internal : : RpcMethod : : NORMAL_RPC , nullptr ) ) ; <nl> + : : grpc : : Service : : MarkMethodAsync ( i ) ; <nl> + } <nl> + } <nl> + <nl> + EagerService : : AsyncService : : ~ AsyncService ( ) { } <nl> + <nl> + } / / namespace grpc <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . d7b192ac857a4 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_SERVICE_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_SERVICE_H_ <nl> + <nl> + # include " grpc + + / impl / codegen / async_stream . h " <nl> + # include " grpc + + / impl / codegen / async_unary_call . h " <nl> + # include " grpc + + / impl / codegen / proto_utils . h " <nl> + # include " grpc + + / impl / codegen / rpc_method . h " <nl> + # include " grpc + + / impl / codegen / service_type . h " <nl> + # include " grpc + + / impl / codegen / status . h " <nl> + # include " grpc + + / impl / codegen / stub_options . h " <nl> + # include " grpc + + / impl / codegen / sync_stream . h " <nl> + <nl> + # include " tensorflow / core / protobuf / eager_service . pb . h " <nl> + <nl> + namespace grpc { <nl> + class CompletionQueue ; <nl> + class Channel ; <nl> + class RpcService ; <nl> + class ServerCompletionQueue ; <nl> + class ServerContext ; <nl> + } / / namespace grpc <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + namespace grpc { <nl> + <nl> + / / GRPC stubs of ` tensorflow . eager . EagerService ` , based on the <nl> + / / definition in " / / tensorflow / core / protobuf / eager_service . proto " , <nl> + / / and the gRPC generated stub and service classes . <nl> + / / See that file for the definition of methods and messages . <nl> + / / Similar to the Master / Worker tensorflow GRPC services , this is not gen ' ned <nl> + / / via a rule , but included as an implementation directly . 
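<nl> + / / <nl> + / / A minimal client - side usage sketch ( the target address and the <nl> + / / insecure credentials below are hypothetical , for illustration only ) : <nl> + / / <nl> + / / auto channel = : : grpc : : CreateChannel ( <nl> + / / " localhost : 2222 " , : : grpc : : InsecureChannelCredentials ( ) ) ; <nl> + / / auto stub = EagerService : : NewStub ( channel ) ; <nl> + / / : : grpc : : ClientContext ctx ; <nl> + / / CreateContextRequest request ; <nl> + / / CreateContextResponse response ; <nl> + / / : : grpc : : Status s = stub - > CreateContext ( & ctx , request , & response ) ;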
<nl> + class EagerService final { <nl> + public : <nl> + class StubInterface { <nl> + public : <nl> + virtual ~ StubInterface ( ) { } <nl> + virtual : : grpc : : Status CreateContext ( : : grpc : : ClientContext * context , <nl> + const CreateContextRequest & request , <nl> + CreateContextResponse * response ) = 0 ; <nl> + virtual : : grpc : : Status Enqueue ( : : grpc : : ClientContext * context , <nl> + const EnqueueRequest & request , <nl> + EnqueueResponse * response ) = 0 ; <nl> + virtual : : grpc : : Status WaitQueueDone ( : : grpc : : ClientContext * context , <nl> + const WaitQueueDoneRequest & request , <nl> + WaitQueueDoneResponse * response ) = 0 ; <nl> + virtual : : grpc : : Status KeepAlive ( : : grpc : : ClientContext * context , <nl> + const KeepAliveRequest & request , <nl> + KeepAliveResponse * response ) = 0 ; <nl> + virtual : : grpc : : Status CloseContext ( : : grpc : : ClientContext * context , <nl> + const CloseContextRequest & request , <nl> + CloseContextResponse * response ) = 0 ; <nl> + virtual : : grpc : : Status RegisterFunction ( <nl> + : : grpc : : ClientContext * context , const RegisterFunctionRequest & request , <nl> + RegisterFunctionResponse * response ) = 0 ; <nl> + } ; <nl> + class Stub final : public StubInterface { <nl> + public : <nl> + Stub ( const std : : shared_ptr < : : grpc : : ChannelInterface > & channel ) ; <nl> + : : grpc : : Status CreateContext ( : : grpc : : ClientContext * context , <nl> + const CreateContextRequest & request , <nl> + CreateContextResponse * response ) override ; <nl> + : : grpc : : Status Enqueue ( : : grpc : : ClientContext * context , <nl> + const EnqueueRequest & request , <nl> + EnqueueResponse * response ) override ; <nl> + : : grpc : : Status WaitQueueDone ( : : grpc : : ClientContext * context , <nl> + const WaitQueueDoneRequest & request , <nl> + WaitQueueDoneResponse * response ) override ; <nl> + : : grpc : : Status KeepAlive ( : : grpc : : ClientContext * context , <nl> + const KeepAliveRequest & request , <nl> + KeepAliveResponse * response ) override ; <nl> + : : grpc : : Status CloseContext ( : : grpc : : ClientContext * context , <nl> + const CloseContextRequest & request , <nl> + CloseContextResponse * response ) override ; <nl> + : : grpc : : Status RegisterFunction ( <nl> + : : grpc : : ClientContext * context , const RegisterFunctionRequest & request , <nl> + RegisterFunctionResponse * response ) override ; <nl> + <nl> + private : <nl> + std : : shared_ptr < : : grpc : : ChannelInterface > channel_ ; <nl> + const : : grpc : : internal : : RpcMethod rpcmethod_CreateContext_ ; <nl> + const : : grpc : : internal : : RpcMethod rpcmethod_Enqueue_ ; <nl> + const : : grpc : : internal : : RpcMethod rpcmethod_WaitQueueDone_ ; <nl> + const : : grpc : : internal : : RpcMethod rpcmethod_KeepAlive_ ; <nl> + const : : grpc : : internal : : RpcMethod rpcmethod_CloseContext_ ; <nl> + const : : grpc : : internal : : RpcMethod rpcmethod_RegisterFunction_ ; <nl> + } ; <nl> + static std : : unique_ptr < Stub > NewStub ( <nl> + const std : : shared_ptr < : : grpc : : ChannelInterface > & channel , <nl> + const : : grpc : : StubOptions & options = : : grpc : : StubOptions ( ) ) ; <nl> + <nl> + class AsyncService : public : : grpc : : Service { <nl> + public : <nl> + AsyncService ( ) ; <nl> + virtual ~ AsyncService ( ) ; <nl> + void RequestCreateContext ( <nl> + : : grpc : : ServerContext * context , CreateContextRequest * request , <nl> + : : grpc : : ServerAsyncResponseWriter < CreateContextResponse > * response , <nl> + 
: : grpc : : CompletionQueue * new_call_cq , <nl> + : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncUnary ( 0 , context , request , response , <nl> + new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + void RequestEnqueue ( <nl> + : : grpc : : ServerContext * context , EnqueueRequest * request , <nl> + : : grpc : : ServerAsyncResponseWriter < EnqueueResponse > * response , <nl> + : : grpc : : CompletionQueue * new_call_cq , <nl> + : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncUnary ( 1 , context , request , response , <nl> + new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + void RequestWaitQueueDone ( <nl> + : : grpc : : ServerContext * context , WaitQueueDoneRequest * request , <nl> + : : grpc : : ServerAsyncResponseWriter < WaitQueueDoneResponse > * response , <nl> + : : grpc : : CompletionQueue * new_call_cq , <nl> + : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncUnary ( 2 , context , request , response , <nl> + new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + void RequestKeepAlive ( <nl> + : : grpc : : ServerContext * context , KeepAliveRequest * request , <nl> + : : grpc : : ServerAsyncResponseWriter < KeepAliveResponse > * response , <nl> + : : grpc : : CompletionQueue * new_call_cq , <nl> + : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncUnary ( 3 , context , request , response , <nl> + new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + void RequestCloseContext ( <nl> + : : grpc : : ServerContext * context , CloseContextRequest * request , <nl> + : : grpc : : ServerAsyncResponseWriter < CloseContextResponse > * response , <nl> + : : grpc : : CompletionQueue * new_call_cq , <nl> + : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncUnary ( 4 , context , request , response , <nl> + new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + void RequestRegisterFunction ( <nl> + : : grpc : : ServerContext * context , RegisterFunctionRequest * request , <nl> + : : grpc : : ServerAsyncResponseWriter < RegisterFunctionResponse > * response , <nl> + : : grpc : : CompletionQueue * new_call_cq , <nl> + : : grpc : : ServerCompletionQueue * notification_cq , void * tag ) { <nl> + : : grpc : : Service : : RequestAsyncUnary ( 5 , context , request , response , <nl> + new_call_cq , notification_cq , tag ) ; <nl> + } <nl> + } ; <nl> + } ; <nl> + <nl> + } / / namespace grpc <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_SERVICE_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . b36c6dce868e4 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service_impl . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service_impl . h " <nl> + <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_call . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_channel . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_util . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_worker_cache . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_worker_service . h " <nl> + # include " tensorflow / core / util / ptr_util . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + GrpcEagerServiceImpl : : GrpcEagerServiceImpl ( <nl> + const WorkerEnv * env , : : grpc : : ServerBuilder * server_builder ) <nl> + : local_impl_ ( env ) { <nl> + request_handler_threadpool_ = <nl> + MakeUnique < thread : : ThreadPool > ( env - > env , " EagerServiceRequestHandler " , 4 ) ; <nl> + server_builder - > RegisterService ( & service_ ) ; <nl> + cq_ = server_builder - > AddCompletionQueue ( ) ; <nl> + } <nl> + <nl> + void GrpcEagerServiceImpl : : DriveCQ ( ) { <nl> + # define ENQUEUE_REQUEST ( method ) \ <nl> + do { \ <nl> + Call < GrpcEagerServiceImpl , \ <nl> + tensorflow : : eager : : grpc : : EagerService : : AsyncService , method # # Request , \ <nl> + method # # Response > : : \ <nl> + EnqueueRequest ( & service_ , cq_ . get ( ) , \ <nl> + & grpc : : EagerService : : AsyncService : : Request # # method , \ <nl> + & GrpcEagerServiceImpl : : method # # Handler , false ) ; \ <nl> + } while ( 0 ) <nl> + ENQUEUE_REQUEST ( CreateContext ) ; <nl> + ENQUEUE_REQUEST ( Enqueue ) ; <nl> + ENQUEUE_REQUEST ( WaitQueueDone ) ; <nl> + ENQUEUE_REQUEST ( KeepAlive ) ; <nl> + ENQUEUE_REQUEST ( CloseContext ) ; <nl> + ENQUEUE_REQUEST ( RegisterFunction ) ; <nl> + # undef ENQUEUE_REQUEST <nl> + <nl> + void * tag ; / / Matches the operation started against this cq_ . <nl> + bool ok ; <nl> + <nl> + while ( true ) { <nl> + if ( ! cq_ - > Next ( & tag , & ok ) ) { <nl> + / / The queue is shutting down . 
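<nl> + / / ( cq_ - > Next ( ) returns false only after Shutdown ( ) has been called <nl> + / / and every pending tag has been drained from the queue . )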
<nl> + break ; <nl> + } <nl> + UntypedCall < GrpcEagerServiceImpl > : : Tag * callback_tag = <nl> + static_cast < UntypedCall < GrpcEagerServiceImpl > : : Tag * > ( tag ) ; <nl> + <nl> + if ( callback_tag ) { <nl> + callback_tag - > OnCompleted ( this , ok ) ; <nl> + } else { <nl> + cq_ - > Shutdown ( ) ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void GrpcEagerServiceImpl : : Start ( ) { <nl> + / / TODO ( nareshmodi ) separate thread for driving CQ <nl> + request_handler_threadpool_ - > Schedule ( [ this ] ( ) { DriveCQ ( ) ; } ) ; <nl> + } <nl> + <nl> + void GrpcEagerServiceImpl : : Stop ( ) { <nl> + / / This enqueues a special event ( with a null tag ) <nl> + / / that causes the completion queue to be shut down on the <nl> + / / polling thread . <nl> + shutdown_alarm_ = MakeUnique < : : grpc : : Alarm > ( <nl> + cq_ . get ( ) , gpr_now ( GPR_CLOCK_MONOTONIC ) , nullptr ) ; <nl> + } <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 65550caf64628 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service_impl . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_SERVICE_IMPL_H_ <nl> + # define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_SERVICE_IMPL_H_ <nl> + <nl> + # include " grpc + + / alarm . h " <nl> + # include " grpc + + / completion_queue . h " <nl> + # include " grpc + + / server_builder . h " <nl> + # include " tensorflow / core / distributed_runtime / eager / eager_service_impl . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / eager / grpc_eager_service . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_call . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_server_lib . h " <nl> + # include " tensorflow / core / distributed_runtime / rpc / grpc_util . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace eager { <nl> + <nl> + / / This class is a wrapper that handles communication for gRPC . 
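<nl> + / / <nl> + / / Expected lifecycle , as wired up by EagerGrpcServer ( a sketch ; the <nl> + / / worker_env and builder names are placeholders ) : <nl> + / / <nl> + / / : : grpc : : ServerBuilder builder ; <nl> + / / GrpcEagerServiceImpl impl ( worker_env , & builder ) ; <nl> + / / auto server = builder . BuildAndStart ( ) ; <nl> + / / impl . Start ( ) ; / / begins polling the completion queue <nl> + / / . . . <nl> + / / impl . Stop ( ) ; / / a null - tag alarm shuts the queue down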
<nl> + class GrpcEagerServiceImpl { <nl> + public : <nl> + template < class RequestMessage , class ResponseMessage > <nl> + using EagerCall = Call < GrpcEagerServiceImpl , grpc : : EagerService : : AsyncService , <nl> + RequestMessage , ResponseMessage > ; <nl> + <nl> + GrpcEagerServiceImpl ( const WorkerEnv * env , <nl> + : : grpc : : ServerBuilder * server_builder ) ; <nl> + virtual ~ GrpcEagerServiceImpl ( ) { } <nl> + <nl> + void Start ( ) ; <nl> + void Stop ( ) ; <nl> + <nl> + private : <nl> + # define HANDLER ( method ) \ <nl> + void method # # Handler ( EagerCall < method # # Request , method # # Response > * call ) { \ <nl> + request_handler_threadpool_ - > Schedule ( [ this , call ] ( ) { \ <nl> + call - > SendResponse ( \ <nl> + ToGrpcStatus ( local_impl_ . method ( & call - > request , & call - > response ) ) ) ; \ <nl> + } ) ; \ <nl> + Call < GrpcEagerServiceImpl , \ <nl> + tensorflow : : eager : : grpc : : EagerService : : AsyncService , method # # Request , \ <nl> + method # # Response > : : \ <nl> + EnqueueRequest ( & service_ , cq_ . get ( ) , \ <nl> + & grpc : : EagerService : : AsyncService : : Request # # method , \ <nl> + & GrpcEagerServiceImpl : : method # # Handler , false ) ; \ <nl> + } <nl> + HANDLER ( CreateContext ) ; <nl> + HANDLER ( Enqueue ) ; <nl> + HANDLER ( WaitQueueDone ) ; <nl> + HANDLER ( KeepAlive ) ; <nl> + HANDLER ( CloseContext ) ; <nl> + HANDLER ( RegisterFunction ) ; <nl> + # undef HANDLER <nl> + <nl> + EagerServiceImpl local_impl_ ; <nl> + <nl> + void DriveCQ ( ) ; <nl> + <nl> + std : : unique_ptr < : : grpc : : Alarm > shutdown_alarm_ ; <nl> + <nl> + std : : unique_ptr < : : grpc : : ServerCompletionQueue > cq_ ; <nl> + tensorflow : : eager : : grpc : : EagerService : : AsyncService service_ ; <nl> + <nl> + std : : unique_ptr < thread : : ThreadPool > request_handler_threadpool_ ; <nl> + <nl> + TF_DISALLOW_COPY_AND_ASSIGN ( GrpcEagerServiceImpl ) ; <nl> + } ; <nl> + <nl> + } / / namespace eager <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_SERVICE_IMPL_H_ <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_server_lib . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_server_lib . cc <nl> Status GrpcServer : : WorkerCacheFactory ( const WorkerCacheFactoryOptions & options , <nl> GrpcChannelSpec channel_spec ; <nl> TF_RETURN_IF_ERROR ( ParseChannelSpec ( options , & channel_spec ) ) ; <nl> <nl> - std : : shared_ptr < GrpcChannelCache > channel_cache ( <nl> + channel_cache_ . reset ( <nl> NewGrpcChannelCache ( channel_spec , GetChannelCreationFunction ( ) ) ) ; <nl> <nl> string name_prefix = strings : : StrCat ( " / job : " , * options . job_name , " / replica : 0 " , <nl> " / task : " , options . task_index ) ; <nl> <nl> - const string host_port = channel_cache - > TranslateTask ( name_prefix ) ; <nl> + const string host_port = channel_cache_ - > TranslateTask ( name_prefix ) ; <nl> int requested_port ; <nl> <nl> if ( ! strings : : safe_strto32 ( str_util : : Split ( host_port , ' : ' ) [ 1 ] , <nl> & requested_port ) ) { <nl> return errors : : Internal ( " Could not parse port for local server from \ " " , <nl> - channel_cache - > TranslateTask ( name_prefix ) , " \ " . " ) ; <nl> + channel_cache_ - > TranslateTask ( name_prefix ) , " \ " . " ) ; <nl> } <nl> if ( requested_port ! 
= bound_port_ ) { <nl> return errors : : InvalidArgument ( " Requested port " , requested_port , <nl> Status GrpcServer : : WorkerCacheFactory ( const WorkerCacheFactoryOptions & options , <nl> } <nl> <nl> * worker_cache = NewGrpcWorkerCacheWithLocalWorker ( <nl> - channel_cache , worker_impl_ . get ( ) , name_prefix ) ; <nl> + channel_cache_ , worker_impl_ . get ( ) , name_prefix ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_server_lib . h <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_server_lib . h <nl> class GrpcServer : public ServerInterface { <nl> int bound_port ( ) const { return bound_port_ ; } <nl> <nl> WorkerEnv * worker_env ( ) { return & worker_env_ ; } <nl> + MasterEnv * master_env ( ) { return & master_env_ ; } <nl> + <nl> + std : : shared_ptr < GrpcChannelCache > channel_cache ( ) { return channel_cache_ ; } <nl> <nl> const ServerDef & server_def ( ) const { return server_def_ ; } <nl> <nl> class GrpcServer : public ServerInterface { <nl> std : : unique_ptr < Master > master_impl_ ; <nl> AsyncServiceInterface * master_service_ = nullptr ; <nl> std : : unique_ptr < Thread > master_thread_ GUARDED_BY ( mu_ ) ; <nl> + std : : shared_ptr < GrpcChannelCache > channel_cache_ ; <nl> <nl> / / Implementation of a TensorFlow worker , and RPC polling thread . <nl> WorkerEnv worker_env_ ; <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_worker_service . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_worker_service . cc <nl> void GrpcWorker : : LoggingAsync ( const LoggingRequest * request , <nl> if ( env ) { <nl> auto session_mgr = env - > session_mgr ; <nl> if ( session_mgr ) { <nl> - session_mgr - > SetLogging ( request - > rpc_logging ( ) ) ; <nl> + if ( request - > enable_rpc_logging ( ) ) { <nl> + session_mgr - > SetLogging ( true ) ; <nl> + } <nl> + / / NOTE ( mrry ) : Handle old masters that disable RPC logging by setting <nl> + / / ` request - > enable_rpc_logging ` to ` false ` . <nl> + if ( request - > disable_rpc_logging ( ) | | <nl> + ( ! request - > enable_rpc_logging ( ) & & <nl> + request - > fetch_step_id_size ( ) = = 0 ) ) { <nl> + session_mgr - > SetLogging ( false ) ; <nl> + } <nl> for ( const auto & step_id : request - > fetch_step_id ( ) ) { <nl> session_mgr - > RetrieveLogs ( step_id , response ) ; <nl> } <nl> mmm a / tensorflow / core / framework / node_def_util . cc <nl> ppp b / tensorflow / core / framework / node_def_util . cc <nl> Status OutputTypeForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> node_def . name ( ) ) ; <nl> } <nl> <nl> + Status OutputTypesForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> + DataTypeVector * outputs ) { <nl> + for ( const auto & arg : op_def . output_arg ( ) ) { <nl> + TF_RETURN_IF_ERROR ( AddArgToSig ( node_def , arg , outputs ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status InOutTypesForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> DataTypeVector * inputs , DataTypeVector * outputs ) { <nl> for ( const auto & arg : op_def . input_arg ( ) ) { <nl> TF_RETURN_IF_ERROR ( AddArgToSig ( node_def , arg , inputs ) ) ; <nl> } <nl> - for ( const auto & arg : op_def . 
output_arg ( ) ) { <nl> - TF_RETURN_IF_ERROR ( AddArgToSig ( node_def , arg , outputs ) ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> + return OutputTypesForNode ( node_def , op_def , outputs ) ; <nl> } <nl> <nl> Status ValidateNodeDef ( const NodeDef & node_def , const OpDef & op_def ) { <nl> mmm a / tensorflow / core / framework / node_def_util . h <nl> ppp b / tensorflow / core / framework / node_def_util . h <nl> Status InputTypeForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> / / REQUIRES : ValidateOpDef ( op_def ) . ok ( ) <nl> Status OutputTypeForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> int output_port , DataType * output_type ) ; <nl> + / / Computes the output types for a specific node . <nl> + / / REQUIRES : ValidateOpDef ( op_def ) . ok ( ) <nl> + Status OutputTypesForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> + DataTypeVector * outputs ) ; <nl> / / Computes the input and output types for a specific node . <nl> / / REQUIRES : ValidateOpDef ( op_def ) . ok ( ) <nl> Status InOutTypesForNode ( const NodeDef & node_def , const OpDef & op_def , <nl> mmm a / tensorflow / core / grappler / optimizers / loop_optimizer . cc <nl> ppp b / tensorflow / core / grappler / optimizers / loop_optimizer . cc <nl> limitations under the License . <nl> # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> - # include " tensorflow / core / framework / tensor_shape . pb . h " <nl> # include " tensorflow / core / framework / types . h " <nl> # include " tensorflow / core / grappler / graph_view . h " <nl> # include " tensorflow / core / grappler / grappler_item . h " <nl> mmm a / tensorflow / core / grappler / utils / functions_test . cc <nl> ppp b / tensorflow / core / grappler / utils / functions_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / gtl / map_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> - # include " tensorflow / core / protobuf / meta_graph . pb . h " <nl> <nl> namespace tensorflow { <nl> namespace grappler { <nl> mmm a / tensorflow / core / kernels / batching_util / adaptive_shared_batch_scheduler . h <nl> ppp b / tensorflow / core / kernels / batching_util / adaptive_shared_batch_scheduler . h <nl> class AdaptiveSharedBatchScheduler <nl> / / for num_batch_threads allows for large in_flight_batches_limit_ , which <nl> / / will harm latency for some time once load increases again . <nl> int64 num_batch_threads = port : : NumSchedulableCPUs ( ) ; <nl> + / / Lower bound for in_flight_batches_limit_ . As discussed above , can be used <nl> + / / to minimize the damage caused by the random walk under low load . <nl> + int64 min_in_flight_batches_limit = 1 ; <nl> / / Although batch selection is primarily based on age , this parameter <nl> / / specifies a preference for larger batches . A full batch will be <nl> / / scheduled before an older , nearly empty batch as long as the age gap is <nl> Status AdaptiveSharedBatchScheduler < TaskType > : : Create ( <nl> return errors : : InvalidArgument ( " num_batch_threads must be positive ; was " , <nl> options . num_batch_threads ) ; <nl> } <nl> + if ( options . 
min_in_flight_batches_limit < 1 ) { <nl> + return errors : : InvalidArgument ( <nl> + " min_in_flight_batches_limit must be > = 1 ; was " , <nl> + options . min_in_flight_batches_limit ) ; <nl> + } <nl> + if ( options . min_in_flight_batches_limit > options . num_batch_threads ) { <nl> + return errors : : InvalidArgument ( <nl> + " min_in_flight_batches_limit ( " , options . min_in_flight_batches_limit , <nl> + " ) must be < = num_batch_threads ( " , options . num_batch_threads , " ) " ) ; <nl> + } <nl> if ( options . full_batch_scheduling_boost_micros < 0 ) { <nl> return errors : : InvalidArgument ( <nl> " full_batch_scheduling_boost_micros can ' t be negative ; was " , <nl> Status AdaptiveSharedBatchScheduler < TaskType > : : Create ( <nl> " ) should not be larger than num_batch_threads ( " , <nl> options . num_batch_threads , " ) " ) ; <nl> } <nl> - if ( options . initial_in_flight_batches_limit < 1 ) { <nl> - return errors : : InvalidArgument ( <nl> - " initial_in_flight_batches_limit should be " <nl> - " greater than or equal to 1 ; was " , <nl> - options . initial_in_flight_batches_limit ) ; <nl> + if ( options . initial_in_flight_batches_limit < <nl> + options . min_in_flight_batches_limit ) { <nl> + return errors : : InvalidArgument ( " initial_in_flight_batches_limit ( " , <nl> + options . initial_in_flight_batches_limit , <nl> + " ) must be > = min_in_flight_batches_limit ( " , <nl> + options . min_in_flight_batches_limit , " ) " ) ; <nl> } <nl> if ( options . batches_to_average_over < 1 ) { <nl> return errors : : InvalidArgument ( <nl> void AdaptiveSharedBatchScheduler < TaskType > : : CallbackWrapper ( <nl> in_flight_batches_limit_ = <nl> std : : min ( in_flight_batches_limit_ , <nl> static_cast < double > ( options_ . num_batch_threads ) ) ; <nl> - in_flight_batches_limit_ = std : : max ( in_flight_batches_limit_ , 1 . 0 ) ; <nl> + in_flight_batches_limit_ = <nl> + std : : max ( in_flight_batches_limit_ , <nl> + static_cast < double > ( options_ . min_in_flight_batches_limit ) ) ; <nl> last_avg_latency_ms_ = current_avg_latency_ms ; <nl> last_latency_decreased_ = current_latency_decreased ; <nl> batch_count_ = 0 ; <nl> mmm a / tensorflow / core / kernels / batching_util / adaptive_shared_batch_scheduler_test . cc <nl> ppp b / tensorflow / core / kernels / batching_util / adaptive_shared_batch_scheduler_test . cc <nl> TEST ( AdaptiveSharedBatchSchedulerTest , BadOptions ) { <nl> options = Scheduler : : Options ( ) ; <nl> options . batches_to_average_over = - 5 ; <nl> EXPECT_FALSE ( Scheduler : : Create ( options , & scheduler ) . ok ( ) ) ; <nl> + options = Scheduler : : Options ( ) ; <nl> + options . min_in_flight_batches_limit = 0 ; <nl> + EXPECT_FALSE ( Scheduler : : Create ( options , & scheduler ) . ok ( ) ) ; <nl> + options = Scheduler : : Options ( ) ; <nl> + options . min_in_flight_batches_limit = 5 ; <nl> + options . num_batch_threads = 3 ; <nl> + EXPECT_FALSE ( Scheduler : : Create ( options , & scheduler ) . ok ( ) ) ; <nl> + options = Scheduler : : Options ( ) ; <nl> + options . initial_in_flight_batches_limit = 1 ; <nl> + options . min_in_flight_batches_limit = 2 ; <nl> + options . num_batch_threads = 3 ; <nl> + EXPECT_FALSE ( Scheduler : : Create ( options , & scheduler ) . 
ok ( ) ) ; <nl> } <nl> <nl> TEST ( AdaptiveSharedBatchSchedulerTest , InFlightBatchesLimit ) { <nl> mmm a / tensorflow / core / kernels / hexagon / BUILD <nl> ppp b / tensorflow / core / kernels / hexagon / BUILD <nl> tf_kernel_library ( <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core / kernels : remote_fused_graph_execute_utils " , <nl> " / / third_party / eigen3 " , <nl> - " @ com_google_absl / / absl / memory " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / core / platform / default / build_config / BUILD <nl> ppp b / tensorflow / core / platform / default / build_config / BUILD <nl> cc_library ( <nl> copts = tf_copts ( ) , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " port " , <nl> + srcs = [ ] , <nl> + copts = tf_copts ( ) , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " protobuf " , <nl> + srcs = [ ] , <nl> + copts = tf_copts ( ) , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " env " , <nl> + srcs = [ ] , <nl> + copts = tf_copts ( ) , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " other " , <nl> + srcs = [ ] , <nl> + copts = tf_copts ( ) , <nl> + deps = [ <nl> + " @ com_googlesource_code_re2 / / : re2 " , <nl> + " @ farmhash_archive / / : farmhash " , <nl> + " @ fft2d " , <nl> + " @ highwayhash / / : sip_hash " , <nl> + " @ png_archive / / : png " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " platformlib " , <nl> copts = tf_copts ( ) , <nl> mmm a / tensorflow / core / protobuf / worker . proto <nl> ppp b / tensorflow / core / protobuf / worker . proto <nl> message RecvTensorResponse { <nl> / / Out - of - band request to begin or end logging , or <nl> / / to retrieve logs for particular steps . <nl> message LoggingRequest { <nl> - / / If true , RPC logging will be activated . <nl> - bool rpc_logging = 1 ; <nl> + / / If true , RPC logging will be enabled . <nl> + bool enable_rpc_logging = 1 ; <nl> + <nl> + / / If true , RPC logging will be disabled . <nl> + bool disable_rpc_logging = 4 ; <nl> <nl> / / If true , discard any saved logging data ( for all steps ) . <nl> bool clear = 2 ; <nl> mmm a / tensorflow / docs_src / install / install_linux . md <nl> ppp b / tensorflow / docs_src / install / install_linux . md <nl> Add this path to the ` LD_LIBRARY_PATH ` environmental variable : <nl> < code class = " devsite - terminal " > export LD_LIBRARY_PATH = $ { LD_LIBRARY_PATH : + $ { LD_LIBRARY_PATH } : } / usr / local / cuda / extras / CUPTI / lib64 < / code > <nl> < / pre > <nl> <nl> - For CUDA Toolkit < = 7 . 5 use : <nl> - <nl> - < pre class = " prettyprint lang - bsh " > <nl> - < code class = " devsite - terminal " > sudo apt - get install libcupti - dev < / code > <nl> - < / pre > <nl> - <nl> * * OPTIONAL * : For optimized performance during inference , install <nl> * NVIDIA & nbsp ; TensorRT & nbsp ; 3 . 0 * . To install the minimal amount of TensorRT <nl> runtime components required to use with the pre - built ` tensorflow - gpu ` package : <nl> mmm a / tensorflow / docs_src / install / install_sources . md <nl> ppp b / tensorflow / docs_src / install / install_sources . md <nl> The following NVIDIA < i > hardware < / i > must be installed on your system : <nl> <nl> The following NVIDIA < i > software < / i > must be installed on your system : <nl> <nl> - * [ CUDA Toolkit ] ( http : / / nvidia . com / cuda ) ( > = 7 . 0 ) . We recommend version 9 . 0 . <nl> + * [ CUDA Toolkit ] ( http : / / nvidia . com / cuda ) ( > = 8 . 0 ) . We recommend version 9 . 0 . 
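A quick <nl> way to confirm that the resulting binary was built with CUDA support ( an <nl> optional check , assuming the freshly built ` tensorflow ` pip package is the <nl> one that Python imports ) : <nl> <nl> < pre class = " prettyprint lang - bsh " > <nl> < code class = " devsite - terminal " > python - c " import tensorflow as tf ; print ( tf . test . is_built_with_cuda ( ) ) " < / code > <nl> < / pre > <nl> <nl> This prints ` True ` only when CUDA support is compiled in . 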
<nl> For details , see <nl> [ NVIDIA ' s documentation ] ( http : / / docs . nvidia . com / cuda / cuda - installation - guide - linux / ) . <nl> Ensure that you append the relevant CUDA pathnames to the <nl> The following NVIDIA < i > software < / i > must be installed on your system : <nl> NVIDIA documentation . <nl> * [ GPU drivers ] ( http : / / nvidia . com / driver ) supporting your version of the CUDA <nl> Toolkit . <nl> - * [ cuDNN SDK ] ( http : / / developer . nvidia . com / cudnn ) ( > = v3 ) . We recommend version 7 . 0 . For details , see <nl> + * [ cuDNN SDK ] ( http : / / developer . nvidia . com / cudnn ) ( > = 6 . 0 ) . We recommend version 7 . 0 . For details , see <nl> [ NVIDIA ' s documentation ] ( http : / / docs . nvidia . com / deeplearning / sdk / cudnn - install / ) . <nl> * [ CUPTI ] ( http : / / docs . nvidia . com / cuda / cupti / ) ships with the CUDA Toolkit , but <nl> you also need to append its path to the ` LD_LIBRARY_PATH ` environment <nl> plan on executing tasks directly with ` bazel ` , without the pip installation , <nl> you may need to install additional python packages . For example , you should <nl> ` pip install mock enum34 ` before running TensorFlow ' s tests with bazel . <nl> <nl> - # # # Optional : install TensorFlow for GPU prerequisites <nl> - <nl> - If you do not have brew installed , install it by following <nl> - [ these instructions ] ( http : / / brew . sh / ) . <nl> - <nl> - After installing brew , install GNU coreutils by issuing the following command : <nl> - <nl> - < pre > $ < b > brew install coreutils < / b > < / pre > <nl> - <nl> - If you want to compile tensorflow and have XCode 7 . 3 and CUDA 7 . 5 installed , <nl> - note that Xcode 7 . 3 is not yet compatible with CUDA 7 . 5 . To remedy this <nl> - problem , do either of the following : <nl> - <nl> - * Upgrade to CUDA 8 . 0 . <nl> - * Download Xcode 7 . 2 and select it as your default by issuing the following <nl> - command : <nl> - <nl> - < pre > $ < b > sudo xcode - select - s / Applications / Xcode - 7 . 2 / Xcode . app < / b > < / pre > <nl> - <nl> - * * NOTE : * * Your system must fulfill the NVIDIA software requirements described <nl> - in one of the following documents : <nl> - <nl> - * @ { $ install_linux # NVIDIARequirements $ Installing TensorFlow on Linux } <nl> - * @ { $ install_mac # NVIDIARequirements $ Installing TensorFlow on Mac OS } <nl> - <nl> - <nl> < a name = " ConfigureInstallation " > < / a > <nl> # # Configure the installation <nl> <nl> Do you wish to build TensorFlow with CUDA support ? [ y / N ] < b > Y < / b > <nl> CUDA support will be enabled for TensorFlow <nl> Do you want to use clang as CUDA compiler ? [ y / N ] <nl> nvcc will be used as CUDA compiler <nl> - Please specify the CUDA SDK version you want to use , e . g . 7 . 0 . [ Leave empty to default to CUDA 9 . 0 ] : < b > 9 . 0 < / b > <nl> + Please specify the CUDA SDK version you want to use . [ Leave empty to default to CUDA 9 . 0 ] : < b > 9 . 0 < / b > <nl> Please specify the location where CUDA 9 . 0 toolkit is installed . Refer to README . md for more details . [ Default is / usr / local / cuda ] : <nl> Please specify which gcc should be used by nvcc as the host compiler . [ Default is / usr / bin / gcc ] : <nl> Please specify the cuDNN version you want to use . [ Leave empty to default to cuDNN 7 . 
0 ] : < b > 7 < / b > <nl> mmm a / tensorflow / examples / android / BUILD <nl> ppp b / tensorflow / examples / android / BUILD <nl> cc_binary ( <nl> " - z defs " , <nl> " - s " , <nl> " - Wl , - - version - script " , # This line must be directly followed by LINKER_SCRIPT . <nl> - LINKER_SCRIPT , <nl> + " $ ( location { } ) " . format ( LINKER_SCRIPT ) , <nl> ] , <nl> linkshared = 1 , <nl> linkstatic = 1 , <nl> mmm a / tensorflow / java / BUILD <nl> ppp b / tensorflow / java / BUILD <nl> tf_cc_binary ( <nl> " - z defs " , <nl> " - s " , <nl> " - Wl , - - version - script " , # This line must be directly followed by LINKER_VERSION_SCRIPT <nl> - LINKER_VERSION_SCRIPT , <nl> + " $ ( location { } ) " . format ( LINKER_VERSION_SCRIPT ) , <nl> ] , <nl> } ) , <nl> linkshared = 1 , <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> tf_gen_op_wrapper_private_py ( <nl> visibility = [ <nl> " / / learning / brain / python / ops : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> + " / / tensorflow / python / training / checkpointable : __pkg__ " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : array_ops " , <nl> - " : checkpointable " , <nl> " : control_flow_ops " , <nl> " : dtypes " , <nl> " : framework_ops " , <nl> py_library ( <nl> " : util " , <nl> " / / tensorflow / core : protos_all_py " , <nl> " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / training / checkpointable : base " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> [ " training / * * / * . py " ] , <nl> exclude = [ <nl> " * * / * test * " , <nl> + " training / checkpointable / * * / * . py " , <nl> # The following targets have their own build rules ( same name as the <nl> # file ) : <nl> - " training / checkpointable . py " , <nl> " training / saveable_object . py " , <nl> " training / training_util . py " , <nl> ] , <nl> py_library ( <nl> " : array_ops " , <nl> " : array_ops_gen " , <nl> " : checkpoint_ops_gen " , <nl> - " : checkpointable " , <nl> " : client " , <nl> " : control_flow_ops " , <nl> " : data_flow_ops " , <nl> py_library ( <nl> " / / tensorflow / core : protos_all_py " , <nl> " / / tensorflow / python / eager : backprop " , <nl> " / / tensorflow / python / eager : context " , <nl> - " / / tensorflow / python / ops / losses " , <nl> # ` layers ` dependency only exists due to the use of a small utility . <nl> " / / tensorflow / python / keras : layers " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " checkpointable " , <nl> - srcs = [ " training / checkpointable . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : constant_op " , <nl> - " : control_flow_ops " , <nl> - " : dtypes " , <nl> - " : io_ops_gen " , <nl> - " : ops " , <nl> - " : saveable_object " , <nl> - " : util " , <nl> - " / / tensorflow / python / eager : context " , <nl> - ] , <nl> - ) <nl> - <nl> - py_test ( <nl> - name = " checkpointable_test " , <nl> - srcs = [ " training / checkpointable_test . 
py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : checkpointable " , <nl> - " : client_testlib " , <nl> + " / / tensorflow / python / ops / losses " , <nl> + " / / tensorflow / python / training / checkpointable : base " , <nl> + " / / tensorflow / python / training / checkpointable : util " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> - name = " checkpointable_utils_test " , <nl> - srcs = [ " training / checkpointable_utils_test . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - tags = [ <nl> - " no_windows " , # TODO : needs investigation on Windows <nl> - " notsan " , # b / 74395663 <nl> - ] , <nl> - deps = [ <nl> - " : checkpointable " , <nl> - " : constant_op " , <nl> - " : control_flow_ops " , <nl> - " : dtypes " , <nl> - " : framework_ops " , <nl> - " : framework_test_lib " , <nl> - " : init_ops " , <nl> - " : resource_variable_ops " , <nl> - " : session " , <nl> - " : state_ops " , <nl> - " : template " , <nl> - " : training " , <nl> - " : training_util " , <nl> - " : variable_scope " , <nl> - " / / tensorflow / python / eager : backprop " , <nl> - " / / tensorflow / python / eager : context " , <nl> - " / / tensorflow / python / eager : function " , <nl> - " / / tensorflow / python / eager : test " , <nl> - " / / tensorflow / python / keras : engine " , <nl> - " / / tensorflow / python / keras : layers " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> py_test ( <nl> name = " distribute_test " , <nl> size = " small " , <nl> py_library ( <nl> " : variable_scope " , <nl> " : variables " , <nl> " / / tensorflow / python / eager : context " , <nl> - " / / tensorflow / python / estimator : util " , <nl> " / / tensorflow / python / keras : engine " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> py_library ( <nl> " : variable_scope " , <nl> " : variables " , <nl> " / / tensorflow / python / eager : context " , <nl> - " / / tensorflow / python / estimator : util " , <nl> " / / tensorflow / python / keras : layers " , <nl> " / / third_party / py / numpy " , <nl> " @ six_archive / / : six " , <nl> mmm a / tensorflow / python / client / session_clusterspec_prop_test . py <nl> ppp b / tensorflow / python / client / session_clusterspec_prop_test . py <nl> <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . training import server_lib <nl> <nl> - ops . _USE_C_API = True <nl> <nl> # NOTE ( mrry ) : Dummy shape registration for ops used in the tests , since they <nl> # don ' t have C + + op registrations on which to attach C + + shape fns . <nl> def testClusterSpecPropagationWorker2Placement ( self ) : <nl> config = config_pb2 . ConfigProto ( cluster_def = cluster_def ) <nl> <nl> with ops . Graph ( ) . as_default ( ) as g , ops . device ( ' / job : worker / task : 1 ' ) : <nl> - with ops . device ( ' / cpu : 0 ' ) : <nl> + with ops . device ( ' / cpu : 0 ' ) : <nl> const = constant_op . constant ( 17 ) <nl> sess = session . Session ( server1 . target , config = config , graph = g ) <nl> run_options = config_pb2 . RunOptions ( <nl> def testClusterSpecPropagationIsolation ( self ) : <nl> with self . assertRaises ( errors . FailedPreconditionError ) : <nl> sess3 . run ( v ) <nl> <nl> - @ test_util . disable_c_api # Partial runs don ' t work with C API <nl> def testClusterSpecPropagationPartialRun ( self ) : <nl> " " " Test successful partial run with ClusterSpec propagation . " " " <nl> server1 = server_lib . Server . 
create_local_server ( ) <nl> mmm a / tensorflow / python / client / session_list_devices_test . py <nl> ppp b / tensorflow / python / client / session_list_devices_test . py <nl> <nl> from tensorflow . python . training import server_lib <nl> <nl> <nl> - class SessionListDevicesTestMethods ( object ) : <nl> - " " " Mixin with test methods . " " " <nl> + class SessionListDevicesTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testListDevices ( self ) : <nl> with session . Session ( ) as sess : <nl> def testListDevicesClusterSpecPropagation ( self ) : <nl> ' / job : worker / replica : 0 / task : 1 / device : CPU : 0 ' in device_names ) <nl> <nl> <nl> - class SessionListDevicesTest ( SessionListDevicesTestMethods , <nl> - test_util . TensorFlowTestCase ) : <nl> - " " " Test case that invokes test methods with _USE_C_API = False . " " " <nl> - <nl> - def setUp ( self ) : <nl> - self . prev_use_c_api = ops . _USE_C_API <nl> - ops . _USE_C_API = False <nl> - super ( SessionListDevicesTest , self ) . setUp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - ops . _USE_C_API = self . prev_use_c_api <nl> - super ( SessionListDevicesTest , self ) . tearDown ( ) <nl> - <nl> - <nl> - class SessionListDevicesWithCApiTest ( SessionListDevicesTestMethods , <nl> - test_util . TensorFlowTestCase ) : <nl> - " " " Test case that invokes test methods with _USE_C_API = True . " " " <nl> - <nl> - def setUp ( self ) : <nl> - self . prev_use_c_api = ops . _USE_C_API <nl> - ops . _USE_C_API = True <nl> - super ( SessionListDevicesWithCApiTest , self ) . setUp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - ops . _USE_C_API = self . prev_use_c_api <nl> - super ( SessionListDevicesWithCApiTest , self ) . tearDown ( ) <nl> - <nl> - <nl> if __name__ = = ' __main__ ' : <nl> googletest . main ( ) <nl> mmm a / tensorflow / python / client / session_partial_run_test . py <nl> ppp b / tensorflow / python / client / session_partial_run_test . py <nl> <nl> ops . RegisterShape ( ' ConstructionFails ' ) ( common_shapes . unknown_shape ) <nl> <nl> <nl> - class PartialRunTestMethods ( object ) : <nl> + class PartialRunTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def RunTestPartialRun ( self , sess ) : <nl> a = array_ops . placeholder ( dtypes . float32 , shape = [ ] ) <nl> def testPartialRunEmptyFetchesDist ( self ) : <nl> self . RunTestPartialRunEmptyFetches ( session . Session ( server . target ) ) <nl> <nl> <nl> - class PartialRunTest ( PartialRunTestMethods , test_util . TensorFlowTestCase ) : <nl> - " " " Test case that invokes test methods with _USE_C_API = False . " " " <nl> - <nl> - def setUp ( self ) : <nl> - self . prev_use_c_api = ops . _USE_C_API <nl> - ops . _USE_C_API = False <nl> - super ( PartialRunTest , self ) . setUp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - ops . _USE_C_API = self . prev_use_c_api <nl> - super ( PartialRunTest , self ) . tearDown ( ) <nl> - <nl> - <nl> - class PartialRunWithCApiTest ( PartialRunTestMethods , <nl> - test_util . TensorFlowTestCase ) : <nl> - " " " Test case that invokes test methods with _USE_C_API = True . " " " <nl> - <nl> - def setUp ( self ) : <nl> - self . prev_use_c_api = ops . _USE_C_API <nl> - ops . _USE_C_API = True <nl> - super ( PartialRunWithCApiTest , self ) . setUp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - ops . _USE_C_API = self . prev_use_c_api <nl> - super ( PartialRunWithCApiTest , self ) . tearDown ( ) <nl> - <nl> - <nl> if __name__ = = ' __main__ ' : <nl> googletest . 
main ( ) <nl> mmm a / tensorflow / python / client / session_test . py <nl> ppp b / tensorflow / python / client / session_test . py <nl> <nl> ops . RegisterShape ( ' ConstructionFails ' ) ( common_shapes . unknown_shape ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SessionTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def setUp ( self ) : <nl> def exc_predicate ( e ) : <nl> # Run with a bogus handle . <nl> s . partial_run ( ' foo ' , r1 , feed_dict = { a : 1 , b : 2 } ) <nl> <nl> - def testOpConstructionErrorPayload ( self ) : <nl> - if ops . _USE_C_API : <nl> - return # No shape registration for ' ConstructionFails ' <nl> - <nl> - with session . Session ( ) : <nl> - failing_op = ops . get_default_graph ( ) . create_op ( <nl> - ' ConstructionFails ' , [ ] , [ ] , name = ' f ' ) <nl> - <nl> - def exc_predicate ( e ) : <nl> - return ( e . op = = failing_op and <nl> - e . error_code = = error_codes_pb2 . INVALID_ARGUMENT ) <nl> - <nl> - with self . assertRaisesOpError ( exc_predicate ) : <nl> - failing_op . run ( ) <nl> - <nl> def testErrorBasedOn ( self ) : <nl> with session . Session ( ) as sess : <nl> a = constant_op . constant ( 0 . 0 , shape = [ 2 , 3 ] ) <nl> def run_loop ( ) : <nl> if gdef is None : <nl> gdef = graph . as_graph_def ( ) <nl> else : <nl> - # NOTE ( skyewm ) : import_graph_def breaks the running threads without <nl> - # the C API enabled . This is not a regression so I didn ' t fix it . <nl> - if ops . _USE_C_API : <nl> - importer . import_graph_def ( gdef , name = ' import ' ) <nl> + importer . import_graph_def ( gdef , name = ' import ' ) <nl> <nl> stop . set ( ) <nl> for t in threads : <nl> mmm a / tensorflow / python / client / virtual_gpu_test . py <nl> ppp b / tensorflow / python / client / virtual_gpu_test . py <nl> def TestRandomGraph ( self , sess , op_placement = None , random_seed = None ) : <nl> return True <nl> <nl> <nl> - @ test_util . with_c_api <nl> class VirtualGpuTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def __init__ ( self , method_name ) : <nl> mmm a / tensorflow / python / data / kernel_tests / dataset_from_generator_op_test . py <nl> ppp b / tensorflow / python / data / kernel_tests / dataset_from_generator_op_test . py <nl> <nl> <nl> class DatasetConstructorTest ( test . TestCase ) : <nl> <nl> - def _testFromGenerator ( self , generator , elem_sequence , num_repeats ) : <nl> + def _testFromGenerator ( self , generator , elem_sequence , num_repeats , <nl> + output_types = None ) : <nl> + if output_types is None : <nl> + output_types = dtypes . int64 <nl> iterator = ( <nl> - dataset_ops . Dataset . from_generator ( generator , output_types = dtypes . int64 ) <nl> + dataset_ops . Dataset . from_generator ( generator , output_types = output_types ) <nl> . repeat ( num_repeats ) <nl> . prefetch ( 5 ) <nl> . make_initializable_iterator ( ) ) <nl> def testFromGeneratorUsingList ( self ) : <nl> def testFromGeneratorUsingNdarray ( self ) : <nl> generator = lambda : np . arange ( 100 , dtype = np . int64 ) <nl> elem_sequence = list ( generator ( ) ) <nl> - self . _testFromGenerator ( generator , elem_sequence , 1 ) <nl> - self . _testFromGenerator ( generator , elem_sequence , 5 ) <nl> + self . _testFromGenerator ( generator , elem_sequence , 1 , output_types = np . int64 ) <nl> + self . _testFromGenerator ( generator , elem_sequence , 5 , output_types = np . 
int64 ) <nl> <nl> def testFromGeneratorUsingGeneratorExpression ( self ) : <nl> # NOTE ( mrry ) : Generator * expressions * are not repeatable ( or in <nl> mmm a / tensorflow / python / data / ops / dataset_ops . py <nl> ppp b / tensorflow / python / data / ops / dataset_ops . py <nl> def gen ( ) : <nl> else : <nl> args = tuple ( ops . convert_n_to_tensor ( args , name = " args " ) ) <nl> <nl> - flattened_types = nest . flatten ( output_types ) <nl> + flattened_types = [ dtypes . as_dtype ( dt ) for dt in nest . flatten ( output_types ) ] <nl> flattened_shapes = nest . flatten ( output_shapes ) <nl> <nl> generator_state = Dataset . _GeneratorState ( generator ) <nl> mmm a / tensorflow / python / eager / backprop . py <nl> ppp b / tensorflow / python / eager / backprop . py <nl> <nl> from tensorflow . python . ops import gen_array_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> + from tensorflow . python . platform import tf_logging as logging <nl> from tensorflow . python . util import nest <nl> from tensorflow . python . util import tf_inspect <nl> from tensorflow . python . util . tf_export import tf_export <nl> class GradientTape ( object ) : <nl> be computed as : <nl> <nl> ` ` ` python <nl> - x = tf . constant ( 3 . ) <nl> - with tfe . GradientTape ( ) as g : <nl> + x = tf . constant ( 3 . 0 ) <nl> + with tf . GradientTape ( ) as g : <nl> g . watch ( x ) <nl> y = x * x <nl> - grad = g . gradient ( y , [ x ] ) [ 0 ] # Will compute to 6 . 0 <nl> + dy_dx = g . gradient ( y , x ) # Will compute to 6 . 0 <nl> ` ` ` <nl> <nl> GradientTapes can be nested to compute higher - order derivatives . For example , <nl> class GradientTape ( object ) : <nl> <nl> ` ` ` python <nl> x = tf . constant ( 3 . 0 ) <nl> - with tfe . GradientTape ( persistent = True ) as g : <nl> + with tf . GradientTape ( persistent = True ) as g : <nl> g . watch ( x ) <nl> y = x * x <nl> z = y * y <nl> def __init__ ( self , persistent = False ) : <nl> " " " <nl> self . _tape = None <nl> self . _persistent = persistent <nl> + self . _recording = False <nl> <nl> def __enter__ ( self ) : <nl> - self . _tape = tape . push_new_tape ( persistent = self . _persistent ) <nl> + " " " Enters a context inside which operations are recorded on this tape . " " " <nl> + self . _start_recording ( ) <nl> return self <nl> <nl> def __exit__ ( self , typ , value , traceback ) : <nl> + " " " Exits the recording context , no further operations are traced . " " " <nl> + if self . _recording : <nl> + self . _stop_recording ( ) <nl> + <nl> + def _start_recording ( self ) : <nl> + if self . _recording : <nl> + raise ValueError ( " Tape is already recording . " ) <nl> + self . _tape = tape . push_new_tape ( persistent = self . _persistent ) <nl> + self . _recording = True <nl> + <nl> + def _stop_recording ( self ) : <nl> + if not self . _recording : <nl> + raise ValueError ( " Tape is not recording . " ) <nl> tape . pop_tape ( self . _tape ) <nl> + self . _recording = False <nl> <nl> def watch ( self , tensor ) : <nl> " " " Ensures that ` tensor ` is being traced by this tape . <nl> def gradient ( self , target , sources , output_gradients = None ) : <nl> than once on a non - persistent tape . <nl> " " " <nl> if self . _tape is None : <nl> - raise RuntimeError ( " GradientTape . gradient can only be called once " <nl> - " on non - persistent tapes , and " <nl> - " only when the context manager has exited . " ) <nl> + raise RuntimeError ( " GradientTape . 
gradient can only be called once on " <nl> + " non - persistent tapes . " ) <nl> + if self . _recording : <nl> + if not self . _persistent : <nl> + self . _stop_recording ( ) <nl> + else : <nl> + logging . log_first_n ( logging . WARN , <nl> + " Calling GradientTape . gradient on a persistent " <nl> + " tape inside its context is significantly less " <nl> + " efficient than calling it outside the context ( it " <nl> + " causes the gradient ops to be recorded on the " <nl> + " tape , leading to increased CPU and memory usage ) . " <nl> + " Only call GradientTape . gradient inside the " <nl> + " context if you actually want to trace the " <nl> + " gradient in order to compute higher order " <nl> + " derivatives . " , 1 ) <nl> + <nl> flat_sources = nest . flatten ( sources ) <nl> flat_sources = [ _handle_or_self ( x ) for x in flat_sources ] <nl> <nl> mmm a / tensorflow / python / eager / backprop_test . py <nl> ppp b / tensorflow / python / eager / backprop_test . py <nl> def testGradientWithinTapeBlock ( self ) : <nl> self . evaluate ( v1 . initializer ) <nl> with backprop . GradientTape ( ) as t : <nl> loss = 2 * v1 <nl> - with self . assertRaises ( RuntimeError ) : <nl> - t . gradient ( loss , [ v1 ] ) <nl> + grad = t . gradient ( loss , v1 ) <nl> + self . assertAllEqual ( self . evaluate ( grad ) , 2 . 0 ) <nl> + <nl> with backprop . GradientTape ( persistent = True ) as t : <nl> loss = 2 * v1 <nl> - grad = t . gradient ( loss , [ v1 ] ) <nl> - self . assertAllEqual ( self . evaluate ( grad [ 0 ] ) , 2 . 0 ) <nl> + grad = t . gradient ( loss , v1 ) <nl> + self . assertAllEqual ( self . evaluate ( grad ) , 2 . 0 ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNestedSelfContexts ( self ) : <nl> + v1 = resource_variable_ops . ResourceVariable ( 1 . ) <nl> + self . evaluate ( v1 . initializer ) <nl> + with backprop . GradientTape ( ) as t : <nl> + with self . assertRaises ( ValueError ) : <nl> + with t : <nl> + pass <nl> <nl> @ test_util . assert_no_new_tensors <nl> def testSecondGrad ( self ) : <nl> def testPersistentTape ( self ) : <nl> self . assertEqual ( self . evaluate ( dy_dx ) , 2 * 3 ) <nl> del g <nl> <nl> + @ test_util . assert_no_new_tensors <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testHigherOrderGradient ( self ) : <nl> + with backprop . GradientTape ( persistent = True ) as g : <nl> + x = constant_op . constant ( 3 . 0 ) <nl> + g . watch ( x ) <nl> + y = x * * 3 # y : = x ^ 3 <nl> + dy_dx = g . gradient ( y , x ) # dy / dx : = 3x ^ 2 <nl> + d2y_dx2 = g . gradient ( dy_dx , x ) # d2y / dx2 : = 6x <nl> + d3y_dx3 = g . gradient ( d2y_dx2 , x ) # d3y / dx3 : = 6 <nl> + x = 3 <nl> + self . assertEqual ( self . evaluate ( y ) , x * * 3 ) <nl> + self . assertEqual ( self . evaluate ( dy_dx ) , 3 * x * * 2 ) <nl> + self . assertEqual ( self . evaluate ( d2y_dx2 ) , 6 * x ) <nl> + self . assertEqual ( self . evaluate ( d3y_dx3 ) , 6 ) <nl> + del g <nl> + <nl> @ test_util . assert_no_new_tensors <nl> @ test_util . 
run_in_graph_and_eager_modes ( ) <nl> def testPersistentNestedTape ( self ) : <nl> mmm a / tensorflow / python / estimator / BUILD <nl> ppp b / tensorflow / python / estimator / BUILD <nl> py_library ( <nl> " : gc " , <nl> " / / tensorflow / python : errors " , <nl> " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python / estimator : metric_keys " , <nl> + " / / tensorflow / python / estimator : util " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / python / estimator / canned / boosted_trees . py <nl> ppp b / tensorflow / python / estimator / canned / boosted_trees . py <nl> def _get_transformed_features ( features , sorted_feature_columns ) : <nl> <nl> Args : <nl> features : a dictionary of name to Tensor . <nl> - feature_columns : a list / set of tf . feature_column . <nl> + sorted_feature_columns : a list / set of tf . feature_column , sorted by name . <nl> <nl> Returns : <nl> result_features : a list of the transformed features , sorted by the name . <nl> def __init__ ( self , example_ids , logits_dimension ) : <nl> elif dtypes . as_dtype ( dtypes . string ) . is_compatible_with ( example_ids . dtype ) : <nl> empty_key = ' ' <nl> else : <nl> - raise ValueError ( ' Unsupported example_id_feature dtype % s . ' , <nl> + raise ValueError ( ' Unsupported example_id_feature dtype % s . ' % <nl> example_ids . dtype ) <nl> # Cache holds latest < tree_id , node_id , logits > for each example . <nl> # tree_id and node_id are both int32 but logits is a float32 . <nl> def _grad_and_hess_for_logloss ( logits , labels ) : <nl> predictions = math_ops . reciprocal ( math_ops . exp ( - logits ) + 1 . 0 ) <nl> normalizer = math_ops . reciprocal ( <nl> math_ops . cast ( array_ops . size ( predictions ) , dtypes . float32 ) ) <nl> + labels = math_ops . cast ( labels , dtypes . float32 ) <nl> gradients = ( predictions - labels ) * normalizer <nl> hessians = predictions * ( 1 . 0 - predictions ) * normalizer <nl> return gradients , hessians <nl> mmm a / tensorflow / python / estimator / canned / boosted_trees_test . py <nl> ppp b / tensorflow / python / estimator / canned / boosted_trees_test . py <nl> def testInferBinaryClassifier ( self ) : <nl> self . assertAllClose ( [ [ 0 ] , [ 1 ] , [ 1 ] , [ 0 ] , [ 0 ] ] , <nl> [ pred [ ' class_ids ' ] for pred in predictions ] ) <nl> <nl> + def testTrainClassifierWithLabelVocabulary ( self ) : <nl> + apple , banana = ' apple ' , ' banana ' <nl> + def _input_fn_with_label_vocab ( ) : <nl> + return FEATURES_DICT , [ [ apple ] , [ banana ] , [ banana ] , [ apple ] , [ apple ] ] <nl> + predict_input_fn = numpy_io . numpy_input_fn ( <nl> + x = FEATURES_DICT , y = None , batch_size = 1 , num_epochs = 1 , shuffle = False ) <nl> + <nl> + est = boosted_trees . BoostedTreesClassifier ( <nl> + feature_columns = self . _feature_columns , <nl> + n_batches_per_layer = 1 , <nl> + n_trees = 1 , <nl> + max_depth = 5 , <nl> + label_vocabulary = [ apple , banana ] ) <nl> + est . train ( input_fn = _input_fn_with_label_vocab , steps = 5 ) <nl> + self . _assert_checkpoint ( <nl> + est . model_dir , global_step = 5 , finalized_trees = 1 , attempted_layers = 5 ) <nl> + eval_res = est . evaluate ( input_fn = _input_fn_with_label_vocab , steps = 1 ) <nl> + self . assertAllClose ( eval_res [ ' accuracy ' ] , 1 . 0 ) <nl> + predictions = list ( est . predict ( input_fn = predict_input_fn ) ) <nl> + self . 
assertAllClose ( [ [ 0 ] , [ 1 ] , [ 1 ] , [ 0 ] , [ 0 ] ] , <nl> + [ pred [ ' class_ids ' ] for pred in predictions ] ) <nl> + <nl> + def testTrainClassifierWithIntegerLabel ( self ) : <nl> + def _input_fn_with_integer_label ( ) : <nl> + return ( FEATURES_DICT , <nl> + constant_op . constant ( [ [ 0 ] , [ 1 ] , [ 1 ] , [ 0 ] , [ 0 ] ] , dtypes . int32 ) ) <nl> + predict_input_fn = numpy_io . numpy_input_fn ( <nl> + x = FEATURES_DICT , y = None , batch_size = 1 , num_epochs = 1 , shuffle = False ) <nl> + <nl> + est = boosted_trees . BoostedTreesClassifier ( <nl> + feature_columns = self . _feature_columns , <nl> + n_batches_per_layer = 1 , <nl> + n_trees = 1 , <nl> + max_depth = 5 ) <nl> + est . train ( input_fn = _input_fn_with_integer_label , steps = 5 ) <nl> + self . _assert_checkpoint ( <nl> + est . model_dir , global_step = 5 , finalized_trees = 1 , attempted_layers = 5 ) <nl> + eval_res = est . evaluate ( input_fn = _input_fn_with_integer_label , steps = 1 ) <nl> + self . assertAllClose ( eval_res [ ' accuracy ' ] , 1 . 0 ) <nl> + predictions = list ( est . predict ( input_fn = predict_input_fn ) ) <nl> + self . assertAllClose ( [ [ 0 ] , [ 1 ] , [ 1 ] , [ 0 ] , [ 0 ] ] , <nl> + [ pred [ ' class_ids ' ] for pred in predictions ] ) <nl> + <nl> def testTrainClassifierWithDataset ( self ) : <nl> train_input_fn = _make_train_input_fn_dataset ( is_classification = True ) <nl> predict_input_fn = numpy_io . numpy_input_fn ( <nl> mmm a / tensorflow / python / estimator / exporter . py <nl> ppp b / tensorflow / python / estimator / exporter . py <nl> <nl> import os <nl> <nl> from tensorflow . python . estimator import gc <nl> + from tensorflow . python . estimator import util <nl> + from tensorflow . python . estimator . canned import metric_keys <nl> from tensorflow . python . framework import errors_impl <nl> from tensorflow . python . platform import gfile <nl> from tensorflow . python . platform import tf_logging <nl> + from tensorflow . python . summary import summary_iterator <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> <nl> def export ( self , estimator , export_path , checkpoint_path , eval_result , <nl> return export_result <nl> <nl> <nl> + def _loss_smaller ( best_eval_result , current_eval_result ) : <nl> + " " " Compares two evaluation results and returns true if the 2nd one is smaller . <nl> + <nl> + Both evaluation results should have the values for MetricKeys . LOSS , which are <nl> + used for comparison . <nl> + <nl> + Args : <nl> + best_eval_result : best eval metrics . <nl> + current_eval_result : current eval metrics . <nl> + <nl> + Returns : <nl> + True if the loss of current_eval_result is smaller ; otherwise , False . <nl> + <nl> + Raises : <nl> + ValueError : If input eval result is None or no loss is available . <nl> + " " " <nl> + default_key = metric_keys . MetricKeys . LOSS <nl> + if not best_eval_result or default_key not in best_eval_result : <nl> + raise ValueError ( <nl> + ' best_eval_result cannot be empty or no loss is found in it . ' ) <nl> + <nl> + if not current_eval_result or default_key not in current_eval_result : <nl> + raise ValueError ( <nl> + ' current_eval_result cannot be empty or no loss is found in it . ' ) <nl> + <nl> + return best_eval_result [ default_key ] > current_eval_result [ default_key ] <nl> + <nl> + <nl> + def _verify_compre_fn_args ( compare_fn ) : <nl> + " " " Verifies compare_fn arguments . " " " <nl> + args = set ( util . 
fn_args ( compare_fn ) ) <nl> + if ' best_eval_result ' not in args : <nl> + raise ValueError ( <nl> + ' compare_fn ( % s ) must include best_eval_result argument . ' % compare_fn ) <nl> + if ' current_eval_result ' not in args : <nl> + raise ValueError ( <nl> + ' compare_fn ( % s ) must include current_eval_result argument . ' % <nl> + compare_fn ) <nl> + non_valid_args = list ( args - set ( [ ' best_eval_result ' , ' current_eval_result ' ] ) ) <nl> + if non_valid_args : <nl> + raise ValueError ( ' compare_fn ( % s ) has the following unexpected args : % s ' % <nl> + ( compare_fn , non_valid_args ) ) <nl> + <nl> + <nl> + @ tf_export ( ' estimator . BestExporter ' ) <nl> + class BestExporter ( Exporter ) : <nl> + " " " This class exports the serving graph and checkpoints of the best models . <nl> + <nl> + This class performs a model export every time the new model is better <nl> + than any existing model . <nl> + " " " <nl> + <nl> + def __init__ ( self , <nl> + name = ' best_exporter ' , <nl> + serving_input_receiver_fn = None , <nl> + event_file_pattern = ' eval / * . tfevents . * ' , <nl> + compare_fn = _loss_smaller , <nl> + assets_extra = None , <nl> + as_text = False , <nl> + exports_to_keep = 5 ) : <nl> + " " " Create an ` Exporter ` to use with ` tf . estimator . EvalSpec ` . <nl> + <nl> + Example of creating a BestExporter for training and evaluation : <nl> + ` ` ` python <nl> + def make_train_and_eval_fn ( ) : <nl> + # Set up feature columns . <nl> + categorical_feature_a = ( <nl> + tf . feature_column . categorical_column_with_hash_bucket ( . . . ) ) <nl> + categorical_feature_a_emb = embedding_column ( <nl> + categorical_column = categorical_feature_a , . . . ) <nl> + . . . # other feature columns <nl> + <nl> + estimator = tf . estimator . DNNClassifier ( <nl> + config = tf . estimator . RunConfig ( <nl> + model_dir = ' / my_model ' , save_summary_steps = 100 ) , <nl> + feature_columns = [ categorical_feature_a_emb , . . . ] , <nl> + hidden_units = [ 1024 , 512 , 256 ] ) <nl> + <nl> + serving_feature_spec = tf . feature_column . make_parse_example_spec ( <nl> + categorical_feature_a_emb ) <nl> + serving_input_receiver_fn = ( <nl> + tf . estimator . export . build_parsing_serving_input_receiver_fn ( <nl> + serving_feature_spec ) ) <nl> + <nl> + exporter = tf . estimator . BestExporter ( <nl> + name = " best_exporter " , <nl> + serving_input_receiver_fn = serving_input_receiver_fn , <nl> + exports_to_keep = 5 ) <nl> + <nl> + train_spec = tf . estimator . TrainSpec ( . . . ) <nl> + <nl> + eval_spec = [ tf . estimator . EvalSpec ( <nl> + input_fn = eval_input_fn , <nl> + steps = 100 , <nl> + exporters = exporter , <nl> + start_delay_secs = 0 , <nl> + throttle_secs = 5 ) ] <nl> + <nl> + return tf . estimator . DistributedTrainingSpec ( estimator , train_spec , <nl> + eval_spec ) <nl> + ` ` ` <nl> + <nl> + Args : <nl> + name : unique name of this ` Exporter ` that is going to be used in the <nl> + export path . <nl> + serving_input_receiver_fn : a function that takes no arguments and returns <nl> + a ` ServingInputReceiver ` . <nl> + event_file_pattern : event file name pattern relative to model_dir . If <nl> + None , however , the exporter would not be preemption - safe . To be <nl> + preemption - safe , event_file_pattern should be specified . <nl> + compare_fn : a function that compares two evaluation results and returns <nl> + true if current evaluation result is better . 
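For example , a minimal sketch of an accuracy - based alternative ( the <nl> + ` accuracy ` key is an assumption here ; use whatever key your eval <nl> + dict actually contains ) : <nl> + <nl> + ` ` ` python <nl> + def _accuracy_bigger ( best_eval_result , current_eval_result ) : <nl> + # True when the candidate model scores strictly higher . <nl> + return ( current_eval_result [ ' accuracy ' ] > <nl> + best_eval_result [ ' accuracy ' ] ) <nl> + ` ` ` <nl> + <nl> + 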
Follows the signature : <nl> + * Args : <nl> + * ` best_eval_result ` : This is the evaluation result of the best model . <nl> + * ` current_eval_result ` : This is the evaluation result of current <nl> + candidate model . <nl> + * Returns : <nl> + True if current evaluation result is better ; otherwise , False . <nl> + assets_extra : An optional dict specifying how to populate the assets . extra <nl> + directory within the exported SavedModel . Each key should give the <nl> + destination path ( including the filename ) relative to the assets . extra <nl> + directory . The corresponding value gives the full path of the source <nl> + file to be copied . For example , the simple case of copying a single <nl> + file without renaming it is specified as ` { ' my_asset_file . txt ' : <nl> + ' / path / to / my_asset_file . txt ' } ` . <nl> + as_text : whether to write the SavedModel proto in text format . Defaults to <nl> + ` False ` . <nl> + exports_to_keep : Number of exports to keep . Older exports will be <nl> + garbage - collected . Defaults to 5 . Set to ` None ` to disable garbage <nl> + collection . <nl> + <nl> + Raises : <nl> + ValueError : if any argument is invalid . <nl> + " " " <nl> + self . _compare_fn = compare_fn <nl> + if self . _compare_fn is None : <nl> + raise ValueError ( ' ` compare_fn ` must not be None . ' ) <nl> + _verify_compre_fn_args ( self . _compare_fn ) <nl> + <nl> + self . _saved_model_exporter = _SavedModelExporter ( <nl> + name , serving_input_receiver_fn , assets_extra , as_text ) <nl> + <nl> + self . _event_file_pattern = event_file_pattern <nl> + self . _model_dir = None <nl> + self . _best_eval_result = None <nl> + <nl> + self . _exports_to_keep = exports_to_keep <nl> + if exports_to_keep is not None and exports_to_keep < = 0 : <nl> + raise ValueError ( <nl> + ' ` exports_to_keep ` , if provided , must be a positive number ' ) <nl> + <nl> + @ property <nl> + def name ( self ) : <nl> + return self . _saved_model_exporter . name <nl> + <nl> + def export ( self , estimator , export_path , checkpoint_path , eval_result , <nl> + is_the_final_export ) : <nl> + export_result = None <nl> + <nl> + if self . _model_dir ! = estimator . model_dir ( ) and self . _event_file_pattern : <nl> + # Loads best metric from event files . <nl> + tf_logging . info ( ' Loading best metric from event files . ' ) <nl> + <nl> + self . _model_dir = estimator . model_dir ( ) <nl> + full_event_file_pattern = os . path . join ( self . _model_dir , <nl> + self . _event_file_pattern ) <nl> + self . _best_eval_result = self . _get_best_eval_result ( <nl> + full_event_file_pattern ) <nl> + <nl> + if self . _best_eval_result is None or self . _compare_fn ( <nl> + best_eval_result = self . _best_eval_result , <nl> + current_eval_result = eval_result ) : <nl> + tf_logging . info ( ' Performing best model export . ' ) <nl> + self . _best_eval_result = eval_result <nl> + export_result = self . _saved_model_exporter . export ( <nl> + estimator , export_path , checkpoint_path , eval_result , <nl> + is_the_final_export ) <nl> + self . _garbage_collect_exports ( export_path ) <nl> + <nl> + return export_result <nl> + <nl> + def _garbage_collect_exports ( self , export_dir_base ) : <nl> + " " " Deletes older exports , retaining only a given number of the most recent . <nl> + <nl> + Export subdirectories are assumed to be named with monotonically increasing <nl> + integers ; the most recent are taken to be those with the largest values . 
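<nl> + <nl> + For example , with ` exports_to_keep = 2 ` and subdirectories named <nl> + 1513036967 , 1513037273 and 1513037426 , the two largest ( most recent ) <nl> + names are kept and 1513036967 is deleted .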
<nl> + <nl> + Args : <nl> + export_dir_base : the base directory under which each export is in a <nl> + versioned subdirectory . <nl> + " " " <nl> + if self . _exports_to_keep is None : <nl> + return <nl> + <nl> + def _export_version_parser ( path ) : <nl> + # create a simple parser that pulls the export_version from the directory . <nl> + filename = os . path . basename ( path . path ) <nl> + if not ( len ( filename ) = = 10 and filename . isdigit ( ) ) : <nl> + return None <nl> + return path . _replace ( export_version = int ( filename ) ) <nl> + <nl> + # pylint : disable = protected - access <nl> + keep_filter = gc . _largest_export_versions ( self . _exports_to_keep ) <nl> + delete_filter = gc . _negation ( keep_filter ) <nl> + for p in delete_filter ( <nl> + gc . _get_paths ( export_dir_base , parser = _export_version_parser ) ) : <nl> + try : <nl> + gfile . DeleteRecursively ( p . path ) <nl> + except errors_impl . NotFoundError as e : <nl> + tf_logging . warn ( ' Can not delete % s recursively : % s ' , p . path , e ) <nl> + # pylint : enable = protected - access <nl> + <nl> + def _get_best_eval_result ( self , event_files ) : <nl> + " " " Get the best eval result from event files . <nl> + <nl> + Args : <nl> + event_files : Absolute pattern of event files . <nl> + <nl> + Returns : <nl> + The best eval result . <nl> + " " " <nl> + if not event_files : <nl> + return None <nl> + <nl> + best_eval_result = None <nl> + for event_file in gfile . Glob ( os . path . join ( event_files ) ) : <nl> + for event in summary_iterator . summary_iterator ( event_file ) : <nl> + if event . HasField ( ' summary ' ) : <nl> + event_eval_result = { } <nl> + for value in event . summary . value : <nl> + if value . HasField ( ' simple_value ' ) : <nl> + event_eval_result [ value . tag ] = value . simple_value <nl> + if best_eval_result is None or self . _compare_fn ( <nl> + best_eval_result , event_eval_result ) : <nl> + best_eval_result = event_eval_result <nl> + return best_eval_result <nl> + <nl> + <nl> @ tf_export ( ' estimator . FinalExporter ' ) <nl> class FinalExporter ( Exporter ) : <nl> " " " This class exports the serving graph and checkpoints in the end . <nl> def __init__ ( self , <nl> Raises : <nl> ValueError : if any arguments is invalid . <nl> " " " <nl> - self . _saved_model_exporter = _SavedModelExporter ( name , <nl> - serving_input_receiver_fn , <nl> - assets_extra , as_text ) <nl> + self . _saved_model_exporter = _SavedModelExporter ( <nl> + name , serving_input_receiver_fn , assets_extra , as_text ) <nl> <nl> @ property <nl> def name ( self ) : <nl> def __init__ ( self , <nl> Raises : <nl> ValueError : if any arguments is invalid . <nl> " " " <nl> - self . _saved_model_exporter = _SavedModelExporter ( name , <nl> - serving_input_receiver_fn , <nl> - assets_extra , as_text ) <nl> + self . _saved_model_exporter = _SavedModelExporter ( <nl> + name , serving_input_receiver_fn , assets_extra , as_text ) <nl> self . _exports_to_keep = exports_to_keep <nl> if exports_to_keep is not None and exports_to_keep < = 0 : <nl> raise ValueError ( <nl> mmm a / tensorflow / python / estimator / exporter_test . py <nl> ppp b / tensorflow / python / estimator / exporter_test . py <nl> <nl> from tensorflow . python . util import compat <nl> <nl> <nl> + class BestExporterTest ( test . TestCase ) : <nl> + <nl> + def test_error_out_if_exports_to_keep_is_zero ( self ) : <nl> + <nl> + def _serving_input_receiver_fn ( ) : <nl> + pass <nl> + <nl> + with self . 
assertRaisesRegexp ( ValueError , " positive number " ) : <nl> + exporter = exporter_lib . BestExporter ( <nl> + name = " best_exporter " , <nl> + serving_input_receiver_fn = _serving_input_receiver_fn , <nl> + exports_to_keep = 0 ) <nl> + self . assertEqual ( " best_exporter " , exporter . name ) <nl> + <nl> + def test_best_exporter ( self ) : <nl> + <nl> + def _serving_input_receiver_fn ( ) : <nl> + pass <nl> + <nl> + export_dir_base = tempfile . mkdtemp ( ) <nl> + gfile . MkDir ( export_dir_base ) <nl> + gfile . MkDir ( export_dir_base + " / export " ) <nl> + gfile . MkDir ( export_dir_base + " / eval " ) <nl> + <nl> + exporter = exporter_lib . BestExporter ( <nl> + name = " best_exporter " , <nl> + serving_input_receiver_fn = _serving_input_receiver_fn , <nl> + assets_extra = { " from / path " : " to / path " } , <nl> + as_text = False , <nl> + exports_to_keep = 5 ) <nl> + estimator = test . mock . Mock ( spec = estimator_lib . Estimator ) <nl> + estimator . export_savedmodel . return_value = " export_result_path " <nl> + estimator . model_dir . return_value = export_dir_base <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { } , False ) <nl> + <nl> + self . assertEqual ( " export_result_path " , export_result ) <nl> + estimator . export_savedmodel . assert_called_with ( <nl> + export_dir_base , <nl> + _serving_input_receiver_fn , <nl> + assets_extra = { " from / path " : " to / path " } , <nl> + as_text = False , <nl> + checkpoint_path = " checkpoint_path " , <nl> + strip_default_attrs = True ) <nl> + <nl> + def test_best_export_is_saved ( self ) : <nl> + <nl> + def _serving_input_receiver_fn ( ) : <nl> + pass <nl> + <nl> + export_dir_base = tempfile . mkdtemp ( ) <nl> + gfile . MkDir ( export_dir_base ) <nl> + gfile . MkDir ( export_dir_base + " / export " ) <nl> + gfile . MkDir ( export_dir_base + " / eval " ) <nl> + <nl> + exporter = exporter_lib . BestExporter ( <nl> + name = " best_exporter " , <nl> + serving_input_receiver_fn = _serving_input_receiver_fn , <nl> + assets_extra = { " from / path " : " to / path " } , <nl> + as_text = False , <nl> + exports_to_keep = 1 ) <nl> + estimator = test . mock . Mock ( spec = estimator_lib . Estimator ) <nl> + estimator . export_savedmodel . return_value = " export_result_path " <nl> + estimator . model_dir . return_value = export_dir_base <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { " loss " : 0 . 5 } , False ) <nl> + <nl> + self . assertTrue ( estimator . export_savedmodel . called ) <nl> + self . assertEqual ( " export_result_path " , export_result ) <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { " loss " : 0 . 6 } , False ) <nl> + self . assertEqual ( None , export_result ) <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { " loss " : 0 . 4 } , False ) <nl> + self . assertEqual ( " export_result_path " , export_result ) <nl> + <nl> + def test_best_exporter_with_preemption ( self ) : <nl> + <nl> + def _serving_input_receiver_fn ( ) : <nl> + pass <nl> + <nl> + export_dir_base = tempfile . mkdtemp ( ) <nl> + gfile . MkDir ( export_dir_base ) <nl> + gfile . MkDir ( export_dir_base + " / export " ) <nl> + gfile . MkDir ( export_dir_base + " / eval " ) <nl> + <nl> + eval_dir_base = os . path . join ( export_dir_base , " eval_continuous " ) <nl> + estimator_lib . 
_write_dict_to_summary ( eval_dir_base , { " loss " : 50 } , 1 ) <nl> + estimator_lib . _write_dict_to_summary ( eval_dir_base , { " loss " : 60 } , 2 ) <nl> + <nl> + exporter = exporter_lib . BestExporter ( <nl> + name = " best_exporter " , <nl> + serving_input_receiver_fn = _serving_input_receiver_fn , <nl> + event_file_pattern = " eval_continuous / * . tfevents . * " , <nl> + assets_extra = { " from / path " : " to / path " } , <nl> + as_text = False , <nl> + exports_to_keep = 1 ) <nl> + <nl> + estimator = test . mock . Mock ( spec = estimator_lib . Estimator ) <nl> + estimator . model_dir . return_value = export_dir_base <nl> + estimator . export_savedmodel . return_value = " export_result_path " <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { " loss " : 100 } , False ) <nl> + self . assertEqual ( None , export_result ) <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { " loss " : 10 } , False ) <nl> + self . assertEqual ( " export_result_path " , export_result ) <nl> + <nl> + export_result = exporter . export ( estimator , export_dir_base , <nl> + " checkpoint_path " , { " loss " : 20 } , False ) <nl> + self . assertEqual ( None , export_result ) <nl> + <nl> + def test_garbage_collect_exports ( self ) : <nl> + export_dir_base = tempfile . mkdtemp ( ) <nl> + gfile . MkDir ( export_dir_base ) <nl> + gfile . MkDir ( export_dir_base + " / export " ) <nl> + gfile . MkDir ( export_dir_base + " / eval " ) <nl> + <nl> + export_dir_1 = _create_test_export_dir ( export_dir_base ) <nl> + export_dir_2 = _create_test_export_dir ( export_dir_base ) <nl> + export_dir_3 = _create_test_export_dir ( export_dir_base ) <nl> + export_dir_4 = _create_test_export_dir ( export_dir_base ) <nl> + <nl> + self . assertTrue ( gfile . Exists ( export_dir_1 ) ) <nl> + self . assertTrue ( gfile . Exists ( export_dir_2 ) ) <nl> + self . assertTrue ( gfile . Exists ( export_dir_3 ) ) <nl> + self . assertTrue ( gfile . Exists ( export_dir_4 ) ) <nl> + <nl> + def _serving_input_receiver_fn ( ) : <nl> + return array_ops . constant ( [ 1 ] ) , None <nl> + <nl> + exporter = exporter_lib . BestExporter ( <nl> + name = " best_exporter " , <nl> + serving_input_receiver_fn = _serving_input_receiver_fn , <nl> + exports_to_keep = 2 ) <nl> + estimator = test . mock . Mock ( spec = estimator_lib . Estimator ) <nl> + estimator . model_dir . return_value = export_dir_base <nl> + # Garbage collect all but the most recent 2 exports , <nl> + # where recency is determined based on the timestamp directory names . <nl> + exporter . export ( estimator , export_dir_base , None , None , False ) <nl> + <nl> + self . assertFalse ( gfile . Exists ( export_dir_1 ) ) <nl> + self . assertFalse ( gfile . Exists ( export_dir_2 ) ) <nl> + self . assertTrue ( gfile . Exists ( export_dir_3 ) ) <nl> + self . assertTrue ( gfile . Exists ( export_dir_4 ) ) <nl> + <nl> + <nl> class LatestExporterTest ( test . TestCase ) : <nl> <nl> def test_error_out_if_exports_to_keep_is_zero ( self ) : <nl> mmm a / tensorflow / python / feature_column / feature_column . py <nl> ppp b / tensorflow / python / feature_column / feature_column . py <nl> def _transform_feature ( self , inputs ) : <nl> feature_tensors . append ( ids_and_weights . id_tensor ) <nl> else : <nl> raise ValueError ( ' Unsupported column type . Given : { } ' . format ( key ) ) <nl> - return sparse_ops . 
_sparse_cross_hashed ( # pylint : disable = protected - access <nl> + return sparse_ops . sparse_cross_hashed ( <nl> inputs = feature_tensors , <nl> num_buckets = self . hash_bucket_size , <nl> hash_key = self . hash_key ) <nl> mmm a / tensorflow / python / feature_column / feature_column_test . py <nl> ppp b / tensorflow / python / feature_column / feature_column_test . py <nl> def get_keras_linear_model_predictions ( features , <nl> return retval <nl> <nl> <nl> - @ test_util . with_c_api <nl> class LinearModelTest ( test . TestCase ) : <nl> <nl> def test_raises_if_empty_feature_columns ( self ) : <nl> def test_raises_if_shape_mismatch ( self ) : <nl> price = fc . numeric_column ( ' price ' , shape = 2 ) <nl> with ops . Graph ( ) . as_default ( ) : <nl> features = { ' price ' : [ [ 1 . ] , [ 5 . ] ] } <nl> - if ops . _USE_C_API : <nl> - with self . assertRaisesRegexp ( <nl> - Exception , <nl> - r ' Cannot reshape a tensor with 2 elements to shape \ [ 2 , 2 \ ] ' ) : <nl> - predictions = fc . linear_model ( features , [ price ] ) <nl> - else : <nl> - predictions = fc . linear_model ( features , [ price ] ) <nl> - with _initialized_session ( ) : <nl> - with self . assertRaisesRegexp ( Exception , ' requested shape has 4 ' ) : <nl> - predictions . eval ( ) <nl> + with self . assertRaisesRegexp ( <nl> + Exception , <nl> + r ' Cannot reshape a tensor with 2 elements to shape \ [ 2 , 2 \ ] ' ) : <nl> + fc . linear_model ( features , [ price ] ) <nl> <nl> def test_dense_reshaping ( self ) : <nl> price = fc . numeric_column ( ' price ' , shape = [ 1 , 2 ] ) <nl> def test_with_rank_0_feature ( self ) : <nl> sess . run ( net , feed_dict = { features [ ' price ' ] : np . array ( 1 ) } ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class _LinearModelTest ( test . TestCase ) : <nl> <nl> def test_raises_if_empty_feature_columns ( self ) : <nl> def test_raises_if_shape_mismatch ( self ) : <nl> price = fc . numeric_column ( ' price ' , shape = 2 ) <nl> with ops . Graph ( ) . as_default ( ) : <nl> features = { ' price ' : [ [ 1 . ] , [ 5 . ] ] } <nl> - if ops . _USE_C_API : <nl> - with self . assertRaisesRegexp ( <nl> - Exception , <nl> - r ' Cannot reshape a tensor with 2 elements to shape \ [ 2 , 2 \ ] ' ) : <nl> - predictions = get_keras_linear_model_predictions ( features , [ price ] ) <nl> - else : <nl> - predictions = get_keras_linear_model_predictions ( features , [ price ] ) <nl> - with _initialized_session ( ) : <nl> - with self . assertRaisesRegexp ( Exception , ' requested shape has 4 ' ) : <nl> - predictions . eval ( ) <nl> + with self . assertRaisesRegexp ( <nl> + Exception , <nl> + r ' Cannot reshape a tensor with 2 elements to shape \ [ 2 , 2 \ ] ' ) : <nl> + get_keras_linear_model_predictions ( features , [ price ] ) <nl> <nl> def test_dense_reshaping ( self ) : <nl> price = fc . numeric_column ( ' price ' , shape = [ 1 , 2 ] ) <nl> def scale_matrix ( ) : <nl> self . assertAllEqual ( [ [ 2 , 2 ] , [ 2 , 2 ] , [ 2 , 2 ] ] , gradient ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class FunctionalInputLayerTest ( test . TestCase ) : <nl> <nl> def test_raises_if_empty_feature_columns ( self ) : <nl> def test_raises_if_shape_mismatch ( self ) : <nl> price = fc . numeric_column ( ' price ' , shape = 2 ) <nl> with ops . Graph ( ) . as_default ( ) : <nl> features = { ' price ' : [ [ 1 . ] , [ 5 . ] ] } <nl> - if ops . _USE_C_API : <nl> - with self . assertRaisesRegexp ( <nl> - Exception , <nl> - r ' Cannot reshape a tensor with 2 elements to shape \ [ 2 , 2 \ ] ' ) : <nl> - net = fc . 
input_layer ( features , [ price ] ) <nl> - else : <nl> - net = fc . input_layer ( features , [ price ] ) <nl> - with _initialized_session ( ) : <nl> - with self . assertRaisesRegexp ( Exception , ' requested shape has 4 ' ) : <nl> - net . eval ( ) <nl> + with self . assertRaisesRegexp ( <nl> + Exception , <nl> + r ' Cannot reshape a tensor with 2 elements to shape \ [ 2 , 2 \ ] ' ) : <nl> + fc . input_layer ( features , [ price ] ) <nl> <nl> def test_reshaping ( self ) : <nl> price = fc . numeric_column ( ' price ' , shape = [ 1 , 2 ] ) <nl> mmm a / tensorflow / python / framework / function_test . py <nl> ppp b / tensorflow / python / framework / function_test . py <nl> def testStableName ( self ) : <nl> def Foo ( x , y , z ) : <nl> return math_ops . tanh ( math_ops . matmul ( x , y ) + z ) <nl> <nl> - # We added more randomness to function names in C API . <nl> - # TODO ( iga ) : Remove this if statement when we switch to C API . <nl> - if ops . _USE_C_API : # pylint : disable = protected - access <nl> - if sys . byteorder = = " big " : <nl> - self . assertEqual ( " Foo_kEdkAG8SJvg " , <nl> - Foo . instantiate ( [ dtypes . float32 ] * 3 ) . name ) <nl> - else : <nl> - self . assertEqual ( " Foo_aCYSbwBkR5A " , <nl> - Foo . instantiate ( [ dtypes . float32 ] * 3 ) . name ) <nl> + if sys . byteorder = = " big " : <nl> + self . assertEqual ( " Foo_kEdkAG8SJvg " , <nl> + Foo . instantiate ( [ dtypes . float32 ] * 3 ) . name ) <nl> else : <nl> - self . assertEqual ( " Foo_d643acf7 " , <nl> + self . assertEqual ( " Foo_aCYSbwBkR5A " , <nl> Foo . instantiate ( [ dtypes . float32 ] * 3 ) . name ) <nl> <nl> def testSignatureHash ( self ) : <nl> mmm a / tensorflow / python / framework / importer_test . py <nl> ppp b / tensorflow / python / framework / importer_test . py <nl> <nl> from tensorflow . python . framework import importer <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import test_ops # pylint : disable = unused - import <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . framework import versions <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ImportGraphDefTest ( test . TestCase ) : <nl> <nl> def _MakeGraphDef ( self , <nl> def testEmptyNameScope ( self ) : <nl> return_elements = [ " foo " ] , <nl> name = " " ) <nl> <nl> - if ops . _USE_C_API : <nl> - self . assertEqual ( op . name , " foo " ) <nl> - else : <nl> - self . assertEqual ( op . name , " foo_1 " ) <nl> + self . assertEqual ( op . name , " foo " ) <nl> <nl> def testInputMap ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> def testWhileLoop ( self ) : <nl> self . assertEqual ( sess . run ( imported_r ) , 10 ) <nl> <nl> def testTypeMismatchInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - # TODO ( skyewm ) : improve error message <nl> - error_msg = ( " Input 0 of node import / B was passed int32 from import / A : 0 " <nl> - " incompatible with expected float . " ) <nl> - else : <nl> - error_msg = ( " Cannot convert a tensor of type int32 to an input of type " <nl> - " float " ) <nl> - <nl> + # TODO ( skyewm ) : improve error message <nl> + error_msg = ( " Input 0 of node import / B was passed int32 from import / A : 0 " <nl> + " incompatible with expected float . " ) <nl> with ops . Graph ( ) . as_default ( ) : <nl> with self . 
assertRaisesRegexp ( ValueError , error_msg ) : <nl> importer . import_graph_def ( <nl> def testShapeWhitelistViolation ( self ) : <nl> " Shapes ( ) and ( 43 , ) are not compatible " in str ( e . exception ) ) <nl> <nl> def testInvalidSignatureTooManyInputsInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - # TODO ( skyewm ) : improve error message <nl> - error_msg = " NodeDef expected inputs ' ' do not match 1 inputs specified " <nl> - else : <nl> - error_msg = r " More inputs specified \ ( ' A : 0 ' \ ) than the op expects " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + # TODO ( skyewm ) : improve error message <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + " NodeDef expected inputs ' ' do not match 1 inputs specified " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' IntOutput ' } <nl> def testInvalidSignatureTooManyInputsInGraphDef ( self ) : <nl> " " " ) ) <nl> <nl> def testInvalidSignatureNotEnoughInputsInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - # TODO ( skyewm ) : improve error message <nl> - error_msg = ( " NodeDef expected inputs ' int32 , float ' do not match 1 " <nl> - " inputs specified " ) <nl> - else : <nl> - error_msg = ( r " Input types mismatch \ ( expected ' int32 , float32 ' but " <nl> - r " got ' int32 ' \ ) " ) <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + # TODO ( skyewm ) : improve error message <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + " NodeDef expected inputs ' int32 , float ' do not match 1 inputs " <nl> + " specified " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' IntOutput ' } <nl> def testInvalidSignatureNotEnoughInputsInGraphDef ( self ) : <nl> " " " ) ) <nl> <nl> def testMissingInputOpInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = " Node ' B ' : Unknown input node ' A : 0 ' " <nl> - else : <nl> - error_msg = " Input tensor ' A : 0 ' not found " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " Node ' B ' : Unknown input node ' A : 0 ' " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' B ' op : ' FloatInput ' input : ' A : 0 ' } <nl> def testMissingInputOpInGraphDefButAppearsInInputMap ( self ) : <nl> self . assertEqual ( b . inputs [ 0 ] , feed_a_0 ) <nl> <nl> def testMissingInputTensorInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = ( " Node ' B ' : Connecting to invalid output 1 of source node A " <nl> - " which has 1 outputs " ) <nl> - else : <nl> - error_msg = " Input tensor ' A : 1 ' not found " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + " Node ' B ' : Connecting to invalid output 1 of source node A " <nl> + " which has 1 outputs " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' FloatOutput ' } <nl> def testMissingInputTensorInGraphDef ( self ) : <nl> " " " ) ) <nl> <nl> def testMissingControlInputInGraphDef ( self ) : <nl> - if ops . 
_USE_C_API : <nl> - error_msg = r " Node ' B ' : Unknown input node ' \ ^ A ' " <nl> - else : <nl> - error_msg = r " Control input ' \ ^ A ' not found " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + r " Node ' B ' : Unknown input node ' \ ^ A ' " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' B ' op : ' None ' input : ' ^ A ' } <nl> " " " ) ) <nl> <nl> def testInvalidTensorNameOutputIndexInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = " Node ' B ' : Unknown input node ' A : B ' " <nl> - else : <nl> - error_msg = " Cannot convert ' A : B ' to a tensor name . " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " Node ' B ' : Unknown input node ' A : B ' " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' B ' op : ' None ' input : ' A : B ' } <nl> " " " ) ) <nl> <nl> def testInvalidTensorNameInGraphDef ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = " Node ' B ' : Unknown input node ' A : B : 0 ' " <nl> - else : <nl> - error_msg = " Cannot convert ' A : B : 0 ' to a tensor name . " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " Node ' B ' : Unknown input node ' A : B : 0 ' " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' B ' op : ' None ' input : ' A : B : 0 ' } <nl> " " " ) ) <nl> <nl> def testMissingReturnOperation ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = " Requested return node ' B ' not found in graph def " <nl> - else : <nl> - error_msg = " return_element ' B ' not found in graph_def . " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Requested return node ' B ' not found in graph def " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' None ' } <nl> def testMissingReturnOperation ( self ) : <nl> return_elements = [ " B " ] ) <nl> <nl> def testMissingReturnTensor ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = ( r " Invalid return output 1 of node ' A ' , which has 1 " <nl> - r " output \ ( s \ ) " ) <nl> - else : <nl> - error_msg = " return_element ' A : 1 ' not found in graph_def . " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + r " Invalid return output 1 of node ' A ' , which has 1 output \ ( s \ ) " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' IntOutput ' } <nl> " " " ) , <nl> return_elements = [ " A : 1 " ] ) <nl> <nl> - if ops . _USE_C_API : <nl> - error_msg = " Requested return tensor ' B : 0 ' not found in graph def " <nl> - else : <nl> - error_msg = " return_element ' B : 0 ' not found in graph_def . " <nl> - <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Requested return tensor ' B : 0 ' not found in graph def " ) : <nl> importer . 
import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' IntOutput ' } <nl> " " " ) , <nl> return_elements = [ " B : 0 " ] ) <nl> <nl> - if ops . _USE_C_API : <nl> - error_msg = " Cannot convert ' A : B : 0 ' to a tensor name . " <nl> - else : <nl> - error_msg = " return_element ' A : B : 0 ' not found in graph_def . " <nl> - <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " Cannot convert ' A : B : 0 ' to a tensor name . " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' IntOutput ' } <nl> def testInputMapUnusedAsInput ( self ) : <nl> input_map = { " A : 2 " : constant_op . constant ( 5 . 0 ) } ) <nl> <nl> def testInputMapTypeMismatch ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = ( " Input 0 of node import / B was passed float from Const : 0 " <nl> - " incompatible with expected int32 . " ) <nl> - else : <nl> - error_msg = ( " Cannot convert a tensor of type float32 to an input of " <nl> - " type int32 . " ) <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Input 0 of node import / B was passed float from Const : 0 " <nl> + " incompatible with expected int32 . " ) : <nl> importer . import_graph_def ( <nl> self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' IntOutput ' } <nl> def testNamePrefixColocationAttrsNotFound ( self ) : <nl> value { list { s : ' loc : @ A ' } } <nl> } } " " " ) <nl> <nl> - if ops . _USE_C_API : <nl> - error_msg = " Node ' B ' expects to be colocated with unknown node ' A ' " <nl> - else : <nl> - error_msg = " does not exist during import " <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Node ' B ' expects to be colocated with unknown node ' A ' " ) : <nl> importer . import_graph_def ( <nl> original_graph_def , return_elements = [ " B " ] , name = " imported_graph " ) <nl> <nl> def testInvalidInputForReturnOperations ( self ) : <nl> TypeError , " return_elements must be a list of strings . " ) : <nl> importer . import_graph_def ( self . _MakeGraphDef ( " " ) , return_elements = [ 7 ] ) <nl> <nl> - if ops . _USE_C_API : <nl> - error_msg = " Cannot convert ' a : b : c ' to a tensor name . " <nl> - else : <nl> - error_msg = " Requested return_element ' a : b : c ' not found in graph_def . " <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> - importer . import_graph_def ( self . _MakeGraphDef ( " " ) , <nl> - return_elements = [ " a : b : c " ] ) <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " Cannot convert ' a : b : c ' to a tensor name . " ) : <nl> + importer . import_graph_def ( <nl> + self . _MakeGraphDef ( " " ) , return_elements = [ " a : b : c " ] ) <nl> <nl> def testDuplicateOperationNames ( self ) : <nl> - if ops . _USE_C_API : <nl> - error_msg = " Node ' A ' is not unique " <nl> - else : <nl> - error_msg = " Duplicate name ' A ' in GraphDef . " <nl> - <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> - importer . import_graph_def ( <nl> - self . 
_MakeGraphDef ( " " " <nl> - node { name : ' A ' op : ' IntOutput ' } <nl> - node { name : ' B ' op : ' IntOutput ' } <nl> - node { name : ' A ' op : ' IntOutput ' } <nl> - " " " ) ) <nl> + with self . assertRaisesRegexp ( ValueError , " Node ' A ' is not unique " ) : <nl> + importer . import_graph_def ( <nl> + self . _MakeGraphDef ( " " " <nl> + node { name : ' A ' op : ' IntOutput ' } <nl> + node { name : ' B ' op : ' IntOutput ' } <nl> + node { name : ' A ' op : ' IntOutput ' } <nl> + " " " ) ) <nl> <nl> def testWithExtensionAndAttr ( self ) : <nl> with ops . Graph ( ) . as_default ( ) as g : <nl> def testVersion ( self ) : <nl> min_consumer ) <nl> <nl> def testVersionLow ( self ) : <nl> - with ops . Graph ( ) . as_default ( ) as g : <nl> - pat = ( r " GraphDef producer version - 1 below min producer % d supported " <nl> - r " by TensorFlow \ S + \ . Please regenerate your graph . $ " % <nl> - versions . GRAPH_DEF_VERSION_MIN_PRODUCER ) <nl> - # C API throws error during import , Python - only throws error during run <nl> - if ops . _USE_C_API : <nl> - with self . assertRaisesRegexp ( Exception , pat ) : <nl> - importer . import_graph_def ( self . _MakeGraphDef ( " " , producer = - 1 ) ) <nl> - else : <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + with self . assertRaisesRegexp ( <nl> + Exception , <nl> + r " GraphDef producer version - 1 below min producer % d supported " <nl> + r " by TensorFlow \ S + \ . Please regenerate your graph . $ " % <nl> + versions . GRAPH_DEF_VERSION_MIN_PRODUCER ) : <nl> importer . import_graph_def ( self . _MakeGraphDef ( " " , producer = - 1 ) ) <nl> - x = constant_op . constant ( <nl> - 7 ) # Need at least one op to get a C + + graph generated <nl> - with self . test_session ( graph = g ) as sess : <nl> - with self . assertRaisesRegexp ( Exception , pat ) : <nl> - sess . run ( x ) <nl> <nl> def testVersionHigh ( self ) : <nl> - with ops . Graph ( ) . as_default ( ) as g : <nl> - pat = ( r " GraphDef min consumer version % d above current version % d " <nl> - r " for TensorFlow \ S + \ . Please upgrade TensorFlow \ . $ " % <nl> - ( 1 < < 30 , versions . GRAPH_DEF_VERSION ) ) <nl> - <nl> - if ops . _USE_C_API : <nl> - with self . assertRaisesRegexp ( ValueError , pat ) : <nl> - importer . import_graph_def ( self . _MakeGraphDef ( " " , <nl> - min_consumer = 1 < < 30 ) ) <nl> - else : <nl> - # Python API only throws when graph is run <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + r " GraphDef min consumer version % d above current version % d " <nl> + r " for TensorFlow \ S + \ . Please upgrade TensorFlow \ . $ " % <nl> + ( 1 < < 30 , versions . GRAPH_DEF_VERSION ) ) : <nl> importer . import_graph_def ( self . _MakeGraphDef ( " " , min_consumer = 1 < < 30 ) ) <nl> - x = constant_op . constant ( <nl> - 7 ) # Need at least one op to get a C + + graph generated <nl> - with self . test_session ( graph = g ) as sess : <nl> - with self . assertRaisesRegexp ( Exception , pat ) : <nl> - sess . run ( x ) <nl> <nl> def testVersionAppliesToOpConstruction ( self ) : <nl> " " " These tests rely on shape fns in test_ops . cc . " " " <nl> def testDefaultAttrsRemoved ( self ) : <nl> " " " ) , <nl> return_elements = [ " A " ] , <nl> producer_op_list = producer_op_list ) <nl> - if ops . _USE_C_API : <nl> - error_msg = " Operation ' import / A ' has no attr named ' default_int ' . " <nl> - else : <nl> - error_msg = " No attr named ' default_int ' " <nl> - with self . 
assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Operation ' import / A ' has no attr named ' default_int ' . " ) : <nl> a [ 0 ] . get_attr ( " default_int " ) <nl> <nl> - # Unknown attrs cannot be imported using C API . This test will eventually be <nl> - # deleted . <nl> - if not ops . _USE_C_API : <nl> - # Attr only in producer_op_list with non - default value is preserved . <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - a = importer . import_graph_def ( <nl> - self . _MakeGraphDef ( " " " <nl> - node { name : ' A ' op : ' OpWithFutureDefaultAttr ' <nl> - attr { key : ' default_int ' value { i : 987 } } } <nl> - " " " ) , <nl> - return_elements = [ " A " ] , <nl> - producer_op_list = producer_op_list ) <nl> - self . assertEqual ( 987 , a [ 0 ] . get_attr ( " default_int " ) ) <nl> - <nl> def testFunctions ( self ) : <nl> dtype = dtypes . float32 <nl> + <nl> @ function . Defun ( dtype , dtype , dtype , dtype ) <nl> def Grad ( x , y , dout1 , dout2 ) : # pylint : disable = unused - argument <nl> # Return the inputs for simplicity of testing . The correct return value <nl> def InnerFunc ( x ) : <nl> def testImportInsideDefun ( self ) : <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> + <nl> @ function . Defun ( ) <nl> def Add2 ( x , y ) : <nl> return math_ops . add ( x , y ) <nl> def TestFunc ( ) : <nl> def testImportGraphWithFunctionTwice ( self ) : <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> + <nl> @ function . Defun ( ) <nl> def Add2 ( x , y ) : <nl> return math_ops . add ( x , y ) <nl> mmm a / tensorflow / python / framework / meta_graph_test . py <nl> ppp b / tensorflow / python / framework / meta_graph_test . py <nl> def _TestDir ( test_name ) : <nl> # pylint : enable = invalid - name <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SimpleMetaGraphTest ( test . TestCase ) : <nl> <nl> def testNoVariables ( self ) : <nl> def testVariableObjectsAreSharedAmongCollections ( self ) : <nl> self . assertIs ( global_vars [ 0 ] , trainable_vars [ 0 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ScopedMetaGraphTest ( test . TestCase ) : <nl> <nl> def _testScopedExport ( self , test_dir , exported_filenames ) : <nl> def testClearDevices ( self ) : <nl> self . assertEqual ( " " , str ( graph2 . as_graph_element ( " matmul " ) . device ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MetaGraphWithVariableScopeTest ( test . TestCase ) : <nl> <nl> def testMetricsCollection ( self ) : <nl> def _enqueue_vector ( sess , queue , values , shape = None ) : <nl> initializer = variables . local_variables_initializer ( ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ExportImportAcrossScopesTest ( test . TestCase ) : <nl> <nl> def testPartionedVariables ( self ) : <nl> mmm a / tensorflow / python / framework / op_def_library_test . py <nl> ppp b / tensorflow / python / framework / op_def_library_test . py <nl> def _unknown_shape ( op ) : <nl> return [ tensor_shape . unknown_shape ( ) for _ in op . outputs ] <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OpDefLibraryTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def setUp ( self ) : <nl> def testStructuredOutputMultipleLists ( self ) : <nl> self . assertEqual ( t_c , [ x . dtype for x in c ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OpDefLibraryGraphTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def setUp ( self ) : <nl> mmm a / tensorflow / python / framework / ops_test . 
py <nl> ppp b / tensorflow / python / framework / ops_test . py <nl> <nl> import weakref <nl> <nl> from tensorflow . core . framework import attr_value_pb2 <nl> - from tensorflow . core . framework import types_pb2 <nl> from tensorflow . core . protobuf import config_pb2 <nl> from tensorflow . python . client import session <nl> from tensorflow . python . eager import context <nl> <nl> from tensorflow . python . framework import versions <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> - from tensorflow . python . ops import gen_array_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . ops import resources <nl> <nl> ops . _set_call_cpp_shape_fn ( common_shapes . call_cpp_shape_fn ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ResourceTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testBuildGraph ( self ) : <nl> def testInitialize ( self ) : <nl> resources . shared_resources ( ) ) . eval ( ) ) , 0 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class TensorAndShapeTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testShape ( self ) : <nl> def testShapeFunctionError ( self ) : <nl> _ = a + b <nl> <nl> <nl> - @ test_util . with_c_api <nl> class IndexedSlicesTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testToTensor ( self ) : <nl> def testScalarMul ( self ) : <nl> self . assertAllEqual ( x . indices . eval ( ) , [ 0 , 2 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class NodeDefConstructorTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testNoArgs ( self ) : <nl> def _apply_op ( g , * args , * * kwargs ) : <nl> return op . outputs <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OperationTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testNoInputs ( self ) : <nl> def func ( x ) : <nl> attr_value_pb2 . NameAttrList ( name = " MyFunc " ) ) <nl> <nl> # Try fetching missing attr <nl> - if ops . _USE_C_API : <nl> - error_msg = " Operation ' FuncAttr ' has no attr named ' FakeAttr ' . " <nl> - else : <nl> - error_msg = " No attr named ' FakeAttr ' in name : \ " FuncAttr \ " " <nl> - <nl> - with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Operation ' FuncAttr ' has no attr named ' FakeAttr ' . " ) : <nl> op . get_attr ( " FakeAttr " ) <nl> <nl> # TODO ( b / 65162920 ) : remove this test when users who are directly mutating the <nl> def testSetAttr ( self ) : <nl> <nl> # TODO ( nolivia ) : test all error cases <nl> def testAddControlInput ( self ) : <nl> - # The C API dedups redundant control edges , pure Python does not <nl> - if ops . _USE_C_API : return <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - x = constant_op . constant ( 1 ) . op <nl> - y = constant_op . constant ( 2 ) . op <nl> - z = constant_op . constant ( 3 ) . op <nl> - z . _add_control_input ( x ) # pylint : disable = protected - access <nl> - self . assertEqual ( z . control_inputs , [ x ] ) <nl> - z . _add_control_input ( x ) # pylint : disable = protected - access <nl> - self . assertEqual ( z . control_inputs , [ x , x ] ) <nl> - z . _add_control_inputs ( [ x , y , y ] ) # pylint : disable = protected - access <nl> - self . assertEqual ( z . control_inputs , [ x , x , x , y , y ] ) <nl> - self . assertEqual ( x . 
_control_outputs , [ z ] ) <nl> - <nl> - def testAddControlInputC ( self ) : <nl> - # The C API dedups redundant control edges , pure Python does not <nl> - if not ops . _USE_C_API : return <nl> with ops . Graph ( ) . as_default ( ) : <nl> x = constant_op . constant ( 1 ) . op <nl> y = constant_op . constant ( 2 ) . op <nl> def testRemoveAllControlInputs ( self ) : <nl> self . assertEqual ( list ( f . op . inputs ) , [ d , e ] ) <nl> <nl> def testControlInputCycle ( self ) : <nl> - # Non - C API path has a different error message <nl> - if not ops . _USE_C_API : return <nl> graph = ops . Graph ( ) <nl> with graph . as_default ( ) : <nl> z = constant_op . constant ( 0 ) <nl> def testUpdateInputTypeError ( self ) : <nl> sess . run ( z ) <nl> <nl> def testUpdateInputShapeError ( self ) : <nl> - # C - API throws the error differently . <nl> - if ops . _USE_C_API : <nl> - return <nl> - g = ops . Graph ( ) <nl> - with g . as_default ( ) : <nl> - w = constant_op . constant ( 2 , shape = [ 3 , 1 ] ) <nl> - x = constant_op . constant ( 0 , shape = [ 3 , 1 ] ) <nl> - y = constant_op . constant ( 1 , shape = [ 2 , 2 ] ) <nl> - z = w + x <nl> - z . op . _update_input ( 0 , y ) # pylint : disable = protected - access <nl> - <nl> - with session . Session ( graph = g ) as sess : <nl> - with self . assertRaisesRegexp ( errors . InvalidArgumentError , <nl> - r " Incompatible shapes : \ [ 2 , 2 \ ] vs . \ [ 3 , 1 \ ] " ) : <nl> - sess . run ( z ) <nl> - <nl> - def testUpdateInputShapeErrorC ( self ) : <nl> - if not ops . _USE_C_API : <nl> - return <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> w = constant_op . constant ( 2 , shape = [ 3 , 1 ] ) <nl> def testUpdateInputShapeErrorC ( self ) : <nl> z . op . _update_input ( 0 , y ) # pylint : disable = protected - access <nl> <nl> def testUpdateInputOutOfRange ( self ) : <nl> - # C - API throws the error differently . <nl> - if ops . _USE_C_API : return <nl> - g = ops . Graph ( ) <nl> - with g . as_default ( ) : <nl> - x = constant_op . constant ( 1 ) <nl> - with self . assertRaisesRegexp ( IndexError , " list index out of range " ) : <nl> - x . op . _update_input ( 1 , x ) # pylint : disable = protected - access <nl> - <nl> - def testUpdateInputOutOfRangeC ( self ) : <nl> - # C - API throws the error differently . <nl> - if not ops . _USE_C_API : return <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> x = constant_op . constant ( 1 ) <nl> def testOpDef ( self ) : <nl> y = constant_op . constant ( 1 ) <nl> z = x + y <nl> <nl> - # Pure Python mode doesn ' t create OpDefs for constants <nl> - if ops . _USE_C_API : <nl> - self . assertEqual ( x . op . op_def . name , " Const " ) <nl> - self . assertEqual ( len ( x . op . op_def . input_arg ) , 0 ) <nl> - self . assertEqual ( len ( x . op . op_def . output_arg ) , 1 ) <nl> + self . assertEqual ( x . op . op_def . name , " Const " ) <nl> + self . assertEqual ( len ( x . op . op_def . input_arg ) , 0 ) <nl> + self . assertEqual ( len ( x . op . op_def . output_arg ) , 1 ) <nl> <nl> self . assertEqual ( z . op . op_def . name , " Add " ) <nl> self . assertEqual ( len ( z . op . op_def . input_arg ) , 2 ) <nl> def testInputsAreImmutable ( self ) : <nl> op . inputs . append ( None ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CreateOpTest ( test_util . 
TensorFlowTestCase ) : <nl> <nl> def testNodeDefArgs ( self ) : <nl> def testFinalized ( self ) : <nl> # the control flow context isn ' t set properly , but a more complicated use case <nl> # that might not be obvious to test will fail ) . Thus we instead explicitly test <nl> # the low - level behavior . <nl> - @ test_util . with_c_api <nl> class CreateOpFromTFOperationTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testBasic ( self ) : <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> x = test_ops . int_output ( ) <nl> - if ops . _USE_C_API : <nl> - c_op = ops . _create_c_op ( <nl> - g , ops . _NodeDef ( " IntInputIntOutput " , " myop " ) , [ x ] , [ ] ) <nl> - op = g . _create_op_from_tf_operation ( c_op ) <nl> - else : <nl> - # Test pure - Python version to make sure C API has same behavior . <nl> - op = test_ops . int_input_int_output ( x , name = " myop " ) . op <nl> + c_op = ops . _create_c_op ( <nl> + g , ops . _NodeDef ( " IntInputIntOutput " , " myop " ) , [ x ] , [ ] ) <nl> + op = g . _create_op_from_tf_operation ( c_op ) <nl> <nl> self . assertEqual ( op . name , " myop " ) <nl> self . assertEqual ( op . type , " IntInputIntOutput " ) <nl> def testShape ( self ) : <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> x = constant_op . constant ( [ [ 1 , 2 , 3 ] , [ 4 , 5 , 6 ] ] ) <nl> - if ops . _USE_C_API : <nl> - c_op = ops . _create_c_op ( g , ops . _NodeDef ( " Identity " , " myop " ) , [ x ] , [ ] ) <nl> - op = g . _create_op_from_tf_operation ( c_op ) <nl> - else : <nl> - # Test pure - Python version to make sure C API has same behavior . <nl> - op = array_ops . identity ( x , name = " myop " ) . op <nl> + c_op = ops . _create_c_op ( g , ops . _NodeDef ( " Identity " , " myop " ) , [ x ] , [ ] ) <nl> + op = g . _create_op_from_tf_operation ( c_op ) <nl> <nl> self . assertEqual ( op . name , " myop " ) <nl> self . assertEqual ( op . type , " Identity " ) <nl> def testShape ( self ) : <nl> def testUniqueName ( self ) : <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> - if ops . _USE_C_API : <nl> - c_op = ops . _create_c_op ( g , ops . _NodeDef ( " IntOutput " , " myop " ) , [ ] , [ ] ) <nl> - c_op2 = ops . _create_c_op ( g , ops . _NodeDef ( " IntOutput " , " myop_1 " ) , [ ] , [ ] ) <nl> - op = g . _create_op_from_tf_operation ( c_op ) <nl> - op2 = g . _create_op_from_tf_operation ( c_op2 ) <nl> - else : <nl> - # Test pure - Python version to make sure C API has same behavior . <nl> - op = test_ops . int_output ( name = " myop " ) . op <nl> - op2 = test_ops . int_output ( name = " myop_1 " ) . op <nl> + c_op = ops . _create_c_op ( g , ops . _NodeDef ( " IntOutput " , " myop " ) , [ ] , [ ] ) <nl> + c_op2 = ops . _create_c_op ( g , ops . _NodeDef ( " IntOutput " , " myop_1 " ) , [ ] , [ ] ) <nl> + op = g . _create_op_from_tf_operation ( c_op ) <nl> + op2 = g . _create_op_from_tf_operation ( c_op2 ) <nl> <nl> # Create ops with same names as op1 and op2 . We expect the new names to be <nl> # uniquified . <nl> def testCond ( self ) : <nl> x = test_ops . int_output ( ) <nl> <nl> def true_fn ( ) : <nl> - if ops . _USE_C_API : <nl> - ops . _create_c_op ( ops . get_default_graph ( ) , <nl> - ops . _NodeDef ( " IntInput " , " cond / myop " ) , [ x ] , [ ] ) <nl> - new_ops = g . _add_new_tf_operations ( ) <nl> - self . assertEqual ( len ( new_ops ) , 1 ) <nl> - else : <nl> - # Test pure - Python version to make sure C API has same behavior . <nl> - test_ops . int_input ( x , name = " myop " ) <nl> + ops . _create_c_op ( ops . 
get_default_graph ( ) , <nl> + ops . _NodeDef ( " IntInput " , " cond / myop " ) , [ x ] , [ ] ) <nl> + new_ops = g . _add_new_tf_operations ( ) <nl> + self . assertEqual ( len ( new_ops ) , 1 ) <nl> return x <nl> <nl> control_flow_ops . cond ( x < 10 , true_fn , lambda : x ) <nl> def testWhileLoop ( self ) : <nl> x = test_ops . int_output ( ) <nl> <nl> def body ( i ) : <nl> - if ops . _USE_C_API : <nl> - ops . _create_c_op ( ops . get_default_graph ( ) , <nl> - ops . _NodeDef ( " IntInput " , " myloop / myop " ) , [ x ] , [ ] ) <nl> - new_ops = g . _add_new_tf_operations ( ) <nl> - self . assertEqual ( len ( new_ops ) , 1 ) <nl> - else : <nl> - # Test pure - Python version to make sure C API has same behavior . <nl> - test_ops . int_input ( x , name = " myop " ) <nl> + ops . _create_c_op ( ops . get_default_graph ( ) , <nl> + ops . _NodeDef ( " IntInput " , " myloop / myop " ) , [ x ] , [ ] ) <nl> + new_ops = g . _add_new_tf_operations ( ) <nl> + self . assertEqual ( len ( new_ops ) , 1 ) <nl> return i <nl> <nl> control_flow_ops . while_loop ( lambda i : i < 10 , body , [ 0 ] , name = " myloop " ) <nl> def testWhileLoopWithInternalControlDep ( self ) : <nl> <nl> def body ( i ) : <nl> c = constant_op . constant ( 1 . 0 , name = " c " ) <nl> - if ops . _USE_C_API : <nl> - ops . _create_c_op ( ops . get_default_graph ( ) , <nl> - ops . _NodeDef ( " IntInput " , " myloop / myop " ) , [ x ] , [ ] ) <nl> - with ops . control_dependencies ( [ c ] ) : <nl> - new_ops = g . _add_new_tf_operations ( ) <nl> - self . assertEqual ( len ( new_ops ) , 1 ) <nl> - else : <nl> - with ops . control_dependencies ( [ c ] ) : <nl> - test_ops . int_input ( x , name = " myop " ) <nl> + ops . _create_c_op ( ops . get_default_graph ( ) , <nl> + ops . _NodeDef ( " IntInput " , " myloop / myop " ) , [ x ] , [ ] ) <nl> + with ops . control_dependencies ( [ c ] ) : <nl> + new_ops = g . _add_new_tf_operations ( ) <nl> + self . assertEqual ( len ( new_ops ) , 1 ) <nl> return i <nl> <nl> control_flow_ops . while_loop ( lambda i : i < 10 , body , [ 0 ] , name = " myloop " ) <nl> def testWhileLoopWithExternalControlDep ( self ) : <nl> c = constant_op . constant ( 1 . 0 ) <nl> <nl> def body ( i ) : <nl> - if ops . _USE_C_API : <nl> - ops . _create_c_op ( ops . get_default_graph ( ) , <nl> - ops . _NodeDef ( " IntInput " , " myloop / myop " ) , [ x ] , [ ] ) <nl> - with ops . control_dependencies ( [ c ] ) : <nl> - new_ops = g . _add_new_tf_operations ( ) <nl> - self . assertEqual ( len ( new_ops ) , 1 ) <nl> - else : <nl> - with ops . control_dependencies ( [ c ] ) : <nl> - test_ops . int_input ( x , name = " myop " ) <nl> + ops . _create_c_op ( ops . get_default_graph ( ) , <nl> + ops . _NodeDef ( " IntInput " , " myloop / myop " ) , [ x ] , [ ] ) <nl> + with ops . control_dependencies ( [ c ] ) : <nl> + new_ops = g . _add_new_tf_operations ( ) <nl> + self . assertEqual ( len ( new_ops ) , 1 ) <nl> return i <nl> <nl> control_flow_ops . while_loop ( lambda i : i < 10 , body , [ 0 ] , name = " myloop " ) <nl> def body ( i ) : <nl> self . assertIsNotNone ( op . control_inputs [ 0 ] . _get_control_flow_context ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ApplyOpTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testNodeDefArgs ( self ) : <nl> def testReferenceInput ( self ) : <nl> out_3 . op . node_def ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class NameStackTest ( test_util . 
TensorFlowTestCase ) : <nl> <nl> def testBasics ( self ) : <nl> def testInvalidNameRaisesError ( self ) : <nl> pass <nl> <nl> <nl> - @ test_util . with_c_api <nl> class NameTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testGenerateName ( self ) : <nl> def testNameScope ( self ) : <nl> g . create_op ( " FloatOutput " , [ ] , [ dtypes . float32 ] ) . name ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class DeviceTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testNoDevice ( self ) : <nl> def testOverwritingBehavior ( self ) : <nl> " " " , gd ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MultithreadedGraphStateTest ( test_util . TensorFlowTestCase ) : <nl> <nl> class TestThread ( threading . Thread ) : <nl> def run ( self ) : <nl> self . assertEquals ( " foo " + s + " / FloatOutput_1 " , t . result [ 1 ] . name ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ObjectWithName ( object ) : <nl> <nl> def __init__ ( self , name ) : <nl> def name ( self ) : <nl> return self . _name <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CollectionTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def test_get_collections ( self ) : <nl> def _CopyOverrideGrad ( op , x_grad ) : # pylint : disable = invalid - name <nl> return x_grad <nl> <nl> <nl> - @ test_util . with_c_api <nl> class RegistrationTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testRegisterGradients ( self ) : <nl> def testNonExistentOverride ( self ) : <nl> ops . get_gradient_function ( y . op ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ComparisonTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testMembershipAllowed ( self ) : <nl> def testMembershipAllowed ( self ) : <nl> self . assertTrue ( t1 not in [ t2 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ControlDependenciesTest ( test_util . TensorFlowTestCase ) : <nl> <nl> - @ test_util . enable_c_api <nl> def testBasic ( self ) : <nl> g = ops . Graph ( ) <nl> with g . as_default ( ) : <nl> def testNoControlDependencyWithDataDependency ( self ) : <nl> self . assertEqual ( b . op . control_inputs , [ ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OpScopeTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def foo ( ) : <nl> self . assertEqual ( ops . get_name_scope ( ) , " " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class GraphTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def setUp ( self ) : <nl> def testRunnableAfterInvalidShapeWithKernelLabelMap ( self ) : <nl> sess . run ( a ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class AttrScopeTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def _get_test_attrs ( self ) : <nl> def testLabelMap ( self ) : <nl> ops . RegisterShape ( " KernelLabel " ) ( common_shapes . scalar_shape ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class KernelLabelTest ( test_util . TensorFlowTestCase ) : <nl> <nl> - @ test_util . enable_c_api <nl> def testNoLabel ( self ) : <nl> with self . test_session ( ) : <nl> self . assertAllEqual ( b " My label is : default " , <nl> def testLabelMap ( self ) : <nl> self . assertAllEqual ( b " My label is : overload_2 " , overload_2 . eval ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class AsGraphDefTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testGraphDefVersion ( self ) : <nl> def _calc_a_forward_flops ( unused_graph , unused_node ) : <nl> return ops . OpStats ( " flops " , 20 ) <nl> <nl> <nl> - @ test_util . 
with_c_api <nl> class StatisticsTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testRegisteredNode ( self ) : <nl> def testAccumulateStatistics ( self ) : <nl> self . assertEqual ( 3 , flops_total . value ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ColocationGroupTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testBasic ( self ) : <nl> def testInconsistentDeviceWithinColocate ( self ) : <nl> self . assertEqual ( " / device : CPU : 0 " , b . device ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class DeprecatedTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testSuccess ( self ) : <nl> - # TODO ( skyewm ) : make g . graph_def_versions work with the C API enabled <nl> - if ops . _USE_C_API : return <nl> - <nl> with ops . Graph ( ) . as_default ( ) as g : <nl> - g . graph_def_versions . producer = 7 <nl> + test_util . set_producer_version ( g , 7 ) <nl> old = test_ops . old ( ) <nl> with self . test_session ( graph = g ) : <nl> old . run ( ) <nl> def testGraphConstructionFail ( self ) : <nl> with self . assertRaisesRegexp ( NotImplementedError , self . _error ( ) ) : <nl> test_ops . old ( ) <nl> <nl> - def testGraphExecutionFail ( self ) : <nl> - # TODO ( skyewm ) : make g . graph_def_versions work with the C API enabled <nl> - if ops . _USE_C_API : return <nl> - <nl> - with ops . Graph ( ) . as_default ( ) as g : <nl> - g . graph_def_versions . producer = 7 <nl> - old = test_ops . old ( ) <nl> - g . graph_def_versions . producer = versions . GRAPH_DEF_VERSION <nl> - with self . test_session ( graph = g ) : <nl> - with self . assertRaisesRegexp ( errors . UnimplementedError , self . _error ( ) ) : <nl> - old . run ( ) <nl> - <nl> <nl> - @ test_util . with_c_api <nl> class DenseTensorLikeTypeTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testSuccess ( self ) : <nl> def testBadClass ( self ) : <nl> DenseTensorLikeTypeTest . BadClassBadDtype ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class NameScopeTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testStripAndPrependScope ( self ) : <nl> def f ( ) : <nl> self . assertRaisesRegexp ( ValueError , " ' _ ' is not a valid scope name " , f ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class TracebackTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testTracebackWithStartLines ( self ) : <nl> def testTracebackWithStartLines ( self ) : <nl> self . assertEquals ( frame , frame_with_start_line [ : - 1 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> - class OutputTypesTest ( test_util . TensorFlowTestCase ) : <nl> - " " " Tests Operation . _output_types property . <nl> - <nl> - This test should not exist as _output_types is a private property . <nl> - This property is used by util . copy_elements and its tests would normally <nl> - cover Operation . _output_types . However , we can ' t yet run these tests in C <nl> - API mode because their use _set_device method . This test will be deleted <nl> - once we port _set_device and run the copy tests with C API on . <nl> - " " " <nl> - # TODO ( iga ) : Remove this test <nl> - <nl> - def setUp ( self ) : <nl> - self . prev_use_c_api = ops . _USE_C_API # pylint : disable = protected - access <nl> - ops . _USE_C_API = True # pylint : disable = protected - access <nl> - <nl> - def tearDown ( self ) : <nl> - ops . _USE_C_API = self . prev_use_c_api # pylint : disable = protected - access <nl> - <nl> - def testOneOutput ( self ) : <nl> - g = ops . Graph ( ) <nl> - with g . 
as_default ( ) : <nl> - # Using a constant because creating unregistered ops <nl> - # doesn ' t work with the C API . <nl> - op = constant_op . constant ( 12 , dtype = dtypes . uint16 ) . op <nl> - # pylint : disable = protected - access <nl> - self . assertEqual ( [ types_pb2 . DT_UINT16 ] , op . _output_types ) <nl> - # pylint : enable = protected - access <nl> - <nl> - def testTwoDifferentOutputs ( self ) : <nl> - g = ops . Graph ( ) <nl> - with g . as_default ( ) : <nl> - x = constant_op . constant ( [ 1 , 1 , 2 , 4 , 4 , 4 , 7 , 8 , 8 ] , <nl> - dtype = dtypes . double ) <nl> - y , _ = gen_array_ops . unique ( x ) <nl> - self . assertEqual ( [ types_pb2 . DT_DOUBLE , types_pb2 . DT_INT32 ] , <nl> - y . op . _output_types ) # pylint : disable = protected - access <nl> - <nl> - def testThreeOutputs ( self ) : <nl> - g = ops . Graph ( ) <nl> - with g . as_default ( ) : <nl> - # Using a split operationt because creating unregistered ops <nl> - # doesn ' t work with the C API . <nl> - a = constant_op . constant ( " abc " , dtype = dtypes . string , shape = [ 5 , 30 ] ) <nl> - split0 , _ , _ = array_ops . split ( a , [ 4 , 15 , 11 ] , 1 ) <nl> - # pylint : disable = protected - access <nl> - self . assertEqual ( [ types_pb2 . DT_STRING ] * 3 , split0 . op . _output_types ) <nl> - # pylint : enable = protected - access <nl> - <nl> - <nl> - @ test_util . with_c_api <nl> class EnableEagerExecutionTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testBadArgumentsToEnableEagerExecution ( self ) : <nl> mmm a / tensorflow / python / framework / smart_cond_test . py <nl> ppp b / tensorflow / python / framework / smart_cond_test . py <nl> def raise_exception ( ) : <nl> raise RuntimeError ( " did not expect to be called " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SmartCondTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testTrue ( self ) : <nl> def testUnknown ( self ) : <nl> self . assertEqual ( y . eval ( feed_dict = { x : - 1 } ) , 2 ) <nl> <nl> def testEval ( self ) : <nl> - # Constant expression evaluation only works with the C API enabled . <nl> - if not ops . _USE_C_API : return <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> with session . Session ( ) : <nl> x = constant_op . constant ( 1 ) <nl> def testMissingArg2 ( self ) : <nl> smart_cond . smart_cond ( True , lambda : x ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SmartCaseTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testTrue ( self ) : <nl> def testFalse ( self ) : <nl> self . assertEqual ( sess . run ( z ) , 1 ) <nl> <nl> def testMix ( self ) : <nl> - # Constant expression evaluation only works with the C API enabled . <nl> - if not ops . _USE_C_API : return <nl> - <nl> x = array_ops . placeholder ( dtype = dtypes . int32 , shape = [ ] ) <nl> y = constant_op . constant ( 10 ) <nl> conditions = [ ( x > 1 , lambda : constant_op . constant ( 1 ) ) , <nl> def testMix ( self ) : <nl> self . assertEqual ( sess . run ( z , feed_dict = { x : 0 } ) , 3 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SmartConstantValueTest ( test_util . TensorFlowTestCase ) : <nl> <nl> # TODO ( skyewm ) : this is essentially a regression test for <nl> mmm a / tensorflow / python / framework / subscribe_test . py <nl> ppp b / tensorflow / python / framework / subscribe_test . py <nl> <nl> from tensorflow . python . platform import googletest <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SubscribeTest ( test_util . 
TensorFlowTestCase ) : <nl> <nl> def _ExpectSubscribedIdentities ( self , container ) : <nl> mmm a / tensorflow / python / framework / test_util_test . py <nl> ppp b / tensorflow / python / framework / test_util_test . py <nl> <nl> from tensorflow . python . platform import googletest <nl> <nl> <nl> - @ test_util . with_c_api <nl> class TestUtilTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def test_assert_ops_in_graph ( self ) : <nl> def test_get_node_def_from_graph ( self ) : <nl> self . assertIsNone ( test_util . get_node_def_from_graph ( " bar " , graph_def ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class GarbageCollectionTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def test_no_reference_cycle_decorator ( self ) : <nl> mmm a / tensorflow / python / grappler / memory_optimizer_test . py <nl> ppp b / tensorflow / python / grappler / memory_optimizer_test . py <nl> <nl> from tensorflow . python . framework import meta_graph <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import random_seed <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . grappler import tf_optimizer <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn <nl> <nl> from tensorflow . python . training import training as train <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MemoryOptimizerSwapTest ( test . TestCase ) : <nl> " " " Tests the Grappler memory optimizer . " " " <nl> <nl> def testSimpleSwap ( self ) : <nl> self . assertEqual ( ' c ' , node . input [ 1 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MemoryOptimizerRecomputeTest ( test . TestCase ) : <nl> " " " Tests the Python interface to recomputation rewrites . <nl> <nl> mmm a / tensorflow / python / keras / _impl / keras / callbacks . py <nl> ppp b / tensorflow / python / keras / _impl / keras / callbacks . py <nl> def __init__ ( self , <nl> <nl> def on_epoch_end ( self , epoch , logs = None ) : <nl> if requests is None : <nl> - raise ImportError ( ' RemoteMonitor requires ' ' the ` requests ` library . ' ) <nl> + raise ImportError ( ' RemoteMonitor requires the ` requests ` library . ' ) <nl> logs = logs or { } <nl> send = { } <nl> send [ ' epoch ' ] = epoch <nl> mmm a / tensorflow / python / keras / _impl / keras / callbacks_test . py <nl> ppp b / tensorflow / python / keras / _impl / keras / callbacks_test . py <nl> def test_TensorBoard_with_ReduceLROnPlateau ( self ) : <nl> assert os . path . exists ( temp_dir ) <nl> <nl> def test_RemoteMonitorWithJsonPayload ( self ) : <nl> - if h5py is None : <nl> + if requests is None : <nl> self . skipTest ( ' ` requests ` required to run this test ' ) <nl> with self . test_session ( ) : <nl> ( x_train , y_train ) , ( x_test , y_test ) = testing_utils . get_test_data ( <nl> mmm a / tensorflow / python / keras / _impl / keras / engine / base_layer . py <nl> ppp b / tensorflow / python / keras / _impl / keras / engine / base_layer . py <nl> <nl> from six . moves import zip # pylint : disable = redefined - builtin <nl> <nl> from tensorflow . python . eager import context <nl> - from tensorflow . python . estimator import util as function_utils <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> <nl> from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import variable_scope as vs <nl> from tensorflow . 
python . ops import variables as tf_variables <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import function_utils <nl> from tensorflow . python . util import nest <nl> from tensorflow . python . util import tf_decorator <nl> mmm a / tensorflow / python / keras / _impl / keras / engine / network . py <nl> ppp b / tensorflow / python / keras / _impl / keras / engine / network . py <nl> <nl> from tensorflow . python . keras . _impl . keras . utils . io_utils import ask_to_proceed_with_overwrite <nl> from tensorflow . python . keras . _impl . keras . utils . layer_utils import print_summary as print_layer_summary <nl> from tensorflow . python . platform import tf_logging as logging <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> from tensorflow . python . util import nest <nl> from tensorflow . python . util import tf_inspect <nl> <nl> mmm a / tensorflow / python / keras / _impl / keras / layers / wrappers . py <nl> ppp b / tensorflow / python / keras / _impl / keras / layers / wrappers . py <nl> def step ( x , _ ) : <nl> step , <nl> inputs , <nl> initial_states = [ ] , <nl> - input_length = input_shape [ 0 ] , <nl> + input_length = input_shape [ 1 ] , <nl> unroll = False ) <nl> y = outputs <nl> else : <nl> mmm a / tensorflow / python / keras / _impl / keras / model_subclassing_test . py <nl> ppp b / tensorflow / python / keras / _impl / keras / model_subclassing_test . py <nl> <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . platform import test <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . training . rmsprop import RMSPropOptimizer <nl> <nl> try : <nl> mmm a / tensorflow / python / kernel_tests / array_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / array_ops_test . py <nl> def testSizeDtype ( self ) : <nl> self . evaluate ( array_ops . size ( tensor , out_type = dtypes . int64 ) ) . dtype ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SequenceMaskTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testExceptions ( self ) : <nl> def testOneDimensionalDtypeWithoutMaxlen ( self ) : <nl> # test dtype and default maxlen : <nl> res = array_ops . sequence_mask ( constant_op . constant ( [ 0 , 1 , 4 ] ) , <nl> dtype = dtypes . float32 ) <nl> - if ops . _USE_C_API : <nl> - self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 3 , 4 ] ) <nl> - else : <nl> - self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 3 , None ] ) <nl> + self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 3 , 4 ] ) <nl> self . assertAllEqual ( <nl> res . eval ( ) , <nl> [ [ 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] , [ 1 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] , [ 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 ] ] ) <nl> def testOneDimensionalWithoutMaxlen ( self ) : <nl> with self . test_session ( ) : <nl> res = array_ops . sequence_mask ( <nl> constant_op . constant ( [ 0 , 1 , 4 ] ) ) <nl> - if ops . _USE_C_API : <nl> - self . assertAllEqual ( res . 
get_shape ( ) . as_list ( ) , [ 3 , 4 ] ) <nl> - else : <nl> - self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 3 , None ] ) <nl> + self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 3 , 4 ] ) <nl> self . assertAllEqual ( <nl> res . eval ( ) , <nl> [ [ False , False , False , False ] , <nl> def testTwoDimensional ( self ) : <nl> # test dtype and default maxlen : <nl> res = array_ops . sequence_mask ( <nl> constant_op . constant ( [ [ 0 , 1 , 4 ] , [ 1 , 2 , 3 ] ] ) , dtype = dtypes . float32 ) <nl> - if ops . _USE_C_API : <nl> - self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 2 , 3 , 4 ] ) <nl> - else : <nl> - self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 2 , 3 , None ] ) <nl> + self . assertAllEqual ( res . get_shape ( ) . as_list ( ) , [ 2 , 3 , 4 ] ) <nl> self . assertAllEqual ( <nl> res . eval ( ) , <nl> [ [ [ 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] , [ 1 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] , [ 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 ] ] , <nl> mmm a / tensorflow / python / kernel_tests / control_flow_ops_py_test . py <nl> ppp b / tensorflow / python / kernel_tests / control_flow_ops_py_test . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> from tensorflow . python . framework import tensor_shape <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import data_flow_ops <nl> def isum ( s , maximum_iterations = None ) : <nl> return r_s <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ControlFlowTest ( test . TestCase ) : <nl> <nl> def testRefIdentity ( self ) : <nl> def func ( x ) : <nl> 1 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ControlFlowContextCheckTest ( test . TestCase ) : <nl> <nl> def _getWhileTensor ( self ) : <nl> def true_fn ( ) : <nl> math_ops . less ( 1 , 2 ) , true_fn , lambda : constant_op . constant ( 0 ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class TupleTest ( test . TestCase ) : <nl> <nl> def testTensors ( self ) : <nl> def testAcceptTensorsAsControlInputs ( self ) : <nl> self . assertEquals ( 1 , var . eval ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class AssertTest ( test . TestCase ) : <nl> <nl> def testGuardedAssertDoesNotCopyWhenTrue ( self ) : <nl> def testGuardedAssertDoesNotCopyWhenTrue ( self ) : <nl> self . assertEqual ( [ ] , guarded_memcpy_nodestat_names ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class WhileOpBenchmark ( test . Benchmark ) : <nl> " " " Evaluate the performance of while_loop op . " " " <nl> <nl> def benchmarkWhileOpUnrollSameDevicePlacement ( self ) : <nl> name = " unroll_same_device " , iters = iters , wall_time = duration ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class EagerTest ( test . TestCase ) : <nl> <nl> def testCond ( self ) : <nl> mmm a / tensorflow / python / kernel_tests / distributions / util_test . py <nl> ppp b / tensorflow / python / kernel_tests / distributions / util_test . py <nl> <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import gradient_checker <nl> from tensorflow . python . 
ops import gradients_impl <nl> def _logit ( x ) : <nl> return np . log ( x ) - np . log1p ( - x ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class AssertCloseTest ( test . TestCase ) : <nl> <nl> def testAssertCloseIntegerDtype ( self ) : <nl> def testGetStaticPlaceholder ( self ) : <nl> self . assertEqual ( None , du . maybe_get_static_value ( x , dtype = np . float64 ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class GetLogitsAndProbsTest ( test . TestCase ) : <nl> <nl> def testImproperArguments ( self ) : <nl> def testLogitsMultidimShape ( self ) : <nl> logit . eval ( feed_dict = { l : np . ones ( [ int ( 2 * * 11 + 1 ) ] ) } ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class EmbedCheckCategoricalEventShapeTest ( test . TestCase ) : <nl> <nl> def testTooSmall ( self ) : <nl> def testUnsupportedDtype ( self ) : <nl> du . embed_check_categorical_event_shape ( param ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class EmbedCheckIntegerCastingClosedTest ( test . TestCase ) : <nl> <nl> def testCorrectlyAssertsNonnegative ( self ) : <nl> def testCorrectlyAssertsSmallestPossibleInteger ( self ) : <nl> x_checked . eval ( feed_dict = { x : np . array ( [ 1 , - 1 ] , dtype = np . int32 ) } ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class LogCombinationsTest ( test . TestCase ) : <nl> <nl> def testLogCombinationsBinomial ( self ) : <nl> def testLogCombinationsShape ( self ) : <nl> self . assertEqual ( [ 2 , 2 ] , log_binom . get_shape ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class DynamicShapeTest ( test . TestCase ) : <nl> <nl> def testSameDynamicShape ( self ) : <nl> def testSameDynamicShape ( self ) : <nl> } ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class RotateTransposeTest ( test . TestCase ) : <nl> <nl> def _np_rotate_transpose ( self , x , shift ) : <nl> def testRollDynamic ( self ) : <nl> shift : shift_value } ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class PickVectorTest ( test . TestCase ) : <nl> <nl> def testCorrectlyPicksVector ( self ) : <nl> def testCorrectlyPicksVector ( self ) : <nl> constant_op . constant ( False ) , x , y ) ) # No eval . <nl> <nl> <nl> - @ test_util . with_c_api <nl> class PreferStaticRankTest ( test . TestCase ) : <nl> <nl> def testNonEmptyConstantTensor ( self ) : <nl> def testDynamicRankEndsUpBeingScalar ( self ) : <nl> self . assertAllEqual ( 0 , rank . eval ( feed_dict = { x : 1 } ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class PreferStaticShapeTest ( test . TestCase ) : <nl> <nl> def testNonEmptyConstantTensor ( self ) : <nl> def testDynamicShapeEndsUpBeingScalar ( self ) : <nl> self . assertAllEqual ( np . array ( [ ] ) , shape . eval ( feed_dict = { x : 1 } ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class PreferStaticValueTest ( test . TestCase ) : <nl> <nl> def testNonEmptyConstantTensor ( self ) : <nl> def testDynamicValueEndsUpBeingScalar ( self ) : <nl> self . assertAllEqual ( np . array ( 1 ) , value . eval ( feed_dict = { x : 1 } ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class FillTriangularTest ( test . TestCase ) : <nl> <nl> def setUp ( self ) : <nl> def testCorrectlyMakesBatch7x7TriUpper ( self ) : <nl> self . _run_test ( self . _rng . randn ( 2 , 3 , int ( 7 * 8 / 2 ) ) , upper = True ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ReduceWeightedLogSumExp ( test . TestCase ) : <nl> <nl> def _reduce_weighted_logsumexp ( self , logx , w , axis , keep_dims = False ) : <nl> def testDocString ( self ) : <nl> du . 
reduce_weighted_logsumexp ( x , w , axis = [ 0 , 1 ] ) . eval ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class GenNewSeedTest ( test . TestCase ) : <nl> <nl> def testOnlyNoneReturnsNone ( self ) : <nl> def testOnlyNoneReturnsNone ( self ) : <nl> # TODO ( jvdillon ) : Merge this test back into : <nl> # tensorflow / python / kernel_tests / softplus_op_test . py <nl> # once TF core is accepting new ops . <nl> - @ test_util . with_c_api <nl> class SoftplusTest ( test . TestCase ) : <nl> <nl> def _npSoftplus ( self , np_features ) : <nl> mmm a / tensorflow / python / kernel_tests / large_concat_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / large_concat_op_test . py <nl> <nl> <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - @ test_util . with_c_api <nl> class LargeConcatOpTest ( test . TestCase ) : <nl> " " " Tests that belong in concat_op_test . py , but run over large tensors . " " " <nl> <nl> mmm a / tensorflow / python / kernel_tests / reduce_join_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / reduce_join_op_test . py <nl> def _testReduceJoin ( self , <nl> input_array , <nl> truth , <nl> truth_shape , <nl> - reduction_indices , <nl> + axis , <nl> keep_dims = False , <nl> separator = " " ) : <nl> " " " Compares the output of reduce_join to an expected result . <nl> def _testReduceJoin ( self , <nl> input_array : The string input to be joined . <nl> truth : An array or np . array of the expected result . <nl> truth_shape : An array or np . array of the expected shape . <nl> - reduction_indices : The indices to reduce over . <nl> + axis : The indices to reduce over . <nl> keep_dims : Whether or not to retain reduced dimensions . <nl> separator : The separator to use for joining . <nl> " " " <nl> with self . test_session ( ) : <nl> output = string_ops . reduce_join ( <nl> inputs = input_array , <nl> - reduction_indices = reduction_indices , <nl> + axis = axis , <nl> keep_dims = keep_dims , <nl> separator = separator ) <nl> output_array = output . eval ( ) <nl> def _testReduceJoin ( self , <nl> self . assertAllEqualUnicode ( truth , output_array ) <nl> self . assertAllEqual ( truth_shape , output . get_shape ( ) ) <nl> <nl> - def _testMultipleReduceJoin ( self , <nl> - input_array , <nl> - reduction_indices , <nl> - separator = " " ) : <nl> - " " " Tests reduce_join for one input and multiple reduction_indices . <nl> + def _testMultipleReduceJoin ( self , input_array , axis , separator = " " ) : <nl> + " " " Tests reduce_join for one input and multiple axes . <nl> <nl> Does so by comparing the output to that from nested reduce_string_joins . <nl> The correctness of single - dimension reduce_join is verified by other <nl> def _testMultipleReduceJoin ( self , <nl> <nl> Args : <nl> input_array : The input to test . <nl> - reduction_indices : The indices to reduce . <nl> + axis : The indices to reduce . <nl> separator : The separator to use when joining . <nl> " " " <nl> with self . test_session ( ) : <nl> output = string_ops . reduce_join ( <nl> - inputs = input_array , <nl> - reduction_indices = reduction_indices , <nl> - keep_dims = False , <nl> - separator = separator ) <nl> + inputs = input_array , axis = axis , keep_dims = False , separator = separator ) <nl> output_keep_dims = string_ops . 
reduce_join ( <nl> - inputs = input_array , <nl> - reduction_indices = reduction_indices , <nl> - keep_dims = True , <nl> - separator = separator ) <nl> + inputs = input_array , axis = axis , keep_dims = True , separator = separator ) <nl> <nl> truth = input_array <nl> - for index in reduction_indices : <nl> + for index in axis : <nl> truth = string_ops . reduce_join ( <nl> - inputs = truth , <nl> - reduction_indices = index , <nl> - keep_dims = True , <nl> - separator = separator ) <nl> - if not reduction_indices : <nl> + inputs = truth , axis = index , keep_dims = True , separator = separator ) <nl> + if not axis : <nl> truth = constant_op . constant ( truth ) <nl> - truth_squeezed = array_ops . squeeze ( truth , axis = reduction_indices ) <nl> + truth_squeezed = array_ops . squeeze ( truth , axis = axis ) <nl> output_array = output . eval ( ) <nl> output_keep_dims_array = output_keep_dims . eval ( ) <nl> truth_array = truth . eval ( ) <nl> def testRankOne ( self ) : <nl> input_array = [ " this " , " is " , " a " , " test " ] <nl> truth = " thisisatest " <nl> truth_shape = [ ] <nl> - self . _testReduceJoin ( input_array , truth , truth_shape , reduction_indices = 0 ) <nl> + self . _testReduceJoin ( input_array , truth , truth_shape , axis = 0 ) <nl> <nl> def testRankTwo ( self ) : <nl> input_array = [ [ " this " , " is " , " a " , " test " ] , <nl> def testRankTwo ( self ) : <nl> truth_dim_one = [ " thisisatest " , " pleasedonotpanic " ] <nl> truth_shape_dim_one = [ 2 ] <nl> self . _testReduceJoin ( <nl> - input_array , truth_dim_zero , truth_shape_dim_zero , reduction_indices = 0 ) <nl> + input_array , truth_dim_zero , truth_shape_dim_zero , axis = 0 ) <nl> self . _testReduceJoin ( <nl> - input_array , truth_dim_one , truth_shape_dim_one , reduction_indices = 1 ) <nl> + input_array , truth_dim_one , truth_shape_dim_one , axis = 1 ) <nl> <nl> - expected_val = " thisisatestpleasedonotpanic " <nl> - expected_shape = None <nl> - self . _testReduceJoin ( <nl> - input_array , expected_val , expected_shape , reduction_indices = None ) <nl> - <nl> - # When using Tensor for input with reduction_indices = None , shape is known . <nl> expected_val = " thisisatestpleasedonotpanic " <nl> expected_shape = [ ] <nl> - self . _testReduceJoin ( <nl> - constant_op . constant ( input_array ) , expected_val , <nl> - expected_shape , reduction_indices = None ) <nl> + self . _testReduceJoin ( input_array , expected_val , expected_shape , axis = None ) <nl> <nl> - # Using [ ] reduction_indices is a no - op . <nl> + # Using axis = [ ] is a no - op . <nl> expected_val = input_array <nl> expected_shape = [ 2 , 4 ] <nl> - self . _testReduceJoin ( <nl> - input_array , expected_val , expected_shape , reduction_indices = [ ] ) <nl> + self . _testReduceJoin ( input_array , expected_val , expected_shape , axis = [ ] ) <nl> <nl> def testRankFive ( self ) : <nl> input_array = _input_array ( num_dims = 5 ) <nl> truths = [ _joined_array ( num_dims = 5 , reduce_dim = i ) for i in xrange ( 5 ) ] <nl> truth_shape = [ 2 ] * 4 <nl> for i in xrange ( 5 ) : <nl> - self . _testReduceJoin ( <nl> - input_array , truths [ i ] , truth_shape , reduction_indices = i ) <nl> + self . _testReduceJoin ( input_array , truths [ i ] , truth_shape , axis = i ) <nl> <nl> def testNegative ( self ) : <nl> input_array = _input_array ( num_dims = 5 ) <nl> truths = [ _joined_array ( num_dims = 5 , reduce_dim = i ) for i in xrange ( 5 ) ] <nl> truth_shape = [ 2 ] * 4 <nl> for i in xrange ( 5 ) : <nl> - self . 
_testReduceJoin ( <nl> - input_array , truths [ i ] , truth_shape , reduction_indices = i - 5 ) <nl> + self . _testReduceJoin ( input_array , truths [ i ] , truth_shape , axis = i - 5 ) <nl> <nl> def testSingletonDimension ( self ) : <nl> input_arrays = [ <nl> def testSingletonDimension ( self ) : <nl> truth = _input_array ( num_dims = 5 ) <nl> truth_shape = [ 2 ] * 5 <nl> for i in xrange ( 6 ) : <nl> - self . _testReduceJoin ( <nl> - input_arrays [ i ] , truth , truth_shape , reduction_indices = i ) <nl> + self . _testReduceJoin ( input_arrays [ i ] , truth , truth_shape , axis = i ) <nl> <nl> def testSeparator ( self ) : <nl> input_array = [ [ " this " , " is " , " a " , " test " ] , <nl> def testSeparator ( self ) : <nl> input_array , <nl> truth_dim_zero , <nl> truth_shape_dim_zero , <nl> - reduction_indices = 0 , <nl> + axis = 0 , <nl> separator = " " ) <nl> self . _testReduceJoin ( <nl> input_array , <nl> truth_dim_one , <nl> truth_shape_dim_one , <nl> - reduction_indices = 1 , <nl> + axis = 1 , <nl> separator = " " ) <nl> <nl> def testUnknownShape ( self ) : <nl> def testUnknownShape ( self ) : <nl> truth_shape = None <nl> with self . test_session ( ) : <nl> placeholder = array_ops . placeholder ( dtypes . string , name = " placeholder " ) <nl> - reduced = string_ops . reduce_join ( placeholder , reduction_indices = 0 ) <nl> + reduced = string_ops . reduce_join ( placeholder , axis = 0 ) <nl> output_array = reduced . eval ( feed_dict = { placeholder . name : input_array } ) <nl> self . assertAllEqualUnicode ( truth , output_array ) <nl> self . assertAllEqual ( truth_shape , reduced . get_shape ( ) ) <nl> def testUnknownIndices ( self ) : <nl> truth_shape = None <nl> with self . test_session ( ) : <nl> placeholder = array_ops . placeholder ( dtypes . int32 , name = " placeholder " ) <nl> - reduced = string_ops . reduce_join ( <nl> - input_array , reduction_indices = placeholder ) <nl> + reduced = string_ops . reduce_join ( input_array , axis = placeholder ) <nl> output_array_dim_zero = reduced . eval ( feed_dict = { placeholder . name : [ 0 ] } ) <nl> output_array_dim_one = reduced . eval ( feed_dict = { placeholder . name : [ 1 ] } ) <nl> self . assertAllEqualUnicode ( truth_dim_zero , output_array_dim_zero ) <nl> def testKeepDims ( self ) : <nl> input_array , <nl> truth_dim_zero , <nl> truth_shape_dim_zero , <nl> - reduction_indices = 0 , <nl> + axis = 0 , <nl> keep_dims = True ) <nl> self . _testReduceJoin ( <nl> input_array , <nl> truth_dim_one , <nl> truth_shape_dim_one , <nl> - reduction_indices = 1 , <nl> + axis = 1 , <nl> keep_dims = True ) <nl> <nl> expected_val = [ [ " thisisatestpleasedonotpanic " ] ] <nl> expected_shape = [ 1 , 1 ] <nl> self . _testReduceJoin ( <nl> constant_op . constant ( input_array ) , expected_val , expected_shape , <nl> - keep_dims = True , reduction_indices = None ) <nl> + keep_dims = True , axis = None ) <nl> <nl> - # Using [ ] reduction_indices is a no - op . <nl> + # Using axis = [ ] is a no - op . <nl> expected_val = input_array <nl> expected_shape = [ 2 , 4 ] <nl> self . _testReduceJoin ( <nl> - input_array , expected_val , expected_shape , <nl> - keep_dims = True , reduction_indices = [ ] ) <nl> + input_array , expected_val , expected_shape , keep_dims = True , axis = [ ] ) <nl> <nl> def testMultiIndex ( self ) : <nl> num_dims = 3 <nl> def testMultiIndex ( self ) : <nl> # Also tests [ ] . <nl> for i in xrange ( num_dims + 1 ) : <nl> for permutation in itertools . permutations ( xrange ( num_dims ) , i ) : <nl> - self . 
_testMultipleReduceJoin ( input_array , reduction_indices = permutation ) <nl> + self . _testMultipleReduceJoin ( input_array , axis = permutation ) <nl> <nl> def testInvalidReductionIndices ( self ) : <nl> with self . test_session ( ) : <nl> with self . assertRaisesRegexp ( ValueError , " Invalid reduction dim " ) : <nl> - string_ops . reduce_join ( inputs = " " , reduction_indices = 0 ) <nl> + string_ops . reduce_join ( inputs = " " , axis = 0 ) <nl> with self . assertRaisesRegexp ( ValueError , <nl> " Invalid reduction dimension - 3 " ) : <nl> - string_ops . reduce_join ( inputs = [ [ " " ] ] , reduction_indices = - 3 ) <nl> + string_ops . reduce_join ( inputs = [ [ " " ] ] , axis = - 3 ) <nl> with self . assertRaisesRegexp ( ValueError , " Invalid reduction dimension 2 " ) : <nl> - string_ops . reduce_join ( inputs = [ [ " " ] ] , reduction_indices = 2 ) <nl> + string_ops . reduce_join ( inputs = [ [ " " ] ] , axis = 2 ) <nl> with self . assertRaisesRegexp ( ValueError , <nl> " Invalid reduction dimension - 3 " ) : <nl> - string_ops . reduce_join ( inputs = [ [ " " ] ] , reduction_indices = [ 0 , - 3 ] ) <nl> + string_ops . reduce_join ( inputs = [ [ " " ] ] , axis = [ 0 , - 3 ] ) <nl> with self . assertRaisesRegexp ( ValueError , " Invalid reduction dimension 2 " ) : <nl> - string_ops . reduce_join ( inputs = [ [ " " ] ] , reduction_indices = [ 0 , 2 ] ) <nl> + string_ops . reduce_join ( inputs = [ [ " " ] ] , axis = [ 0 , 2 ] ) <nl> <nl> def testZeroDims ( self ) : <nl> with self . test_session ( ) : <nl> inputs = np . zeros ( [ 0 , 1 ] , dtype = str ) <nl> <nl> # Reduction that drops the dim of size 0 . <nl> - output = string_ops . reduce_join ( inputs = inputs , reduction_indices = 0 ) <nl> + output = string_ops . reduce_join ( inputs = inputs , axis = 0 ) <nl> self . assertAllEqualUnicode ( [ " " ] , output . eval ( ) ) <nl> <nl> # Reduction that keeps the dim of size 0 . <nl> - output = string_ops . reduce_join ( inputs = inputs , reduction_indices = 1 ) <nl> + output = string_ops . reduce_join ( inputs = inputs , axis = 1 ) <nl> output_shape = output . eval ( ) . shape <nl> self . assertAllEqual ( [ 0 ] , output_shape ) <nl> <nl> def testInvalidArgsUnknownShape ( self ) : <nl> with self . test_session ( ) : <nl> placeholder = array_ops . placeholder ( dtypes . string , name = " placeholder " ) <nl> - index_too_high = string_ops . reduce_join ( placeholder , reduction_indices = 1 ) <nl> - duplicate_index = string_ops . reduce_join ( <nl> - placeholder , reduction_indices = [ - 1 , 1 ] ) <nl> + index_too_high = string_ops . reduce_join ( placeholder , axis = 1 ) <nl> + duplicate_index = string_ops . reduce_join ( placeholder , axis = [ - 1 , 1 ] ) <nl> with self . assertRaisesOpError ( " Invalid reduction dimension 1 " ) : <nl> index_too_high . eval ( feed_dict = { placeholder . name : [ " " ] } ) <nl> with self . assertRaisesOpError ( " Duplicate reduction dimension 1 " ) : <nl> def testInvalidArgsUnknownShape ( self ) : <nl> def testInvalidArgsUnknownIndices ( self ) : <nl> with self . test_session ( ) : <nl> placeholder = array_ops . placeholder ( dtypes . int32 , name = " placeholder " ) <nl> - reduced = string_ops . reduce_join ( <nl> - [ " test " , " test2 " ] , reduction_indices = placeholder ) <nl> + reduced = string_ops . reduce_join ( [ " test " , " test2 " ] , axis = placeholder ) <nl> <nl> with self . assertRaisesOpError ( " reduction dimension - 2 " ) : <nl> reduced . eval ( feed_dict = { placeholder . 
name : - 2 } ) <nl> mmm a / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> <nl> from tensorflow . python . util import compat <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ResourceVariableOpsTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def tearDown ( self ) : <nl> mmm a / tensorflow / python / kernel_tests / scalar_test . py <nl> ppp b / tensorflow / python / kernel_tests / scalar_test . py <nl> <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ScalarTest ( test . TestCase ) : <nl> <nl> def check ( self , op , args , error , correct = None ) : <nl> mmm a / tensorflow / python / kernel_tests / softmax_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / softmax_op_test . py <nl> <nl> <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import errors_impl <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import nn_ops <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . platform import tf_logging as logging <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SoftmaxTest ( test . TestCase ) : <nl> <nl> def _npSoftmax ( self , features , dim = - 1 , log = False ) : <nl> mmm a / tensorflow / python / kernel_tests / sparse_cross_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / sparse_cross_op_test . py <nl> class SparseCrossOpTest ( test . TestCase ) : <nl> <nl> def test_simple ( self ) : <nl> " " " Tests a simple scenario . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] , <nl> [ ' batch2 - FC1 - F1 ' , ' batch2 - FC1 - F2 ' ] ] ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] , <nl> def test_simple ( self ) : <nl> <nl> def test_dense ( self ) : <nl> " " " Tests only dense inputs . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> constant_op . constant ( [ [ ' batch1 - FC1 - F1 ' , ' batch1 - FC1 - F2 ' ] , <nl> [ ' batch2 - FC1 - F1 ' , ' batch2 - FC1 - F2 ' ] ] , <nl> dtypes . string ) , <nl> def test_dense ( self ) : <nl> <nl> def test_integer_mixed_string_sparse ( self ) : <nl> " " " Tests mixed type . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( [ [ 11 ] , [ 333 , 55555 ] ] ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] , <nl> [ ' batch2 - FC2 - F1 ' , ' batch2 - FC2 - F2 ' ] ] ) <nl> def test_integer_mixed_string_sparse ( self ) : <nl> <nl> def test_integer_mixed_string_dense ( self ) : <nl> " " " Tests mixed dense inputs . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> constant_op . constant ( [ [ 11 , 333 ] , [ 55555 , 999999 ] ] , dtypes . int64 ) , <nl> constant_op . constant ( [ [ ' batch1 - FC2 - F1 ' , ' batch1 - FC2 - F2 ' ] , <nl> [ ' batch2 - FC2 - F1 ' , ' batch2 - FC2 - F2 ' ] ] , <nl> def test_integer_mixed_string_dense ( self ) : <nl> <nl> def test_sparse_cross_dense ( self ) : <nl> " " " Tests sparse and dense inputs . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . 
_sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] , <nl> [ ' batch2 - FC1 - F1 ' , ' batch2 - FC1 - F2 ' ] ] ) , <nl> constant_op . constant ( [ [ ' batch1 - FC2 - F1 ' , ' batch1 - FC2 - F2 ' ] , <nl> def test_sparse_cross_dense ( self ) : <nl> <nl> def test_integer_sparse_input ( self ) : <nl> " " " Tests mixed type sparse and dense inputs . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( [ [ 11 ] , [ 333 , 5555 ] ] ) , <nl> constant_op . constant ( [ [ ' batch1 - FC2 - F1 ' , ' batch1 - FC2 - F2 ' ] , <nl> [ ' batch2 - FC2 - F1 ' , ' batch2 - FC2 - F2 ' ] ] , <nl> def test_integer_sparse_input ( self ) : <nl> <nl> def test_permutation_3x3x3 ( self ) : <nl> " " " Tests 3x3x3 permutation . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( <nl> [ [ ' batch1 - FC1 - F1 ' , ' batch1 - FC1 - F2 ' , ' batch1 - FC1 - F3 ' ] ] ) , <nl> self . _sparse_tensor ( <nl> def test_permutation_3x3x3 ( self ) : <nl> <nl> def test_permutation_3x1x2 ( self ) : <nl> " " " Tests 3x1x2 permutation . " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( <nl> [ [ ' batch1 - FC1 - F1 ' , ' batch1 - FC1 - F2 ' , ' batch1 - FC1 - F3 ' ] ] ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] ] ) , <nl> def test_large_batch ( self ) : <nl> col2 . append ( [ ' batch % d - FC2 - F1 ' % b ] ) <nl> col3 . append ( [ ' batch % d - FC3 - F1 ' % b , ' batch % d - FC3 - F2 ' % b ] ) <nl> <nl> - op = sparse_ops . _sparse_cross ( [ <nl> - self . _sparse_tensor ( col1 ) , self . _sparse_tensor ( col2 ) , <nl> + op = sparse_ops . sparse_cross ( [ <nl> + self . _sparse_tensor ( col1 ) , <nl> + self . _sparse_tensor ( col2 ) , <nl> self . _sparse_tensor ( col3 ) <nl> ] ) <nl> <nl> def test_one_column_empty ( self ) : <nl> <nl> The crossed tensor should be empty . <nl> " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' , ' batch1 - FC1 - F2 ' ] ] ) , <nl> self . _sparse_tensor ( [ ] , 1 ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC3 - F1 ' , ' batch1 - FC3 - F2 ' ] ] ) <nl> def test_some_columns_empty ( self ) : <nl> <nl> Cross for the corresponding batch should be empty . <nl> " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> + op = sparse_ops . sparse_cross ( [ <nl> self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' , ' batch1 - FC1 - F2 ' ] ] , 2 ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] , [ ' batch2 - FC2 - F1 ' ] ] , 2 ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC3 - F1 ' , ' batch1 - FC3 - F2 ' ] ] , 2 ) <nl> def test_all_columns_empty ( self ) : <nl> <nl> The crossed tensor should be empty . <nl> " " " <nl> - op = sparse_ops . _sparse_cross ( [ <nl> - self . _sparse_tensor ( [ ] ) , self . _sparse_tensor ( [ ] ) , <nl> + op = sparse_ops . sparse_cross ( [ <nl> + self . _sparse_tensor ( [ ] ) , <nl> + self . _sparse_tensor ( [ ] ) , <nl> self . _sparse_tensor ( [ ] ) <nl> ] ) <nl> with self . test_session ( ) as sess : <nl> self . _assert_sparse_tensor_empty ( sess . run ( op ) ) <nl> <nl> def test_hashed_zero_bucket_no_hash_key ( self ) : <nl> - op = sparse_ops . _sparse_cross_hashed ( <nl> - [ <nl> - self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] ] ) , <nl> - self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] ] ) , <nl> - self . 
_sparse_tensor ( [ [ ' batch1 - FC3 - F1 ' ] ] ) <nl> - ] ) <nl> + op = sparse_ops . sparse_cross_hashed ( [ <nl> + self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] ] ) , <nl> + self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] ] ) , <nl> + self . _sparse_tensor ( [ [ ' batch1 - FC3 - F1 ' ] ] ) <nl> + ] ) <nl> # Check actual hashed output to prevent unintentional hashing changes . <nl> expected_out = self . _sparse_tensor ( [ [ 1971693436396284976 ] ] ) <nl> with self . test_session ( ) as sess : <nl> self . _assert_sparse_tensor_equals ( expected_out , sess . run ( op ) ) <nl> <nl> def test_hashed_zero_bucket ( self ) : <nl> - op = sparse_ops . _sparse_cross_hashed ( <nl> + op = sparse_ops . sparse_cross_hashed ( <nl> [ <nl> self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] ] ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] ] ) , <nl> def test_hashed_zero_bucket ( self ) : <nl> <nl> # TODO ( sibyl - Aix6ihai ) : Add benchmark to compare Hashed vs Non - hashed . <nl> def test_hashed_no_hash_key ( self ) : <nl> - op = sparse_ops . _sparse_cross_hashed ( <nl> + op = sparse_ops . sparse_cross_hashed ( <nl> [ <nl> self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] ] ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] ] ) , <nl> def test_hashed_no_hash_key ( self ) : <nl> self . _assert_sparse_tensor_equals ( expected_out , sess . run ( op ) ) <nl> <nl> def test_hashed_output ( self ) : <nl> - op = sparse_ops . _sparse_cross_hashed ( <nl> + op = sparse_ops . sparse_cross_hashed ( <nl> [ <nl> self . _sparse_tensor ( [ [ ' batch1 - FC1 - F1 ' ] ] ) , <nl> self . _sparse_tensor ( [ [ ' batch1 - FC2 - F1 ' ] ] ) , <nl> def test_hashed__has_no_collision ( self ) : <nl> # As a result , all the crosses shouldn ' t collide . <nl> t1 = constant_op . constant ( [ [ 359 ] , [ 359 + 1024 ] ] ) <nl> t2 = constant_op . constant ( [ list ( range ( 10 ) ) , list ( range ( 10 ) ) ] ) <nl> - cross = sparse_ops . _sparse_cross_hashed ( <nl> - [ t2 , t1 ] , <nl> - num_buckets = 1024 , <nl> - hash_key = sparse_ops . _DEFAULT_HASH_KEY + 1 ) <nl> + cross = sparse_ops . sparse_cross_hashed ( <nl> + [ t2 , t1 ] , num_buckets = 1024 , hash_key = sparse_ops . _DEFAULT_HASH_KEY + 1 ) <nl> cross_dense = sparse_ops . sparse_tensor_to_dense ( cross ) <nl> with session . Session ( ) : <nl> values = cross_dense . eval ( ) <nl> def test_hashed__has_no_collision ( self ) : <nl> <nl> def test_hashed_3x1x2 ( self ) : <nl> " " " Tests 3x1x2 permutation with hashed output . " " " <nl> - op = sparse_ops . _sparse_cross_hashed ( <nl> + op = sparse_ops . sparse_cross_hashed ( <nl> [ <nl> self . _sparse_tensor ( <nl> [ [ ' batch1 - FC1 - F1 ' , ' batch1 - FC1 - F2 ' , ' batch1 - FC1 - F3 ' ] ] ) , <nl> mmm a / tensorflow / python / layers / convolutional_test . py <nl> ppp b / tensorflow / python / layers / convolutional_test . py <nl> <nl> <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . layers import convolutional as conv_layers <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import init_ops <nl> <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ConvTest ( test . TestCase ) : <nl> <nl> def testInvalidDataFormat ( self ) : <nl> def testConv3DChannelsFirst ( self ) : <nl> conv_layers . 
conv3d ( images , 32 , 9 , data_format = ' channels_first ' ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SeparableConv1DTest ( test . TestCase ) : <nl> <nl> def testInvalidDataFormat ( self ) : <nl> def testConstraints ( self ) : <nl> self . assertEqual ( layer . bias_constraint , b_constraint ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SeparableConv2DTest ( test . TestCase ) : <nl> <nl> def testInvalidDataFormat ( self ) : <nl> def testConstraints ( self ) : <nl> self . assertEqual ( layer . bias_constraint , b_constraint ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class Conv2DTransposeTest ( test . TestCase ) : <nl> <nl> def testInvalidDataFormat ( self ) : <nl> def testConstraints ( self ) : <nl> self . assertEqual ( layer . bias_constraint , b_constraint ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class Conv3DTransposeTest ( test . TestCase ) : <nl> <nl> def testInvalidDataFormat ( self ) : <nl> mmm a / tensorflow / python / ops / control_flow_ops_test . py <nl> ppp b / tensorflow / python / ops / control_flow_ops_test . py <nl> <nl> SingletonTestTuple = collections . namedtuple ( " SingletonTestTuple " , " a " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class GroupTestCase ( test_util . TensorFlowTestCase ) : <nl> <nl> def _StripNode ( self , nd ) : <nl> def testPassingNonTensors ( self ) : <nl> control_flow_ops . group ( 1 , 2 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ShapeTestCase ( test_util . TensorFlowTestCase ) : <nl> <nl> def testShape ( self ) : <nl> def testShape ( self ) : <nl> [ constant_op . constant ( 1 . 0 ) ] , tensor ) . get_shape ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class WithDependenciesTestCase ( test_util . TensorFlowTestCase ) : <nl> <nl> def testTupleDependencies ( self ) : <nl> def testListDependencies ( self ) : <nl> self . assertEquals ( 1 , counter . eval ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SwitchTestCase ( test_util . TensorFlowTestCase ) : <nl> <nl> def testIndexedSlicesWithDenseShape ( self ) : <nl> def testGradientThroughSingleBranchOutsideOfContext ( self ) : <nl> self . assertEquals ( grad_x_false . eval ( ) , 0 . ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CondTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testCondTrue ( self ) : <nl> - # Create new Graph and Session for each test so we pick up _USE_C_API <nl> - # correctly . <nl> with ops . Graph ( ) . as_default ( ) : <nl> with session . Session ( ) : <nl> x = constant_op . constant ( 2 ) <nl> def testCondDuplicateArg2 ( self ) : <nl> control_flow_ops . cond ( True , lambda : x , lambda : x , fn2 = lambda : x ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ContextTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testCondContext ( self ) : <nl> def _raw_shape ( shape ) : <nl> <nl> <nl> # TODO ( yori ) : Add tests for indexed slices . <nl> - @ test_util . with_c_api <nl> class DataTypesTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def assertAllEqualNested ( self , a , b ) : <nl> def body ( i , matrix ) : <nl> self . assertEqual ( matrix . get_shape ( ) , tensor_shape . TensorShape ( [ 2 , 2 ] ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CaseTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testCase_withDefault ( self ) : <nl> def testCase_withoutDefault_oneCondition ( self ) : <nl> sess . run ( output , feed_dict = { x : 4 } ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class WhileLoopTestCase ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . 
run_in_graph_and_eager_modes ( ) <nl> mmm a / tensorflow / python / ops / distributions / distribution . py <nl> ppp b / tensorflow / python / ops / distributions / distribution . py <nl> def _log_prob ( self , value ) : <nl> <nl> For detailed usage examples of TensorFlow Distributions shapes , see <nl> [ this tutorial ] ( <nl> - https : / / github . com / tensorflow / probability / blob / master / tensorflow_probability / examples / jupyter_notebooks / Understanding % 20TensorFlow % 20Distributions % 20Shapes . ipynb ) <nl> + https : / / github . com / tensorflow / probability / blob / master / tensorflow_probability / examples / jupyter_notebooks / Understanding_TensorFlow_Distributions_Shapes . ipynb ) <nl> <nl> # # # # Parameter values leading to undefined statistics or distributions . <nl> <nl> mmm a / tensorflow / python / ops / gradients_test . py <nl> ppp b / tensorflow / python / ops / gradients_test . py <nl> def testAggregationMethodTree ( self ) : <nl> self . assertEqual ( 10 . 0 , grads [ 1 ] . eval ( ) ) <nl> <nl> def testNoGradientForStringOutputs ( self ) : <nl> - # This test can ' t be run twice because the TestStringOutput gradient can <nl> - # only be registered once . Just run with the C API enabled . <nl> - if not ops . _USE_C_API : return <nl> - <nl> with ops . Graph ( ) . as_default ( ) : <nl> <nl> def _TestOpGrad ( _ , float_grad , string_grad ) : <nl> def _Gradients ( ys , xs , * * kwargs ) : <nl> np . testing . assert_allclose ( a , b ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class FunctionGradientsTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ classmethod <nl> def testFunctionGradientWithGradFuncAndRegistration ( self ) : <nl> f . add_to_graph ( ops . Graph ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class StopGradientTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testStopGradient ( self ) : <nl> def testStopGradient ( self ) : <nl> assert igrad is None <nl> <nl> <nl> - @ test_util . with_c_api <nl> class PreventGradientTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testPreventGradient ( self ) : <nl> def testPreventGradient ( self ) : <nl> _ = gradients . gradients ( out , inp ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class HessianVectorProductTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testHessianVectorProduct ( self ) : <nl> def testHessianVectorProduct ( self ) : <nl> self . assertAllClose ( hess_v_value , hess_v_actual ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class HessianTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testHessian1D ( self ) : <nl> def testHessian2D_non_square_matrix ( self ) : <nl> self . assertAllClose ( hess_value , hess_actual . reshape ( ( m * n , m * n ) ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class IndexedSlicesToTensorTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testIndexedSlicesToTensor ( self ) : <nl> def testWarnings ( self ) : <nl> str ( w [ 0 ] . message ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OnlyRealGradientsTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testRealOnly ( self ) : <nl> def TestFn ( ) : <nl> self . assertTrue ( None not in grads ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CustomGradientTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testCustomGradientTrivial ( self ) : <nl> mmm a / tensorflow / python / ops / math_grad . py <nl> ppp b / tensorflow / python / ops / math_grad . 
py <nl> def _ProdGrad ( op , grad ) : <nl> # Calculate product , leaving out the current entry <nl> left = math_ops . cumprod ( reshaped , axis = 0 , exclusive = True ) <nl> right = math_ops . cumprod ( reshaped , axis = 0 , exclusive = True , reverse = True ) <nl> - y = array_ops . reshape ( left * right , permuted_shape ) <nl> + # For complex inputs , the gradient is in the conjugate direction . <nl> + y = array_ops . reshape ( math_ops . conj ( left ) * math_ops . conj ( right ) , <nl> + permuted_shape ) <nl> <nl> # Invert the transpose and reshape operations . <nl> # Make sure to set the statically known shape information through a reshape . <nl> mmm a / tensorflow / python / ops / math_grad_test . py <nl> ppp b / tensorflow / python / ops / math_grad_test . py <nl> def testProdGradientForNegativeAxis ( self ) : <nl> outputs , outputs . get_shape ( ) . as_list ( ) ) <nl> self . assertLess ( error , 1e - 4 ) <nl> <nl> + def testProdGradientComplex ( self ) : <nl> + for dtype in dtypes . complex64 , dtypes . complex128 : <nl> + inputs = constant_op . constant ( [ [ 1 + 3j , 2 - 1j ] , [ 3j , 4 ] ] , <nl> + dtype = dtype ) <nl> + outputs = math_ops . reduce_prod ( inputs ) <nl> + with self . test_session ( ) : <nl> + error = gradient_checker . compute_gradient_error ( <nl> + inputs , inputs . get_shape ( ) . as_list ( ) , <nl> + outputs , outputs . get_shape ( ) . as_list ( ) ) <nl> + self . assertLess ( error , 1e - 4 ) <nl> + <nl> + def testProdGradientForNegativeAxisComplex ( self ) : <nl> + for dtype in dtypes . complex64 , dtypes . complex128 : <nl> + inputs = constant_op . constant ( [ [ 1 + 3j , 2 - 1j ] , [ 3j , 4 ] ] , <nl> + dtype = dtype ) <nl> + outputs = math_ops . reduce_prod ( inputs , - 1 ) <nl> + with self . test_session ( ) : <nl> + error = gradient_checker . compute_gradient_error ( <nl> + inputs , inputs . get_shape ( ) . as_list ( ) , <nl> + outputs , outputs . get_shape ( ) . as_list ( ) ) <nl> + self . assertLess ( error , 1e - 4 ) <nl> + <nl> <nl> class SegmentMinOrMaxGradientTest ( test . TestCase ) : <nl> <nl> mmm a / tensorflow / python / ops / math_ops_test . py <nl> ppp b / tensorflow / python / ops / math_ops_test . py <nl> <nl> log = np . log <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ReduceTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testReduceInvalidAxis ( self ) : <nl> math_ops . reduce_sum ( x , axis ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class LogSumExpTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testReduceLogSumExp ( self ) : <nl> def testInfinity ( self ) : <nl> self . assertEqual ( - np . inf , res ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class RoundTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testRounding ( self ) : <nl> self . assertAllClose ( y_tf_np , y_np , atol = 1e - 2 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ModTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testFloat ( self ) : <nl> def testFixed ( self ) : <nl> self . assertAllClose ( y_tf_np , y_np ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SquaredDifferenceTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testSquaredDifference ( self ) : <nl> self . assertAllClose ( z , z_tf ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ApproximateEqualTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . 
run_in_graph_and_eager_modes ( ) <nl> def testApproximateEqual ( self ) : <nl> self . assertAllEqual ( z , z_tf ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ScalarMulTest ( test_util . TensorFlowTestCase ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testAcceptsIndexedSlices ( self ) : <nl> self . assertAllEqual ( self . evaluate ( x . indices ) , [ 0 , 2 , 5 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class AccumulateNTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testFloat ( self ) : <nl> def testInt ( self ) : <nl> self . assertAllEqual ( x [ 0 ] * 6 , math_ops . accumulate_n ( [ tf_x [ 0 ] ] * 6 ) . eval ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class AddNTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def testPartials ( self ) : <nl> def testGrad ( self ) : <nl> [ g . eval ( ) for g in add_n_grad ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class DivAndModTest ( test_util . TensorFlowTestCase ) : <nl> # TODO ( aselle ) : Test more types before exposing new division operators . <nl> <nl> mmm a / tensorflow / python / ops / nn_batchnorm_test . py <nl> ppp b / tensorflow / python / ops / nn_batchnorm_test . py <nl> <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - @ test_util . with_c_api <nl> class BatchNormalizationTest ( test . TestCase ) : <nl> <nl> def _npBatchNorm ( self , x , m , v , beta , gamma , epsilon , <nl> def testBatchNormMixedPrecision ( self ) : <nl> param_dtype = dtypes . float32 , atol = 0 . 001 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SufficientStatisticsTest ( test . TestCase ) : <nl> <nl> def _npSuffStats ( self , x , axes , shift , keep_dims ) : <nl> def testSuffStats ( self ) : <nl> self . _testSuffStats ( [ 1 , 2 , 3 ] , [ 0 , 2 ] , shift , keep_dims , has_shape ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class NormalizeMomentsTest ( test . TestCase ) : <nl> <nl> def _npNormalizeMoments ( self , counts , mean_ss , variance_ss , shift ) : <nl> def testNormalizeMoments ( self ) : <nl> self . _testNormalizeMoments ( [ 2 , 3 ] , shift ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MomentsTest ( test . TestCase ) : <nl> <nl> def _unweighted_moments ( self , x , axes , keep_dims = False , extra_out_grads = None ) : <nl> def testVarGlobalGradient ( self ) : <nl> self . _testGlobalGradient ( from_y = " var " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class WeightedMomentsTest ( MomentsTest ) : <nl> " " " Tests for nn . weighted_moments . <nl> <nl> mmm a / tensorflow / python / ops / resource_variable_ops . py <nl> ppp b / tensorflow / python / ops / resource_variable_ops . py <nl> <nl> # pylint : disable = wildcard - import <nl> from tensorflow . python . ops . gen_resource_variable_ops import * <nl> # pylint : enable = wildcard - import <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import compat <nl> <nl> <nl> mmm a / tensorflow / python / ops / rnn . py <nl> ppp b / tensorflow / python / ops / rnn . py <nl> def dynamic_rnn ( cell , inputs , sequence_length = None , initial_state = None , <nl> nested ) tuple of Tensors each with dimensions ` [ batch_size , . . . ] ` . <nl> sequence_length : ( optional ) An int32 / int64 vector sized ` [ batch_size ] ` . <nl> Used to copy - through state and zero - out outputs when past a batch <nl> - element ' s sequence length . 
So it ' s more for correctness than performance . <nl> + element ' s sequence length . So it ' s more for performance than correctness . <nl> initial_state : ( optional ) An initial state for the RNN . <nl> If ` cell . state_size ` is an integer , this must be <nl> a ` Tensor ` of appropriate type and shape ` [ batch_size , cell . state_size ] ` . <nl> mmm a / tensorflow / python / ops / rnn_cell_impl . py <nl> ppp b / tensorflow / python / ops / rnn_cell_impl . py <nl> <nl> from tensorflow . python . ops import variable_scope as vs <nl> from tensorflow . python . ops import variables as tf_variables <nl> from tensorflow . python . platform import tf_logging as logging <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import nest <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> mmm a / tensorflow / python / ops / sparse_ops . py <nl> ppp b / tensorflow / python / ops / sparse_ops . py <nl> def sparse_add ( a , b , thresh = 0 ) : <nl> a . dense_shape , b ) <nl> <nl> <nl> - def _sparse_cross ( inputs , name = None ) : <nl> + @ tf_export ( " sparse . cross " ) <nl> + def sparse_cross ( inputs , name = None ) : <nl> " " " Generates sparse cross from a list of sparse and dense tensors . <nl> <nl> For example , if the inputs are <nl> def _sparse_cross ( inputs , name = None ) : <nl> return _sparse_cross_internal ( inputs = inputs , hashed_output = False , name = name ) <nl> <nl> <nl> - def _sparse_cross_hashed ( inputs , num_buckets = 0 , hash_key = None , name = None ) : <nl> + _sparse_cross = sparse_cross <nl> + <nl> + <nl> + @ tf_export ( " sparse . cross_hashed " ) <nl> + def sparse_cross_hashed ( inputs , num_buckets = 0 , hash_key = None , name = None ) : <nl> " " " Generates hashed sparse cross from a list of sparse and dense tensors . <nl> <nl> For example , if the inputs are <nl> def _sparse_cross_hashed ( inputs , num_buckets = 0 , hash_key = None , name = None ) : <nl> name = name ) <nl> <nl> <nl> + _sparse_cross_hashed = sparse_cross_hashed <nl> + <nl> _DEFAULT_HASH_KEY = 0xDECAFCAFFE <nl> <nl> <nl> mmm a / tensorflow / python / ops / string_ops . py <nl> ppp b / tensorflow / python / ops / string_ops . py <nl> def _reduce_join_reduction_dims ( x , axis , reduction_indices ) : <nl> return axis <nl> else : <nl> # Fast path : avoid creating Rank and Range ops if ndims is known . <nl> - if isinstance ( x , ops . Tensor ) and x . get_shape ( ) . ndims is not None : <nl> + if x . get_shape ( ) . ndims is not None : <nl> return constant_op . constant ( <nl> np . arange ( x . get_shape ( ) . ndims - 1 , - 1 , - 1 ) , dtype = dtypes . int32 ) <nl> <nl> def reduce_join ( inputs , axis = None , <nl> separator = " " , <nl> name = None , <nl> reduction_indices = None ) : <nl> + inputs_t = ops . convert_to_tensor ( inputs ) <nl> reduction_indices = _reduce_join_reduction_dims ( <nl> - inputs , axis , reduction_indices ) <nl> + inputs_t , axis , reduction_indices ) <nl> return gen_string_ops . reduce_join ( <nl> - inputs = inputs , <nl> + inputs = inputs_t , <nl> reduction_indices = reduction_indices , <nl> keep_dims = keep_dims , <nl> separator = separator , <nl> mmm a / tensorflow / python / ops / template . py <nl> ppp b / tensorflow / python / ops / template . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . 
platform import tf_logging as logging <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import tf_contextlib <nl> from tensorflow . python . util import tf_decorator <nl> from tensorflow . python . util . deprecation import deprecated <nl> mmm a / tensorflow / python / ops / variable_scope . py <nl> ppp b / tensorflow / python / ops / variable_scope . py <nl> def foo ( ) : <nl> name : The name of the new or existing variable . <nl> shape : Shape of the new or existing variable . <nl> dtype : Type of the new or existing variable ( defaults to ` DT_FLOAT ` ) . <nl> - initializer : Initializer for the variable if one is created . <nl> + initializer : Initializer for the variable if one is created . Can either be <nl> + an initializer object or a Tensor . If it ' s a Tensor , its shape must be known <nl> + unless validate_shape is False . <nl> regularizer : A ( Tensor - > Tensor or None ) function ; the result of <nl> applying it on a newly created variable will be added to the collection <nl> @ { tf . GraphKeys . REGULARIZATION_LOSSES } and can be used for regularization . <nl> def foo ( ) : <nl> partitions for each axis ( currently only one axis can be partitioned ) . <nl> validate_shape : If False , allows the variable to be initialized with a <nl> value of unknown shape . If True , the default , the shape of initial_value <nl> - must be known . <nl> + must be known . For this to be used the initializer must be a Tensor and <nl> + not an initializer object . <nl> use_resource : If False , creates a regular Variable . If true , creates an <nl> experimental ResourceVariable instead with well - defined semantics . <nl> Defaults to False ( will later change to True ) . When eager execution is <nl> mmm a / tensorflow / python / ops / variables . py <nl> ppp b / tensorflow / python / ops / variables . py <nl> <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import state_ops <nl> from tensorflow . python . platform import tf_logging as logging <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import compat <nl> from tensorflow . python . util import tf_should_use <nl> from tensorflow . python . util . deprecation import deprecated <nl> mmm a / tensorflow / python / saved_model / saved_model_test . py <nl> ppp b / tensorflow / python / saved_model / saved_model_test . py <nl> def tearDownModule ( ) : <nl> file_io . delete_recursively ( test . get_temp_dir ( ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SavedModelTest ( test . TestCase ) : <nl> <nl> def _get_export_dir ( self , label ) : <nl> - if ops . _USE_C_API : <nl> - label + = " _c_api " <nl> return os . path . join ( test . get_temp_dir ( ) , label ) <nl> <nl> def _init_and_validate_variable ( self , sess , variable_name , variable_value ) : <nl> def testInconsistentConsumerDefaultAttrs ( self ) : <nl> # does not have any attr values for the " TestAttr " node , and there is no <nl> # default specified in the TestAttr OpDef . <nl> sess = session . Session ( graph = ops . Graph ( ) ) <nl> - if ops . _USE_C_API : <nl> - error_message = " NodeDef missing attr ' T ' from Op < name = TestAttr " <nl> - else : <nl> - error_message = ( " Expected one attr with name . * T ( out ) ? . 
* in name : " <nl> - " \ " test_attr \ " . * " ) <nl> - with self . assertRaisesRegexp ( ValueError , error_message ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " NodeDef missing attr ' T ' from Op < name = TestAttr " ) : <nl> loader . load ( sess , [ " foo " ] , export_dir ) <nl> <nl> # Rewrite the SavedModel to change the type of the T attr in " test_attr " <nl> mmm a / tensorflow / python / tools / optimize_for_inference_test . py <nl> ppp b / tensorflow / python / tools / optimize_for_inference_test . py <nl> <nl> from tensorflow . python . tools import optimize_for_inference_lib <nl> <nl> <nl> - @ test_util . with_c_api <nl> class OptimizeForInferenceTest ( test . TestCase ) : <nl> <nl> def create_node_def ( self , op , name , inputs ) : <nl> mmm a / tensorflow / python / training / basic_session_run_hooks . py <nl> ppp b / tensorflow / python / training / basic_session_run_hooks . py <nl> def __init__ ( self , tensors , every_n_iter = None , every_n_secs = None , <nl> self . _tag_order = tensors <nl> tensors = { item : item for item in tensors } <nl> else : <nl> - self . _tag_order = tensors . keys ( ) <nl> + self . _tag_order = sorted ( tensors . keys ( ) ) <nl> self . _tensors = tensors <nl> self . _formatter = formatter <nl> self . _timer = ( <nl> new file mode 100644 <nl> index 0000000000000 . . a7ae6e50a9975 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / training / checkpointable / BUILD <nl> <nl> + # Description : <nl> + # Utilities for reading and writing object - based checkpoints . <nl> + <nl> + package ( <nl> + default_visibility = [ <nl> + " / / tensorflow : internal " , <nl> + ] , <nl> + ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + exports_files ( [ " LICENSE " ] ) <nl> + <nl> + load ( " / / tensorflow : tensorflow . bzl " , " py_test " ) <nl> + <nl> + py_library ( <nl> + name = " base " , <nl> + srcs = [ " base . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : io_ops_gen " , <nl> + " / / tensorflow / python : ops " , <nl> + " / / tensorflow / python : saveable_object " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " base_test " , <nl> + srcs = [ " base_test . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : base " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " util " , <nl> + srcs = [ " util . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : base " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : io_ops_gen " , <nl> + " / / tensorflow / python : ops " , <nl> + " / / tensorflow / python : saveable_object " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " util_test " , <nl> + srcs = [ " util_test . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + tags = [ <nl> + " no_windows " , # TODO : needs investigation on Windows <nl> + " notsan " , # b / 74395663 <nl> + ] , <nl> + deps = [ <nl> + " : base " , <nl> + " : util " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : template " , <nl> + " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : training_util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python / eager : backprop " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / eager : function " , <nl> + " / / tensorflow / python / eager : test " , <nl> + " / / tensorflow / python / keras : engine " , <nl> + " / / tensorflow / python / keras : layers " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> similarity index 100 % <nl> rename from tensorflow / python / training / checkpointable . py <nl> rename to tensorflow / python / training / checkpointable / base . py <nl> similarity index 96 % <nl> rename from tensorflow / python / training / checkpointable_test . py <nl> rename to tensorflow / python / training / checkpointable / base_test . py <nl> mmm a / tensorflow / python / training / checkpointable_test . py <nl> ppp b / tensorflow / python / training / checkpointable / base_test . py <nl> <nl> from __future__ import print_function <nl> <nl> from tensorflow . python . platform import test <nl> - from tensorflow . python . training import checkpointable <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> <nl> <nl> class InterfaceTests ( test . TestCase ) : <nl> similarity index 99 % <nl> rename from tensorflow / python / training / checkpointable_utils . py <nl> rename to tensorflow / python / training / checkpointable / util . py <nl> mmm a / tensorflow / python / training / checkpointable_utils . py <nl> ppp b / tensorflow / python / training / checkpointable / util . py <nl> <nl> from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . ops import variable_scope <nl> - from tensorflow . python . training import checkpointable as checkpointable_lib <nl> from tensorflow . python . training import optimizer as optimizer_lib <nl> from tensorflow . python . training import saveable_object as saveable_object_lib <nl> from tensorflow . python . training import saver as saver_lib <nl> + from tensorflow . python . training . checkpointable import base as checkpointable_lib <nl> from tensorflow . python . util import deprecation <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> similarity index 99 % <nl> rename from tensorflow / python / training / checkpointable_utils_test . py <nl> rename to tensorflow / python / training / checkpointable / util_test . py <nl> mmm a / tensorflow / python / training / checkpointable_utils_test . py <nl> ppp b / tensorflow / python / training / checkpointable / util_test . py <nl> <nl> from tensorflow . python . 
ops import template <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . training import adam <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import saver as saver_lib <nl> from tensorflow . python . training import training_util <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> <nl> <nl> class NonLayerCheckpointable ( checkpointable . Checkpointable ) : <nl> mmm a / tensorflow / python / training / optimizer . py <nl> ppp b / tensorflow / python / training / optimizer . py <nl> <nl> from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables <nl> - from tensorflow . python . training import checkpointable <nl> from tensorflow . python . training import distribute as distribute_lib <nl> from tensorflow . python . training import slot_creator <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import nest <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> mmm a / tensorflow / python / training / saver . py <nl> ppp b / tensorflow / python / training / saver . py <nl> <nl> from tensorflow . python . ops import variables <nl> from tensorflow . python . platform import gfile <nl> from tensorflow . python . platform import tf_logging as logging <nl> - from tensorflow . python . training import checkpointable <nl> from tensorflow . python . training import saveable_object <nl> from tensorflow . python . training import training_util <nl> from tensorflow . python . training . checkpoint_state_pb2 import CheckpointState <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> from tensorflow . python . util import compat <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> mmm a / tensorflow / python / training / saver_test . py <nl> ppp b / tensorflow / python / training / saver_test . py <nl> <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . summary import summary <nl> from tensorflow . python . training import adam <nl> - from tensorflow . python . training import checkpointable <nl> - from tensorflow . python . training import checkpointable_utils <nl> from tensorflow . python . training import gradient_descent <nl> from tensorflow . python . training import queue_runner_impl <nl> from tensorflow . python . training import saver as saver_module <nl> from tensorflow . python . training import saver_test_utils <nl> from tensorflow . python . training import training_util <nl> from tensorflow . python . training . checkpoint_state_pb2 import CheckpointState <nl> + from tensorflow . python . training . checkpointable import base as checkpointable <nl> + from tensorflow . python . training . checkpointable import util as checkpointable_utils <nl> from tensorflow . python . util import compat <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SaverTest ( test . TestCase ) : <nl> <nl> def basicSaveRestore ( self , variable_op ) : <nl> def testSaveToURI ( self ) : <nl> save . save ( sess , save_path ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SaveRestoreShardedTest ( test . 
TestCase ) : <nl> <nl> _WRITE_VERSION = saver_pb2 . SaverDef . V1 <nl> def testPartitionedResourceVariable ( self ) : <nl> self . _testPartitionedVariables ( use_resource = True ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SaveRestoreShardedTestV2 ( SaveRestoreShardedTest ) : <nl> _WRITE_VERSION = saver_pb2 . SaverDef . V2 <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MaxToKeepTest ( test . TestCase ) : <nl> <nl> def _get_test_dir ( self , dirname ) : <nl> def testNoMetaGraph ( self ) : <nl> self . assertFalse ( gfile . Exists ( save . _MetaGraphFilename ( s1 ) ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class KeepCheckpointEveryNHoursTest ( test . TestCase ) : <nl> <nl> def _get_test_dir ( self , dirname ) : <nl> def testNonSharded ( self , mock_time ) : <nl> self . assertTrue ( saver_module . checkpoint_exists ( s4 ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SaveRestoreWithVariableNameMap ( test . TestCase ) : <nl> <nl> def _testNonReshape ( self , variable_op ) : <nl> def testNonReshapeVariable ( self ) : <nl> self . _testNonReshape ( variables . Variable ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class LatestCheckpointWithRelativePaths ( test . TestCase ) : <nl> <nl> @ staticmethod <nl> def testRelativePath ( self ) : <nl> self . assertEqual ( v0 . eval ( ) , 2 . 0 ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CheckpointStateTest ( test . TestCase ) : <nl> <nl> def _get_test_dir ( self , dirname ) : <nl> def testCheckPointCompletesRelativePaths ( self ) : <nl> os . path . join ( save_dir , " . / model . ckpt - 687529 " ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class MetaGraphTest ( test . TestCase ) : <nl> <nl> def _get_test_dir ( self , dirname ) : <nl> def testPreserveDatasetAndFunctions ( self ) : <nl> sess . run ( " new_model / output : 0 " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CheckpointReaderTest ( test . TestCase ) : <nl> <nl> _WRITE_VERSION = saver_pb2 . SaverDef . V1 <nl> def testNonexistentPath ( self ) : <nl> pywrap_tensorflow . NewCheckpointReader ( " non - existent " ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CheckpointReaderForV2Test ( CheckpointReaderTest ) : <nl> _WRITE_VERSION = saver_pb2 . SaverDef . V2 <nl> <nl> <nl> - @ test_util . with_c_api <nl> class WriteGraphTest ( test . TestCase ) : <nl> <nl> def _get_test_dir ( self , dirname ) : <nl> def testRecursiveCreate ( self ) : <nl> self . assertTrue ( os . path . exists ( path ) ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SaverUtilsTest ( test . TestCase ) : <nl> <nl> def setUp ( self ) : <nl> def testGetCheckpointMtimes ( self ) : <nl> self . assertTrue ( mtimes [ 1 ] > = mtimes [ 0 ] ) <nl> <nl> <nl> - @ test_util . with_c_api <nl> class ScopedGraphTest ( test . TestCase ) : <nl> <nl> def _get_test_dir ( self , dirname ) : <nl> def call ( self , values ) : <nl> return ret <nl> <nl> <nl> - @ test_util . with_c_api <nl> class CheckpointableCompatibilityTests ( test . TestCase ) : <nl> <nl> # TODO ( allenl ) : Track down python3 reference cycles in these tests . <nl> mmm a / tensorflow / python / training / slot_creator_test . py <nl> ppp b / tensorflow / python / training / slot_creator_test . py <nl> <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . 
ops import random_ops <nl> from tensorflow . python . ops import variable_scope <nl> <nl> from tensorflow . python . training import slot_creator <nl> <nl> <nl> - @ test_util . with_c_api <nl> class SlotCreatorTest ( test . TestCase ) : <nl> <nl> def testCreateSlotFromVariable ( self ) : <nl> mmm a / tensorflow / python / training / training . py <nl> ppp b / tensorflow / python / training / training . py <nl> <nl> from tensorflow . python . training . basic_session_run_hooks import FeedFnHook <nl> from tensorflow . python . training . basic_session_run_hooks import ProfilerHook <nl> from tensorflow . python . training . basic_loops import basic_train_loop <nl> - from tensorflow . python . training . checkpointable_utils import Checkpoint <nl> + from tensorflow . python . training . checkpointable . util import Checkpoint <nl> from tensorflow . python . training . checkpoint_utils import init_from_checkpoint <nl> from tensorflow . python . training . checkpoint_utils import list_variables <nl> from tensorflow . python . training . checkpoint_utils import load_checkpoint <nl> mmm a / tensorflow / tools / api / generator / BUILD <nl> ppp b / tensorflow / tools / api / generator / BUILD <nl> genrule ( <nl> " api / saved_model / tag_constants / __init__ . py " , <nl> " api / saved_model / utils / __init__ . py " , <nl> " api / sets / __init__ . py " , <nl> + " api / sparse / __init__ . py " , <nl> " api / spectral / __init__ . py " , <nl> " api / summary / __init__ . py " , <nl> " api / sysconfig / __init__ . py " , <nl> mmm a / tensorflow / tools / api / generator / create_python_api . py <nl> ppp b / tensorflow / tools / api / generator / create_python_api . py <nl> <nl> <nl> import argparse <nl> import collections <nl> + import importlib <nl> import os <nl> import sys <nl> <nl> - # Populate ` sys . modules ` which will be traversed to find TensorFlow modules . <nl> - # Make sure your module gets imported in tensorflow / python / __init__ . py for it <nl> - # to be seen by this script . <nl> - import tensorflow . python # pylint : disable = unused - import <nl> - <nl> from tensorflow . python . util import tf_decorator <nl> <nl> <nl> _API_CONSTANTS_ATTR = ' _tf_api_constants ' <nl> _API_NAMES_ATTR = ' _tf_api_names ' <nl> _API_DIR = ' / api / ' <nl> - _DEFAULT_MODULE_FILTER = ' tensorflow . ' <nl> + _DEFAULT_PACKAGE = ' tensorflow . python ' <nl> _OUTPUT_MODULE = ' tensorflow . tools . api . generator . api ' <nl> _GENERATED_FILE_HEADER = " " " \ " \ " \ " Imports for Python API . <nl> <nl> def build ( self ) : <nl> # since we import from it using * import . <nl> underscore_names_str = ' , ' . join ( <nl> ' \ ' % s \ ' ' % name for name in self . _underscore_names_in_root ) <nl> - module_text_map [ ' ' ] + = ' ' ' <nl> + # We will always generate a root __init__ . py file to let us handle * <nl> + # imports consistently . Be sure to have a root __init__ . py file listed in <nl> + # the script outputs . <nl> + module_text_map [ ' ' ] = module_text_map . get ( ' ' , ' ' ) + ' ' ' <nl> _names_with_underscore = [ % s ] <nl> __all__ = [ s for s in dir ( ) if not s . startswith ( ' _ ' ) ] <nl> __all__ . extend ( [ s for s in _names_with_underscore ] ) <nl> def build ( self ) : <nl> return module_text_map <nl> <nl> <nl> - def get_api_init_text ( module_filter ) : <nl> + def get_api_init_text ( package ) : <nl> " " " Get a map from destination module to __init__ . py code for that module . <nl> <nl> Args : <nl> - module_filter : Substring used to filter module names to process . 
<nl> + package : Base python package containing Python modules with target tf_export <nl> + decorators . <nl> <nl> Returns : <nl> A dictionary where <nl> def get_api_init_text ( module_filter ) : <nl> for module in list ( sys . modules . values ( ) ) : <nl> # Only look at tensorflow modules . <nl> if ( not module or not hasattr ( module , ' __name__ ' ) or <nl> - module_filter not in module . __name__ ) : <nl> + package not in module . __name__ ) : <nl> continue <nl> # Do not generate __init__ . py files for contrib modules for now . <nl> if ' . contrib . ' in module . __name__ or module . __name__ . endswith ( ' . contrib ' ) : <nl> def get_api_init_text ( module_filter ) : <nl> return module_code_builder . build ( ) <nl> <nl> <nl> - def create_api_files ( output_files , module_filter ) : <nl> + def create_api_files ( output_files , package ) : <nl> " " " Creates __init__ . py files for the Python API . <nl> <nl> Args : <nl> output_files : List of __init__ . py file paths to create . <nl> Each file must be under api / directory . <nl> - module_filter : Substring used to filter module names to process . <nl> + package : Base python package containing Python modules with target tf_export <nl> + decorators . <nl> <nl> Raises : <nl> ValueError : if an output file is not under api / directory , <nl> def create_api_files ( output_files , module_filter ) : <nl> os . makedirs ( os . path . dirname ( file_path ) ) <nl> open ( file_path , ' a ' ) . close ( ) <nl> <nl> - module_text_map = get_api_init_text ( module_filter ) <nl> + module_text_map = get_api_init_text ( package ) <nl> <nl> # Add imports to output files . <nl> missing_output_files = [ ] <nl> def main ( ) : <nl> ' output . If multiple files are passed in , then we assume output files ' <nl> ' are listed directly as arguments . ' ) <nl> parser . add_argument ( <nl> - ' - - module_filter ' , default = _DEFAULT_MODULE_FILTER , type = str , <nl> - help = ' Only processes modules with names containing this substring . ' <nl> - ) <nl> + ' - - package ' , default = _DEFAULT_PACKAGE , type = str , <nl> + help = ' Base package that imports modules containing the target tf_export ' <nl> + ' decorators . ' ) <nl> args = parser . parse_args ( ) <nl> <nl> if len ( args . outputs ) = = 1 : <nl> def main ( ) : <nl> outputs = [ line . strip ( ) for line in output_list_file . read ( ) . split ( ' ; ' ) ] <nl> else : <nl> outputs = args . outputs <nl> - create_api_files ( outputs , args . module_filter ) <nl> + <nl> + # Populate ` sys . modules ` with modules containing tf_export ( ) . <nl> + importlib . import_module ( args . package ) <nl> + create_api_files ( outputs , args . package ) <nl> <nl> <nl> if __name__ = = ' __main__ ' : <nl> mmm a / tensorflow / tools / api / generator / create_python_api_test . py <nl> ppp b / tensorflow / tools / api / generator / create_python_api_test . py <nl> class TestClass ( object ) : <nl> <nl> <nl> _TEST_CONSTANT = 5 <nl> - _MODULE_NAME = ' test . tensorflow . test_module ' <nl> + _MODULE_NAME = ' tensorflow . python . test_module ' <nl> <nl> <nl> class CreatePythonApiTest ( test . TestCase ) : <nl> def tearDown ( self ) : <nl> <nl> def testFunctionImportIsAdded ( self ) : <nl> imports = create_python_api . get_api_init_text ( <nl> - module_filter = create_python_api . _DEFAULT_MODULE_FILTER ) <nl> + package = create_python_api . _DEFAULT_PACKAGE ) <nl> expected_import = ( <nl> - ' from test . tensorflow . test_module import test_op as test_op1 ' ) <nl> + ' from tensorflow . python . 
test_module ' <nl> + ' import test_op as test_op1 ' ) <nl> self . assertTrue ( <nl> expected_import in str ( imports ) , <nl> msg = ' % s not in % s ' % ( expected_import , str ( imports ) ) ) <nl> <nl> - expected_import = ' from test . tensorflow . test_module import test_op ' <nl> + expected_import = ( ' from tensorflow . python . test_module ' <nl> + ' import test_op ' ) <nl> self . assertTrue ( <nl> expected_import in str ( imports ) , <nl> msg = ' % s not in % s ' % ( expected_import , str ( imports ) ) ) <nl> <nl> def testClassImportIsAdded ( self ) : <nl> imports = create_python_api . get_api_init_text ( <nl> - module_filter = create_python_api . _DEFAULT_MODULE_FILTER ) <nl> - expected_import = ' from test . tensorflow . test_module import TestClass ' <nl> + package = create_python_api . _DEFAULT_PACKAGE ) <nl> + expected_import = ( ' from tensorflow . python . test_module ' <nl> + ' import TestClass ' ) <nl> self . assertTrue ( <nl> ' TestClass ' in str ( imports ) , <nl> msg = ' % s not in % s ' % ( expected_import , str ( imports ) ) ) <nl> <nl> def testConstantIsAdded ( self ) : <nl> imports = create_python_api . get_api_init_text ( <nl> - module_filter = create_python_api . _DEFAULT_MODULE_FILTER ) <nl> - expected = ' from test . tensorflow . test_module import _TEST_CONSTANT ' <nl> + package = create_python_api . _DEFAULT_PACKAGE ) <nl> + expected = ( ' from tensorflow . python . test_module ' <nl> + ' import _TEST_CONSTANT ' ) <nl> self . assertTrue ( expected in str ( imports ) , <nl> msg = ' % s not in % s ' % ( expected , str ( imports ) ) ) <nl> <nl> mmm a / tensorflow / tools / api / golden / tensorflow . - variable . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . - variable . pbtxt <nl> <nl> path : " tensorflow . Variable " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . variables . Variable \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " SaveSliceInfo " <nl> new file mode 100644 <nl> index 0000000000000 . . 9694268199a29 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / tensorflow . estimator . - best - exporter . pbtxt <nl> <nl> + path : " tensorflow . estimator . BestExporter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . python . estimator . exporter . BestExporter \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . estimator . exporter . Exporter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member { <nl> + name : " name " <nl> + mtype : " < type \ ' property \ ' > " <nl> + } <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' serving_input_receiver_fn \ ' , \ ' event_file_pattern \ ' , \ ' compare_fn \ ' , \ ' assets_extra \ ' , \ ' as_text \ ' , \ ' exports_to_keep \ ' ] , varargs = None , keywords = None , defaults = [ \ ' best_exporter \ ' , \ ' None \ ' , \ ' eval / * . tfevents . 
* \ ' , \ ' < function _loss_smaller instance > \ ' , \ ' None \ ' , \ ' False \ ' , \ ' 5 \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " export " <nl> + argspec : " args = [ \ ' self \ ' , \ ' estimator \ ' , \ ' export_path \ ' , \ ' checkpoint_path \ ' , \ ' eval_result \ ' , \ ' is_the_final_export \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> mmm a / tensorflow / tools / api / golden / tensorflow . estimator . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . estimator . pbtxt <nl> tf_module { <nl> name : " BaselineRegressor " <nl> mtype : " < type \ ' type \ ' > " <nl> } <nl> + member { <nl> + name : " BestExporter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> member { <nl> name : " BoostedTreesClassifier " <nl> mtype : " < type \ ' type \ ' > " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . - model . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . - model . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . training . Model \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . network . Network \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . - sequential . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . - sequential . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . training . Model \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . network . Network \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - activation . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - activation . pbtxt <nl> path : " tensorflow . keras . layers . Activation " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Activation \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - activity - regularization . 
pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - activity - regularization . pbtxt <nl> path : " tensorflow . keras . layers . ActivityRegularization " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . ActivityRegularization \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - add . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - add . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . Add \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . _Merge \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - alpha - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - alpha - dropout . pbtxt <nl> path : " tensorflow . keras . layers . AlphaDropout " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . noise . AlphaDropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . AveragePooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling2 - d . 
pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . AveragePooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . AveragePooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . Average \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . _Merge \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . AveragePooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . 
CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . AveragePooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . AveragePooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - batch - normalization . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - batch - normalization . pbtxt <nl> path : " tensorflow . keras . layers . BatchNormalization " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . normalization . BatchNormalization \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - bidirectional . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - bidirectional . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . wrappers . Bidirectional \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . wrappers . Wrapper \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . 
python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - concatenate . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - concatenate . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . Concatenate \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . _Merge \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv - l - s - t - m2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv - l - s - t - m2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional_recurrent . ConvRNN2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activation " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d - transpose . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . 
CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d - transpose . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . 
Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d - transpose . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d - transpose . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . 
convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping1 - d . pbtxt <nl> path : " tensorflow . keras . layers . Cropping1D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Cropping1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping2 - d . pbtxt <nl> path : " tensorflow . keras . layers . Cropping2D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Cropping2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping3 - d . pbtxt <nl> path : " tensorflow . keras . layers . Cropping3D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Cropping3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cu - d - n - n - g - r - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cu - d - n - n - g - r - u . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . cudnn_recurrent . _CuDNNRNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . 
keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cu - d - n - n - l - s - t - m . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cu - d - n - n - l - s - t - m . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . cudnn_recurrent . _CuDNNRNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - dense . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - dense . pbtxt <nl> path : " tensorflow . keras . layers . Dense " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Dense \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - depthwise - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - depthwise - conv2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - dot . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - dot . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . Dot \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . _Merge \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . 
engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - dropout . pbtxt <nl> path : " tensorflow . keras . layers . Dropout " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Dropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - e - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - e - l - u . pbtxt <nl> path : " tensorflow . keras . layers . ELU " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . advanced_activations . ELU \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - embedding . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - embedding . pbtxt <nl> path : " tensorflow . keras . layers . Embedding " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . embeddings . Embedding \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - flatten . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - flatten . pbtxt <nl> path : " tensorflow . keras . layers . Flatten " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Flatten \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . 
CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - g - r - u - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - g - r - u - cell . pbtxt <nl> path : " tensorflow . keras . layers . GRUCell " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . GRUCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - g - r - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - g - r - u . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . GRU \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activation " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - dropout . pbtxt <nl> path : " tensorflow . keras . layers . GaussianDropout " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . noise . GaussianDropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - noise . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - noise . pbtxt <nl> path : " tensorflow . keras . layers . GaussianNoise " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . noise . GaussianNoise \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . 
CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalAveragePooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalAveragePooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalAveragePooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalAveragePooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . 
keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalAveragePooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalAveragePooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalMaxPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . 
GlobalMaxPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalMaxPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalMaxPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalMaxPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling3 - d . 
pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalMaxPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . GlobalPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - input - layer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - input - layer . pbtxt <nl> path : " tensorflow . keras . layers . InputLayer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . input_layer . InputLayer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - l - s - t - m - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - l - s - t - m - cell . pbtxt <nl> path : " tensorflow . keras . layers . LSTMCell " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . LSTMCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - l - s - t - m . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - l - s - t - m . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . LSTM \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activation " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - lambda . 
pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - lambda . pbtxt <nl> path : " tensorflow . keras . layers . Lambda " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Lambda \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - layer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - layer . pbtxt <nl> <nl> path : " tensorflow . keras . layers . Layer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - leaky - re - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - leaky - re - l - u . pbtxt <nl> path : " tensorflow . keras . layers . LeakyReLU " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . advanced_activations . LeakyReLU \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected1 - d . pbtxt <nl> path : " tensorflow . keras . layers . LocallyConnected1D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . local . LocallyConnected1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected2 - d . pbtxt <nl> path : " tensorflow . keras . layers . LocallyConnected2D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . 
keras . layers . local . LocallyConnected2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - masking . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - masking . pbtxt <nl> path : " tensorflow . keras . layers . Masking " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Masking \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . MaxPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . MaxPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . MaxPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . 
layers . pooling . Pooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . MaxPooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . MaxPooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . MaxPooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - maximum . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - maximum . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . 
Maximum \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . _Merge \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - multiply . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - multiply . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . Multiply \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . merge . _Merge \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - p - re - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - p - re - l - u . pbtxt <nl> path : " tensorflow . keras . layers . PReLU " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . advanced_activations . PReLU \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - permute . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - permute . pbtxt <nl> path : " tensorflow . keras . layers . Permute " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Permute \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - r - n - n . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - r - n - n . pbtxt <nl> path : " tensorflow . keras . layers . RNN " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . 
_impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - repeat - vector . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - repeat - vector . pbtxt <nl> path : " tensorflow . keras . layers . RepeatVector " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . RepeatVector \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - reshape . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - reshape . pbtxt <nl> path : " tensorflow . keras . layers . Reshape " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Reshape \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - conv1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - conv1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . SeparableConv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - conv2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . SeparableConv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . 
Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - convolution1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - convolution1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . SeparableConv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - convolution2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - convolution2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . SeparableConv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - simple - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - simple - r - n - n - cell . pbtxt <nl> path : " tensorflow . keras . layers . SimpleRNNCell " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . SimpleRNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - simple - r - n - n . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - simple - r - n - n . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . SimpleRNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . 
RNN \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activation " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - softmax . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - softmax . pbtxt <nl> path : " tensorflow . keras . layers . Softmax " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . advanced_activations . Softmax \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . SpatialDropout1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Dropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . SpatialDropout2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Dropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . SpatialDropout3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . 
layers . core . Dropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - stacked - r - n - n - cells . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - stacked - r - n - n - cells . pbtxt <nl> path : " tensorflow . keras . layers . StackedRNNCells " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . recurrent . StackedRNNCells \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - thresholded - re - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - thresholded - re - l - u . pbtxt <nl> path : " tensorflow . keras . layers . ThresholdedReLU " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . advanced_activations . ThresholdedReLU \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - time - distributed . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - time - distributed . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . wrappers . TimeDistributed \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . wrappers . Wrapper \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling1 - d . pbtxt <nl> path : " tensorflow . keras . layers . UpSampling1D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . 
UpSampling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling2 - d . pbtxt <nl> path : " tensorflow . keras . layers . UpSampling2D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . UpSampling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling3 - d . pbtxt <nl> path : " tensorflow . keras . layers . UpSampling3D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . UpSampling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - wrapper . pbtxt <nl> path : " tensorflow . keras . layers . Wrapper " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . wrappers . Wrapper \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding1 - d . pbtxt <nl> path : " tensorflow . keras . layers . ZeroPadding1D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . ZeroPadding1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . 
Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding2 - d . pbtxt <nl> path : " tensorflow . keras . layers . ZeroPadding2D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . ZeroPadding2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding3 - d . pbtxt <nl> path : " tensorflow . keras . layers . ZeroPadding3D " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . ZeroPadding3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . models . - model . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . models . - model . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . training . Model \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . network . Network \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . models . - sequential . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . models . - sequential . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . training . Model \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . network . Network \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . 
python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - batch - normalization . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - batch - normalization . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . normalization . BatchNormalization \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . 
CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d - transpose . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d - transpose . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . 
CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - dense . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - dense . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Dense \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - dropout . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Dropout \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - flatten . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - flatten . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . core . Flatten \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - layer . 
pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - layer . pbtxt <nl> path : " tensorflow . layers . Layer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling1D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling2D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling3 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . pooling . Pooling3D \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - separable - conv1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - separable - conv1 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . 
_impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - separable - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - separable - conv2 - d . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . layers . convolutional . Conv \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - l - s - t - m - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - l - s - t - m - cell . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - r - n - n - cell . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - device - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - device - wrapper . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . 
python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - dropout - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - dropout - wrapper . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - g - r - u - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - g - r - u - cell . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - l - s - t - m - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - l - s - t - m - cell . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - multi - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - multi - r - n - n - cell . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . 
base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - r - n - n - cell . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - residual - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - residual - wrapper . pbtxt <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . keras . _impl . keras . engine . base_layer . Layer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " activity_regularizer " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . pbtxt <nl> tf_module { <nl> name : " sets " <nl> mtype : " < type \ ' module \ ' > " <nl> } <nl> + member { <nl> + name : " sparse " <nl> + mtype : " < type \ ' module \ ' > " <nl> + } <nl> member { <nl> name : " spectral " <nl> mtype : " < type \ ' module \ ' > " <nl> new file mode 100644 <nl> index 0000000000000 . . bbfe395031aec <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / tensorflow . sparse . pbtxt <nl> <nl> + path : " tensorflow . sparse " <nl> + tf_module { <nl> + member_method { <nl> + name : " cross " <nl> + argspec : " args = [ \ ' inputs \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " cross_hashed " <nl> + argspec : " args = [ \ ' inputs \ ' , \ ' num_buckets \ ' , \ ' hash_key \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' 0 \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + } <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - adadelta - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - adadelta - optimizer . pbtxt <nl> path : " tensorflow . train . AdadeltaOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . adadelta . 
AdadeltaOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - adagrad - d - a - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - adagrad - d - a - optimizer . pbtxt <nl> path : " tensorflow . train . AdagradDAOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . adagrad_da . AdagradDAOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - adagrad - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - adagrad - optimizer . pbtxt <nl> path : " tensorflow . train . AdagradOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . adagrad . AdagradOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - adam - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - adam - optimizer . pbtxt <nl> path : " tensorflow . train . AdamOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . adam . AdamOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - checkpoint . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - checkpoint . pbtxt <nl> <nl> path : " tensorflow . train . Checkpoint " <nl> tf_class { <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable_utils . Checkpoint \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . Checkpointable \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . util . Checkpoint \ ' > " <nl> + is_instance : " < class \ ' tensorflow . 
python . training . checkpointable . base . Checkpointable \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " save_counter " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - ftrl - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - ftrl - optimizer . pbtxt <nl> path : " tensorflow . train . FtrlOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . ftrl . FtrlOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - gradient - descent - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - gradient - descent - optimizer . pbtxt <nl> path : " tensorflow . train . GradientDescentOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . gradient_descent . GradientDescentOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - momentum - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - momentum - optimizer . pbtxt <nl> path : " tensorflow . train . MomentumOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . momentum . MomentumOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - optimizer . pbtxt <nl> <nl> path : " tensorflow . train . Optimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - proximal - adagrad - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - proximal - adagrad - optimizer . 
pbtxt <nl> path : " tensorflow . train . ProximalAdagradOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . proximal_adagrad . ProximalAdagradOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - proximal - gradient - descent - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - proximal - gradient - descent - optimizer . pbtxt <nl> path : " tensorflow . train . ProximalGradientDescentOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . proximal_gradient_descent . ProximalGradientDescentOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - r - m - s - prop - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - r - m - s - prop - optimizer . pbtxt <nl> path : " tensorflow . train . RMSPropOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . rmsprop . RMSPropOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . train . - sync - replicas - optimizer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . train . - sync - replicas - optimizer . pbtxt <nl> path : " tensorflow . train . SyncReplicasOptimizer " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . training . sync_replicas_optimizer . SyncReplicasOptimizer \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . training . optimizer . Optimizer \ ' > " <nl> - is_instance : " < class \ ' tensorflow . python . training . checkpointable . CheckpointableBase \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . training . checkpointable . base . CheckpointableBase \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> member { <nl> name : " GATE_GRAPH " <nl> mmm a / tensorflow / tools / lib_package / BUILD <nl> ppp b / tensorflow / tools / lib_package / BUILD <nl> genrule ( <nl> " @ fft2d / / : fft / readme . txt " , <nl> " @ gemmlowp / / : LICENSE " , <nl> " @ gif_archive / / : COPYING " , <nl> + " @ grpc / / : LICENSE " , <nl> + " @ grpc / / third_party / address_sorting : LICENSE " , <nl> + " @ grpc / / third_party / nanopb : LICENSE . 
txt " , <nl> " @ highwayhash / / : LICENSE " , <nl> " @ jemalloc / / : COPYING " , <nl> " @ jpeg / / : LICENSE . md " , <nl> mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> tf_http_archive ( <nl> name = " llvm " , <nl> urls = [ <nl> - " https : / / mirror . bazel . build / github . com / llvm - mirror / llvm / archive / a915f005cd63fd111bbca510236a5163a7e83576 . tar . gz " , <nl> - " https : / / github . com / llvm - mirror / llvm / archive / a915f005cd63fd111bbca510236a5163a7e83576 . tar . gz " , <nl> + " https : / / mirror . bazel . build / github . com / llvm - mirror / llvm / archive / e17809bf50a4cdf3cec3b9dc5c9f79d9a45fc32f . tar . gz " , <nl> + " https : / / github . com / llvm - mirror / llvm / archive / e17809bf50a4cdf3cec3b9dc5c9f79d9a45fc32f . tar . gz " , <nl> ] , <nl> - sha256 = " 1c81ec0f843ea2c9369ccfa1c1b20023dc9a999bf075ae192fcb89e23896d929 " , <nl> - strip_prefix = " llvm - a915f005cd63fd111bbca510236a5163a7e83576 " , <nl> + sha256 = " 1b75cb65517e41aaa70a95af55e45d08f37d0d44a192669b10d7b14b976dcc2a " , <nl> + strip_prefix = " llvm - e17809bf50a4cdf3cec3b9dc5c9f79d9a45fc32f " , <nl> build_file = clean_dep ( " / / third_party / llvm : llvm . BUILD " ) , <nl> ) <nl> <nl>
|
Merge pull request from zheng-xq/branch_196939548
|
tensorflow/tensorflow
|
27779fc94b9711a220b46dd97d1c9e25b5f6ba26
|
2018-05-17T14:39:04Z
|
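The golden-file churn in the record above mostly tracks the checkpointable module split, but it also registers a new tf.sparse namespace exposing cross and cross_hashed. As a rough sketch of what hashed feature crossing computes, the following C++ substitutes std::hash for TensorFlow's internal fingerprint function and invents a separator byte, so its bucket assignments are illustrative only and will not match TF's:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Cross one value per input column, then hash into a fixed bucket range.
    // std::hash stands in for TF's fingerprint; '\x1f' is an assumed separator.
    uint64_t cross_hashed(const std::vector<std::string>& values, uint64_t num_buckets) {
        std::string joined;
        for (const auto& v : values) {
            joined += v;
            joined += '\x1f';
        }
        return std::hash<std::string>{}(joined) % num_buckets;
    }

    int main() {
        // e.g. crossing a country column with a device column
        std::cout << cross_hashed({"US", "mobile"}, 1000) << "\n";
    }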
mmm a / lib / IRGen / GenClass . cpp <nl> ppp b / lib / IRGen / GenClass . cpp <nl> void IRGenModule : : emitClassDecl ( ClassDecl * D ) { <nl> classTI . getLayout ( * this , selfType ) , <nl> classTI . getClassLayout ( * this , selfType ) ) ; <nl> emitNestedTypeDecls ( D - > getMembers ( ) ) ; <nl> - emitReflectionMetadata ( D ) ; <nl> + emitFieldMetadataRecord ( D ) ; <nl> } <nl> <nl> namespace { <nl> mmm a / lib / IRGen / GenDecl . cpp <nl> ppp b / lib / IRGen / GenDecl . cpp <nl> llvm : : Constant * IRGenModule : : emitProtocolConformances ( ) { <nl> <nl> SmallVector < llvm : : Constant * , 8 > elts ; <nl> for ( auto * conformance : ProtocolConformances ) { <nl> + emitAssociatedTypeMetadataRecord ( conformance ) ; <nl> + <nl> auto descriptorRef = getAddrOfLLVMVariableOrGOTEquivalent ( <nl> LinkEntity : : forProtocolDescriptor ( conformance - > getProtocol ( ) ) , <nl> getPointerAlignment ( ) , ProtocolDescriptorStructTy ) ; <nl> static bool shouldEmitCategory ( IRGenModule & IGM , ExtensionDecl * ext ) { <nl> } <nl> <nl> void IRGenModule : : emitExtension ( ExtensionDecl * ext ) { <nl> - emitAssociatedTypeMetadataRecord ( ext ) ; <nl> emitNestedTypeDecls ( ext - > getMembers ( ) ) ; <nl> <nl> / / Generate a category if the extension either introduces a <nl> mmm a / lib / IRGen / GenEnum . cpp <nl> ppp b / lib / IRGen / GenEnum . cpp <nl> const TypeInfo * TypeConverter : : convertEnumType ( TypeBase * key , CanType type , <nl> void IRGenModule : : emitEnumDecl ( EnumDecl * theEnum ) { <nl> emitEnumMetadata ( * this , theEnum ) ; <nl> emitNestedTypeDecls ( theEnum - > getMembers ( ) ) ; <nl> - emitReflectionMetadata ( theEnum ) ; <nl> + emitFieldMetadataRecord ( theEnum ) ; <nl> } <nl> <nl> void irgen : : emitSwitchAddressOnlyEnumDispatch ( IRGenFunction & IGF , <nl> mmm a / lib / IRGen / GenMeta . cpp <nl> ppp b / lib / IRGen / GenMeta . cpp <nl> void IRGenModule : : emitProtocolDecl ( ProtocolDecl * protocol ) { <nl> var - > setConstant ( true ) ; <nl> var - > setInitializer ( init ) ; <nl> <nl> - emitReflectionMetadata ( protocol ) ; <nl> + emitFieldMetadataRecord ( protocol ) ; <nl> } <nl> <nl> / / / \ brief Load a reference to the protocol descriptor for the given protocol . <nl> mmm a / lib / IRGen / GenReflection . cpp <nl> ppp b / lib / IRGen / GenReflection . cpp <nl> class ReflectionMetadataBuilder : public ConstantBuilder < > { <nl> class AssociatedTypeMetadataBuilder : public ReflectionMetadataBuilder { <nl> static const uint32_t AssociatedTypeRecordSize = 8 ; <nl> <nl> - llvm : : PointerUnion < const NominalTypeDecl * , const ExtensionDecl * > <nl> - NominalOrExtensionDecl ; <nl> + const ProtocolConformance * Conformance ; <nl> + ArrayRef < std : : pair < StringRef , CanType > > AssociatedTypes ; <nl> <nl> - void addConformance ( Module * ModuleContext , <nl> - CanType ConformingType , <nl> - const ProtocolConformance * Conformance ) { <nl> - SmallVector < std : : pair < StringRef , CanType > , 2 > AssociatedTypes ; <nl> - <nl> - auto collectTypeWitness = [ & ] ( const AssociatedTypeDecl * AssocTy , <nl> - const Substitution & Sub , <nl> - const TypeDecl * TD ) - > bool { <nl> - <nl> - auto Subst = ArchetypeBuilder : : mapTypeOutOfContext ( <nl> - Conformance - > getDeclContext ( ) , Sub . getReplacement ( ) ) ; <nl> - <nl> - AssociatedTypes . 
push_back ( { <nl> - AssocTy - > getNameStr ( ) , <nl> - Subst - > getCanonicalType ( ) <nl> - } ) ; <nl> - return false ; <nl> - } ; <nl> + void layout ( ) { <nl> + / / If the conforming type is generic , we just want to emit the <nl> + / / unbound generic type here . <nl> + auto * Nominal = Conformance - > getInterfaceType ( ) - > getAnyNominal ( ) ; <nl> + assert ( Nominal & & " Structural conformance ? " ) ; <nl> <nl> - Conformance - > forEachTypeWitness ( / * resolver * / nullptr , collectTypeWitness ) ; <nl> + PrettyStackTraceDecl DebugStack ( " emitting associated type metadata " , <nl> + Nominal ) ; <nl> <nl> - / / If there are no associated types , don ' t bother emitting any <nl> - / / metadata . <nl> - if ( AssociatedTypes . empty ( ) ) <nl> - return ; <nl> + auto * M = IGM . getSILModule ( ) . getSwiftModule ( ) ; <nl> <nl> - addTypeRef ( ModuleContext , ConformingType ) ; <nl> + addTypeRef ( M , Nominal - > getDeclaredType ( ) - > getCanonicalType ( ) ) ; <nl> <nl> auto ProtoTy = Conformance - > getProtocol ( ) - > getDeclaredType ( ) ; <nl> - addTypeRef ( ModuleContext , ProtoTy - > getCanonicalType ( ) ) ; <nl> + addTypeRef ( M , ProtoTy - > getCanonicalType ( ) ) ; <nl> <nl> addConstantInt32 ( AssociatedTypes . size ( ) ) ; <nl> addConstantInt32 ( AssociatedTypeRecordSize ) ; <nl> class AssociatedTypeMetadataBuilder : public ReflectionMetadataBuilder { <nl> auto NameGlobal = IGM . getAddrOfStringForTypeRef ( AssocTy . first ) ; <nl> addRelativeAddress ( NameGlobal ) ; <nl> addBuiltinTypeRefs ( AssocTy . second ) ; <nl> - addTypeRef ( ModuleContext , AssocTy . second ) ; <nl> - } <nl> - } <nl> - <nl> - const NominalTypeDecl * getNominalTypeDecl ( ) const { <nl> - return NominalOrExtensionDecl . dyn_cast < const NominalTypeDecl * > ( ) ; <nl> - } <nl> - <nl> - const ExtensionDecl * getExtensionDecl ( ) const { <nl> - return NominalOrExtensionDecl . dyn_cast < const ExtensionDecl * > ( ) ; <nl> - } <nl> - <nl> - void layout ( ) { <nl> - if ( auto Decl = getNominalTypeDecl ( ) ) { <nl> - PrettyStackTraceDecl DebugStack ( " emitting associated type metadata " , <nl> - Decl ) ; <nl> - for ( auto Conformance : Decl - > getAllConformances ( ) ) { <nl> - if ( Conformance - > isIncomplete ( ) ) <nl> - continue ; <nl> - addConformance ( Decl - > getModuleContext ( ) , <nl> - Decl - > getDeclaredType ( ) - > getCanonicalType ( ) , <nl> - Conformance ) ; <nl> - } <nl> - } else if ( auto Ext = getExtensionDecl ( ) ) { <nl> - PrettyStackTraceDecl DebugStack ( " emitting associated type metadata " , Ext ) ; <nl> - for ( auto Conformance : Ext - > getLocalConformances ( ) ) { <nl> - auto Decl = Ext - > getExtendedType ( ) - > getNominalOrBoundGenericNominal ( ) ; <nl> - addConformance ( Ext - > getDeclContext ( ) - > getParentModule ( ) , <nl> - Decl - > getDeclaredType ( ) - > getCanonicalType ( ) , <nl> - Conformance ) ; <nl> - } <nl> + addTypeRef ( M , AssocTy . 
second ) ; <nl> } <nl> } <nl> <nl> public : <nl> - AssociatedTypeMetadataBuilder ( IRGenModule & IGM , const NominalTypeDecl * Decl ) <nl> - : ReflectionMetadataBuilder ( IGM ) , NominalOrExtensionDecl ( Decl ) { } <nl> - <nl> - AssociatedTypeMetadataBuilder ( IRGenModule & IGM , const ExtensionDecl * Decl ) <nl> - : ReflectionMetadataBuilder ( IGM ) , NominalOrExtensionDecl ( Decl ) { } <nl> - <nl> + AssociatedTypeMetadataBuilder ( IRGenModule & IGM , <nl> + const ProtocolConformance * Conformance , <nl> + ArrayRef < std : : pair < StringRef , CanType > > AssociatedTypes ) <nl> + : ReflectionMetadataBuilder ( IGM ) , Conformance ( Conformance ) , <nl> + AssociatedTypes ( AssociatedTypes ) { } <nl> <nl> llvm : : GlobalVariable * emit ( ) { <nl> auto tempBase = std : : unique_ptr < llvm : : GlobalVariable > ( <nl> IRGenModule : : getAddrOfCaptureDescriptor ( SILFunction & Caller , <nl> return llvm : : ConstantExpr : : getBitCast ( var , CaptureDescriptorPtrTy ) ; <nl> } <nl> <nl> - void IRGenModule : : emitReflectionMetadata ( const NominalTypeDecl * Decl ) { <nl> + void IRGenModule : : <nl> + emitAssociatedTypeMetadataRecord ( const ProtocolConformance * Conformance ) { <nl> if ( ! IRGen . Opts . EnableReflectionMetadata ) <nl> return ; <nl> <nl> - emitFieldMetadataRecord ( Decl ) ; <nl> - emitAssociatedTypeMetadataRecord ( Decl ) ; <nl> - } <nl> + SmallVector < std : : pair < StringRef , CanType > , 2 > AssociatedTypes ; <nl> <nl> - void IRGenModule : : emitAssociatedTypeMetadataRecord ( const NominalTypeDecl * Decl ) { <nl> - if ( ! IRGen . Opts . EnableReflectionMetadata ) <nl> - return ; <nl> + auto collectTypeWitness = [ & ] ( const AssociatedTypeDecl * AssocTy , <nl> + const Substitution & Sub , <nl> + const TypeDecl * TD ) - > bool { <nl> <nl> - AssociatedTypeMetadataBuilder builder ( * this , Decl ) ; <nl> - auto var = builder . emit ( ) ; <nl> - if ( var ) <nl> - addUsedGlobal ( var ) ; <nl> - } <nl> + auto Subst = ArchetypeBuilder : : mapTypeOutOfContext ( <nl> + Conformance - > getDeclContext ( ) , Sub . getReplacement ( ) ) ; <nl> <nl> - void IRGenModule : : emitAssociatedTypeMetadataRecord ( const ExtensionDecl * Ext ) { <nl> - if ( ! IRGen . Opts . EnableReflectionMetadata ) <nl> + AssociatedTypes . push_back ( { <nl> + AssocTy - > getNameStr ( ) , <nl> + Subst - > getCanonicalType ( ) <nl> + } ) ; <nl> + return false ; <nl> + } ; <nl> + <nl> + Conformance - > forEachTypeWitness ( / * resolver * / nullptr , collectTypeWitness ) ; <nl> + <nl> + / / If there are no associated types , don ' t bother emitting any <nl> + / / metadata . <nl> + if ( AssociatedTypes . empty ( ) ) <nl> return ; <nl> <nl> - AssociatedTypeMetadataBuilder builder ( * this , Ext ) ; <nl> + AssociatedTypeMetadataBuilder builder ( * this , Conformance , AssociatedTypes ) ; <nl> auto var = builder . emit ( ) ; <nl> if ( var ) <nl> addUsedGlobal ( var ) ; <nl> mmm a / lib / IRGen / GenStruct . cpp <nl> ppp b / lib / IRGen / GenStruct . cpp <nl> irgen : : getPhysicalStructMemberAccessStrategy ( IRGenModule & IGM , <nl> void IRGenModule : : emitStructDecl ( StructDecl * st ) { <nl> emitStructMetadata ( * this , st ) ; <nl> emitNestedTypeDecls ( st - > getMembers ( ) ) ; <nl> - emitReflectionMetadata ( st ) ; <nl> + emitFieldMetadataRecord ( st ) ; <nl> } <nl> <nl> namespace { <nl> mmm a / lib / IRGen / IRGenModule . h <nl> ppp b / lib / IRGen / IRGenModule . 
h <nl> class IRGenModule { <nl> const HeapLayout & layout ) ; <nl> llvm : : Constant * getAddrOfBoxDescriptor ( CanType boxedType ) ; <nl> <nl> - void emitReflectionMetadata ( const NominalTypeDecl * Decl ) ; <nl> - void emitAssociatedTypeMetadataRecord ( const NominalTypeDecl * Decl ) ; <nl> - void emitAssociatedTypeMetadataRecord ( const ExtensionDecl * Ext ) ; <nl> + void emitAssociatedTypeMetadataRecord ( const ProtocolConformance * Conformance ) ; <nl> void emitFieldMetadataRecord ( const NominalTypeDecl * Decl ) ; <nl> void emitBuiltinReflectionMetadata ( ) ; <nl> std : : string getBuiltinTypeMetadataSectionName ( ) ; <nl> mmm a / test / Reflection / typeref_decoding . swift <nl> ppp b / test / Reflection / typeref_decoding . swift <nl> <nl> / / CHECK : typealias Outer = A <nl> / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> <nl> - / / CHECK : - TypesToReflect . C4 : TypesToReflect . P1 <nl> - / / CHECK : typealias Inner = A <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> - <nl> - / / CHECK : - TypesToReflect . C4 : TypesToReflect . P2 <nl> - / / CHECK : typealias Outer = A <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> - <nl> - / / CHECK : - TypesToReflect . S4 : TypesToReflect . P1 <nl> - / / CHECK : typealias Inner = A <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> - <nl> - / / CHECK : - TypesToReflect . S4 : TypesToReflect . P2 <nl> - / / CHECK : typealias Outer = A <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> - <nl> / / CHECK : - TypesToReflect . S4 : TypesToReflect . P1 <nl> / / CHECK : typealias Inner = A <nl> / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> <nl> / / CHECK : typealias Second = B <nl> / / CHECK : ( generic_type_parameter depth = 0 index = 1 ) <nl> <nl> - / / CHECK : - TypesToReflect . E4 : TypesToReflect . P1 <nl> - / / CHECK : typealias Inner = A <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> - <nl> - / / CHECK : - TypesToReflect . E4 : TypesToReflect . P2 <nl> - / / CHECK : typealias Outer = B <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 1 ) <nl> - <nl> - / / CHECK : - TypesToReflect . E4 : TypesToReflect . P3 <nl> - / / CHECK : typealias First = A <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 0 ) <nl> - <nl> - / / CHECK : typealias Second = B <nl> - / / CHECK : ( generic_type_parameter depth = 0 index = 1 ) <nl> - <nl> / / CHECK : - TypesToReflect . S : TypesToReflect . P4 <nl> / / CHECK : typealias Result = Swift . Int <nl> / / CHECK : ( struct Swift . Int ) <nl>
|
Reflection: Simplify associated type metadata emission
|
apple/swift
|
60dff0109356a4e0bb88f38cbbbad6909df9e575
|
2016-05-27T02:33:00Z
|
mmm a / include / spdlog / cfg / argv . h <nl> ppp b / include / spdlog / cfg / argv . h <nl> <nl> <nl> # pragma once <nl> # include < spdlog / cfg / helpers . h > <nl> - # include < spdlog / details / os . h > <nl> <nl> / / <nl> / / Init log levels using each argv entry that starts with " SPDLOG_LEVEL = " <nl>
|
Update argv.h
|
gabime/spdlog
|
b667bae65d65f4b868cfba41db1f08a83bb58df4
|
2020-03-06T14:26:11Z
|
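The include removed in the record above belongs to spdlog's argv-based level loader, which scans argv for entries beginning with "SPDLOG_LEVEL=". A minimal sketch of how that header is typically consumed; the "net" logger name in the comment is just an example:

    #include <spdlog/spdlog.h>
    #include <spdlog/cfg/argv.h>

    int main(int argc, char* argv[]) {
        // Picks up arguments such as:  ./app "SPDLOG_LEVEL=info,net=trace"
        spdlog::cfg::load_argv_levels(argc, argv);
        spdlog::info("default logger runs at the level given on the command line");
    }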
mmm a / ChangeLog <nl> ppp b / ChangeLog <nl> <nl> + 2007 - 01 - 30 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com > <nl> + <nl> + To fix segfault in Metalink download : <nl> + <nl> + * src / UrlRequestInfo . h <nl> + ( filename ) : Removed . <nl> + ( totalLength ) : Removed . <nl> + ( setTotalLength ) : Removed . <nl> + ( setFilename ) : Removed . <nl> + * src / MetalinkRequestInfo . cc <nl> + ( execute ) : Do not set filename and totalLength to reqInfo . <nl> + Set chunk checksum to reqInfo if it is not null . <nl> + <nl> + * src / HttpResponseCommand . cc <nl> + ( executeInternal ) : Fixed filename in log . <nl> + ( handleDefaultEncoding ) : Removed the call to initBitfield ( ) and <nl> + markAllPiecesDone ( ) . <nl> + ( handleOtherEncoding ) : Added HEAD method handling . <nl> + <nl> + * src / FtpNegotiateCommand . cc <nl> + ( executeInternal ) : Added SEQ_HEAD_OK handling . <nl> + ( recvSize ) : In HEAD handling , set sequence to SEQ_HEAD_OK and <nl> + return false . <nl> + <nl> + * src / FtpNegotiateCommand . h <nl> + ( SEQ_HEAD_OK ) : New definition . <nl> + <nl> + * src / SegmentMan . cc <nl> + ( initBitfield ) : Delete bitfield . <nl> + ( isChunkChecksumValidationReady ) : Fixed the condition . <nl> + <nl> + * src / UrlRequestInfo . cc : Fixed so that Metalink and Torrent download <nl> + works fine . <nl> + <nl> 2007 - 01 - 28 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com > <nl> <nl> * src / Xml2MetalinkProcessor . h <nl> mmm a / configure <nl> ppp b / configure <nl> <nl> # ! / bin / sh <nl> # Guess values for system - dependent variables and create Makefiles . <nl> - # Generated by GNU Autoconf 2 . 61 for aria2c 0 . 10 . 0 . <nl> + # Generated by GNU Autoconf 2 . 61 for aria2c 0 . 10 . 0 + 1 . <nl> # <nl> # Report bugs to < t - tujikawa @ users . sourceforge . net > . <nl> # <nl> SHELL = $ { CONFIG_SHELL - / bin / sh } <nl> # Identity of this package . <nl> PACKAGE_NAME = ' aria2c ' <nl> PACKAGE_TARNAME = ' aria2c ' <nl> - PACKAGE_VERSION = ' 0 . 10 . 0 ' <nl> - PACKAGE_STRING = ' aria2c 0 . 10 . 0 ' <nl> + PACKAGE_VERSION = ' 0 . 10 . 0 + 1 ' <nl> + PACKAGE_STRING = ' aria2c 0 . 10 . 0 + 1 ' <nl> PACKAGE_BUGREPORT = ' t - tujikawa @ users . sourceforge . net ' <nl> <nl> ac_unique_file = " src / Socket . h " <nl> if test " $ ac_init_help " = " long " ; then <nl> # Omit some internal or obsolete options to make the list less imposing . <nl> # This message is too long to be a string in the A / UX 3 . 1 sh . <nl> cat < < _ACEOF <nl> - \ ` configure ' configures aria2c 0 . 10 . 0 to adapt to many kinds of systems . <nl> + \ ` configure ' configures aria2c 0 . 10 . 0 + 1 to adapt to many kinds of systems . <nl> <nl> Usage : $ 0 [ OPTION ] . . . [ VAR = VALUE ] . . . <nl> <nl> fi <nl> <nl> if test - n " $ ac_init_help " ; then <nl> case $ ac_init_help in <nl> - short | recursive ) echo " Configuration of aria2c 0 . 10 . 0 : " ; ; <nl> + short | recursive ) echo " Configuration of aria2c 0 . 10 . 0 + 1 : " ; ; <nl> esac <nl> cat < < \ _ACEOF <nl> <nl> fi <nl> test - n " $ ac_init_help " & & exit $ ac_status <nl> if $ ac_init_version ; then <nl> cat < < \ _ACEOF <nl> - aria2c configure 0 . 10 . 0 <nl> + aria2c configure 0 . 10 . 0 + 1 <nl> generated by GNU Autoconf 2 . 61 <nl> <nl> Copyright ( C ) 1992 , 1993 , 1994 , 1995 , 1996 , 1998 , 1999 , 2000 , 2001 , <nl> cat > config . log < < _ACEOF <nl> This file contains any messages produced by compilers while <nl> running configure , to aid debugging if configure makes a mistake . <nl> <nl> - It was created by aria2c $ as_me 0 . 
10 . 0 , which was <nl> + It was created by aria2c $ as_me 0 . 10 . 0 + 1 , which was <nl> generated by GNU Autoconf 2 . 61 . Invocation command line was <nl> <nl> $ $ 0 $ @ <nl> fi <nl> <nl> # Define the identity of the package . <nl> PACKAGE = ' aria2c ' <nl> - VERSION = ' 0 . 10 . 0 ' <nl> + VERSION = ' 0 . 10 . 0 + 1 ' <nl> <nl> <nl> cat > > confdefs . h < < _ACEOF <nl> exec 6 > & 1 <nl> # report actual input values of CONFIG_FILES etc . instead of their <nl> # values after options handling . <nl> ac_log = " <nl> - This file was extended by aria2c $ as_me 0 . 10 . 0 , which was <nl> + This file was extended by aria2c $ as_me 0 . 10 . 0 + 1 , which was <nl> generated by GNU Autoconf 2 . 61 . Invocation command line was <nl> <nl> CONFIG_FILES = $ CONFIG_FILES <nl> Report bugs to < bug - autoconf @ gnu . org > . " <nl> _ACEOF <nl> cat > > $ CONFIG_STATUS < < _ACEOF <nl> ac_cs_version = " \ \ <nl> - aria2c config . status 0 . 10 . 0 <nl> + aria2c config . status 0 . 10 . 0 + 1 <nl> configured by $ 0 , generated by GNU Autoconf 2 . 61 , <nl> with options \ \ " ` echo " $ ac_configure_args " | sed ' s / ^ / / ; s / [ \ \ " " \ ` \ $ ] / \ \ \ \ & / g ' ` \ \ " <nl> <nl> mmm a / configure . ac <nl> ppp b / configure . ac <nl> <nl> # Process this file with autoconf to produce a configure script . <nl> # <nl> AC_PREREQ ( 2 . 59 ) <nl> - AC_INIT ( aria2c , 0 . 10 . 0 , t - tujikawa @ users . sourceforge . net ) <nl> + AC_INIT ( aria2c , 0 . 10 . 0 + 1 , t - tujikawa @ users . sourceforge . net ) <nl> AM_INIT_AUTOMAKE ( ) <nl> AM_PATH_CPPUNIT ( 1 . 10 . 2 ) <nl> AC_CONFIG_SRCDIR ( [ src / Socket . h ] ) <nl> mmm a / po / Makefile . in <nl> ppp b / po / Makefile . in <nl> <nl> # General Public License and is * not * in the public domain . <nl> <nl> PACKAGE = aria2c <nl> - VERSION = 0 . 10 . 0 <nl> + VERSION = 0 . 10 . 0 + 1 <nl> <nl> SHELL = / bin / sh <nl> <nl> mmm a / src / FtpNegotiationCommand . cc <nl> ppp b / src / FtpNegotiationCommand . cc <nl> bool FtpNegotiationCommand : : executeInternal ( Segment & segment ) { <nl> command - > setLowestDownloadSpeedLimit ( e - > option - > getAsInt ( PREF_LOWEST_SPEED_LIMIT ) ) ; <nl> e - > commands . push_back ( command ) ; <nl> return true ; <nl> + } else if ( sequence = = SEQ_HEAD_OK ) { <nl> + return true ; <nl> } else { <nl> e - > commands . push_back ( this ) ; <nl> return false ; <nl> bool FtpNegotiationCommand : : recvSize ( ) { <nl> throw new DlAbortEx ( EX_TOO_LARGE_FILE , size ) ; <nl> } <nl> if ( ! e - > segmentMan - > downloadStarted ) { <nl> - if ( req - > getMethod ( ) = = Request : : METHOD_HEAD ) { <nl> - e - > segmentMan - > downloadStarted = true ; <nl> - e - > segmentMan - > totalSize = size ; <nl> - e - > segmentMan - > initBitfield ( e - > option - > getAsInt ( PREF_SEGMENT_SIZE ) , <nl> - e - > segmentMan - > totalSize ) ; <nl> - e - > segmentMan - > markAllPiecesDone ( ) ; <nl> - e - > segmentMan - > isSplittable = false ; / / TODO because we don ' t want segment file to be saved . <nl> - return true ; <nl> - } <nl> e - > segmentMan - > downloadStarted = true ; <nl> e - > segmentMan - > totalSize = size ; <nl> e - > segmentMan - > initBitfield ( e - > option - > getAsInt ( PREF_SEGMENT_SIZE ) , <nl> e - > segmentMan - > totalSize ) ; <nl> - <nl> e - > segmentMan - > filename = Util : : urldecode ( req - > getFile ( ) ) ; <nl> + if ( req - > getMethod ( ) = = Request : : METHOD_HEAD ) { <nl> + e - > segmentMan - > isSplittable = false ; / / TODO because we don ' t want segment file to be saved . 
<nl> + sequence = SEQ_HEAD_OK ; <nl> + return false ; <nl> + } <nl> bool segFileExists = e - > segmentMan - > segmentFileExists ( ) ; <nl> if ( segFileExists ) { <nl> e - > segmentMan - > load ( ) ; <nl> mmm a / src / FtpNegotiationCommand . h <nl> ppp b / src / FtpNegotiationCommand . h <nl> class FtpNegotiationCommand : public AbstractCommand { <nl> SEQ_SEND_RETR , <nl> SEQ_RECV_RETR , <nl> SEQ_NEGOTIATION_COMPLETED , <nl> - SEQ_RETRY <nl> + SEQ_RETRY , <nl> + SEQ_HEAD_OK <nl> } ; <nl> bool recvGreeting ( ) ; <nl> bool sendUser ( ) ; <nl> mmm a / src / HttpResponseCommand . cc <nl> ppp b / src / HttpResponseCommand . cc <nl> bool HttpResponseCommand : : executeInternal ( Segment & segment ) { <nl> return handleDefaultEncoding ( headers ) ; <nl> } <nl> } else { <nl> - if ( determinFilename ( headers ) ! = e - > segmentMan - > filename ) { <nl> - throw new DlAbortEx ( EX_FILENAME_MISMATCH , req - > getFile ( ) . c_str ( ) , e - > segmentMan - > filename . c_str ( ) ) ; <nl> + string filenameInHeader = determinFilename ( headers ) ; <nl> + if ( filenameInHeader ! = e - > segmentMan - > filename ) { <nl> + throw new DlAbortEx ( EX_FILENAME_MISMATCH , <nl> + filenameInHeader . c_str ( ) , <nl> + e - > segmentMan - > filename . c_str ( ) ) ; <nl> } <nl> createHttpDownloadCommand ( ) ; <nl> return true ; <nl> bool HttpResponseCommand : : handleDefaultEncoding ( const HttpHeader & headers ) { <nl> if ( req - > getMethod ( ) = = Request : : METHOD_HEAD ) { <nl> e - > segmentMan - > downloadStarted = true ; <nl> e - > segmentMan - > totalSize = size ; <nl> - e - > segmentMan - > initBitfield ( e - > option - > getAsInt ( PREF_SEGMENT_SIZE ) , <nl> - e - > segmentMan - > totalSize ) ; <nl> - e - > segmentMan - > markAllPiecesDone ( ) ; <nl> e - > segmentMan - > isSplittable = false ; / / TODO because we don ' t want segment file to be saved . <nl> return true ; <nl> } <nl> bool HttpResponseCommand : : handleDefaultEncoding ( const HttpHeader & headers ) { <nl> } <nl> <nl> bool HttpResponseCommand : : handleOtherEncoding ( const string & transferEncoding , const HttpHeader & headers ) { <nl> + / / quick hack for method ' head ' <nl> + if ( req - > getMethod ( ) = = Request : : METHOD_HEAD ) { <nl> + e - > segmentMan - > downloadStarted = true ; <nl> + e - > segmentMan - > isSplittable = false ; <nl> + e - > segmentMan - > filename = determinFilename ( headers ) ; <nl> + e - > segmentMan - > totalSize = 0 ; <nl> + return true ; <nl> + } <nl> if ( e - > segmentMan - > shouldCancelDownloadForSafety ( ) ) { <nl> throw new FatalException ( EX_FILE_ALREADY_EXISTS , <nl> e - > segmentMan - > getFilePath ( ) . c_str ( ) , <nl> mmm a / src / MetalinkRequestInfo . cc <nl> ppp b / src / MetalinkRequestInfo . cc <nl> RequestInfos MetalinkRequestInfo : : execute ( ) { <nl> urls . push_back ( ( * itr ) - > url ) ; <nl> } <nl> UrlRequestInfoHandle reqInfo = new UrlRequestInfo ( urls , maxConnection , op ) ; <nl> - reqInfo - > setFilename ( entry - > filename ) ; <nl> - reqInfo - > setTotalLength ( entry - > size ) ; <nl> # ifdef ENABLE_MESSAGE_DIGEST <nl> reqInfo - > setChecksum ( checksum ) ; <nl> - reqInfo - > setDigestAlgo ( entry - > chunkChecksum - > digestAlgo ) ; <nl> - reqInfo - > setChunkChecksumLength ( entry - > chunkChecksum - > pieceLength ) ; <nl> - reqInfo - > setChunkChecksums ( entry - > chunkChecksum - > pieceHashes ) ; <nl> + if ( ! entry - > chunkChecksum . 
isNull ( ) ) { <nl> + reqInfo - > setDigestAlgo ( entry - > chunkChecksum - > digestAlgo ) ; <nl> + reqInfo - > setChunkChecksumLength ( entry - > chunkChecksum - > pieceLength ) ; <nl> + reqInfo - > setChunkChecksums ( entry - > chunkChecksum - > pieceHashes ) ; <nl> + } <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> nextReqInfos . push_front ( reqInfo ) ; <nl> } <nl> mmm a / src / SegmentMan . cc <nl> ppp b / src / SegmentMan . cc <nl> void SegmentMan : : init ( ) { <nl> } <nl> <nl> void SegmentMan : : initBitfield ( int segmentLength , long long int totalLength ) { <nl> + delete bitfield ; <nl> this - > bitfield = BitfieldManFactory : : getNewFactory ( ) - > createBitfieldMan ( segmentLength , totalLength ) ; <nl> } <nl> <nl> void SegmentMan : : checkIntegrity ( ) <nl> <nl> # ifdef ENABLE_MESSAGE_DIGEST <nl> bool SegmentMan : : isChunkChecksumValidationReady ( ) const { <nl> - return bitfield & & <nl> - ( ( int64_t ) pieceHashes . size ( ) ) * chunkHashLength = = ( ( int64_t ) bitfield - > getBlockLength ( ) ) * ( bitfield - > getMaxIndex ( ) + 1 ) ; <nl> + return bitfield & & totalSize > 0 & & <nl> + ( ( int64_t ) pieceHashes . size ( ) ) * chunkHashLength > = totalSize ; <nl> } <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> <nl> mmm a / src / UrlRequestInfo . cc <nl> ppp b / src / UrlRequestInfo . cc <nl> HeadResult UrlRequestInfo : : getHeadResult ( ) { <nl> op - > get ( PREF_REFERER ) , <nl> 1 , <nl> Request : : METHOD_HEAD ) ) ; <nl> + if ( requests . size ( ) = = 0 ) { <nl> + fail = true ; <nl> + return HeadResult ( ) ; <nl> + } <nl> Requests reserved ( requests . begin ( ) + 1 , requests . end ( ) ) ; <nl> requests . erase ( requests . begin ( ) + 1 , requests . end ( ) ) ; <nl> <nl> HeadResult UrlRequestInfo : : getHeadResult ( ) { <nl> HeadResult hr ; <nl> try { <nl> e - > run ( ) ; <nl> - hr . filename = e - > segmentMan - > filename ; <nl> - hr . totalLength = e - > segmentMan - > totalSize ; <nl> + if ( e - > segmentMan - > errors > 0 ) { <nl> + fail = true ; <nl> + } else { <nl> + hr . filename = e - > segmentMan - > filename ; <nl> + hr . totalLength = e - > segmentMan - > totalSize ; <nl> + } <nl> } catch ( RecoverableException * ex ) { <nl> logger - > error ( " Exception caught " , ex ) ; <nl> delete ex ; <nl> RequestInfos UrlRequestInfo : : execute ( ) { <nl> Requests requests ; <nl> Requests reserved ; <nl> printUrls ( urls ) ; <nl> + HeadResult hr = getHeadResult ( ) ; <nl> + if ( fail ) { <nl> + return RequestInfos ( ) ; <nl> + } <nl> + <nl> for_each ( urls . begin ( ) , urls . end ( ) , <nl> CreateRequest ( & requests , <nl> op - > get ( PREF_REFERER ) , <nl> op - > getAsInt ( PREF_SPLIT ) ) ) ; <nl> + <nl> + logger - > info ( " Head result : filename = % s , total length = % s " , <nl> + hr . filename . c_str ( ) , Util : : ullitos ( hr . totalLength , true ) . c_str ( ) ) ; <nl> <nl> - HeadResult hr ; <nl> - if ( filename . size ( ) & & totalLength > 0 ) { <nl> - hr . filename = filename ; <nl> - hr . totalLength = totalLength ; <nl> - } else { <nl> - hr = getHeadResult ( ) ; <nl> - if ( fail ) { <nl> - return RequestInfos ( ) ; <nl> - } <nl> - <nl> - logger - > info ( " Head result : filename = % s , total length = % s " , <nl> - hr . filename . c_str ( ) , Util : : ullitos ( hr . totalLength , true ) . c_str ( ) ) ; <nl> - } <nl> adjustRequestSize ( requests , reserved , maxConnections ) ; <nl> <nl> SharedHandle < ConsoleDownloadEngine > e ( DownloadEngineFactory : : newConsoleEngine ( op , requests , reserved ) ) ; <nl> - e - > segmentMan - > filename = hr . 
filename ; <nl> - e - > segmentMan - > totalSize = hr . totalLength ; <nl> - e - > segmentMan - > downloadStarted = true ; <nl> + if ( hr . totalLength > 0 ) { <nl> + e - > segmentMan - > filename = hr . filename ; <nl> + e - > segmentMan - > totalSize = hr . totalLength ; <nl> + e - > segmentMan - > downloadStarted = true ; <nl> + } <nl> # ifdef ENABLE_MESSAGE_DIGEST <nl> - e - > segmentMan - > digestAlgo = digestAlgo ; <nl> - e - > segmentMan - > chunkHashLength = chunkChecksumLength ; <nl> - e - > segmentMan - > pieceHashes = chunkChecksums ; <nl> + if ( chunkChecksumLength > 0 ) { <nl> + e - > segmentMan - > digestAlgo = digestAlgo ; <nl> + e - > segmentMan - > chunkHashLength = chunkChecksumLength ; <nl> + e - > segmentMan - > pieceHashes = chunkChecksums ; <nl> + } <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> <nl> if ( e - > segmentMan - > segmentFileExists ( ) ) { <nl> RequestInfos UrlRequestInfo : : execute ( ) { <nl> e - > segmentMan - > getFilePath ( ) . c_str ( ) , <nl> e - > segmentMan - > getSegmentFilePath ( ) . c_str ( ) ) ; <nl> } <nl> - e - > segmentMan - > initBitfield ( e - > option - > getAsInt ( PREF_SEGMENT_SIZE ) , <nl> - e - > segmentMan - > totalSize ) ; <nl> - if ( e - > segmentMan - > fileExists ( ) & & e - > option - > get ( PREF_CHECK_INTEGRITY ) = = V_TRUE ) { <nl> - e - > segmentMan - > diskWriter - > openExistingFile ( e - > segmentMan - > getFilePath ( ) ) ; <nl> + if ( e - > segmentMan - > totalSize > 0 ) { <nl> + e - > segmentMan - > initBitfield ( e - > option - > getAsInt ( PREF_SEGMENT_SIZE ) , <nl> + e - > segmentMan - > totalSize ) ; <nl> + if ( e - > segmentMan - > fileExists ( ) & & e - > option - > get ( PREF_CHECK_INTEGRITY ) = = V_TRUE ) { <nl> + e - > segmentMan - > diskWriter - > openExistingFile ( e - > segmentMan - > getFilePath ( ) ) ; <nl> # ifdef ENABLE_MESSAGE_DIGEST <nl> - e - > segmentMan - > markAllPiecesDone ( ) ; <nl> - e - > segmentMan - > checkIntegrity ( ) ; <nl> + e - > segmentMan - > markAllPiecesDone ( ) ; <nl> + e - > segmentMan - > checkIntegrity ( ) ; <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> - } else { <nl> - e - > segmentMan - > diskWriter - > initAndOpenFile ( e - > segmentMan - > getFilePath ( ) , <nl> - e - > segmentMan - > totalSize ) ; <nl> + } else { <nl> + e - > segmentMan - > diskWriter - > initAndOpenFile ( e - > segmentMan - > getFilePath ( ) , <nl> + e - > segmentMan - > totalSize ) ; <nl> + } <nl> } <nl> } <nl> Util : : setGlobalSignalHandler ( SIGINT , handler , 0 ) ; <nl> mmm a / src / UrlRequestInfo . h <nl> ppp b / src / UrlRequestInfo . 
h <nl> <nl> <nl> class HeadResult { <nl> public : <nl> - HeadResult ( ) : totalLength ( 0 ) { } <nl> string filename ; <nl> int64_t totalLength ; <nl> + public : <nl> + HeadResult ( ) : totalLength ( 0 ) { } <nl> } ; <nl> <nl> std : : ostream & operator < < ( std : : ostream & o , const HeadResult & hr ) ; <nl> class UrlRequestInfo : public RequestInfo { <nl> int32_t chunkChecksumLength ; <nl> Strings chunkChecksums ; <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> - string filename ; <nl> - int64_t totalLength ; <nl> <nl> RequestInfo * createNextRequestInfo ( ) const ; <nl> void adjustRequestSize ( Requests & requests , <nl> class UrlRequestInfo : public RequestInfo { <nl> UrlRequestInfo ( const Strings & urls , int maxConnections , Option * op ) : <nl> RequestInfo ( op ) , <nl> urls ( urls ) , <nl> - maxConnections ( maxConnections ) , <nl> + maxConnections ( maxConnections ) <nl> # ifdef ENABLE_MESSAGE_DIGEST <nl> + , <nl> digestAlgo ( DIGEST_ALGO_SHA1 ) , <nl> - chunkChecksumLength ( 0 ) , <nl> + chunkChecksumLength ( 0 ) <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> - totalLength ( 0 ) { } <nl> + { } <nl> <nl> virtual ~ UrlRequestInfo ( ) { } <nl> <nl> class UrlRequestInfo : public RequestInfo { <nl> this - > chunkChecksums = chunkChecksums ; <nl> } <nl> # endif / / ENABLE_MESSAGE_DIGEST <nl> - <nl> - void setTotalLength ( int64_t totalLength ) { <nl> - this - > totalLength = totalLength ; <nl> - } <nl> - <nl> - void setFilename ( const string & filename ) { <nl> - this - > filename = filename ; <nl> - } <nl> } ; <nl> <nl> typedef SharedHandle < UrlRequestInfo > UrlRequestInfoHandle ; <nl>
|
2007-01-30 Tatsuhiro Tsujikawa <tujikawa at rednoah dot com>
|
aria2/aria2
|
77a05c00b1410687a3045ffe693aaa0496361fc8
|
2007-01-29T16:46:48Z
|
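The aria2 changes above rework how a preliminary HEAD request supplies the filename and total length before the real transfer starts. aria2 implements its own HTTP/FTP stack; the libcurl sketch below only illustrates the HEAD-then-download idea under that assumption and is not aria2 code:

    #include <curl/curl.h>
    #include <cstdio>

    int main() {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL* curl = curl_easy_init();
        if (!curl) return 1;
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.org/file.bin");
        curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);  // send HEAD, skip the body
        if (curl_easy_perform(curl) == CURLE_OK) {
            curl_off_t len = -1;
            curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &len);
            std::printf("total length: %lld\n", static_cast<long long>(len));
            // a real downloader would now re-request with NOBODY off,
            // possibly in ranged segments once the size is known
        }
        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return 0;
    }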
mmm a / tools / editor / editor_node . cpp <nl> ppp b / tools / editor / editor_node . cpp <nl> EditorNode : : EditorNode ( ) { <nl> p - > add_separator ( ) ; <nl> p - > add_item ( " Revert Scene " , EDIT_REVERT ) ; <nl> p - > add_separator ( ) ; <nl> + # ifdef OSX_ENABLED <nl> p - > add_item ( " Quit to Project List " , RUN_PROJECT_MANAGER , KEY_MASK_SHIFT + KEY_MASK_ALT + KEY_Q ) ; <nl> + # else <nl> + p - > add_item ( " Quit to Project List " , RUN_PROJECT_MANAGER , KEY_MASK_SHIFT + KEY_MASK_CTRL + KEY_Q ) ; <nl> + # endif <nl> p - > add_item ( " Quit " , FILE_QUIT , KEY_MASK_CMD + KEY_Q ) ; <nl> <nl> recent_scenes = memnew ( PopupMenu ) ; <nl>
|
restored quit to project list keybinding, but kept change for OSX, closes
|
godotengine/godot
|
027072728e7dc95f2312f7bbaaa29a6be2140343
|
2016-01-10T18:57:24Z
|
mmm a / README . md <nl> ppp b / README . md <nl> For previous versions , please read : <nl> <nl> # # V3 changes <nl> <nl> + * < strong > v3 . 0 , 2020 - 10 - 10 , [ 3 . 0 release1 ( 3 . 0 . 144 ) ] [ r3 . 0r1 ] released . 122674 lines . < / strong > <nl> * v3 . 0 , 2020 - 10 - 10 , Fix [ # 1780 ] [ bug # 1780 ] , build fail on Ubuntu20 ( focal ) . 3 . 0 . 144 <nl> * v3 . 0 , 2020 - 09 - 14 , Prevent stop ingest for multiple times . 3 . 0 . 143 <nl> * v3 . 0 , 2020 - 09 - 10 , RTC : Change SO_REUSEPORT fail to warning . 3 . 0 . 142 <nl> For previous versions , please read : <nl> <nl> # # Releases <nl> <nl> + * 2020 - 10 - 10 , [ Release v3 . 0 - r1 ] [ r3 . 0r1 ] , 3 . 0 release1 , 3 . 0 . 144 , 122674 lines . <nl> * 2020 - 06 - 27 , [ Release v3 . 0 - r0 ] [ r3 . 0r0 ] , 3 . 0 release0 , 3 . 0 . 141 , 122674 lines . <nl> * 2020 - 03 - 29 , [ Release v3 . 0 - b3 ] [ r3 . 0b4 ] , 3 . 0 beta4 , 3 . 0 . 139 , 122674 lines . <nl> * 2020 - 03 - 18 , [ Release v3 . 0 - b3 ] [ r3 . 0b3 ] , 3 . 0 beta3 , 3 . 0 . 134 , 122509 lines . <nl> Winlin <nl> <nl> [ exo # 828 ] : https : / / github . com / google / ExoPlayer / pull / 828 <nl> <nl> + [ r3 . 0r1 ] : https : / / github . com / ossrs / srs / releases / tag / v3 . 0 - r1 <nl> [ r3 . 0r0 ] : https : / / github . com / ossrs / srs / releases / tag / v3 . 0 - r0 <nl> [ r3 . 0b4 ] : https : / / github . com / ossrs / srs / releases / tag / v3 . 0 - b4 <nl> [ r3 . 0b3 ] : https : / / github . com / ossrs / srs / releases / tag / v3 . 0 - b3 <nl>
|
Release 3.0r1, 3.0.144
|
ossrs/srs
|
3809d43ee5a721f25d4d3ca985dfffd306d22166
|
2020-10-10T08:50:19Z
|
mmm a / stdlib / public / SDK / Foundation / URLComponents . swift <nl> ppp b / stdlib / public / SDK / Foundation / URLComponents . swift <nl> public struct URLComponents : ReferenceConvertible , Hashable , Equatable , _Mutabl <nl> set { _applyMutation { $ 0 . queryItems = newValue } } <nl> } <nl> <nl> - / / / Returns an array of query items for this ` URLComponents ` , in the order in which they appear in the original query string . Any percent - encoding in a query item name or value is retained <nl> - / / / <nl> - / / / The setter combines an array containing any number of ` URLQueryItem ` s , each of which represents a single key - value pair , into a query string and sets the ` URLComponents ` query property . This property assumes the query item names and values are already correctly percent - encoded , and that the query item names do not contain the query item delimiter characters ' & ' and ' = ' . Attempting to set an incorrectly percent - encoded query item or a query item name with the query item delimiter characters ' & ' and ' = ' will cause a ` fatalError ` . <nl> - @ available ( macOS 10 . 13 , iOS 11 . 0 , tvOS 11 . 0 , watchOS 4 . 0 , * ) <nl> - public var percentEncodedQueryItems : [ URLQueryItem ] ? { <nl> - get { return _handle . map { $ 0 . percentEncodedQueryItems } } <nl> - set { _applyMutation { $ 0 . percentEncodedQueryItems = newValue } } <nl> - } <nl> + / / / Returns an array of query items for this ` URLComponents ` , in the order in which they appear in the original query string . Any percent - encoding in a query item name or value is retained <nl> + / / / <nl> + / / / The setter combines an array containing any number of ` URLQueryItem ` s , each of which represents a single key - value pair , into a query string and sets the ` URLComponents ` query property . This property assumes the query item names and values are already correctly percent - encoded , and that the query item names do not contain the query item delimiter characters ' & ' and ' = ' . Attempting to set an incorrectly percent - encoded query item or a query item name with the query item delimiter characters ' & ' and ' = ' will cause a ` fatalError ` . <nl> + @ available ( macOS 10 . 13 , iOS 11 . 0 , tvOS 11 . 0 , watchOS 4 . 0 , * ) <nl> + public var percentEncodedQueryItems : [ URLQueryItem ] ? { <nl> + get { return _handle . map { $ 0 . percentEncodedQueryItems } } <nl> + set { _applyMutation { $ 0 . percentEncodedQueryItems = newValue } } <nl> + } <nl> <nl> public var hashValue : Int { <nl> return _handle . map { $ 0 . hash } <nl> mmm a / test / stdlib / TestURL . swift <nl> ppp b / test / stdlib / TestURL . swift <nl> class TestURL : TestURLSuper { <nl> expectEqual ( " global | nav " , first . value ) <nl> } <nl> <nl> - if # available ( OSX 10 . 13 , iOS 11 . 0 , * ) { <nl> - components . percentEncodedQuery = " name1 % E2 % 80 % A2 = value1 % E2 % 80 % A2 & name2 % E2 % 80 % A2 = value2 % E2 % 80 % A2 " <nl> - var qi = components . queryItems ! <nl> - expectNotNil ( qi ) <nl> - <nl> - expectEqual ( 2 , qi . count ) <nl> - <nl> - expectEqual ( " name1 • " , qi [ 0 ] . name ) <nl> - expectNotNil ( qi [ 0 ] . value ) <nl> - expectEqual ( " value1 • " , qi [ 0 ] . value ) <nl> - <nl> - expectEqual ( " name2 • " , qi [ 1 ] . name ) <nl> - expectNotNil ( qi [ 1 ] . value ) <nl> - expectEqual ( " value2 • " , qi [ 1 ] . value ) <nl> - <nl> - qi = components . percentEncodedQueryItems ! <nl> - expectNotNil ( qi ) <nl> - <nl> - expectEqual ( 2 , qi . 
count ) <nl> - <nl> - expectEqual ( " name1 % E2 % 80 % A2 " , qi [ 0 ] . name ) <nl> - expectNotNil ( qi [ 0 ] . value ) <nl> - expectEqual ( " value1 % E2 % 80 % A2 " , qi [ 0 ] . value ) <nl> - <nl> - expectEqual ( " name2 % E2 % 80 % A2 " , qi [ 1 ] . name ) <nl> - expectNotNil ( qi [ 0 ] . value ) <nl> - expectEqual ( " value2 % E2 % 80 % A2 " , qi [ 1 ] . value ) <nl> - <nl> - qi [ 0 ] . name = " % E2 % 80 % A2name1 " <nl> - qi [ 0 ] . value = " % E2 % 80 % A2value1 " <nl> - qi [ 1 ] . name = " % E2 % 80 % A2name2 " <nl> - qi [ 1 ] . value = " % E2 % 80 % A2value2 " <nl> - <nl> - components . percentEncodedQueryItems = qi <nl> - <nl> - expectEqual ( " % E2 % 80 % A2name1 = % E2 % 80 % A2value1 & % E2 % 80 % A2name2 = % E2 % 80 % A2value2 " , components . percentEncodedQuery ) <nl> - } <nl> - } <nl> + if # available ( OSX 10 . 13 , iOS 11 . 0 , tvOS 11 . 0 , watchOS 4 . 0 , * ) { <nl> + components . percentEncodedQuery = " name1 % E2 % 80 % A2 = value1 % E2 % 80 % A2 & name2 % E2 % 80 % A2 = value2 % E2 % 80 % A2 " <nl> + var qi = components . queryItems ! <nl> + expectNotNil ( qi ) <nl> + <nl> + expectEqual ( 2 , qi . count ) <nl> + <nl> + expectEqual ( " name1 • " , qi [ 0 ] . name ) <nl> + expectNotNil ( qi [ 0 ] . value ) <nl> + expectEqual ( " value1 • " , qi [ 0 ] . value ) <nl> + <nl> + expectEqual ( " name2 • " , qi [ 1 ] . name ) <nl> + expectNotNil ( qi [ 1 ] . value ) <nl> + expectEqual ( " value2 • " , qi [ 1 ] . value ) <nl> + <nl> + qi = components . percentEncodedQueryItems ! <nl> + expectNotNil ( qi ) <nl> + <nl> + expectEqual ( 2 , qi . count ) <nl> + <nl> + expectEqual ( " name1 % E2 % 80 % A2 " , qi [ 0 ] . name ) <nl> + expectNotNil ( qi [ 0 ] . value ) <nl> + expectEqual ( " value1 % E2 % 80 % A2 " , qi [ 0 ] . value ) <nl> + <nl> + expectEqual ( " name2 % E2 % 80 % A2 " , qi [ 1 ] . name ) <nl> + expectNotNil ( qi [ 0 ] . value ) <nl> + expectEqual ( " value2 % E2 % 80 % A2 " , qi [ 1 ] . value ) <nl> + <nl> + qi [ 0 ] . name = " % E2 % 80 % A2name1 " <nl> + qi [ 0 ] . value = " % E2 % 80 % A2value1 " <nl> + qi [ 1 ] . name = " % E2 % 80 % A2name2 " <nl> + qi [ 1 ] . value = " % E2 % 80 % A2value2 " <nl> + <nl> + components . percentEncodedQueryItems = qi <nl> + <nl> + expectEqual ( " % E2 % 80 % A2name1 = % E2 % 80 % A2value1 & % E2 % 80 % A2name2 = % E2 % 80 % A2value2 " , components . percentEncodedQuery ) <nl> + } <nl> + } <nl> <nl> func testURLResourceValues ( ) { <nl> <nl>
|
Fixed indentation in changes. Fixed availability in test.
|
apple/swift
|
6fbb2a4b25ce6f166f73a0d544c31b5591870dc2
|
2018-06-29T01:19:23Z
|
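The record above concerns percentEncodedQueryItems, whose setter assumes query item names and values are already correctly percent-encoded. For reference, a minimal C++ sketch of RFC 3986 percent-encoding; it maps the bullet character used in the test (UTF-8 bytes E2 80 A2) to %E2%80%A2:

    #include <cctype>
    #include <cstdio>
    #include <iostream>
    #include <string>

    // Encode every byte outside the RFC 3986 "unreserved" set.
    std::string percent_encode(const std::string& in) {
        std::string out;
        for (unsigned char c : in) {
            if (std::isalnum(c) || c == '-' || c == '.' || c == '_' || c == '~') {
                out += static_cast<char>(c);
            } else {
                char buf[4];
                std::snprintf(buf, sizeof buf, "%%%02X", c);
                out += buf;
            }
        }
        return out;
    }

    int main() {
        std::cout << percent_encode("name1\xE2\x80\xA2") << "\n";  // name1%E2%80%A2
    }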
mmm a / torch / _utils . py <nl> ppp b / torch / _utils . py <nl> def _flatten_dense_tensors ( tensors ) : <nl> Since inputs are dense , the resulting tensor will be a concatenated 1D <nl> buffer . Element - wise operation on this buffer will be equivalent to <nl> operating individually . <nl> + <nl> + Arguments : <nl> + tensors ( Iterable [ Tensor ] ) : dense tensors to flatten . <nl> + <nl> + Returns : <nl> + A contiguous 1D buffer containing input tensors . <nl> " " " <nl> if len ( tensors ) = = 1 : <nl> return tensors [ 0 ] . contiguous ( ) . view ( - 1 ) <nl> def _flatten_dense_tensors ( tensors ) : <nl> def _flatten_sparse_tensors ( tensors ) : <nl> " " " Flatten sparse tensors into two contiguous 1D buffers , one of indices and <nl> one of values . Assume tensors are of same sparse type . <nl> + <nl> + Arguments : <nl> + tensors ( Iterable [ Tensor ] ) : sparse tensors to flatten . <nl> + <nl> + Returns : <nl> + A tuple of two contiguous 1D buffers , one containing input tensors ' <nl> + indices and the other containing the values . <nl> " " " <nl> flat_indices = _flatten_dense_tensors ( [ t . _indices ( ) for t in tensors ] ) <nl> flat_values = _flatten_dense_tensors ( [ t . _values ( ) for t in tensors ] ) <nl> def _flatten_sparse_tensors ( tensors ) : <nl> def _unflatten_dense_tensors ( flat , tensors ) : <nl> " " " View a flat buffer using the sizes of tensors . Assume that tensors are of <nl> same dense type , and that flat is given by _flatten_dense_tensors . <nl> + <nl> + Arguments : <nl> + flat ( Tensor ) : flattened dense tensors to unflatten . <nl> + tensors ( Iterable [ Tensor ] ) : dense tensors whose sizes will be used to <nl> + unflatten flat . <nl> + <nl> + Returns : <nl> + Unflattened dense tensors with sizes same as tensors and values from <nl> + flat . <nl> " " " <nl> outputs = [ ] <nl> offset = 0 <nl> def _unflatten_sparse_tensors ( flat , tensors ) : <nl> " " " View flat buffer ( containing indices and values ) using the sizes of <nl> tensors . Assume that tensors are of same sparse type , and that flat is given <nl> by _flatten_sparse_tensors . <nl> + <nl> + Arguments : <nl> + flat ( tuple ( Tensor , Tensor ) ) : flattened indices and values of sparse <nl> + tensors to unflatten . <nl> + tensors ( Iterable [ Tensor ] ) : sparse tensors whose sizes will be used to <nl> + unflatten flat . <nl> + <nl> + Returns : <nl> + Unflattened sparse tensors with sizes same as tensors and values from <nl> + flat . <nl> " " " <nl> flat_indices , flat_values = flat <nl> indices = _unflatten_dense_tensors ( flat_indices , [ t . _indices ( ) for t in tensors ] ) <nl> def _unflatten_sparse_tensors ( flat , tensors ) : <nl> <nl> def _reorder_tensors_as ( tensors , ordered_tensors ) : <nl> " " " Assume that tensors are of same order as ordered_tensors within their <nl> - types , e . g . from _take_tensors . Reorder them to be of same order as <nl> + types , e . g . , from _take_tensors . Reorder them to be of same order as <nl> ordered_tensors . <nl> + <nl> + Arguments : <nl> + tensors ( Iterable [ Tensor ] ) : tensors to be reordered . They should be of <nl> + the same order as ordered_tensors within their own types . <nl> + ordered_tensors ( Iterable [ Tensor ] ) : tensors whose order will be the <nl> + reference . <nl> + <nl> + Returns : <nl> + Ordered tuple of tensors with contents from tensors and order of <nl> + ordered_tensors . 
<nl> " " " <nl> type_dict = defaultdict ( list ) <nl> for tensor in tensors : <nl> def _reorder_tensors_as ( tensors , ordered_tensors ) : <nl> <nl> <nl> def _take_tensors ( tensors , size_limit ) : <nl> - " " " Group tensors into chunks . This generator yields a chunk at each call , <nl> + " " " Group tensors into chunks . This generator yields a chunk at each time , <nl> each containing tensors of same type up to certain byte limit in total size . <nl> - The yielded tensors are only ordered as the original sequence within its <nl> - types . <nl> <nl> Args : <nl> tensors ( Sequence ) : A sequence of tensors to be separated into chunks . <nl> size_limit ( int ) : The limit of each chunk in bytes . <nl> + <nl> + Yields : <nl> + Blocks of tensors of same type and within size_limit . The yielded <nl> + tensors are only ordered as the original sequence within its types . <nl> " " " <nl> buf_dict = defaultdict ( lambda : [ [ ] , 0 ] ) <nl> for tensor in tensors : <nl> mmm a / torch / cuda / comm . py <nl> ppp b / torch / cuda / comm . py <nl> def reduce_add_coalesced ( inputs , destination = None , buffer_size = 10485760 ) : <nl> A tuple of tensors containing an elementwise sum of each group of <nl> inputs , placed on the ` ` destination ` ` device . <nl> " " " <nl> - dense_tensors = [ ] # shape ( num_tensors , num_gpus ) <nl> + dense_tensors = [ [ ] for _ in inputs ] # shape ( num_gpus , num_tensors ) <nl> output = [ ] <nl> + ref_order = [ ] <nl> + # process sparse ones first since they may have different sizes on different gpus <nl> for tensor_at_gpus in zip ( * inputs ) : <nl> - if tensor_at_gpus [ 0 ] . is_sparse : <nl> + if all ( t . is_sparse for t in tensor_at_gpus ) : <nl> result = reduce_add ( tensor_at_gpus , destination ) <nl> output . append ( result ) <nl> + ref_order . append ( tensor_at_gpus [ 0 ] ) <nl> else : <nl> - dense_tensors . append ( tensor_at_gpus ) <nl> - itrs = [ _take_tensors ( tensors , buffer_size ) for tensors in zip ( * dense_tensors ) ] <nl> + for coll , t in zip ( dense_tensors , tensor_at_gpus ) : <nl> + coll . append ( t . to_dense ( ) if t . is_sparse else t ) <nl> + ref_order . append ( dense_tensors [ 0 ] [ - 1 ] ) <nl> + itrs = [ _take_tensors ( tensors , buffer_size ) for tensors in dense_tensors ] <nl> + # now the dense ones , which have consistent sizes <nl> for chunks in zip ( * itrs ) : <nl> - tensors = [ _flatten_dense_tensors ( chunk ) for chunk in chunks ] <nl> - result = reduce_add ( tensors , destination ) <nl> - output . extend ( _unflatten_dense_tensors ( result , chunks [ 0 ] ) ) <nl> - return tuple ( _reorder_tensors_as ( output , inputs [ 0 ] ) ) <nl> + flat_tensors = [ _flatten_dense_tensors ( chunk ) for chunk in chunks ] <nl> + flat_result = reduce_add ( flat_tensors , destination ) <nl> + output . extend ( _unflatten_dense_tensors ( flat_result , chunks [ 0 ] ) ) <nl> + return tuple ( _reorder_tensors_as ( output , ref_order ) ) <nl> <nl> <nl> def scatter ( tensor , devices , chunk_sizes = None , dim = 0 , streams = None ) : <nl>
|
comments and case where not all sparse ( )
|
pytorch/pytorch
|
fa5efab669d68f2931292176d3376e3519bdd0b4
|
2017-11-01T10:05:17Z
|
mmm a / dbms / src / Interpreters / evaluateConstantExpression . cpp <nl> ppp b / dbms / src / Interpreters / evaluateConstantExpression . cpp <nl> namespace <nl> using Conjunction = ColumnsWithTypeAndName ; <nl> using Disjunction = std : : vector < Conjunction > ; <nl> <nl> - Disjunction analyzeEquals ( const ASTIdentifier * identifier , const ASTLiteral * literal , const ExpressionActionsPtr & expr ) <nl> + Disjunction analyzeEquals ( const ASTIdentifier * identifier , const Field & value , const ExpressionActionsPtr & expr ) <nl> { <nl> - if ( ! identifier | | ! literal ) <nl> + if ( ! identifier | | value . isNull ( ) ) <nl> { <nl> return { } ; <nl> } <nl> namespace <nl> { <nl> ColumnWithTypeAndName column ; <nl> / / FIXME : what to do if field is not convertable ? <nl> - column . column = type - > createColumnConst ( 1 , convertFieldToType ( literal - > value , * type ) ) ; <nl> + column . column = type - > createColumnConst ( 1 , convertFieldToType ( value , * type ) ) ; <nl> column . name = name ; <nl> column . type = type ; <nl> return { { std : : move ( column ) } } ; <nl> namespace <nl> return { } ; <nl> } <nl> <nl> + Disjunction analyzeEquals ( const ASTIdentifier * identifier , const ASTLiteral * literal , const ExpressionActionsPtr & expr ) <nl> + { <nl> + if ( ! identifier | | ! literal ) <nl> + { <nl> + return { } ; <nl> + } <nl> + <nl> + return analyzeEquals ( identifier , literal - > value , expr ) ; <nl> + } <nl> + <nl> Disjunction andDNF ( const Disjunction & left , const Disjunction & right ) <nl> { <nl> if ( left . empty ( ) ) <nl> namespace <nl> const auto * left = fn - > arguments - > children . front ( ) . get ( ) ; <nl> const auto * right = fn - > arguments - > children . back ( ) . get ( ) ; <nl> const auto * identifier = left - > as < ASTIdentifier > ( ) ; <nl> - const auto * inner_fn = right - > as < ASTFunction > ( ) ; <nl> - <nl> - if ( ! inner_fn ) <nl> - { <nl> - return { } ; <nl> - } <nl> <nl> - const auto * tuple = inner_fn - > children . front ( ) - > as < ASTExpressionList > ( ) ; <nl> + Disjunction result ; <nl> <nl> - if ( ! tuple ) <nl> + if ( const auto * tuple_func = right - > as < ASTFunction > ( ) ; tuple_func & & tuple_func - > name = = " tuple " ) <nl> { <nl> - return { } ; <nl> - } <nl> + const auto * tuple_elements = tuple_func - > children . front ( ) - > as < ASTExpressionList > ( ) ; <nl> + for ( const auto & child : tuple_elements - > children ) <nl> + { <nl> + const auto * literal = child - > as < ASTLiteral > ( ) ; <nl> + const auto dnf = analyzeEquals ( identifier , literal , expr ) ; <nl> <nl> - Disjunction result ; <nl> + if ( dnf . empty ( ) ) <nl> + { <nl> + return { } ; <nl> + } <nl> <nl> - for ( const auto & child : tuple - > children ) <nl> + result . insert ( result . end ( ) , dnf . begin ( ) , dnf . end ( ) ) ; <nl> + } <nl> + } <nl> + else if ( const auto * tuple_literal = right - > as < ASTLiteral > ( ) ; <nl> + tuple_literal & & tuple_literal - > value . getType ( ) = = Field : : Types : : Tuple ) <nl> { <nl> - const auto * literal = child - > as < ASTLiteral > ( ) ; <nl> - const auto dnf = analyzeEquals ( identifier , literal , expr ) ; <nl> - <nl> - if ( dnf . empty ( ) ) <nl> + const auto & tuple = tuple_literal - > value . get < const Tuple & > ( ) ; <nl> + for ( const auto & child : tuple ) <nl> { <nl> - return { } ; <nl> - } <nl> + const auto dnf = analyzeEquals ( identifier , child , expr ) ; <nl> <nl> - result . insert ( result . end ( ) , dnf . begin ( ) , dnf . end ( ) ) ; <nl> + if ( dnf . 
empty ( ) ) <nl> + { <nl> + return { } ; <nl> + } <nl> + <nl> + result . insert ( result . end ( ) , dnf . begin ( ) , dnf . end ( ) ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + return { } ; <nl> } <nl> <nl> return result ; <nl> mmm a / dbms / src / Parsers / ExpressionElementParsers . cpp <nl> ppp b / dbms / src / Parsers / ExpressionElementParsers . cpp <nl> bool ParserCollectionOfLiterals < Collection > : : parseImpl ( Pos & pos , ASTPtr & node , <nl> Pos literal_begin = pos ; <nl> <nl> Collection arr ; <nl> - <nl> ParserLiteral literal_p ; <nl> <nl> + + pos ; <nl> - <nl> while ( pos . isValid ( ) ) <nl> { <nl> if ( ! arr . empty ( ) ) <nl> bool ParserCollectionOfLiterals < Collection > : : parseImpl ( Pos & pos , ASTPtr & node , <nl> { <nl> std : : shared_ptr < ASTLiteral > literal ; <nl> <nl> - / / / Parse one - element tuples ( e . g . ( 1 ) ) as single values for backward compatibility . <nl> + / / / Parse one - element tuples ( e . g . ( 1 ) ) later as single values for backward compatibility . <nl> if ( std : : is_same_v < Collection , Tuple > & & arr . size ( ) = = 1 ) <nl> - literal = std : : make_shared < ASTLiteral > ( arr [ 0 ] ) ; <nl> - else <nl> - literal = std : : make_shared < ASTLiteral > ( arr ) ; <nl> + return false ; <nl> <nl> + literal = std : : make_shared < ASTLiteral > ( arr ) ; <nl> literal - > begin = literal_begin ; <nl> literal - > end = + + pos ; <nl> node = literal ; <nl> bool ParserCollectionOfLiterals < Collection > : : parseImpl ( Pos & pos , ASTPtr & node , <nl> } <nl> else <nl> { <nl> - std : : stringstream msg ; <nl> - msg < < " comma or " < < getTokenName ( closing_bracket ) ; <nl> - expected . add ( pos , msg . str ( ) . c_str ( ) ) ; <nl> + String message = String ( " comma or " ) + getTokenName ( closing_bracket ) ; <nl> + expected . add ( pos , message . c_str ( ) ) ; <nl> return false ; <nl> } <nl> } <nl> mmm a / dbms / src / Parsers / ExpressionElementParsers . h <nl> ppp b / dbms / src / Parsers / ExpressionElementParsers . h <nl> class ParserCollectionOfLiterals : public IParserBase <nl> TokenType closing_bracket ; <nl> } ; <nl> <nl> + / / / A tuple of literals with same type . <nl> class ParserTupleOfLiterals : public IParserBase <nl> { <nl> public : <nl> mmm a / dbms / src / Processors / Executors / PipelineExecutor . h <nl> ppp b / dbms / src / Processors / Executors / PipelineExecutor . h <nl> class PipelineExecutor <nl> void executeSingleThread ( size_t thread_num , size_t num_threads ) ; <nl> void finish ( ) ; <nl> <nl> + public : <nl> String dumpPipeline ( ) const ; <nl> } ; <nl> <nl> mmm a / dbms / src / Processors / Formats / Impl / ConstantExpressionTemplate . cpp <nl> ppp b / dbms / src / Processors / Formats / Impl / ConstantExpressionTemplate . cpp <nl> <nl> # include < Columns / ColumnConst . h > <nl> + # include < Common / typeid_cast . h > <nl> # include < DataTypes / DataTypesNumber . h > <nl> # include < DataTypes / DataTypeNullable . h > <nl> # include < DataTypes / FieldToDataType . h > <nl> class ReplaceLiteralsVisitor <nl> info . type = std : : make_shared < DataTypeFloat64 > ( ) ; <nl> else if ( field_type = = Field : : Types : : String ) <nl> info . type = std : : make_shared < DataTypeString > ( ) ; <nl> - else if ( field_type = = Field : : Types : : Array ) <nl> + else if ( field_type = = Field : : Types : : Array | | field_type = = Field : : Types : : Tuple ) <nl> { <nl> info . special_parser . is_array = true ; <nl> info . type = applyVisitor ( FieldToDataType ( ) , info . 
literal - > value ) ; <nl> - auto nested_type = assert_cast < const DataTypeArray & > ( * info . type ) . getNestedType ( ) ; <nl> + <nl> + DataTypePtr nested_type ; <nl> + if ( auto array_type = typeid_cast < const DataTypeArray * > ( info . type . get ( ) ) ) <nl> + nested_type = array_type - > getNestedType ( ) ; <nl> + else if ( auto tuple_type = typeid_cast < const DataTypeTuple * > ( info . type . get ( ) ) ) <nl> + nested_type = tuple_type - > getElements ( ) [ 0 ] ; <nl> + else <nl> + throw Exception ( " Unexpected type " + info . type - > getName ( ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> / / / It can be Array ( Nullable ( nested_type ) ) <nl> bool array_of_nullable = false ; <nl> class ReplaceLiteralsVisitor <nl> info . special_parser . is_nullable = true ; <nl> } <nl> <nl> - info . type = std : : make_shared < DataTypeArray > ( nested_type ) ; <nl> + if ( field_type = = Field : : Types : : Tuple ) <nl> + { <nl> + const auto & tuple = info . literal - > value . get < const Tuple & > ( ) ; <nl> + DataTypes elements ( tuple . size ( ) ) ; <nl> + for ( size_t i = 0 ; i < tuple . size ( ) ; + + i ) <nl> + elements [ i ] = nested_type ; <nl> + info . type = std : : make_shared < DataTypeTuple > ( elements ) ; <nl> + } <nl> + else <nl> + { <nl> + info . type = std : : make_shared < DataTypeArray > ( nested_type ) ; <nl> + } <nl> } <nl> else <nl> throw Exception ( String ( " Unexpected literal type " ) + info . literal - > value . getTypeName ( ) + " . It ' s a bug " , <nl> bool ConstantExpressionTemplate : : parseLiteralAndAssertType ( ReadBuffer & istr , co <nl> { <nl> / / / TODO faster way to check types without using Parsers <nl> ParserArrayOfLiterals parser_array ; <nl> + ParserTupleOfLiterals parser_tuple ; <nl> + <nl> Tokens tokens_number ( istr . position ( ) , istr . buffer ( ) . end ( ) ) ; <nl> IParser : : Pos iterator ( tokens_number , settings . max_parser_depth ) ; <nl> Expected expected ; <nl> ASTPtr ast ; <nl> - <nl> - if ( ! parser_array . parse ( iterator , ast , expected ) ) <nl> + if ( ! parser_array . parse ( iterator , ast , expected ) & & ! parser_tuple . parse ( iterator , ast , expected ) ) <nl> return false ; <nl> istr . position ( ) = const_cast < char * > ( iterator - > begin ) ; <nl> <nl> - const Field & array = ast - > as < ASTLiteral & > ( ) . value ; <nl> - auto array_type = applyVisitor ( FieldToDataType ( ) , array ) ; <nl> - auto nested_type = assert_cast < const DataTypeArray & > ( * array_type ) . getNestedType ( ) ; <nl> + const Field & collection = ast - > as < ASTLiteral & > ( ) . value ; <nl> + auto collection_type = applyVisitor ( FieldToDataType ( ) , collection ) ; <nl> + <nl> + DataTypePtr nested_type ; <nl> + if ( auto array_type = typeid_cast < const DataTypeArray * > ( collection_type . get ( ) ) ) <nl> + nested_type = array_type - > getNestedType ( ) ; <nl> + else if ( auto tuple_type = typeid_cast < const DataTypeTuple * > ( collection_type . get ( ) ) ) <nl> + nested_type = tuple_type - > getElements ( ) [ 0 ] ; <nl> + <nl> + if ( ! nested_type ) <nl> + return false ; <nl> + <nl> if ( type_info . is_nullable ) <nl> if ( auto nullable = dynamic_cast < const DataTypeNullable * > ( nested_type . get ( ) ) ) <nl> nested_type = nullable - > getNestedType ( ) ; <nl> bool ConstantExpressionTemplate : : parseLiteralAndAssertType ( ReadBuffer & istr , co <nl> ( nested_type_info . isNativeInt ( ) & & type_info . nested_type = = Type : : Int64 ) | | <nl> ( nested_type_info . isFloat64 ( ) & & type_info . 
nested_type = = Type : : Float64 ) ) <nl> { <nl> - Field array_same_types = convertFieldToType ( array , * complex_type , nullptr ) ; <nl> + Field array_same_types = convertFieldToType ( collection , * complex_type , nullptr ) ; <nl> columns [ column_idx ] - > insert ( array_same_types ) ; <nl> return true ; <nl> } <nl>
|
tuple as literal
|
ClickHouse/ClickHouse
|
c1291e93483a3d5a6174c3a00e00225613e5b746
|
2020-03-24T12:55:35Z
|
mmm a / dbms / include / DB / Functions / FunctionsVisitParam . h <nl> ppp b / dbms / include / DB / Functions / FunctionsVisitParam . h <nl> <nl> # include < Poco / NumberFormatter . h > <nl> # include < Poco / UTF8Encoding . h > <nl> # include < Poco / Unicode . h > <nl> + # include < Poco / NumberParser . h > <nl> <nl> # include < DB / DataTypes / DataTypesNumberFixed . h > <nl> # include < DB / DataTypes / DataTypeString . h > <nl> struct HasParam <nl> { <nl> typedef UInt8 ResultType ; <nl> <nl> - static UInt8 extract ( const UInt8 * pos , const UInt8 * end ) <nl> + static UInt8 extract ( const UInt8 * begin , const UInt8 * end ) <nl> { <nl> return true ; <nl> } <nl> struct ExtractNumericType <nl> { <nl> typedef NumericType ResultType ; <nl> <nl> - static ResultType extract ( const UInt8 * pos , const UInt8 * end ) <nl> + static ResultType extract ( const UInt8 * begin , const UInt8 * end ) <nl> { <nl> - ReadBuffer in ( const_cast < char * > ( reinterpret_cast < const char * > ( pos ) ) , end - pos , 0 ) ; <nl> + ReadBuffer in ( const_cast < char * > ( reinterpret_cast < const char * > ( begin ) ) , end - begin , 0 ) ; <nl> <nl> / / / Learn to read numbers in double quotes <nl> if ( ! in . eof ( ) & & * in . position ( ) = = ' " ' ) <nl> struct ExtractBool <nl> { <nl> typedef UInt8 ResultType ; <nl> <nl> - static UInt8 extract ( const UInt8 * pos , const UInt8 * end ) <nl> + static UInt8 extract ( const UInt8 * begin , const UInt8 * end ) <nl> { <nl> - return pos + 4 < = end & & 0 = = strncmp ( reinterpret_cast < const char * > ( pos ) , " true " , 4 ) ; <nl> + return begin + 4 < = end & & 0 = = strncmp ( reinterpret_cast < const char * > ( begin ) , " true " , 4 ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + struct ExtractRaw <nl> + { <nl> + static void extract ( const UInt8 * pos , const UInt8 * end , std : : vector < UInt8 > & res_data ) <nl> + { <nl> + if ( pos = = end ) <nl> + return ; <nl> + <nl> + UInt8 open_char = * pos ; <nl> + UInt8 close_char = 0 ; <nl> + switch ( open_char ) <nl> + { <nl> + case ' [ ' : <nl> + close_char = ' ] ' ; <nl> + break ; <nl> + case ' { ' : <nl> + close_char = ' } ' ; <nl> + break ; <nl> + case ' " ' : <nl> + close_char = ' " ' ; <nl> + break ; <nl> + } <nl> + <nl> + if ( close_char ! = 0 ) <nl> + { <nl> + size_t balance = 1 ; <nl> + char last_char = 0 ; <nl> + <nl> + res_data . push_back ( * pos ) ; <nl> + <nl> + + + pos ; <nl> + for ( ; pos ! = end & & balance > 0 ; + + pos ) <nl> + { <nl> + res_data . push_back ( * pos ) ; <nl> + <nl> + if ( open_char = = ' " ' & & * pos = = ' " ' ) <nl> + { <nl> + if ( last_char ! = ' \ \ ' ) <nl> + break ; <nl> + } <nl> + else <nl> + { <nl> + if ( * pos = = open_char ) <nl> + + + balance ; <nl> + if ( * pos = = close_char ) <nl> + - - balance ; <nl> + } <nl> + <nl> + if ( last_char = = ' \ \ ' ) <nl> + last_char = 0 ; <nl> + else <nl> + last_char = * pos ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + for ( ; pos ! = end & & * pos ! = ' , ' & & * pos ! = ' } ' ; + + pos ) <nl> + res_data . 
push_back ( * pos ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + struct ExtractString <nl> + { <nl> + static bool tryParseDigit ( UInt8 c , UInt8 & res ) <nl> + { <nl> + if ( ' 0 ' < = c & & c < = ' 9 ' ) <nl> + { <nl> + res = c - ' 0 ' ; <nl> + return true ; <nl> + } <nl> + if ( ' A ' < = c & & c < = ' Z ' ) <nl> + { <nl> + res = c - ( ' A ' - 10 ) ; <nl> + return true ; <nl> + } <nl> + if ( ' a ' < = c & & c < = ' z ' ) <nl> + { <nl> + res = c - ( ' a ' - 10 ) ; <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + static bool tryUnhex ( const UInt8 * pos , const UInt8 * end , int & res ) <nl> + { <nl> + if ( pos + 3 > = end ) <nl> + return false ; <nl> + <nl> + res = 0 ; <nl> + { <nl> + UInt8 major , minor ; <nl> + if ( ! tryParseDigit ( * ( pos + + ) , major ) ) <nl> + return false ; <nl> + if ( ! tryParseDigit ( * ( pos + + ) , minor ) ) <nl> + return false ; <nl> + res | = ( major < < 4 ) | minor ; <nl> + } <nl> + res < < = 8 ; <nl> + { <nl> + UInt8 major , minor ; <nl> + if ( ! tryParseDigit ( * ( pos + + ) , major ) ) <nl> + return false ; <nl> + if ( ! tryParseDigit ( * ( pos + + ) , minor ) ) <nl> + return false ; <nl> + res | = ( major < < 4 ) | minor ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + static bool tryExtract ( const UInt8 * pos , const UInt8 * end , std : : vector < UInt8 > & res_data ) <nl> + { <nl> + if ( pos = = end | | * pos ! = ' " ' ) <nl> + return false ; <nl> + <nl> + + + pos ; <nl> + while ( pos ! = end ) <nl> + { <nl> + switch ( * pos ) <nl> + { <nl> + case ' \ \ ' : <nl> + + + pos ; <nl> + if ( pos > = end ) <nl> + return false ; <nl> + <nl> + switch ( * pos ) <nl> + { <nl> + case ' " ' : <nl> + res_data . push_back ( ' " ' ) ; <nl> + break ; <nl> + case ' \ \ ' : <nl> + res_data . push_back ( ' \ \ ' ) ; <nl> + break ; <nl> + case ' / ' : <nl> + res_data . push_back ( ' / ' ) ; <nl> + break ; <nl> + case ' b ' : <nl> + res_data . push_back ( ' \ b ' ) ; <nl> + break ; <nl> + case ' f ' : <nl> + res_data . push_back ( ' \ f ' ) ; <nl> + break ; <nl> + case ' n ' : <nl> + res_data . push_back ( ' \ n ' ) ; <nl> + break ; <nl> + case ' r ' : <nl> + res_data . push_back ( ' \ r ' ) ; <nl> + break ; <nl> + case ' t ' : <nl> + res_data . push_back ( ' \ t ' ) ; <nl> + break ; <nl> + case ' u ' : <nl> + { <nl> + + + pos ; <nl> + <nl> + int unicode ; <nl> + if ( ! tryUnhex ( pos , end , unicode ) ) <nl> + return false ; <nl> + <nl> + res_data . resize ( res_data . size ( ) + 6 ) ; / / / maximum size of a UTF8 multibyte sequence <nl> + <nl> + Poco : : UTF8Encoding utf8 ; <nl> + int length = utf8 . convert ( unicode , const_cast < UInt8 * > ( & res_data [ 0 ] ) + res_data . size ( ) - 6 , 6 ) ; <nl> + <nl> + if ( ! length ) <nl> + return false ; <nl> + <nl> + res_data . resize ( res_data . size ( ) - 6 + length ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + res_data . push_back ( * pos ) ; <nl> + break ; <nl> + } <nl> + + + pos ; <nl> + break ; <nl> + case ' " ' : <nl> + return true ; <nl> + default : <nl> + res_data . push_back ( * pos ) ; <nl> + + + pos ; <nl> + break ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + static void extract ( const UInt8 * pos , const UInt8 * end , std : : vector < UInt8 > & res_data ) <nl> + { <nl> + size_t old_size = res_data . size ( ) ; <nl> + <nl> + if ( ! tryExtract ( pos , end , res_data ) ) <nl> + res_data . 
resize ( old_size ) ; <nl> } <nl> } ; <nl> <nl> struct ExtractParamImpl <nl> template < typename ParamExtractor > <nl> struct ExtractParamToStringImpl <nl> { <nl> + static void vector ( const std : : vector < UInt8 > & data , const ColumnString : : Offsets_t & offsets , <nl> + std : : string needle , <nl> + std : : vector < UInt8 > & res_data , ColumnString : : Offsets_t & res_offsets ) <nl> + { <nl> + res_data . reserve ( data . size ( ) / 5 ) ; <nl> + res_offsets . resize ( offsets . size ( ) ) ; <nl> + <nl> + / / / Search for the parameter simply as a substring of the form " name " : <nl> + needle = " \ " " + needle + " \ " : " ; <nl> + <nl> + const UInt8 * begin = & data [ 0 ] ; <nl> + const UInt8 * pos = begin ; <nl> + const UInt8 * end = pos + data . size ( ) ; <nl> + <nl> + / / / Current index in the array of strings . <nl> + size_t i = 0 ; <nl> + <nl> + / / / We will search for the next occurrence in all strings at once . <nl> + while ( pos < end & & NULL ! = ( pos = reinterpret_cast < UInt8 * > ( memmem ( pos , end - pos , needle . data ( ) , needle . size ( ) ) ) ) ) <nl> + { <nl> + / / / Determine which index it belongs to . <nl> + while ( begin + offsets [ i ] < pos ) <nl> + { <nl> + res_data . push_back ( 0 ) ; <nl> + res_offsets [ i ] = res_data . size ( ) ; <nl> + + + i ; <nl> + } <nl> + <nl> + / / / Check that the occurrence does not cross string boundaries . <nl> + if ( pos + needle . size ( ) < begin + offsets [ i ] ) <nl> + ParamExtractor : : extract ( pos + needle . size ( ) , begin + offsets [ i ] , res_data ) ; <nl> + <nl> + pos = begin + offsets [ i ] ; <nl> + <nl> + res_data . push_back ( 0 ) ; <nl> + res_offsets [ i ] = res_data . size ( ) ; <nl> + + + i ; <nl> + } <nl> + } <nl> } ; <nl> <nl> <nl> struct NameVisitParamExtractUInt { static const char * get ( ) { return " visitPara <nl> struct NameVisitParamExtractInt { static const char * get ( ) { return " visitParamExtractInt " ; } } ; <nl> struct NameVisitParamExtractFloat { static const char * get ( ) { return " visitParamExtractFloat " ; } } ; <nl> struct NameVisitParamExtractBool { static const char * get ( ) { return " visitParamExtractBool " ; } } ; <nl> + struct NameVisitParamExtractRaw { static const char * get ( ) { return " visitParamExtractRaw " ; } } ; <nl> + struct NameVisitParamExtractString { static const char * get ( ) { return " visitParamExtractString " ; } } ; <nl> <nl> <nl> typedef FunctionsStringSearch < ExtractParamImpl < HasParam > , NameVisitParamHas > FunctionVisitParamHas ; <nl> typedef FunctionsStringSearch < ExtractParamImpl < ExtractNumericType < UInt64 > > , Nam <nl> typedef FunctionsStringSearch < ExtractParamImpl < ExtractNumericType < Int64 > > , NameVisitParamExtractInt > FunctionVisitParamExtractInt ; <nl> typedef FunctionsStringSearch < ExtractParamImpl < ExtractNumericType < Float64 > > , NameVisitParamExtractFloat > FunctionVisitParamExtractFloat ; <nl> typedef FunctionsStringSearch < ExtractParamImpl < ExtractBool > , NameVisitParamExtractBool > FunctionVisitParamExtractBool ; <nl> + typedef FunctionsStringSearchToString < ExtractParamToStringImpl < ExtractRaw > , NameVisitParamExtractRaw > FunctionVisitParamExtractRaw ; <nl> + typedef FunctionsStringSearchToString < ExtractParamToStringImpl < ExtractString > , NameVisitParamExtractString > FunctionVisitParamExtractString ; <nl> <nl> } <nl> \ No newline at end of file <nl> mmm a / dbms / src / Functions / FunctionFactory . cpp <nl> ppp b / dbms / src / Functions / FunctionFactory . 
cpp <nl> FunctionPtr FunctionFactory : : get ( <nl> else if ( name = = " visitParamExtractInt " ) return new FunctionVisitParamExtractInt ; <nl> else if ( name = = " visitParamExtractFloat " ) return new FunctionVisitParamExtractFloat ; <nl> else if ( name = = " visitParamExtractBool " ) return new FunctionVisitParamExtractBool ; <nl> + else if ( name = = " visitParamExtractRaw " ) return new FunctionVisitParamExtractRaw ; <nl> + else if ( name = = " visitParamExtractString " ) return new FunctionVisitParamExtractString ; <nl> <nl> else <nl> throw Exception ( " Unknown function " + name , ErrorCodes : : UNKNOWN_FUNCTION ) ; <nl>
|
ExtractString [ NOT TESTED ] and ExtractRaw [ # CONV - 7793 ]
|
ClickHouse/ClickHouse
|
ced71b0f9babce642cbef9d54ea3d6560feec7d5
|
2013-06-11T16:09:53Z
|
mmm a / hphp / hack / src / parser / parser_hack . ml <nl> ppp b / hphp / hack / src / parser / parser_hack . ml <nl> and function_body env = <nl> * ) <nl> [ Noop ] <nl> | _ - > <nl> - ( match statement_list env with <nl> + ( match statement_list ~ is_block_scope : false env with <nl> | [ ] - > [ Noop ] <nl> | _ when env . quick - > [ Noop ] <nl> | x - > x ) <nl> and with_ignored_yield env fn = <nl> ( * Statements * ) <nl> ( * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ) <nl> <nl> - and statement_list env = <nl> + and enforce_block_scope_using env pos stmt = <nl> + match stmt with <nl> + | Using { us_is_block_scoped = false ; _ } - > <nl> + error_at env pos " Parse error : function - scoped ' using ' statement not supported in a block scope " <nl> + | _ - > <nl> + ( ) <nl> + <nl> + ( * is_block_scope = true if we have entered a { . . . } block * ) <nl> + and statement_list ~ is_block_scope env = <nl> match L . token env . file env . lb with <nl> | Trcb - > [ ] <nl> | Tlcb - > <nl> - let block = statement_list env in <nl> - Block block : : statement_list env <nl> + let block = statement_list ~ is_block_scope : true env in <nl> + Block block : : statement_list ~ is_block_scope env <nl> | Tsc - > <nl> - statement_list env <nl> + statement_list ~ is_block_scope env <nl> | Teof - > <nl> error_expect env " } " ; <nl> [ ] <nl> | _ - > <nl> + let pos = Pos . make env . file env . lb in <nl> L . back env . lb ; <nl> let error_state = ! ( env . errors ) in <nl> let stmt = statement env in <nl> if ! ( env . errors ) ! = error_state <nl> then L . next_newline_or_close_cb env . lb ; <nl> - stmt : : statement_list env <nl> + if is_block_scope then enforce_block_scope_using env pos stmt ; <nl> + stmt : : statement_list ~ is_block_scope env <nl> <nl> and using_statement_list env = <nl> match L . token env . file env . lb with <nl> | Trcb | Teof - > [ ] <nl> | Tlcb - > <nl> - let block = statement_list env in <nl> - Block block : : statement_list env <nl> + let block = statement_list ~ is_block_scope : true env in <nl> + Block block : : statement_list ~ is_block_scope : false env <nl> | Tsc - > <nl> using_statement_list env <nl> | _ - > <nl> and statement env = <nl> let stmt = statement_word env word in <nl> stmt <nl> | Tlcb - > <nl> - Block ( statement_list env ) <nl> + Block ( statement_list ~ is_block_scope : true env ) <nl> | Tsc - > <nl> Noop <nl> | Tunsafe - > <nl> and statement_word env = function <nl> | " do " - > statement_do env <nl> | " while " - > statement_while env <nl> | " await " - > statement_await env <nl> - | " using " - > statement_using false env <nl> + | " using " - > statement_using ~ has_await : false env <nl> | " for " - > statement_for env <nl> | " switch " - > statement_switch env <nl> | " foreach " - > statement_foreach env <nl> and statement_await env = <nl> let start = Pos . make env . file env . lb in <nl> match L . token env . file env . lb with <nl> | Tword when Lexing . lexeme env . lb = " using " - > <nl> - statement_using true env <nl> + statement_using ~ has_await : true env <nl> | _ - > <nl> L . back env . lb ; <nl> let e = expr_await env start in <nl> and statement_await env = <nl> ( * Block - scoped or function - scoped using , optionally prefixed by ` await ` . At <nl> * this point we have lexed ` await using ` or ` using ` . 
<nl> * ) <nl> - and statement_using has_await env = <nl> + and statement_using ~ has_await env = <nl> ( * If there is an opening ` ( ` then this could be either block - scoped or <nl> * function - scoped . If no opening ` ( ` then it ' s definitely function - scoped . <nl> * ) <nl> and statement_using has_await env = <nl> ( * Parse the remaining comma - separated expression list enclosed in parentheses * ) <nl> let last , el = using_expr_list_rest env in <nl> let e = Pos . btw start last , Expr_list ( e : : el ) in <nl> - statement_using_block_or_rest has_await e env <nl> + statement_using_block_or_rest ~ has_await e env <nl> | _ - > <nl> L . back env . lb ; <nl> let e = expr env in <nl> + ( * The remainder of the function - level statements * ) <nl> let st = using_statement_list env in <nl> ( * Step back over closing brace * ) <nl> L . back env . lb ; <nl> and statement_using has_await env = <nl> us_block = st ; } <nl> <nl> <nl> - and statement_using_block_or_rest has_await e env = <nl> + and statement_using_block_or_rest ~ has_await e env = <nl> match L . token env . file env . lb with <nl> | Tlcb - > <nl> ( * Block - scoped using * ) <nl> and statement_using_block_or_rest has_await e env = <nl> us_is_block_scoped = true ; <nl> us_has_await = has_await ; <nl> us_expr = e ; <nl> - us_block = statement_list env ; } <nl> + us_block = statement_list ~ is_block_scope : true env ; } <nl> | Tsc - > <nl> let st = using_statement_list env in <nl> ( * Step back over closing brace * ) <nl> new file mode 100644 <nl> index 00000000000 . . 2cae9230e6b <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / using / using_bad_syntax . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class Handle implements IDisposable { <nl> + public function __dispose ( ) : void { } <nl> + public function foo ( ) : void { } <nl> + } <nl> + <nl> + function testit ( bool $ b ) : void { <nl> + / / This is legal <nl> + using ( $ x = new Handle ( ) ) ; <nl> + <nl> + / / This is not <nl> + if ( $ b ) { <nl> + using ( $ y = new Handle ( ) ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 0adde0887b6 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / using / using_bad_syntax . php . exp <nl> <nl> + File " using_bad_syntax . php " , line 15 , characters 5 - 9 : <nl> + Parse error : function - scoped ' using ' statement not supported in a block scope ( Parsing [ 1002 ] ) <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl> new file mode 100644 <nl> index 00000000000 . . a4c130c4b25 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / using / using_bad_syntax_2 . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class Handle implements IDisposable { <nl> + public function __dispose ( ) : void { } <nl> + public function foo ( ) : void { } <nl> + } <nl> + <nl> + function testit ( bool $ b ) : void { <nl> + / / This is legal <nl> + using ( $ x = new Handle ( ) ) ; <nl> + <nl> + / / This is not <nl> + if ( $ b ) { <nl> + using $ y = new Handle ( ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . ab367040054 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / using / using_bad_syntax_2 . php . exp <nl> <nl> + File " using_bad_syntax_2 . 
php " , line 15 , characters 5 - 9 : <nl> + Parse error : function - scoped ' using ' statement not supported in a block scope ( Parsing [ 1002 ] ) <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl> new file mode 100644 <nl> index 00000000000 . . cd5f5524b55 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / using / using_bad_syntax_3 . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class Handle implements IDisposable { <nl> + public function __dispose ( ) : void { } <nl> + public function foo ( ) : void { } <nl> + } <nl> + class ScopeGuard implements IDisposable { <nl> + public function __construct ( ( function ( ) : void ) $ f ) { } <nl> + public function __dispose ( ) : void { } <nl> + } <nl> + <nl> + function testit ( ) : void { <nl> + / / This is legal <nl> + using ( $ x = new Handle ( ) ) ; <nl> + <nl> + using ( $ w = new Handle ( ) ) { <nl> + / / This is not <nl> + using ( $ q = new ScopeGuard ( ( ) = = > { } ) ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 9505f89ff3a <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / using / using_bad_syntax_3 . php . exp <nl> <nl> + File " using_bad_syntax_3 . php " , line 19 , characters 5 - 9 : <nl> + Parse error : function - scoped ' using ' statement not supported in a block scope ( Parsing [ 1002 ] ) <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl>
|
Hack legacy parser : reject function - scoped using statement in block scope
|
facebook/hhvm
|
eb7936cc2e617c7a16b62c78a646dc727be4a1db
|
2017-11-10T23:26:32Z
|
mmm a / source / common / buffer / BUILD <nl> ppp b / source / common / buffer / BUILD <nl> envoy_cc_library ( <nl> " / / include / envoy / buffer : buffer_interface " , <nl> " / / source / common / common : non_copyable " , <nl> " / / source / common / common : stack_array " , <nl> + " / / source / common / common : utility_lib " , <nl> " / / source / common / event : libevent_lib " , <nl> ] , <nl> ) <nl> mmm a / source / common / buffer / buffer_impl . h <nl> ppp b / source / common / buffer / buffer_impl . h <nl> <nl> <nl> # include " common / common / assert . h " <nl> # include " common / common / non_copyable . h " <nl> + # include " common / common / utility . h " <nl> # include " common / event / libevent . h " <nl> <nl> namespace Envoy { <nl> class Slice { <nl> <nl> using SlicePtr = std : : unique_ptr < Slice > ; <nl> <nl> - class OwnedSlice : public Slice { <nl> + class OwnedSlice : public Slice , public InlineStorage { <nl> public : <nl> / * * <nl> * Create an empty OwnedSlice . <nl> class OwnedSlice : public Slice { <nl> return slice ; <nl> } <nl> <nl> - / / Custom delete operator to keep C + + 14 from using the global operator delete ( void * , size_t ) , <nl> - / / which would result in the compiler error : <nl> - / / " exception cleanup for this placement new selects non - placement operator delete " <nl> - static void operator delete ( void * address ) { : : operator delete ( address ) ; } <nl> - <nl> private : <nl> - static void * operator new ( size_t object_size , size_t data_size ) { <nl> - return : : operator new ( object_size + data_size ) ; <nl> - } <nl> - <nl> OwnedSlice ( uint64_t size ) : Slice ( 0 , 0 , size ) { base_ = storage_ ; } <nl> <nl> / * * <nl> mmm a / source / common / common / BUILD <nl> ppp b / source / common / common / BUILD <nl> envoy_cc_library ( <nl> deps = [ <nl> " : assert_lib " , <nl> " : hash_lib " , <nl> + " : non_copyable " , <nl> " / / include / envoy / common : interval_set_interface " , <nl> " / / include / envoy / common : time_interface " , <nl> " / / source / common / singleton : const_singleton " , <nl> mmm a / source / common / common / utility . cc <nl> ppp b / source / common / common / utility . cc <nl> double WelfordStandardDeviation : : computeStandardDeviation ( ) const { <nl> return ( std : : isnan ( variance ) | | variance < 0 ) ? std : : nan ( " " ) : sqrt ( variance ) ; <nl> } <nl> <nl> + InlineString : : InlineString ( const char * str , size_t size ) : size_ ( size ) { <nl> + RELEASE_ASSERT ( size < = 0xffffffff , " size must fit in 32 bits " ) ; <nl> + memcpy ( data_ , str , size ) ; <nl> + } <nl> + <nl> } / / namespace Envoy <nl> mmm a / source / common / common / utility . h <nl> ppp b / source / common / common / utility . h <nl> <nl> <nl> # include " common / common / assert . h " <nl> # include " common / common / hash . h " <nl> + # include " common / common / non_copyable . h " <nl> <nl> # include " absl / strings / string_view . h " <nl> <nl> template < class Value > struct TrieLookupTable { <nl> TrieEntry < Value > root_ ; <nl> } ; <nl> <nl> + / / Mix - in class for allocating classes with variable - sized inlined storage . <nl> + / / <nl> + / / Use this class by inheriting from it , ensuring that : <nl> + / / - The variable sized array is declared as VarType [ ] as the last <nl> + / / member variable of the class . <nl> + / / - YourType accurately describes the type that will be stored there , <nl> + / / to enable the compiler to perform correct alignment . No casting <nl> + / / should be needed . 
<nl> + / / - The class constructor is private , because you need to allocate the <nl> + / / class the placed new operator exposed in the protected section below . <nl> + / / Constructing the class directly will not provide space for the <nl> + / / variable - size data . <nl> + / / - You expose a public factory method that return a placement - new , e . g . <nl> + / / static YourClass * alloc ( size_t num_elements , constructor_args . . . ) { <nl> + / / new ( num_elements * sizeof ( VarType ) ) YourClass ( constructor_args . . . ) ; <nl> + / / } <nl> + / / <nl> + / / See InlineString below for an example usage . <nl> + / / <nl> + / / <nl> + / / Perf note : The alignment will be correct and safe without further <nl> + / / consideration as long as there are no casts . But for micro - optimization , <nl> + / / consider this case : <nl> + / / struct MyStruct : public InlineStorage { uint64_t a_ ; uint16_t b_ ; uint8_t data_ [ ] ; } ; <nl> + / / When compiled with a typical compiler on a 64 - bit machine : <nl> + / / sizeof ( MyStruct ) = = 16 , because the compiler will round up from 10 for uint64_t alignment . <nl> + / / So : <nl> + / / calling new ( 6 ) MyStruct ( ) causes an allocation of 16 + 6 = 22 , rounded up to 24 bytes . <nl> + / / But data_ doesn ' t need 8 - byte alignment , so it will wind up adjacent to the uint16_t . <nl> + / / ( ( char * ) my_struct . data ) - ( ( char * ) & my_struct ) = = 10 <nl> + / / If we had instead declared data_ [ 6 ] , then the whole allocation would have fit in 16 bytes . <nl> + / / Instead : <nl> + / / - the starting address of data will not be 8 - byte aligned . This is not required <nl> + / / by the C + + standard for a uint8_t , but may be suboptimal on some processors . <nl> + / / - the 6 bytes of data will be at byte offsets 10 to 15 , and bytes 16 to 23 will be <nl> + / / unused . This may be surprising to some users , and suboptimal in resource usage . <nl> + / / One possible tweak is to declare data_ as a uint64_t [ ] , or to use an ` alignas ` <nl> + / / declaration . As always , micro - optimizations should be informed by <nl> + / / microbenchmarks , showing the benefit . <nl> + class InlineStorage : public NonCopyable { <nl> + public : <nl> + / / Custom delete operator to keep C + + 14 from using the global operator delete ( void * , size_t ) , <nl> + / / which would result in the compiler error : <nl> + / / " exception cleanup for this placement new selects non - placement operator delete " <nl> + static void operator delete ( void * address ) { : : operator delete ( address ) ; } <nl> + <nl> + protected : <nl> + / * * <nl> + * @ param object_size the size of the base object ; supplied automatically by the compiler . <nl> + * @ param data_size the amount of variable - size storage to be added , in bytes . <nl> + * @ return a variable - size object based on data_size_bytes . <nl> + * / <nl> + static void * operator new ( size_t object_size , size_t data_size_bytes ) { <nl> + return : : operator new ( object_size + data_size_bytes ) ; <nl> + } <nl> + } ; <nl> + <nl> + class InlineString ; <nl> + using InlineStringPtr = std : : unique_ptr < InlineString > ; <nl> + <nl> + / / Represents immutable string data , keeping the storage inline with the <nl> + / / object . These cannot be copied or held by value ; they must be created <nl> + / / as unique pointers . 
<nl> + / / <nl> + / / Note : this is not yet proven better ( smaller or faster ) than std : : string for <nl> + / / all applications , but memory - size improvements have been measured for one <nl> + / / application ( Stats : : SymbolTableImpl ) . This is presented here to serve as an <nl> + / / example of how to use InlineStorage . <nl> + class InlineString : public InlineStorage { <nl> + public : <nl> + / * * <nl> + * @ param str the string_view for which to create an InlineString <nl> + * @ return a unique_ptr to the InlineString containing the bytes of str . <nl> + * / <nl> + static InlineStringPtr create ( absl : : string_view str ) { <nl> + return InlineStringPtr ( new ( str . size ( ) ) InlineString ( str . data ( ) , str . size ( ) ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ return a std : : string copy of the InlineString . <nl> + * / <nl> + std : : string toString ( ) const { return std : : string ( data_ , size_ ) ; } <nl> + <nl> + / * * <nl> + * @ return a string_view into the InlineString . <nl> + * / <nl> + absl : : string_view toStringView ( ) const { return absl : : string_view ( data_ , size_ ) ; } <nl> + <nl> + / * * <nl> + * @ return the number of bytes in the string <nl> + * / <nl> + size_t size ( ) const { return size_ ; } <nl> + <nl> + / * * <nl> + * @ return a pointer to the first byte of the string . <nl> + * / <nl> + const char * data ( ) const { return data_ ; } <nl> + <nl> + private : <nl> + / / Constructor is declared private so that no one constructs one without the <nl> + / / proper size allocation . to accommodate the variable - size buffer . <nl> + InlineString ( const char * str , size_t size ) ; <nl> + <nl> + uint32_t size_ ; <nl> + char data_ [ ] ; <nl> + } ; <nl> + <nl> } / / namespace Envoy <nl> mmm a / source / common / stats / heap_stat_data . cc <nl> ppp b / source / common / stats / heap_stat_data . cc <nl> namespace Stats { <nl> HeapStatDataAllocator : : ~ HeapStatDataAllocator ( ) { ASSERT ( stats_ . empty ( ) ) ; } <nl> <nl> HeapStatData * HeapStatData : : alloc ( StatName stat_name , SymbolTable & symbol_table ) { <nl> - void * memory = : : malloc ( sizeof ( HeapStatData ) + stat_name . size ( ) ) ; <nl> - ASSERT ( memory ) ; <nl> - / / TODO ( fredlas ) call StatMerger : : verifyCombineLogicSpecified ( ) here ? <nl> symbol_table . incRefCount ( stat_name ) ; <nl> - return new ( memory ) HeapStatData ( stat_name ) ; <nl> + return new ( stat_name . size ( ) ) HeapStatData ( stat_name ) ; <nl> } <nl> <nl> void HeapStatData : : free ( SymbolTable & symbol_table ) { <nl> symbol_table . free ( statName ( ) ) ; <nl> - this - > ~ HeapStatData ( ) ; <nl> - : : free ( this ) ; / / matches malloc ( ) call above . <nl> + delete this ; <nl> } <nl> <nl> HeapStatData & HeapStatDataAllocator : : alloc ( StatName name ) { <nl> mmm a / source / common / stats / heap_stat_data . h <nl> ppp b / source / common / stats / heap_stat_data . h <nl> namespace Stats { <nl> * This structure is an alternate backing store for both CounterImpl and GaugeImpl . It is designed <nl> * so that it can be allocated efficiently from the heap on demand . <nl> * / <nl> - struct HeapStatData { <nl> + struct HeapStatData : public InlineStorage { <nl> private : <nl> explicit HeapStatData ( StatName stat_name ) { stat_name . 
copyToStorage ( symbol_storage_ ) ; } <nl> <nl> struct HeapStatData { <nl> std : : atomic < uint64_t > pending_increment_ { 0 } ; <nl> std : : atomic < uint16_t > flags_ { 0 } ; <nl> std : : atomic < uint16_t > ref_count_ { 1 } ; <nl> - SymbolTable : : Storage symbol_storage_ ; <nl> + SymbolTable : : Storage symbol_storage_ ; / / This is a ' using ' nickname for uint8_t [ ] . <nl> } ; <nl> <nl> template < class Stat > class HeapStat : public Stat { <nl> mmm a / source / common / stats / symbol_table_impl . cc <nl> ppp b / source / common / stats / symbol_table_impl . cc <nl> void SymbolTableImpl : : incRefCount ( const StatName & stat_name ) { <nl> auto decode_search = decode_map_ . find ( symbol ) ; <nl> ASSERT ( decode_search ! = decode_map_ . end ( ) ) ; <nl> <nl> - auto encode_search = encode_map_ . find ( * decode_search - > second ) ; <nl> + auto encode_search = encode_map_ . find ( decode_search - > second - > toStringView ( ) ) ; <nl> ASSERT ( encode_search ! = encode_map_ . end ( ) ) ; <nl> <nl> + + encode_search - > second . ref_count_ ; <nl> void SymbolTableImpl : : free ( const StatName & stat_name ) { <nl> auto decode_search = decode_map_ . find ( symbol ) ; <nl> ASSERT ( decode_search ! = decode_map_ . end ( ) ) ; <nl> <nl> - auto encode_search = encode_map_ . find ( * decode_search - > second ) ; <nl> + auto encode_search = encode_map_ . find ( decode_search - > second - > toStringView ( ) ) ; <nl> ASSERT ( encode_search ! = encode_map_ . end ( ) ) ; <nl> <nl> / / If that was the last remaining client usage of the symbol , erase the <nl> Symbol SymbolTableImpl : : toSymbol ( absl : : string_view sv ) { <nl> / / a string_view pointing to it in the encode_map_ . This allows us to only <nl> / / store the string once . We use unique_ptr so copies are not made as <nl> / / flat_hash_map moves values around . <nl> - auto str = std : : make_unique < std : : string > ( std : : string ( sv ) ) ; <nl> - auto encode_insert = encode_map_ . insert ( { * str , SharedSymbol ( next_symbol_ ) } ) ; <nl> + InlineStringPtr str = InlineString : : create ( sv ) ; <nl> + auto encode_insert = encode_map_ . insert ( { str - > toStringView ( ) , SharedSymbol ( next_symbol_ ) } ) ; <nl> ASSERT ( encode_insert . second ) ; <nl> auto decode_insert = decode_map_ . insert ( { next_symbol_ , std : : move ( str ) } ) ; <nl> ASSERT ( decode_insert . second ) ; <nl> absl : : string_view SymbolTableImpl : : fromSymbol ( const Symbol symbol ) const <nl> EXCLUSIVE_LOCKS_REQUIRED ( lock_ ) { <nl> auto search = decode_map_ . find ( symbol ) ; <nl> RELEASE_ASSERT ( search ! = decode_map_ . end ( ) , " no such symbol " ) ; <nl> - return { * search - > second } ; <nl> + return search - > second - > toStringView ( ) ; <nl> } <nl> <nl> void SymbolTableImpl : : newSymbol ( ) EXCLUSIVE_LOCKS_REQUIRED ( lock_ ) { <nl> void SymbolTableImpl : : debugPrint ( ) const { <nl> } <nl> std : : sort ( symbols . begin ( ) , symbols . end ( ) ) ; <nl> for ( Symbol symbol : symbols ) { <nl> - const std : : string & token = * decode_map_ . find ( symbol ) - > second ; <nl> - const SharedSymbol & shared_symbol = encode_map_ . find ( token ) - > second ; <nl> - ENVOY_LOG_MISC ( info , " { } : ' { } ' ( { } ) " , symbol , token , shared_symbol . ref_count_ ) ; <nl> + const InlineString & token = * decode_map_ . find ( symbol ) - > second ; <nl> + const SharedSymbol & shared_symbol = encode_map_ . find ( token . toStringView ( ) ) - > second ; <nl> + ENVOY_LOG_MISC ( info , " { } : ' { } ' ( { } ) " , symbol , token . 
toStringView ( ) , shared_symbol . ref_count_ ) ; <nl> } <nl> } <nl> # endif <nl> mmm a / source / common / stats / symbol_table_impl . h <nl> ppp b / source / common / stats / symbol_table_impl . h <nl> class SymbolTableImpl : public SymbolTable { <nl> / / The encode map stores both the symbol and the ref count of that symbol . <nl> / / Using absl : : string_view lets us only store the complete string once , in the decode map . <nl> using EncodeMap = absl : : flat_hash_map < absl : : string_view , SharedSymbol , StringViewHash > ; <nl> - using DecodeMap = absl : : flat_hash_map < Symbol , std : : unique_ptr < std : : string > > ; <nl> + using DecodeMap = absl : : flat_hash_map < Symbol , InlineStringPtr > ; <nl> EncodeMap encode_map_ GUARDED_BY ( lock_ ) ; <nl> DecodeMap decode_map_ GUARDED_BY ( lock_ ) ; <nl> <nl> mmm a / test / common / common / utility_test . cc <nl> ppp b / test / common / common / utility_test . cc <nl> TEST ( TrieLookupTable , LongestPrefix ) { <nl> EXPECT_EQ ( nullptr , trie . findLongestPrefix ( " " ) ) ; <nl> } <nl> <nl> + TEST ( InlineStorageTest , InlineString ) { <nl> + InlineStringPtr hello = InlineString : : create ( " Hello , world ! " ) ; <nl> + EXPECT_EQ ( " Hello , world ! " , hello - > toStringView ( ) ) ; <nl> + EXPECT_EQ ( " Hello , world ! " , hello - > toString ( ) ) ; <nl> + } <nl> + <nl> } / / namespace Envoy <nl> mmm a / test / common / stats / symbol_table_impl_test . cc <nl> ppp b / test / common / stats / symbol_table_impl_test . cc <nl> TEST ( SymbolTableTest , Memory ) { <nl> ENVOY_LOG_MISC ( info , <nl> " SymbolTableTest . Memory comparison skipped due to malloc - stats returning 0 . " ) ; <nl> } else { <nl> - / / Make sure we don ' t regress . Data as of 2019 / 01 / 04 : <nl> + / / Make sure we don ' t regress . Data as of 2019 / 05 / 29 : <nl> / / <nl> - / / libstdc + + : <nl> - / / mmmmmmmmm - <nl> / / string_mem_used : 7759488 <nl> - / / symbol_table_mem_used : 1744280 ( 4 . 45x ) <nl> - / / <nl> - / / libc + + : <nl> - / / mmmmmm - <nl> - / / string_mem_used : 6710912 <nl> - / / symbol_table_mem_used : 1743512 ( 3 . 85x ) <nl> + / / symbol_table_mem_used : 1726056 ( 4 . 5x ) <nl> + EXPECT_EQ ( string_mem_used , 7759488 ) ; <nl> EXPECT_LT ( symbol_table_mem_used , string_mem_used / 3 ) ; <nl> - EXPECT_LT ( symbol_table_mem_used , 1750000 ) ; <nl> + EXPECT_EQ ( symbol_table_mem_used , 1726056 ) ; <nl> } <nl> } <nl> <nl> mmm a / tools / spelling_dictionary . txt <nl> ppp b / tools / spelling_dictionary . txt <nl> accessors <nl> acls <nl> addr <nl> agg <nl> + alignas <nl> alignof <nl> alloc <nl> alloca <nl>
|
util : add Inline storage helper - class and use it in a few places . ( )
|
envoyproxy/envoy
|
a5e7b442b29d422277ccaa932ecee1644ee39feb
|
2019-05-31T00:11:53Z
|
mmm a / modules / imgproc / include / opencv2 / imgproc . hpp <nl> ppp b / modules / imgproc / include / opencv2 / imgproc . hpp <nl> taller than image . Since this is both an input and output parameter , you must ta <nl> of initializing it . Flood - filling cannot go across non - zero pixels in the input mask . For example , <nl> an edge detector output can be used as a mask to stop filling at edges . On output , pixels in the <nl> mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags <nl> - as described below . It is therefore possible to use the same mask in multiple calls to the function <nl> + as described below . Additionally , the function fills the border of the mask with ones to simplify <nl> + internal processing . It is therefore possible to use the same mask in multiple calls to the function <nl> to make sure the filled areas do not overlap . <nl> @ param seedPoint Starting point . <nl> @ param newVal New value of the repainted domain pixels . <nl>
|
imgproc : clarify usage of the mask in floodfill
|
opencv/opencv
|
a385fff3bf27865c51481ca006caf4064ed87aa8
|
2017-10-27T11:06:24Z
|
mmm a / Source / CNTKv2LibraryDll / API / CNTKLibrary . h <nl> ppp b / Source / CNTKv2LibraryDll / API / CNTKLibrary . h <nl> namespace CNTK <nl> <nl> <nl> <nl> - / / / Create an instance of the CNTK built - in OptimizedRNNStack operation on specified tensor input operands with the specified output shape <nl> - / / / <nl> - CNTK_API FunctionPtr OptimizedRNNStack ( const Variable & weights , const Variable & operand , const NDShape & c , const std : : wstring & name = L " " ) ; <nl> - <nl> - <nl> / / / <nl> / / / Create an instance of the CNTK built - in elementwise clip operation on the tensor operand <nl> / / / <nl>
|
Removed accidental commit
|
microsoft/CNTK
|
89ee3796858a5c654370d7511f725eb211ac051b
|
2016-11-09T23:17:09Z
|
mmm a / examples / odop_solar . py <nl> ppp b / examples / odop_solar . py <nl> <nl> import taichi as ti <nl> + import math <nl> <nl> <nl> @ ti . data_oriented <nl> class SolarSystem : <nl> def __init__ ( self , n , dt ) : <nl> + # initializer of the solar system simulator <nl> self . n = n <nl> self . dt = dt <nl> self . x = ti . Vector ( 2 , dt = ti . f32 , shape = n ) <nl> def __init__ ( self , n , dt ) : <nl> <nl> @ staticmethod <nl> @ ti . func <nl> - def random_around ( center , radius ) : <nl> - # random number in [ center - radius , center + radius ) <nl> - return center + radius * ( ti . random ( ) - 0 . 5 ) * 2 <nl> + def random_vector_in ( rmax ) : <nl> + # create a random vector <nl> + a = ti . random ( ) * math . tau <nl> + r = ti . random ( ) * rmax <nl> + return r * ti . Vector ( [ ti . cos ( a ) , ti . sin ( a ) ] ) <nl> <nl> @ ti . kernel <nl> def initialize ( self ) : <nl> + # initialization or reset <nl> for i in range ( self . n ) : <nl> - offset = ti . Vector ( [ 0 . 0 , self . random_around ( 0 . 3 , 0 . 15 ) ] ) <nl> - self . x [ i ] = self . center [ None ] + offset <nl> - self . v [ i ] = [ - offset [ 1 ] , offset [ 0 ] ] <nl> - self . v [ i ] * = 1 . 5 / offset . norm ( ) <nl> + offset = self . random_vector_in ( 0 . 5 ) <nl> + self . x [ i ] = self . center [ None ] + offset # Offset from center <nl> + self . v [ i ] = [ - offset . y , offset . x ] # Perpendicular to offset <nl> + self . v [ i ] + = self . random_vector_in ( 0 . 02 ) # Shaking <nl> + self . v [ i ] * = 1 / offset . norm ( ) * * 1 . 5 # Kepler ' s 3rd law <nl> <nl> @ ti . func <nl> def gravity ( self , pos ) : <nl> + # compute gravitational acceleration at pos <nl> offset = - ( pos - self . center [ None ] ) <nl> return offset / offset . norm ( ) * * 3 <nl> <nl> @ ti . kernel <nl> def integrate ( self ) : <nl> + # semi - implicit time integration <nl> for i in range ( self . n ) : <nl> self . v [ i ] + = self . dt * self . gravity ( self . x [ i ] ) <nl> self . x [ i ] + = self . dt * self . v [ i ] <nl> <nl> + def render ( self , gui ) : <nl> + # render the simulation scene on the GUI <nl> + gui . circle ( [ 0 . 5 , 0 . 5 ] , radius = 10 , color = 0xffaa88 ) <nl> + gui . circles ( solar . x . to_numpy ( ) , radius = 3 , color = 0xffffff ) <nl> <nl> - solar = SolarSystem ( 9 , 0 . 0005 ) <nl> + <nl> + solar = SolarSystem ( 8 , 0 . 0001 ) <nl> solar . center [ None ] = [ 0 . 5 , 0 . 5 ] <nl> solar . initialize ( ) <nl> <nl> - gui = ti . GUI ( " Solar System " , background_color = 0x25A6D9 ) <nl> + gui = ti . GUI ( " Solar System " , background_color = 0x0071a ) <nl> <nl> - while True : <nl> - if gui . get_event ( ) : <nl> - if gui . event . key = = gui . SPACE and gui . event . type = = gui . PRESS : <nl> + while gui . running : <nl> + # GUI event processing <nl> + if gui . get_event ( gui . PRESS ) : <nl> + if gui . event . key = = gui . SPACE : <nl> solar . initialize ( ) <nl> + elif gui . event . key = = gui . ESCAPE : <nl> + gui . running = False <nl> + <nl> for i in range ( 10 ) : <nl> solar . integrate ( ) <nl> - gui . circle ( [ 0 . 5 , 0 . 5 ] , radius = 20 , color = 0x8C274C ) <nl> - gui . circles ( solar . x . to_numpy ( ) , radius = 5 , color = 0xFFFFFF ) <nl> + <nl> + solar . render ( gui ) <nl> gui . show ( ) <nl>
|
[ Example ] Improve examples / odop_solar . py visual & comments ( )
|
taichi-dev/taichi
|
265e414b6f871b5903fcd8b92ac74b20e116a145
|
2020-06-19T14:24:43Z
|
mmm a / dbms / src / Server / PerformanceTest . cpp <nl> ppp b / dbms / src / Server / PerformanceTest . cpp <nl> class PerformanceTest <nl> tests_names ( std : : move ( tests_names_ ) ) , <nl> skip_names ( std : : move ( skip_names_ ) ) , <nl> tests_names_regexp ( std : : move ( tests_names_regexp_ ) ) , <nl> - skip_names_regexp ( std : : move ( skip_names_regexp_ ) ) , <nl> - <nl> + skip_names_regexp ( std : : move ( skip_names_regexp_ ) ) <nl> { <nl> if ( input_files . size ( ) < 1 ) <nl> { <nl>
|
Addition to prev . revision [ # CLICKHOUSE - 2 ] .
|
ClickHouse/ClickHouse
|
f0391a63585ff7b6f86e96e5f80d5685ce74ce50
|
2018-01-10T00:15:08Z
|
mmm a / src / core / ext / transport / chttp2 / transport / chttp2_transport . cc <nl> ppp b / src / core / ext / transport / chttp2 / transport / chttp2_transport . cc <nl> static void start_bdp_ping_locked ( void * tp , grpc_error * error ) { <nl> gpr_log ( GPR_INFO , " % s : Start BDP ping err = % s " , t - > peer_string , <nl> grpc_error_string ( error ) ) ; <nl> } <nl> + if ( error ! = GRPC_ERROR_NONE | | t - > closed_with_error ! = GRPC_ERROR_NONE ) { <nl> + return ; <nl> + } <nl> / * Reset the keepalive ping timer * / <nl> if ( t - > keepalive_state = = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING ) { <nl> grpc_timer_cancel ( & t - > keepalive_ping_timer ) ; <nl> static void finish_bdp_ping_locked ( void * tp , grpc_error * error ) { <nl> gpr_log ( GPR_INFO , " % s : Complete BDP ping err = % s " , t - > peer_string , <nl> grpc_error_string ( error ) ) ; <nl> } <nl> - if ( error ! = GRPC_ERROR_NONE ) { <nl> + if ( error ! = GRPC_ERROR_NONE | | t - > closed_with_error ! = GRPC_ERROR_NONE ) { <nl> GRPC_CHTTP2_UNREF_TRANSPORT ( t , " bdp_ping " ) ; <nl> return ; <nl> } <nl>
|
Merge pull request from yang - g / pingping
|
grpc/grpc
|
681f1c8a988197f57663df5232f120edc7230840
|
2019-04-01T19:06:57Z
|
mmm a / cocos / scripting / auto - generated <nl> ppp b / cocos / scripting / auto - generated <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 74cb897b64f7325cf969341e9bc2d87fc7fb1bb7 <nl> + Subproject commit f03e84b93ae69080e369962ba2e95ea9b2fd91c7 <nl>
|
Merge pull request from CocosRobot / updategeneratedsubmodule_1389939079
|
cocos2d/cocos2d-x
|
805e2ad2bae6ab0523014e6644eb9b76ce03d588
|
2014-01-17T06:34:53Z
|
mmm a / js / server / tests / shell - documents . js <nl> ppp b / js / server / tests / shell - documents . js <nl> function readCollectionDocumentSuiteReadDocument ( ) { <nl> catch ( err ) { <nl> assertEqual ( ERRORS . ERROR_AVOCADO_CONFLICT . code , err . errorNum ) ; <nl> } <nl> + <nl> + var a3 = collection . delete ( a1 , true ) ; <nl> + <nl> + assertEqual ( a3 , true ) ; <nl> + <nl> + var a4 = collection . delete ( a1 , true ) ; <nl> + <nl> + assertEqual ( a4 , false ) ; <nl> } <nl> } ; <nl> } <nl> function readDocumentSuiteReadDocument ( ) { <nl> assertEqual ( a1 . _id , doc4 . _id ) ; <nl> assertEqual ( a4 . _rev , doc4 . _rev ) ; <nl> assertEqual ( 4 , doc4 . a ) ; <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief delete a document <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + testDeleteDocument : function ( ) { <nl> + var a1 = collection . save ( { a : 1 } ) ; <nl> + <nl> + assertTypeOf ( " string " , a1 . _id ) ; <nl> + assertTypeOf ( " number " , a1 . _rev ) ; <nl> + <nl> + var a2 = db . _replace ( a1 , { a : 2 } ) ; <nl> + <nl> + assertEqual ( a1 . _id , a2 . _id ) ; <nl> + assertNotEqual ( a1 . _rev , a2 . _rev ) ; <nl> + <nl> + try { <nl> + db . _delete ( a1 ) ; <nl> + fail ( ) ; <nl> + } <nl> + catch ( err ) { <nl> + assertEqual ( ERRORS . ERROR_AVOCADO_CONFLICT . code , err . errorNum ) ; <nl> + } <nl> + <nl> + var a3 = db . _delete ( a1 , true ) ; <nl> + <nl> + assertEqual ( a3 , true ) ; <nl> + <nl> + var a4 = db . _delete ( a1 , true ) ; <nl> + <nl> + assertEqual ( a4 , false ) ; <nl> } <nl> } ; <nl> } <nl>
|
added delete test
|
arangodb/arangodb
|
012e2cc4039943f7bfa63030ed0bc7111f1d648b
|
2012-04-09T17:46:31Z
|
mmm a / dbms / include / DB / IO / HTTPCommon . h <nl> ppp b / dbms / include / DB / IO / HTTPCommon . h <nl> <nl> # pragma once <nl> <nl> - # include < Poco / Net / HTTPServerResponse . h > <nl> + <nl> + namespace Poco <nl> + { <nl> + namespace Net <nl> + { <nl> + class HTTPServerResponse ; <nl> + } <nl> + } <nl> + <nl> <nl> namespace DB <nl> { <nl> mmm a / dbms / include / DB / IO / WriteBufferFromHTTPServerResponse . h <nl> ppp b / dbms / include / DB / IO / WriteBufferFromHTTPServerResponse . h <nl> <nl> # include < experimental / optional > <nl> # include < mutex > <nl> <nl> - # include < Poco / Net / HTTPServerResponse . h > <nl> - <nl> - # include < DB / Common / Exception . h > <nl> - <nl> # include < DB / IO / WriteBuffer . h > <nl> # include < DB / IO / BufferWithOwnMemory . h > <nl> # include < DB / IO / WriteBufferFromOStream . h > <nl> - # include < DB / IO / WriteBufferFromString . h > <nl> # include < DB / IO / ZlibDeflatingWriteBuffer . h > <nl> # include < DB / IO / HTTPCommon . h > <nl> # include < DB / Common / NetException . h > <nl> <nl> # include < DB / Core / Progress . h > <nl> <nl> <nl> - namespace DB <nl> - { <nl> - <nl> - namespace ErrorCodes <nl> + namespace Poco <nl> { <nl> - extern const int LOGICAL_ERROR ; <nl> + namespace Net <nl> + { <nl> + class HTTPServerResponse ; <nl> + } <nl> } <nl> <nl> <nl> + namespace DB <nl> + { <nl> + <nl> / / / The difference from WriteBufferFromOStream is that this buffer gets the underlying std : : ostream <nl> / / / ( using response . send ( ) ) only after data is flushed for the first time . This is needed in HTTP <nl> / / / servers to change some HTTP headers ( e . g . response code ) before any data is sent to the client <nl> class WriteBufferFromHTTPServerResponse : public BufferWithOwnMemory < WriteBuffer <nl> / / / Must be called under locked mutex . <nl> / / / This method send headers , if this was not done already , <nl> / / / but not finish them with \ r \ n , allowing to send more headers subsequently . <nl> - void startSendHeaders ( ) <nl> - { <nl> - if ( ! headers_started_sending ) <nl> - { <nl> - headers_started_sending = true ; <nl> - <nl> - if ( add_cors_header ) <nl> - response . set ( " Access - Control - Allow - Origin " , " * " ) ; <nl> - <nl> - setResponseDefaultHeaders ( response ) ; <nl> - <nl> - std : : tie ( response_header_ostr , response_body_ostr ) = response . beginSend ( ) ; <nl> - } <nl> - } <nl> + void startSendHeaders ( ) ; <nl> <nl> / / / This method finish headers with \ r \ n , allowing to start to send body . <nl> - void finishSendHeaders ( ) <nl> - { <nl> - if ( ! headers_finished_sending ) <nl> - { <nl> - headers_finished_sending = true ; <nl> + void finishSendHeaders ( ) ; <nl> <nl> - / / / Send end of headers delimiter . <nl> - * response_header_ostr < < " \ r \ n " < < std : : flush ; <nl> - } <nl> - } <nl> - <nl> - void nextImpl ( ) override <nl> - { <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> - <nl> - startSendHeaders ( ) ; <nl> - <nl> - if ( ! 
out ) <nl> - { <nl> - if ( compress ) <nl> - { <nl> - if ( compression_method = = ZlibCompressionMethod : : Gzip ) <nl> - * response_header_ostr < < " Content - Encoding : gzip \ r \ n " ; <nl> - else if ( compression_method = = ZlibCompressionMethod : : Zlib ) <nl> - * response_header_ostr < < " Content - Encoding : deflate \ r \ n " ; <nl> - else <nl> - throw Exception ( " Logical error : unknown compression method passed to WriteBufferFromHTTPServerResponse " , <nl> - ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - / / / Use memory allocated for the outer buffer in the buffer pointed to by out . This avoids extra allocation and copy . <nl> - out_raw . emplace ( * response_body_ostr ) ; <nl> - deflating_buf . emplace ( out_raw . value ( ) , compression_method , compression_level , working_buffer . size ( ) , working_buffer . begin ( ) ) ; <nl> - out = & deflating_buf . value ( ) ; <nl> - } <nl> - else <nl> - { <nl> - out_raw . emplace ( * response_body_ostr , working_buffer . size ( ) , working_buffer . begin ( ) ) ; <nl> - out = & out_raw . value ( ) ; <nl> - } <nl> - } <nl> - <nl> - finishSendHeaders ( ) ; <nl> - } <nl> - <nl> - out - > position ( ) = position ( ) ; <nl> - out - > next ( ) ; <nl> - } <nl> + void nextImpl ( ) override ; <nl> <nl> public : <nl> WriteBufferFromHTTPServerResponse ( <nl> Poco : : Net : : HTTPServerResponse & response_ , <nl> bool compress_ = false , / / / If true - set Content - Encoding header and compress the result . <nl> ZlibCompressionMethod compression_method_ = ZlibCompressionMethod : : Gzip , <nl> - size_t size = DBMS_DEFAULT_BUFFER_SIZE ) <nl> - : BufferWithOwnMemory < WriteBuffer > ( size ) , response ( response_ ) , <nl> - compress ( compress_ ) , compression_method ( compression_method_ ) { } <nl> + size_t size = DBMS_DEFAULT_BUFFER_SIZE ) ; <nl> <nl> / / / Writes progess in repeating HTTP headers . <nl> - void onProgress ( const Progress & progress ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> - <nl> - / / / Cannot add new headers if body was started to send . <nl> - if ( headers_finished_sending ) <nl> - return ; <nl> - <nl> - accumulated_progress . incrementPiecewiseAtomically ( progress ) ; <nl> - <nl> - if ( progress_watch . elapsed ( ) > = send_progress_interval_ms * 1000000 ) <nl> - { <nl> - progress_watch . restart ( ) ; <nl> - <nl> - / / / Send all common headers before our special progress headers . <nl> - startSendHeaders ( ) ; <nl> - <nl> - std : : string progress_string ; <nl> - { <nl> - WriteBufferFromString progress_string_writer ( progress_string ) ; <nl> - accumulated_progress . writeJSON ( progress_string_writer ) ; <nl> - } <nl> - <nl> - * response_header_ostr < < " X - ClickHouse - Progress : " < < progress_string < < " \ r \ n " < < std : : flush ; <nl> - } <nl> - } <nl> + void onProgress ( const Progress & progress ) ; <nl> <nl> / / / Send at least HTTP headers if no data has been sent yet . <nl> / / / Use after the data has possibly been sent and no error happened ( and thus you do not plan <nl> / / / to change response HTTP code . <nl> / / / This method is idempotent . <nl> - void finalize ( ) <nl> - { <nl> - if ( offset ( ) ) <nl> - { <nl> - next ( ) ; <nl> - } <nl> - else <nl> - { <nl> - / / / If no remaining data , just send headers . <nl> - std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> - startSendHeaders ( ) ; <nl> - finishSendHeaders ( ) ; <nl> - } <nl> - } <nl> + void finalize ( ) ; <nl> <nl> / / / Turn compression on or off . 
<nl> / / / The setting has any effect only if HTTP headers haven ' t been sent yet . <nl> class WriteBufferFromHTTPServerResponse : public BufferWithOwnMemory < WriteBuffer <nl> send_progress_interval_ms = send_progress_interval_ms_ ; <nl> } <nl> <nl> - ~ WriteBufferFromHTTPServerResponse ( ) <nl> - { <nl> - try <nl> - { <nl> - finalize ( ) ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - tryLogCurrentException ( __PRETTY_FUNCTION__ ) ; <nl> - } <nl> - } <nl> + ~ WriteBufferFromHTTPServerResponse ( ) ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / IO / HTTPCommon . cpp <nl> ppp b / dbms / src / IO / HTTPCommon . cpp <nl> <nl> # include < DB / IO / HTTPCommon . h > <nl> <nl> # include < Poco / Util / Application . h > <nl> + # include < Poco / Net / HTTPServerResponse . h > <nl> + <nl> <nl> namespace DB <nl> { <nl> new file mode 100644 <nl> index 00000000000 . . 2ddcb275f74 <nl> mmm / dev / null <nl> ppp b / dbms / src / IO / WriteBufferFromHTTPServerResponse . cpp <nl> <nl> + # include < Poco / Net / HTTPServerResponse . h > <nl> + <nl> + # include < DB / Common / Exception . h > <nl> + <nl> + # include < DB / IO / WriteBufferFromHTTPServerResponse . h > <nl> + # include < DB / IO / WriteBufferFromString . h > <nl> + # include < DB / IO / HTTPCommon . h > <nl> + # include < DB / Common / NetException . h > <nl> + # include < DB / Common / Stopwatch . h > <nl> + # include < DB / Core / Progress . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int LOGICAL_ERROR ; <nl> + } <nl> + <nl> + <nl> + void WriteBufferFromHTTPServerResponse : : startSendHeaders ( ) <nl> + { <nl> + if ( ! headers_started_sending ) <nl> + { <nl> + headers_started_sending = true ; <nl> + <nl> + if ( add_cors_header ) <nl> + response . set ( " Access - Control - Allow - Origin " , " * " ) ; <nl> + <nl> + setResponseDefaultHeaders ( response ) ; <nl> + <nl> + std : : tie ( response_header_ostr , response_body_ostr ) = response . beginSend ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void WriteBufferFromHTTPServerResponse : : finishSendHeaders ( ) <nl> + { <nl> + if ( ! headers_finished_sending ) <nl> + { <nl> + headers_finished_sending = true ; <nl> + <nl> + / / / Send end of headers delimiter . <nl> + * response_header_ostr < < " \ r \ n " < < std : : flush ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void WriteBufferFromHTTPServerResponse : : nextImpl ( ) <nl> + { <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + <nl> + startSendHeaders ( ) ; <nl> + <nl> + if ( ! out ) <nl> + { <nl> + if ( compress ) <nl> + { <nl> + if ( compression_method = = ZlibCompressionMethod : : Gzip ) <nl> + * response_header_ostr < < " Content - Encoding : gzip \ r \ n " ; <nl> + else if ( compression_method = = ZlibCompressionMethod : : Zlib ) <nl> + * response_header_ostr < < " Content - Encoding : deflate \ r \ n " ; <nl> + else <nl> + throw Exception ( " Logical error : unknown compression method passed to WriteBufferFromHTTPServerResponse " , <nl> + ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + / / / Use memory allocated for the outer buffer in the buffer pointed to by out . This avoids extra allocation and copy . <nl> + out_raw . emplace ( * response_body_ostr ) ; <nl> + deflating_buf . emplace ( out_raw . value ( ) , compression_method , compression_level , working_buffer . size ( ) , working_buffer . begin ( ) ) ; <nl> + out = & deflating_buf . value ( ) ; <nl> + } <nl> + else <nl> + { <nl> + out_raw . 
emplace ( * response_body_ostr , working_buffer . size ( ) , working_buffer . begin ( ) ) ; <nl> + out = & out_raw . value ( ) ; <nl> + } <nl> + } <nl> + <nl> + finishSendHeaders ( ) ; <nl> + } <nl> + <nl> + out - > position ( ) = position ( ) ; <nl> + out - > next ( ) ; <nl> + } <nl> + <nl> + <nl> + WriteBufferFromHTTPServerResponse : : WriteBufferFromHTTPServerResponse ( <nl> + Poco : : Net : : HTTPServerResponse & response_ , <nl> + bool compress_ , <nl> + ZlibCompressionMethod compression_method_ , <nl> + size_t size ) <nl> + : BufferWithOwnMemory < WriteBuffer > ( size ) , response ( response_ ) , <nl> + compress ( compress_ ) , compression_method ( compression_method_ ) <nl> + { <nl> + } <nl> + <nl> + <nl> + void WriteBufferFromHTTPServerResponse : : onProgress ( const Progress & progress ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + <nl> + / / / Cannot add new headers if body was started to send . <nl> + if ( headers_finished_sending ) <nl> + return ; <nl> + <nl> + accumulated_progress . incrementPiecewiseAtomically ( progress ) ; <nl> + <nl> + if ( progress_watch . elapsed ( ) > = send_progress_interval_ms * 1000000 ) <nl> + { <nl> + progress_watch . restart ( ) ; <nl> + <nl> + / / / Send all common headers before our special progress headers . <nl> + startSendHeaders ( ) ; <nl> + <nl> + std : : string progress_string ; <nl> + { <nl> + WriteBufferFromString progress_string_writer ( progress_string ) ; <nl> + accumulated_progress . writeJSON ( progress_string_writer ) ; <nl> + } <nl> + <nl> + * response_header_ostr < < " X - ClickHouse - Progress : " < < progress_string < < " \ r \ n " < < std : : flush ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void WriteBufferFromHTTPServerResponse : : finalize ( ) <nl> + { <nl> + if ( offset ( ) ) <nl> + { <nl> + next ( ) ; <nl> + } <nl> + else <nl> + { <nl> + / / / If no remaining data , just send headers . <nl> + std : : lock_guard < std : : mutex > lock ( mutex ) ; <nl> + startSendHeaders ( ) ; <nl> + finishSendHeaders ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + WriteBufferFromHTTPServerResponse : : ~ WriteBufferFromHTTPServerResponse ( ) <nl> + { <nl> + try <nl> + { <nl> + finalize ( ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + tryLogCurrentException ( __PRETTY_FUNCTION__ ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl>
|
Moved code to .cpp [#CLICKHOUSE-2027].
|
ClickHouse/ClickHouse
|
3b0900e6cf6f4179d8ba7b2738ef201cab53b614
|
2017-01-30T05:13:58Z
|
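A note on the preceding ClickHouse commit: the technique it applies (replacing a heavy #include in a header with a forward declaration, and moving both the include and the member definitions into the .cpp file) is a standard way to cut compile-time dependencies. A minimal C++ sketch with hypothetical names; net::Response stands in for Poco::Net::HTTPServerResponse:

// widget.h: only a reference is stored, so a forward declaration suffices.
namespace net { class Response; }

class Widget {
public:
    explicit Widget(net::Response & response);
    void send();
private:
    net::Response & response_;   // an incomplete type is fine for a reference
};

// widget.cpp: member access needs the complete type, so include it here.
#include "net/response.h"        // hypothetical header
#include "widget.h"

Widget::Widget(net::Response & response) : response_(response) {}

void Widget::send()
{
    response_.flush();           // hypothetical method; needs the full definition
}

Every file that includes widget.h now avoids parsing the response header, which is exactly the effect of forward-declaring Poco::Net::HTTPServerResponse in the diff above.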
mmm a / code / mathematical - algorithms / newton_raphson_method / newton_raphson . c <nl> ppp b / code / mathematical - algorithms / newton_raphson_method / newton_raphson . c <nl> <nl> + / * Part of Cosmos by OpenGenus Foundation * / <nl> # include < stdio . h > <nl> # include < math . h > <nl> float f ( float x ) <nl> float df ( float x ) <nl> { <nl> return log10 ( x ) + 0 . 43429 ; <nl> } <nl> - void main ( ) <nl> + int main ( ) <nl> { <nl> int itr , maxmitr ; <nl> float h , x0 , x1 , allerr ; <nl> new file mode 100644 <nl> index 0000000000 . . c48d74541d <nl> mmm / dev / null <nl> ppp b / code / mathematical - algorithms / newton_raphson_method / newton_raphson . cpp <nl> <nl> + / * Part of Cosmos by OpenGenus Foundation * / <nl> + # include < cmath > <nl> + # include < iostream > <nl> + # include < limits > <nl> + # include < string > <nl> + # include < stdexcept > <nl> + <nl> + / / This code requires you to enable the C + + 11 standard when compiling <nl> + <nl> + void helpAndExit ( ) { <nl> + std : : cout < < " Newton - Raphson iteration for the function x * log10 ( x ) - 1 . 2 \ n " <nl> + < < " \ nUsage : newton - raphson INITIAL_GUESS ERROR_LIMIT MAX_ITERATIONS \ n " <nl> + < < " \ tINITIAL_GUESS - The initial guess ( x0 ) . A number . \ n " <nl> + < < " \ tERROR_LIMIT - The stopping condition . A positive number . \ n " <nl> + < < " \ tMAX_ITERATIONS - The maximum number of allowed iterations . A positive number . \ n " <nl> + < < " \ n \ tExample : newton_raphson 1 0 . 001 1000 " < < std : : endl ; <nl> + exit ( 1 ) ; <nl> + } <nl> + <nl> + int main ( int argc , char * argv [ ] ) { <nl> + if ( argc ! = 4 ) <nl> + helpAndExit ( ) ; <nl> + <nl> + float x0 , allowedError ; <nl> + int maxIterations ; <nl> + <nl> + try { <nl> + x0 = std : : stof ( std : : string ( argv [ 1 ] ) ) ; <nl> + allowedError = std : : stof ( std : : string ( argv [ 2 ] ) ) ; <nl> + maxIterations = std : : stoi ( std : : string ( argv [ 3 ] ) ) ; <nl> + <nl> + if ( allowedError < 0 . 0f | | maxIterations < 0 ) <nl> + throw std : : domain_error ( " Error limit and number of iterations must be positive numbers ! " ) ; <nl> + <nl> + } catch ( std : : exception & e ) { <nl> + std : : cout < < e . what ( ) < < " \ n " < < std : : endl ; <nl> + helpAndExit ( ) ; <nl> + } <nl> + <nl> + auto f = [ ] ( const float x ) { return x * std : : log10 ( x ) - 1 . 2 ; } ; <nl> + auto df = [ ] ( const float x ) { return std : : log10 ( x ) + 0 . 43429 ; } ; <nl> + <nl> + for ( auto i = 1 ; i < maxIterations ; i + + ) <nl> + { <nl> + const auto error = f ( x0 ) / df ( x0 ) ; <nl> + <nl> + if ( std : : fabs ( error ) < allowedError ) <nl> + { <nl> + std : : cout < < " Conversion reached after : " < < i < < " iterations . Solution : " < < x0 < < std : : endl ; <nl> + return 0 ; <nl> + } <nl> + <nl> + x0 = x0 - error ; <nl> + } <nl> + <nl> + std : : cout < < " Convergence not reached after " < < maxIterations < < " iterations " < < std : : endl ; <nl> + <nl> + return 0 ; <nl> + } <nl>
|
Merge pull request from ulvgard/newton-raphson-cpp
|
OpenGenus/cosmos
|
f3ce28eea29b98008fa82a560ee31bff71fff473
|
2017-10-05T17:20:01Z
|
mmm a / editor / editor_file_dialog . cpp <nl> ppp b / editor / editor_file_dialog . cpp <nl> void EditorFileDialog : : update_dir ( ) { <nl> <nl> dir - > set_text ( dir_access - > get_current_dir ( ) ) ; <nl> <nl> - / / Disable " Open " button only when we in selecting file ( s ) mode or open dir mode . <nl> + / / Disable " Open " button only when selecting file ( s ) mode . <nl> get_ok ( ) - > set_disabled ( _is_open_should_be_disabled ( ) ) ; <nl> + switch ( mode ) { <nl> + <nl> + case MODE_OPEN_FILE : <nl> + case MODE_OPEN_FILES : <nl> + get_ok ( ) - > set_text ( TTR ( " Open " ) ) ; <nl> + break ; <nl> + case MODE_OPEN_DIR : <nl> + get_ok ( ) - > set_text ( TTR ( " Select Current Folder " ) ) ; <nl> + break ; <nl> + } <nl> } <nl> <nl> void EditorFileDialog : : _dir_entered ( String p_dir ) { <nl> void EditorFileDialog : : _item_selected ( int p_item ) { <nl> <nl> file - > set_text ( d [ " name " ] ) ; <nl> _request_single_thumbnail ( get_current_dir ( ) . plus_file ( get_current_file ( ) ) ) ; <nl> + } else if ( mode = = MODE_OPEN_DIR ) { <nl> + get_ok ( ) - > set_text ( TTR ( " Select This Folder " ) ) ; <nl> } <nl> <nl> get_ok ( ) - > set_disabled ( _is_open_should_be_disabled ( ) ) ; <nl> bool EditorFileDialog : : _is_open_should_be_disabled ( ) { <nl> <nl> Vector < int > items = item_list - > get_selected_items ( ) ; <nl> if ( items . size ( ) = = 0 ) <nl> - return true ; <nl> + return mode ! = MODE_OPEN_DIR ; / / In " Open folder " mode , having nothing selected picks the current folder . <nl> <nl> for ( int i = 0 ; i < items . size ( ) ; i + + ) { <nl> <nl> mmm a / scene / gui / file_dialog . cpp <nl> ppp b / scene / gui / file_dialog . cpp <nl> void FileDialog : : _tree_selected ( ) { <nl> <nl> file - > set_text ( d [ " name " ] ) ; <nl> } else if ( mode = = MODE_OPEN_DIR ) { <nl> - get_ok ( ) - > set_text ( RTR ( " Select this Folder " ) ) ; <nl> + get_ok ( ) - > set_text ( RTR ( " Select This Folder " ) ) ; <nl> } <nl> <nl> get_ok ( ) - > set_disabled ( _is_open_should_be_disabled ( ) ) ; <nl>
|
Merge pull request from YeldhamDev/dir_select_fix
|
godotengine/godot
|
29c557a29a7ec47217233726797c4b91374b0a5e
|
2018-09-22T08:31:21Z
|
mmm a / cocos2dx / touch_dispatcher / CCTouchHandler . cpp <nl> ppp b / cocos2dx / touch_dispatcher / CCTouchHandler . cpp <nl> CCTouchDelegate * CCTouchHandler : : getDelegate ( void ) <nl> <nl> void CCTouchHandler : : setDelegate ( CCTouchDelegate * pDelegate ) <nl> { <nl> + if ( m_pDelegate ) <nl> + { <nl> + pDelegate - > keep ( ) ; <nl> + } <nl> m_pDelegate = pDelegate ; <nl> + if ( m_pDelegate ) <nl> + { <nl> + m_pDelegate - > destroy ( ) ; <nl> + } <nl> } <nl> <nl> int CCTouchHandler : : getPriority ( void ) <nl>
|
issue:
|
cocos2d/cocos2d-x
|
69762e4b4cd62961b5ca701d14348b626085fdb5
|
2010-09-06T03:48:04Z
|
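A note on the preceding cocos2d-x commit: when swapping a reference-counted delegate, the conventional ordering is to retain the incoming object before releasing the one being replaced, so that self-assignment (setDelegate(m_pDelegate)) cannot destroy the object prematurely. A hedged C++ sketch of that ordering, assuming keep()/destroy() behave like retain/release; this is the textbook pattern, not necessarily what the commit intends:

void CCTouchHandler::setDelegate(CCTouchDelegate *pDelegate)
{
    if (pDelegate)
    {
        pDelegate->keep();       // retain the new delegate first
    }
    if (m_pDelegate)
    {
        m_pDelegate->destroy();  // then release the one being replaced
    }
    m_pDelegate = pDelegate;
}

Guarding each call with a null check also keeps setDelegate(NULL) safe for clearing the delegate.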
mmm a / tensorflow / tools / pip_package / setup . py <nl> ppp b / tensorflow / tools / pip_package / setup . py <nl> <nl> ' absl - py > = 0 . 1 . 6 ' , <nl> ' astor > = 0 . 6 . 0 ' , <nl> ' gast > = 0 . 2 . 0 ' , <nl> - ' google_pasta > = 0 . 1 . 1 ' , <nl> + ' google_pasta > = 0 . 1 . 2 ' , <nl> ' keras_applications > = 1 . 0 . 6 ' , <nl> ' keras_preprocessing > = 1 . 0 . 5 ' , <nl> ' numpy > = 1 . 14 . 5 , < 2 . 0 ' , <nl>
|
Bump google_pasta dependency to 0.1.2
|
tensorflow/tensorflow
|
d469b92c7c2627f074d80ce874f445dbabbac6f0
|
2019-02-19T19:17:43Z
|
mmm a / tensorflow / python / distribute / tpu_strategy . py <nl> ppp b / tensorflow / python / distribute / tpu_strategy . py <nl> def __init__ ( self , <nl> self . _require_static_shapes = True <nl> <nl> self . experimental_enable_get_next_as_optional = True <nl> + self . experimental_enable_dynamic_batch_size = True <nl> <nl> def _validate_colocate_with_variable ( self , colocate_with_variable ) : <nl> values . validate_colocate ( colocate_with_variable , self ) <nl> def replicated_fn ( replica_id , replica_args , replica_kwargs ) : <nl> <nl> # Construct and pass ` maximum_shapes ` so that we could support dynamic <nl> # shapes using dynamic padder . <nl> - if replicate_inputs : <nl> + if self . experimental_enable_dynamic_batch_size and replicate_inputs : <nl> maximum_shapes = [ ] <nl> flattened_list = nest . flatten ( replicate_inputs [ 0 ] ) <nl> for input_tensor in flattened_list : <nl>
|
Add an escape path for people to skip dynamic padding in TPUStrategy.
|
tensorflow/tensorflow
|
81f2b3dee65c307316616ad4ea04d9581a44a6fb
|
2019-10-02T18:34:34Z
|
mmm a / src / compiler / backend / ia32 / instruction - selector - ia32 . cc <nl> ppp b / src / compiler / backend / ia32 / instruction - selector - ia32 . cc <nl> SIMD_ANYTRUE_LIST ( VISIT_SIMD_ANYTRUE ) <nl> IA32OperandGenerator g ( this ) ; \ <nl> InstructionOperand temps [ ] = { g . TempRegister ( ) , g . TempSimd128Register ( ) } ; \ <nl> Emit ( kIA32 # # Opcode , g . DefineAsRegister ( node ) , \ <nl> - g . UseUnique ( node - > InputAt ( 0 ) ) , arraysize ( temps ) , temps ) ; \ <nl> + g . UseUniqueRegister ( node - > InputAt ( 0 ) ) , arraysize ( temps ) , temps ) ; \ <nl> } <nl> SIMD_ALLTRUE_LIST ( VISIT_SIMD_ALLTRUE ) <nl> # undef VISIT_SIMD_ALLTRUE <nl>
|
[wasm-simd][ia32] Fix all_true operand to be register
|
v8/v8
|
d4b58fec7d701208813c98f675a18e4ac2c0a508
|
2020-05-27T22:34:58Z
|
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> bool ImGui : : InputTextEx ( const char * label , char * buf , int buf_size , const ImVec2 <nl> if ( g . IO . InputCharacters [ 0 ] ) <nl> { <nl> / / Process text input ( before we check for Return because using some IME will effectively send a Return ? ) <nl> - if ( ! is_ctrl_down & & ! is_alt_down & & is_editable ) <nl> + if ( ! is_ctrl_down & & is_editable ) <nl> { <nl> for ( int n = 0 ; n < IM_ARRAYSIZE ( g . IO . InputCharacters ) & & g . IO . InputCharacters [ n ] ; n + + ) <nl> if ( unsigned int c = ( unsigned int ) g . IO . InputCharacters [ n ] ) <nl>
|
InputText() fixed ignoring text inputs when ALT is pressed ()
|
ocornut/imgui
|
0a1d6b6b74023039e3ec71ad8ad593fedc9cac83
|
2015-09-16T09:38:42Z
|
mmm a / osquery / devtools / shell . cpp <nl> ppp b / osquery / devtools / shell . cpp <nl> <nl> # define _LARGEFILE_SOURCE 1 <nl> # endif <nl> <nl> + # include < iostream > <nl> # include < stdlib . h > <nl> # include < string . h > <nl> # include < stdio . h > <nl>
|
iostream in shell
|
osquery/osquery
|
660ef01777ed28cd9de391ec9467242918259922
|
2014-10-03T20:48:31Z
|
mmm a / doc / reference . md <nl> ppp b / doc / reference . md <nl> title : Reference <nl> Pin numbers in Arduino correspond directly to the ESP8266 GPIO pin numbers . ` pinMode ` , ` digitalRead ` , and ` digitalWrite ` functions work as usual , so to read GPIO2 , call ` digitalRead ( 2 ) ` . <nl> <nl> Digital pins 0 — 15 can be ` INPUT ` , ` OUTPUT ` , or ` INPUT_PULLUP ` . <nl> - Pin 16 can be ` INPUT ` , ` OUTPUT ` or ` INPUT_PULLDOWN ` . At startup , pins are configured as ` INPUT ` . <nl> + Pin 16 can be ` INPUT ` , ` OUTPUT ` or ` INPUT_PULLDOWN_16 ` . At startup , pins are configured as ` INPUT ` . <nl> <nl> Pins may also serve other functions , like Serial , I2C , SPI . These functions are normally activated by the corresponding library . The diagram below shows pin mapping for the popular ESP - 12 module . <nl> <nl>
|
Merge pull request from reaper7/patch-2
|
esp8266/Arduino
|
544734bdc591826beef2546ad72403be6b5cd44e
|
2015-07-30T06:42:38Z
|
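A short usage sketch for the constant corrected in the preceding esp8266/Arduino commit (Arduino-style C++; GPIO16 is the only ESP8266 pin with a built-in pull-down, hence the dedicated INPUT_PULLDOWN_16 mode):

void setup() {
    pinMode(2, INPUT_PULLUP);        // pins 0-15: INPUT, OUTPUT, or INPUT_PULLUP
    pinMode(16, INPUT_PULLDOWN_16);  // pin 16: INPUT, OUTPUT, or INPUT_PULLDOWN_16
}

void loop() {
    int level = digitalRead(16);     // reads LOW unless driven high externally
    (void)level;
}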
mmm a / tensorflow / tools / pip_package / BUILD <nl> ppp b / tensorflow / tools / pip_package / BUILD <nl> COMMON_PIP_DEPS = [ <nl> " / / tensorflow / contrib / autograph / converters : converters " , <nl> " / / tensorflow / contrib / autograph / converters : test_lib " , <nl> " / / tensorflow / contrib / autograph / impl : impl " , <nl> + " / / tensorflow / contrib / autograph / operators : operators " , <nl> " / / tensorflow / contrib / autograph / pyct : pyct " , <nl> " / / tensorflow / contrib / autograph / pyct / static_analysis : static_analysis " , <nl> " / / tensorflow / contrib / boosted_trees : boosted_trees_pip " , <nl>
|
Merge pull request from av8ramit/add_auto
|
tensorflow/tensorflow
|
45d7b767cf892efe362fca82bc96a6f16af318a9
|
2018-06-06T01:08:15Z
|
mmm a / hphp / hack / src / parsing / lexer_hack . mll <nl> ppp b / hphp / hack / src / parsing / lexer_hack . mll <nl> and xhptoken file = parse <nl> | ' / ' { Tslash } <nl> | ' \ " ' { Tdquote } <nl> | word { Tword } <nl> - | " < ! - - " { xhp_comment file lexbuf ; xhptoken file lexbuf } <nl> + ( * Signal when we ' ve hit a comment so that literal text regions <nl> + * get broken up by them . * ) <nl> + | " < ! - - " { Topen_xhp_comment } <nl> | _ { xhptoken file lexbuf } <nl> <nl> and xhpattr file = parse <nl> and fixme_state0 file = parse <nl> } <nl> | ws + { fixme_state0 file lexbuf <nl> } <nl> - | ' \ n ' { Lexing . new_line lexbuf ; <nl> + | ' \ n ' { Lexing . new_line lexbuf ; <nl> fixme_state0 file lexbuf <nl> } <nl> | ' [ ' { fixme_state1 file lexbuf } <nl> mmm a / hphp / hack / src / parsing / parser_hack . ml <nl> ppp b / hphp / hack / src / parsing / parser_hack . ml <nl> and xhp_attribute_string env start abs_start = <nl> | _ - > <nl> xhp_attribute_string env start abs_start <nl> <nl> + <nl> and xhp_body pos name env = <nl> + ( * First grab any literal text that appears before the next <nl> + * bit of markup * ) <nl> + let start = Pos . make env . file env . lb in <nl> + let abs_start = env . lb . Lexing . lex_curr_pos in <nl> + let text = xhp_text env start abs_start in <nl> + ( * Now handle any markup * ) <nl> + text @ xhp_body_inner pos name env <nl> + <nl> + ( * Grab literal text that appears inside of xhp . * ) <nl> + and xhp_text env start abs_start = <nl> + match L . xhptoken env . file env . lb with <nl> + ( * If we have hit something that is meaningful , <nl> + * we have to stop collecting literal text and go back <nl> + * to xhp_body . Grab any text , clean it up , and return . * ) <nl> + | Tlcb | Tlt | Topen_xhp_comment | Teof - > <nl> + L . back env . lb ; <nl> + <nl> + let len = env . lb . Lexing . lex_curr_pos - abs_start in <nl> + let pos = Pos . btw start ( Pos . make env . file env . lb ) in <nl> + <nl> + let content = String . sub env . lb . Lexing . lex_buffer abs_start len in <nl> + ( * need to squash whitespace down to a single space * ) <nl> + let squished = Str . global_replace Utils . nonempty_ws_regexp " " content in <nl> + ( * if it is empty or all whitespace just ignore it * ) <nl> + if squished = " " | | squished = " " then [ ] else <nl> + [ pos , String ( pos , squished ) ] <nl> + <nl> + | _ - > xhp_text env start abs_start <nl> + <nl> + ( * parses an xhp body where we know that the next token is not <nl> + * just more literal text * ) <nl> + and xhp_body_inner pos name env = <nl> match L . xhptoken env . file env . lb with <nl> | Tlcb when env . mode = FileInfo . Mdecl - > <nl> ignore_body env ; <nl> and xhp_body pos name env = <nl> | Tlt - > <nl> if is_xhp env <nl> then <nl> - ( match xhp env with <nl> - | ( _ , Xml ( _ , _ , _ ) ) as xml - > <nl> - xml : : xhp_body pos name env <nl> - | _ - > xhp_body pos name env ) <nl> + let xml = xhp env in <nl> + xml : : xhp_body pos name env <nl> else <nl> ( match L . xhptoken env . file env . lb with <nl> | Tslash - > <nl> and xhp_body pos name env = <nl> error_expect env name ; <nl> [ ] <nl> end <nl> - else xhp_body pos name env <nl> + else begin <nl> + error_expect env " closing tag name " ; <nl> + xhp_body pos name env <nl> + end <nl> | _ - > <nl> + error_at env pos " Stray < in xhp " ; <nl> L . back env . 
lb ; <nl> xhp_body pos name env <nl> ) <nl> | Teof - > <nl> error_at env pos " Xhp tag not closed " ; <nl> [ ] <nl> - | Tword - > <nl> + ( * The lexer returns open comments so that we can notice them and <nl> + * drop them from our text fields . Parse the comment and continue . * ) <nl> + | Topen_xhp_comment - > <nl> + xhp_comment env . file env . lb ; <nl> xhp_body pos name env <nl> - | _ - > xhp_body pos name env <nl> + ( * xhp_body_inner only gets called when one of the above was seen * ) <nl> + | _ - > assert false <nl> <nl> ( * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ) <nl> ( * Typedefs * ) <nl> mmm a / hphp / hack / src / utils / utils . ml <nl> ppp b / hphp / hack / src / utils / utils . ml <nl> let with_context ~ enter ~ exit ~ do_ = <nl> raise e in <nl> exit ( ) ; <nl> result <nl> + <nl> + let nonempty_ws_regexp = Str . regexp " [ \ n \ t \ r \ 012 ] + " <nl> mmm a / hphp / hack / test / emitter / xhp . php <nl> ppp b / hphp / hack / test / emitter / xhp . php <nl> function escape ( string $ s ) : : lol { <nl> return < lol > { $ s } < / lol > ; <nl> } <nl> <nl> + function texty ( ) : : lol { <nl> + $ x = ' hi ' ; <nl> + return < lol > hmm < ! - - - - > bar amargh < ! - - - - > arrrrrrgh { $ x } sigh <nl> + welp { $ x } <nl> + a < / lol > ; <nl> + } <nl> + <nl> function make ( ) : : foo { <nl> return < foo val = " test " > <nl> - < lol > < / lol > <nl> - < lol > < / lol > <nl> + < lol > hello < / lol > <nl> + < lol > world < / lol > <nl> + < lol > testing . . . < / lol > <nl> + < lol > . . . < / lol > <nl> < / foo > ; <nl> } <nl> <nl> function test ( ) : void { <nl> var_dump ( $ x - > : arg2 ) ; <nl> var_dump ( $ x - > : val ) ; <nl> <nl> + var_dump ( texty ( ) ) ; <nl> + var_dump ( texty ( ) - > getChildren ( ) ) ; <nl> + <nl> var_dump ( escape ( " wheeeee " ) ) ; <nl> } <nl> mmm a / hphp / hack / test / typecheck / heredoc_token_inside_xhp . php . exp <nl> ppp b / hphp / hack / test / typecheck / heredoc_token_inside_xhp . php . exp <nl> @ @ - 1 + 1 , 2 @ @ <nl> - No errors <nl> + File " heredoc_token_inside_xhp . php " , line 14 , characters 13 - 16 : <nl> + Stray < in xhp ( Parsing [ 1002 ] ) <nl>
|
Collect literal text in xhp elements
|
facebook/hhvm
|
21c21b995848d427457d25486c9d22872ee25347
|
2015-08-12T21:30:47Z
|
mmm a / src / mongo / db / s / migration_destination_manager . cpp <nl> ppp b / src / mongo / db / s / migration_destination_manager . cpp <nl> repl : : OpTime MigrationDestinationManager : : cloneDocumentsFromDonor ( <nl> " causedBy_exceptionToStatus " _attr = causedBy ( redact ( exceptionToStatus ( ) ) ) ) ; <nl> } <nl> } } ; <nl> - auto inserterThreadJoinGuard = makeGuard ( [ & ] { <nl> - batches . closeProducerEnd ( ) ; <nl> - inserterThread . join ( ) ; <nl> - } ) ; <nl> <nl> - while ( true ) { <nl> - opCtx - > checkForInterrupt ( ) ; <nl> <nl> - auto res = fetchBatchFn ( opCtx ) ; <nl> - <nl> - opCtx - > checkForInterrupt ( ) ; <nl> - batches . push ( res . getOwned ( ) , opCtx ) ; <nl> - auto arr = res [ " objects " ] . Obj ( ) ; <nl> - if ( arr . isEmpty ( ) ) { <nl> - inserterThreadJoinGuard . dismiss ( ) ; <nl> + { <nl> + auto inserterThreadJoinGuard = makeGuard ( [ & ] { <nl> + batches . closeProducerEnd ( ) ; <nl> inserterThread . join ( ) ; <nl> - opCtx - > checkForInterrupt ( ) ; <nl> - break ; <nl> + } ) ; <nl> + <nl> + while ( true ) { <nl> + auto res = fetchBatchFn ( opCtx ) ; <nl> + try { <nl> + batches . push ( res . getOwned ( ) , opCtx ) ; <nl> + auto arr = res [ " objects " ] . Obj ( ) ; <nl> + if ( arr . isEmpty ( ) ) { <nl> + break ; <nl> + } <nl> + } catch ( const ExceptionFor < ErrorCodes : : ProducerConsumerQueueEndClosed > & ) { <nl> + break ; <nl> + } <nl> } <nl> - } <nl> + } / / This scope ensures that the guard is destroyed <nl> <nl> + / / This check is necessary because the consumer thread uses killOp to propagate errors to the <nl> + / / producer thread ( this thread ) <nl> + opCtx - > checkForInterrupt ( ) ; <nl> return lastOpApplied ; <nl> } <nl> <nl>
|
SERVER-42617 Race in CloneDocumentsCatchesInsertErrors can cause it to return an unexpected error
|
mongodb/mongo
|
876b3af1091b299884869c34a41f7f37d4dcc0bb
|
2020-02-14T12:24:50Z
|
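A generic sketch of the scoping fix in the preceding MongoDB commit: the extra braces guarantee the inserter (consumer) thread is joined before the post-loop interrupt check runs, so errors the consumer propagated via killOp are observed reliably. JoinGuard below is a minimal stand-in for the makeGuard() helper used in the diff; names are illustrative:

#include <thread>

struct JoinGuard {
    std::thread & t;
    ~JoinGuard() { if (t.joinable()) t.join(); }   // join on scope exit
};

void runProducer(std::thread & inserterThread, bool (*fetchAndPushBatch)()) {
    {
        JoinGuard joinGuard{inserterThread};
        while (fetchAndPushBatch()) {
            // keep producing until the batch is empty or the queue closes
        }
    }   // joinGuard destroyed: the consumer has joined before anything below runs
    // Only now is it safe to check for errors the consumer may have raised
    // (in the diff, opCtx->checkForInterrupt()).
}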
mmm a / gpu / Makefile <nl> ppp b / gpu / Makefile <nl> CUOBJ = impl / BroadcastSum . o \ <nl> $ ( LIBNAME ) . a : $ ( CPPOBJ ) $ ( CUOBJ ) <nl> ar r $ @ $ ^ <nl> <nl> + $ ( LIBNAME ) . $ ( SHAREDEXT ) : $ ( CPPOBJ ) $ ( CUOBJ ) <nl> + $ ( CC ) - L $ ( CUDAROOT ) / lib64 - lcublas - lcudart $ ( LDFLAGS ) \ <nl> + $ ( FAISSSHAREDFLAGS ) - o $ ( LIBNAME ) . $ ( SHAREDEXT ) $ ^ <nl> + <nl> <nl> test / demo_ivfpq_indexing_gpu : test / demo_ivfpq_indexing_gpu . cpp \ <nl> $ ( LIBNAME ) . a . . / libfaiss . a <nl> test / demo_ivfpq_indexing_gpu : test / demo_ivfpq_indexing_gpu . cpp \ <nl> <nl> clean : <nl> rm - rf * . o impl / * . o utils / * . o test / * . o $ ( LIBNAME ) . a \ <nl> + $ ( LIBNAME ) . $ ( SHAREDEXT ) \ <nl> . . / python / * swigfaiss_gpu * . . / * swigfaiss_gpu * <nl> <nl> <nl> mmm a / tutorial / cpp / 4 - GPU . cpp <nl> ppp b / tutorial / cpp / 4 - GPU . cpp <nl> int main ( ) { <nl> xq [ d * i ] + = i / 1000 . ; <nl> } <nl> <nl> - int k = 4 ; <nl> - <nl> faiss : : gpu : : StandardGpuResources res ; <nl> <nl> / / Using a flat index <nl> int main ( ) { <nl> index_flat . add ( nb , xb ) ; / / add vectors to the index <nl> printf ( " ntotal = % ld \ n " , index_flat . ntotal ) ; <nl> <nl> + int k = 4 ; <nl> + <nl> { / / search xq <nl> long * I = new long [ k * nq ] ; <nl> float * D = new float [ k * nq ] ; <nl> mmm a / tutorial / cpp / 5 - Multiple - GPUs . cpp <nl> ppp b / tutorial / cpp / 5 - Multiple - GPUs . cpp <nl> int main ( ) { <nl> <nl> int ngpus = faiss : : gpu : : getNumDevices ( ) ; <nl> <nl> + printf ( " Number of GPUs : % d \ n " , ngpus ) ; <nl> + <nl> std : : vector < faiss : : gpu : : GpuResources * > res ; <nl> std : : vector < int > devs ; <nl> for ( int i = 0 ; i < ngpus ; i + + ) { <nl> int main ( ) { <nl> ) ; <nl> <nl> printf ( " is_trained = % s \ n " , gpu_index - > is_trained ? " true " : " false " ) ; <nl> - gpu_index - > add ( nb , xb ) ; / / vectors to the index <nl> + gpu_index - > add ( nb , xb ) ; / / add vectors to the index <nl> printf ( " ntotal = % ld \ n " , gpu_index - > ntotal ) ; <nl> <nl> int k = 4 ; <nl> mmm a / tutorial / cpp / Makefile <nl> ppp b / tutorial / cpp / Makefile <nl> MAKEFILE_INC = . . / . . / makefile . inc <nl> <nl> - include $ ( MAKEFILE_INC ) <nl> <nl> + NVCCLDFLAGS = - Xcompiler \ " - Wl , - rpath = . . / . . / : . . / . . / gpu / \ " \ <nl> + - L . . / . . - L . . / . . / gpu - lfaiss - lgpufaiss <nl> + <nl> + LDFLAGS = - L . . / . . - Wl , - rpath = . . / . . - lfaiss <nl> + <nl> all : cpu gpu <nl> <nl> cpu : 1 - Flat 2 - IVFFlat 3 - IVFPQ <nl> <nl> gpu : 4 - GPU 5 - Multiple - GPUs <nl> <nl> - 1 - Flat : 1 - Flat . cpp . . / . . / libfaiss . a <nl> - $ ( CC ) - o $ @ $ ( CFLAGS ) $ ^ - I . . / . . / . . / $ ( LDFLAGS ) $ ( BLASLDFLAGS ) <nl> + 1 - Flat : 1 - Flat . cpp . . / . . / libfaiss . $ ( SHAREDEXT ) <nl> + $ ( CC ) - o $ @ $ ( CFLAGS ) $ < - I . . / . . / . . / $ ( LDFLAGS ) <nl> + <nl> + 2 - IVFFlat : 2 - IVFFlat . cpp . . / . . / libfaiss . $ ( SHAREDEXT ) <nl> + $ ( CC ) - o $ @ $ ( CFLAGS ) $ < - I . . / . . / . . / $ ( LDFLAGS ) <nl> + <nl> + 3 - IVFPQ : 3 - IVFPQ . cpp . . / . . / libfaiss . $ ( SHAREDEXT ) <nl> + $ ( CC ) - o $ @ $ ( CFLAGS ) $ < - I . . / . . / . . / $ ( LDFLAGS ) <nl> + <nl> + 4 - GPU : 4 - GPU . cpp . . / . . / libfaiss . $ ( SHAREDEXT ) . . / . . / gpu / libgpufaiss . $ ( SHAREDEXT ) <nl> + $ ( NVCC ) $ ( NVCCFLAGS ) - o $ @ $ < $ ( NVCCLDFLAGS ) - I . . / . . / . . / <nl> <nl> - 2 - IVFFlat : 2 - IVFFlat . cpp . . / . . / libfaiss . a <nl> - $ ( CC ) - o $ @ $ ( CFLAGS ) $ ^ - I . . 
/ . . / . . / $ ( LDFLAGS ) $ ( BLASLDFLAGS ) <nl> + 5 - Multiple - GPUs : 5 - Multiple - GPUs . cpp . . / . . / libfaiss . $ ( SHAREDEXT ) \ <nl> + . . / . . / gpu / libgpufaiss . $ ( SHAREDEXT ) <nl> + $ ( NVCC ) $ ( NVCCFLAGS ) - o $ @ $ < $ ( NVCCLDFLAGS ) - I . . / . . / . . / <nl> <nl> - 3 - IVFPQ : 3 - IVFPQ . cpp . . / . . / libfaiss . a <nl> - $ ( CC ) - o $ @ $ ( CFLAGS ) $ ^ - I . . / . . / . . / $ ( LDFLAGS ) $ ( BLASLDFLAGS ) <nl> + . . / . . / libfaiss . $ ( SHAREDEXT ) : <nl> + cd . . / . . / & & make libfaiss . $ ( SHAREDEXT ) <nl> <nl> - 4 - GPU : 4 - GPU . cpp . . / . . / libfaiss . a . . / . . / gpu / libgpufaiss . a <nl> - $ ( NVCC ) $ ( NVCCFLAGS ) - o $ @ $ ^ - I . . / . . / . . / - Xcompiler - fopenmp - lcublas \ <nl> - $ ( BLASLDFLAGSNVCC ) <nl> + . . / . . / gpu / libgpufaiss . $ ( SHAREDEXT ) : <nl> + cd . . / . . / gpu / & & make libgpufaiss . $ ( SHAREDEXT ) <nl> <nl> - 5 - Multiple - GPUs : 5 - Multiple - GPUs . cpp . . / . . / libfaiss . a . . / . . / gpu / libgpufaiss . a <nl> - $ ( NVCC ) $ ( NVCCFLAGS ) - o $ @ $ ^ - I . . / . . / . . / - Xcompiler - fopenmp - lcublas \ <nl> - $ ( BLASLDFLAGSNVCC ) <nl> + clean : <nl> + rm - f 1 - Flat 2 - IVFFlat 3 - IVFPQ 4 - GPU 5 - Multiple - GPUs <nl> mmm a / tutorial / python / 4 - GPU . py <nl> ppp b / tutorial / python / 4 - GPU . py <nl> <nl> <nl> import faiss # make faiss available <nl> <nl> - print ( " number of GPUs : " , faiss . get_num_gpus ( ) ) <nl> - <nl> res = faiss . StandardGpuResources ( ) # use a single GPU <nl> <nl> # # Using a flat index <nl> mmm a / tutorial / python / 5 - Multiple - GPUs . py <nl> ppp b / tutorial / python / 5 - Multiple - GPUs . py <nl> <nl> <nl> print ( " number of GPUs : " , ngpus ) <nl> <nl> - index = faiss . index_cpu_to_all_gpus ( # build the index <nl> - faiss . IndexFlatL2 ( d ) <nl> + cpu_index = faiss . IndexFlatL2 ( d ) <nl> + <nl> + gpu_index = faiss . index_cpu_to_all_gpus ( # build the index <nl> + cpu_index <nl> ) <nl> <nl> - index . add ( xb ) # add vectors to the index <nl> - print ( index . ntotal ) <nl> + gpu_index . add ( xb ) # add vectors to the index <nl> + print ( gpu_index . ntotal ) <nl> <nl> k = 4 # we want to see 4 nearest neighbors <nl> - D , I = index . search ( xq , k ) # actual search <nl> + D , I = gpu_index . search ( xq , k ) # actual search <nl> print ( I [ : 5 ] ) # neighbors of the 5 first queries <nl> print ( I [ - 5 : ] ) # neighbors of the 5 last queries <nl>
|
Make tutorial examples use dynamic linkage. ()
|
facebookresearch/faiss
|
16aed09c0ffb0d473921e627a3103ab1ad7c9dc2
|
2018-02-21T14:36:29Z
|
mmm a / hphp / hack / src / typing / typing_subtype . ml <nl> ppp b / hphp / hack / src / typing / typing_subtype . ml <nl> and simplify_subtype <nl> ( * Likewise , reduce nullable on left to a union * ) <nl> | Toption ty , Tunion _ - > <nl> let r = fst ty_sub in <nl> - env | > <nl> - simplify_subtype ~ seen_generic_params ~ this_ty ( MakeType . null r ) ty_super & & & <nl> - simplify_subtype ~ seen_generic_params ~ this_ty ty ty_super <nl> + let env , p1 = <nl> + simplify_subtype ~ seen_generic_params ~ this_ty ( MakeType . null r ) ty_super env in <nl> + if TL . is_unsat p1 <nl> + then invalid ( ) <nl> + else <nl> + let env , p2 = simplify_subtype ~ seen_generic_params ~ this_ty ty ty_super env in <nl> + env , TL . conj p1 p2 <nl> <nl> | Tabstract ( ( AKnewtype _ | AKdependent _ ) , Some ty ) , Tunion [ ] - > <nl> simplify_subtype ~ seen_generic_params ~ this_ty ty ty_super env <nl> new file mode 100644 <nl> index 00000000000 . . d0eaeefe92a <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / mixed_subtype . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class Foo { <nl> + public function A ( ) : nothing { <nl> + throw new Exception ( ) ; <nl> + } <nl> + } <nl> + <nl> + class Bar extends Foo { <nl> + public function A ( ) : mixed { <nl> + throw new Exception ( ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 039a20d2175 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / mixed_subtype . php . exp <nl> <nl> + File " mixed_subtype . php " , line 10 , characters 7 - 9 : <nl> + Class Bar does not correctly implement all required members ( Typing [ 4110 ] ) <nl> + File " mixed_subtype . php " , line 10 , characters 19 - 21 : <nl> + Some members are incompatible with those declared in type Foo <nl> + Read the following to see why : <nl> + File " mixed_subtype . php " , line 11 , characters 19 - 19 : <nl> + Member A has the wrong type <nl> + File " mixed_subtype . php " , line 5 , characters 24 - 30 : <nl> + Expected nothing <nl> + File " mixed_subtype . php " , line 11 , characters 24 - 28 : <nl> + But got mixed <nl>
|
Improve error messages generated by subtyping checks with an option on the left and union on the right
|
facebook/hhvm
|
f704cb3a6a807aa309d2d6861b3cebeb015c9f8f
|
2019-07-10T02:55:32Z
|
mmm a / modules / dreamview / backend / hmi / hmi_worker . cc <nl> ppp b / modules / dreamview / backend / hmi / hmi_worker . cc <nl> DEFINE_string ( current_mode_db_key , " / apollo / hmi / status : current_mode " , <nl> DEFINE_string ( default_hmi_mode , " Mkz Standard Debug " , <nl> " Default HMI Mode when there is no cache . " ) ; <nl> <nl> - DEFINE_string ( container_meta_ini , " / apollo / meta . ini " , <nl> - " Container meta info file . " ) ; <nl> - <nl> namespace apollo { <nl> namespace dreamview { <nl> namespace { <nl> void HMIWorker : : RecordAudio ( const std : : string & data ) { <nl> } <nl> <nl> void HMIWorker : : StatusUpdateThreadLoop ( ) { <nl> - const size_t kLoopIntervalMs = 200 ; <nl> while ( ! stop_ ) { <nl> + static constexpr int kLoopIntervalMs = 200 ; <nl> std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( kLoopIntervalMs ) ) ; <nl> bool status_changed = false ; <nl> { <nl>
|
Dreamview: remove unused container_meta_ini flag && move kLoopIntervalMs inside the loop
|
ApolloAuto/apollo
|
153480812bc6f39822f744788516127ca67e1b81
|
2019-06-04T17:58:48Z
|
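On the kLoopIntervalMs change in the preceding Apollo commit: a function-local static constexpr is a compile-time constant, so declaring it inside the loop scopes it to its only user without any per-iteration cost. A minimal sketch (names are illustrative):

#include <atomic>
#include <chrono>
#include <thread>

void StatusUpdateLoop(const std::atomic<bool> & stop) {
    while (!stop) {
        static constexpr int kLoopIntervalMs = 200;  // evaluated at compile time
        std::this_thread::sleep_for(std::chrono::milliseconds(kLoopIntervalMs));
        // ... poll status and push updates ...
    }
}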
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Daniel James < dnljms @ gmail . com > <nl> Paolo Giarrusso < p . giarrusso @ gmail . com > <nl> Daniel Andersson < kodandersson @ gmail . com > <nl> Alexander Botero - Lowry < alexbl @ FreeBSD . org > <nl> + Matt Hanselman < mjhanselman @ gmail . com > <nl> mmm a / tools / test . py <nl> ppp b / tools / test . py <nl> def GetConfiguration ( self , context ) : <nl> file = None <nl> try : <nl> ( file , pathname , description ) = imp . find_module ( ' testcfg ' , [ self . path ] ) <nl> - module = imp . load_module ( self . path , file , pathname , description ) <nl> + module = imp . load_module ( ' testcfg ' , file , pathname , description ) <nl> self . config = module . GetConfiguration ( context , self . path ) <nl> finally : <nl> if file : <nl>
|
Issue 63: Running tests fails if there is a dot in the checkout path
|
v8/v8
|
6ace05ed2a59ec2de59842c95e1dce780f0b0404
|
2009-02-25T10:09:42Z
|
mmm a / stdlib / CMakeLists . txt <nl> ppp b / stdlib / CMakeLists . txt <nl> <nl> # Create convenience targets for the Swift standard library . <nl> + <nl> + set ( CMAKE_CXX_COMPILER " $ { SWIFT_NATIVE_LLVM_TOOLS_PATH } / clang + + " ) <nl> + set ( CMAKE_C_COMPILER " $ { SWIFT_NATIVE_LLVM_TOOLS_PATH } / clang " ) <nl> + <nl> add_custom_target ( swift - stdlib - all ) <nl> foreach ( SDK $ { SWIFT_SDKS } ) <nl> add_custom_target ( " swift - stdlib - $ { SWIFT_SDK_ $ { SDK } _LIB_SUBDIR } " ) <nl> mmm a / utils / build - script - impl <nl> ppp b / utils / build - script - impl <nl> for deployment_target in " $ { HOST_TARGET } " " $ { CROSS_TOOLS_DEPLOYMENT_TARGETS [ @ ] } " <nl> $ { DISTCC_PUMP } " $ { CMAKE } " - - build " $ { build_dir } " $ ( cmake_config_opt $ { product } ) - - $ { BUILD_ARGS } $ { build_targets [ @ ] } <nl> { set + x ; } 2 > / dev / null <nl> fi <nl> + <nl> + # When we are building LLVM create symlinks to the c + + headers . <nl> + if [ [ " $ { product } " = = " llvm " ] ] ; then <nl> + <nl> + # Find the location of the c + + header dir . <nl> + if [ [ " $ ( uname - s ) " = = " Darwin " ] ] ; then <nl> + HOST_CXX_DIR = $ ( dirname $ { HOST_CXX } ) <nl> + HOST_CXX_HEADERS_DIR = " $ HOST_CXX_DIR / . . / . . / usr / include / c + + " <nl> + else # Linux <nl> + HOST_CXX_HEADERS_DIR = " / usr / include / c + + " <nl> + fi <nl> + <nl> + # Find the path in which the local clang build is expecting to find <nl> + # the c + + header files . <nl> + BUILT_CXX_INCLUDE_DIR = " $ llvm_build_dir / include " <nl> + <nl> + echo " symlinking the system headers ( $ HOST_CXX_HEADERS_DIR ) into the local clang build directory ( $ BUILT_CXX_INCLUDE_DIR ) . " <nl> + ln - s - f " $ HOST_CXX_HEADERS_DIR " " $ BUILT_CXX_INCLUDE_DIR / c + + " <nl> + fi <nl> done <nl> done <nl> # END OF BUILD PHASE <nl> if [ [ " $ { INSTALLABLE_PACKAGE } " ] ] ; then <nl> { set + x ; } 2 > / dev / null <nl> fi <nl> fi <nl> + <nl>
|
Build the Swift runtime using the locally built clang compiler.
|
apple/swift
|
56eb498a74cea1547bfed302f42e68f3607e039e
|
2016-03-29T20:34:15Z
|
mmm a / ios / sdk / WeexSDK / Sources / Module / WXModalUIModule . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Module / WXModalUIModule . m <nl> - ( void ) showToast : ( UIView * ) toastView superView : ( UIView * ) superView duration : ( do <nl> <nl> [ WXToastManager sharedManager ] . toastingView = toastView ; <nl> [ superView addSubview : toastView ] ; <nl> - <nl> + __weak typeof ( self ) weakSelf = self ; <nl> [ UIView animateWithDuration : 0 . 2 delay : duration options : UIViewAnimationOptionCurveEaseInOut animations : ^ { <nl> toastView . transform = CGAffineTransformConcat ( toastView . transform , CGAffineTransformMakeScale ( 0 . 8 , 0 . 8 ) ) ; <nl> } completion : ^ ( BOOL finished ) { <nl> - ( void ) showToast : ( UIView * ) toastView superView : ( UIView * ) superView duration : ( do <nl> [ queue removeObjectAtIndex : 0 ] ; <nl> if ( queue . count > 0 ) { <nl> WXToastInfo * info = [ queue firstObject ] ; <nl> - [ self showToast : info . toastView superView : info . superView duration : info . duration ] ; <nl> + [ weakSelf showToast : info . toastView superView : info . superView duration : info . duration ] ; <nl> } <nl> } <nl> } ] ; <nl>
|
* [ios] use weak self to fix the toast still showing after the page is popped
|
apache/incubator-weex
|
e44104af2703557289601d496175fe207085dddd
|
2017-09-14T10:46:14Z
|
mmm a / src / mongo / db / repl / storage_interface_impl . cpp <nl> ppp b / src / mongo / db / repl / storage_interface_impl . cpp <nl> <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / repl / replication_coordinator_global . h " <nl> # include " mongo / db / repl / rs_initialsync . h " <nl> - # include " mongo / db / server_parameters . h " <nl> + # include " mongo / db / repl / task_runner . h " <nl> # include " mongo / db / service_context . h " <nl> # include " mongo / util / assert_util . h " <nl> # include " mongo / util / concurrency / old_thread_pool . h " <nl> # include " mongo / util / destructor_guard . h " <nl> # include " mongo / util / log . h " <nl> # include " mongo / util / mongoutils / str . h " <nl> + <nl> namespace mongo { <nl> namespace repl { <nl> <nl> const char StorageInterfaceImpl : : kBeginFieldName [ ] = " begin " ; <nl> namespace { <nl> using UniqueLock = stdx : : unique_lock < stdx : : mutex > ; <nl> <nl> - MONGO_EXPORT_STARTUP_SERVER_PARAMETER ( dataReplicatorInitialSyncInserterThreads , int , 4 ) ; <nl> - <nl> const BSONObj kInitialSyncFlag ( BSON ( StorageInterfaceImpl : : kInitialSyncFlagFieldName < < true ) ) ; <nl> } / / namespace <nl> <nl> StorageInterfaceImpl : : ~ StorageInterfaceImpl ( ) { <nl> DESTRUCTOR_GUARD ( shutdown ( ) ; ) ; <nl> } <nl> <nl> - void StorageInterfaceImpl : : startup ( ) { <nl> - _bulkLoaderThreads . reset ( <nl> - new OldThreadPool { dataReplicatorInitialSyncInserterThreads , " InitialSyncInserters - " } ) ; <nl> - } ; <nl> + void StorageInterfaceImpl : : startup ( ) { } <nl> <nl> - void StorageInterfaceImpl : : shutdown ( ) { <nl> - if ( _bulkLoaderThreads ) { <nl> - _bulkLoaderThreads - > join ( ) ; <nl> - _bulkLoaderThreads . reset ( ) ; <nl> - } <nl> - } <nl> + void StorageInterfaceImpl : : shutdown ( ) { } <nl> <nl> NamespaceString StorageInterfaceImpl : : getMinValidNss ( ) const { <nl> return _minValidNss ; <nl> mmm a / src / mongo / db / repl / storage_interface_impl . h <nl> ppp b / src / mongo / db / repl / storage_interface_impl . h <nl> <nl> # include " mongo / base / string_data . h " <nl> # include " mongo / bson / bsonobj . h " <nl> # include " mongo / db / catalog / index_create . h " <nl> - # include " mongo / db / db_raii . h " <nl> # include " mongo / db / namespace_string . h " <nl> # include " mongo / db / repl / storage_interface . h " <nl> - # include " mongo / db / repl / task_runner . h " <nl> - # include " mongo / util / concurrency / old_thread_pool . h " <nl> <nl> namespace mongo { <nl> namespace repl { <nl> class StorageInterfaceImpl : public StorageInterface { <nl> Status isAdminDbValid ( OperationContext * txn ) override ; <nl> <nl> private : <nl> - / / One thread per collection / TaskRunner <nl> - std : : unique_ptr < OldThreadPool > _bulkLoaderThreads ; <nl> const NamespaceString _minValidNss ; <nl> } ; <nl> <nl>
|
SERVER-25084 removed unused StorageInterfaceImpl::_bulkLoaderThreads
|
mongodb/mongo
|
f8c4d2025431d83cda24c1a193321952372674c7
|
2016-08-02T18:33:48Z
|
mmm a / stdlib / public / core / FloatingPoint . swift . gyb <nl> ppp b / stdlib / public / core / FloatingPoint . swift . gyb <nl> public enum FloatingPointSign : Int { <nl> <nl> / / / The sign for a negative value . <nl> case minus <nl> + <nl> + / / Explicit declarations of otherwise - synthesized members to make them <nl> + / / @ inlinable , promising that we will never change the implementation . <nl> + <nl> + @ inlinable <nl> + public init ? ( rawValue : Int ) { <nl> + switch rawValue { <nl> + case 0 : self = . plus <nl> + case 1 : self = . minus <nl> + default : return nil <nl> + } <nl> + } <nl> + <nl> + @ inlinable <nl> + public var rawValue : Int { <nl> + switch self { <nl> + case . plus : return 0 <nl> + case . minus : return 1 <nl> + } <nl> + } <nl> + <nl> + @ inlinable <nl> + public static func = = ( a : FloatingPointSign , b : FloatingPointSign ) - > Bool { <nl> + return a . rawValue = = b . rawValue <nl> + } <nl> } <nl> <nl> / / / The IEEE 754 floating - point classes . <nl>
|
Declare some FloatingPointSign members explicitly for @inlinable ()
|
apple/swift
|
f6b67f33feaefd9de3bc68d1e8c9a8b078d7e2ce
|
2018-04-20T22:28:53Z
|
mmm a / skin / Confluence / 720p / MusicOSD . xml <nl> ppp b / skin / Confluence / 720p / MusicOSD . xml <nl> <nl> < onright > 703 < / onright > <nl> < onup > 803 < / onup > <nl> < ondown > 803 < / ondown > <nl> + < onclick > Close < / onclick > <nl> < onclick > XBMC . RunScript ( $ INFO [ Skin . String ( LyricScript_Path ) ] ) < / onclick > <nl> < visible > Skin . HasSetting ( LyricScript_Enable ) + ! IsEmpty ( Skin . String ( LyricScript_Path ) ) < / visible > <nl> < / control > <nl>
|
Fixed: [Confluence] make sure we close the Music OSD when we click the Lyrics button
|
xbmc/xbmc
|
b23b4aebbc53c9d14e345a85c40ea775fa5e6572
|
2009-12-02T08:42:41Z
|
mmm a / test / mozilla / testcfg . py <nl> ppp b / test / mozilla / testcfg . py <nl> <nl> from testrunner . local import testsuite <nl> from testrunner . objects import testcase <nl> <nl> + SVN_SERVER = ( <nl> + " svn : / / svn . chromium . org / chrome / trunk / deps / third_party / mozilla - tests " ) <nl> + MOZILLA_VERSION = " 51236 " <nl> <nl> - MOZILLA_VERSION = " 2010 - 06 - 29 " <nl> <nl> - <nl> - EXCLUDED = [ " CVS " ] <nl> + EXCLUDED = [ " CVS " , " . svn " ] <nl> <nl> <nl> FRAMEWORK = " " " <nl> def DownloadData ( self ) : <nl> os . chdir ( old_cwd ) <nl> return <nl> <nl> - # No cached copy . Check out via CVS , and pack as . tar . gz for later use . <nl> - command = ( " cvs - d : pserver : anonymous @ cvs - mirror . mozilla . org : / cvsroot " <nl> - " co - D % s mozilla / js / tests " % MOZILLA_VERSION ) <nl> + # No cached copy . Check out via SVN , and pack as . tar . gz for later use . <nl> + command = ( " svn co - r % s % s mozilla / js / tests " % <nl> + ( MOZILLA_VERSION , SVN_SERVER ) ) <nl> code = subprocess . call ( command , shell = True ) <nl> if code ! = 0 : <nl> os . chdir ( old_cwd ) <nl>
|
[test] Fetch mozilla test data from SVN.
|
v8/v8
|
8c1742026aa0d8593706336e8d2c96abf6c63f91
|
2015-09-24T09:42:32Z
|
mmm a / lib / Parse / Lexer . cpp <nl> ppp b / lib / Parse / Lexer . cpp <nl> void Lexer : : lexImpl ( ) { <nl> return formToken ( tok : : unknown , TokStart ) ; <nl> <nl> case ' # ' : <nl> - / / @ is only a token in SIL mode . <nl> + / / # is only a token in SIL mode . <nl> if ( InSILMode ) <nl> return formToken ( tok : : sil_pound , TokStart ) ; <nl> diagnose ( CurPtr - 1 , diag : : lex_invalid_character ) ; <nl>
|
fix pasto
|
apple/swift
|
1433bd7287d8ebb8390be869402e9c9269b644d0
|
2013-07-25T03:31:27Z
|
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( HAVE_OPEN ) <nl> endif ( ) <nl> <nl> add_library ( fmt $ { FMT_SOURCES } $ { FMT_HEADERS } README . rst ChangeLog . rst ) <nl> + add_library ( fmt : : fmt ALIAS fmt ) <nl> <nl> # Starting with CMake 3 . 1 the CXX_STANDARD property can be used instead . <nl> # Don ' t export - std since it may break projects that use other standards . <nl> if ( FMT_INSTALL ) <nl> $ { PROJECT_SOURCE_DIR } / support / cmake / fmt - config . cmake . in <nl> $ { project_config } <nl> INSTALL_DESTINATION $ { FMT_CMAKE_DIR } ) <nl> - export ( TARGETS $ { INSTALL_TARGETS } <nl> + # Use a namespace because CMake provides better diagnostics for namespaced <nl> + # imported targets . <nl> + export ( TARGETS $ { INSTALL_TARGETS } NAMESPACE fmt : : <nl> FILE $ { PROJECT_BINARY_DIR } / $ { targets_export_name } . cmake ) <nl> <nl> # Install version , config and target files . <nl> install ( <nl> FILES $ { project_config } $ { version_config } <nl> DESTINATION $ { FMT_CMAKE_DIR } ) <nl> - install ( EXPORT $ { targets_export_name } DESTINATION $ { FMT_CMAKE_DIR } ) <nl> + install ( EXPORT $ { targets_export_name } DESTINATION $ { FMT_CMAKE_DIR } , <nl> + NAMESPACE fmt : : ) <nl> <nl> # Install the library and headers . <nl> install ( TARGETS $ { INSTALL_TARGETS } EXPORT $ { targets_export_name } <nl>
|
Add CMake namespace ()
|
fmtlib/fmt
|
e02aacc6343a3db64a5963dea51bb8b64a555bae
|
2018-03-13T17:03:18Z
|
mmm a / test / Sema / exhaustive_switch . swift <nl> ppp b / test / Sema / exhaustive_switch . swift <nl> enum SR11212Tests { <nl> case upair ( Int , Int ) <nl> } <nl> <nl> - func sr11212_content_untupled_pattern_tupled ( u : Untupled ) - > ( Int , Int ) { <nl> + func sr11212_content_untupled_pattern_tupled1 ( u : Untupled ) - > ( Int , Int ) { <nl> switch u { <nl> case . upair ( ( let x , let y ) ) : return ( x , y ) <nl> / / expected - warning @ - 1 { { a tuple pattern cannot match several associated values at once , implicitly tupling the associated values and trying to match that instead } } <nl> } <nl> } <nl> <nl> - func sr11212_content_untupled_pattern_tupled_nested ( u : Untupled ) - > ( Int , Int ) { <nl> + func sr11212_content_untupled_pattern_tupled2 ( u : Untupled ) - > ( Int , Int ) { <nl> switch u { <nl> case . upair ( let ( x , y ) ) : return ( x , y ) <nl> / / expected - warning @ - 1 { { a tuple pattern cannot match several associated values at once , implicitly tupling the associated values and trying to match that instead } } <nl> } <nl> } <nl> <nl> - func sr11212_content_untupled_pattern_untupled ( u : Untupled ) - > ( Int , Int ) { <nl> + func sr11212_content_untupled_pattern_tupled3 ( u : Untupled ) - > ( Int , Int ) { <nl> + switch u { <nl> + case let . upair ( ( x , y ) ) : return ( x , y ) <nl> + / / expected - warning @ - 1 { { a tuple pattern cannot match several associated values at once , implicitly tupling the associated values and trying to match that instead } } <nl> + } <nl> + } <nl> + <nl> + func sr11212_content_untupled_pattern_untupled1 ( u : Untupled ) - > ( Int , Int ) { <nl> switch u { <nl> case . upair ( let x , let y ) : return ( x , y ) <nl> } <nl> } <nl> <nl> - func sr11212_content_untupled_pattern_ambiguous ( u : Untupled ) - > ( Int , Int ) { <nl> + func sr11212_content_untupled_pattern_untupled2 ( u : Untupled ) - > ( Int , Int ) { <nl> + switch u { <nl> + case let . upair ( x , y ) : return ( x , y ) <nl> + } <nl> + } <nl> + <nl> + func sr11212_content_untupled_pattern_ambiguous1 ( u : Untupled ) - > ( Int , Int ) { <nl> switch u { <nl> case . upair ( let u_ ) : return u_ <nl> / / expected - warning @ - 1 { { cannot match several associated values at once , implicitly tupling the associated values and trying to match that instead } } <nl> } <nl> } <nl> <nl> + func sr11212_content_untupled_pattern_ambiguous2 ( u : Untupled ) - > ( Int , Int ) { <nl> + switch u { <nl> + case let . upair ( u_ ) : return u_ <nl> + / / expected - warning @ - 1 { { cannot match several associated values at once , implicitly tupling the associated values and trying to match that instead } } <nl> + } <nl> + } <nl> + <nl> enum Tupled { <nl> case tpair ( ( Int , Int ) ) <nl> } <nl> <nl> - func sr11212_content_tupled_pattern_tupled ( t : Tupled ) - > ( Int , Int ) { <nl> + func sr11212_content_tupled_pattern_tupled1 ( t : Tupled ) - > ( Int , Int ) { <nl> switch t { <nl> case . tpair ( ( let x , let y ) ) : return ( x , y ) <nl> } <nl> } <nl> <nl> - func sr11212_content_tupled_pattern_tupled_nested ( t : Tupled ) - > ( Int , Int ) { <nl> + func sr11212_content_tupled_pattern_tupled2 ( t : Tupled ) - > ( Int , Int ) { <nl> switch t { <nl> case . tpair ( let ( x , y ) ) : return ( x , y ) <nl> } <nl> } <nl> <nl> - func sr11212_content_tupled_pattern_untupled ( t : Tupled ) - > ( Int , Int ) { <nl> + func sr11212_content_tupled_pattern_tupled3 ( t : Tupled ) - > ( Int , Int ) { <nl> + switch t { <nl> + case let . 
tpair ( ( x , y ) ) : return ( x , y ) <nl> + } <nl> + } <nl> + <nl> + func sr11212_content_tupled_pattern_untupled1 ( t : Tupled ) - > ( Int , Int ) { <nl> switch t { <nl> case . tpair ( let x , let y ) : return ( x , y ) <nl> / / expected - warning @ - 1 { { the enum case has a single tuple as an associated value , but there are several patterns here , implicitly tupling the patterns and trying to match that instead } } <nl> } <nl> } <nl> <nl> - func sr11212_content_tupled_pattern_ambiguous ( t : Tupled ) - > ( Int , Int ) { <nl> + func sr11212_content_tupled_pattern_untupled2 ( t : Tupled ) - > ( Int , Int ) { <nl> + switch t { <nl> + case let . tpair ( x , y ) : return ( x , y ) <nl> + / / expected - warning @ - 1 { { the enum case has a single tuple as an associated value , but there are several patterns here , implicitly tupling the patterns and trying to match that instead } } <nl> + } <nl> + } <nl> + <nl> + func sr11212_content_tupled_pattern_ambiguous1 ( t : Tupled ) - > ( Int , Int ) { <nl> switch t { <nl> case . tpair ( let t_ ) : return t_ <nl> } <nl> } <nl> <nl> + func sr11212_content_tupled_pattern_ambiguous2 ( t : Tupled ) - > ( Int , Int ) { <nl> + switch t { <nl> + case let . tpair ( t_ ) : return t_ <nl> + } <nl> + } <nl> + <nl> enum Box < T > { <nl> case box ( T ) <nl> } <nl> <nl> - func sr11212_content_generic_pattern_tupled ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + func sr11212_content_generic_pattern_tupled1 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> switch b { <nl> case . box ( ( let x , let y ) ) : return ( x , y ) <nl> } <nl> } <nl> <nl> - func sr11212_content_generic_pattern_tupled_nested ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + func sr11212_content_generic_pattern_tupled2 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> switch b { <nl> case . box ( let ( x , y ) ) : return ( x , y ) <nl> } <nl> } <nl> <nl> - func sr11212_content_generic_pattern_untupled ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + func sr11212_content_generic_pattern_tupled3 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + switch b { <nl> + case let . box ( ( x , y ) ) : return ( x , y ) <nl> + } <nl> + } <nl> + <nl> + func sr11212_content_generic_pattern_untupled1 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> switch b { <nl> case . box ( let x , let y ) : return ( x , y ) <nl> / / expected - warning @ - 1 { { the enum case has a single tuple as an associated value , but there are several patterns here , implicitly tupling the patterns and trying to match that instead } } <nl> } <nl> } <nl> <nl> - func sr11212_content_generic_pattern_ambiguous ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + func sr11212_content_generic_pattern_untupled2 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + switch b { <nl> + case let . box ( x , y ) : return ( x , y ) <nl> + / / expected - warning @ - 1 { { the enum case has a single tuple as an associated value , but there are several patterns here , implicitly tupling the patterns and trying to match that instead } } <nl> + } <nl> + } <nl> + <nl> + func sr11212_content_generic_pattern_ambiguous1 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> switch b { <nl> case . box ( let b_ ) : return b_ <nl> } <nl> } <nl> <nl> + func sr11212_content_generic_pattern_ambiguous2 ( b : Box < ( Int , Int ) > ) - > ( Int , Int ) { <nl> + switch b { <nl> + case let . box ( b_ ) : return b_ <nl> + } <nl> + } <nl> + <nl> } / / end SR11212Tests <nl>
|
Add test cases for more permutations .
|
apple/swift
|
25e5b1587085fa25430e3015c56672bfe7c136ac
|
2019-08-12T16:58:22Z
|
mmm a / dok / tester . dok <nl> ppp b / dok / tester . dok <nl> Saves an error if ' ' val ~ = condition ' ' is not true with the optional message . <nl> <nl> Saves an error if ' ' max ( abs ( ta - tb ) ) < condition ' ' is not true with the optional message . <nl> <nl> + = = = = assertTensorNe ( ta , tb , condition [ , message ] ) = = = = <nl> + { { anchor : torch . Tester . assertTensorNe } } <nl> + <nl> + Saves an error if ' ' max ( abs ( ta - tb ) ) > = condition ' ' is not true with the optional message . <nl> + <nl> + = = = = assertTableEq ( ta , tb , condition [ , message ] ) = = = = <nl> + { { anchor : torch . Tester . assertTableEq } } <nl> + <nl> + Saves an error if ' ' max ( abs ( ta - tb ) ) < condition ' ' is not true with the optional message . <nl> + <nl> + = = = = assertTableNe ( ta , tb , condition [ , message ] ) = = = = <nl> + { { anchor : torch . Tester . assertTableNe } } <nl> + <nl> + Saves an error if ' ' max ( abs ( ta - tb ) ) > = condition ' ' is not true with the optional message . <nl> + <nl> + = = = = assertError ( f [ , message ] ) = = = = <nl> + { { anchor : torch . Tester . assertError } } <nl> + <nl> + Saves an error if calling the function f ( ) does not return an error , with the optional message . <nl> + <nl> = = = = run ( ) = = = = <nl> { { anchor : torch . Tester . run } } <nl> <nl> mmm a / test / test . lua <nl> ppp b / test / test . lua <nl> function torchtest . logical ( ) <nl> end <nl> <nl> function torchtest . TestAssertError ( ) <nl> - tester : assertError ( function ( ) error ( ' hello ' ) end , ' Error not caught ' ) <nl> + mytester : assertError ( function ( ) error ( ' hello ' ) end , ' Error not caught ' ) <nl> end <nl> <nl> function torch . test ( ) <nl>
|
Document the three new asserts ( ) , and add unit test for TestAssertError ( )
|
pytorch/pytorch
|
d9a46534aa56b504ca9e1e2a1f3b4c7bb7bd2778
|
2012-12-07T20:10:29Z
|
mmm a / tensorflow / python / ops / nn_grad . py <nl> ppp b / tensorflow / python / ops / nn_grad . py <nl> def _BiasAddGradGrad ( op , received_grad ) : <nl> rank = array_ops . rank ( op . inputs [ 0 ] ) <nl> bias_shape = array_ops . shape ( received_grad ) <nl> <nl> - if data_format = = " NCHW " : <nl> + if data_format = = b " NCHW " : <nl> expanded_shape = array_ops . concat ( <nl> 0 , <nl> [ array_ops . ones_like ( shape [ : - 3 ] ) , bias_shape , array_ops . ones_like ( shape [ - 2 : ] ) ] <nl>
|
Make sure data format string is compared in ASCII . ( )
|
tensorflow/tensorflow
|
4336640da90998dd8c7e29c4d14e227de4291340
|
2016-10-06T00:25:58Z
|
mmm a / ELECTRON_VERSION <nl> ppp b / ELECTRON_VERSION <nl> @ @ - 1 + 1 @ @ <nl> - 11 . 0 . 0 - nightly . 20200821 <nl> \ No newline at end of file <nl> + 11 . 0 . 0 - nightly . 20200822 <nl> \ No newline at end of file <nl> mmm a / package . json <nl> ppp b / package . json <nl> <nl> { <nl> " name " : " electron " , <nl> - " version " : " 11 . 0 . 0 - nightly . 20200821 " , <nl> + " version " : " 11 . 0 . 0 - nightly . 20200822 " , <nl> " repository " : " https : / / github . com / electron / electron " , <nl> " description " : " Build cross platform desktop apps with JavaScript , HTML , and CSS " , <nl> " devDependencies " : { <nl> mmm a / shell / browser / resources / win / electron . rc <nl> ppp b / shell / browser / resources / win / electron . rc <nl> END <nl> / / <nl> <nl> VS_VERSION_INFO VERSIONINFO <nl> - FILEVERSION 11 , 0 , 0 , 20200821 <nl> - PRODUCTVERSION 11 , 0 , 0 , 20200821 <nl> + FILEVERSION 11 , 0 , 0 , 20200822 <nl> + PRODUCTVERSION 11 , 0 , 0 , 20200822 <nl> FILEFLAGSMASK 0x3fL <nl> # ifdef _DEBUG <nl> FILEFLAGS 0x1L <nl>
|
Bump v11 . 0 . 0 - nightly . 20200822
|
electron/electron
|
bda5ec519b994841530a428e045672724ba718ae
|
2020-08-22T18:29:05Z
|
mmm a / build / deps / github_hashes / facebook / fbthrift - rev . txt <nl> ppp b / build / deps / github_hashes / facebook / fbthrift - rev . txt <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 588ffede6bafb3a55e97dbe39460acc8521329f5 <nl> + Subproject commit b42503196a899600216e013c69b7d02c29926a11 <nl> mmm a / build / deps / github_hashes / facebook / wangle - rev . txt <nl> ppp b / build / deps / github_hashes / facebook / wangle - rev . txt <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 65fe3a03d48b804e10d6d12ef991e58700151eb8 <nl> + Subproject commit 4e05aa3f7fc1acb4b74822d19589232160e8bcba <nl>
|
Updating submodules
|
facebook/watchman
|
63244cfcab6f5869629679cd39d25f2f62f97336
|
2020-06-12T06:24:05Z
|
mmm a / lib / IDE / ExprContextAnalysis . cpp <nl> ppp b / lib / IDE / ExprContextAnalysis . cpp <nl> <nl> # include " swift / AST / Decl . h " <nl> # include " swift / AST / DeclContext . h " <nl> # include " swift / AST / Expr . h " <nl> + # include " swift / AST / Initializer . h " <nl> # include " swift / AST / LazyResolver . h " <nl> # include " swift / AST / Module . h " <nl> # include " swift / AST / Pattern . h " <nl> class ExprContextAnalyzer { <nl> } else <nl> return false ; <nl> } ) ; <nl> - DC - > walkContext ( Finder ) ; <nl> + <nl> + / / For ' Initializer ' context , we need to look into its parent because it <nl> + / / might constrain the initializer ' s type . <nl> + auto analyzeDC = isa < Initializer > ( DC ) ? DC - > getParent ( ) : DC ; <nl> + analyzeDC - > walkContext ( Finder ) ; <nl> <nl> if ( Finder . Ancestors . empty ( ) ) <nl> return ; <nl> mmm a / test / IDE / complete_in_accessors . swift <nl> ppp b / test / IDE / complete_in_accessors . swift <nl> func returnsInt ( ) - > Int { } <nl> <nl> / / WITH_MEMBER_DECLS_INIT : Begin completions <nl> / / WITH_MEMBER_DECLS_INIT - DAG : Decl [ Struct ] / CurrModule : FooStruct [ # FooStruct # ] { { ; name = . + $ } } <nl> - / / WITH_MEMBER_DECLS_INIT - DAG : Decl [ FreeFunction ] / CurrModule : returnsInt ( ) [ # Int # ] { { ; name = . + $ } } <nl> + / / WITH_MEMBER_DECLS_INIT - DAG : Decl [ FreeFunction ] / CurrModule / TypeRelation [ Identical ] : returnsInt ( ) [ # Int # ] { { ; name = . + $ } } <nl> / / WITH_MEMBER_DECLS_INIT - DAG : Decl [ InstanceMethod ] / CurrNominal : instanceFunc ( { # self : MemberAccessors # } ) [ # ( Int ) - > Float # ] { { ; name = . + $ } } <nl> / / WITH_MEMBER_DECLS_INIT : End completions <nl> <nl> mmm a / test / IDE / complete_unresolved_members . swift <nl> ppp b / test / IDE / complete_unresolved_members . swift <nl> <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = GENERICPARAM_20 | % FileCheck % s - check - prefix = GENERICPARAM_1 <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = GENERICPARAM_21 | % FileCheck % s - check - prefix = GENERICPARAM_1 <nl> <nl> + / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = DECL_MEMBER_INIT_1 | % FileCheck % s - check - prefix = UNRESOLVED_3 <nl> + <nl> enum SomeEnum1 { <nl> case South <nl> case North <nl> func testingGenericParam2 < X > ( obj : C < X > ) { <nl> obj . t = . # ^ GENERICPARAM_21 ^ # <nl> / / Same as GENERICPARAM_1 . <nl> } <nl> + <nl> + struct TestingStruct { <nl> + var value : SomeEnum1 = . # ^ DECL_MEMBER_INIT_1 ^ # <nl> + } <nl>
|
[ CodeCompletion ] Analyze the parent of initializer
|
apple/swift
|
fd9b6a12de02de4fe4ffb7191f174ccaaba83da6
|
2019-02-19T22:22:24Z
|
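The fix in the commit above redirects analysis from an Initializer context to its parent, because the parent (for example, the declared type of the pattern being initialized) is what constrains the initializer's expected type. A minimal sketch of that dispatch; DeclContext here is an illustrative stand-in, not the actual Swift AST type:

// Stand-in for swift::DeclContext; only the fields this sketch needs.
struct DeclContext {
  bool is_initializer = false;
  DeclContext* parent = nullptr;
};

// Completion inside an initializer walks the parent context, since the
// parent supplies the type the initializer expression must satisfy.
DeclContext* contextToAnalyze(DeclContext* dc) {
  return (dc->is_initializer && dc->parent != nullptr) ? dc->parent : dc;
}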
mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> tf_py_test ( <nl> size = " small " , <nl> srcs = [ " losses_test . py " ] , <nl> additional_deps = [ <nl> - " / / tensorflow / contrib / losses : losses_py " , <nl> " / / tensorflow / python / ops / losses " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client_testlib " , <nl> mmm a / tensorflow / python / kernel_tests / losses_test . py <nl> ppp b / tensorflow / python / kernel_tests / losses_test . py <nl> <nl> <nl> import numpy as np <nl> <nl> - from tensorflow . contrib . losses . python . losses import loss_ops <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import errors_impl <nl> def setUp ( self ) : <nl> <nl> def testUnweighted ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> raw_losses = self . _raw_losses <nl> shape = self . _shape <nl> unweighted_losses = ( losses . compute_weighted_loss ( raw_losses ) , <nl> def testUnweighted ( self ) : <nl> raw_losses , weights = np . ones ( shape = shape [ 0 : 2 ] ) ) , <nl> losses . compute_weighted_loss ( <nl> raw_losses , weights = np . ones ( shape = shape ) ) ) <nl> - self . assertEqual ( 5 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 5 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> for unweighted_loss in unweighted_losses : <nl> self . assertAllClose ( self . _unweighted_loss , unweighted_loss . eval ( ) ) <nl> <nl> def testScalarWeight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weight = 17 . 0 <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weight ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> np . mean ( weight * self . _raw_losses ) , weighted_loss . eval ( ) ) <nl> def testScalarWeight ( self ) : <nl> # ` loss17 ` should be the same as ` testScalarWeight ` . <nl> def testScalar1DWeight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> loss1 = losses . compute_weighted_loss ( self . _raw_losses , weights = ( 1 . 0 , ) ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> weight = 17 . 0 <nl> loss17 = losses . compute_weighted_loss ( self . _raw_losses , weights = ( weight , ) ) <nl> - self . assertEqual ( 2 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 2 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( self . _unweighted_loss * self . _shape [ 0 ] , <nl> loss1 . eval ( ) ) <nl> def testInvalid4DWeight ( self ) : <nl> <nl> def test3Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3 = ( 17 . 0 , 5 . 
0 , 2 . 0 ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> weights3x1x1 = np . reshape ( weights3 , ( 3 , 1 , 1 ) ) <nl> self . assertAllClose ( <nl> def test3Weight ( self ) : <nl> <nl> def test3x1Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3x1 = ( <nl> ( 17 . 0 , ) , <nl> ( 5 . 0 , ) , <nl> ( 2 . 0 , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3x1 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> weights3x1x1 = np . reshape ( weights3x1 , ( 3 , 1 , 1 ) ) <nl> self . assertAllClose ( <nl> def test3x1Weight ( self ) : <nl> # TODO ( ptucker ) : Bug : this should be the same as ` test3x1Weight ` . <nl> def test3x1x1Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3x1x1 = ( <nl> ( ( 17 . 0 , ) , ) , <nl> ( ( 5 . 0 , ) , ) , <nl> ( ( 2 . 0 , ) , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3x1x1 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> np . mean ( weights3x1x1 * self . _raw_losses ) * self . _shape [ 1 ] , <nl> def test3x1x1Weight ( self ) : <nl> <nl> def test3x2Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3x2 = ( <nl> ( 17 . 0 , 3 . 0 ) , <nl> ( 5 . 0 , 31 . 0 ) , <nl> ( 2 . 0 , 7 . 0 ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3x2 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> weights3x2x1 = np . reshape ( weights3x2 , ( 3 , 2 , 1 ) ) <nl> self . assertAllClose ( <nl> def test3x2Weight ( self ) : <nl> # summed across dim 0 . <nl> def test1x2Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights1x2 = ( ( <nl> 17 . 0 , <nl> 3 . 0 , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights1x2 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> weights1x2x1 = np . reshape ( weights1x2 , ( 1 , 2 , 1 ) ) <nl> self . assertAllClose ( <nl> def test1x2Weight ( self ) : <nl> # summed across dim 0 . <nl> def test1x2x1Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . 
get_losses ( ) ) ) <nl> weights1x2x1 = ( ( <nl> ( 17 . 0 , ) , <nl> ( 3 . 0 , ) , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights1x2x1 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> np . mean ( weights1x2x1 * self . _raw_losses ) * self . _shape [ 0 ] , <nl> def test1x2x1Weight ( self ) : <nl> # summed across dims 0 & 1 . <nl> def test1x1x4Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights1x1x4 = ( ( ( 17 . 0 , 13 . 0 , 2 . 0 , 5 . 0 ) , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights1x1x4 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> shape = self . _shape <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> def test1x1x4Weight ( self ) : <nl> <nl> def test3x2x1Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3x2x1 = ( <nl> ( ( 17 . 0 , ) , ( 3 . 0 , ) ) , <nl> ( ( 5 . 0 , ) , ( 31 . 0 , ) ) , <nl> ( ( 2 . 0 , ) , ( 7 . 0 , ) ) , <nl> ) <nl> - weighted_loss = loss_ops . compute_weighted_loss ( <nl> + weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3x2x1 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> np . mean ( weights3x2x1 * self . _raw_losses ) , <nl> def test3x2x1Weight ( self ) : <nl> # summed across dim 1 . <nl> def test3x1x4Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3x1x4 = ( <nl> ( ( 17 . 0 , 13 . 0 , 2 . 0 , 5 . 0 ) , ) , <nl> ( ( 5 . 0 , 31 . 0 , 17 . 0 , 5 . 0 ) , ) , <nl> ( ( 7 . 0 , 3 . 0 , 11 . 0 , 5 . 0 ) , ) , <nl> ) <nl> - weighted_loss = loss_ops . compute_weighted_loss ( <nl> + weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3x1x4 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> np . mean ( weights3x1x4 * self . _raw_losses ) * self . _shape [ 1 ] , <nl> def test3x1x4Weight ( self ) : <nl> # summed across dim 0 . <nl> def test1x2x4Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights1x2x4 = ( ( <nl> ( 17 . 0 , 13 . 0 , 2 . 0 , 5 . 0 ) , <nl> ( 3 . 0 , 13 . 0 , 11 . 0 , 2 . 0 ) , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights1x2x4 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . 
assertAllClose ( <nl> np . mean ( weights1x2x4 * self . _raw_losses ) * self . _shape [ 0 ] , <nl> def test1x2x4Weight ( self ) : <nl> <nl> def test3x2x4Weight ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> - self . assertEqual ( 0 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 0 , len ( util . get_losses ( ) ) ) <nl> weights3x2x4 = ( <nl> ( <nl> ( 17 . 0 , 13 . 0 , 2 . 0 , 5 . 0 ) , <nl> def test3x2x4Weight ( self ) : <nl> ( 13 . 0 , 11 . 0 , 1 . 0 , 7 . 0 ) , ) , ) <nl> weighted_loss = losses . compute_weighted_loss ( <nl> self . _raw_losses , weights = weights3x2x4 ) <nl> - self . assertEqual ( 1 , len ( loss_ops . get_losses ( ) ) ) <nl> + self . assertEqual ( 1 , len ( util . get_losses ( ) ) ) <nl> with self . test_session ( ) : <nl> self . assertAllClose ( <nl> np . mean ( weights3x2x4 * self . _raw_losses ) , weighted_loss . eval ( ) ) <nl>
|
losses_test should test the core functions , not the contrib ones
|
tensorflow/tensorflow
|
c37309ce68cdf4520988952be6244b3429f17a19
|
2016-12-27T22:08:16Z
|
mmm a / jstests / core / profile_getmore . js <nl> ppp b / jstests / core / profile_getmore . js <nl> <nl> var cursor = coll . find ( { a : { $ gt : 0 } } ) . sort ( { a : 1 } ) . batchSize ( 2 ) ; <nl> cursor . next ( ) ; / / Perform initial query and consume first of 2 docs returned . <nl> <nl> - var cursorId = getLatestProfilerEntry ( testDB ) . cursorid ; / / Save cursorid from find . <nl> + var cursorId = <nl> + getLatestProfilerEntry ( testDB , { op : " query " } ) . cursorid ; / / Save cursorid from find . <nl> <nl> cursor . next ( ) ; / / Consume second of 2 docs from initial query . <nl> cursor . next ( ) ; / / getMore performed , leaving open cursor . <nl> <nl> - var profileObj = getLatestProfilerEntry ( testDB ) ; <nl> + var profileObj = getLatestProfilerEntry ( testDB , { op : " getmore " } ) ; <nl> <nl> assert . eq ( profileObj . ns , coll . getFullName ( ) , tojson ( profileObj ) ) ; <nl> assert . eq ( profileObj . op , " getmore " , tojson ( profileObj ) ) ; <nl> <nl> cursor . next ( ) ; / / Consume second of 2 docs from initial query . <nl> cursor . next ( ) ; / / getMore performed , leaving open cursor . <nl> <nl> - profileObj = getLatestProfilerEntry ( testDB ) ; <nl> + profileObj = getLatestProfilerEntry ( testDB , { op : " getmore " } ) ; <nl> <nl> assert . eq ( profileObj . hasSortStage , true , tojson ( profileObj ) ) ; <nl> <nl> <nl> cursor . next ( ) ; / / Perform initial query and consume first of 3 docs returned . <nl> cursor . itcount ( ) ; / / Exhaust the cursor . <nl> <nl> - profileObj = getLatestProfilerEntry ( testDB ) ; <nl> + profileObj = getLatestProfilerEntry ( testDB , { op : " getmore " } ) ; <nl> <nl> assert ( profileObj . hasOwnProperty ( " cursorid " ) , <nl> tojson ( profileObj ) ) ; / / cursorid should always be present on getMore . <nl> <nl> assert . commandWorked ( coll . createIndex ( { a : 1 } ) ) ; <nl> <nl> var cursor = coll . aggregate ( [ { $ match : { a : { $ gte : 0 } } } ] , { cursor : { batchSize : 0 } , hint : { a : 1 } } ) ; <nl> - var cursorId = getLatestProfilerEntry ( testDB ) . cursorid ; <nl> + var cursorId = getLatestProfilerEntry ( testDB , { " command . aggregate " : coll . getName ( ) } ) . cursorid ; <nl> assert . neq ( 0 , cursorId ) ; <nl> <nl> cursor . next ( ) ; / / Consume the result set . <nl> <nl> - profileObj = getLatestProfilerEntry ( testDB ) ; <nl> + profileObj = getLatestProfilerEntry ( testDB , { op : " getmore " } ) ; <nl> <nl> assert . eq ( profileObj . ns , coll . getFullName ( ) , tojson ( profileObj ) ) ; <nl> assert . eq ( profileObj . op , " getmore " , tojson ( profileObj ) ) ; <nl> <nl> cursor = coll . find ( docToInsert ) . comment ( " profile_getmore " ) . batchSize ( 2 ) ; <nl> assert . eq ( cursor . itcount ( ) , 4 ) ; / / Consume result set and trigger getMore . <nl> <nl> - profileObj = getLatestProfilerEntry ( testDB ) ; <nl> + profileObj = getLatestProfilerEntry ( testDB , { op : " getmore " } ) ; <nl> assert . eq ( ( typeof profileObj . originatingCommand . $ truncated ) , " string " , tojson ( profileObj ) ) ; <nl> assert . eq ( profileObj . originatingCommand . comment , " profile_getmore " , tojson ( profileObj ) ) ; <nl> } ) ( ) ; <nl>
|
SERVER - 31366 Use filters in getLatestProfilerEntry ( ) to make profile_getmore . js more robust .
|
mongodb/mongo
|
aff9c7e6afe0b78e91a724fe5e53ea4717d7d813
|
2017-10-06T13:42:26Z
|
mmm a / jstests / replsets / retryable_writes_direct_write_to_config_transactions . js <nl> ppp b / jstests / replsets / retryable_writes_direct_write_to_config_transactions . js <nl> <nl> / / the session to not work anymore for retryable writes for that session , but not for any other <nl> const lsidManual = config . transactions . find ( { ' _id . id ' : lsid1 } ) . toArray ( ) [ 0 ] . _id ; <nl> assert . writeOK ( config . transactions . remove ( { ' _id . id ' : lsid1 } ) ) ; <nl> - assert . writeOK ( config . transactions . insert ( { _id : lsidManual } ) ) ; <nl> + <nl> + / / Direct writes to the transactions table mark the session as killed and then asynchronously <nl> + / / complete the cleanup process . Because of this , by the time the insert below starts , it is <nl> + / / possible that the cleanup thread from the remove above has not yet completed and so the write <nl> + / / could fail with ConflictingOperationInProgress . <nl> + assert . commandWorkedOrFailedWithCode ( config . transactions . insert ( { _id : lsidManual } ) , <nl> + ErrorCodes . ConflictingOperationInProgress ) ; <nl> <nl> const lsid3 = UUID ( ) ; <nl> assert . commandWorked ( db . runCommand ( { <nl> mmm a / src / mongo / db / session . cpp <nl> ppp b / src / mongo / db / session . cpp <nl> OperationContext * Session : : currentOperation ( ) const { <nl> <nl> Session : : KillToken Session : : kill ( WithLock sessionCatalogLock , ErrorCodes : : Error reason ) { <nl> stdx : : lock_guard < stdx : : mutex > lg ( _mutex ) ; <nl> - uassert ( ErrorCodes : : ConflictingOperationInProgress , " Session already killed " , ! _killRequested ) ; <nl> + uassert ( ErrorCodes : : ConflictingOperationInProgress , <nl> + str : : stream ( ) < < " Session " < < getSessionId ( ) . getId ( ) <nl> + < < " is already killed and is in the process of being cleaned up " , <nl> + ! _killRequested ) ; <nl> _killRequested = true ; <nl> <nl> / / For currently checked - out sessions , interrupt the operation context so that the current owner <nl> mmm a / src / mongo / db / session_catalog . h <nl> ppp b / src / mongo / db / session_catalog . h <nl> class OperationContextSession { <nl> <nl> public : <nl> OperationContextSession ( OperationContext * opCtx , bool checkOutSession ) ; <nl> - <nl> ~ OperationContextSession ( ) ; <nl> <nl> / * * <nl>
|
SERVER - 38058 Make retryable_writes_direct_write_to_config_transactions . js expect ConflictingOperationInProgress
|
mongodb/mongo
|
2f640ed748b861a242b3ad153e821f6b459b2511
|
2018-11-13T14:07:24Z
|
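The pattern adopted in the test above, tolerating one specific transient error code because cleanup runs asynchronously, generalizes beyond MongoDB. A minimal C++ sketch, assuming a simplified ErrorCode enum in place of the real error machinery:

#include <functional>
#include <stdexcept>

enum class ErrorCode { OK, ConflictingOperationInProgress, Other };

// Run an operation that races with a background cleanup thread: accept
// success or the one error code the race can legitimately produce, and
// fail on anything else.
void assertWorkedOrFailedWithCode(const std::function<ErrorCode()>& op,
                                  ErrorCode allowed) {
  const ErrorCode result = op();
  if (result != ErrorCode::OK && result != allowed) {
    throw std::runtime_error("operation failed with an unexpected error code");
  }
}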
mmm a / folly / experimental / observer / Observable - inl . h <nl> ppp b / folly / experimental / observer / Observable - inl . h <nl> class ObserverCreatorContext { <nl> <nl> } / / namespace detail <nl> <nl> + / / This master shared_ptr allows grabbing derived weak_ptrs , pointing to the <nl> + / / the same Context object , but using a separate reference count . Primary <nl> + / / shared_ptr destructor then blocks until all shared_ptrs obtained from <nl> + / / derived weak_ptrs are released . <nl> + template < typename Observable , typename Traits > <nl> + class ObserverCreator < Observable , Traits > : : ContextPrimaryPtr { <nl> + public : <nl> + explicit ContextPrimaryPtr ( std : : shared_ptr < Context > context ) <nl> + : contextPrimary_ ( std : : move ( context ) ) , <nl> + context_ ( <nl> + contextPrimary_ . get ( ) , <nl> + [ destroyBaton = destroyBaton_ ] ( Context * ) { <nl> + destroyBaton - > post ( ) ; <nl> + } ) { } <nl> + ~ ContextPrimaryPtr ( ) { <nl> + if ( context_ ) { <nl> + context_ . reset ( ) ; <nl> + destroyBaton_ - > wait ( ) ; <nl> + } <nl> + } <nl> + ContextPrimaryPtr ( const ContextPrimaryPtr & ) = delete ; <nl> + ContextPrimaryPtr ( ContextPrimaryPtr & & ) = default ; <nl> + ContextPrimaryPtr & operator = ( const ContextPrimaryPtr & ) = delete ; <nl> + ContextPrimaryPtr & operator = ( ContextPrimaryPtr & & ) = default ; <nl> + <nl> + Context * operator - > ( ) const { <nl> + return contextPrimary_ . get ( ) ; <nl> + } <nl> + <nl> + std : : weak_ptr < Context > get_weak ( ) { <nl> + return context_ ; <nl> + } <nl> + <nl> + private : <nl> + std : : shared_ptr < folly : : Baton < > > destroyBaton_ { <nl> + std : : make_shared < folly : : Baton < > > ( ) } ; <nl> + std : : shared_ptr < Context > contextPrimary_ ; <nl> + std : : shared_ptr < Context > context_ ; <nl> + } ; <nl> + <nl> template < typename Observable , typename Traits > <nl> template < typename . . . Args > <nl> ObserverCreator < Observable , Traits > : : ObserverCreator ( Args & & . . . args ) <nl> ObserverCreator < Observable , Traits > : : ObserverCreator ( Args & & . . . args ) <nl> template < typename Observable , typename Traits > <nl> Observer < typename ObserverCreator < Observable , Traits > : : T > <nl> ObserverCreator < Observable , Traits > : : getObserver ( ) & & { <nl> - / / This master shared_ptr allows grabbing derived weak_ptrs , pointing to the <nl> - / / the same Context object , but using a separate reference count . Master <nl> - / / shared_ptr destructor then blocks until all shared_ptrs obtained from <nl> - / / derived weak_ptrs are released . <nl> - class ContextMasterPointer { <nl> - public : <nl> - explicit ContextMasterPointer ( std : : shared_ptr < Context > context ) <nl> - : contextMaster_ ( std : : move ( context ) ) , <nl> - context_ ( <nl> - contextMaster_ . get ( ) , <nl> - [ destroyBaton = destroyBaton_ ] ( Context * ) { <nl> - destroyBaton - > post ( ) ; <nl> - } ) { } <nl> - ~ ContextMasterPointer ( ) { <nl> - if ( context_ ) { <nl> - context_ . reset ( ) ; <nl> - destroyBaton_ - > wait ( ) ; <nl> - } <nl> - } <nl> - ContextMasterPointer ( const ContextMasterPointer & ) = delete ; <nl> - ContextMasterPointer ( ContextMasterPointer & & ) = default ; <nl> - ContextMasterPointer & operator = ( const ContextMasterPointer & ) = delete ; <nl> - ContextMasterPointer & operator = ( ContextMasterPointer & & ) = default ; <nl> - <nl> - Context * operator - > ( ) const { <nl> - return contextMaster_ . 
get ( ) ; <nl> - } <nl> - <nl> - std : : weak_ptr < Context > get_weak ( ) { <nl> - return context_ ; <nl> - } <nl> - <nl> - private : <nl> - std : : shared_ptr < folly : : Baton < > > destroyBaton_ { <nl> - std : : make_shared < folly : : Baton < > > ( ) } ; <nl> - std : : shared_ptr < Context > contextMaster_ ; <nl> - std : : shared_ptr < Context > context_ ; <nl> - } ; <nl> / / We want to make sure that Context can only be destroyed when Core is <nl> / / destroyed . So we have to avoid the situation when subscribe callback is <nl> / / locking Context shared_ptr and remains the last to release it . <nl> / / We solve this by having Core hold the master shared_ptr and subscription <nl> / / callback gets derived weak_ptr . <nl> - ContextMasterPointer contextMaster ( context_ ) ; <nl> - auto contextWeak = contextMaster . get_weak ( ) ; <nl> + ContextPrimaryPtr contextPrimary ( context_ ) ; <nl> + auto contextWeak = contextPrimary . get_weak ( ) ; <nl> auto observer = makeObserver ( <nl> - [ context = std : : move ( contextMaster ) ] ( ) { return context - > get ( ) ; } ) ; <nl> + [ context = std : : move ( contextPrimary ) ] ( ) { return context - > get ( ) ; } ) ; <nl> <nl> context_ - > setCore ( observer . core_ ) ; <nl> context_ - > subscribe ( [ contextWeak = std : : move ( contextWeak ) ] { <nl> mmm a / folly / experimental / observer / Observable . h <nl> ppp b / folly / experimental / observer / Observable . h <nl> class ObserverCreator { <nl> <nl> private : <nl> using Context = detail : : ObserverCreatorContext < Observable , Traits > ; <nl> + class ContextPrimaryPtr ; <nl> <nl> std : : shared_ptr < Context > context_ ; <nl> } ; <nl>
|
Fix builds on windows
|
facebook/folly
|
ef52bd9a745de3859a9938c37da0ee15450d40ea
|
2020-09-22T15:32:03Z
|
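The ContextPrimaryPtr comment in the commit above describes a reusable idiom: a primary shared_ptr hands out weak_ptrs on a separate reference count, and its destructor blocks until every shared_ptr obtained from those weak_ptrs has been released. A standalone sketch of the same idea, with std::promise / std::future standing in for folly::Baton:

#include <future>
#include <memory>

template <typename T>
class PrimaryPtr {
 public:
  explicit PrimaryPtr(std::shared_ptr<T> object)
      : done_(std::make_shared<std::promise<void>>()),
        primary_(std::move(object)),
        // Separate control block around the same object; the "deleter"
        // only signals completion and never frees T (primary_ owns it).
        derived_(primary_.get(),
                 [done = done_](T*) { done->set_value(); }) {}

  PrimaryPtr(const PrimaryPtr&) = delete;
  PrimaryPtr& operator=(const PrimaryPtr&) = delete;
  PrimaryPtr(PrimaryPtr&&) = default;
  PrimaryPtr& operator=(PrimaryPtr&&) = default;

  ~PrimaryPtr() {
    if (derived_) {
      auto all_released = done_->get_future();
      derived_.reset();      // drop our own derived reference
      all_released.wait();   // block until every locked weak_ptr is gone
    }
  }

  T* operator->() const { return primary_.get(); }

  // Consumers lock this; each lock holds the derived reference count.
  std::weak_ptr<T> get_weak() const { return derived_; }

 private:
  std::shared_ptr<std::promise<void>> done_;
  std::shared_ptr<T> primary_;
  std::shared_ptr<T> derived_;
};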
mmm a / tensorflow / python / kernel_tests / variable_scope_test . py <nl> ppp b / tensorflow / python / kernel_tests / variable_scope_test . py <nl> def testVarScopeNameScope ( self ) : <nl> with tf . name_scope ( " scope2 " ) as sc2 : <nl> self . assertEqual ( sc2 , " scope4 / scope2 / " ) <nl> <nl> + def testVarScopeObjectReuse ( self ) : <nl> + with self . test_session ( ) : <nl> + vs = None <nl> + with tf . variable_scope ( " jump " , reuse = True ) as scope : <nl> + vs = scope <nl> + <nl> + with tf . variable_scope ( vs ) as jump : <nl> + self . assertTrue ( jump . reuse ) <nl> + <nl> + with tf . variable_scope ( vs , reuse = True ) as jump_reuse : <nl> + self . assertTrue ( jump_reuse . reuse ) <nl> + <nl> + with tf . variable_scope ( vs , reuse = False ) as jump_no_reuse : <nl> + self . assertFalse ( jump_no_reuse . reuse ) <nl> + <nl> + with tf . variable_scope ( " jump " , reuse = False ) as scope : <nl> + vs = scope <nl> + <nl> + with tf . variable_scope ( vs ) as jump : <nl> + self . assertFalse ( jump . reuse ) <nl> + <nl> + with tf . variable_scope ( vs , reuse = True ) as jump_reuse : <nl> + self . assertTrue ( jump_reuse . reuse ) <nl> + <nl> + with tf . variable_scope ( vs , reuse = False ) as jump_no_reuse : <nl> + self . assertFalse ( jump_no_reuse . reuse ) <nl> + <nl> def testVarOpScope ( self ) : <nl> with self . test_session ( ) : <nl> with tf . name_scope ( " scope1 " ) : <nl> mmm a / tensorflow / python / ops / variable_scope . py <nl> ppp b / tensorflow / python / ops / variable_scope . py <nl> def _pure_variable_scope ( name_or_scope , reuse = None , initializer = None , <nl> get_variable_scope ( ) # Ensure that a default exists , then get a pointer . <nl> # Get the reference to the collection as we want to modify it in place . <nl> default_varscope = ops . get_collection_ref ( _VARSCOPE_KEY ) <nl> + old = default_varscope [ 0 ] <nl> try : <nl> - old = default_varscope [ 0 ] <nl> - reuse = reuse or old . reuse # Re - using is inherited by sub - scopes . <nl> if isinstance ( name_or_scope , VariableScope ) : <nl> name_scope = name_or_scope . _name_scope # pylint : disable = protected - access <nl> # Handler for the case when we jump to a shared scope . <nl> def _pure_variable_scope ( name_or_scope , reuse = None , initializer = None , <nl> # a copy of the provided shared scope , possibly with changed reuse <nl> # and initializer , if the user requested this . <nl> default_varscope [ 0 ] = VariableScope ( <nl> - reuse , name = name_or_scope . name , <nl> + name_or_scope . reuse if reuse is None else reuse , <nl> + name = name_or_scope . name , <nl> initializer = name_or_scope . initializer , <nl> regularizer = name_or_scope . regularizer , <nl> caching_device = name_or_scope . caching_device , <nl> def _pure_variable_scope ( name_or_scope , reuse = None , initializer = None , <nl> # VariableScope with name extended by the provided one , and inherited <nl> # reuse and initializer ( except if the user provided values to set ) . <nl> new_name = old . name + " / " + name_or_scope if old . name else name_or_scope <nl> + reuse = reuse or old . reuse # Re - using is inherited by sub - scopes . <nl> default_varscope [ 0 ] = VariableScope ( <nl> reuse , name = new_name , <nl> initializer = old . initializer , <nl>
|
Update behavior of _pure_variable_scope so that , when passed a VariableScope object , it respects the reuse value of the object unless the reuse argument is explicitly set ( not None ) .
|
tensorflow/tensorflow
|
641bd0d0191f4f603db44789d239dfc2507a9bb6
|
2016-04-20T23:21:39Z
|
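The core of the fix above is tri-state argument handling: an explicit reuse value overrides the scope object's setting, while an omitted one (None) inherits it. The old expression, reuse = reuse or old.reuse, could never turn reuse off. A hedged C++ analogue of the corrected logic, using std::optional:

#include <optional>

struct VariableScope { bool reuse; };

// Explicit true/false wins; std::nullopt means "not provided" and
// inherits the scope object's value. The buggy version effectively
// computed (reuse || scope.reuse), so reuse=false could not override
// a scope whose reuse was already true.
bool resolveReuse(const VariableScope& scope, std::optional<bool> reuse) {
  return reuse.value_or(scope.reuse);
}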
mmm a / project / BuildDependencies / scripts / get_mingw_env . txt <nl> ppp b / project / BuildDependencies / scripts / get_mingw_env . txt <nl> libiconv - 1 . 13 . 1 - 1 - mingw32 - dev . tar . lzma $ KODI_MIRROR / build - deps <nl> make - 3 . 82 - 5 - mingw32 - bin . tar . lzma $ KODI_MIRROR / build - deps / win32 / mingw - msys / http : / / sourceforge . net / projects / mingw / files / MinGW / Extension / make / make - 3 . 82 - mingw32 / <nl> pkg - config - lite - 0 . 28 - 1_bin - win32 . zip $ KODI_MIRROR / build - deps / win32 / mingw - msys / http : / / sourceforge . net / projects / pkgconfiglite / files / http : / / sourceforge . net / projects / pkgconfiglite / files / <nl> gnutls - 3 . 2 . 3 - mingw32 . zip $ KODI_MIRROR / build - deps / win32 / mingw - msys / ftp : / / ftp . gnutls . org / gcrypt / gnutls / w32 / <nl> + libdcadec - git - 396e75652 - win32 . zip $ KODI_MIRROR / build - deps / win32 / mingw - msys / https : / / github . com / foo86 / dcadec / <nl> mmm a / project / Win32BuildSetup / buildffmpeg . sh <nl> ppp b / project / Win32BuildSetup / buildffmpeg . sh <nl> OPTIONS = " <nl> - - enable - runtime - cpudetect \ <nl> - - enable - dxva2 \ <nl> - - cpu = i686 \ <nl> mmmenable - gnutls " <nl> + - - enable - gnutls \ <nl> + - - enable - libdcadec " <nl> <nl> echo configuring $ LIBNAME <nl> . / configure - - extra - cflags = " - fno - common - I / xbmc / lib / win32 / ffmpeg_dxva2 - DNDEBUG " - - extra - ldflags = " - L / xbmc / system / players / dvdplayer " $ { OPTIONS } & & <nl> cp . libs / swscale - * . dll / xbmc / system / players / dvdplayer / <nl> # remove the bgprocessfile for signaling the process end <nl> if [ - f $ BGPROCESSFILE ] ; then <nl> rm $ BGPROCESSFILE <nl> - fi <nl> \ No newline at end of file <nl> + fi <nl>
|
Added prebuilt libdcadec from the same source as the Linux version
|
xbmc/xbmc
|
8936a6c57875d676eba6a1f6e579380539881fbd
|
2015-09-01T11:36:24Z
|
mmm a / Marlin / src / Marlin . cpp <nl> ppp b / Marlin / src / Marlin . cpp <nl> void disable_all_steppers ( ) { <nl> ExtUI : : onFilamentRunout ( ExtUI : : getActiveTool ( ) ) ; <nl> # endif <nl> <nl> - const char tool = ' 0 ' <nl> - # if NUM_RUNOUT_SENSORS > 1 <nl> - + active_extruder <nl> - # endif <nl> - ; <nl> + # if ENABLED ( HOST_PROMPT_SUPPORT ) | | ENABLED ( HOST_ACTION_COMMANDS ) <nl> + const char tool = ' 0 ' <nl> + # if NUM_RUNOUT_SENSORS > 1 <nl> + + active_extruder <nl> + # endif <nl> + ; <nl> + # endif <nl> <nl> / / action : out_of_filament <nl> # if ENABLED ( HOST_PROMPT_SUPPORT ) <nl> mmm a / Marlin / src / lcd / language / language_it . h <nl> ppp b / Marlin / src / lcd / language / language_it . h <nl> <nl> # define MSG_GRADIENT _UxGT ( " Gradiente " ) <nl> # define MSG_FULL_GRADIENT _UxGT ( " Gradiente pieno " ) <nl> # define MSG_TOGGLE_MIX _UxGT ( " Alterna miscela " ) <nl> - / / # define MSG_CYCLE_MIX _UxGT ( " Ciclo miscela " ) <nl> + # define MSG_CYCLE_MIX _UxGT ( " Ciclo miscela " ) <nl> # define MSG_GRADIENT_MIX _UxGT ( " Miscela gradiente " ) <nl> # define MSG_REVERSE_GRADIENT _UxGT ( " Inverti gradiente " ) <nl> # define MSG_ACTIVE_VTOOL _UxGT ( " V - tool attivo " ) <nl> - / / # define MSG_START_VTOOL _UxGT ( " V - tool inizio " ) <nl> - / / # define MSG_END_VTOOL _UxGT ( " V - tool fine " ) <nl> + # define MSG_START_VTOOL _UxGT ( " V - tool iniziale " ) <nl> + # define MSG_END_VTOOL _UxGT ( " V - tool finale " ) <nl> # define MSG_GRADIENT_ALIAS _UxGT ( " V - tool alias " ) <nl> # define MSG_RESET_VTOOLS _UxGT ( " Ripristina V - tools " ) <nl> # define MSG_COMMIT_VTOOL _UxGT ( " Commit mix V - tool " ) <nl>
|
Update Italian language , fix unused var warning ( )
|
MarlinFirmware/Marlin
|
bdc2f10b90343b4796e5ecd31537f2c02258248e
|
2019-02-14T03:05:18Z
|
mmm a / src / networkaccessmanager . cpp <nl> ppp b / src / networkaccessmanager . cpp <nl> QNetworkReply * NetworkAccessManager : : createRequest ( Operation op , const QNetworkR <nl> { <nl> / / Get the URL string before calling the superclass . Seems to work around <nl> / / segfaults in Qt 4 . 8 : https : / / gist . github . com / 1430393 <nl> - QString url = req . url ( ) . toString ( ) ; <nl> + QByteArray url = req . url ( ) . toEncoded ( ) ; <nl> <nl> / / Pass duty to the superclass - Nothing special to do here ( yet ? ) <nl> QNetworkReply * reply = QNetworkAccessManager : : createRequest ( op , req , outgoingData ) ; <nl> void NetworkAccessManager : : handleStarted ( ) <nl> QVariantMap data ; <nl> data [ " stage " ] = " start " ; <nl> data [ " id " ] = m_ids . value ( reply ) ; <nl> - data [ " url " ] = reply - > url ( ) . toString ( ) ; <nl> + data [ " url " ] = reply - > url ( ) . toEncoded ( ) . data ( ) ; <nl> data [ " status " ] = reply - > attribute ( QNetworkRequest : : HttpStatusCodeAttribute ) ; <nl> data [ " statusText " ] = reply - > attribute ( QNetworkRequest : : HttpReasonPhraseAttribute ) ; <nl> data [ " contentType " ] = reply - > header ( QNetworkRequest : : ContentTypeHeader ) ; <nl> void NetworkAccessManager : : handleFinished ( QNetworkReply * reply ) <nl> QVariantMap data ; <nl> data [ " stage " ] = " end " ; <nl> data [ " id " ] = m_ids . value ( reply ) ; <nl> - data [ " url " ] = reply - > url ( ) . toString ( ) ; <nl> + data [ " url " ] = reply - > url ( ) . toEncoded ( ) . data ( ) ; <nl> data [ " status " ] = reply - > attribute ( QNetworkRequest : : HttpStatusCodeAttribute ) ; <nl> data [ " statusText " ] = reply - > attribute ( QNetworkRequest : : HttpReasonPhraseAttribute ) ; <nl> data [ " contentType " ] = reply - > header ( QNetworkRequest : : ContentTypeHeader ) ; <nl> mmm a / src / webpage . cpp <nl> ppp b / src / webpage . cpp <nl> void WebPage : : openUrl ( const QString & address , const QVariant & op , const QVariant <nl> if ( address = = " about : blank " ) { <nl> m_mainFrame - > setHtml ( BLANK_HTML ) ; <nl> } else { <nl> - m_mainFrame - > load ( QNetworkRequest ( QUrl ( address ) ) , networkOp , body ) ; <nl> + QUrl url = QUrl : : fromEncoded ( QByteArray ( address . toAscii ( ) ) ) ; <nl> + m_mainFrame - > load ( QNetworkRequest ( url ) , networkOp , body ) ; <nl> } <nl> } <nl> <nl>
|
Used QUrl : : fromEncoded ( ) and QUrl : : toEncoded ( ) instead of QUrl ( string ) and QUrl : : toString ( ) when converting URLs
|
ariya/phantomjs
|
3c0957834cf40014ab0d4e7312dd0d32cf6ada3b
|
2011-12-11T22:32:00Z
|
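The commit above stops round-tripping URLs through QString, since QUrl(QString) and toString() may reinterpret or decode percent-escapes and change the URL's wire form. A short sketch of the encoded round trip, assuming Qt's QUrl API as used in the diff:

#include <QByteArray>
#include <QUrl>

// Parse the bytes exactly as received: fromEncoded() keeps escapes such
// as "%2F" intact instead of decoding them to "/".
QUrl parseRaw(const QByteArray& raw) {
  return QUrl::fromEncoded(raw);
}

// Emit the exact encoded form again; toString() can return a partially
// decoded, human-readable representation that no longer matches the
// bytes that were originally sent.
QByteArray serializeRaw(const QUrl& url) {
  return url.toEncoded();
}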
mmm a / src / mongo / db / query / lite_parsed_query . cpp <nl> ppp b / src / mongo / db / query / lite_parsed_query . cpp <nl> StatusWith < unique_ptr < LiteParsedQuery > > LiteParsedQuery : : makeAsOpQuery ( const str <nl> / / static <nl> StatusWith < unique_ptr < LiteParsedQuery > > LiteParsedQuery : : makeAsFindCmd ( const NamespaceString & ns , <nl> const BSONObj & query , <nl> + const BSONObj & sort , <nl> boost : : optional < int > limit ) { <nl> unique_ptr < LiteParsedQuery > pq ( new LiteParsedQuery ( ) ) ; <nl> <nl> pq - > _fromCommand = true ; <nl> pq - > _ns = ns . ns ( ) ; <nl> pq - > _filter = query . getOwned ( ) ; <nl> + pq - > _sort = sort . getOwned ( ) ; <nl> <nl> if ( limit ) { <nl> if ( limit < = 0 ) { <nl> mmm a / src / mongo / db / query / lite_parsed_query . h <nl> ppp b / src / mongo / db / query / lite_parsed_query . h <nl> class LiteParsedQuery { <nl> * / <nl> static StatusWith < std : : unique_ptr < LiteParsedQuery > > makeAsFindCmd ( const NamespaceString & ns , <nl> const BSONObj & query , <nl> + const BSONObj & sort , <nl> boost : : optional < int > limit ) ; <nl> <nl> / * * <nl> mmm a / src / mongo / db / query / lite_parsed_query_test . cpp <nl> ppp b / src / mongo / db / query / lite_parsed_query_test . cpp <nl> TEST ( LiteParsedQueryTest , ForbidMetaSortOnFieldWithoutMetaProject ) { <nl> } <nl> <nl> TEST ( LiteParsedQueryTest , MakeFindCmd ) { <nl> - auto result = LiteParsedQuery : : makeAsFindCmd ( NamespaceString ( " test . ns " ) , BSON ( " x " < < 1 ) , 2 ) ; <nl> + auto result = LiteParsedQuery : : makeAsFindCmd ( <nl> + NamespaceString ( " test . ns " ) , BSON ( " x " < < 1 ) , BSON ( " y " < < - 1 ) , 2 ) ; <nl> ASSERT_OK ( result . getStatus ( ) ) ; <nl> <nl> auto & & lpq = result . getValue ( ) ; <nl> TEST ( LiteParsedQueryTest , MakeFindCmd ) { <nl> ASSERT_EQUALS ( 2 , lpq - > getLimit ( ) ) ; <nl> <nl> ASSERT_EQUALS ( BSONObj ( ) , lpq - > getProj ( ) ) ; <nl> - ASSERT_EQUALS ( BSONObj ( ) , lpq - > getSort ( ) ) ; <nl> + ASSERT_EQUALS ( BSON ( " y " < < - 1 ) , lpq - > getSort ( ) ) ; <nl> ASSERT_EQUALS ( BSONObj ( ) , lpq - > getHint ( ) ) ; <nl> ASSERT_EQUALS ( BSONObj ( ) , lpq - > getMin ( ) ) ; <nl> ASSERT_EQUALS ( BSONObj ( ) , lpq - > getMax ( ) ) ; <nl> TEST ( LiteParsedQueryTest , MakeFindCmd ) { <nl> } <nl> <nl> TEST ( LiteParsedQueryTest , MakeFindCmdNoLimit ) { <nl> - auto result = <nl> - LiteParsedQuery : : makeAsFindCmd ( NamespaceString ( " test . ns " ) , BSON ( " x " < < 1 ) , boost : : none ) ; <nl> + auto result = LiteParsedQuery : : makeAsFindCmd ( <nl> + NamespaceString ( " test . ns " ) , BSON ( " x " < < 1 ) , BSONObj ( ) , boost : : none ) ; <nl> ASSERT_OK ( result . getStatus ( ) ) ; <nl> <nl> auto & & lpq = result . getValue ( ) ; <nl> TEST ( LiteParsedQueryTest , MakeFindCmdNoLimit ) { <nl> <nl> TEST ( LiteParsedQueryTest , MakeFindCmdBadLimit ) { <nl> auto status = <nl> - LiteParsedQuery : : makeAsFindCmd ( NamespaceString ( " test . ns " ) , BSON ( " x " < < 1 ) , 0 ) . getStatus ( ) ; <nl> + LiteParsedQuery : : makeAsFindCmd ( NamespaceString ( " test . ns " ) , BSON ( " x " < < 1 ) , BSONObj ( ) , 0 ) <nl> + . getStatus ( ) ; <nl> ASSERT_NOT_OK ( status ) ; <nl> ASSERT_EQUALS ( ErrorCodes : : BadValue , status . code ( ) ) ; <nl> } <nl> mmm a / src / mongo / s / client / shard_registry . cpp <nl> ppp b / src / mongo / s / client / shard_registry . 
cpp <nl> StatusWith < std : : vector < BSONObj > > ShardRegistry : : exhaustiveFind ( const HostAndPort <nl> status = Status : : OK ( ) ; <nl> } ; <nl> <nl> - unique_ptr < LiteParsedQuery > findCmd ( <nl> - fassertStatusOK ( 28688 , LiteParsedQuery : : makeAsFindCmd ( nss , query , std : : move ( limit ) ) ) ) ; <nl> + unique_ptr < LiteParsedQuery > findCmd ( fassertStatusOK ( <nl> + 28688 , LiteParsedQuery : : makeAsFindCmd ( nss , query , BSONObj ( ) , std : : move ( limit ) ) ) ) ; <nl> <nl> QueryFetcher fetcher ( _executor . get ( ) , host , nss , findCmd - > asFindCommand ( ) , fetcherCallback ) ; <nl> <nl>
|
SERVER - 19069 Add sort argument to LiteParsedQuery : : makeAsFindCommand
|
mongodb/mongo
|
8497ecf53aaceb5a0a30e6334819c5bcede89110
|
2015-06-22T21:09:11Z
|
mmm a / src / torque / file - visitor . cc <nl> ppp b / src / torque / file - visitor . cc <nl> Signature FileVisitor : : MakeSignature ( const CallableNodeSignature * signature ) { <nl> return result ; <nl> } <nl> <nl> + namespace { <nl> + <nl> + void PrintMacroSignatures ( std : : stringstream & s , <nl> + const std : : vector < Macro * > & macros ) { <nl> + for ( Macro * m : macros ) { <nl> + s < < " \ n " < < m - > signature ( ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> Callable * FileVisitor : : LookupCall ( const std : : string & name , <nl> const Arguments & arguments ) { <nl> Callable * result = nullptr ; <nl> Callable * FileVisitor : : LookupCall ( const std : : string & name , <nl> result = RuntimeFunction : : cast ( declarable ) ; <nl> } else if ( declarable - > IsMacroList ( ) ) { <nl> std : : vector < Macro * > candidates ; <nl> + std : : vector < Macro * > macros_with_same_name ; <nl> for ( Macro * m : MacroList : : cast ( declarable ) - > list ( ) ) { <nl> if ( IsCompatibleSignature ( m - > signature ( ) , parameter_types , <nl> arguments . labels ) ) { <nl> candidates . push_back ( m ) ; <nl> + } else if ( m - > name ( ) = = name ) { <nl> + macros_with_same_name . push_back ( m ) ; <nl> } <nl> } <nl> <nl> + if ( candidates . empty ( ) & & macros_with_same_name . empty ( ) ) { <nl> + return nullptr ; <nl> + } else if ( candidates . empty ( ) ) { <nl> + std : : stringstream stream ; <nl> + stream < < " cannot find macro with name \ " " < < name <nl> + < < " \ " and parameter type ( s ) ( " < < parameter_types <nl> + < < " ) , candidates are : " ; <nl> + PrintMacroSignatures ( stream , macros_with_same_name ) ; <nl> + ReportError ( stream . str ( ) ) ; <nl> + } <nl> + <nl> auto is_better_candidate = [ & ] ( Macro * a , Macro * b ) { <nl> return ParameterDifference ( a - > signature ( ) . parameter_types . types , <nl> parameter_types ) <nl> . StrictlyBetterThan ( ParameterDifference ( <nl> b - > signature ( ) . parameter_types . types , parameter_types ) ) ; <nl> } ; <nl> - if ( candidates . empty ( ) ) { <nl> - return nullptr ; <nl> - } <nl> + <nl> Macro * best = * std : : min_element ( candidates . begin ( ) , candidates . end ( ) , <nl> is_better_candidate ) ; <nl> for ( Macro * candidate : candidates ) { <nl> Callable * FileVisitor : : LookupCall ( const std : : string & name , <nl> std : : stringstream s ; <nl> s < < " ambiguous macro \ " " < < name < < " \ " with types ( " <nl> < < parameter_types < < " ) , candidates : " ; <nl> - for ( Macro * m : candidates ) { <nl> - s < < " \ n ( " < < m - > signature ( ) . parameter_types < < " ) = > " <nl> - < < m - > signature ( ) . return_type ; <nl> - } <nl> + PrintMacroSignatures ( s , candidates ) ; <nl> ReportError ( s . str ( ) ) ; <nl> } <nl> } <nl> mmm a / src / torque / types . cc <nl> ppp b / src / torque / types . cc <nl> std : : ostream & operator < < ( std : : ostream & os , const Signature & sig ) { <nl> os < < " . . . " ; <nl> } <nl> os < < " ) " ; <nl> - if ( ! sig . return_type - > IsVoid ( ) ) { <nl> - os < < " : " < < sig . return_type ; <nl> + os < < " : " < < sig . return_type ; <nl> + <nl> + if ( sig . labels . empty ( ) ) return os ; <nl> + <nl> + os < < " labels " ; <nl> + for ( size_t i = 0 ; i < sig . labels . size ( ) ; + + i ) { <nl> + if ( i > 0 ) os < < " , " ; <nl> + os < < sig . labels [ i ] . name ; <nl> + <nl> + if ( sig . labels [ i ] . types . size ( ) > 0 ) os < < " ( " < < sig . labels [ i ] . types < < " ) " ; <nl> } <nl> return os ; <nl> } <nl>
|
[ torque ] Improve error message when calling macros
|
v8/v8
|
a93d30d5323d3eb4eebd28a2cd1417cc19fce74d
|
2018-06-18T09:15:08Z
|
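The diagnostic improvement above follows a common pattern for overload resolution: when no candidate is compatible, list every same-named signature rather than reporting a bare lookup failure. A simplified sketch; Signature and the string-based matching here are illustrative, not the actual torque types:

#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

struct Signature { std::string name; std::string params; };

[[noreturn]] void ReportError(const std::string& msg) {
  throw std::runtime_error(msg);
}

const Signature* LookupCall(const std::string& name,
                            const std::string& arg_types,
                            const std::vector<Signature>& all) {
  std::vector<const Signature*> candidates;
  std::vector<const Signature*> same_name;
  for (const Signature& s : all) {
    if (s.name != name) continue;
    if (s.params == arg_types) candidates.push_back(&s);
    else same_name.push_back(&s);  // same name, incompatible parameters
  }
  if (candidates.empty() && same_name.empty()) return nullptr;
  if (candidates.empty()) {
    std::stringstream msg;
    msg << "cannot find \"" << name << "\" with parameter type(s) ("
        << arg_types << "), candidates are:";
    for (const Signature* s : same_name)
      msg << "\n  " << s->name << "(" << s->params << ")";
    ReportError(msg.str());  // show what would have matched
  }
  return candidates.front();
}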
mmm a / tools / run_tests / dockerize / build_interop_image . sh <nl> ppp b / tools / run_tests / dockerize / build_interop_image . sh <nl> fi <nl> <nl> if [ " $ DOCKERHUB_ORGANIZATION " ! = " " ] <nl> then <nl> - DOCKER_IMAGE_NAME = $ DOCKERHUB_ORGANIZATION / $ BASE_IMAGE <nl> - docker pull $ DOCKER_IMAGE_NAME <nl> + BASE_IMAGE = $ DOCKERHUB_ORGANIZATION / $ BASE_IMAGE <nl> + docker pull $ BASE_IMAGE <nl> else <nl> # Make sure docker image has been built . Should be instantaneous if so . <nl> docker build - t $ BASE_IMAGE - - force - rm = true tools / dockerfile / interoptest / $ BASE_NAME | | exit $ ? <nl>
|
Merge pull request from adelez / ci2
|
grpc/grpc
|
c39d4c16a02d49a1e4b090309786643045287639
|
2017-07-18T06:26:15Z
|
mmm a / tensorflow / tensorboard / DEVELOPMENT . md <nl> ppp b / tensorflow / tensorboard / DEVELOPMENT . md <nl> Then , cd into the TensorBoard directory : <nl> <nl> and install dependencies : <nl> <nl> - ` npm run prepare ` <nl> + ` npm run prep ` <nl> <nl> Then , run gulp : ` gulp ` <nl> <nl> mmm a / tensorflow / tensorboard / package . json <nl> ppp b / tensorflow / tensorboard / package . json <nl> <nl> " description " : " Visualizers for TensorFlow " , <nl> " scripts " : { <nl> " test " : " gulp test " , <nl> - " prepare " : " npm install & & bower install & & typings install " , <nl> + " prep " : " npm install & & bower install & & typings install " , <nl> " compile " : " gulp compile " <nl> } , <nl> " keywords " : [ <nl>
|
Tensorboard change NPM script name prepare to prep ( )
|
tensorflow/tensorflow
|
a09eeb237157f161b2d70159ffd0276899f024a4
|
2017-04-16T02:59:34Z
|
mmm a / contracts / exchange / exchange . hpp <nl> ppp b / contracts / exchange / exchange . hpp <nl> namespace eosio { <nl> * an equal value of both sides of the order book and giving the issuer <nl> * the initial shares in that orderbook . <nl> * <nl> - * To prevent exessive rounding errors , the initial deposit should include <nl> + * To prevent excessive rounding errors , the initial deposit should include <nl> * a sizeable quantity of both the base and quote currencies and the exchange <nl> * shares should have a quantity 100x the quantity of the largest initial <nl> * deposit . <nl>
|
Merge pull request from leofantast / master
|
EOSIO/eos
|
982d41ab285a6f578ecd9e00a6cb2002fbbab629
|
2018-07-13T12:02:29Z
|
mmm a / tests / cpp - tests / Classes / MenuTest / MenuTest . cpp <nl> ppp b / tests / cpp - tests / Classes / MenuTest / MenuTest . cpp <nl> MenuLayerMainMenu : : MenuLayerMainMenu ( ) <nl> auto item3 = MenuItemLabel : : create ( labelAtlas , CC_CALLBACK_1 ( MenuLayerMainMenu : : menuCallbackDisabled , this ) ) ; <nl> item3 - > setDisabledColor ( Color3B ( 32 , 32 , 64 ) ) ; <nl> item3 - > setColor ( Color3B ( 200 , 200 , 255 ) ) ; <nl> - item3 - > setString ( " hello " ) ; <nl> CCLOG ( " test MenuItem Label getString : % s " , item3 - > getString ( ) . c_str ( ) ) ; <nl> / / Font Item <nl> auto item4 = MenuItemFont : : create ( " I toggle enable items " , [ & ] ( Ref * sender ) { <nl> mmm a / tests / js - tests / src / MenuTest / MenuTest . js <nl> ppp b / tests / js - tests / src / MenuTest / MenuTest . js <nl> var MenuLayerMainMenu = cc . Layer . extend ( { <nl> var item3 = new cc . MenuItemLabel ( labelAtlas , this . onMenuCallbackDisabled , this ) ; <nl> item3 . setDisabledColor ( cc . color ( 32 , 32 , 64 ) ) ; <nl> item3 . color = cc . color ( 200 , 200 , 255 ) ; <nl> - item3 . setString ( ' hello ' ) ; <nl> cc . log ( " test MenuItemLabel getString ( ) " + item3 . getString ( ) ) ; <nl> <nl> / / Font Item <nl>
|
Merge pull request from minggo / menu_test
|
cocos2d/cocos2d-x
|
fb3032b6fe266a6d9f500cb959227c631ade07b4
|
2016-03-30T06:44:24Z
|
mmm a / xbmc / cores / AudioEngine / Sinks / AESinkAUDIOTRACK . cpp <nl> ppp b / xbmc / cores / AudioEngine / Sinks / AESinkAUDIOTRACK . cpp <nl> bool CAESinkAUDIOTRACK : : Initialize ( AEAudioFormat & format , std : : string & device ) <nl> { <nl> m_format . m_frameSize = m_format . m_channelLayout . Count ( ) * ( CAEUtil : : DataFormatToBits ( m_format . m_dataFormat ) / 8 ) ; <nl> m_sink_frameSize = m_format . m_frameSize ; <nl> - / / aim at 200 ms buffer but at max 300 ms ( 2 * 0 . 15s ) and 50 ms periods <nl> + / / aim at 200 ms buffer and 50 ms periods <nl> m_audiotrackbuffer_sec = <nl> static_cast < double > ( m_min_buffer_size ) / ( m_sink_frameSize * m_sink_sampleRate ) ; <nl> - while ( m_audiotrackbuffer_sec < 0 . 15 ) <nl> + while ( m_audiotrackbuffer_sec < 0 . 2 ) <nl> { <nl> - m_min_buffer_size * = 2 ; <nl> + m_min_buffer_size + = min_buffer ; <nl> m_audiotrackbuffer_sec = <nl> static_cast < double > ( m_min_buffer_size ) / ( m_sink_frameSize * m_sink_sampleRate ) ; <nl> } <nl>
|
AESinkAudioTrack : Search for the correct buffer size by incrementing in min_buffer units
|
xbmc/xbmc
|
46ce8cd4e20d6534037197f933da578c3656233d
|
2019-11-22T14:34:16Z
|
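The sizing change above replaces geometric growth with linear growth: doubling from the driver-reported minimum can overshoot a 200 ms latency target (the old loop tolerated up to roughly 300 ms), while adding one min_buffer unit per step lands on the first size at or above the target. A small sketch of the arithmetic:

// Seconds of audio a byte count represents at the sink's frame size and
// sample rate.
double bufferSeconds(int bytes, int frame_size, int sample_rate) {
  return static_cast<double>(bytes) / (frame_size * sample_rate);
}

// Grow in whole min_buffer units: returns the first multiple of
// min_buffer whose latency is >= target_sec (e.g. 0.2 for 200 ms).
// Doubling instead (size *= 2) can jump well past the target.
int sizeBuffer(int min_buffer, int frame_size, int sample_rate,
               double target_sec) {
  int size = min_buffer;
  while (bufferSeconds(size, frame_size, sample_rate) < target_sec)
    size += min_buffer;
  return size;
}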
mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> cc_library ( <nl> " / / tensorflow / core / platform : protobuf . cc " , <nl> ] , <nl> hdrs = [ <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " lib / core / errors . h " , <nl> " lib / core / status . h " , <nl> " lib / core / stringpiece . h " , <nl> " lib / strings / numbers . h " , <nl> " lib / strings / strcat . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> " / / tensorflow / core / platform : init_main . h " , <nl> " / / tensorflow / core / platform : legacy_proto_hdrs " , <nl> " / / tensorflow / core / platform : logging . h " , <nl> cc_library ( <nl> " : platform_base " , <nl> " @ com_google_absl / / absl / strings " , <nl> " @ double_conversion / / : double - conversion " , <nl> - " / / tensorflow / core / lib / bfloat16 " , <nl> " / / tensorflow / core / platform : macros " , <nl> " / / tensorflow / core / platform : logging " , <nl> " / / tensorflow / core / platform : platform " , <nl> cc_library ( <nl> cc_library ( <nl> name = " lib " , <nl> hdrs = [ <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " lib / core / arena . h " , <nl> " lib / core / bitmap . h " , <nl> " lib / core / bits . h " , <nl> cc_library ( <nl> " : platform_other_hdrs " , <nl> " : platform_port_hdrs " , <nl> " : platform_protobuf_hdrs " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> ] , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> cc_library ( <nl> " framework / numeric_types . h " , <nl> " framework / tensor_types . h " , <nl> " framework / type_traits . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " / / tensorflow / core / platform : byte_order . h " , <nl> " / / tensorflow / core / platform : default / dynamic_annotations . h " , <nl> " / / tensorflow / core / platform : default / integral_types . h " , <nl> cc_library ( <nl> " @ nsync / / : nsync_cpp " , <nl> ] + [ <nl> " / / third_party / eigen3 " , <nl> - " / / tensorflow / core / lib / bfloat16 " , <nl> " / / tensorflow / core / platform / default / build_config : minimal " , <nl> " / / tensorflow / core / platform : types " , <nl> ] , <nl> filegroup ( <nl> " / / tensorflow / core / util / ctc : android_srcs " , <nl> " / / tensorflow / core / platform : legacy_srcs_no_runtime " , <nl> " / / tensorflow / core / profiler : mobile_srcs " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . cc " , <nl> ] + glob ( <nl> [ <nl> " client / * * / * . cc " , <nl> tf_proto_library_cc ( <nl> LIB_INTERNAL_PRIVATE_HEADERS = [ <nl> " framework / resource_handle . h " , <nl> " / / tensorflow / core / platform : legacy_lib_internal_headers " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> ] + glob ( <nl> [ <nl> " lib / * * / * . h " , <nl> cc_library ( <nl> " @ com_google_absl / / absl / memory " , <nl> " @ com_google_absl / / absl / strings " , <nl> " / / third_party / eigen3 " , <nl> - " / / tensorflow / core / lib / bfloat16 " , <nl> " / / tensorflow / core / platform : abi " , <nl> " / / tensorflow / core / platform : cpu_info " , <nl> " / / tensorflow / core / platform / default / build_config : platformlib " , <nl> cc_library ( <nl> name = " png_internal " , <nl> srcs = [ " lib / png / png_io . cc " ] , <nl> hdrs = [ <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " lib / core / stringpiece . 
h " , <nl> " lib / png / png_io . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> " / / tensorflow / core / platform : byte_order . h " , <nl> " / / tensorflow / core / platform : cpu_info . h " , <nl> " / / tensorflow / core / platform : default / integral_types . h " , <nl> cc_library ( <nl> cc_library ( <nl> name = " tflite_portable_logging " , <nl> hdrs = [ <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " / / tensorflow / core / platform : default / integral_types . h " , <nl> " / / tensorflow / core / platform : default / logging . h " , <nl> " / / tensorflow / core / platform : logging . h " , <nl> cc_library ( <nl> " / / tensorflow / core / platform : jpeg . h " , <nl> ] ) , <nl> hdrs = [ <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " lib / core / stringpiece . h " , <nl> " lib / jpeg / jpeg_handle . h " , <nl> " lib / jpeg / jpeg_mem . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> " / / tensorflow / core / platform : default / dynamic_annotations . h " , <nl> " / / tensorflow / core / platform : default / integral_types . h " , <nl> " / / tensorflow / core / platform : default / logging . h " , <nl> cc_library ( <nl> " lib / strings / numbers . h " , <nl> ] ) , <nl> hdrs = [ <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " lib / core / stringpiece . h " , <nl> " lib / gif / gif_io . h " , <nl> " lib / gtl / cleanup . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> " / / tensorflow / core / platform : default / dynamic_annotations . h " , <nl> " / / tensorflow / core / platform : default / integral_types . h " , <nl> " / / tensorflow / core / platform : default / logging . h " , <nl> cc_library ( <nl> " / / tensorflow / core / platform : png . h " , <nl> ] ) , <nl> hdrs = [ <nl> + " lib / bfloat16 / bfloat16 . h " , <nl> " lib / core / stringpiece . h " , <nl> " lib / png / png_io . h " , <nl> - " / / tensorflow / core / lib / bfloat16 : bfloat16 . h " , <nl> " / / tensorflow / core / platform : byte_order . h " , <nl> " / / tensorflow / core / platform : cpu_info . h " , <nl> " / / tensorflow / core / platform : default / integral_types . h " , <nl> deleted file mode 100644 <nl> index cb60b60c80e70 . . 0000000000000 <nl> mmm a / tensorflow / core / lib / bfloat16 / BUILD <nl> ppp / dev / null <nl> <nl> - package ( <nl> - default_visibility = [ <nl> - " / / tensorflow : __subpackages__ " , <nl> - ] , <nl> - licenses = [ " notice " ] , # Apache 2 . 0 <nl> - ) <nl> - <nl> - cc_library ( <nl> - name = " bfloat16 " , <nl> - srcs = [ " bfloat16 . cc " ] , <nl> - hdrs = [ " bfloat16 . h " ] , <nl> - deps = [ <nl> - " / / third_party / eigen3 " , <nl> - ] , <nl> - ) <nl> - <nl> - # TODO ( bmzhao ) : Remove the following once references in core / BUILD is removed . <nl> - exports_files ( <nl> - glob ( [ " * " ] ) , <nl> - ) <nl> mmm a / tensorflow / core / lib / bfloat16 / bfloat16 . h <nl> ppp b / tensorflow / core / lib / bfloat16 / bfloat16 . h <nl> limitations under the License . <nl> # include < cmath > <nl> # include < complex > <nl> <nl> + # include " tensorflow / core / platform / byte_order . h " <nl> + <nl> # ifdef __CUDACC__ <nl> / / All functions callable from CUDA code must be qualified with __device__ <nl> # define B16_DEVICE_FUNC __host__ __device__ <nl>
|
Automated rollback of commit e7d48dc6dd56bf7f4beb22776106c16eda11713b
|
tensorflow/tensorflow
|
a727bfa999910663db24e7c352e80993db7f325b
|
2019-08-07T17:55:09Z
|
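The rollback above folds lib/bfloat16/bfloat16.h back into the monolithic //tensorflow/core targets, deletes the standalone lib/bfloat16 package, and has the header include platform/byte_order.h directly. For context on what such a header implements: bfloat16 is a bit-level truncation of IEEE-754 float (same sign and exponent, top 7 mantissa bits). The sketch below is a minimal, endian-agnostic illustration with made-up names, not TensorFlow's actual class:

#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative bfloat16: keeps the sign bit, the 8 exponent bits, and the
// top 7 mantissa bits of a float (round-toward-zero truncation).
struct Bfloat16 {
  uint16_t value;
};

Bfloat16 FloatToBfloat16(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // type-pun safely via memcpy
  return Bfloat16{static_cast<uint16_t>(bits >> 16)};  // drop low 16 bits
}

float Bfloat16ToFloat(Bfloat16 b) {
  uint32_t bits = static_cast<uint32_t>(b.value) << 16;  // zero-fill low half
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  float x = 3.14159f;
  float y = Bfloat16ToFloat(FloatToBfloat16(x));
  std::cout << x << " -> " << y << "\n";  // prints 3.14159 -> 3.14062
}

Because the shift operates on the 32-bit integer value rather than on raw bytes, the trick works identically on little- and big-endian hosts; the real header pulls in byte_order.h for other, platform-specific fast paths.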
mmm a / Makefile <nl> ppp b / Makefile <nl> endif <nl> <nl> # BIN = test / test_threaded_engine test / api_registry_test <nl> BIN = test / api_registry_test <nl> - OBJ = storage . o narray_op_cpu . o static_operator . o static_operator_cpu . o <nl> + OBJ = narray_op_cpu . o static_operator . o static_operator_cpu . o <nl> # add threaded engine after it is done <nl> OBJCXX11 = engine . o narray . o c_api . o registry . o symbol . o operator . o fully_connect_op_cpu . o cpu_storage . o gpu_storage . o storage . o <nl> CUOBJ = <nl>
|
[storage] fix Makefile
|
apache/incubator-mxnet
|
1ad28eb3a1f4da937f0e432c3b310e3299ac5f3e
|
2015-08-11T14:59:42Z
|
mmm a / default_app / main . js <nl> ppp b / default_app / main . js <nl> function loadApplicationByUrl ( appUrl ) { <nl> } <nl> <nl> function startRepl ( ) { <nl> + if ( process . platform = = = ' win32 ' ) { <nl> + console . error ( ' Electron REPL not currently supported on Windows ' ) <nl> + process . exit ( 1 ) <nl> + return <nl> + } <nl> + <nl> repl . start ( ' > ' ) . on ( ' exit ' , ( ) = > { <nl> process . exit ( 0 ) <nl> } ) <nl>
|
Merge pull request from electron/repl-error-on-windows
|
electron/electron
|
ae22980ee397045c8b842829ef2845adced4e6b9
|
2016-06-13T23:43:51Z
|
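The guard above makes Electron's default app fail fast when the REPL is requested on Windows, where Node's readline-based REPL did not work at the time, instead of starting a prompt that cannot respond. Rendered in C++ for consistency with the other sketches here (the original is JavaScript, and the names below are stand-ins), the same early-exit shape is:

#include <cstdio>
#include <cstdlib>

// Fail fast on an unsupported platform instead of starting a broken prompt.
void StartRepl() {
#ifdef _WIN32
  std::fprintf(stderr, "Electron REPL not currently supported on Windows\n");
  std::exit(1);
#endif
  std::puts("> ");  // stand-in for the real interactive prompt loop
}

int main() { StartRepl(); }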
mmm a / lib / Demangling / Demangler . cpp <nl> ppp b / lib / Demangling / Demangler . cpp <nl> NodePointer Demangler : : demangleSymbol ( StringRef MangledName ) { <nl> break ; <nl> } <nl> } <nl> + if ( topLevel - > getNumChildren ( ) = = 0 ) <nl> + return nullptr ; <nl> + <nl> if ( EndPos < Text . size ( ) ) { <nl> topLevel - > addChild ( createNode ( Node : : Kind : : Suffix , Text . substr ( EndPos ) ) , * this ) ; <nl> } <nl> NodePointer Demangler : : demangleIdentifier ( ) { <nl> <nl> NodePointer Demangler : : demangleOperatorIdentifier ( ) { <nl> NodePointer Ident = popNode ( Node : : Kind : : Identifier ) ; <nl> + if ( ! Ident ) <nl> + return nullptr ; <nl> <nl> static const char op_char_table [ ] = " & @ / = > < * ! | + ? % - ~ ^ . " ; <nl> <nl> mmm a / lib / Demangling / NodeDumper . cpp <nl> ppp b / lib / Demangling / NodeDumper . cpp <nl> std : : string & & Demangle : : getNodeTreeAsString ( NodePointer Root ) { <nl> } <nl> <nl> void swift : : Demangle : : Node : : dump ( ) { <nl> - fputs ( getNodeTreeAsString ( this ) . c_str ( ) , stderr ) ; <nl> + std : : string TreeStr = getNodeTreeAsString ( this ) ; <nl> + fputs ( TreeStr . c_str ( ) , stderr ) ; <nl> } <nl> mmm a / test / Demangle / Inputs / manglings . txt <nl> ppp b / test / Demangle / Inputs / manglings . txt <nl> _T0s17MutableCollectionP1asAARzs012RandomAccessB0RzsAA11SubSequences013Bidirecti <nl> _T03foo4_123ABTf3psbpsb_n mmm > function signature specialization < Arg [ 0 ] = [ Constant Propagated String : u8 ' 123 ' ] , Arg [ 1 ] = [ Constant Propagated String : u8 ' 123 ' ] > of foo <nl> _T04main5innerys5Int32Vz_yADctF25closure_with_box_argumentxz_Bi32__lXXTf1nc_n mmm > function signature specialization < Arg [ 1 ] = [ Closure Propagated : closure_with_box_argument , Argument Types : [ < A > { var A } < Builtin . Int32 > ] > of main . inner ( inout Swift . Int32 , ( Swift . Int32 ) - > ( ) ) - > ( ) <nl> _T03foo6testityyyc_yyctF1a1bTf3pfpf_n mmm > function signature specialization < Arg [ 0 ] = [ Constant Propagated Function : a ] , Arg [ 1 ] = [ Constant Propagated Function : b ] > of foo . testit ( ( ) - > ( ) , ( ) - > ( ) ) - > ( ) <nl> + _SocketJoinOrLeaveMulticast mmm > _SocketJoinOrLeaveMulticast <nl> <nl> mmm a / utils / swift - api - dump . py <nl> ppp b / utils / swift - api - dump . py <nl> def collect_frameworks ( sdk ) : <nl> return ( sorted ( list ( frameworks ) ) , sdk_path ) <nl> <nl> <nl> + def get_short_sdk_name ( sdk ) : <nl> + matched = re . match ( " [ a - zA - Z ] + " , sdk ) <nl> + return matched . group ( 0 ) <nl> + <nl> def create_dump_module_api_args ( cmd_common , cmd_extra_args , sdk , module , <nl> target , output_dir , quiet , verbose ) : <nl> <nl> # Determine the SDK root and collect the set of frameworks . <nl> ( frameworks , sdk_root ) = collect_frameworks ( sdk ) <nl> <nl> + # Figure out the " short " name of the SDK <nl> + short_sdk_name = get_short_sdk_name ( sdk ) <nl> + <nl> # Determine the default target . <nl> if target : <nl> sdk_target = target <nl> else : <nl> - sdk_target = DEFAULT_TARGET_BASED_ON_SDK [ sdk ] <nl> + sdk_target = DEFAULT_TARGET_BASED_ON_SDK [ short_sdk_name ] <nl> <nl> # Determine the output idirectory <nl> - pretty_sdk = pretty_sdk_name ( sdk ) <nl> + pretty_sdk = pretty_sdk_name ( short_sdk_name ) <nl> sdk_output_dir = ' % s / % s ' % ( output_dir , pretty_sdk ) <nl> <nl> # Create the sets of arguments to dump_module_api . <nl>
|
Merge remote-tracking branch 'origin/master' into master-next
|
apple/swift
|
627e7126ebfca3a7c4c4a612fd54cad41a40bfd0
|
2017-03-11T00:28:42Z
|
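Besides the two nullptr guards in the Demangler, the commit above fixes Node::dump(): getNodeTreeAsString returns std::string&&, and feeding its .c_str() straight into fputs can leave the pointer referring to storage whose lifetime is unclear, so the result is materialized into a named local first. A self-contained illustration of that pattern; Describe is a made-up stand-in, not the Swift API:

#include <cstdio>
#include <string>
#include <utility>

// A stand-in for an API that returns std::string&& rather than by value.
std::string&& Describe(std::string&& s) { return std::move(s); }

int main() {
  // Risky shape the commit removed (dangerous whenever the && refers to an
  // object that expires before fputs reads the buffer):
  //   std::fputs(Describe(...).c_str(), stderr);

  // The safer pattern the commit adopts: bind the result to a named string,
  // so the buffer behind c_str() clearly outlives every use of it.
  std::string tree = Describe(std::string("Suffix\n"));
  std::fputs(tree.c_str(), stderr);
}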
mmm a / src / core / hle / kernel / handle_table . cpp <nl> ppp b / src / core / hle / kernel / handle_table . cpp <nl> <nl> # include " core / core . h " <nl> # include " core / hle / kernel / errors . h " <nl> # include " core / hle / kernel / handle_table . h " <nl> + # include " core / hle / kernel / kernel . h " <nl> # include " core / hle / kernel / process . h " <nl> + # include " core / hle / kernel / scheduler . h " <nl> # include " core / hle / kernel / thread . h " <nl> <nl> namespace Kernel { <nl> constexpr u16 GetGeneration ( Handle handle ) { <nl> } <nl> } / / Anonymous namespace <nl> <nl> - HandleTable : : HandleTable ( ) { <nl> + HandleTable : : HandleTable ( KernelCore & kernel ) : kernel { kernel } { <nl> Clear ( ) ; <nl> } <nl> <nl> bool HandleTable : : IsValid ( Handle handle ) const { <nl> <nl> std : : shared_ptr < Object > HandleTable : : GetGeneric ( Handle handle ) const { <nl> if ( handle = = CurrentThread ) { <nl> - return SharedFrom ( GetCurrentThread ( ) ) ; <nl> + return SharedFrom ( kernel . CurrentScheduler ( ) . GetCurrentThread ( ) ) ; <nl> } else if ( handle = = CurrentProcess ) { <nl> - return SharedFrom ( Core : : System : : GetInstance ( ) . CurrentProcess ( ) ) ; <nl> + return SharedFrom ( kernel . CurrentProcess ( ) ) ; <nl> } <nl> <nl> if ( ! IsValid ( handle ) ) { <nl> mmm a / src / core / hle / kernel / handle_table . h <nl> ppp b / src / core / hle / kernel / handle_table . h <nl> <nl> <nl> namespace Kernel { <nl> <nl> + class KernelCore ; <nl> + <nl> enum KernelHandle : Handle { <nl> InvalidHandle = 0 , <nl> CurrentThread = 0xFFFF8000 , <nl> class HandleTable final : NonCopyable { <nl> / / / This is the maximum limit of handles allowed per process in Horizon <nl> static constexpr std : : size_t MAX_COUNT = 1024 ; <nl> <nl> - HandleTable ( ) ; <nl> + explicit HandleTable ( KernelCore & kernel ) ; <nl> ~ HandleTable ( ) ; <nl> <nl> / * * <nl> class HandleTable final : NonCopyable { <nl> <nl> / / / Head of the free slots linked list . <nl> u16 next_free_slot = 0 ; <nl> + <nl> + / / / Underlying kernel instance that this handle table operates under . <nl> + KernelCore & kernel ; <nl> } ; <nl> <nl> } / / namespace Kernel <nl> mmm a / src / core / hle / kernel / kernel . cpp <nl> ppp b / src / core / hle / kernel / kernel . cpp <nl> namespace Kernel { <nl> <nl> struct KernelCore : : Impl { <nl> explicit Impl ( Core : : System & system , KernelCore & kernel ) <nl> - : global_scheduler { kernel } , synchronization { system } , time_manager { system } , system { system } { } <nl> + : global_scheduler { kernel } , synchronization { system } , time_manager { system } , <nl> + global_handle_table { kernel } , system { system } { } <nl> <nl> void SetMulticore ( bool is_multicore ) { <nl> this - > is_multicore = is_multicore ; <nl> struct KernelCore : : Impl { <nl> <nl> / / This is the kernel ' s handle table or supervisor handle table which <nl> / / stores all the objects in place . <nl> - Kernel : : HandleTable global_handle_table ; <nl> + HandleTable global_handle_table ; <nl> <nl> / / / Map of named ports managed by the kernel , which can be retrieved using <nl> / / / the ConnectToPort SVC . <nl> mmm a / src / core / hle / kernel / process . cpp <nl> ppp b / src / core / hle / kernel / process . cpp <nl> void Process : : LoadModule ( CodeSet code_set , VAddr base_addr ) { <nl> Process : : Process ( Core : : System & system ) <nl> : SynchronizationObject { system . 
Kernel ( ) } , page_table { std : : make_unique < Memory : : PageTable > ( <nl> system ) } , <nl> - address_arbiter { system } , mutex { system } , system { system } { } <nl> + handle_table { system . Kernel ( ) } , address_arbiter { system } , mutex { system } , system { system } { } <nl> <nl> Process : : ~ Process ( ) = default ; <nl> <nl> mmm a / src / core / hle / kernel / process . h <nl> ppp b / src / core / hle / kernel / process . h <nl> class Process final : public SynchronizationObject { <nl> / / / List of threads waiting for a condition variable <nl> std : : unordered_map < VAddr , std : : list < std : : shared_ptr < Thread > > > cond_var_threads ; <nl> <nl> - / / / System context <nl> - Core : : System & system ; <nl> - <nl> - / / / Name of this process <nl> - std : : string name ; <nl> - <nl> / / / Address of the top of the main thread ' s stack <nl> VAddr main_thread_stack_top { } ; <nl> <nl> class Process final : public SynchronizationObject { <nl> <nl> / / / Process total image size <nl> std : : size_t image_size { } ; <nl> + <nl> + / / / Name of this process <nl> + std : : string name ; <nl> + <nl> + / / / System context <nl> + Core : : System & system ; <nl> } ; <nl> <nl> } / / namespace Kernel <nl> mmm a / src / core / hle / kernel / thread . cpp <nl> ppp b / src / core / hle / kernel / thread . cpp <nl> <nl> # include " common / logging / log . h " <nl> # include " common / thread_queue_list . h " <nl> # include " core / arm / arm_interface . h " <nl> - # ifdef ARCHITECTURE_x86_64 <nl> - # include " core / arm / dynarmic / arm_dynarmic_32 . h " <nl> - # include " core / arm / dynarmic / arm_dynarmic_64 . h " <nl> - # endif <nl> - # include " core / arm / cpu_interrupt_handler . h " <nl> - # include " core / arm / exclusive_monitor . h " <nl> # include " core / arm / unicorn / arm_unicorn . h " <nl> # include " core / core . h " <nl> - # include " core / core_timing . h " <nl> - # include " core / core_timing_util . h " <nl> # include " core / cpu_manager . h " <nl> # include " core / hardware_properties . h " <nl> # include " core / hle / kernel / errors . h " <nl> <nl> # include " core / hle / result . h " <nl> # include " core / memory . h " <nl> <nl> + # ifdef ARCHITECTURE_x86_64 <nl> + # include " core / arm / dynarmic / arm_dynarmic_32 . h " <nl> + # include " core / arm / dynarmic / arm_dynarmic_64 . h " <nl> + # endif <nl> + <nl> namespace Kernel { <nl> <nl> bool Thread : : ShouldWait ( const Thread * thread ) const { <nl> ResultCode Thread : : SetCoreAndAffinityMask ( s32 new_core , u64 new_affinity_mask ) { <nl> return RESULT_SUCCESS ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / * * <nl> - * Gets the current thread <nl> - * / <nl> - Thread * GetCurrentThread ( ) { <nl> - return Core : : System : : GetInstance ( ) . CurrentScheduler ( ) . GetCurrentThread ( ) ; <nl> - } <nl> - <nl> } / / namespace Kernel <nl> mmm a / src / core / hle / kernel / thread . h <nl> ppp b / src / core / hle / kernel / thread . h <nl> class Thread final : public SynchronizationObject { <nl> std : : string name ; <nl> } ; <nl> <nl> - / * * <nl> - * Gets the current thread <nl> - * / <nl> - Thread * GetCurrentThread ( ) ; <nl> - <nl> } / / namespace Kernel <nl>
|
Merge pull request from lioncash/thread
|
yuzu-emu/yuzu
|
0648e023eac31b92c1df1d33a575b9c5266cfdaa
|
2020-07-16T13:02:04Z
|
mmm a / src / core / ext / transport / chttp2 / transport / chttp2_transport . cc <nl> ppp b / src / core / ext / transport / chttp2 / transport / chttp2_transport . cc <nl> static void perform_stream_op_locked ( void * stream_op , <nl> <nl> if ( op - > send_message ) { <nl> GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE ( ) ; <nl> - if ( t - > channelz_socket ! = nullptr ) { <nl> - t - > channelz_socket - > RecordMessageSent ( ) ; <nl> - } <nl> + t - > num_messages_in_next_write + + ; <nl> GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE ( <nl> op - > payload - > send_message . send_message - > length ( ) ) ; <nl> on_complete - > next_data . scratch | = CLOSURE_BARRIER_MAY_COVER_WRITE ; <nl> static void perform_stream_op_locked ( void * stream_op , <nl> <nl> if ( op - > recv_message ) { <nl> GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE ( ) ; <nl> - if ( t - > channelz_socket ! = nullptr ) { <nl> - t - > channelz_socket - > RecordMessageRecieved ( ) ; <nl> - } <nl> size_t before = 0 ; <nl> GPR_ASSERT ( s - > recv_message_ready = = nullptr ) ; <nl> GPR_ASSERT ( ! s - > pending_byte_stream ) ; <nl> mmm a / src / core / ext / transport / chttp2 / transport / frame_data . cc <nl> ppp b / src / core / ext / transport / chttp2 / transport / frame_data . cc <nl> grpc_error * grpc_deframe_unprocessed_incoming_frames ( <nl> GPR_ASSERT ( stream_out ! = nullptr ) ; <nl> GPR_ASSERT ( p - > parsing_frame = = nullptr ) ; <nl> p - > frame_size | = ( static_cast < uint32_t > ( * cur ) ) ; <nl> + if ( t - > channelz_socket ! = nullptr ) { <nl> + t - > channelz_socket - > RecordMessageReceived ( ) ; <nl> + } <nl> p - > state = GRPC_CHTTP2_DATA_FRAME ; <nl> + + cur ; <nl> message_flags = 0 ; <nl> mmm a / src / core / ext / transport / chttp2 / transport / internal . h <nl> ppp b / src / core / ext / transport / chttp2 / transport / internal . h <nl> struct grpc_chttp2_transport { <nl> grpc_chttp2_keepalive_state keepalive_state ; <nl> <nl> grpc_core : : RefCountedPtr < grpc_core : : channelz : : SocketNode > channelz_socket ; <nl> + uint32_t num_messages_in_next_write ; <nl> } ; <nl> <nl> typedef enum { <nl> mmm a / src / core / ext / transport / chttp2 / transport / writing . cc <nl> ppp b / src / core / ext / transport / chttp2 / transport / writing . cc <nl> void grpc_chttp2_end_write ( grpc_chttp2_transport * t , grpc_error * error ) { <nl> GPR_TIMER_SCOPE ( " grpc_chttp2_end_write " , 0 ) ; <nl> grpc_chttp2_stream * s ; <nl> <nl> + if ( t - > channelz_socket ! = nullptr ) { <nl> + t - > channelz_socket - > RecordMessagesSent ( t - > num_messages_in_next_write ) ; <nl> + } <nl> + t - > num_messages_in_next_write = 0 ; <nl> + <nl> while ( grpc_chttp2_list_pop_writing_stream ( t , & s ) ) { <nl> if ( s - > sending_bytes ! = 0 ) { <nl> update_list ( t , s , static_cast < int64_t > ( s - > sending_bytes ) , <nl> mmm a / src / core / lib / channel / channelz . cc <nl> ppp b / src / core / lib / channel / channelz . cc <nl> CallCountingHelper : : CallCountingHelper ( ) { <nl> CallCountingHelper : : ~ CallCountingHelper ( ) { } <nl> <nl> void CallCountingHelper : : RecordCallStarted ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & calls_started_ , ( gpr_atm ) 1 ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & calls_started_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> gpr_atm_no_barrier_store ( & last_call_started_millis_ , <nl> ( gpr_atm ) ExecCtx : : Get ( ) - > Now ( ) ) ; <nl> } <nl> grpc_json * ServerNode : : RenderJson ( ) { <nl> } <nl> / / ask CallCountingHelper to populate trace and call count data . <nl> call_counter_ . 
PopulateCallCounts ( json ) ; <nl> - json = top_level_json ; <nl> return top_level_json ; <nl> } <nl> <nl> + SocketNode : : SocketNode ( ) : BaseNode ( EntityType : : kSocket ) { } <nl> + <nl> void SocketNode : : RecordStreamStartedFromLocal ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & streams_started_ , ( gpr_atm ) 1 ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & streams_started_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> gpr_atm_no_barrier_store ( & last_local_stream_created_millis_ , <nl> ( gpr_atm ) ExecCtx : : Get ( ) - > Now ( ) ) ; <nl> } <nl> <nl> void SocketNode : : RecordStreamStartedFromRemote ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & streams_started_ , ( gpr_atm ) 1 ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & streams_started_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> gpr_atm_no_barrier_store ( & last_remote_stream_created_millis_ , <nl> ( gpr_atm ) ExecCtx : : Get ( ) - > Now ( ) ) ; <nl> } <nl> <nl> - void SocketNode : : RecordMessageSent ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & messages_sent_ , ( gpr_atm ) 1 ) ; <nl> + void SocketNode : : RecordMessagesSent ( uint32_t num_sent ) { <nl> + gpr_atm_no_barrier_fetch_add ( & messages_sent_ , static_cast < gpr_atm > ( num_sent ) ) ; <nl> gpr_atm_no_barrier_store ( & last_message_sent_millis_ , <nl> ( gpr_atm ) ExecCtx : : Get ( ) - > Now ( ) ) ; <nl> } <nl> <nl> - void SocketNode : : RecordMessageRecieved ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & messages_recieved_ , ( gpr_atm ) 1 ) ; <nl> - gpr_atm_no_barrier_store ( & last_message_recieved_millis_ , <nl> + void SocketNode : : RecordMessageReceived ( ) { <nl> + gpr_atm_no_barrier_fetch_add ( & messages_received_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> + gpr_atm_no_barrier_store ( & last_message_received_millis_ , <nl> ( gpr_atm ) ExecCtx : : Get ( ) - > Now ( ) ) ; <nl> } <nl> <nl> grpc_json * SocketNode : : RenderJson ( ) { <nl> json_iterator = grpc_json_add_number_string_child ( <nl> json , json_iterator , " streamsFailed " , streams_failed_ ) ; <nl> } <nl> + gpr_timespec ts ; <nl> if ( messages_sent_ ! = 0 ) { <nl> json_iterator = grpc_json_add_number_string_child ( <nl> json , json_iterator , " messagesSent " , messages_sent_ ) ; <nl> - } <nl> - if ( messages_recieved_ ! = 0 ) { <nl> - json_iterator = grpc_json_add_number_string_child ( <nl> - json , json_iterator , " messagesRecieved " , messages_recieved_ ) ; <nl> - } <nl> - if ( keepalives_sent_ ! = 0 ) { <nl> - json_iterator = grpc_json_add_number_string_child ( <nl> - json , json_iterator , " keepAlivesSent " , keepalives_sent_ ) ; <nl> - } <nl> - gpr_timespec ts ; <nl> - if ( streams_started_ ! = 0 & & last_local_stream_created_millis_ ! = 0 ) { <nl> - ts = grpc_millis_to_timespec ( last_local_stream_created_millis_ , <nl> - GPR_CLOCK_REALTIME ) ; <nl> - json_iterator = grpc_json_create_child ( <nl> - json_iterator , json , " lastLocalStreamCreatedTimestamp " , <nl> - gpr_format_timespec ( ts ) , GRPC_JSON_STRING , true ) ; <nl> - } <nl> - if ( streams_started_ ! = 0 & & last_remote_stream_created_millis_ ! = 0 ) { <nl> - ts = grpc_millis_to_timespec ( last_remote_stream_created_millis_ , <nl> - GPR_CLOCK_REALTIME ) ; <nl> - json_iterator = grpc_json_create_child ( <nl> - json_iterator , json , " lastRemoteStreamCreatedTimestamp " , <nl> - gpr_format_timespec ( ts ) , GRPC_JSON_STRING , true ) ; <nl> - } <nl> - if ( messages_sent_ ! 
= 0 ) { <nl> ts = grpc_millis_to_timespec ( last_message_sent_millis_ , GPR_CLOCK_REALTIME ) ; <nl> json_iterator = <nl> grpc_json_create_child ( json_iterator , json , " lastMessageSentTimestamp " , <nl> gpr_format_timespec ( ts ) , GRPC_JSON_STRING , true ) ; <nl> } <nl> - if ( messages_recieved_ ! = 0 ) { <nl> - ts = grpc_millis_to_timespec ( last_message_recieved_millis_ , <nl> + if ( messages_received_ ! = 0 ) { <nl> + json_iterator = grpc_json_add_number_string_child ( <nl> + json , json_iterator , " messagesReceived " , messages_received_ ) ; <nl> + ts = grpc_millis_to_timespec ( last_message_received_millis_ , <nl> GPR_CLOCK_REALTIME ) ; <nl> json_iterator = grpc_json_create_child ( <nl> - json_iterator , json , " lastMessageRecievedTimestamp " , <nl> + json_iterator , json , " lastMessageReceivedTimestamp " , <nl> gpr_format_timespec ( ts ) , GRPC_JSON_STRING , true ) ; <nl> } <nl> - json = top_level_json ; <nl> + if ( keepalives_sent_ ! = 0 ) { <nl> + json_iterator = grpc_json_add_number_string_child ( <nl> + json , json_iterator , " keepAlivesSent " , keepalives_sent_ ) ; <nl> + } <nl> + if ( streams_started_ ! = 0 ) { <nl> + if ( last_local_stream_created_millis_ ! = 0 ) { <nl> + ts = grpc_millis_to_timespec ( last_local_stream_created_millis_ , <nl> + GPR_CLOCK_REALTIME ) ; <nl> + json_iterator = grpc_json_create_child ( <nl> + json_iterator , json , " lastLocalStreamCreatedTimestamp " , <nl> + gpr_format_timespec ( ts ) , GRPC_JSON_STRING , true ) ; <nl> + } <nl> + if ( last_remote_stream_created_millis_ ! = 0 ) { <nl> + ts = grpc_millis_to_timespec ( last_remote_stream_created_millis_ , <nl> + GPR_CLOCK_REALTIME ) ; <nl> + json_iterator = grpc_json_create_child ( <nl> + json_iterator , json , " lastRemoteStreamCreatedTimestamp " , <nl> + gpr_format_timespec ( ts ) , GRPC_JSON_STRING , true ) ; <nl> + } <nl> + } <nl> return top_level_json ; <nl> } <nl> <nl> mmm a / src / core / lib / channel / channelz . h <nl> ppp b / src / core / lib / channel / channelz . h <nl> class CallCountingHelper { <nl> <nl> void RecordCallStarted ( ) ; <nl> void RecordCallFailed ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & calls_failed_ , ( gpr_atm ( 1 ) ) ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & calls_failed_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> } <nl> void RecordCallSucceeded ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & calls_succeeded_ , ( gpr_atm ( 1 ) ) ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & calls_succeeded_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> } <nl> <nl> / / Common rendering of the call count data and last_call_started_timestamp . 
<nl> class ServerNode : public BaseNode { <nl> / / Handles channelz bookkeeping for sockets <nl> class SocketNode : public BaseNode { <nl> public : <nl> - SocketNode ( ) : BaseNode ( EntityType : : kSocket ) { } <nl> + SocketNode ( ) ; <nl> ~ SocketNode ( ) override { } <nl> <nl> grpc_json * RenderJson ( ) override ; <nl> class SocketNode : public BaseNode { <nl> void RecordStreamStartedFromLocal ( ) ; <nl> void RecordStreamStartedFromRemote ( ) ; <nl> void RecordStreamSucceeded ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & streams_succeeded_ , ( gpr_atm ( 1 ) ) ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & streams_succeeded_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> } <nl> void RecordStreamFailed ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & streams_failed_ , ( gpr_atm ( 1 ) ) ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & streams_failed_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> } <nl> - void RecordMessageSent ( ) ; <nl> - void RecordMessageRecieved ( ) ; <nl> + void RecordMessagesSent ( uint32_t num_sent ) ; <nl> + void RecordMessageReceived ( ) ; <nl> void RecordKeepaliveSent ( ) { <nl> - gpr_atm_no_barrier_fetch_add ( & keepalives_sent_ , ( gpr_atm ( 1 ) ) ) ; <nl> + gpr_atm_no_barrier_fetch_add ( & keepalives_sent_ , static_cast < gpr_atm > ( 1 ) ) ; <nl> } <nl> <nl> private : <nl> class SocketNode : public BaseNode { <nl> gpr_atm streams_succeeded_ = 0 ; <nl> gpr_atm streams_failed_ = 0 ; <nl> gpr_atm messages_sent_ = 0 ; <nl> - gpr_atm messages_recieved_ = 0 ; <nl> + gpr_atm messages_received_ = 0 ; <nl> gpr_atm keepalives_sent_ = 0 ; <nl> gpr_atm last_local_stream_created_millis_ = 0 ; <nl> gpr_atm last_remote_stream_created_millis_ = 0 ; <nl> gpr_atm last_message_sent_millis_ = 0 ; <nl> - gpr_atm last_message_recieved_millis_ = 0 ; <nl> + gpr_atm last_message_received_millis_ = 0 ; <nl> + UniquePtr < char > peer_string_ ; <nl> } ; <nl> <nl> / / Creation functions <nl> mmm a / test / core / end2end / tests / channelz . cc <nl> ppp b / test / core / end2end / tests / channelz . cc <nl> static grpc_slice generate_random_slice ( ) { <nl> size_t i ; <nl> static const char chars [ ] = " abcdefghijklmnopqrstuvwxyz1234567890 " ; <nl> char * output ; <nl> - const size_t output_size = 1024 * 1024 ; <nl> - output = static_cast < char * > ( gpr_malloc ( output_size ) ) ; <nl> - for ( i = 0 ; i < output_size - 1 ; + + i ) { <nl> + const size_t kOutputSize = 1024 * 1024 ; <nl> + output = static_cast < char * > ( gpr_malloc ( kOutputSize ) ) ; <nl> + for ( i = 0 ; i < kOutputSize - 1 ; + + i ) { <nl> output [ i ] = chars [ rand ( ) % static_cast < int > ( sizeof ( chars ) - 1 ) ] ; <nl> } <nl> - output [ output_size - 1 ] = ' \ 0 ' ; <nl> + output [ kOutputSize - 1 ] = ' \ 0 ' ; <nl> grpc_slice out = grpc_slice_from_copied_string ( output ) ; <nl> gpr_free ( output ) ; <nl> return out ; <nl> static void test_channelz ( grpc_end2end_test_config config ) { <nl> GPR_ASSERT ( nullptr = = strstr ( json , " \ " severity \ " : \ " CT_INFO \ " " ) ) ; <nl> gpr_free ( json ) ; <nl> <nl> + / / TODO ( ncteisen ) : add logic to query for socket id once child socket support <nl> + / / is in place . For now , we hardcode uuid = 5 , which we know is a socket . <nl> + json = grpc_channelz_get_socket ( 5 ) ; <nl> + GPR_ASSERT ( json ! = nullptr ) ; <nl> + gpr_log ( GPR_INFO , " % s " , json ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " socketId \ " : \ " 5 \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " streamsStarted \ " : \ " 2 \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! 
= strstr ( json , " \ " streamsSucceeded \ " : \ " 2 \ " " ) ) ; <nl> + / / no messaged sent yet . <nl> + GPR_ASSERT ( nullptr = = strstr ( json , " \ " messagesSent \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr = = strstr ( json , " \ " messagesReceived \ " " ) ) ; <nl> + gpr_free ( json ) ; <nl> + <nl> / / one successful request with payload to test socket data <nl> - / / TODO ( ncteisen ) : add some programatic spot checks on the socket json . <nl> run_one_request_with_payload ( config , f ) ; <nl> <nl> + json = grpc_channelz_get_socket ( 5 ) ; <nl> + GPR_ASSERT ( json ! = nullptr ) ; <nl> + gpr_log ( GPR_INFO , " % s " , json ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " socketId \ " : \ " 5 \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " streamsStarted \ " : \ " 3 \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " streamsSucceeded \ " : \ " 3 \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " messagesSent \ " : \ " 1 \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " messagesReceived \ " : \ " 1 \ " " ) ) ; <nl> + gpr_free ( json ) ; <nl> + <nl> end_test ( & f ) ; <nl> config . tear_down_data ( & f ) ; <nl> } <nl>
|
reviewer feedback
|
grpc/grpc
|
86600071b0ec7dd405b970c2a2b0ef808b130967
|
2018-09-24T23:12:00Z
|
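The transport change above moves channelz message accounting out of the per-op hot path: perform_stream_op_locked only bumps a plain counter (num_messages_in_next_write), and grpc_chttp2_end_write records the whole batch through the new RecordMessagesSent(uint32_t); the commit also fixes the recieved/received spelling throughout. A runnable sketch of that batching idea, with simplified stand-ins for the transport and socket node:

#include <atomic>
#include <cstdint>
#include <iostream>

// One atomic add per write, instead of one per message.
class SocketStats {
 public:
  void RecordMessagesSent(uint32_t num_sent) {
    messages_sent_.fetch_add(num_sent, std::memory_order_relaxed);
  }
  uint64_t messages_sent() const { return messages_sent_.load(); }

 private:
  std::atomic<uint64_t> messages_sent_{0};
};

struct Transport {
  SocketStats* stats = nullptr;
  uint32_t num_messages_in_next_write = 0;

  void OnSendMessageOp() { ++num_messages_in_next_write; }  // cheap, non-atomic

  void OnEndWrite() {  // flush the batch once the write completes
    if (stats != nullptr) stats->RecordMessagesSent(num_messages_in_next_write);
    num_messages_in_next_write = 0;
  }
};

int main() {
  SocketStats stats;
  Transport t{&stats};
  for (int i = 0; i < 3; ++i) t.OnSendMessageOp();
  t.OnEndWrite();
  std::cout << stats.messages_sent() << "\n";  // prints 3
}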
mmm a / test / lit . site . cfg . in <nl> ppp b / test / lit . site . cfg . in <nl> config . available_features . add ( " CMAKE_GENERATOR = @ CMAKE_GENERATOR @ " ) <nl> if " @ SWIFT_ENABLE_SOURCEKIT_TESTS @ " = = " TRUE " : <nl> config . available_features . add ( ' sourcekit ' ) <nl> <nl> + if " @ SWIFT_ENABLE_GUARANTEED_NORMAL_ARGUMENTS @ " = = " FALSE " : <nl> + config . available_features . add ( ' plus_one_runtime ' ) <nl> + else : <nl> + config . available_features . add ( ' plus_zero_runtime ' ) <nl> + <nl> # Let the main config do the real work . <nl> if config . test_exec_root is None : <nl> config . test_exec_root = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl>
|
Merge pull request from gottesmm/pr-212da7a86877edbd0d4240e458893d42eba8249b
|
apple/swift
|
181595cfa21f0fbbd14a5973b8af2a33eb8c6a31
|
2018-03-09T02:13:42Z
|
mmm a / modules / highgui / src / cap_unicap . cpp <nl> ppp b / modules / highgui / src / cap_unicap . cpp <nl> bool CvCapture_Unicap : : initDevice ( ) { <nl> } <nl> <nl> int i ; <nl> - for ( i = format . size_count - 1 ; i > 0 ; i - - ) <nl> - if ( format . sizes [ i ] . width = = desired_size . width & & <nl> - format . sizes [ i ] . height = = desired_size . height ) <nl> - break ; <nl> - format . size . width = format . sizes [ i ] . width ; <nl> - format . size . height = format . sizes [ i ] . height ; <nl> + if ( format . sizes ) <nl> + { <nl> + for ( i = format . size_count - 1 ; i > 0 ; i - - ) <nl> + if ( format . sizes [ i ] . width = = desired_size . width & & <nl> + format . sizes [ i ] . height = = desired_size . height ) <nl> + break ; <nl> + format . size . width = format . sizes [ i ] . width ; <nl> + format . size . height = format . sizes [ i ] . height ; <nl> + } <nl> <nl> if ( ! SUCCESS ( unicap_set_format ( handle , & format ) ) ) { <nl> shutdownDevice ( ) ; <nl>
|
Fixed segfault problem with cap_unicap (ticket)
|
opencv/opencv
|
1564fe9a83958cb3d420b8657e46af7ba31f1407
|
2012-03-29T12:20:06Z
|
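The fix above stops CvCapture_Unicap::initDevice() from indexing format.sizes when the driver leaves that array null: the size-matching loop now runs only if the pointer is set, and otherwise the size already in the format is kept. A stand-alone reduction of the pattern; the struct names are simplified stand-ins, not the real unicap API:

#include <iostream>

struct Size { int width, height; };

struct Format {
  Size* sizes = nullptr;  // may legitimately be null for some devices
  int size_count = 0;
  Size size{640, 480};    // whatever size the driver reported
};

void PickClosest(Format& format, Size desired) {
  if (format.sizes) {     // the commit's guard: only index when non-null
    int i;
    for (i = format.size_count - 1; i > 0; --i)
      if (format.sizes[i].width == desired.width &&
          format.sizes[i].height == desired.height)
        break;
    format.size = format.sizes[i];
  }                       // otherwise keep the driver-reported size
}

int main() {
  Format f;               // sizes == nullptr: previously a segfault
  PickClosest(f, {800, 600});
  std::cout << f.size.width << "x" << f.size.height << "\n";  // 640x480
}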
mmm a / HISTORY . md <nl> ppp b / HISTORY . md <nl> <nl> # Rocksdb Change Log <nl> # # Unreleased <nl> + # # # Public API Change <nl> + * Deprecate BlockBaseTableOptions . hash_index_allow_collision = false <nl> + * options . memtable_prefix_bloom_bits changes to options . memtable_prefix_bloom_bits_ratio and deprecate options . memtable_prefix_bloom_probes <nl> <nl> # # 4 . 9 . 0 ( 6 / 9 / 2016 ) <nl> # # # Public API changes <nl> mmm a / db / c . cc <nl> ppp b / db / c . cc <nl> void rocksdb_options_set_memtable_vector_rep ( rocksdb_options_t * opt ) { <nl> opt - > rep . memtable_factory . reset ( factory ) ; <nl> } <nl> <nl> - void rocksdb_options_set_memtable_prefix_bloom_bits ( <nl> - rocksdb_options_t * opt , uint32_t v ) { <nl> - opt - > rep . memtable_prefix_bloom_bits = v ; <nl> - } <nl> - <nl> - void rocksdb_options_set_memtable_prefix_bloom_probes ( <nl> - rocksdb_options_t * opt , uint32_t v ) { <nl> - opt - > rep . memtable_prefix_bloom_probes = v ; <nl> + void rocksdb_options_set_memtable_prefix_bloom_size_ratio ( <nl> + rocksdb_options_t * opt , double v ) { <nl> + opt - > rep . memtable_prefix_bloom_size_ratio = v ; <nl> } <nl> <nl> void rocksdb_options_set_memtable_prefix_bloom_huge_page_tlb_size ( <nl> mmm a / db / column_family . cc <nl> ppp b / db / column_family . cc <nl> ColumnFamilyOptions SanitizeOptions ( const DBOptions & db_options , <nl> if ( result . max_write_buffer_number_to_maintain < 0 ) { <nl> result . max_write_buffer_number_to_maintain = result . max_write_buffer_number ; <nl> } <nl> + / / bloom filter size shouldn ' t exceed 1 / 4 of memtable size . <nl> + if ( result . memtable_prefix_bloom_size_ratio > 0 . 25 ) { <nl> + result . memtable_prefix_bloom_size_ratio = 0 . 25 ; <nl> + } else if ( result . memtable_prefix_bloom_size_ratio < 0 ) { <nl> + result . memtable_prefix_bloom_size_ratio = 0 ; <nl> + } <nl> XFUNC_TEST ( " memtablelist_history " , " transaction_xftest_SanitizeOptions " , <nl> xf_transaction_set_memtable_history1 , <nl> xf_transaction_set_memtable_history , <nl> mmm a / db / db_bloom_filter_test . cc <nl> ppp b / db / db_bloom_filter_test . cc <nl> class BloomStatsTestWithParam <nl> <nl> options_ . create_if_missing = true ; <nl> options_ . prefix_extractor . reset ( rocksdb : : NewFixedPrefixTransform ( 4 ) ) ; <nl> - options_ . memtable_prefix_bloom_bits = 8 * 1024 ; <nl> + options_ . memtable_prefix_bloom_size_ratio = <nl> + 8 . 0 * 1024 . 0 / static_cast < double > ( options_ . write_buffer_size ) ; <nl> if ( use_block_table_ ) { <nl> BlockBasedTableOptions table_options ; <nl> table_options . hash_index_allow_collision = false ; <nl> mmm a / db / memtable . cc <nl> ppp b / db / memtable . cc <nl> <nl> <nl> namespace rocksdb { <nl> <nl> - MemTableOptions : : MemTableOptions ( <nl> - const ImmutableCFOptions & ioptions , <nl> - const MutableCFOptions & mutable_cf_options ) <nl> - : write_buffer_size ( mutable_cf_options . write_buffer_size ) , <nl> - arena_block_size ( mutable_cf_options . arena_block_size ) , <nl> - memtable_prefix_bloom_bits ( mutable_cf_options . memtable_prefix_bloom_bits ) , <nl> - memtable_prefix_bloom_probes ( <nl> - mutable_cf_options . memtable_prefix_bloom_probes ) , <nl> - memtable_prefix_bloom_huge_page_tlb_size ( <nl> - mutable_cf_options . memtable_prefix_bloom_huge_page_tlb_size ) , <nl> - inplace_update_support ( ioptions . inplace_update_support ) , <nl> - inplace_update_num_locks ( mutable_cf_options . inplace_update_num_locks ) , <nl> - inplace_callback ( ioptions . 
inplace_callback ) , <nl> - max_successive_merges ( mutable_cf_options . max_successive_merges ) , <nl> - filter_deletes ( mutable_cf_options . filter_deletes ) , <nl> - statistics ( ioptions . statistics ) , <nl> - merge_operator ( ioptions . merge_operator ) , <nl> - info_log ( ioptions . info_log ) { } <nl> + MemTableOptions : : MemTableOptions ( const ImmutableCFOptions & ioptions , <nl> + const MutableCFOptions & mutable_cf_options ) <nl> + : write_buffer_size ( mutable_cf_options . write_buffer_size ) , <nl> + arena_block_size ( mutable_cf_options . arena_block_size ) , <nl> + memtable_prefix_bloom_bits ( <nl> + static_cast < uint32_t > ( <nl> + static_cast < double > ( mutable_cf_options . write_buffer_size ) * <nl> + mutable_cf_options . memtable_prefix_bloom_size_ratio ) * <nl> + 8u ) , <nl> + memtable_prefix_bloom_huge_page_tlb_size ( <nl> + mutable_cf_options . memtable_prefix_bloom_huge_page_tlb_size ) , <nl> + inplace_update_support ( ioptions . inplace_update_support ) , <nl> + inplace_update_num_locks ( mutable_cf_options . inplace_update_num_locks ) , <nl> + inplace_callback ( ioptions . inplace_callback ) , <nl> + max_successive_merges ( mutable_cf_options . max_successive_merges ) , <nl> + filter_deletes ( mutable_cf_options . filter_deletes ) , <nl> + statistics ( ioptions . statistics ) , <nl> + merge_operator ( ioptions . merge_operator ) , <nl> + info_log ( ioptions . info_log ) { } <nl> <nl> MemTable : : MemTable ( const InternalKeyComparator & cmp , <nl> const ImmutableCFOptions & ioptions , <nl> MemTable : : MemTable ( const InternalKeyComparator & cmp , <nl> <nl> if ( prefix_extractor_ & & moptions_ . memtable_prefix_bloom_bits > 0 ) { <nl> prefix_bloom_ . reset ( new DynamicBloom ( <nl> - & allocator_ , <nl> - moptions_ . memtable_prefix_bloom_bits , ioptions . bloom_locality , <nl> - moptions_ . memtable_prefix_bloom_probes , nullptr , <nl> - moptions_ . memtable_prefix_bloom_huge_page_tlb_size , <nl> - ioptions . info_log ) ) ; <nl> + & allocator_ , moptions_ . memtable_prefix_bloom_bits , <nl> + ioptions . bloom_locality , 6 / * hard coded 6 probes * / , nullptr , <nl> + moptions_ . memtable_prefix_bloom_huge_page_tlb_size , ioptions . info_log ) ) ; <nl> } <nl> } <nl> <nl> mmm a / db / memtable . h <nl> ppp b / db / memtable . h <nl> struct MemTableOptions { <nl> size_t write_buffer_size ; <nl> size_t arena_block_size ; <nl> uint32_t memtable_prefix_bloom_bits ; <nl> - uint32_t memtable_prefix_bloom_probes ; <nl> size_t memtable_prefix_bloom_huge_page_tlb_size ; <nl> bool inplace_update_support ; <nl> size_t inplace_update_num_locks ; <nl> mmm a / db / prefix_test . cc <nl> ppp b / db / prefix_test . cc <nl> DEFINE_int64 ( write_buffer_size , 33554432 , " " ) ; <nl> DEFINE_int32 ( max_write_buffer_number , 2 , " " ) ; <nl> DEFINE_int32 ( min_write_buffer_number_to_merge , 1 , " " ) ; <nl> DEFINE_int32 ( skiplist_height , 4 , " " ) ; <nl> - DEFINE_int32 ( memtable_prefix_bloom_bits , 10000000 , " " ) ; <nl> - DEFINE_int32 ( memtable_prefix_bloom_probes , 10 , " " ) ; <nl> + DEFINE_double ( memtable_prefix_bloom_size_ratio , 0 . 1 , " " ) ; <nl> DEFINE_int32 ( memtable_prefix_bloom_huge_page_tlb_size , 2 * 1024 * 1024 , " " ) ; <nl> DEFINE_int32 ( value_size , 40 , " " ) ; <nl> <nl> class PrefixTest : public testing : : Test { <nl> options . min_write_buffer_number_to_merge = <nl> FLAGS_min_write_buffer_number_to_merge ; <nl> <nl> - options . memtable_prefix_bloom_bits = FLAGS_memtable_prefix_bloom_bits ; <nl> - options . 
memtable_prefix_bloom_probes = FLAGS_memtable_prefix_bloom_probes ; <nl> + options . memtable_prefix_bloom_size_ratio = <nl> + FLAGS_memtable_prefix_bloom_size_ratio ; <nl> options . memtable_prefix_bloom_huge_page_tlb_size = <nl> FLAGS_memtable_prefix_bloom_huge_page_tlb_size ; <nl> <nl> mmm a / db / version_set . h <nl> ppp b / db / version_set . h <nl> class Version ; <nl> class VersionSet ; <nl> class WriteBuffer ; <nl> class MergeContext ; <nl> - class ColumnFamilyData ; <nl> class ColumnFamilySet ; <nl> class TableCache ; <nl> class MergeIteratorBuilder ; <nl> mmm a / include / rocksdb / options . h <nl> ppp b / include / rocksdb / options . h <nl> struct ColumnFamilyOptions { <nl> Slice delta_value , <nl> std : : string * merged_value ) ; <nl> <nl> - / / if prefix_extractor is set and bloom_bits is not 0 , create prefix bloom <nl> - / / for memtable <nl> + / / if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0 , <nl> + / / create prefix bloom for memtable with the size of <nl> + / / write_buffer_size * memtable_prefix_bloom_size_ratio . <nl> + / / If it is larger than 0 . 25 , it is santinized to 0 . 25 . <nl> / / <nl> - / / Dynamically changeable through SetOptions ( ) API <nl> - uint32_t memtable_prefix_bloom_bits ; <nl> - <nl> - / / number of hash probes per key <nl> + / / Default : 0 ( disable ) <nl> / / <nl> / / Dynamically changeable through SetOptions ( ) API <nl> - uint32_t memtable_prefix_bloom_probes ; <nl> + double memtable_prefix_bloom_size_ratio ; <nl> <nl> / / Page size for huge page TLB for bloom in memtable . If < = 0 , not allocate <nl> / / from huge page TLB but from malloc . <nl> mmm a / java / rocksjni / options . cc <nl> ppp b / java / rocksjni / options . cc <nl> void Java_org_rocksdb_Options_setInplaceUpdateNumLocks ( <nl> <nl> / * <nl> * Class : org_rocksdb_Options <nl> - * Method : memtablePrefixBloomBits <nl> + * Method : memtablePrefixBloomSizeRatio <nl> * Signature : ( J ) I <nl> * / <nl> - jint Java_org_rocksdb_Options_memtablePrefixBloomBits ( <nl> - JNIEnv * env , jobject jobj , jlong jhandle ) { <nl> - return reinterpret_cast < rocksdb : : Options * > ( <nl> - jhandle ) - > memtable_prefix_bloom_bits ; <nl> - } <nl> - <nl> - / * <nl> - * Class : org_rocksdb_Options <nl> - * Method : setMemtablePrefixBloomBits <nl> - * Signature : ( JI ) V <nl> - * / <nl> - void Java_org_rocksdb_Options_setMemtablePrefixBloomBits ( <nl> - JNIEnv * env , jobject jobj , jlong jhandle , <nl> - jint jmemtable_prefix_bloom_bits ) { <nl> - reinterpret_cast < rocksdb : : Options * > ( <nl> - jhandle ) - > memtable_prefix_bloom_bits = <nl> - static_cast < int32_t > ( jmemtable_prefix_bloom_bits ) ; <nl> - } <nl> - <nl> - / * <nl> - * Class : org_rocksdb_Options <nl> - * Method : memtablePrefixBloomProbes <nl> - * Signature : ( J ) I <nl> - * / <nl> - jint Java_org_rocksdb_Options_memtablePrefixBloomProbes ( <nl> - JNIEnv * env , jobject jobj , jlong jhandle ) { <nl> - return reinterpret_cast < rocksdb : : Options * > ( <nl> - jhandle ) - > memtable_prefix_bloom_probes ; <nl> + jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio ( JNIEnv * env , <nl> + jobject jobj , <nl> + jlong jhandle ) { <nl> + return reinterpret_cast < rocksdb : : Options * > ( jhandle ) <nl> + - > memtable_prefix_bloom_size_ratio ; <nl> } <nl> <nl> / * <nl> * Class : org_rocksdb_Options <nl> - * Method : setMemtablePrefixBloomProbes <nl> + * Method : setMemtablePrefixBloomSizeRatio <nl> * Signature : ( JI ) V <nl> * / <nl> - void 
Java_org_rocksdb_Options_setMemtablePrefixBloomProbes ( <nl> + void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio ( <nl> JNIEnv * env , jobject jobj , jlong jhandle , <nl> - jint jmemtable_prefix_bloom_probes ) { <nl> - reinterpret_cast < rocksdb : : Options * > ( <nl> - jhandle ) - > memtable_prefix_bloom_probes = <nl> - static_cast < int32_t > ( jmemtable_prefix_bloom_probes ) ; <nl> + jdouble jmemtable_prefix_bloom_size_ratio ) { <nl> + reinterpret_cast < rocksdb : : Options * > ( jhandle ) <nl> + - > memtable_prefix_bloom_size_ratio = <nl> + static_cast < double > ( jmemtable_prefix_bloom_size_ratio ) ; <nl> } <nl> <nl> / * <nl> void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks ( <nl> <nl> / * <nl> * Class : org_rocksdb_ColumnFamilyOptions <nl> - * Method : memtablePrefixBloomBits <nl> + * Method : memtablePrefixBloomSizeRatio <nl> * Signature : ( J ) I <nl> * / <nl> - jint Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomBits ( <nl> + jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio ( <nl> JNIEnv * env , jobject jobj , jlong jhandle ) { <nl> - return reinterpret_cast < rocksdb : : ColumnFamilyOptions * > ( <nl> - jhandle ) - > memtable_prefix_bloom_bits ; <nl> - } <nl> - <nl> - / * <nl> - * Class : org_rocksdb_ColumnFamilyOptions <nl> - * Method : setMemtablePrefixBloomBits <nl> - * Signature : ( JI ) V <nl> - * / <nl> - void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomBits ( <nl> - JNIEnv * env , jobject jobj , jlong jhandle , <nl> - jint jmemtable_prefix_bloom_bits ) { <nl> - reinterpret_cast < rocksdb : : ColumnFamilyOptions * > ( <nl> - jhandle ) - > memtable_prefix_bloom_bits = <nl> - static_cast < int32_t > ( jmemtable_prefix_bloom_bits ) ; <nl> - } <nl> - <nl> - / * <nl> - * Class : org_rocksdb_ColumnFamilyOptions <nl> - * Method : memtablePrefixBloomProbes <nl> - * Signature : ( J ) I <nl> - * / <nl> - jint Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomProbes ( <nl> - JNIEnv * env , jobject jobj , jlong jhandle ) { <nl> - return reinterpret_cast < rocksdb : : ColumnFamilyOptions * > ( <nl> - jhandle ) - > memtable_prefix_bloom_probes ; <nl> + return reinterpret_cast < rocksdb : : ColumnFamilyOptions * > ( jhandle ) <nl> + - > memtable_prefix_bloom_size_ratio ; <nl> } <nl> <nl> / * <nl> * Class : org_rocksdb_ColumnFamilyOptions <nl> - * Method : setMemtablePrefixBloomProbes <nl> + * Method : setMemtablePrefixBloomSizeRatio <nl> * Signature : ( JI ) V <nl> * / <nl> - void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomProbes ( <nl> + void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio ( <nl> JNIEnv * env , jobject jobj , jlong jhandle , <nl> - jint jmemtable_prefix_bloom_probes ) { <nl> - reinterpret_cast < rocksdb : : ColumnFamilyOptions * > ( <nl> - jhandle ) - > memtable_prefix_bloom_probes = <nl> - static_cast < int32_t > ( jmemtable_prefix_bloom_probes ) ; <nl> + jdouble jmemtable_prefix_bloom_size_ratio ) { <nl> + reinterpret_cast < rocksdb : : ColumnFamilyOptions * > ( jhandle ) <nl> + - > memtable_prefix_bloom_size_ratio = <nl> + static_cast < double > ( jmemtable_prefix_bloom_size_ratio ) ; <nl> } <nl> <nl> / * <nl> mmm a / java / src / main / java / org / rocksdb / ColumnFamilyOptions . java <nl> ppp b / java / src / main / java / org / rocksdb / ColumnFamilyOptions . 
java <nl> public long inplaceUpdateNumLocks ( ) { <nl> } <nl> <nl> @ Override <nl> - public ColumnFamilyOptions setMemtablePrefixBloomBits ( <nl> - final int memtablePrefixBloomBits ) { <nl> - setMemtablePrefixBloomBits ( nativeHandle_ , memtablePrefixBloomBits ) ; <nl> + public ColumnFamilyOptions setMemtablePrefixBloomSizeRatio ( <nl> + final double memtablePrefixBloomSizeRatio ) { <nl> + setMemtablePrefixBloomBits ( nativeHandle_ , memtablePrefixBloomSizeRatio ) ; <nl> return this ; <nl> } <nl> <nl> @ Override <nl> - public int memtablePrefixBloomBits ( ) { <nl> - return memtablePrefixBloomBits ( nativeHandle_ ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public ColumnFamilyOptions setMemtablePrefixBloomProbes ( <nl> - final int memtablePrefixBloomProbes ) { <nl> - setMemtablePrefixBloomProbes ( nativeHandle_ , memtablePrefixBloomProbes ) ; <nl> - return this ; <nl> - } <nl> - <nl> - @ Override <nl> - public int memtablePrefixBloomProbes ( ) { <nl> - return memtablePrefixBloomProbes ( nativeHandle_ ) ; <nl> + public double memtablePrefixBloomSizeRatio ( ) { <nl> + return memtablePrefixBloomSizeRatio ( nativeHandle_ ) ; <nl> } <nl> <nl> @ Override <nl> private native void setInplaceUpdateNumLocks ( <nl> long handle , long inplaceUpdateNumLocks ) <nl> throws IllegalArgumentException ; <nl> private native long inplaceUpdateNumLocks ( long handle ) ; <nl> - private native void setMemtablePrefixBloomBits ( <nl> - long handle , int memtablePrefixBloomBits ) ; <nl> - private native int memtablePrefixBloomBits ( long handle ) ; <nl> - private native void setMemtablePrefixBloomProbes ( <nl> - long handle , int memtablePrefixBloomProbes ) ; <nl> - private native int memtablePrefixBloomProbes ( long handle ) ; <nl> + private native void setMemtablePrefixBloomBits ( long handle , double memtablePrefixBloomSizeRatio ) ; <nl> + private native double memtablePrefixBloomSizeRatio ( long handle ) ; <nl> private native void setBloomLocality ( <nl> long handle , int bloomLocality ) ; <nl> private native int bloomLocality ( long handle ) ; <nl> mmm a / java / src / main / java / org / rocksdb / ColumnFamilyOptionsInterface . java <nl> ppp b / java / src / main / java / org / rocksdb / ColumnFamilyOptionsInterface . java <nl> Object setVerifyChecksumsInCompaction ( <nl> long inplaceUpdateNumLocks ( ) ; <nl> <nl> / * * <nl> - * Sets the number of bits used in the prefix bloom filter . <nl> + * Sets the size ratio of the memtable used in the prefix bloom filter . <nl> * <nl> * This value will be used only when a prefix - extractor is specified . <nl> * <nl> - * @ param memtablePrefixBloomBits the number of bits used in the <nl> + * @ param memtablePrefixBloomSizeRatio the number of bits used in the <nl> * prefix bloom filter . <nl> * @ return the reference to the current option . <nl> * / <nl> - Object setMemtablePrefixBloomBits ( int memtablePrefixBloomBits ) ; <nl> + Object setMemtablePrefixBloomSizeRatio ( double memtablePrefixBloomSizeRatio ) ; <nl> <nl> / * * <nl> * Returns the number of bits used in the prefix bloom filter . <nl> Object setVerifyChecksumsInCompaction ( <nl> * @ return the number of bloom - bits . <nl> * @ see # useFixedLengthPrefixExtractor ( int ) <nl> * / <nl> - int memtablePrefixBloomBits ( ) ; <nl> - <nl> - / * * <nl> - * The number of hash probes per key used in the mem - table . <nl> - * <nl> - * @ param memtablePrefixBloomProbes the number of hash probes per key . <nl> - * @ return the reference to the current option . 
<nl> - * / <nl> - Object setMemtablePrefixBloomProbes ( int memtablePrefixBloomProbes ) ; <nl> - <nl> - / * * <nl> - * The number of hash probes per key used in the mem - table . <nl> - * <nl> - * @ return the number of hash probes per key . <nl> - * / <nl> - int memtablePrefixBloomProbes ( ) ; <nl> + double memtablePrefixBloomSizeRatio ( ) ; <nl> <nl> / * * <nl> * Control locality of bloom filter probes to improve cache miss rate . <nl> Object setVerifyChecksumsInCompaction ( <nl> * Default : 0 <nl> * <nl> * @ return the level of locality of bloom - filter probes . <nl> - * @ see # setMemtablePrefixBloomProbes ( int ) <nl> + * @ see # setBloomLocality ( int ) <nl> * / <nl> int bloomLocality ( ) ; <nl> <nl> mmm a / java / src / main / java / org / rocksdb / Options . java <nl> ppp b / java / src / main / java / org / rocksdb / Options . java <nl> public Options setInplaceUpdateNumLocks ( <nl> } <nl> <nl> @ Override <nl> - public int memtablePrefixBloomBits ( ) { <nl> - return memtablePrefixBloomBits ( nativeHandle_ ) ; <nl> + public double memtablePrefixBloomSizeRatio ( ) { <nl> + return memtablePrefixBloomSizeRatio ( nativeHandle_ ) ; <nl> } <nl> <nl> @ Override <nl> - public Options setMemtablePrefixBloomBits ( <nl> - final int memtablePrefixBloomBits ) { <nl> - setMemtablePrefixBloomBits ( nativeHandle_ , memtablePrefixBloomBits ) ; <nl> - return this ; <nl> - } <nl> - <nl> - @ Override <nl> - public int memtablePrefixBloomProbes ( ) { <nl> - return memtablePrefixBloomProbes ( nativeHandle_ ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public Options setMemtablePrefixBloomProbes ( <nl> - final int memtablePrefixBloomProbes ) { <nl> - setMemtablePrefixBloomProbes ( nativeHandle_ , memtablePrefixBloomProbes ) ; <nl> + public Options setMemtablePrefixBloomSizeRatio ( final double memtablePrefixBloomSizeRatio ) { <nl> + setMemtablePrefixBloomSizeRatio ( nativeHandle_ , memtablePrefixBloomSizeRatio ) ; <nl> return this ; <nl> } <nl> <nl> private native void setInplaceUpdateNumLocks ( <nl> long handle , long inplaceUpdateNumLocks ) <nl> throws IllegalArgumentException ; <nl> private native long inplaceUpdateNumLocks ( long handle ) ; <nl> - private native void setMemtablePrefixBloomBits ( <nl> - long handle , int memtablePrefixBloomBits ) ; <nl> - private native int memtablePrefixBloomBits ( long handle ) ; <nl> - private native void setMemtablePrefixBloomProbes ( <nl> - long handle , int memtablePrefixBloomProbes ) ; <nl> - private native int memtablePrefixBloomProbes ( long handle ) ; <nl> + private native void setMemtablePrefixBloomSizeRatio ( <nl> + long handle , double memtablePrefixBloomSizeRatio ) ; <nl> + private native double memtablePrefixBloomSizeRatio ( long handle ) ; <nl> private native void setBloomLocality ( <nl> long handle , int bloomLocality ) ; <nl> private native int bloomLocality ( long handle ) ; <nl> mmm a / java / src / test / java / org / rocksdb / ColumnFamilyOptionsTest . java <nl> ppp b / java / src / test / java / org / rocksdb / ColumnFamilyOptionsTest . java <nl> public void inplaceUpdateNumLocks ( ) throws RocksDBException { <nl> } <nl> <nl> @ Test <nl> - public void memtablePrefixBloomBits ( ) { <nl> + public void memtablePrefixBloomSizeRatio ( ) { <nl> try ( final ColumnFamilyOptions opt = new ColumnFamilyOptions ( ) ) { <nl> - final int intValue = rand . nextInt ( ) ; <nl> - opt . setMemtablePrefixBloomBits ( intValue ) ; <nl> - assertThat ( opt . memtablePrefixBloomBits ( ) ) . 
isEqualTo ( intValue ) ; <nl> - } <nl> - } <nl> - <nl> - @ Test <nl> - public void memtablePrefixBloomProbes ( ) { <nl> - try ( final ColumnFamilyOptions opt = new ColumnFamilyOptions ( ) ) { <nl> - final int intValue = rand . nextInt ( ) ; <nl> - opt . setMemtablePrefixBloomProbes ( intValue ) ; <nl> - assertThat ( opt . memtablePrefixBloomProbes ( ) ) . isEqualTo ( intValue ) ; <nl> + final double doubleValue = rand . nextDouble ( ) ; <nl> + opt . setMemtablePrefixBloomSizeRatio ( doubleValue ) ; <nl> + assertThat ( opt . memtablePrefixBloomSizeRatio ( ) ) . isEqualTo ( doubleValue ) ; <nl> } <nl> } <nl> <nl> mmm a / java / src / test / java / org / rocksdb / OptionsTest . java <nl> ppp b / java / src / test / java / org / rocksdb / OptionsTest . java <nl> public void inplaceUpdateNumLocks ( ) throws RocksDBException { <nl> } <nl> <nl> @ Test <nl> - public void memtablePrefixBloomBits ( ) { <nl> + public void memtablePrefixBloomSizeRatio ( ) { <nl> try ( final Options opt = new Options ( ) ) { <nl> - final int intValue = rand . nextInt ( ) ; <nl> - opt . setMemtablePrefixBloomBits ( intValue ) ; <nl> - assertThat ( opt . memtablePrefixBloomBits ( ) ) . isEqualTo ( intValue ) ; <nl> - } <nl> - } <nl> - <nl> - @ Test <nl> - public void memtablePrefixBloomProbes ( ) { <nl> - try ( final Options opt = new Options ( ) ) { <nl> - final int intValue = rand . nextInt ( ) ; <nl> - opt . setMemtablePrefixBloomProbes ( intValue ) ; <nl> - assertThat ( opt . memtablePrefixBloomProbes ( ) ) . isEqualTo ( intValue ) ; <nl> + final double doubleValue = rand . nextDouble ( ) ; <nl> + opt . setMemtablePrefixBloomSizeRatio ( doubleValue ) ; <nl> + assertThat ( opt . memtablePrefixBloomSizeRatio ( ) ) . isEqualTo ( doubleValue ) ; <nl> } <nl> } <nl> <nl> mmm a / tools / db_bench_tool . cc <nl> ppp b / tools / db_bench_tool . cc <nl> DEFINE_int32 ( skip_table_builder_flush , false , " Skip flushing block in " <nl> <nl> DEFINE_int32 ( bloom_bits , - 1 , " Bloom filter bits per key . Negative means " <nl> " use default settings . " ) ; <nl> - DEFINE_int32 ( memtable_bloom_bits , 0 , " Bloom filter bits per key for memtable . " <nl> - " Negative means no bloom filter . " ) ; <nl> + DEFINE_double ( memtable_bloom_size_ratio , 0 , <nl> + " Ratio of memtable size used for bloom filter . 0 means no bloom " <nl> + " filter . " ) ; <nl> <nl> DEFINE_bool ( use_existing_db , false , " If true , do not destroy the existing " <nl> " database . If you set this flag and also specify a benchmark that " <nl> class Benchmark { <nl> exit ( 1 ) ; <nl> } <nl> } <nl> - options . memtable_prefix_bloom_bits = FLAGS_memtable_bloom_bits ; <nl> + options . memtable_prefix_bloom_size_ratio = FLAGS_memtable_bloom_size_ratio ; <nl> options . bloom_locality = FLAGS_bloom_locality ; <nl> options . max_file_opening_threads = FLAGS_file_opening_threads ; <nl> options . new_table_reader_for_compaction_inputs = <nl> mmm a / util / mutable_cf_options . cc <nl> ppp b / util / mutable_cf_options . 
cc <nl> void MutableCFOptions : : Dump ( Logger * log ) const { <nl> max_write_buffer_number ) ; <nl> Log ( log , " arena_block_size : % " ROCKSDB_PRIszt , <nl> arena_block_size ) ; <nl> - Log ( log , " memtable_prefix_bloom_bits : % " PRIu32 , <nl> - memtable_prefix_bloom_bits ) ; <nl> - Log ( log , " memtable_prefix_bloom_probes : % " PRIu32 , <nl> - memtable_prefix_bloom_probes ) ; <nl> + Log ( log , " memtable_prefix_bloom_ratio : % f " , <nl> + memtable_prefix_bloom_size_ratio ) ; <nl> Log ( log , " memtable_prefix_bloom_huge_page_tlb_size : % " ROCKSDB_PRIszt , <nl> memtable_prefix_bloom_huge_page_tlb_size ) ; <nl> Log ( log , " max_successive_merges : % " ROCKSDB_PRIszt , <nl> mmm a / util / mutable_cf_options . h <nl> ppp b / util / mutable_cf_options . h <nl> struct MutableCFOptions { <nl> : write_buffer_size ( options . write_buffer_size ) , <nl> max_write_buffer_number ( options . max_write_buffer_number ) , <nl> arena_block_size ( options . arena_block_size ) , <nl> - memtable_prefix_bloom_bits ( options . memtable_prefix_bloom_bits ) , <nl> - memtable_prefix_bloom_probes ( options . memtable_prefix_bloom_probes ) , <nl> + memtable_prefix_bloom_size_ratio ( <nl> + options . memtable_prefix_bloom_size_ratio ) , <nl> memtable_prefix_bloom_huge_page_tlb_size ( <nl> options . memtable_prefix_bloom_huge_page_tlb_size ) , <nl> max_successive_merges ( options . max_successive_merges ) , <nl> struct MutableCFOptions { <nl> : write_buffer_size ( 0 ) , <nl> max_write_buffer_number ( 0 ) , <nl> arena_block_size ( 0 ) , <nl> - memtable_prefix_bloom_bits ( 0 ) , <nl> - memtable_prefix_bloom_probes ( 0 ) , <nl> + memtable_prefix_bloom_size_ratio ( 0 ) , <nl> memtable_prefix_bloom_huge_page_tlb_size ( 0 ) , <nl> max_successive_merges ( 0 ) , <nl> filter_deletes ( false ) , <nl> struct MutableCFOptions { <nl> size_t write_buffer_size ; <nl> int max_write_buffer_number ; <nl> size_t arena_block_size ; <nl> - uint32_t memtable_prefix_bloom_bits ; <nl> - uint32_t memtable_prefix_bloom_probes ; <nl> + double memtable_prefix_bloom_size_ratio ; <nl> size_t memtable_prefix_bloom_huge_page_tlb_size ; <nl> size_t max_successive_merges ; <nl> bool filter_deletes ; <nl> mmm a / util / options . cc <nl> ppp b / util / options . cc <nl> ColumnFamilyOptions : : ColumnFamilyOptions ( ) <nl> inplace_update_support ( false ) , <nl> inplace_update_num_locks ( 10000 ) , <nl> inplace_callback ( nullptr ) , <nl> - memtable_prefix_bloom_bits ( 0 ) , <nl> - memtable_prefix_bloom_probes ( 6 ) , <nl> + memtable_prefix_bloom_size_ratio ( 0 . 0 ) , <nl> memtable_prefix_bloom_huge_page_tlb_size ( 0 ) , <nl> bloom_locality ( 0 ) , <nl> max_successive_merges ( 0 ) , <nl> ColumnFamilyOptions : : ColumnFamilyOptions ( const Options & options ) <nl> inplace_update_support ( options . inplace_update_support ) , <nl> inplace_update_num_locks ( options . inplace_update_num_locks ) , <nl> inplace_callback ( options . inplace_callback ) , <nl> - memtable_prefix_bloom_bits ( options . memtable_prefix_bloom_bits ) , <nl> - memtable_prefix_bloom_probes ( options . memtable_prefix_bloom_probes ) , <nl> + memtable_prefix_bloom_size_ratio ( <nl> + options . memtable_prefix_bloom_size_ratio ) , <nl> memtable_prefix_bloom_huge_page_tlb_size ( <nl> options . memtable_prefix_bloom_huge_page_tlb_size ) , <nl> bloom_locality ( options . bloom_locality ) , <nl> void ColumnFamilyOptions : : Dump ( Logger * log ) const { <nl> Header ( log , " Options . 
min_partial_merge_operands : % u " , <nl> min_partial_merge_operands ) ; <nl> / / TODO : easier config for bloom ( maybe based on avg key / value size ) <nl> - Header ( log , " Options . memtable_prefix_bloom_bits : % d " , <nl> - memtable_prefix_bloom_bits ) ; <nl> - Header ( log , " Options . memtable_prefix_bloom_probes : % d " , <nl> - memtable_prefix_bloom_probes ) ; <nl> + Header ( log , " Options . memtable_prefix_bloom_size_ratio : % f " , <nl> + memtable_prefix_bloom_size_ratio ) ; <nl> <nl> Header ( log , <nl> " Options . memtable_prefix_bloom_huge_page_tlb_size : % " ROCKSDB_PRIszt , <nl> mmm a / util / options_helper . cc <nl> ppp b / util / options_helper . cc <nl> bool ParseMemtableOptions ( const std : : string & name , const std : : string & value , <nl> } else if ( name = = " arena_block_size " ) { <nl> new_options - > arena_block_size = ParseSizeT ( value ) ; <nl> } else if ( name = = " memtable_prefix_bloom_bits " ) { <nl> - new_options - > memtable_prefix_bloom_bits = ParseUint32 ( value ) ; <nl> + / / deprecated <nl> + } else if ( name = = " memtable_prefix_bloom_size_ratio " ) { <nl> + new_options - > memtable_prefix_bloom_size_ratio = ParseDouble ( value ) ; <nl> } else if ( name = = " memtable_prefix_bloom_probes " ) { <nl> - new_options - > memtable_prefix_bloom_probes = ParseUint32 ( value ) ; <nl> + / / Deprecated <nl> } else if ( name = = " memtable_prefix_bloom_huge_page_tlb_size " ) { <nl> new_options - > memtable_prefix_bloom_huge_page_tlb_size = <nl> ParseSizeT ( value ) ; <nl> ColumnFamilyOptions BuildColumnFamilyOptions ( <nl> cf_opts . write_buffer_size = mutable_cf_options . write_buffer_size ; <nl> cf_opts . max_write_buffer_number = mutable_cf_options . max_write_buffer_number ; <nl> cf_opts . arena_block_size = mutable_cf_options . arena_block_size ; <nl> - cf_opts . memtable_prefix_bloom_bits = <nl> - mutable_cf_options . memtable_prefix_bloom_bits ; <nl> - cf_opts . memtable_prefix_bloom_probes = <nl> - mutable_cf_options . memtable_prefix_bloom_probes ; <nl> + cf_opts . memtable_prefix_bloom_size_ratio = <nl> + mutable_cf_options . memtable_prefix_bloom_size_ratio ; <nl> cf_opts . memtable_prefix_bloom_huge_page_tlb_size = <nl> mutable_cf_options . memtable_prefix_bloom_huge_page_tlb_size ; <nl> cf_opts . max_successive_merges = mutable_cf_options . max_successive_merges ; <nl> mmm a / util / options_helper . h <nl> ppp b / util / options_helper . 
h <nl> static std : : unordered_map < std : : string , OptionTypeInfo > cf_options_type_info = { <nl> { offsetof ( struct ColumnFamilyOptions , bloom_locality ) , <nl> OptionType : : kUInt32T , OptionVerificationType : : kNormal } } , <nl> { " memtable_prefix_bloom_bits " , <nl> - { offsetof ( struct ColumnFamilyOptions , memtable_prefix_bloom_bits ) , <nl> - OptionType : : kUInt32T , OptionVerificationType : : kNormal } } , <nl> + { 0 , OptionType : : kUInt32T , OptionVerificationType : : kDeprecated } } , <nl> + { " memtable_prefix_bloom_size_ratio " , <nl> + { offsetof ( struct ColumnFamilyOptions , memtable_prefix_bloom_size_ratio ) , <nl> + OptionType : : kDouble , OptionVerificationType : : kNormal } } , <nl> { " memtable_prefix_bloom_probes " , <nl> - { offsetof ( struct ColumnFamilyOptions , memtable_prefix_bloom_probes ) , <nl> - OptionType : : kUInt32T , OptionVerificationType : : kNormal } } , <nl> + { 0 , OptionType : : kUInt32T , OptionVerificationType : : kDeprecated } } , <nl> { " min_partial_merge_operands " , <nl> { offsetof ( struct ColumnFamilyOptions , min_partial_merge_operands ) , <nl> OptionType : : kUInt32T , OptionVerificationType : : kNormal } } , <nl> mmm a / util / options_settable_test . cc <nl> ppp b / util / options_settable_test . cc <nl> TEST_F ( OptionsSettableTest , ColumnFamilyOptionsAllFieldsSettable ) { <nl> " max_write_buffer_number_to_maintain = 84 ; " <nl> " verify_checksums_in_compaction = false ; " <nl> " merge_operator = aabcxehazrMergeOperator ; " <nl> - " memtable_prefix_bloom_bits = 4642 ; " <nl> + " memtable_prefix_bloom_size_ratio = 0 . 4642 ; " <nl> " paranoid_file_checks = true ; " <nl> " inplace_update_num_locks = 7429 ; " <nl> " optimize_filters_for_hits = false ; " <nl> " level_compaction_dynamic_level_bytes = false ; " <nl> " inplace_update_support = false ; " <nl> " compaction_style = kCompactionStyleFIFO ; " <nl> - " memtable_prefix_bloom_probes = 2511 ; " <nl> " purge_redundant_kvs_while_flush = true ; " <nl> " filter_deletes = false ; " <nl> " hard_pending_compaction_bytes_limit = 0 ; " <nl> mmm a / util / options_test . cc <nl> ppp b / util / options_test . cc <nl> TEST_F ( OptionsTest , GetOptionsFromMapTest ) { <nl> { " report_bg_io_stats " , " true " } , <nl> { " compaction_measure_io_stats " , " false " } , <nl> { " inplace_update_num_locks " , " 25 " } , <nl> - { " memtable_prefix_bloom_bits " , " 26 " } , <nl> - { " memtable_prefix_bloom_probes " , " 27 " } , <nl> + { " memtable_prefix_bloom_size_ratio " , " 0 . 26 " } , <nl> { " memtable_prefix_bloom_huge_page_tlb_size " , " 28 " } , <nl> { " bloom_locality " , " 29 " } , <nl> { " max_successive_merges " , " 30 " } , <nl> TEST_F ( OptionsTest , GetOptionsFromMapTest ) { <nl> static_cast < uint64_t > ( 24 ) ) ; <nl> ASSERT_EQ ( new_cf_opt . inplace_update_support , true ) ; <nl> ASSERT_EQ ( new_cf_opt . inplace_update_num_locks , 25U ) ; <nl> - ASSERT_EQ ( new_cf_opt . memtable_prefix_bloom_bits , 26U ) ; <nl> - ASSERT_EQ ( new_cf_opt . memtable_prefix_bloom_probes , 27U ) ; <nl> + ASSERT_EQ ( new_cf_opt . memtable_prefix_bloom_size_ratio , 0 . 26 ) ; <nl> ASSERT_EQ ( new_cf_opt . memtable_prefix_bloom_huge_page_tlb_size , 28U ) ; <nl> ASSERT_EQ ( new_cf_opt . bloom_locality , 29U ) ; <nl> ASSERT_EQ ( new_cf_opt . 
max_successive_merges , 30U ) ; <nl> TEST_F ( OptionsTest , GetColumnFamilyOptionsFromStringTest ) { <nl> const int64_t tera = 1024 * giga ; <nl> <nl> / / Units ( k ) <nl> - ASSERT_OK ( GetColumnFamilyOptionsFromString ( base_cf_opt , <nl> - " memtable_prefix_bloom_bits = 14k ; max_write_buffer_number = - 15K " , <nl> - & new_cf_opt ) ) ; <nl> - ASSERT_EQ ( new_cf_opt . memtable_prefix_bloom_bits , 14UL * kilo ) ; <nl> + ASSERT_OK ( GetColumnFamilyOptionsFromString ( <nl> + base_cf_opt , " max_write_buffer_number = - 15K " , & new_cf_opt ) ) ; <nl> ASSERT_EQ ( new_cf_opt . max_write_buffer_number , - 15 * kilo ) ; <nl> / / Units ( m ) <nl> ASSERT_OK ( GetColumnFamilyOptionsFromString ( base_cf_opt , <nl> mmm a / util / testutil . cc <nl> ppp b / util / testutil . cc <nl> void RandomInitCFOptions ( ColumnFamilyOptions * cf_opt , Random * rnd ) { <nl> / / double options <nl> cf_opt - > hard_rate_limit = static_cast < double > ( rnd - > Uniform ( 10000 ) ) / 13 ; <nl> cf_opt - > soft_rate_limit = static_cast < double > ( rnd - > Uniform ( 10000 ) ) / 13 ; <nl> + cf_opt - > memtable_prefix_bloom_size_ratio = <nl> + static_cast < double > ( rnd - > Uniform ( 10000 ) ) / 20000 . 0 ; <nl> <nl> / / int options <nl> cf_opt - > expanded_compaction_factor = rnd - > Uniform ( 100 ) ; <nl> void RandomInitCFOptions ( ColumnFamilyOptions * cf_opt , Random * rnd ) { <nl> <nl> / / uint32_t options <nl> cf_opt - > bloom_locality = rnd - > Uniform ( 10000 ) ; <nl> - cf_opt - > memtable_prefix_bloom_bits = rnd - > Uniform ( 10000 ) ; <nl> - cf_opt - > memtable_prefix_bloom_probes = rnd - > Uniform ( 10000 ) ; <nl> cf_opt - > min_partial_merge_operands = rnd - > Uniform ( 10000 ) ; <nl> cf_opt - > max_bytes_for_level_base = rnd - > Uniform ( 10000 ) ; <nl> <nl>
|
memtable_prefix_bloom_bits - > memtable_prefix_bloom_bits_ratio and deprecate memtable_prefix_bloom_probes
|
facebook/rocksdb
|
20699df8438d14568915bb3a8e7038ce224f1e1c
|
2016-06-10T19:12:10Z
|
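Note on the RocksDB row above: it replaces the absolute-size options memtable_prefix_bloom_bits and memtable_prefix_bloom_probes with a single memtable_prefix_bloom_size_ratio, which sizes the memtable prefix bloom filter as a fraction of write_buffer_size. A minimal usage sketch against the post-commit public API follows; the 0.1 value and the 4-byte prefix extractor are arbitrary illustrations, not recommendations from the commit.

    #include "rocksdb/options.h"
    #include "rocksdb/slice_transform.h"

    int main() {
      rocksdb::ColumnFamilyOptions cf_opts;
      // Prefix bloom filters only take effect when a prefix extractor is
      // configured (assumption: standard RocksDB usage, not shown in this row).
      cf_opts.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
      // The filter is now sized relative to the memtable: here 10% of
      // write_buffer_size; 0.0 (the new default) disables it entirely.
      cf_opts.memtable_prefix_bloom_size_ratio = 0.1;
      return 0;
    }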
mmm a / tools / run_tests / run_xds_tests . py <nl> ppp b / tools / run_tests / run_xds_tests . py <nl> def test_round_robin ( gcp , backend_service , instance_group ) : <nl> <nl> <nl> def test_secondary_locality_gets_no_requests_on_partial_primary_failure ( <nl> - gcp , backend_service , primary_instance_group , <nl> - secondary_zone_instance_group ) : <nl> + gcp , backend_service , primary_instance_group , <nl> + secondary_zone_instance_group ) : <nl> try : <nl> patch_backend_instances ( <nl> gcp , backend_service , <nl> def test_secondary_locality_gets_no_requests_on_partial_primary_failure ( <nl> <nl> <nl> def test_secondary_locality_gets_requests_on_primary_failure ( <nl> - gcp , backend_service , primary_instance_group , <nl> - secondary_zone_instance_group ) : <nl> + gcp , backend_service , primary_instance_group , <nl> + secondary_zone_instance_group ) : <nl> try : <nl> patch_backend_instances ( <nl> gcp , backend_service , <nl>
|
yapf
|
grpc/grpc
|
0ddf5565e3ad9174e44057bad0267a518ec3083c
|
2020-03-03T02:20:44Z
|
mmm a / doc / classes / Viewport . xml <nl> ppp b / doc / classes / Viewport . xml <nl> <nl> < return type = " bool " > <nl> < / return > <nl> < description > <nl> + Returns [ code ] true [ / code ] if the viewport is currently performing a drag operation . <nl> < / description > <nl> < / method > <nl> < method name = " input " > <nl>
|
Merge pull request from RobertBColton / patch - 1
|
godotengine/godot
|
5378a8f5b0d58b1622e737a39823ef5639e58405
|
2019-11-25T13:26:10Z
|
mmm a / tensorflow / lite / delegates / gpu / cl / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / cl / BUILD <nl> cc_library ( <nl> hdrs = [ " cl_arguments . h " ] , <nl> deps = [ <nl> " : arguments " , <nl> + " : buffer " , <nl> " : cl_context " , <nl> " : device_info " , <nl> " : gpu_object " , <nl> + " : linear_storage " , <nl> " : tensor " , <nl> " : tensor_type " , <nl> + " : texture2d " , <nl> " / / tensorflow / lite / delegates / gpu / common : access_type " , <nl> " / / tensorflow / lite / delegates / gpu / common : data_type " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / buffer . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / buffer . cc <nl> absl : : Status BufferDescriptor : : PerformGetPtrSelector ( <nl> return absl : : OkStatus ( ) ; <nl> } <nl> <nl> - absl : : Status BufferDescriptor : : CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const { <nl> - Buffer gpu_buffer ; <nl> - RETURN_IF_ERROR ( gpu_buffer . CreateFromBufferDescriptor ( * this , context ) ) ; <nl> - * result = absl : : make_unique < Buffer > ( std : : move ( gpu_buffer ) ) ; <nl> - return absl : : OkStatus ( ) ; <nl> - } <nl> - <nl> Buffer : : Buffer ( cl_mem buffer , size_t size_in_bytes ) <nl> : buffer_ ( buffer ) , size_ ( size_in_bytes ) { } <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / cl / buffer . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / buffer . h <nl> struct BufferDescriptor : public GPUObjectDescriptor { <nl> const std : : vector < std : : string > & args , <nl> const std : : vector < std : : string > & template_args , std : : string * result ) const ; <nl> <nl> - absl : : Status CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const override ; <nl> void Release ( ) override ; <nl> } ; <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / cl / cl_arguments . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / cl_arguments . cc <nl> limitations under the License . <nl> # include " absl / strings / match . h " <nl> # include " absl / strings / str_cat . h " <nl> # include " absl / strings / substitute . h " <nl> + # include " tensorflow / lite / delegates / gpu / cl / buffer . h " <nl> # include " tensorflow / lite / delegates / gpu / cl / gpu_object . h " <nl> + # include " tensorflow / lite / delegates / gpu / cl / linear_storage . h " <nl> + # include " tensorflow / lite / delegates / gpu / cl / tensor . h " <nl> # include " tensorflow / lite / delegates / gpu / cl / tensor_type . h " <nl> + # include " tensorflow / lite / delegates / gpu / cl / texture2d . h " <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> <nl> namespace tflite { <nl> std : : string GetDefaultSamplers ( const DeviceInfo & device_info ) { <nl> <nl> return result ; <nl> } <nl> + <nl> + absl : : Status CreateCLObject ( GPUObjectDescriptor * desc , CLContext * context , <nl> + GPUObjectPtr * result ) { <nl> + const auto * buffer_desc = dynamic_cast < const BufferDescriptor * > ( desc ) ; <nl> + if ( buffer_desc ) { <nl> + Buffer gpu_buffer ; <nl> + RETURN_IF_ERROR ( <nl> + gpu_buffer . 
CreateFromBufferDescriptor ( * buffer_desc , context ) ) ; <nl> + * result = absl : : make_unique < Buffer > ( std : : move ( gpu_buffer ) ) ; <nl> + return absl : : OkStatus ( ) ; <nl> + } <nl> + <nl> + const auto * texture_desc = dynamic_cast < const Texture2DDescriptor * > ( desc ) ; <nl> + if ( texture_desc ) { <nl> + Texture2D gpu_texture ; <nl> + RETURN_IF_ERROR ( <nl> + gpu_texture . CreateFromTexture2DDescriptor ( * texture_desc , context ) ) ; <nl> + * result = absl : : make_unique < Texture2D > ( std : : move ( gpu_texture ) ) ; <nl> + return absl : : OkStatus ( ) ; <nl> + } <nl> + <nl> + const auto * linear_desc = dynamic_cast < const TensorLinearDescriptor * > ( desc ) ; <nl> + if ( linear_desc ) { <nl> + LinearStorage gpu_storage ; <nl> + RETURN_IF_ERROR ( <nl> + gpu_storage . CreateFromTensorLinearDescriptor ( * linear_desc , context ) ) ; <nl> + * result = absl : : make_unique < LinearStorage > ( std : : move ( gpu_storage ) ) ; <nl> + return absl : : OkStatus ( ) ; <nl> + } <nl> + <nl> + const auto * tensor_desc = dynamic_cast < const TensorDescriptor * > ( desc ) ; <nl> + if ( tensor_desc ) { <nl> + Tensor gpu_tensor ; <nl> + RETURN_IF_ERROR ( gpu_tensor . CreateFromDescriptor ( * tensor_desc , context ) ) ; <nl> + * result = absl : : make_unique < Tensor > ( std : : move ( gpu_tensor ) ) ; <nl> + return absl : : OkStatus ( ) ; <nl> + } <nl> + <nl> + return absl : : InvalidArgumentError ( " Unknown GPU descriptor . " ) ; <nl> + } <nl> + <nl> } / / namespace <nl> <nl> / / Static <nl> absl : : Status CLArguments : : AllocateObjects ( const Arguments & args , <nl> objects_ . resize ( args . objects_ . size ( ) ) ; <nl> int i = 0 ; <nl> for ( auto & t : args . objects_ ) { <nl> - RETURN_IF_ERROR ( t . second - > CreateGPUObject ( context , & objects_ [ i ] ) ) ; <nl> + RETURN_IF_ERROR ( CreateCLObject ( t . second . get ( ) , context , & objects_ [ i ] ) ) ; <nl> i + + ; <nl> } <nl> return absl : : OkStatus ( ) ; <nl> mmm a / tensorflow / lite / delegates / gpu / cl / gpu_object . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / gpu_object . h <nl> class GPUObjectDescriptor { <nl> } <nl> virtual GPUResources GetGPUResources ( ) const { return GPUResources ( ) ; } <nl> <nl> - virtual absl : : Status CreateGPUObject ( <nl> - CLContext * context , std : : unique_ptr < GPUObject > * result ) const { <nl> - return absl : : OkStatus ( ) ; <nl> - } <nl> virtual void Release ( ) { } <nl> <nl> void SetAccess ( AccessType access_type ) { access_type_ = access_type ; } <nl> mmm a / tensorflow / lite / delegates / gpu / cl / linear_storage . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / linear_storage . cc <nl> absl : : Status TensorLinearDescriptor : : PerformReadSelector ( <nl> } <nl> } <nl> <nl> - absl : : Status TensorLinearDescriptor : : CreateGPUObject ( <nl> - CLContext * context , GPUObjectPtr * result ) const { <nl> - LinearStorage gpu_storage ; <nl> - RETURN_IF_ERROR ( gpu_storage . CreateFromTensorLinearDescriptor ( * this , context ) ) ; <nl> - * result = absl : : make_unique < LinearStorage > ( std : : move ( gpu_storage ) ) ; <nl> - return absl : : OkStatus ( ) ; <nl> - } <nl> - <nl> void TensorLinearDescriptor : : UploadLinearData ( <nl> const tflite : : gpu : : Tensor < Linear , DataType : : FLOAT32 > & src , <nl> int aligned_size ) { <nl> mmm a / tensorflow / lite / delegates / gpu / cl / linear_storage . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / linear_storage . 
h <nl> struct TensorLinearDescriptor : public GPUObjectDescriptor { <nl> absl : : Status PerformReadSelector ( const std : : vector < std : : string > & args , <nl> std : : string * result ) const ; <nl> <nl> - absl : : Status CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const override ; <nl> void Release ( ) override ; <nl> } ; <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / cl / tensor . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / tensor . cc <nl> absl : : Status CreateTensorShared ( const CLContext & context , const BHWDC & shape , <nl> <nl> } / / namespace <nl> <nl> - absl : : Status TensorDescriptor : : CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const { <nl> - Tensor gpu_tensor ; <nl> - RETURN_IF_ERROR ( gpu_tensor . CreateFromDescriptor ( * this , context ) ) ; <nl> - * result = absl : : make_unique < Tensor > ( std : : move ( gpu_tensor ) ) ; <nl> - return absl : : OkStatus ( ) ; <nl> - } <nl> - <nl> Tensor : : Tensor ( cl_mem memory , bool memory_owner , const BHWC & shape , <nl> const TensorDescriptor & descriptor ) <nl> : memory_ ( memory ) , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / tensor_type . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / tensor_type . h <nl> struct TensorDescriptor : public GPUObjectDescriptor { <nl> <nl> GPUResources GetGPUResources ( ) const override ; <nl> <nl> - absl : : Status CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const override ; <nl> void Release ( ) override { data . clear ( ) ; } <nl> <nl> bool HasAxis ( Axis axis ) const ; <nl> mmm a / tensorflow / lite / delegates / gpu / cl / texture2d . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / texture2d . cc <nl> absl : : Status Texture2DDescriptor : : PerformReadSelector ( <nl> return absl : : OkStatus ( ) ; <nl> } <nl> <nl> - absl : : Status Texture2DDescriptor : : CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const { <nl> - Texture2D gpu_texture ; <nl> - RETURN_IF_ERROR ( gpu_texture . CreateFromTexture2DDescriptor ( * this , context ) ) ; <nl> - * result = absl : : make_unique < Texture2D > ( std : : move ( gpu_texture ) ) ; <nl> - return absl : : OkStatus ( ) ; <nl> - } <nl> - <nl> Texture2D : : Texture2D ( cl_mem texture , int width , int height , <nl> cl_channel_type type ) <nl> : texture_ ( texture ) , width_ ( width ) , height_ ( height ) , channel_type_ ( type ) { } <nl> mmm a / tensorflow / lite / delegates / gpu / cl / texture2d . h <nl> ppp b / tensorflow / lite / delegates / gpu / cl / texture2d . h <nl> struct Texture2DDescriptor : public GPUObjectDescriptor { <nl> absl : : Status PerformReadSelector ( const std : : vector < std : : string > & args , <nl> std : : string * result ) const ; <nl> <nl> - absl : : Status CreateGPUObject ( CLContext * context , <nl> - GPUObjectPtr * result ) const override ; <nl> void Release ( ) override ; <nl> } ; <nl> <nl>
|
Removed virtual method that uses OpenCL API element ( cl_context ) from GPUObjectDescriptor .
|
tensorflow/tensorflow
|
44baa200d29c41a69d751a302643905e8cea3b4f
|
2020-10-27T20:13:07Z
|
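Note on the TensorFlow row above: it moves GPU-object creation out of the descriptor hierarchy into a free function that dispatches with dynamic_cast, so GPUObjectDescriptor no longer depends on the OpenCL cl_context type. Below is a self-contained sketch of that dispatch pattern using simplified stand-in types; none of these names are the TFLite ones.

    #include <memory>

    struct Desc { virtual ~Desc() = default; };
    struct BufferDesc final : Desc {};
    struct TextureDesc final : Desc {};

    struct Object { virtual ~Object() = default; };
    struct BufferObj final : Object {};
    struct TextureObj final : Object {};

    // The factory lives outside the descriptor classes, so the base class
    // stays free of API-specific parameters (the cl_context in the real code).
    std::unique_ptr<Object> CreateObject(const Desc* desc) {
      if (dynamic_cast<const BufferDesc*>(desc) != nullptr) {
        return std::make_unique<BufferObj>();
      }
      if (dynamic_cast<const TextureDesc*>(desc) != nullptr) {
        return std::make_unique<TextureObj>();
      }
      return nullptr;  // unknown descriptor kind
    }

    int main() {
      BufferDesc d;
      auto obj = CreateObject(&d);  // dispatches to the BufferObj branch
      return obj ? 0 : 1;
    }

The trade-off mirrors the commit: the virtual factory was simpler to extend per descriptor, but hoisting it into one function keeps API-specific types (contexts, handles) out of the shared descriptor interface.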
mmm a / xbmc / addons / Scraper . cpp <nl> ppp b / xbmc / addons / Scraper . cpp <nl> bool CScraper : : Supports ( const CONTENT_TYPE & content ) const <nl> bool CScraper : : SetPathSettings ( CONTENT_TYPE content , const std : : string & xml ) <nl> { <nl> m_pathContent = content ; <nl> - if ( ! LoadSettings ( false ) ) <nl> + if ( ! LoadSettings ( false , false ) ) <nl> return false ; <nl> <nl> if ( xml . empty ( ) ) <nl>
|
[ addons ] CScraper : don ' t load user setting values when initializing settings because those are retrieved from the database
|
xbmc/xbmc
|
90c432079b96493aac6f917f74c8628eaa7b89c4
|
2017-06-25T10:25:13Z
|
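Note on the Kodi row above: it passes a second boolean through CScraper::SetPathSettings so that LoadSettings loads only the setting definitions, leaving user-set values to be restored from the database. A self-contained sketch of that definitions-versus-user-values split follows; the parameter names are assumptions inferred from the commit message, not copied from the Kodi headers.

    // Illustration of the pattern the commit relies on: definitions always
    // load, while user values are optional because the caller may restore
    // them from another source (here, the database).
    struct Settings {
      bool definitions_loaded = false;
      bool user_values_loaded = false;

      bool Load(bool force_reload, bool load_user_values) {
        if (definitions_loaded && !force_reload) return true;
        definitions_loaded = true;
        if (load_user_values) user_values_loaded = true;
        return true;
      }
    };

    int main() {
      Settings s;
      // Mirrors the patched call site: definitions only, no user values.
      s.Load(/*force_reload=*/false, /*load_user_values=*/false);
      return (s.definitions_loaded && !s.user_values_loaded) ? 0 : 1;
    }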
mmm a / ISSUE_TEMPLATE <nl> ppp b / ISSUE_TEMPLATE <nl> <nl> * * Operating system or device : * * <nl> <nl> <nl> - <nl> * * Issue description * * ( what happened , and what was expected ) : <nl> <nl> <nl> - <nl> * * Steps to reproduce : * * <nl> <nl> <nl> - <nl> * * Link to minimal example project * * ( optional but very welcome ) : <nl> - <nl>
|
Remove some extraneous newlines
|
godotengine/godot
|
6a25a647b73e352f3c447cb97bdf01550be4a6cd
|
2016-02-18T23:03:49Z
|