diff --git "a/20230449_matlab.jsonl" "b/20230449_matlab.jsonl" new file mode 100644--- /dev/null +++ "b/20230449_matlab.jsonl" @@ -0,0 +1,980 @@ +{"plateform": "github", "repo_name": "LorisMarini/content_caching_with_reinforcement_learning-master", "name": "Configure_The_Network.m", "ext": ".m", "path": "content_caching_with_reinforcement_learning-master/network_configurations/Configure_The_Network.m", "size": 5652, "source_encoding": "utf_8", "md5": "a8761718fc1d3605ad9b8219e6207502", "text": "function [ Network_Parameters ] = Configure_The_Network( N_Networks, Diameter, Radius_Protected_Area, Step_Size, H, N, Alpha)\r\n\r\n%{\r\n------------------------- AUTHORSHIP -------------------------\r\n\r\nDeveloper: Loris Marini\r\nAffiliation: The University of Sydney\r\nContact: mrnlrs.tor@gmail.com\r\nNotes:\r\n\r\n------------------------- DESCRIPTION -------------------------\r\n\r\nThis function creates N_Networks different instances of a radio cell. If the \r\nfunction is run more than once there are two possibilities:\r\n\r\n1. The Network_Parameters differs from those defined previously. The function \r\n creates a subdirectory with a random name and saves the data there.\r\n\r\n2. The Network_Parameters is identical to the one already defined. The\r\n function does not do anyhting.\r\n \r\n\r\n------------------------- INPUT PARAMETERS -------------------------\r\n\r\n-- N_Networks --\r\nNumber of different networks to simulate\r\n\r\n-- Diameter -- \r\nDiameter of the cell in meters.\r\n\r\n-- Radius_Protected_Area --\r\nRadius of clear space around each user (Necessary to avoid ...)\r\n\r\n-- Step_Size --\r\nDistance between two points on the cell. Must be <= 1. The ratio Diameter/Step_Size \r\nis equal to the number of points along the diameter of the cell.\r\n\r\n-- H -- \r\nNumber of Helpers in the cell.\r\n\r\n-- N --\r\nNumber of users in the cell.\r\n\r\n-- Alpha --\r\nThe free space absorption coefficient \r\n\r\n------------------------- OUTPUT PARAMETERS -------------------------\r\n\r\n-- Parameters -- \r\nA struct containing all the relevant parameters for the function call.\r\n\r\n------------------------- EXAMPLE OF CALL -----------------------\r\n\r\n% Number of radio cells to deploy\r\nNrc = 2;\r\n\r\n% Cell diameter [m]\r\nDiam = 1000;\r\n\r\n% Clear space around each helper [m]:\r\nCS = 10;\r\n\r\n% Step size\r\nSS = 0.1\r\n\r\n% Number of helpers in the network\r\nNh = 20;\r\n\r\n% Number of users in the network\r\nNu = 100;\r\n\r\n% A propagation Loss of 1 [km^-1]\r\nAp = 1;\r\n\r\n[ Parameters ] = Configure_The_Network( Nrc, Diam, CS, SS, Nh, Nu, Ap)\r\n\r\n%}\r\n% ---------------------------- CODE --------------------------\r\n\r\n% Add the communications toolbox to path. This is a known issue in Matlab\r\n% 2017a. 
See this post for more info: \r\n\r\n% https://au.mathworks.com/matlabcentral/answers/350491-how-to-set-the-correct-path-\r\n% for-communications-toolbox-for-rtl-sdr-support-package-at-startup\r\n \r\naddpath(fullfile(matlabroot, 'toolbox', 'comm', 'comm'), '-end')\r\n\r\n% Define the path to the directory where you store the network configurations:\r\n% path_to_networks = 'C:\\Users\\lmar1564\\Documents\\MATLAB\\FemtoChaching\\Network_Configuration\\Configurations';\r\npath_to_networks = '/home/loris/Desktop/test';\r\n\r\n \r\n% Create a struct with the name and value of all relevant parameters used\r\n% in this function call:\r\nNetwork_Parameters = struct('Number_Of_Netoworks_Generated',N_Networks, 'Diameter',Diameter, ...\r\n 'Radius_Protected_Area',Radius_Protected_Area, 'Step_Size',Step_Size, ...\r\n 'N_Of_Helpers',H, 'N_Of_Users', N, 'Propagation_Loss_Alpha', Alpha ); \r\n \r\n \r\n% Define the name with which you save the Network_Parameters: \r\nparameters_saveas = ['Network_Parameters_' num2str(N_Networks),...\r\n '_Random_Networks_H' num2str(H) '_N' num2str(N) '.mat'];\r\n \r\n \r\ncd(path_to_networks); \r\nsearch = dir(parameters_saveas);\r\n\r\nif ~isempty( search )\r\n % Check they are identical\r\n l = load(parameters_saveas);\r\n Loaded_param = l.Network_Parameters;\r\n \r\n if isequal(Loaded_param, Network_Parameters)\r\n % File exists and has identical parameters. Do nothing\r\n disp('Configuration already exists.');\r\n return;\r\n \r\n elseif Loaded_param ~= Network_Parameters\r\n % perform the full simulation but save data in subdirectory that\r\n % starts with the idenfifiers of parameters_saveas plus a random\r\n % string of integers\r\n \r\n random_string = num2str(randi(9,1,7));\r\n random_string(random_string==' ') = [];\r\n \r\n subdir_name = ['N_Net_' num2str(N_Networks),'H' num2str(H) '_N' num2str(N),'_id_',random_string];\r\n % ???? 
\r\n \r\n end\r\nelse\r\n % Do nothing here and continue execution.\r\nend\r\n\r\n \r\n% Save to disk the network parameters in the 'path_to_networks' with the 'parameters_saveas' name: \r\nsave( fullfile( path_to_networks ,parameters_saveas), 'Network_Parameters');\r\n\r\n\r\nfor n = 1:1:N_Networks\r\n \r\n % Call the function Place_Users to randomly place users in a cell\r\n [ distances, fh ] = Place_Users( Diameter, Radius_Protected_Area, Step_Size, H,N );\r\n \r\n % Save distances matrix to file \r\n Matrix_Distances_Name = ['Distances_' num2str(n) '_of_' num2str(N_Networks) '_with_H' num2str(H) '_N' num2str(N) '.mat'];\r\n save( fullfile( path_to_networks ,Matrix_Distances_Name), 'distances');\r\n \r\n \r\n Network_Delays = Network_Normalised_Delays( distances, Alpha );\r\n \r\n % Save Matrix of delays\r\n Matrix_Delays_Name = ['Network_Delays_' num2str(n) '_of_' num2str(N_Networks) '_with_H' num2str(H) '_N' num2str(N) '.mat'];\r\n save( fullfile( path_to_networks ,Matrix_Delays_Name), 'Network_Delays');\r\n \r\n % Save the figure representing the cell with helpers and users\r\n Figure_Name = ['Network_Scenario_' num2str(n) '_of_' num2str(N_Networks) '_with_H' num2str(H) '_N' num2str(N) '.fig'];\r\n Figure_File_Name = fullfile(path_to_networks, Figure_Name);\r\n \r\n figure(fh);\r\n saveas(fh, Figure_File_Name);\r\nend\r\n\r\nclose all;\r\nend\r\n\r\n"} +{"plateform": "github", "repo_name": "LorisMarini/content_caching_with_reinforcement_learning-master", "name": "PLAY.m", "ext": ".m", "path": "content_caching_with_reinforcement_learning-master/Multi_Agent_CIDGPA_Framework/PLAY.m", "size": 11557, "source_encoding": "utf_8", "md5": "eef3771b11c2b4b66f71ed303fd16d17", "text": "\r\nfunction [ Conv_Actions, Game_Iterations, History_Delays, Convergence_Delays, New_Learning] = PLAY( Network_Delays, Popularities, Learning, Reward_Type, Resolution, P_Threshold )\r\n\r\n%{\r\n-------------------------- AUTHORSHIP ---------------------------------\r\n\r\nDeveloper: Loris Marini\r\nAffiliation: The University of Sydney\r\nContact: mrnlrs.tor@gmail.com\r\nNotes:\r\n\r\n--------------------------- DESCRIPTION -------------------------------\r\n\r\nThis script initializes a game of learning automata using DGPA\r\nreinforcement learning.\r\n\r\n----------------------------- DEPENDENCIES --------------------------------\r\n\r\nLearners_Files_Selection(...)\r\nUser_Weighted_Delay(...)\r\nUser_NCA_Selection(...)\r\nBest_File_Based_Reward()\r\nWeighted_Delay_Based_Reward()\r\n \r\n-------------------------------- INPUT ----------------------------------\r\n \r\nNetwork_Delays\r\n \r\nPopularities\r\n \r\nLearning\r\n The Set of Learners as Initialised by the 'INITIALIZE_Game_Of_DGPA'.\r\nReward_Type\r\n (See ...)\r\nResolution\r\n Typically 1, is the resolution step of the DGPA.\r\nP_Threshold \r\n Level of probability we consider to convergence.\r\n \r\n\r\n-------------------------------- OUTPUT ----------------------------------\r\n\r\nConv_Actions\r\n The set of actions for each learner that the game has converged to.\r\n\r\nWeighted_Delays\r\n The final weighted delay for each user.\r\n\r\nGAME_Delay_Performance\r\n The history of weighted delays for each user during the GAME.\r\n\r\n% ----------------------------- CODE ---------------------------------\r\n%}\r\n\r\n\r\nNP = size(Network_Delays,2); % Number of content providers (BS or Helpers) in the cell\r\nH = NP - 1; % Number of Helpers in the cell\r\nN = size(Network_Delays,1); % Number of users in the cell\r\nM = size(Learning,1); % Caching 
capability of each Helper\r\nF = H*M; % Total number of files that can be cached.\r\nS = 1:1:F; % S: Space of Actions. F: How many files we can offload from the BS.\r\nDelta = 1/(F.*Resolution); % Resulution Step\r\n\r\nConv_Actions = zeros(M,H); % The Matrix of actions to which learners converge during the game.\r\nGAME_Positive_Feedbacks = zeros(M,H); % ....\r\nGITER = 1; % Game Iteration Number.\r\nElapsed_Time = 0; % Time Initialisation.\r\nMin_Weighted_Delay = Inf*ones(1,N);\r\nMin_Average_Weighted_Delay = Inf*ones(1,H);\r\nAverage_Weighted_Delay = zeros(1,H);\r\n\r\n\r\nwhile ~Check_Game_Convergence( Learning, P_Threshold )\r\n \r\n tic; % Iteration Timing\r\n\r\n % Learners Select Files in Parallel (same time) \r\n \r\n [Available_Files, New_Learning] = Learners_Files_Selection( S, Learning );\r\n Learning = New_Learning;\r\n \r\n % Feedbacks from the users:\r\n \r\n % Pre-allocate cumulative Rewards for all users\r\n % Pre-allocate cumulative Penalsties for all users\r\n \r\n GAME_Rewards = zeros(M,H+1); \r\n GAME_Penalties = zeros(M,H+1); \r\n \r\n switch Reward_Type\r\n \r\n case 'Best_File_Based_Reward'\r\n \r\n for n = 1:1:N\r\n % Let each user choose which content they prefer using the\r\n % policy if nearest available (Nearest Content Available\r\n % NCA)\r\n User_Selections = User_NCA_Selection( n, S, Available_Files, Network_Delays);\r\n \r\n % Calculate the weighted latency that the user would experience\r\n Delay_Performance(GITER,n) = User_Weighted_Delay( User_Selections, Popularities );\r\n \r\n User_Delays = Network_Delays(n,:);\r\n \r\n % Let the user provide its own feedback\r\n [ Current_Rewards, ~, Current_Penalties, ~ ] = Best_File_Based_Reward( User_Delays, User_Selections, Popularities );\r\n \r\n % Update Rewards and Penalties\r\n GAME_Rewards = GAME_Rewards + Current_Rewards;\r\n GAME_Penalties = GAME_Penalties + Current_Penalties;\r\n end\r\n \r\n case 'Weighted_Delay_Based_Reward'\r\n \r\n for n = 1:1:N\r\n % Let each user choose which content they prefer using the\r\n % policy if nearest available (Nearest Content Available\r\n % NCA)\r\n User_Selections = User_NCA_Selection( n, S, Available_Files, Network_Delays);\r\n \r\n % Calculate the weighted latency that the user would experience\r\n Delay_Performance(GITER,n) = User_Weighted_Delay( User_Selections, Popularities );\r\n \r\n User_Delays = Network_Delays(n,:);\r\n \r\n \r\n if (Weighted_Delay < Min_Weighted_Delay(n))\r\n Min_Weighted_Delay(n) = Weighted_Delay;\r\n end\r\n if( GITER == 1)\r\n Current_Minima = Inf;\r\n elseif (GITER > 1)\r\n Current_Minima = Min_Weighted_Delay(n);\r\n end\r\n \r\n % Let the user provide its own feedback\r\n [ Current_Rewards, ~, Current_Penalties, ~ ]...\r\n = Weighted_Delay_Based_Reward( User_Delays, User_Selections, Weighted_Delay, Current_Minima , Popularities );\r\n \r\n % Update Rewards and Penalties\r\n GAME_Rewards = GAME_Rewards + Current_Rewards;\r\n GAME_Penalties = GAME_Penalties + Current_Penalties;\r\n end\r\n case 'Average_Weighted_Delay_Based_Reward'\r\n \r\n Tot_Rewards = zeros(M,H+1,N);\r\n Tot_Penalties = zeros(M,H+1,N);\r\n \r\n for n = 1:1:N\r\n % Let each user choose which content they prefer using the\r\n % policy if nearest available (Nearest Content Available\r\n % NCA)\r\n User_Selections(n,:,:,:) = User_NCA_Selection( n, S, Available_Files, Network_Delays);\r\n \r\n % Calculate the weighted latency that the user would experience\r\n This_User_Selections = squeeze(User_Selections(n,:,:,:));\r\n Delay_Performance(GITER,n) = User_Weighted_Delay( 
This_User_Selections, Popularities );\r\n \r\n % Let the user provide its own feedback\r\n User_Delays = Network_Delays(n,:);\r\n [ Current_Rewards, ~, Current_Penalties, ~ ] = Best_File_Based_Reward( User_Delays, This_User_Selections, Popularities );\r\n \r\n Tot_Rewards(:,:,n) = Current_Rewards;\r\n Tot_Penalties(:,:,n) = Current_Penalties;\r\n end\r\n \r\n for j=1:1:H\r\n % Take only the users connected to the learner (j,k)\r\n Who_to_Average = Network_Delays(:,j) ~= Inf;\r\n Average_Weighted_Delay(j) = sum( Weighted_Delay(Who_to_Average)) / sum(Who_to_Average);\r\n \r\n if (Average_Weighted_Delay(j) <= Min_Average_Weighted_Delay(j) )\r\n \r\n Min_Average_Weighted_Delay(j) = Average_Weighted_Delay(j);\r\n GAME_Rewards(:,j) = GAME_Rewards(:,j) + sum( squeeze( Tot_Rewards(:,j,:) ),2);\r\n GAME_Penalties(:,j) = GAME_Penalties(:,j) + sum( squeeze( Tot_Penalties(:,j,:) ),2);\r\n else\r\n GAME_Rewards(:,j) = GAME_Rewards(:,j);\r\n GAME_Penalties(:,j) = GAME_Penalties(:,j) + sum( squeeze( Tot_Rewards(:,j,:) ),2) + sum( squeeze( Tot_Penalties(:,j,:) ),2);\r\n end\r\n end \r\n \r\n History_Of_Average_Weighted_Delays(GITER,:) = Average_Weighted_Delay;\r\n\r\n end\r\n\r\n % GAME Learners Determine the Environment Feedback Democratically\r\n % 'Env_Feedback(k,j)'= 1 --> Larner(k,j) Rewarded.\r\n % 'Env_Feedback(k,j)'= 0 --> Learner(k,j) Penalised.\r\n \r\n for j = 1:1:H\r\n for k = 1:1:M\r\n Curr_Action = Learning(k,j).Ai;\r\n if (GAME_Rewards(k,j) > GAME_Penalties(k,j))\r\n GAME_Positive_Feedbacks(k,j) = GAME_Positive_Feedbacks(k,j) + 1;\r\n Learning(k,j).W(Curr_Action) = Learning(k,j).W(Curr_Action) + 1;\r\n Who_to_Divide = Learning(k,j).W ~= 0;\r\n Learning(k,j).D(Who_to_Divide) = Learning(k,j).W(Who_to_Divide)./ Learning(k,j).Z(Who_to_Divide); \r\n else\r\n % Do nothing.\r\n end\r\n end\r\n end\r\n \r\n % GAME Probabilities Vectors Update.\r\n \r\n [ Updated_Learning, Updated_Conv_Actions ] = New_P_Vectors_DGPA_A( Conv_Actions, Learning, Delta, P_Threshold);\r\n Learning = Updated_Learning;\r\n Conv_Actions = Updated_Conv_Actions;\r\n \r\n % Iteration Timing\r\n Time = toc;\r\n Elapsed_Time = Elapsed_Time + Time;\r\n Average_Time = Elapsed_Time/GITER;\r\n disp(['This is iteration number ' num2str(GITER) '. The Average Time per iteration is: ' num2str(Average_Time) '.'] );\r\n \r\n GITER = GITER + 1; \r\nend \r\n\r\nGame_Iterations = GITER - 1;\r\nNew_Learning = Learning; \r\n\r\n% Output Variables:\r\n\r\nswitch Reward_Type\r\n case 'Best_File_Based_Reward'\r\n History_Delays = Delay_Performance;\r\n Convergence_Delays = Delay_Performance(end,:);\r\n \r\n case 'Weighted_Delay_Based_Reward'\r\n History_Delays = Delay_Performance;\r\n Convergence_Delays = Delay_Performance(end,:);\r\n \r\n case 'Average_Weighted_Delay_Based_Reward'\r\n History_Delays = History_Of_Average_Weighted_Delays;\r\n Convergence_Delays = History_Of_Average_Weighted_Delays(end,:);\r\nend\r\n\r\n% VISUAL OUTPUT (Uncomment if needed in debugging mode)\r\n%{\r\ndisp('-------------------------------------------------------------------');\r\ndisp('=================== Convergence COMPLETE. 
======================');\r\ndisp('-------------------------------------------------------------------');\r\n\r\n switch Reward_Type\r\n case 'Best_File_Based_Reward'\r\n \r\n disp(Reward_Type);\r\n disp(['The converged weighted delays are: ' num2str(Delay_Performance(GITER-1,:)) '.']);\r\n disp(['The Average Weighted Delay among all the users is: ' num2str(sum( Delay_Performance(GITER-1,:) )./N) '.']);\r\n disp(['The game with ' num2str(M*H) ' players (that is file memories) converged after ' num2str(GITER-1) ' iterations.']);\r\n \r\n case 'Weighted_Delay_Based_Reward'\r\n \r\n disp(Reward_Type);\r\n disp(['The converged weighted delays are: ' num2str(Delay_Performance(GITER-1,:)) '.']);\r\n disp(['The Average Weighted Delay among all the users is: ' num2str(sum(Delay_Performance(GITER-1,:))./N) '.']);\r\n disp(['The game with ' num2str(M*H) ' players (that is file memories) converged after ' num2str(GITER-1) ' iterations.']);\r\n \r\n case 'Average_Weighted_Delay_Based_Reward'\r\n \r\n disp(Reward_Type);\r\n disp(['The converged weighted delays are: ' num2str(Delay_Performance(GITER-1,:)) '.']);\r\n disp(['The Min_Average_Weighted_Delays, at convergence are : ' num2str(Min_Average_Weighted_Delay) '.']);\r\n disp(['The game with ' num2str(M*H) ' players (that is file memories) converged after ' num2str(GITER-1) ' iterations.']);\r\n end\r\n\r\n%}\r\n\r\nend\r\n\r\n"} +{"plateform": "github", "repo_name": "LorisMarini/content_caching_with_reinforcement_learning-master", "name": "INITIALIZE.m", "ext": ".m", "path": "content_caching_with_reinforcement_learning-master/Multi_Agent_CIDGPA_Framework/INITIALIZE.m", "size": 9684, "source_encoding": "utf_8", "md5": "148284711069c94d8689ab4b2a9b11dd", "text": "\r\nfunction [ Learning, Iterations ] = INITIALIZE( Learning_Setup, Network_Delays, M, Popularities, Reward_Type, Initialization_Number )\r\n%{\r\n-------------------------- AUTHORSHIP ---------------------------------\r\n\r\nDeveloper: Loris Marini\r\nAffiliation: The University of Sydney\r\nContact: mrnlrs.tor@gmail.com\r\nNotes:\r\n\r\n--------------------------- DESCRIPTION -------------------------------\r\n\r\nThis script initializes a game of learning automata using DGPA\r\nreinforcement learning.\r\n\r\n----------------------------- DEPENDENCIES --------------------------------\r\n\r\nAllocate_Learners(...)\r\nLearners_Files_Selection(...)\r\nUser_NCA_Selection(...)\r\nUser_Weighted_Delay(...)\r\n \r\nBest_File_Based_Reward()\r\nWeighted_Delay_Based_Reward()\r\n\r\n-------------------------------- INPUT ----------------------------------\r\n \r\nNetwork_Delays \r\n Matrix of delays NxH+1 where N= number of users and H+1= Number of providers \r\nM \r\n Caching capability of a single helper;\r\n \r\nPopularities\r\n File Popularities\r\n \r\nReward_Type\r\n \r\nInitialization_Number\r\n See NITIALIZE_Game_Of_DGPA\r\n\r\n-------------------------------- OUTPUT ----------------------------------\r\n\r\nLearning\r\n KxH Initialized learners\r\n\r\nIterations\r\n The actual number of iteratiosn that it took to initialize.\r\n\r\n% ----------------------------- CODE ---------------------------------\r\n%}\r\n\r\n\r\nH = size(Network_Delays,2) - 1; % Number of Helpers in the cell\r\nN = size(Network_Delays,1); % Number of users in the cell\r\nF = H*M; % Total number of files that can be cached.\r\nS = 1:1:F; % S: Space of Actions. 
F: How many files we can offload from the BS.\r\n\r\nN_Ini = Initialization_Number; % Initial #Iterations for estimation of D from each Learner\r\nINI_Positive_Feedbacks = zeros(M,H); % Initialise Environmental Feedback\r\nZeros = Inf; % Is a variable to control the learners with empty D.\r\nITER = 1; % Iteration Number\r\n\r\nMin_Weighted_Delay = Inf*ones(1,N);\r\nMin_Average_Weighted_Delay = Inf*ones(1,H);\r\nAverage_Weighted_Delay = zeros(1,H);\r\n\r\n\r\n% Loop until you make sure that all actions are selected at least N_Ini\r\n% times. We do it with 'Lesser_Selected' which is a variable that controls \r\n% the action that has been selected the least.\r\n\r\nLesser_Selected = 0; \r\n\r\n% Initialize learners\r\nLearning = Learners_Allocation( Learning_Setup, S, H,M,F );\r\n\r\n\r\nwhile (Lesser_Selected < N_Ini || Zeros > 0)\r\n \r\n % If we reach 10^4 iterations we declared the initialization to have\r\n % failed. since actions are selected at random, for small numbers of\r\n % N_Ini and smalle sets convergence should be reached much earlier.\r\n % However, larger sets and N_Init might need to adjust this threshold\r\n % to a larger value.\r\n \r\n \r\n if ITER > 10000\r\n error('Initialisation Failed.');\r\n end\r\n \r\n % Learners Select Files in Parallel (same time)\r\n \r\n [Available_Files, New_Learning] = Learners_Files_Selection( S, Learning );\r\n Learning = New_Learning;\r\n \r\n % Feedbacks from the users:\r\n \r\n % Pre-allocate cumulative Rewards for all users\r\n % Pre-allocate cumulative Penalsties for all users\r\n INI_Rewards = zeros(M,H+1); \r\n INI_Penalties = zeros(M,H+1); \r\n \r\n \r\n switch Reward_Type\r\n \r\n case 'Best_File_Based_Reward'\r\n \r\n for n = 1:1:N\r\n % Let each user choose which content they prefer using the\r\n % policy if nearest available (Nearest Content Available\r\n % NCA)\r\n user_selections = User_NCA_Selection( n, S, Available_Files, Network_Delays);\r\n \r\n % Calculate the weighted latency that the user would experience\r\n delays(ITER,n) = User_Weighted_Delay( user_selections, Popularities );\r\n \r\n this_user_delays = Network_Delays(n,:);\r\n \r\n % Let the user provide its own feedback\r\n [Current_Rewards, ~, Current_Penalties, ~ ] = Best_File_Based_Reward( this_user_delays, user_selections, Popularities );\r\n \r\n % Update Rewards and Penalties\r\n INI_Rewards = INI_Rewards + Current_Rewards;\r\n INI_Penalties = INI_Penalties + Current_Penalties;\r\n \r\n end\r\n \r\n case 'Weighted_Delay_Based_Reward'\r\n \r\n for n = 1:1:N\r\n \r\n % Let each user choose which content they prefer using the\r\n % policy if nearest available (Nearest Content Available\r\n % NCA)\r\n user_selections = User_NCA_Selection( n, S, Available_Files, Network_Delays);\r\n \r\n % Calculate the weighted latency that the user would experience\r\n delays(ITER,n) = User_Weighted_Delay( user_selections, Popularities );\r\n \r\n % Extract the actual delays that interest this user\r\n this_user_delays = Network_Delays(n,:);\r\n \r\n if (Weighted_Delay < Min_Weighted_Delay(n))\r\n \r\n Min_Weighted_Delay(n) = Weighted_Delay;\r\n end\r\n if( ITER == 1)\r\n Current_Minima = Inf;\r\n elseif (ITER > 1)\r\n Current_Minima = Min_Weighted_Delay(n);\r\n end\r\n \r\n [ Current_Rewards, ~, Current_Penalties, ~ ]...\r\n = Weighted_Delay_Based_Reward( this_user_delays, user_selections, Weighted_Delay, Current_Minima , Popularities );\r\n \r\n % Update Rewards and Penalties\r\n INI_Rewards = INI_Rewards + Current_Rewards;\r\n INI_Penalties = INI_Penalties + 
Current_Penalties;\r\n \r\n end\r\n case 'Average_Weighted_Delay_Based_Reward'\r\n \r\n Tot_Rewards = zeros(M,H+1,N);\r\n Tot_Penalties = zeros(M,H+1,N);\r\n \r\n for n = 1:1:N\r\n % Let each user choose which content they prefer using the\r\n % policy if nearest available (Nearest Content Available\r\n % NCA)\r\n user_selections(n,:,:,:) = User_NCA_Selection( n, S, Available_Files, Network_Delays);\r\n This_User_Selections = squeeze(user_selections(n,:,:,:));\r\n \r\n delays(ITER,n) = User_Weighted_Delay( This_User_Selections, Popularities );\r\n this_user_delays = Network_Delays(n,:);\r\n \r\n [ Current_Rewards, ~, Current_Penalties, ~ ] = Best_File_Based_Reward( this_user_delays,This_User_Selections, Popularities );\r\n Tot_Rewards(:,:,n) = Current_Rewards;\r\n Tot_Penalties(:,:,n) = Current_Penalties;\r\n end\r\n \r\n for j=1:1:H\r\n % Take only the users connected to the learner (j,k)\r\n Who_to_Average = Network_Delays(:,j) ~= Inf;\r\n Average_Weighted_Delay(j) = sum( Weighted_Delay(Who_to_Average)) / sum(Who_to_Average);\r\n \r\n if (Average_Weighted_Delay(j) <= Min_Average_Weighted_Delay(j) )\r\n \r\n Min_Average_Weighted_Delay(j) = Average_Weighted_Delay(j);\r\n INI_Rewards(:,j) = INI_Rewards(:,j) + sum( squeeze( Tot_Rewards(:,j,:) ),2); \r\n INI_Penalties(:,j) = INI_Penalties(:,j) + sum( squeeze( Tot_Penalties(:,j,:) ),2); \r\n else\r\n INI_Rewards(:,j) = INI_Rewards(:,j); \r\n INI_Penalties(:,j) = INI_Penalties(:,j) + sum( squeeze( Tot_Rewards(:,j,:) ),2) + sum( squeeze( Tot_Penalties(:,j,:) ),2); \r\n end\r\n end\r\n end\r\n \r\n % INI Learners Determine the Environment Feedback Democratically\r\n\r\n [ Learning, INI_Positive_Feedbacks ] = Determine_Environment_Feedback( Learning, INI_Rewards, INI_Penalties, INI_Positive_Feedbacks);\r\n \r\n % CHECKS\r\n \r\n Curr_min = Inf;\r\n for j = 1:1:H\r\n for k = 1:1:M\r\n if (min(Learning(k,j).Z) < Curr_min)\r\n Curr_min = min(Learning(k,j).Z);\r\n end\r\n end\r\n end\r\n Lesser_Selected = Curr_min;\r\n \r\n Zeros = 0;\r\n MaxNZeros = 0;\r\n for j = 1:1:H\r\n for k = 1:1:M\r\n % All the elements of D of (k,j) are zero. 
Until Zeros is ~= 0 we should keep initerating.\r\n if (max(Learning(k,j).D) == 0)\r\n Zeros = Zeros+1;\r\n end\r\n if (sum(Learning(k,j).D == 0) > MaxNZeros)\r\n MaxNZeros = sum(Learning(k,j).D == 0);\r\n end\r\n end\r\n end\r\n \r\n disp(['INITIALISATION: Iteration ' num2str(ITER) ]);\r\n disp(['The weighted delays are: ' num2str(delays(ITER,:)) '.There are ' num2str(Zeros) ' learners with a zero D vector.']);\r\n disp(['The maximum number of zeros in the D vectors is: ' num2str(MaxNZeros) '.'])\r\n ITER = ITER +1;\r\nend\r\nIterations = ITER - 1;\r\n\r\n% OUTPUT Variables\r\n\r\nLearning = Learning;\r\nNetwork_Delays = Network_Delays;\r\n \r\nend\r\n\r\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "PWNMP.m", "ext": ".m", "path": "cone-greedy-master/NMF/dense_square/PWNMP.m", "size": 2143, "source_encoding": "utf_8", "md5": "5ee02a618bbd1dfeffef955fc3b18dc4", "text": "function [X_r, residual_time,test_error] = PWNMP(Y,X_0,R,nil,LMO_it,X_tst)\n\n%% variables init\nconvergence_check = zeros(1,R+1);\n\nX_r=X_0;\n\nS = zeros(size(X_0));\nalpha = 0;\ntest_error = zeros(R,1);\n\nresidual_time = zeros(1,R);\ncnt = 0;\nA = zeros(size(Y,1),1);\nB = zeros(size(Y,2),1);\nfor r = 1:R\n r\n %% call to the oracle\n grad = -(Y-X_r);\n [a,b,residual] = LMO(-grad,Y,X_r,nil,LMO_it);\n residual_time(r) = residual;\n Z = a*b';\n A = cat(2,A,a);\n B = cat(2,B,b);\n maxV=0;\n idxV=1;\n idxZ=-1;\n for it=1:numel(alpha)\n c = S(:,:,it);\n if dot(grad(:),c(:))>maxV\n idxV = it;\n maxV = dot(grad(:),c(:));\n end\n \n if dot(Z(:),c(:)) == norm(Z(:))^2\n idxZ = it;\n end\n end\n if idxZ == -1\n idxZ = numel(alpha) + 1;\n S = cat(3,S,Z);\n alpha = [alpha,0];\n end\n V = S(:,:,idxV);\n D = Z - V;\n if dot(-grad(:),D(:)) == 0\n break \n end\n \n gamma = dot(-grad(:),D(:))/norm(D(:));\n \n idxDelete = [];\n if idxV~=1\n if alpha(idxV)-gamma<=0\n gamma = alpha(idxV);\n cnt = cnt +1;\n idxDelete = idxV;\n else\n alpha(idxV) = alpha(idxV) - gamma;\n A(:,idxV) = A(:,idxV).*sqrt(alpha(idxV));\n B(:,idxV) = B(:,idxV).*sqrt(alpha(idxV));\n end\n end\n alpha(idxZ) = alpha(idxZ) + gamma;\n A(:,idxZ) = A(:,idxZ).*sqrt(alpha(idxZ));\n B(:,idxZ) = B(:,idxZ).*sqrt(alpha(idxZ));\n \n alpha(idxDelete) = [];\n S(:,:,idxDelete) = [];\n A(:,idxDelete) = [];\n B(:,idxDelete) = [];\n\n [Aprov, Bprov, ~] = NMF_GCD(Y,size(A,2)-1,20,A(:,2:end),B(:,2:end)',1);\n X_r = Aprov*Bprov;\n A = [zeros(size(X_r,1),1), normc(Aprov)];\n B = [zeros(size(X_r,2),1), normc(Bprov')];\n for itt = 1:size(A,2)\n S(:,:,itt) = sparse(A(:,itt)*B(:,itt)');\n %Sfull(:,:,itt) = Zprov;\n end\n \n alpha = [0,norms(Aprov).*norms(Bprov')];\n \n \n convergence_check(r) = 2*(sum(X_r(:).^2)-1);\n \n test_error(r) = 0.5*norm(X_tst - X_r,'fro')^2;\n r = size(S,3);\n\nend\nend\n\nfunction S = norms(S)\n S = sqrt(sum(S.^2,1));\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "nnls1_asgivens.m", "ext": ".m", "path": "cone-greedy-master/TensorFactorization/nnls1_asgivens.m", "size": 4652, "source_encoding": "utf_8", "md5": "fe4f132c0c6503c13c348aa65cc9e7ef", "text": "function [ x,y,success,iter ] = nnls1_asgivens( A,b,overwrite, isInputProd, init )\n% Nonnegativity-constrained least squares for single righthand side : minimize |Ax-b|_2\n% Jingu Kim (jingu.kim@gmail.com)\n%\n% Reference:\n% Jingu Kim and Haesun Park. Fast Nonnegative Matrix Factorization: An Activeset-like Method and Comparisons,\n% SIAM Journal on Scientific Computing, 33(6), pp. 
3261-3281, 2011.\n%\n% Updated 2011.03.20: First implemented, overwrite option\n% Updated 2011.03.21: init option\n% Updated 2011.03.23: Givens updating not always\n\n if nargin<3, overwrite = false; end\n if nargin<4, isInputProd = false; end\n\n if isInputProd\n AtA=A;,Atb=b;\n else\n AtA=A'*A;, Atb=A'*b;\n end\n n=size(Atb,1);\n MAX_ITER = n*5;\n\n % set initial feasible solution\n if overwrite\n x = AtA\\Atb;\n x(x<0) = 0;\n PassiveList = find(x > 0)';\n R = chol(AtA(PassiveList,PassiveList));\n Rinv_b = (R')\\Atb(PassiveList);\n iter = 1;\n else\n if nargin<5\n PassiveList = [];\n R = [];\n Rinv_b = zeros(0,0);\n x = zeros(n,1);\n else\n x = init;\n x(x<0) = 0;\n PassiveList = find(x > 0)';\n R = chol(AtA(PassiveList,PassiveList));\n Rinv_b = (R')\\Atb(PassiveList);\n end\n iter=0;\n end\n\n success=1;\n while(success)\n if iter >= MAX_ITER, break, end\n % find unconstrained LS solution for the passive set\n if ~isempty(PassiveList)\n z = R\\Rinv_b;\n iter = iter + 1;\n else\n z = [];\n end\n z( abs(z)<1e-12 ) = 0; % One can uncomment this line for numerical stability.\n\n InfeaSet = find(z < 0);\n if isempty(InfeaSet) % if feasibile\n x(:) = 0;\n x(PassiveList) = z;\n y = AtA * x - Atb;\n y( PassiveList) = 0;\n y( abs(y)<1e-12 ) = 0; % One can uncomment this line for numerical stability.\n\n NonOptSet = find(y < 0);\n if isempty(NonOptSet), success=0; % check optimality\n else\n [minVal,minIx] = min(y);\n PassiveList = [PassiveList minIx]; % increase passive set\n [R,Rinv_b] = cholAdd(R,AtA(PassiveList,minIx),Rinv_b,Atb(minIx));\n end\n else % if not feasibile\n x_pass = x(PassiveList);\n x_infeaset = x_pass(InfeaSet);\n z_infeaset = z(InfeaSet);\n [minVal,minIx] = min(x_infeaset./(x_infeaset-z_infeaset));\n x_pass_new = x_pass+(z-x_pass).*minVal;\n x_pass_new(InfeaSet(minIx))=0;\n\n zeroSetSub = sort(find(x_pass_new==0),'descend');\n for i=1:length(zeroSetSub)\n subidx = zeroSetSub(i);\n PassiveList(subidx) = [];\n\n % Givens updating is not always better (maybe only in matlab?).\n if subidx >= 0.9 * size(R,2)\n R = cholDelete(R,subidx);\n else\n R = chol(AtA(PassiveList,PassiveList));\n end\n end\n Rinv_b = (R')\\Atb(PassiveList);\n x_pass_new(x_pass_new == 0) = [];\n x(:) = 0;\n x(PassiveList) = x_pass_new;\n end\n end\nend\n\nfunction [new_R,new_d] = cholAdd(R,v,d,val)\n if isempty(R)\n new_R = sqrt(v);\n new_d = val/new_R;\n else\n n = size(R,1);\n new_R = zeros(n+1,n+1);\n new_R(1:n,1:n)=R;\n\n vec = zeros(n+1,1);\n vec(1:n)=R'\\v(1:n);\n vec(n+1)=sqrt(v(n+1)-vec(1:n)'*vec(1:n));\n\n new_R(:,n+1) = vec;\n\n new_d = [d;zeros(1,1)];\n new_d(n+1) = (val-vec(1:n)'*d)/vec(n+1);\n end\nend\n\nfunction [new_R] = cholDelete(R,idx)\n n = size(R,1);\n new_R = R;\n new_R(:,idx) = [];\n\n for i=idx:n-1\n %G=getGivens(new_R(:,i),i,i+1);\n G=planerot(new_R([i i+1],i));\n new_R([i i+1],:)=G*new_R([i i+1],:);, new_R(i+1,i)=0;\n end\n new_R = new_R(1:n-1,1:n-1);\nend\n\n% function [G]=getGivens(a,i,j)\n% G=zeros(2,2);\n% [c,s]=givensRotation(a(i),a(j));\n% G(1,1)=c;\n% G(1,2)=s;\n% G(2,1)=-s;\n% G(2,2)=c;\n% end\n% \n% function [c,s]=givensRotation(a,b)\n% % Givens Rotation to annihilate b with respect to a\n% if(b==0)\n% c=1;s=0;\n% else\n% if (abs(b)>abs(a))\n% t=-a/b;\n% s=1/sqrt(1+t*t);\n% c=s*t;\n% else\n% t=-b/a;\n% c=1/sqrt(1+t*t);\n% s=c*t;\n% end\n% end\n% end\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "cast_to_set.m", "ext": ".m", "path": "cone-greedy-master/TensorFactorization/cast_to_set.m", "size": 624, "source_encoding": "utf_8", "md5": 
"0a51464057642c461237748e511a466f", "text": "function rez = cast_to_set(rez,normalized,non_negative,sparsity)\n if strcmp(non_negative,'true')\n rez(rez<0)=0;\n end\n if ~strcmp(sparsity,'0')\n rez = to_sparse(rez,str2double(sparsity));\n end \n if strcmp(normalized,'true')\n rez=rez./norm(rez);\n end\nend\n\nfunction in = to_sparse(in,k)\n [~,sortIndex] = sort(abs(in(:)),'ascend'); %# Sort the values in ascending order \n assert(size(sortIndex,1)-k>=0,'non zero falues greater than number of entries')\n minIndex = sortIndex(1:size(sortIndex,1)-k); %# Get a linear index into A of the 5 largest values\n in(minIndex) = 0;\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "nmf.m", "ext": ".m", "path": "cone-greedy-master/TensorFactorization/nmf.m", "size": 23853, "source_encoding": "utf_8", "md5": "7462a6647d7acb938254b76c028c50d1", "text": "% Nonnegative Matrix Factorization Algorithms Toolbox\r\n%\r\n% Written by Jingu Kim (jingu.kim@gmail.com)\r\n% Work done at\r\n% School of Computational Science and Engineering\r\n% College of Computing, Georgia Institute of Technology\r\n%\r\n% Please send bug reports, comments, or questions to Jingu Kim.\r\n% This code comes with no guarantee or warranty of any kind.\r\n%\r\n% Reference:\r\n% [1] Jingu Kim, Yunlong He, and Haesun Park.\r\n% Algorithms for Nonnegative Matrix and Tensor Factorizations: A Unified View \r\n% Based on Block Coordinate Descent Framework.\r\n% Journal of Global Optimization, 58(2), pp. 285-319, 2014.\r\n%\r\n% [2] Jingu Kim and Haesun Park.\r\n% Fast Nonnegative Matrix Factorization: An Active-set-like Method And Comparisons.\r\n% SIAM Journal on Scientific Computing (SISC), 33(6), pp. 3261-3281, 2011.\r\n%\r\n% Last modified on 07/28/2013\r\n%\r\n% \r\n% A : Input data matrix (m x n)\r\n% k : Target low-rank\r\n% \r\n% (Below are optional arguments: can be set by providing name-value pairs)\r\n% \r\n% METHOD : Algorithm for solving NMF. Available values are as follows. Default is 'anls_bpp'.\r\n% 'anls_bpp' : ANLS with Block Principal Pivoting Method \r\n% 'anls_asgivens': ANLS with Active Set Method and Givens Updating\r\n% 'anls_asgroup' : ANLS with Active Set Method and Column Grouping\r\n% 'als' : Alternating Least Squares Method\r\n% 'hals' : Hierarchical Alternating Least Squares Method\r\n% 'mu' : Multiplicative Updating Method\r\n% See publication [1] (and references therein) for the details of these algorithms.\r\n% TOL : Stopping tolerance. Default is 1e-3. \r\n% If you want to obtain a more accurate solution,\r\n% decrease TOL and increase MAX_ITER at the same time.\r\n% Note that algorithms will need more time to terminate if you do so.\r\n% MAX_ITER : Maximum number of iterations. Default is 500.\r\n% MIN_ITER : Minimum number of iterations. Default is 20.\r\n% MAX_TIME : Maximum amount of time in seconds. Default is 1,000,000.\r\n% INIT : A struct containing initial values. INIT.W and INIT.H should contain \r\n% initial values of W and H of size (m x k) and (k x n), respectively. \r\n% When INIT is not given, W and H are randomly initialized.\r\n% VERBOSE : 0 (default) - No debugging information is collected.\r\n% 1 (debugging/experimental purpose) - History of computation is returned. 
\r\n% See 'REC' variable.\r\n% 2 (debugging/experimental purpose) - History of computation is additionally \r\n% printed on screen.\r\n% REG_W, REG_H : Regularization parameters for W and H.\r\n% Both REG_W and REG_H should be vector of two nonnegative numbers.\r\n% The first component is a parameter with Frobenius norm regularization, and\r\n% the second component is a parameter with L1-norm regularization.\r\n% For example, to promote sparsity in H, one might set \r\n% REG_W = [alpha 0] and REG_H = [0 beta]\r\n% where alpha and beta are positive numbers. \r\n% See papers [1] and [2] for more details.\r\n% Defaut is [0 0] for both REG_W and REG_H, which means no regularization.\r\n% \r\n% W : Obtained basis matrix (m x k)\r\n% H : Obtained coefficient matrix (k x n)\r\n% iter : Number of iterations\r\n% HIS : (debugging/experimental purpose) Auxiliary information about the execution\r\n% \r\n% nmf(A,10)\r\n% nmf(A,20,'verbose',2)\r\n% nmf(A,20,'verbose',1,'method','anls_bpp')\r\n% nmf(A,20,'verbose',1,'method','hals')\r\n% nmf(A,20,'verbose',1,'reg_w',[0.1 0],'reg_h',[0 0.5])\r\n\r\nfunction [W,H,iter,REC]=nmf(A,k,varargin)\r\n % parse parameters\r\n params = inputParser;\r\n params.addParamValue('method' ,'anls_bpp',@(x) ischar(x) );\r\n params.addParamValue('tol' ,1e-3 ,@(x) isscalar(x) & x > 0);\r\n params.addParamValue('min_iter' ,20 ,@(x) isscalar(x) & x > 0);\r\n params.addParamValue('max_iter' ,500 ,@(x) isscalar(x) & x > 0);\r\n params.addParamValue('max_time' ,1e6 ,@(x) isscalar(x) & x > 0);\r\n params.addParamValue('init' ,struct([]),@(x) isstruct(x));\r\n params.addParamValue('verbose' ,1 ,@(x) isscalar(x) & x >= 0);\r\n params.addParamValue('reg_w' ,[0 0] ,@(x) isvector(x) & length(x) == 2);\r\n params.addParamValue('reg_h' ,[0 0] ,@(x) isvector(x) & length(x) == 2);\r\n % The following options are reserved for debugging/experimental purposes. 
\r\n % Make sure to understand them before making changes\r\n params.addParamValue('subparams' ,struct([]),@(x) isstruct(x) );\r\n params.addParamValue('track_grad' ,1 ,@(x) isscalar(x) & x >= 0);\r\n params.addParamValue('track_prev' ,1 ,@(x) isscalar(x) & x >= 0);\r\n params.addParamValue('stop_criterion',2 ,@(x) isscalar(x) & x >= 0);\r\n params.parse(varargin{:});\r\n\r\n % copy from params object\r\n [m,n] = size(A);\r\n par = params.Results;\r\n par.m = m;\r\n par.n = n;\r\n par.k = k;\r\n\r\n % If stopping criterion is based on the gradient information, turn on 'track_grad' option.\r\n if par.stop_criterion > 0\r\n par.track_grad = 1;\r\n end\r\n\r\n % initialize\r\n if isempty(par.init)\r\n W = rand(m,k); H = rand(k,n);\r\n else\r\n W = par.init.W; H = par.init.H;\r\n end\r\n\r\n % This variable is for analysis/debugging, so it does not affect the output (W,H) of this program\r\n REC = struct([]);\r\n\r\n clear('init');\r\n init.norm_A = norm(A,'fro'); \r\n init.norm_W = norm(W,'fro');\r\n init.norm_H = norm(H,'fro');\r\n init.baseObj = getObj((init.norm_A)^2,W,H,par);\r\n\r\n if par.verbose % Collect initial information for analysis/debugging\r\n if par.track_grad\r\n [gradW,gradH] = getGradient(A,W,H,par);\r\n init.normGr_W = norm(gradW,'fro');\r\n init.normGr_H = norm(gradH,'fro');\r\n init.SC_NM_PGRAD = getInitCriterion(1,A,W,H,par,gradW,gradH);\r\n init.SC_PGRAD = getInitCriterion(2,A,W,H,par,gradW,gradH);\r\n init.SC_DELTA = getInitCriterion(3,A,W,H,par,gradW,gradH);\r\n else\r\n gradW = 0; gradH = 0;\r\n end\r\n\r\n if par.track_prev \r\n prev_W = W; prev_H = H;\r\n else\r\n prev_W = 0; prev_H = 0;\r\n end\r\n \r\n ver = prepareHIS(A,W,H,prev_W,prev_H,init,par,0,0,gradW,gradH);\r\n REC(1).init = init;\r\n REC.HIS = ver;\r\n\r\n if par.verbose == 2\r\n display(init);\r\n end\r\n\r\n tPrev = cputime;\r\n end\r\n\r\n initializer= str2func([par.method,'_initializer']);\r\n iterSolver = str2func([par.method,'_iterSolver']);\r\n iterLogger = str2func([par.method,'_iterLogger']);\r\n [W,H,par,val,ver] = feval(initializer,A,W,H,par);\r\n\r\n if par.verbose & ~isempty(ver)\r\n tTemp = cputime;\r\n REC.HIS = saveHIS(1,ver,REC.HIS);\r\n tPrev = tPrev+(cputime-tTemp);\r\n end\r\n\r\n REC(1).par = par;\r\n REC.start_time = datestr(now);\r\n display(par);\r\n\r\n tStart = cputime;, tTotal = 0;\r\n if par.track_grad\r\n initSC = getInitCriterion(par.stop_criterion,A,W,H,par);\r\n end\r\n SCconv = 0; SC_COUNT = 3;\r\n\r\n for iter=1:par.max_iter\r\n\r\n % Actual work of this iteration is executed here.\r\n [W,H,gradW,gradH,val] = feval(iterSolver,A,W,H,iter,par,val);\r\n\r\n if par.verbose % Collect information for analysis/debugging\r\n elapsed = cputime-tPrev;\r\n tTotal = tTotal + elapsed;\r\n\r\n clear('ver');\r\n ver = prepareHIS(A,W,H,prev_W,prev_H,init,par,iter,elapsed,gradW,gradH);\r\n\r\n ver = feval(iterLogger,ver,par,val,W,H,prev_W,prev_H);\r\n REC.HIS = saveHIS(iter+1,ver,REC.HIS);\r\n\r\n if par.track_prev, prev_W = W; prev_H = H; end\r\n if par.verbose == 2, display(ver);, end\r\n tPrev = cputime;\r\n end\r\n\r\n if (iter > par.min_iter)\r\n if (par.verbose && (tTotal > par.max_time)) || (~par.verbose && ((cputime-tStart)>par.max_time))\r\n break;\r\n elseif par.track_grad\r\n SC = getStopCriterion(par.stop_criterion,A,W,H,par,gradW,gradH);\r\n if (SC/initSC <= par.tol)\r\n SCconv = SCconv + 1;\r\n if (SCconv >= SC_COUNT), break;, end\r\n else\r\n SCconv = 0;\r\n end\r\n end\r\n end\r\n end\r\n [m,n]=size(A);\r\n [W,H]=normalize_by_W(W,H);\r\n \r\n if par.verbose\r\n 
final.elapsed_total = sum(REC.HIS.elapsed);\r\n else\r\n final.elapsed_total = cputime-tStart;\r\n end\r\n final.iterations = iter;\r\n sqErr = getSquaredError(A,W,H,init);\r\n final.relative_error = sqrt(sqErr)/init.norm_A;\r\n final.relative_obj = getObj(sqErr,W,H,par)/init.baseObj;\r\n final.W_density = length(find(W>0))/(m*k);\r\n final.H_density = length(find(H>0))/(n*k);\r\n\r\n if par.verbose\r\n REC.final = final;\r\n end\r\n REC.finish_time = datestr(now);\r\n display(final); \r\nend\r\n\r\n%----------------------------------------------------------------------------\r\n% Implementation of methods\r\n%----------------------------------------------------------------------------\r\n% 'anls_bpp' : ANLS with Block Principal Pivoting Method \r\n% See nnlsm_blockpivot.m for reference and details.\r\n\r\nfunction [W,H,par,val,ver] = anls_bpp_initializer(A,W,H,par)\r\n H = zeros(size(H));\r\n\r\n ver.turnZr_W = 0;\r\n ver.turnZr_H = 0;\r\n ver.turnNz_W = 0;\r\n ver.turnNz_H = 0;\r\n ver.numChol_W = 0;\r\n ver.numChol_H = 0;\r\n ver.numEq_W = 0;\r\n ver.numEq_H = 0;\r\n ver.suc_W = 0;\r\n ver.suc_H = 0;\r\n\r\n val(1).WtA = W'*A;\r\n val.WtW = W'*W;\r\nend\r\n\r\nfunction [W,H,gradW,gradH,val] = anls_bpp_iterSolver(A,W,H,iter,par,val)\r\n\r\n WtW_reg = applyReg(val.WtW,par,par.reg_h);\r\n [H,temp,suc_H,numChol_H,numEq_H] = nnlsm_blockpivot(WtW_reg,val.WtA,1,H);\r\n\r\n HHt_reg = applyReg(H*H',par,par.reg_w);\r\n [W,gradW,suc_W,numChol_W,numEq_W] = nnlsm_blockpivot(HHt_reg,H*A',1,W');\r\n W = W';\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\n\r\n if par.track_grad\r\n gradW = gradW';\r\n gradH = getGradientOne(val.WtW,val.WtA,H,par.reg_h,par);\r\n else\r\n gradW = 0;gradH =0;\r\n end\r\n\r\n val(1).numChol_W = numChol_W;\r\n val.numChol_H = numChol_H;\r\n val.numEq_W = numEq_W;\r\n val.numEq_H = numEq_H;\r\n val.suc_W = suc_W;\r\n val.suc_H = suc_H;\r\nend\r\n\r\nfunction [ver] = anls_bpp_iterLogger(ver,par,val,W,H,prev_W,prev_H)\r\n if par.track_prev\r\n ver.turnZr_W = length(find( (prev_W>0) & (W==0) ))/(par.m*par.k);\r\n ver.turnZr_H = length(find( (prev_H>0) & (H==0) ))/(par.n*par.k);\r\n ver.turnNz_W = length(find( (prev_W==0) & (W>0) ))/(par.m*par.k);\r\n ver.turnNz_H = length(find( (prev_H==0) & (H>0) ))/(par.n*par.k);\r\n end\r\n ver.numChol_W = val.numChol_W;\r\n ver.numChol_H = val.numChol_H;\r\n ver.numEq_W = val.numEq_W;\r\n ver.numEq_H = val.numEq_H;\r\n ver.suc_W = val.suc_W;\r\n ver.suc_H = val.suc_H;\r\nend\r\n\r\n% 'anls_asgivens': ANLS with Active Set Method and Givens Updating\r\n% See nnls1_asgivens.m for reference and details.\r\n\r\nfunction [W,H,par,val,ver] = anls_asgivens_initializer(A,W,H,par)\r\n H = zeros(size(H));\r\n\r\n ver.turnZr_W = 0;\r\n ver.turnZr_H = 0;\r\n ver.turnNz_W = 0;\r\n ver.turnNz_H = 0;\r\n ver.numChol_W = 0;\r\n ver.numChol_H = 0;\r\n ver.suc_W = 0;\r\n ver.suc_H = 0;\r\n\r\n val(1).WtA = W'*A;\r\n val.WtW = W'*W;\r\nend\r\n\r\nfunction [W,H,gradW,gradH,val] = anls_asgivens_iterSolver(A,W,H,iter,par,val)\r\n WtW_reg = applyReg(val.WtW,par,par.reg_h);\r\n ow = 0;\r\n suc_H = zeros(1,size(H,2));\r\n numChol_H = zeros(1,size(H,2));\r\n for i=1:size(H,2)\r\n [H(:,i),temp,suc_H(i),numChol_H(i)] = nnls1_asgivens(WtW_reg,val.WtA(:,i),ow,1,H(:,i));\r\n end\r\n\r\n suc_W = zeros(1,size(W,1));\r\n numChol_W = zeros(1,size(W,1));\r\n\r\n HHt_reg = applyReg(H*H',par,par.reg_w);\r\n HAt = H*A';\r\n Wt = W';\r\n gradWt = zeros(size(Wt));\r\n for i=1:size(W,1)\r\n [Wt(:,i),gradWt(:,i),suc_W(i),numChol_W(i)] = 
nnls1_asgivens(HHt_reg,HAt(:,i),ow,1,Wt(:,i));\r\n end\r\n W = Wt';\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\n\r\n if par.track_grad\r\n gradW = gradWt'; \r\n gradH = getGradientOne(val.WtW,val.WtA,H,par.reg_h,par);\r\n else\r\n gradW = 0; gradH =0;\r\n end\r\n\r\n val(1).numChol_W = sum(numChol_W);\r\n val.numChol_H = sum(numChol_H);\r\n val.suc_W = any(suc_W);\r\n val.suc_H = any(suc_H);\r\nend\r\n\r\nfunction [ver] = anls_asgivens_iterLogger(ver,par,val,W,H,prev_W,prev_H)\r\n if par.track_prev\r\n ver.turnZr_W = length(find( (prev_W>0) & (W==0) ))/(par.m*par.k);\r\n ver.turnZr_H = length(find( (prev_H>0) & (H==0) ))/(par.n*par.k);\r\n ver.turnNz_W = length(find( (prev_W==0) & (W>0) ))/(par.m*par.k);\r\n ver.turnNz_H = length(find( (prev_H==0) & (H>0) ))/(par.n*par.k);\r\n end\r\n ver.numChol_W = val.numChol_W;\r\n ver.numChol_H = val.numChol_H;\r\n ver.suc_W = val.suc_W;\r\n ver.suc_H = val.suc_H;\r\nend\r\n\r\n% 'anls_asgroup' : ANLS with Active Set Method and Column Grouping\r\n% See nnlsm_activeset.m for reference and details.\r\n\r\nfunction [W,H,par,val,ver] = anls_asgroup_initializer(A,W,H,par)\r\n [W,H,par,val,ver] = anls_bpp_initializer(A,W,H,par);\r\nend\r\n\r\nfunction [W,H,gradW,gradH,val] = anls_asgroup_iterSolver(A,W,H,iter,par,val)\r\n WtW_reg = applyReg(val.WtW,par,par.reg_h);\r\n ow = 0;\r\n [H,temp,suc_H,numChol_H,numEq_H] = nnlsm_activeset(WtW_reg,val.WtA,ow,1,H);\r\n\r\n HHt_reg = applyReg(H*H',par,par.reg_w);\r\n [W,gradW,suc_W,numChol_W,numEq_W] = nnlsm_activeset(HHt_reg,H*A',ow,1,W');\r\n W = W';\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\n\r\n if par.track_grad\r\n gradW = gradW'; \r\n gradH = getGradientOne(val.WtW,val.WtA,H,par.reg_h,par);\r\n else\r\n gradW = 0; gradH =0;\r\n end\r\n\r\n val(1).numChol_W = numChol_W;\r\n val.numChol_H = numChol_H;\r\n val.numEq_W = numEq_W;\r\n val.numEq_H = numEq_H;\r\n val.suc_W = suc_W;\r\n val.suc_H = suc_H;\r\nend\r\n\r\nfunction [ver] = anls_asgroup_iterLogger(ver,par,val,W,H,prev_W,prev_H)\r\n ver = anls_bpp_iterLogger(ver,par,val,W,H,prev_W,prev_H);\r\nend\r\n\r\n% 'als': Alternating Least Squares Method\r\n% Reference:\r\n% Berry, M. and Browne, M. and Langville, A. and Pauca, V. and Plemmons, R.\r\n% Algorithms and applications for approximate nonnegative matrix factorization.\r\n% Computational Statistics and Data Analysis, 52(1), pp. 155–173 ,2007\r\n\r\nfunction [W,H,par,val,ver] = als_initializer(A,W,H,par)\r\n ver = struct([]);\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\nend\r\n\r\nfunction [W,H,gradW,gradH,val] = als_iterSolver(A,W,H,iter,par,val)\r\n WtW_reg = applyReg(val.WtW,par,par.reg_h);\r\n H = WtW_reg\\val.WtA;\r\n H(H<0)=0;\r\n\r\n AHt = A*H';\r\n HHt_reg = applyReg(H*H',par,par.reg_w);\r\n Wt = HHt_reg\\AHt'; W=Wt';\r\n W(W<0)=0;\r\n\r\n % normalize : necessary for ALS\r\n [W,H,weights] = normalize_by_W(W,H);\r\n D = diag(weights);\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\n AHt = AHt*D;\r\n HHt_reg = D*HHt_reg*D;\r\n\r\n if par.track_grad\r\n gradW = W*HHt_reg - AHt;\r\n gradH = getGradientOne(val.WtW,val.WtA,H,par.reg_h,par);\r\n else\r\n gradH = 0; gradW = 0;\r\n end\r\nend\r\n\r\nfunction [ver] = als_iterLogger(ver,par,val,W,H,prev_W,prev_H)\r\nend\r\n\r\n% 'mu' : Multiplicative Updating Method\r\n% Reference:\r\n% Lee, D. D. and Seung, H. S.\r\n% Algorithms for Non-negative Matrix Factorization.\r\n% Advances in Neural Information Processing Systems 13, pp. 
556-562, 2001 \r\n\r\nfunction [W,H,par,val,ver] = mu_initializer(A,W,H,par)\r\n ver = struct([]);\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\nend\r\n\r\nfunction [W,H,gradW,gradH,val] = mu_iterSolver(A,W,H,iter,par,val)\r\n epsilon = 1e-16;\r\n\r\n WtW_reg = applyReg(val.WtW,par,par.reg_h);\r\n H = H.*val.WtA./(WtW_reg*H + epsilon);\r\n\r\n HHt_reg = applyReg(H*H',par,par.reg_w);\r\n AHt = A*H';\r\n W = W.*AHt./(W*HHt_reg + epsilon);\r\n\r\n val.WtA = W'*A;\r\n val.WtW = W'*W;\r\n\r\n if par.track_grad\r\n gradW = W*HHt_reg - AHt;\r\n gradH = getGradientOne(val.WtW,val.WtA,H,par.reg_h,par);\r\n else\r\n gradH = 0; gradW = 0;\r\n end\r\nend\r\n\r\nfunction [ver] = mu_iterLogger(ver,par,val,W,H,prev_W,prev_H)\r\nend\r\n\r\n% 'hals' : Hierarchical Alternating Least Squares Method\r\n% Reference (See Algorithm 2):\r\n% Cichocki, A. and Phan, A.H.\r\n% Fast local algorithms for large scale nonnegative matrix and tensor factorizations.\r\n% IEICE Trans. Fundam. Electron. Commun. Comput. Sci. E92-A(3), 708–721 (2009)\r\n\r\nfunction [W,H,par,val,ver] = hals_initializer(A,W,H,par)\r\n [W,H]=normalize_by_W(W,H);\r\n\r\n val = struct([]);\r\n ver = struct([]);\r\nend\r\n\r\nfunction [W,H,gradW,gradH,val] = hals_iterSolver(A,W,H,iter,par,val)\r\n epsilon = 1e-16;\r\n\r\n WtA = W'*A;\r\n WtW = W'*W;\r\n WtW_reg = applyReg(WtW,par,par.reg_h);\r\n for i = 1:par.k\r\n H(i,:) = max(H(i,:) + WtA(i,:) - WtW_reg(i,:) * H,epsilon);\r\n end\r\n\r\n AHt = A*H';\r\n HHt_reg = applyReg(H*H',par,par.reg_w);\r\n for i = 1:par.k\r\n W(:,i) = max(W(:,i) * HHt_reg(i,i) + AHt(:,i) - W * HHt_reg(:,i),epsilon);\r\n if sum(W(:,i))>0\r\n W(:,i) = W(:,i)/norm(W(:,i));\r\n end\r\n end\r\n\r\n if par.track_grad\r\n gradW = W*HHt_reg - AHt;\r\n gradH = getGradientOne(W'*W,W'*A,H,par.reg_h,par);\r\n else\r\n gradH = 0; gradW = 0;\r\n end\r\nend\r\n\r\nfunction [ver] = hals_iterLogger(ver,par,val,W,H,prev_W,prev_H)\r\nend\r\n\r\n%----------------------------------------------------------------------------------------------\r\n% Utility Functions \r\n%----------------------------------------------------------------------------------------------\r\n\r\n% This function prepares information about execution for a experiment purpose\r\nfunction ver = prepareHIS(A,W,H,prev_W,prev_H,init,par,iter,elapsed,gradW,gradH)\r\n ver.iter = iter;\r\n ver.elapsed = elapsed;\r\n\r\n sqErr = getSquaredError(A,W,H,init);\r\n ver.rel_Error = sqrt(sqErr)/init.norm_A;\r\n ver.rel_Obj = getObj(sqErr,W,H,par)/init.baseObj;\r\n ver.norm_W = norm(W,'fro');\r\n ver.norm_H = norm(H,'fro');\r\n if par.track_prev\r\n ver.rel_Change_W = norm(W-prev_W,'fro')/init.norm_W;\r\n ver.rel_Change_H = norm(H-prev_H,'fro')/init.norm_H;\r\n end\r\n if par.track_grad\r\n ver.rel_NrPGrad_W = norm(projGradient(W,gradW),'fro')/init.normGr_W;\r\n ver.rel_NrPGrad_H = norm(projGradient(H,gradH),'fro')/init.normGr_H;\r\n ver.SC_NM_PGRAD = getStopCriterion(1,A,W,H,par,gradW,gradH)/init.SC_NM_PGRAD;\r\n ver.SC_PGRAD = getStopCriterion(2,A,W,H,par,gradW,gradH)/init.SC_PGRAD;\r\n ver.SC_DELTA = getStopCriterion(3,A,W,H,par,gradW,gradH)/init.SC_DELTA; \r\n end\r\n ver.density_W = length(find(W>0))/(par.m*par.k);\r\n ver.density_H = length(find(H>0))/(par.n*par.k);\r\nend\r\n\r\n% Execution information is collected in HIS variable\r\nfunction HIS = saveHIS(idx,ver,HIS)\r\n fldnames = fieldnames(ver);\r\n\r\n for i=1:length(fldnames)\r\n flname = fldnames{i};\r\n HIS.(flname)(idx) = ver.(flname);\r\n 
end\r\nend\r\n\r\n%-------------------------------------------------------------------------------\r\nfunction retVal = getInitCriterion(stopRule,A,W,H,par,gradW,gradH)\r\n% STOPPING_RULE : 1 - Normalized proj. gradient\r\n% 2 - Proj. gradient\r\n% 3 - Delta by H. Kim\r\n% 0 - None (want to stop by MAX_ITER or MAX_TIME)\r\n if nargin~=7\r\n [gradW,gradH] = getGradient(A,W,H,par);\r\n end\r\n [m,k]=size(W);, [k,n]=size(H);, numAll=(m*k)+(k*n);\r\n switch stopRule\r\n case 1\r\n retVal = norm([gradW(:); gradH(:)])/numAll;\r\n case 2\r\n retVal = norm([gradW(:); gradH(:)]);\r\n case 3\r\n retVal = getStopCriterion(3,A,W,H,par,gradW,gradH);\r\n case 0\r\n retVal = 1;\r\n end\r\nend\r\n%-------------------------------------------------------------------------------\r\nfunction retVal = getStopCriterion(stopRule,A,W,H,par,gradW,gradH)\r\n% STOPPING_RULE : 1 - Normalized proj. gradient\r\n% 2 - Proj. gradient\r\n% 3 - Delta by H. Kim\r\n% 0 - None (want to stop by MAX_ITER or MAX_TIME)\r\n if nargin~=7\r\n [gradW,gradH] = getGradient(A,W,H,par);\r\n end\r\n\r\n switch stopRule\r\n case 1\r\n pGradW = projGradient(W,gradW);\r\n pGradH = projGradient(H,gradH);\r\n pGrad = [pGradW(:); pGradH(:)];\r\n retVal = norm(pGrad)/length(pGrad);\r\n case 2\r\n pGradW = projGradient(W,gradW);\r\n pGradH = projGradient(H,gradH);\r\n pGrad = [pGradW(:); pGradH(:)];\r\n retVal = norm(pGrad);\r\n case 3\r\n resmat=min(H,gradH); resvec=resmat(:);\r\n resmat=min(W,gradW); resvec=[resvec; resmat(:)]; \r\n deltao=norm(resvec,1); %L1-norm\r\n num_notconv=length(find(abs(resvec)>0));\r\n retVal=deltao/num_notconv;\r\n case 0\r\n retVal = 1e100;\r\n end\r\nend\r\n%-------------------------------------------------------------------------------\r\nfunction sqErr = getSquaredError(A,W,H,init)\r\n sqErr = max((init.norm_A)^2 - 2*trace(H*(A'*W))+trace((W'*W)*(H*H')),0 );\r\nend\r\n\r\nfunction retVal = getObj(sqErr,W,H,par)\r\n retVal = 0.5 * sqErr;\r\n retVal = retVal + par.reg_w(1) * sum(sum(W.*W));\r\n retVal = retVal + par.reg_w(2) * sum(sum(W,2).^2);\r\n retVal = retVal + par.reg_h(1) * sum(sum(H.*H));\r\n retVal = retVal + par.reg_h(2) * sum(sum(H,1).^2);\r\nend\r\n\r\nfunction AtA = applyReg(AtA,par,reg)\r\n % Frobenius norm regularization\r\n if reg(1) > 0\r\n AtA = AtA + 2 * reg(1) * eye(par.k);\r\n end\r\n % L1-norm regularization\r\n if reg(2) > 0\r\n AtA = AtA + 2 * reg(2) * ones(par.k,par.k);\r\n end\r\nend\r\n\r\nfunction [grad] = modifyGradient(grad,X,reg,par)\r\n if reg(1) > 0\r\n grad = grad + 2 * reg(1) * X;\r\n end\r\n if reg(2) > 0\r\n grad = grad + 2 * reg(2) * ones(par.k,par.k) * X;\r\n end\r\nend\r\n\r\nfunction [grad] = getGradientOne(AtA,AtB,X,reg,par)\r\n grad = AtA*X - AtB;\r\n grad = modifyGradient(grad,X,reg,par);\r\nend\r\n\r\nfunction [gradW,gradH] = getGradient(A,W,H,par)\r\n HHt = H*H';\r\n HHt_reg = applyReg(HHt,par,par.reg_w);\r\n\r\n WtW = W'*W;\r\n WtW_reg = applyReg(WtW,par,par.reg_h);\r\n\r\n gradW = W*HHt_reg - A*H';\r\n gradH = WtW_reg*H - W'*A;\r\nend\r\n\r\n%-------------------------------------------------------------------------------\r\nfunction pGradF = projGradient(F,gradF)\r\n pGradF = gradF(gradF<0|F>0);\r\nend\r\n\r\n%-------------------------------------------------------------------------------\r\nfunction [W,H,weights] = normalize_by_W(W,H)\r\n norm2=sqrt(sum(W.^2,1));\r\n toNormalize = norm2>0;\r\n\r\n if any(toNormalize)\r\n W(:,toNormalize) = W(:,toNormalize)./repmat(norm2(toNormalize),size(W,1),1);\r\n H(toNormalize,:) = 
H(toNormalize,:).*repmat(norm2(toNormalize)',1,size(H,2));\r\n end\r\n\r\n weights = ones(size(norm2));\r\n weights(toNormalize) = norm2(toNormalize);\r\nend\r\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "nnlsm_blockpivot.m", "ext": ".m", "path": "cone-greedy-master/TensorFactorization/nnlsm_blockpivot.m", "size": 4542, "source_encoding": "utf_8", "md5": "376a788b205edbb0344ec40fc5afbf9f", "text": "% Nonnegativity Constrained Least Squares with Multiple Righthand Sides \n% using Block Principal Pivoting method\n%\n% This software solves the following problem: given A and B, find X such that\n% minimize || AX-B ||_F^2 where X>=0 elementwise.\n%\n% Reference:\n% Jingu Kim and Haesun Park. Fast Nonnegative Matrix Factorization: An Activeset-like Method and Comparisons,\n% SIAM Journal on Scientific Computing, 33(6), pp. 3261-3281, 2011.\n%\n% Written by Jingu Kim (jingu.kim@gmail.com)\n% School of Computational Science and Engineering,\n% Georgia Institute of Technology\n%\n% Note that this algorithm assumes that the input matrix A has full column rank.\n% This code comes with no guarantee or warranty of any kind. \n% Please send bug reports, comments, or questions to Jingu Kim.\n%\n% Modified Feb-20-2009\n% Modified Mar-13-2011: numChol and numEq\n%\n% \n% A : input matrix (m x n) (by default), or A'*A (n x n) if isInputProd==1\n% B : input matrix (m x k) (by default), or A'*B (n x k) if isInputProd==1\n% isInputProd : (optional, default:0) if turned on, use (A'*A,A'*B) as input instead of (A,B)\n% init : (optional) initial value for X\n% \n% X : the solution (n x k)\n% Y : A'*A*X - A'*B where X is the solution (n x k)\n% success : 0 for success, 1 for failure.\n% Failure could only happen on a numericall very ill-conditioned problem.\n% numChol : number of unique cholesky decompositions done\n% numEqs : number of systems of linear equations solved\n\nfunction [ X,Y,success,numChol,numEq ] = nnlsm_blockpivot( A, B, isInputProd, init )\n if nargin<3, isInputProd=0;, end\n if isInputProd\n AtA = A;, AtB = B;\n else\n AtA = A'*A;, AtB = A'*B;\n end\n\n if size(AtA,1)==1\n X = AtB/AtA; X(X<0) = 0;\n Y = AtA*X - AtB;\n numChol = 1; numEq = size(AtB,2); success = 1;\n return\n end\n \n [n,k]=size(AtB);\n MAX_BIG_ITER = n*5;\n % set initial feasible solution\n X = zeros(n,k);\n if nargin<4\n Y = - AtB;\n PassiveSet = false(n,k);\n numChol = 0;\n numEq = 0;\n else\n PassiveSet = (init > 0);\n [ X,numChol,numEq] = normalEqComb(AtA,AtB,PassiveSet);\n Y = AtA * X - AtB;\n end\n % parameters\n pbar = 3;\n P = zeros(1,k);, P(:) = pbar;\n Ninf = zeros(1,k);, Ninf(:) = n+1;\n\n NonOptSet = (Y < 0) & ~PassiveSet;\n InfeaSet = (X < 0) & PassiveSet;\n NotGood = sum(NonOptSet)+sum(InfeaSet);\n NotOptCols = NotGood > 0;\n\n bigIter = 0;, success=0;\n while(~isempty(find(NotOptCols)))\n bigIter = bigIter+1;\n if ((MAX_BIG_ITER >0) && (bigIter > MAX_BIG_ITER)) % set max_iter for ill-conditioned (numerically unstable) case\n success = 1;, break\n end\n\n Cols1 = NotOptCols & (NotGood < Ninf);\n Cols2 = NotOptCols & (NotGood >= Ninf) & (P >= 1);\n Cols3Ix = find(NotOptCols & ~Cols1 & ~Cols2);\n if ~isempty(find(Cols1))\n P(Cols1) = pbar;,Ninf(Cols1) = NotGood(Cols1);\n PassiveSet(NonOptSet & repmat(Cols1,n,1)) = true;\n PassiveSet(InfeaSet & repmat(Cols1,n,1)) = false;\n end\n if ~isempty(find(Cols2))\n P(Cols2) = P(Cols2)-1;\n PassiveSet(NonOptSet & repmat(Cols2,n,1)) = true;\n PassiveSet(InfeaSet & repmat(Cols2,n,1)) = false;\n end\n if ~isempty(Cols3Ix)\n for 
i=1:length(Cols3Ix)\n Ix = Cols3Ix(i);\n toChange = max(find( NonOptSet(:,Ix)|InfeaSet(:,Ix) ));\n if PassiveSet(toChange,Ix)\n PassiveSet(toChange,Ix)=false;\n else\n PassiveSet(toChange,Ix)=true;\n end\n end\n end\n [ X(:,NotOptCols),tempChol,tempEq ] = normalEqComb(AtA,AtB(:,NotOptCols),PassiveSet(:,NotOptCols));\n numChol = numChol + tempChol;\n numEq = numEq + tempEq;\n X(abs(X)<1e-12) = 0; % One can uncomment this line for numerical stability.\n Y(:,NotOptCols) = AtA * X(:,NotOptCols) - AtB(:,NotOptCols);\n Y(abs(Y)<1e-12) = 0; % One can uncomment this line for numerical stability.\n \n % check optimality\n NotOptMask = repmat(NotOptCols,n,1);\n NonOptSet = NotOptMask & (Y < 0) & ~PassiveSet;\n InfeaSet = NotOptMask & (X < 0) & PassiveSet;\n NotGood = sum(NonOptSet)+sum(InfeaSet);\n NotOptCols = NotGood > 0;\n end\nend\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "nnlsm_activeset.m", "ext": ".m", "path": "cone-greedy-master/TensorFactorization/nnlsm_activeset.m", "size": 5185, "source_encoding": "utf_8", "md5": "96f73fcf70f7083cd2d2a9bd9f71767a", "text": "% Nonnegativity Constrained Least Squares with Multiple Righthand Sides \n% using Active Set method\n%\n% This software solves the following problem: given A and B, find X such that\n% minimize || AX-B ||_F^2 where X>=0 elementwise.\n%\n% Reference:\n% Charles L. Lawson and Richard J. Hanson, Solving Least Squares Problems, \n% Society for Industrial and Applied Mathematics, 1995\n% M. H. Van Benthem and M. R. Keenan, \n% Fast Algorithm for the Solution of Large-scale Non-negativity-constrained Least Squares Problems,\n% J. Chemometrics 2004; 18: 441-450\n%\n% Written by Jingu Kim (jingu.kim@gmail.com)\n% School of Computational Science and Engineering,\n% Georgia Institute of Technology\n%\n% Please send bug reports, comments, or questions to Jingu Kim.\n%\n% Updated Feb-20-2010\n% Updated Mar-20-2011: numChol, numEq\n%\n% \n% A : input matrix (m x n) (by default), or A'*A (n x n) if isInputProd==1\n% B : input matrix (m x k) (by default), or A'*B (n x k) if isInputProd==1\n% overwrite : (optional, default:0) if turned on, unconstrained least squares solution is computed in the beginning\n% isInputProd : (optional, default:0) if turned on, use (A'*A,A'*B) as input instead of (A,B)\n% init : (optional) initial value for X\n% \n% X : the solution (n x k)\n% Y : A'*A*X - A'*B where X is the solution (n x k)\n% iter : number of systems of linear equations solved\n% success : 0 for success, 1 for failure.\n% Failure could only happen on a numericall very ill-conditioned problem.\n\nfunction [ X,Y,success,numChol,numEq ] = nnlsm_activeset( A, B, overwrite, isInputProd, init)\n if nargin<3, overwrite=0;, end\n if nargin<4, isInputProd=0;, end\n \n if isInputProd\n AtA=A;,AtB=B;\n else\n AtA=A'*A;, AtB=A'*B;\n end\n\n if size(AtA,1)==1\n X = AtB/AtA; X(X<0) = 0;\n Y = AtA*X - AtB;\n numChol = 1; numEq = size(AtB,2); success = 1;\n return\n end\n \n [n,k]=size(AtB);\n MAX_ITER = n*5;\n % set initial feasible solution\n if overwrite\n [X,numChol,numEq] = normalEqComb(AtA,AtB);\n PassSet = (X > 0);\n NotOptSet = any(X<0);\n elseif nargin>=5\n X = init;\n X(X<0)=0;\n PassSet = (X > 0);\n NotOptSet = true(1,k);\n numChol = 0;\n numEq = 0;\n else\n X = zeros(n,k);\n PassSet = false(n,k);\n NotOptSet = true(1,k);\n numChol = 0;\n numEq = 0;\n end\n \n Y = zeros(n,k);\n Y(:,~NotOptSet)=AtA*X(:,~NotOptSet) - AtB(:,~NotOptSet);\n NotOptCols = find(NotOptSet);\n \n bigIter = 0;, success=0;\n 
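    % Outer loop of the active-set method: each pass solves an unconstrained
    % least-squares problem restricted to the current passive set, steps back
    % towards feasibility whenever entries of that solution turn negative, and
    % exchanges variables between the active and passive sets until the KKT
    % conditions hold in every column (or MAX_ITER is exceeded).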
while(~isempty(NotOptCols))\n bigIter = bigIter+1;\n if ((MAX_ITER >0) && (bigIter > MAX_ITER)) % set max_iter for ill-conditioned (numerically unstable) case\n success = 1;, break\n end\n \n % find unconstrained LS solution for the passive set\n [ Z,tempChol,tempEq ] = normalEqComb(AtA,AtB(:,NotOptCols),PassSet(:,NotOptCols));\n numChol = numChol + tempChol;\n numEq = numEq + tempEq;\n\n Z(abs(Z)<1e-12) = 0; % One can uncomment this line for numerical stability.\n\n InfeaSubSet = Z < 0;\n InfeaSubCols = find(any(InfeaSubSet));\n FeaSubCols = find(all(~InfeaSubSet));\n \n if ~isempty(InfeaSubCols) % for infeasible cols\n ZInfea = Z(:,InfeaSubCols);\n InfeaCols = NotOptCols(InfeaSubCols);\n Alpha = zeros(n,length(InfeaSubCols));, Alpha(:) = Inf;\n [i,j] = find(InfeaSubSet(:,InfeaSubCols));\n InfeaSubIx = sub2ind(size(Alpha),i,j);\n if length(InfeaCols) == 1\n InfeaIx = sub2ind([n,k],i,InfeaCols * ones(length(j),1));\n else\n InfeaIx = sub2ind([n,k],i,InfeaCols(j)');\n end\n Alpha(InfeaSubIx) = X(InfeaIx)./(X(InfeaIx)-ZInfea(InfeaSubIx));\n\n [minVal,minIx] = min(Alpha);\n Alpha(:,:) = repmat(minVal,n,1);\n X(:,InfeaCols) = X(:,InfeaCols)+Alpha.*(ZInfea-X(:,InfeaCols));\n IxToActive = sub2ind([n,k],minIx,InfeaCols);\n X(IxToActive) = 0;\n PassSet(IxToActive) = false;\n end\n if ~isempty(FeaSubCols) % for feasible cols\n FeaCols = NotOptCols(FeaSubCols);\n X(:,FeaCols) = Z(:,FeaSubCols);\n Y(:,FeaCols) = AtA * X(:,FeaCols) - AtB(:,FeaCols);\n\n Y( abs(Y)<1e-12 ) = 0; % One can uncomment this line for numerical stability.\n \n NotOptSubSet = (Y(:,FeaCols) < 0) & ~PassSet(:,FeaCols);\n NewOptCols = FeaCols(all(~NotOptSubSet));\n UpdateNotOptCols = FeaCols(any(NotOptSubSet));\n if ~isempty(UpdateNotOptCols)\n [minVal,minIx] = min(Y(:,UpdateNotOptCols).*~PassSet(:,UpdateNotOptCols));\n PassSet(sub2ind([n,k],minIx,UpdateNotOptCols)) = true;\n end\n NotOptSet(NewOptCols) = false;\n NotOptCols = find(NotOptSet);\n end\n end\nend\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "ncp.m", "ext": ".m", "path": "cone-greedy-master/TensorFactorization/ncp.m", "size": 16643, "source_encoding": "utf_8", "md5": "688c72175fc36a416b097532b15c08b8", "text": "% Nonnegative Tensor Factorization (Canonical Decomposition / PARAFAC)\n%\n% Written by Jingu Kim (jingu.kim@gmail.com)\n% School of Computational Science and Engineering,\n% Georgia Institute of Technology\n%\n% This software implements nonnegativity-constrained low-rank approximation of tensors in PARAFAC model.\n% Assuming that a k-way tensor X and target rank r are given, this software seeks F1, ... , Fk \n% by solving the following problem:\n%\n% minimize || X- sum_(j=1)^r (F1_j o F2_j o ... o Fk_j) ||_F^2 + G(F1, ... , Fk) + H(F1, ..., Fk)\n% where\n% G(F1, ... , Fk) = sum_(i=1)^k ( alpha_i * ||Fi||_F^2 ),\n% H(F1, ... , Fk) = sum_(i=1)^k ( beta_i sum_(j=1)^n || Fi_j ||_1^2 ).\n% such that\n% Fi >= 0 for all i.\n%\n% To use this software, it is necessary to first install MATLAB Tensor Toolbox\n% by Brett W. Bader and Tamara G. Kolda, available at http://csmr.ca.sandia.gov/~tgkolda/TensorToolbox/.\n% The latest version that was tested with this software is Version 2.4, March 2010.\n% Refer to the help manual of the toolbox for installation and basic usage.\n%\n% Reference:\n% Jingu Kim and Haesun Park.\n% Fast Nonnegative Tensor Factorization with an Active-set-like Method.\n% In High-Performance Scientific Computing: Algorithms and Applications, Springer, 2012, pp. 
311-326.\n%\n% Please send bug reports, comments, or questions to Jingu Kim.\n% This code comes with no guarantee or warranty of any kind.\n%\n% Last modified 03/26/2012\n%\n% \n% X : Input data tensor. X is a 'tensor' object of tensor toolbox.\n% r : Target low-rank\n%\n% (Below are optional arguments: can be set by providing name-value pairs)\n%\n% METHOD : Algorithm for solving NMF. One of the following values:\n% 'anls_bpp' 'anls_asgroup' 'hals' 'mu'\n% See above paper (and references therein) for the details of these algorithms.\n% Default is 'anls_bpp'.\n% TOL : Stopping tolerance. Default is 1e-4. If you want to obtain a more accurate solution, \n% decrease TOL and increase MAX_ITER at the same time.\n% MIN_ITER : Minimum number of iterations. Default is 20.\n% MAX_ITER : Maximum number of iterations. Default is 200.\n% : A cell array that contains initial values for factors Fi.\n% See examples to learn how to set.\n% VERBOSE : 0 (default) - No debugging information is collected.\n% 1 (debugging/experimental purpose) - History of computation is returned. See 'REC' variable.\n% 2 (debugging/experimental purpose) - History of computation is additionally printed on screen.\n% \n% F : a 'ktensor' object that represent a factorized form of a tensor. See tensor toolbox for more info.\n% iter : Number of iterations\n% REC : (debugging/experimental purpose) Auxiliary information about the execution\n% \n% F = ncpp(X,5);\n% F = ncp(X,10,'tol',1e-3);\n% F = ncp(X,10,'tol',1e-3,'verbose',2);\n% F = ncp(X,7,'init',Finit,'tol',1e-5,'verbose',2);\n\nfunction [F,iter,REC]=ncp(X,r,varargin)\n % set parameters\n params = inputParser;\n params.addParamValue('method' ,'anls_bpp' ,@(x) ischar(x) );\n params.addParamValue('tol' ,1e-4 ,@(x) isscalar(x) & x > 0 );\n params.addParamValue('stop_criterion' ,1 ,@(x) isscalar(x) & x >= 0);\n params.addParamValue('min_iter' ,20 ,@(x) isscalar(x) & x > 0);\n params.addParamValue('max_iter' ,200 ,@(x) isscalar(x) & x > 0 );\n params.addParamValue('max_time' ,1e6 ,@(x) isscalar(x) & x > 0);\n params.addParamValue('init' ,cell(0) ,@(x) iscell(x) );\n params.addParamValue('verbose' ,0 ,@(x) isscalar(x) & x >= 0 );\n params.addParamValue('orderWays',[]);\n params.parse(varargin{:});\n \n % copy from params object\n par = params.Results;\n par.nWay = ndims(X);\n par.r = r;\n par.size = size(X);\n\n if isempty(par.orderWays)\n par.orderWays = [1:par.nWay]; \n end\n\n % set initial values\n if ~isempty(par.init)\n F_cell = par.init;\n par.init_type = 'User provided';\n par.init = cell(0);\n else\n Finit = cell(par.nWay,1);\n for i=1:par.nWay\n Finit{i}=rand(size(X,i),r);\n end\n F_cell = Finit;\n par.init_type = 'Randomly generated';\n end\n\n % This variable is for analysis/debugging, so it does not affect the output (W,H) of this program\n REC = struct([]);\n tPrev = cputime;\n REC(1).start_time = datestr(now);\n grad = getGradient(X,F_cell,par);\n ver= struct([]);\n\n clear('init');\n init.nr_X = norm(X);\n init.nr_grad_all = 0;\n for i=1:par.nWay\n this_value = norm(grad{i},'fro');\n init.(['nr_grad_',num2str(i)]) = this_value;\n init.nr_grad_all = init.nr_grad_all + this_value^2;\n end\n init.nr_grad_all = sqrt(init.nr_grad_all);\n REC(1).init = init;\n\n initializer= str2func([par.method,'_initializer']);\n iterSolver = str2func([par.method,'_iterSolver']);\n iterLogger = str2func([par.method,'_iterLogger']);\n\n % Collect initial information for analysis/debugging\n if par.verbose \n tTemp = cputime;\n prev_F_cell = F_cell;\n pGrad = 
getProjGradient(X,F_cell,par);\n ver = prepareHIS(ver,X,F_cell,ktensor(F_cell),prev_F_cell,pGrad,init,par,0,0);\n tPrev = tPrev+(cputime-tTemp);\n end\n\n % Execute initializer\n [F_cell,par,val,ver] = feval(initializer,X,F_cell,par,ver);\n\n if par.verbose & ~isempty(ver)\n tTemp = cputime;\n if par.verbose == 2, display(ver);, end\n REC.HIS = ver;\n tPrev = tPrev+(cputime-tTemp);\n end\n\n REC(1).par = par;\n tTemp = cputime; display(par); tPrev = tPrev+(cputime-tTemp);\n tStart = tPrev;, tTotal = 0; \n\n if (par.stop_criterion == 2) && ~isfield(ver,'rel_Error')\n F_kten = ktensor(F_cell);\n ver(1).rel_Error = getRelError(X,ktensor(F_cell),init);\n end\n\n % main iterations\n for iter=1:par.max_iter;\n cntu = 1;\n\n [F_cell,val] = feval(iterSolver,X,F_cell,iter,par,val);\n pGrad = getProjGradient(X,F_cell,par);\n F_kten = ktensor(F_cell);\n\n prev_Ver = ver;\n ver= struct([]);\n if (iter >= par.min_iter)\n if (par.verbose && (tTotal > par.max_time)) || (~par.verbose && ((cputime-tStart)>par.max_time))\n cntu = 0;\n else\n switch par.stop_criterion\n case 1\n ver(1).SC_PGRAD = getStopCriterion(pGrad,init,par);\n if (ver.SC_PGRAD0))/(size(F.U{i},1)*size(F.U{i},2));\n end\n final.rel_Error = getRelError(X,F_kten,init);\n REC.final = final;\n \n REC.finish_time = datestr(now);\n\n display(final);\nend\n\n%----------------------------------------------------------------------------------------------\n% Utility Functions \n%----------------------------------------------------------------------------------------------\nfunction ver = prepareHIS(ver,X,F,F_kten,prev_F,pGrad,init,par,iter,elapsed)\n ver(1).iter = iter;\n ver.elapsed = elapsed;\n if ~isfield(ver,'rel_Error')\n ver.rel_Error = getRelError(X,F_kten,init);\n end\n for i=1:par.nWay\n ver.(['f_change_',num2str(i)]) = norm(F{i}-prev_F{i});\n ver.(['f_density_',num2str(i)]) = length(find(F{i}>0))/(size(F{i},1)*size(F{i},2));\n ver.(['rel_nr_pgrad_',num2str(i)]) = norm(pGrad{i},'fro')/init.(['nr_grad_',num2str(i)]);\n end\nend\n\nfunction HIS = saveHIS(idx,ver,HIS)\n fldnames = fieldnames(ver);\n\n for i=1:length(fldnames)\n flname = fldnames{i};\n HIS.(flname)(idx) = ver.(flname);\n end\nend\n\nfunction rel_Error = getRelError(X,F_kten,init)\n rel_Error = sqrt(max(init.nr_X^2 + norm(F_kten)^2 - 2 * innerprod(X,F_kten),0))/init.nr_X;\nend\n\nfunction [grad] = getGradient(X,F,par)\n grad = cell(par.nWay,1);\n for k=1:par.nWay\n ways = 1:par.nWay;\n ways(k)='';\n XF = mttkrp(X,F,k);\n % Compute the inner-product matrix\n FF = ones(par.r,par.r);\n for i = ways\n FF = FF .* (F{i}'*F{i});\n end\n grad{k} = F{k} * FF - XF;\n end\nend\n\nfunction [pGrad] = getProjGradient(X,F,par)\n pGrad = cell(par.nWay,1);\n for k=1:par.nWay\n ways = 1:par.nWay;\n ways(k)='';\n XF = mttkrp(X,F,k);\n % Compute the inner-product matrix\n FF = ones(par.r,par.r);\n for i = ways\n FF = FF .* (F{i}'*F{i});\n end\n grad = F{k} * FF - XF;\n pGrad{k} = grad(grad<0|F{k}>0);\n end\nend\n\nfunction retVal = getStopCriterion(pGrad,init,par)\n retVal = 0;\n for i=1:par.nWay\n retVal = retVal + (norm(pGrad{i},'fro'))^2;\n end\n retVal = sqrt(retVal)/init.nr_grad_all;\nend\n\n% 'anls_bpp' : ANLS with Block Principal Pivoting Method \n% Reference:\n% Jingu Kim and Haesun Park.\n% Fast Nonnegative Tensor Factorization with an Active-set-like Method.\n% In High-Performance Scientific Computing: Algorithms and Applications, \n% Springer, 2012, pp. 
311-326.\nfunction [F,par,val,ver] = anls_bpp_initializer(X,F,par,ver)\n F{par.orderWays(1)} = zeros(size(F{par.orderWays(1)}));\n\n for k=1:par.nWay\n ver(1).(['turnZr_',num2str(k)]) = 0;\n ver.(['turnNz_',num2str(k)]) = 0;\n ver.(['numChol_',num2str(k)]) = 0;\n ver.(['numEq_',num2str(k)]) = 0;\n ver.(['suc_',num2str(k)]) = 0;\n end\n val.FF = cell(par.nWay,1);\n for k=1:par.nWay\n val.FF{k} = F{k}'*F{k};\n end\nend\n\nfunction [F,val] = anls_bpp_iterSolver(X,F,iter,par,val)\n % solve NNLS problems for each factor\n for k=1:par.nWay\n curWay = par.orderWays(k);\n ways = 1:par.nWay;\n ways(curWay)='';\n XF = mttkrp(X,F,curWay);\n % Compute the inner-product matrix\n FF = ones(par.r,par.r);\n for i = ways\n FF = FF .* val.FF{i};\n end\n [Fthis,temp,sucThis,numCholThis,numEqThis] = nnlsm_blockpivot(FF,XF',1,F{curWay}');\n F{curWay}=Fthis';\n val(1).FF{curWay} = F{curWay}'*F{curWay};\n val.(['numChol_',num2str(k)]) = numCholThis;\n val.(['numEq_',num2str(k)]) = numEqThis;\n val.(['suc_',num2str(k)]) = sucThis;\n end\nend\n\nfunction [ver] = anls_bpp_iterLogger(ver,par,val,F,prev_F)\n for k=1:par.nWay\n ver.(['turnZr_',num2str(k)]) = length(find( (prev_F{k}>0) & (F{k}==0) ))/(size(F{k},1)*size(F{k},2));\n ver.(['turnNz_',num2str(k)]) = length(find( (prev_F{k}==0) & (F{k}>0) ))/(size(F{k},1)*size(F{k},2));\n ver.(['numChol_',num2str(k)]) = val.(['numChol_',num2str(k)]);\n ver.(['numEq_',num2str(k)]) = val.(['numEq_',num2str(k)]);\n ver.(['suc_',num2str(k)]) = val.(['suc_',num2str(k)]);\n end\nend\n\n% 'anls_asgroup' : ANLS with Active Set Method and Column Grouping\n% Reference:\n% Kim, H. and Park, H. and Elden, L.\n% Non-negative Tensor Factorization Based on Alternating Large-scale Non-negativity-constrained Least Squares.\n% In Proceedings of IEEE 7th International Conference on Bioinformatics and Bioengineering \n% (BIBE07), 2, pp. 1147-1151,2007\nfunction [F,par,val,ver] = anls_asgroup_initializer(X,F,par,ver)\n [F,par,val,ver] = anls_bpp_initializer(X,F,par,ver);\nend\n\nfunction [F,val] = anls_asgroup_iterSolver(X,F,iter,par,val)\n % solve NNLS problems for each factor\n for k=1:par.nWay\n curWay = par.orderWays(k);\n ways = 1:par.nWay;\n ways(curWay)='';\n XF = mttkrp(X,F,curWay);\n % Compute the inner-product matrix\n FF = ones(par.r,par.r);\n for i = ways\n FF = FF .* val.FF{i};\n end\n ow = 0;\n [Fthis,temp,sucThis,numCholThis,numEqThis] = nnlsm_activeset(FF,XF',ow,1,F{curWay}');\n F{curWay}=Fthis';\n val(1).FF{curWay} = F{curWay}'*F{curWay};\n val.(['numChol_',num2str(k)]) = numCholThis;\n val.(['numEq_',num2str(k)]) = numEqThis;\n val.(['suc_',num2str(k)]) = sucThis;\n end\nend\n\nfunction [ver] = anls_asgroup_iterLogger(ver,par,val,F,prev_F)\n ver = anls_bpp_iterLogger(ver,par,val,F,prev_F);\nend\n\n% 'mu' : Multiplicative Updating Method\n% Reference:\n% M. Welling and M. Weber.\n% Positive tensor factorization.\n% Pattern Recognition Letters, 22(12), pp. 
1255???1261, 2001.\nfunction [F,par,val,ver] = mu_initializer(X,F,par,ver)\n val.FF = cell(par.nWay,1);\n for k=1:par.nWay\n val.FF{k} = F{k}'*F{k};\n end\nend\n\nfunction [F,val] = mu_iterSolver(X,F,iter,par,val)\n epsilon = 1e-16;\n\n for k=1:par.nWay\n curWay = par.orderWays(k);\n ways = 1:par.nWay;\n ways(curWay)='';\n % Calculate Fnew = X_(n) * khatrirao(all U except n, 'r').\n XF = mttkrp(X,F,curWay);\n % Compute the inner-product matrix\n FF = ones(par.r,par.r);\n for i = ways\n FF = FF .* val.FF{i};\n end\n F{curWay} = F{curWay}.*XF./(F{curWay}*FF+epsilon);\n val(1).FF{curWay} = F{curWay}'*F{curWay};\n end\nend\n\nfunction [ver] = mu_iterLogger(ver,par,val,F,prev_F)\nend\n\n% 'hals' : Hierarchical Alternating Least Squares Method\n% Reference:\n% Cichocki, A. and Phan, A.H.\n% Fast local algorithms for large scale nonnegative matrix and tensor factorizations.\n% IEICE Trans. Fundam. Electron. Commun. Comput. Sci. E92-A(3), 708???721 (2009)\nfunction [F,par,val,ver] = hals_initializer(X,F,par,ver)\n % normalize\n d = ones(1,par.r);\n for k=1:par.nWay-1\n curWay = par.orderWays(k);\n norm2 = sqrt(sum(F{curWay}.^2,1));\n F{curWay} = F{curWay}./repmat(norm2,size(F{curWay},1),1);\n d = d .* norm2;\n end\n curWay = par.orderWays(end);\n F{curWay} = F{curWay}.*repmat(d,size(F{curWay},1),1);\n\n val.FF = cell(par.nWay,1);\n for k=1:par.nWay\n val.FF{k} = F{k}'*F{k};\n end\nend\n\nfunction [F,val] = hals_iterSolver(X,F,iter,par,val)\n epsilon = 1e-16;\n\n d = sum(F{par.orderWays(end)}.^2,1);\n\n for k=1:par.nWay\n curWay = par.orderWays(k);\n ways = 1:par.nWay;\n ways(curWay)='';\n % Calculate Fnew = X_(n) * khatrirao(all U except n, 'r').\n XF = mttkrp(X,F,curWay);\n % Compute the inner-product matrix\n FF = ones(par.r,par.r);\n for i = ways\n FF = FF .* val.FF{i};\n end\n if k= 0. C and d must be real.\n%\n% X = LSQNONNEG(C,d,OPTIONS) minimizes with the default optimization\n% parameters replaced by values in the structure OPTIONS, an argument\n% created with the OPTIMSET function. See OPTIMSET for details. Used\n% options are Display and TolX. (A default tolerance TolX of \n% 10*MAX(SIZE(C))*NORM(C,1)*EPS is used.) \n% \n% X = LSQNONNEG(PROBLEM) finds the minimum for PROBLEM. PROBLEM is a\n% structure with the matrix 'C' in PROBLEM.C, the vector 'd' in\n% PROBLEM.d, the options structure in PROBLEM.options, and solver name\n% 'lsqnonneg' in PROBLEM.solver. \n%\n% [X,RESNORM] = LSQNONNEG(...) also returns the value of the squared 2-norm of \n% the residual: norm(d-C*X)^2.\n%\n% [X,RESNORM,RESIDUAL] = LSQNONNEG(...) also returns the value of the \n% residual: d-C*X.\n% \n% [X,RESNORM,RESIDUAL,EXITFLAG] = LSQNONNEG(...) returns an EXITFLAG that\n% describes the exit condition. Possible values of EXITFLAG and the\n% corresponding exit conditions are\n%\n% 1 LSQNONNEG converged with a solution X.\n% 0 Iteration count was exceeded. Increasing the tolerance\n% (OPTIONS.TolX) may lead to a solution.\n% \n% [X,RESNORM,RESIDUAL,EXITFLAG,OUTPUT] = LSQNONNEG(...) returns a structure\n% OUTPUT with the number of steps taken in OUTPUT.iterations, the type of \n% algorithm used in OUTPUT.algorithm, and the exit message in OUTPUT.message.\n%\n% [X,RESNORM,RESIDUAL,EXITFLAG,OUTPUT,LAMBDA] = LSQNONNEG(...) returns \n% the dual vector LAMBDA where LAMBDA(i) <= 0 when X(i) is (approximately) 0 \n% and LAMBDA(i) is (approximately) 0 when X(i) > 0.\n% \n% See also LSCOV, SLASH.\n\n% Copyright 1984-2016 The MathWorks, Inc. 
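% A minimal usage sketch, assuming only the [X,RESNORM] = LSQNONNEG(C,d)
% interface documented above; the matrix C and the vector d are hypothetical
% toy values.
C = [0.0372 0.2869; 0.6861 0.7071; 0.6233 0.6245; 0.6344 0.6170];
d = [0.8587; 0.1781; 0.0747; 0.8405];
[x, resnorm] = lsqnonneg(C, d);    % every entry of x is >= 0
% resnorm is the squared 2-norm of the residual, i.e. norm(d - C*x)^2.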
\n\n% Reference:\n% Lawson and Hanson, \"Solving Least Squares Problems\", Prentice-Hall, 1974.\n\n% Check if more inputs have been passed. In that case error.\nif nargin > 4\n error('MATLAB:lsqnonneg:TooManyInputs',...\n getString(message('MATLAB:optimfun:lsqnonneg:TooManyInputs')));\nend\n\ndefaultopt = struct('Display','notify','TolX','10*eps*norm(C,1)*length(C)');\n% If just 'defaults' passed in, return the default options in X\nif nargin == 1 && nargout <= 1 && isequal(C,'defaults')\n x = defaultopt;\n return\nend\n\nif nargin < 3\n options = [];\nend\n\nif nargin == 1\n % Detect problem structure input\n if isa(C,'struct')\n [C,d,options] = separateOptimStruct(C);\n else % Single input and non-structure.\n error('MATLAB:lsqnonneg:InputArg',...\n getString(message('MATLAB:optimfun:lsqnonneg:InputArg')));\n end\nend\n\nif nargin == 0\n error('MATLAB:lsqnonneg:NotEnoughInputs',...\n getString(message('MATLAB:optimfun:lsqnonneg:NotEnoughInputs')));\nend\n\nif ~isreal(C) || ~isreal(d)\n error('MATLAB:lsqnonneg:ComplexCorD',...\n getString(message('MATLAB:optimfun:lsqnonneg:ComplexCorD')));\nend\n\n% Check for non-double inputs\nif ~isa(C,'double') || ~isa(d,'double')\n error('MATLAB:lsqnonneg:NonDoubleInput',...\n getString(message('MATLAB:optimfun:lsqnonneg:NonDoubleInput')));\nend\n\n% Check if options was created with optimoptions\nif ~isempty(options) && isa(options,'optim.options.SolverOptions')\n error('MATLAB:lsqnonneg:ArgNotStruct',...\n getString(message('MATLAB:optimfun:commonMessages:ArgNotStruct', 3)));\nend\n% Check for deprecated syntax\noptions = deprecateX0(options,nargin,varargin{:});\n\nprinttype = optimget(options,'Display',defaultopt,'fast');\ntol = optimget(options,'TolX',defaultopt,'fast');\n\n% In case the defaults were gathered from calling: optimset('lsqnonneg'):\nif ischar(tol)\n if strcmpi(tol,'10*eps*norm(c,1)*length(c)')\n tol = 10*eps*norm(C,1)*length(C);\n else\n error('MATLAB:lsqnonneg:OptTolXNotPosScalar',...\n getString(message('MATLAB:optimfun:lsqnonneg:OptTolXNotPosScalar')));\n end\nend\n\nswitch printtype\n case {'notify','notify-detailed'}\n verbosity = 1;\n case {'none','off'}\n verbosity = 0;\n case {'iter','iter-detailed'}\n warning('MATLAB:lsqnonneg:InvalidDisplayValueIter',...\n getString(message('MATLAB:optimfun:lsqnonneg:InvalidDisplayValueIter')));\n verbosity = 3;\n case {'final','final-detailed'}\n verbosity = 2;\n otherwise\n error('MATLAB:lsqnonneg:InvalidOptParamDisplay',...\n getString(message('MATLAB:optimfun:lsqnonneg:InvalidOptParamDisplay')));\nend\n\nn = size(C,2);\n% Initialize vector of n zeros and Infs (to be used later)\nnZeros = zeros(n,1);\nwz = nZeros;\n\n% Initialize set of non-active columns to null\nP = false(n,1);\n% Initialize set of active columns to all and the initial point to zeros\nZ = true(n,1);\nx = nZeros;\n\nresid = d - C*x;\nw = C'*resid;\n\n% Set up iteration criterion\nouteriter = 0;\niter = 0;\nitmax = 50;%3*n;\nexitflag = 1;\nallres = zeros(itmax,1);\n\n% Outer loop to put variables into set to hold positive coefficients\nwhile any(Z) && any(w(Z) > tol)\n outeriter = outeriter + 1;\n % Reset intermediate solution z\n z = nZeros; \n % Create wz, a Lagrange multiplier vector of variables in the zero set.\n % wz must have the same size as w to preserve the correct indices, so\n % set multipliers to -Inf for variables outside of the zero set.\n wz(P) = -Inf;\n wz(Z) = w(Z);\n % Find variable with largest Lagrange multiplier\n [~,t] = max(wz);\n % Move variable t from zero set to positive set\n P(t) = true;\n 
Z(t) = false;\n % Compute intermediate solution using only variables in positive set\n z(P) = C(:,P)\\d;\n % inner loop to remove elements from the positive set which no longer belong\n while any(z(P) <= 0)\n iter = iter + 1;\n if iter > itmax\n msg = getString(message('MATLAB:optimfun:lsqnonneg:IterationCountExceeded'));\n if verbosity\n disp(msg)\n end\n exitflag = 0;\n output.iterations = outeriter;\n output.message = msg;\n output.algorithm = 'active-set';\n resnorm = sum(resid.*resid);\n x = z;\n lambda = w;\n return\n end\n % Find indices where intermediate solution z is approximately negative\n Q = (z <= 0) & P;\n % Choose new x subject to keeping new x nonnegative\n alpha = min(x(Q)./(x(Q) - z(Q)));\n x = x + alpha*(z - x);\n % Reset Z and P given intermediate values of x\n Z = ((abs(x) < tol) & P) | Z;\n P = ~Z;\n z = nZeros; % Reset z\n z(P) = C(:,P)\\d; % Re-solve for z\n \n \n end\n x = z;\n resid = d - C*x;\n allres(outeriter) = norm(resid)^2;\n w = C'*resid;\nend\n\nlambda = w;\nresnorm = resid'*resid;\noutput.iterations = outeriter;\noutput.algorithm = 'active-set';\n% msg = getString(message('MATLAB:optimfun:lsqnonneg:OptimizationTerminated'));\n% if verbosity > 1\n% disp(msg)\n% end\n%output.message = msg;\nallres(outeriter+1:end)= 10000000000000000;\n er_pre = 1000000000;\n for i = 1:itmax\n allres(i) = min(allres(i),er_pre);\n er_pre = allres(i);\n end\n\n\n%--------------------------------------------------------------------------\nfunction options = deprecateX0(options,numInputs,varargin)\n% Code to check if user has passed in x0. If so, ignore it and warn of its\n% deprecation. Also check whether the options have been passed in either\n% the 3rd or 4th input.\nif numInputs == 4\n % 4 inputs given; the 3rd (variable name \"options\") will be interpreted\n % as x0, and the 4th as options\n if ~isempty(options)\n % x0 is non-empty\n warning('MATLAB:lsqnonneg:ignoringX0',...\n getString(message('MATLAB:optimfun:lsqnonneg:ignoringX0')));\n end\n % Take the 4th argument as the options\n options = varargin{1};\nelseif numInputs == 3\n % Check if a non-empty or non-struct has been passed in for options\n % If so, assume that it's an attempt to pass x0\n if ~isstruct(options) && ~isempty(options)\n warning('MATLAB:lsqnonneg:ignoringX0',...\n getString(message('MATLAB:optimfun:lsqnonneg:ignoringX0')));\n % No options passed, set to empty\n options = [];\n end\nend\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "experiment_EEAs.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/experiment_EEAs.m", "size": 1254, "source_encoding": "utf_8", "md5": "6824855f221c01fc60b03f94ac1d27fd", "text": "% Running the algorithm on data set M \r\n\r\nfunction [Kall, Hall, resultsErr, Hall2, resultsErr2, nEEAs,resultsErr3,resultsErr4,resultsErr5] = experiment_EEAs( M , r )\r\n\r\n% 2. 
Running the EEAs on the full and subsampled data set\r\nmaxitNNLS = 10; \r\nnEEAs{1} = 'SPA '; \r\nnEEAs{2} = 'VCA '; \r\nnEEAs{3} = 'XRAY '; \r\nnEEAs{4} = 'H2NMF'; \r\nnEEAs{5} = 'SNPA '; \r\nfor algo = 1 : 5\r\n \r\n D = M; % full data set\r\n fprintf([nEEAs{algo}, ' on the full data set...']);\r\n \r\n %% Normalization to apply EEAs that need it (SPA, VCA, H2NMF, SNPA)\r\n \r\n e = cputime;\r\n K = EEAs(D,r,algo); % extract endmembers from 'dictionary'\r\n Kall{algo} = K;\r\n % Error\r\n [H, err] = plotnnlsHALSupdt(M,D(:,K),[],maxitNNLS,algo); % optimal weights\r\n [H2, Hslow,HPW,HA] = NNMPmatrix(M,D(:,K),maxitNNLS,algo,err,nEEAs{algo});\r\n Hall{algo} = H;\r\n Hall2{algo} = H2;\r\n\r\n resultsErr(algo) = 100*norm(M - D(:,K)*H,'fro')/norm(M,'fro');\r\n resultsErr2(algo) = 100*norm(M - D(:,K)*H2,'fro')/norm(M,'fro');\r\n resultsErr3(algo) = 100*norm(M - Hslow,'fro')/norm(M,'fro');\r\n resultsErr4(algo) = 100*norm(M - D(:,K)*HPW,'fro')/norm(M,'fro');\r\n resultsErr5(algo) = 100*norm(M - D(:,K)*HA,'fro')/norm(M,'fro');\r\n\r\n fprintf(' done.\\n');\r\nend\r\n\r\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "fquad.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/fquad.m", "size": 1774, "source_encoding": "utf_8", "md5": "85e40c6d6d0a040702758d817f0a5008", "text": "% Select treshold to split the entries of x into two subsets\r\n% \r\n% See Section 3.2 in \r\n% Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images \r\n% using Rank-Two Nonnegative Matrix Factorization', arXiv. \r\n\r\nfunction [thres,delta,fobj] = fquad(x,s); \r\n\r\nif nargin == 1\r\n s = 0.01; % grid for the values of delta\r\nend\r\n\r\n[fdel,fdelp,delta,finter,gs] = fdelta(x,s); \r\n% fdel is the percentage of values smaller than delta\r\n% finter is the number of points in a small interval around delta\r\n\r\n\r\nwarning('off'); \r\nfobj = -log( fdel.* (1-fdel) ) + exp(finter); \r\n% Can potentially use other objectives: \r\n%fobj = -log( fdel.* (1-fdel) ) + 2.^(finter); \r\n%fobj = ( 2*(fdel - 0.5) ).^2 + finter.^2; \r\n%fobj = -log( fdel.* (1-fdel) ) + finter.^2; \r\n%fobj = ( 2*(fdel - 0.5) ).^2 + finter.^2; \r\nwarning('on');\r\n[a,b] = min(fobj); \r\nthres = delta(b); \r\n\r\n\r\n% Evaluate the function fdel = sum( x_i <= delta)/n and its derivate \r\n% for all delta in interval [0,1] with step s\r\nfunction [fdel,fdelp,delta,finter,gs] = fdelta(x,s); \r\n\r\nn = length(x); \r\nif nargin == 1\r\n s = 0.01; \r\nend\r\ndelta = 0:s:1; \r\nlD = length(delta); \r\n\r\ngs = 0.05; % Other values could be used, in [0,0.5]\r\n\r\nfor i = 1 : lD\r\n fdel(i) = sum(x <= delta(i))/n;\r\n if i == 2 % use only next point to evaluate fdelp(1)\r\n fdelp(1) = (fdel(2)-fdel(1))/s; \r\n elseif i >= 2 % use next and previous point to evaluate fdelp(i)\r\n fdelp(i-1) = (fdel(i)-fdel(i-2))/2/s; \r\n if i == lD % use only previous point to evaluate fdelp(lD)\r\n fdelp(lD) = (fdel(lD)-fdel(lD-1))/s; \r\n end\r\n end \r\n deltahigh = min(1,delta(i) + gs);\r\n deltalow = max(0,delta(i) - gs); \r\n finter(i) = ( sum(x <= deltahigh) - sum(x < deltalow) )/n/(deltahigh-deltalow) ;\r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "anls_entry_rank2_precompute_opt.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/anls_entry_rank2_precompute_opt.m", "size": 1376, "source_encoding": "utf_8", "md5": "12fa56b139f94a061bb74c871176ec31", "text": "% Solve min_H ||M - WH'||_2 s.t. 
H >= 0\n%\n% where left = W^TW and right = M^TW \n% \n% See Kuang, Park, `Fast Rank-2 Nonnegative Matrix Factorization \n% for Hierarchical Document Clustering', KDD '13. \n%\n% See also Algorithm 4 in \n% Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images \n% using Rank-Two Nonnegative Matrix Factorization', arXiv. \n%\n% ****** Input ******\n% left : 2-by-2 matrix (or possibly 1-by-1)\n% right : n-by-2 matrix (or possibly n-by-1)\n%\n% ****** Output ******\n% H : nonnegative n-by-2 matrix, solution to KKT equations\n\nfunction H = anls_entry_rank2_precompute_opt(left, right)\n\nwarning('off'); \nif length(left) == 1\n H = max(0,right/left);\nelse\n H = (left \\ right')'; \n use_either = ~all(H>=0, 2);\n H(use_either, :) = anls_entry_rank2_binary(left, right(use_either,:)); \n H = H'; \nend\nwarning('on'); \n\n\n% Case where one entry in each column of H has to be equal to zero\nfunction solve_either = anls_entry_rank2_binary(left, right)\n\nn = size(right, 1);\n\nsolve_either = zeros(n, 2);\nsolve_either(:, 1) = max(0, right(:, 1) ./ left(1,1)); \nsolve_either(:, 2) = max(0, right(:, 2) ./ left(2,2)); \n\ncosine_either = solve_either.* repmat([sqrt(left(1,1)), sqrt(left(2,2))],n,1); \n\nchoose_first = (cosine_either(:, 1) >= cosine_either(:, 2));\nsolve_either(choose_first, 2) = 0;\nsolve_either(~choose_first, 1) = 0;"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "hierclust2nmf.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/hierclust2nmf.m", "size": 9170, "source_encoding": "utf_8", "md5": "2ed0455c900bc24ed801586e0ea7ebd8", "text": "% Hierarchical custering based on rank-two nonnegative matrix factorization\r\n%\r\n% Given a data matrix M (m-by-n) representing n data points in an\r\n% m-dimensional space, this algorithm computes a set of clusters obtained\r\n% using the hierarchical rank-two NMF method described in \r\n%\r\n% Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images \r\n% using Rank-Two Nonnegative Matrix Factorization', arXiv. \r\n%\r\n% \r\n% ****** Input ******\r\n% M : m-by-n data matrix (or a H-by-L-by-m tensor)\r\n% n (=HL) is the number of pixels, m the number of wavelengths \r\n% r : number of clusters to generate, OR \r\n% for r = []: the user is asked how many clusters she/he wants, \r\n% OR she/he can choose the leaf node to be split based \r\n% on the displayed current clusters. (default)\r\n% algo : algorithm used to split the clusters\r\n% 1. rank-two NMF (default)\r\n% 2. k-means\r\n% 3. spherical k-means\r\n% \r\n% sol : it can be used to resume a previous solution computed by \r\n% hierclust2nmf (this allows to go deeper in the tree and \r\n% generate more clusters from a previous solution). \r\n% ---> Optional.\r\n% \r\n% displ: display the evolution of the hierarhical procedure\r\n%\r\n% ****** Output ******\r\n% IDX : an indicator vector in {1,2,...,r}^m, identifying r clusters\r\n% C, J : The columns of C are the spectral signatures of endmembers, \r\n% that is, the cluster centroids. \r\n% J is the index set locating the endmembers in the data set:\r\n% C = M(:,J) (for matrix format). 
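% A minimal usage sketch of the interface documented above, assuming the
% companion routines of this folder (splitclust, rank2nmf, reprvec, ...) are
% on the MATLAB path; the data matrix M and the choice r = 3 are hypothetical.
rng(1);                                % reproducible toy data
M = abs(randn(50, 200));               % 200 nonnegative "pixels", 50 "wavelengths"
[IDX, C, J] = hierclust2nmf(M, 3, 1);  % algo = 1: split clusters with rank-two NMF
% IDX(k) is the cluster of pixel k, J locates the endmembers, and C = M(:,J).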
\r\n% sol : contains the tree structure\r\n\r\nfunction [IDX, C, J, sol] = hierclust2nmf(M,r,algo,sol,displ)\r\n\r\nif nargin <= 4\r\n displ = 1;\r\nend\r\n\r\n[m,n] = size(M); \r\n\r\nif min(M(:)) < 0\r\n warning('The input matrix contains negative entries which have been set to zero'); \r\n M = max(M,0);\r\nend\r\n\r\n% The input is a tensor --> matrix format\r\nif length(size(M)) == 3\r\n [H,L,m] = size(M); \r\n n = H*L; \r\n A = zeros(m,n);\r\n for i = 1 : m\r\n A(i,:) = reshape(M(:,:,i),1,n);\r\n end\r\n clear M; M = A;\r\nend\r\n \r\nif nargin == 1 || isempty(r)\r\n if ~exist('algo') || isempty(algo)\r\n algo = 1; \r\n end\r\n y = 1; n = 0; \r\n b = input('Do you want to visually choose the cluster to be split? (y/n) \\n');\r\n [m,n] = size(M); \r\n if b == 'y' || b == 1\r\n if ~exist('L')\r\n H = input('What is the number of pixels in each row of your hyperspectral image? \\n');\r\n L = n/H; \r\n end\r\n r = Inf; \r\n manualsplit = 1; % Split according to user feedback\r\n elseif b == 'n' || b == 0\r\n r = input('How many clusters do you want to generate? '); \r\n manualsplit = 0; \r\n else\r\n error('Enter ''y'' or ''n''.')\r\n end\r\nelse \r\n manualsplit = 0; % Split according to the proposed criterion\r\n if nargin == 2\r\n algo = 1;\r\n end\r\nend\r\n\r\nif nargin < 4 || isempty(sol)\r\n % Intialization of the tree structure\r\n sol.K{1} = (1:n)'; % All clusters; the first node contains all pixels\r\n sol.allnodes = 1; % nodes numbering\r\n sol.maxnode = 1; % Last leaf node added\r\n sol.parents = [0 0]; % Parents of leaf nodes\r\n sol.childs = []; % Child(i,:) = child of node i\r\n sol.leafnodes = 1; % Current clustering: set of leaf nodes corresponding to selected clusters\r\n sol.e = -1; % Criterion to decide which leafnode to split \r\n sol.U(:,1) = ones(m,1); % Centroids\r\n sol.Ke(1) = 1; % index centroid: index of the endmember\r\n sol.count = 1; % Number of clusters generated so far\r\n sol.firstsv = 0; \r\nend\r\n\r\nif displ == 1\r\n fprintf('Hierarchical clustering started... \\n'); \r\nend\r\n\r\nwhile sol.count < r \r\n \r\n %***************************************************************\r\n % Update: split leaf nodes added at previous iteration\r\n %***************************************************************\r\n for k = 1 : length(sol.leafnodes)\r\n % Update leaf nodes not yet split\r\n if sol.e(sol.leafnodes(k)) == -1 && length(sol.K{sol.leafnodes(k)}) > 1\r\n % Update leaf node ind(k) by splitting it and adding its child nodes\r\n [Kc,Uc,sc] = splitclust(M(:,sol.K{sol.leafnodes(k)}),algo); \r\n \r\n if ~isempty(Kc{2}) % the second cluster has to be non-empty: this can occur for a rank-one matrix. 
\r\n % Add the two leaf nodes, child of nodes(sol.leafnodes(k))\r\n sol.allnodes = [sol.allnodes; sol.maxnode+1; sol.maxnode+2]; \r\n\r\n sol.parents(sol.maxnode+1,:) = [sol.leafnodes(k) 0]; \r\n sol.parents(sol.maxnode+2,:) = [sol.leafnodes(k) 0];\r\n\r\n sol.childs(sol.leafnodes(k), : ) = [sol.maxnode+1 sol.maxnode+2]; \r\n sol.childs(sol.maxnode+1 , :) = 0; \r\n sol.childs(sol.maxnode+2 , :) = 0; \r\n\r\n sol.K{sol.maxnode+1} = sol.K{sol.leafnodes(k)}(Kc{1}); \r\n sol.K{sol.maxnode+2} = sol.K{sol.leafnodes(k)}(Kc{2}); \r\n\r\n [sol.U(:,sol.maxnode+1),sol.firstsv(sol.maxnode+1), sol.Ke(sol.maxnode+1)] = reprvec(M(:,sol.K{sol.maxnode+1})); \r\n [sol.U(:,sol.maxnode+2),sol.firstsv(sol.maxnode+2), sol.Ke(sol.maxnode+2)] = reprvec(M(:,sol.K{sol.maxnode+2})); \r\n\r\n % Update criterion to choose next cluster to split\r\n sol.e([sol.maxnode+1 sol.maxnode+2]) = -1; \r\n\r\n % Compte the reduction in the error if kth cluster is split \r\n sol.e(sol.leafnodes(k)) = sol.firstsv(sol.maxnode+1)^2 + sol.firstsv(sol.maxnode+2)^2 - sol.firstsv(sol.leafnodes(k))^2; \r\n\r\n sol.maxnode = sol.maxnode+2; \r\n end\r\n end\r\n end\r\n\r\n %***************************************************************\r\n % Choose the cluster to split, split it, and update leaf nodes\r\n %***************************************************************\r\n if sol.count == 1 % Only one leaf node, the root node: split it. \r\n b = 1; \r\n elseif manualsplit == 0 % Split the node maximizing the critetion e\r\n [a,b] = max(sol.e(sol.leafnodes)); \r\n elseif manualsplit == 1 % Split w.r.t. user visual feedback\r\n [a,b] = max(sol.e(sol.leafnodes)); \r\n \r\n close all; a = affclust(sol.K(sol.leafnodes),H,L); \r\n fprintf('Which cluster do you want to split (between 1 and %2.0f)? \\n', sol.count);\r\n fprintf('Suggested cluster to split w.r.t. error: %2.0f \\n', b); \r\n fprintf('Type 0 if you want to stop. \\n'); \r\n fprintf('Type -1 if you want to fuse two clusters. \\n');\r\n b = input('Choice: '); \r\n if b == 0\r\n IDX = clu2vec(sol.K(sol.leafnodes)); \r\n for k = 1 : length(sol.leafnodes)\r\n J(k) = sol.K{sol.leafnodes(k)}(sol.Ke(sol.leafnodes(k))); \r\n end\r\n C = sol.U(:,sol.leafnodes); \r\n disp('*************************************************************'); \r\n return; \r\n end\r\n end\r\n if b == -1\r\n fprintf('Which clusters do you want to fuse? (between 1 and %2.0f)? 
\\n', sol.count); \r\n b1 = input('Choice 1: ');\r\n b2 = input('Choice 2: ');\r\n b = sort([b1 b2]); \r\n % Create a new node, child of the two fused ones, and update its entries\r\n sol.maxnode = sol.maxnode+1; \r\n sol.allnodes = [sol.allnodes; sol.maxnode+1]; \r\n sol.parents(sol.maxnode+1,:) = [sol.leafnodes(b(1)) sol.leafnodes(b(2))]; \r\n sol.childs(sol.maxnode+1,:) = 0; \r\n sol.K{sol.maxnode+1} = [sol.K{sol.leafnodes(b(1))}; sol.K{sol.leafnodes(b(2))}]; \r\n [u1,s1,ke1] = reprvec(M(:,sol.K{sol.maxnode+1})); \r\n sol.firstsv(sol.maxnode+1) = s1;\r\n sol.U(:,sol.maxnode+1) = u1; \r\n sol.Ke(:,sol.maxnode+1) = ke1; \r\n sol.e([sol.maxnode+1]) = -1; \r\n sol.e2([sol.maxnode+1]) = -1;\r\n \r\n % Update leaf nodes: delete two fused and add the new one\r\n sol.leafnodes = sol.leafnodes([1:b(1)-1 b(1)+1:b(2)-1 b(2)+1:end]);\r\n sol.leafnodes = [sol.leafnodes; sol.maxnode+1]; \r\n \r\n % Update counters\r\n sol.maxnode = sol.maxnode+1; \r\n sol.count = sol.count-1; \r\n else\r\n \r\n sol.leafnodes = [sol.leafnodes; sol.childs(sol.leafnodes(b),:)']; % Add its two children\r\n sol.leafnodes = sol.leafnodes([1:b-1 b+1:end]); % Remove bth leaf node\r\n\r\n if manualsplit == 0 && displ == 1 % Dispay progress in tree exploration\r\n if mod(sol.count,10) == 0, \r\n fprintf('%1.0f...\\n',sol.count); \r\n else\r\n fprintf('%1.0f...',sol.count); \r\n end\r\n if sol.count == r-1, \r\n fprintf('Done. \\n',sol.count); \r\n end\r\n elseif manualsplit == 1\r\n disp('*************************************************************'); \r\n end\r\n\r\n sol.count = sol.count+1; \r\n end\r\nend\r\n\r\nIDX = clu2vec(sol.K(sol.leafnodes)); \r\nfor k = 1 : length(sol.leafnodes)\r\n J(k) = sol.K{sol.leafnodes(k)}(sol.Ke(sol.leafnodes(k))); \r\nend\r\nC = sol.U(:,sol.leafnodes); "} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "fastsvds.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/fastsvds.m", "size": 683, "source_encoding": "utf_8", "md5": "b81c39bffb76f2522e20bd1c42ae60b0", "text": "% \"Fast\" but less accurate SVD by computing the SVD of MM^T or M^TM \r\n% ***IF*** one of the dimensions of M is much smaller than the other. \r\n% Note. This is numerically less stable, but useful for large hyperspectral \r\n% images. \r\n\r\nfunction [u,s,v] = fastsvds(M,r); \r\n\r\n[m,n] = size(M); \r\nrationmn = 10; % Parameter, should be >= 1\r\n\r\nif m < rationmn*n \r\n MMt = M*M';\r\n [u,s,v] = svds(MMt,r); \r\n v = M'*u; \r\n v = v.*repmat( (sum(v.^2)+1e-16).^(-0.5),n,1); \r\n s = sqrt(s); \r\nelseif n < rationmn*m\r\n MtM = M'*M;\r\n [u,s,v] = svds(MtM,r); \r\n u = M*v; \r\n u = u.*repmat( (sum(u.^2)+1e-16).^(-0.5),m,1); \r\n s = sqrt(s); \r\nelse\r\n [u,s,v] = svds(M,r); \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "splitclust.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/splitclust.m", "size": 1644, "source_encoding": "utf_8", "md5": "3db01b4e0e55f4537496b1675c54445a", "text": "% Given a matrix M, split its columns into two subsets\r\n% \r\n% See Section 3 in \r\n%\r\n% Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images \r\n% using Rank-Two Nonnegative Matrix Factorization', arXiv. \r\n%\r\n%\r\n% ****** Input ******\r\n% M : m-by-n data matrix (or a H-by-L-by-m tensor) \r\n% algo : algorithm used to split the clusters\r\n% 1. rank-two NMF (default)\r\n% 2. k-means\r\n% 3. 
spherical k-means\r\n%\r\n% ****** Output ******\r\n% K : two clusters \r\n% U : corresponding centroids\r\n% s : first singular value of M\r\n\r\nfunction [K,U,s] = splitclust(M,algo); \r\n\r\nif nargin == 1\r\n algo = 1;\r\nend\r\nif algo == 1 % rank-2 NMF\r\n [U,V,s] = rank2nmf(M); \r\n % Normalize columns of V to sum to one\r\n V = V.*repmat( (sum(V)+1e-16).^(-1), 2,1); \r\n x = V(1,:)'; \r\n % Compute treshold to split cluster \r\n threshold = fquad(x); \r\n K{1} = find(x >= threshold); \r\n K{2} = find(x < threshold); \r\n \r\nelseif algo == 2 % k-means\r\n [u,s,v] = fastsvds(M,2); % Initialization: SVD+SPA\r\n Kf = FastSepNMF(s*v',2,0);\r\n U0 = u*s*v(Kf,:)'; \r\n\r\n [IDX,U] = kmeans(M', 2, 'EmptyAction','singleton','Start',U0'); \r\n U = U'; \r\n K{1} = find(IDX==1); \r\n K{2} = find(IDX==2); \r\n s = s(1); \r\n \r\nelseif algo == 3 % shperical k-means\r\n [u,s,v] = fastsvds(M,2); % Initialization: SVD+SPA \r\n Kf = FastSepNMF(s*v',2,0);\r\n U0 = u*s*v(Kf,:)'; \r\n \r\n [IDX,U] = spkmeans(M, U0); \r\n % or (?)\r\n %[IDX,U] = kmeans(M', 2, 'EmptyAction','singleton','Start',U0','Distance','cosine'); \r\n K{1} = find(IDX==1); \r\n K{2} = find(IDX==2); \r\n s = s(1); \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "vectoind.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/vectoind.m", "size": 220, "source_encoding": "utf_8", "md5": "8820e9734a50ee8f7795f604051c3e34", "text": "% From cluster indicator vector to indicator matrix\r\n\r\nfunction V = vectoind(IDX,r)\r\n\r\nm = length(IDX); \r\nif nargin == 1\r\n r = max(IDX(:)); \r\nend\r\n\r\nV = zeros(m,r); \r\nfor i = 1 : r\r\n V(find(IDX==i),i) = 1; \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "rank2nmf.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/rank2nmf.m", "size": 945, "source_encoding": "utf_8", "md5": "22891abe4abe34be96416d412279a329", "text": "% Given a data matrix M (m-by-n), computes a rank-two NMF of M. \r\n%\r\n% See Algorithm 3 in \r\n% \r\n% Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images \r\n% using Rank-Two Nonnegative Matrix Factorization', arXiv. 
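% A minimal usage sketch of rank2nmf as documented here, assuming fastsvds,
% FastSepNMF and anls_entry_rank2_precompute_opt (included in this folder)
% are on the MATLAB path; the toy matrix M is hypothetical.
rng(3);
M = rand(40, 60);                      % nonnegative data
[U, V, s1] = rank2nmf(M);              % M is approximated by U*V with U, V >= 0
% U is 40-by-2, V is 2-by-60, and s1 is the first singular value of M.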
\r\n% \r\n% ****** Input ******\r\n% M : a nonnegative m-by-n data matrix \r\n%\r\n% ****** Output ******\r\n% (U,V) : a rank-two NMF of M\r\n% s1 : first singular value of M \r\n\r\nfunction [U,V,s1] = rank2nmf(M)\r\n\r\n[m,n] = size(M); \r\n\r\n% Best rank-two approximation of M\r\nif min(m,n) == 1\r\n [U,S,V] = fastsvds(M,1); \r\n U = abs(U); V = abs(V); s1 = S; \r\nelse\r\n [u,s,v] = fastsvds(M,2); \r\n s1 = s(1); \r\n K = FastSepNMF(s*v',2); \r\n U = zeros(size(M,1),2); \r\n if length(K) >= 1\r\n U(:,1) = max(u*s*v(K(1),:)',0); \r\n end\r\n if length(K) >= 2\r\n U(:,2) = max(u*s*v(K(2),:)',0); \r\n end\r\n % Compute corresponding optimal V \r\n V = anls_entry_rank2_precompute_opt(U'*U, M'*U); \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "affclust.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/affclust.m", "size": 572, "source_encoding": "utf_8", "md5": "f25d9a40ba0a120bd94d132688082bb1", "text": "% Display clusters\r\n\r\nfunction [a, Vaff] = affclust(K,H,L,ncol,bw); \r\n\r\nn = H*L;\r\n\r\nif iscell(K)\r\n K = clu2vec(K,n); \r\nend\r\n\r\n% K is an indicator vector of type IDX\r\nA = vectoind(K); \r\nr = size(A,2); \r\n\r\n% 'Optimize' display in 16/9\r\nif nargin < 4\r\n ncol = 1; nrow = ceil(r/ncol);\r\n while (r > 1 && L*ncol*9 < H*nrow*16) || rem(r,ncol) == 1\r\n ncol = ncol+1; \r\n nrow = ceil(r/ncol);\r\n end\r\nend\r\nif nargin == 5 && bw == 1\r\n [a, Vaff] = affichage(A,ncol,H,L,bw); %bw==1 switches black and white\r\nelse\r\n [a, Vaff] = affichage(A,ncol,H,L); \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "affichage.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/affichage.m", "size": 1155, "source_encoding": "utf_8", "md5": "ebfdbc0a5a5f07a8cbf18f410f322bdc", "text": "% Display (=affichage in French) of a NMF solution, for image datasets\r\n%\r\n% a = affichage(V,lig,Li,Co)\r\n%\r\n% Input.\r\n% V : (m x r) matrix whose colums contains vectorized images\r\n% lig : number of images per row in the display\r\n% (Co,Li) : dimensions of images\r\n% bw : if bw=1, reverse gray level\r\n%\r\n% Output.\r\n% Diplay columns of matrix V as images\r\n\r\nfunction [a, Vaff] = affichage(V,lig,Li,Co,bw)\r\n\r\nV = max(V,0); [m,r] = size(V); \r\nfor i = 1 : r\r\n if max(V(:,i)) > 0\r\n V(:,i) = V(:,i)/max(V(:,i));\r\n end\r\nend\r\nVaff = []; \r\nfor i = 1 : r\r\n ligne = floor((i-1)/lig)+1; col = i - (ligne-1)*lig;\r\n Vaff((ligne-1)*Li+1:ligne*Li,(col-1)*Co+1:col*Co) = reshape(V(:,i),Li,Co);\r\nend\r\n[m,n] = size(Vaff);\r\nfor i = 1 : n/Co-1\r\n Vaff = [Vaff(:,1:Co*i+i-1) 0.5*ones(m,1) Vaff(:,Co*i+i:end)];\r\nend\r\n[m,n] = size(Vaff);\r\nfor i = 1 : m/Li-1\r\n Vaff = [Vaff(1:Li*i+i-1,:); 0.5*ones(1,n); Vaff(Li*i+i:end,:)];\r\nend\r\nwarning('off'); figure; \r\nif nargin == 5 && bw == 1\r\n imshow(Vaff,[0 1]); \r\nelse\r\n imshow(1-Vaff,[0 1]); \r\nend\r\ncolormap(gray); \r\nwarning('on');\r\na = 1;"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "nnlsm_blockpivot.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/nnlsm_blockpivot.m", "size": 4413, "source_encoding": "utf_8", "md5": "cb9bf3455d6fd3ae19ea98b4dd754547", "text": "% Nonnegativity Constrained Least Squares with Multiple Righthand Sides \r\n% using Block Principal Pivoting method\r\n%\r\n% This software solves the following problem: given A and B, find X such that\r\n% minimize || AX-B ||_F^2 where X>=0 
elementwise.\r\n%\r\n% Reference:\r\n% Jingu Kim and Haesun Park, Toward Faster Nonnegative Matrix Factorization: A New Algorithm and Comparisons,\r\n% In Proceedings of the 2008 Eighth IEEE International Conference on Data Mining (ICDM'08), 353-362, 2008\r\n%\r\n% Written by Jingu Kim (jingu@cc.gatech.edu)\r\n% Copyright 2008-2009 by Jingu Kim and Haesun Park, \r\n% School of Computational Science and Engineering,\r\n% Georgia Institute of Technology\r\n%\r\n% Check updated code at http://www.cc.gatech.edu/~jingu\r\n% Please send bug reports, comments, or questions to Jingu Kim.\r\n% This code comes with no guarantee or warranty of any kind. Note that this algorithm assumes that the\r\n% input matrix A has full column rank.\r\n%\r\n% Last modified Feb-20-2009\r\n%\r\n% \r\n% A : input matrix (m x n) (by default), or A'*A (n x n) if isInputProd==1\r\n% B : input matrix (m x k) (by default), or A'*B (n x k) if isInputProd==1\r\n% isInputProd : (optional, default:0) if turned on, use (A'*A,A'*B) as input instead of (A,B)\r\n% init : (optional) initial value for X\r\n% \r\n% X : the solution (n x k)\r\n% Y : A'*A*X - A'*B where X is the solution (n x k)\r\n% iter : number of iterations\r\n% success : 1 for success, 0 for failure.\r\n% Failure could only happen on a numericall very ill-conditioned problem.\r\n\r\nfunction [ X,Y,iter,success ] = nnlsm_blockpivot( A, B, isInputProd, init )\r\n if nargin<3, isInputProd=0;, end\r\n if isInputProd\r\n AtA = A;, AtB = B;\r\n else\r\n AtA = A'*A;, AtB = A'*B;\r\n end\r\n \r\n [n,k]=size(AtB);\r\n MAX_ITER = n*5;\r\n % set initial feasible solution\r\n X = zeros(n,k);\r\n if nargin<4\r\n Y = - AtB;\r\n PassiveSet = false(n,k);\r\n iter = 0;\r\n else\r\n PassiveSet = (init > 0);\r\n [ X,iter ] = solveNormalEqComb(AtA,AtB,PassiveSet);\r\n Y = AtA * X - AtB;\r\n end\r\n % parameters\r\n pbar = 3;\r\n P = zeros(1,k);, P(:) = pbar;\r\n Ninf = zeros(1,k);, Ninf(:) = n+1;\r\n iter = 0;\r\n\r\n NonOptSet = (Y < 0) & ~PassiveSet;\r\n InfeaSet = (X < 0) & PassiveSet;\r\n NotGood = sum(NonOptSet)+sum(InfeaSet);\r\n NotOptCols = NotGood > 0;\r\n \r\n bigIter = 0;, success=1;\r\n while(~isempty(find(NotOptCols)))\r\n bigIter = bigIter+1;\r\n if ((MAX_ITER >0) && (bigIter > MAX_ITER)) % set max_iter for ill-conditioned (numerically unstable) case\r\n success = 0;, break\r\n end\r\n\r\n Cols1 = NotOptCols & (NotGood < Ninf);\r\n Cols2 = NotOptCols & (NotGood >= Ninf) & (P >= 1);\r\n Cols3Ix = find(NotOptCols & ~Cols1 & ~Cols2);\r\n if ~isempty(find(Cols1))\r\n P(Cols1) = pbar;,Ninf(Cols1) = NotGood(Cols1);\r\n PassiveSet(NonOptSet & repmat(Cols1,n,1)) = true;\r\n PassiveSet(InfeaSet & repmat(Cols1,n,1)) = false;\r\n end\r\n if ~isempty(find(Cols2))\r\n P(Cols2) = P(Cols2)-1;\r\n PassiveSet(NonOptSet & repmat(Cols2,n,1)) = true;\r\n PassiveSet(InfeaSet & repmat(Cols2,n,1)) = false;\r\n end\r\n if ~isempty(Cols3Ix)\r\n for i=1:length(Cols3Ix)\r\n Ix = Cols3Ix(i);\r\n toChange = max(find( NonOptSet(:,Ix)|InfeaSet(:,Ix) ));\r\n if PassiveSet(toChange,Ix)\r\n PassiveSet(toChange,Ix)=false;\r\n else\r\n PassiveSet(toChange,Ix)=true;\r\n end\r\n end\r\n end\r\n NotOptMask = repmat(NotOptCols,n,1);\r\n [ X(:,NotOptCols),subiter ] = solveNormalEqComb(AtA,AtB(:,NotOptCols),PassiveSet(:,NotOptCols));\r\n iter = iter + subiter;\r\n X(abs(X)<1e-12) = 0; % for numerical stability\r\n Y(:,NotOptCols) = AtA * X(:,NotOptCols) - AtB(:,NotOptCols);\r\n Y(abs(Y)<1e-12) = 0; % for numerical stability\r\n \r\n % check optimality\r\n NonOptSet = NotOptMask & (Y < 0) & ~PassiveSet;\r\n 
InfeaSet = NotOptMask & (X < 0) & PassiveSet;\r\n NotGood = sum(NonOptSet)+sum(InfeaSet);\r\n NotOptCols = NotGood > 0;\r\n end\r\nend\r\n"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "FastSepNMF.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/FastSepNMF.m", "size": 3369, "source_encoding": "utf_8", "md5": "d1efacdacf79352c067116cbd6ec4a61", "text": "% FastSepNMF - Fast and robust recursive algorithm for separable NMF\r\n% \r\n% *** Description ***\r\n% At each step of the algorithm, the column of M maximizing ||.||_2 is \r\n% extracted, and M is updated by projecting its columns onto the orthogonal \r\n% complement of the extracted column. \r\n%\r\n% See N. Gillis and S.A. Vavasis, Fast and Robust Recursive Algorithms \r\n% for Separable Nonnegative Matrix Factorization, arXiv:1208.1237. \r\n% \r\n% See also https://sites.google.com/site/nicolasgillis/\r\n%\r\n% [J,normM,U] = FastSepNMF(M,r,normalize) \r\n%\r\n% ****** Input ******\r\n% M = WH + N : a (normalized) noisy separable matrix, that is, W is full rank, \r\n% H = [I,H']P where I is the identity matrix, H'>= 0 and its \r\n% columns sum to at most one, P is a permutation matrix, and\r\n% N is sufficiently small. \r\n% r : number of columns to be extracted. \r\n% normalize : normalize=1 will scale the columns of M so that they sum to one,\r\n% hence matrix H will satisfy the assumption above for any\r\n% nonnegative separable matrix M. \r\n% normalize=0 is the default value for which no scaling is\r\n% performed. For example, in hyperspectral imaging, this \r\n% assumption is already satisfied and normalization is not\r\n% necessary. \r\n%\r\n% ****** Output ******\r\n% J : index set of the extracted columns. \r\n% normM : the l2-norm of the columns of the last residual matrix. \r\n% U : normalized extracted columns of the residual. \r\n%\r\n% --> normM and U can be used to continue the recursion later on without \r\n% recomputing everything from scratch. \r\n%\r\n% This implementation of the algorithm is based on the formula \r\n% ||(I-uu^T)v||^2 = ||v||^2 - (u^T v)^2. 
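% A minimal sketch of the recursion described above on synthetic separable
% data; W, H' and the dimensions below are hypothetical toy values.
rng(0);
m = 30; r = 4; n = 50;
W = rand(m, r);                            % full column rank with probability one
Hp = rand(r, n - r);
Hp = Hp ./ repmat(sum(Hp, 1) + 1, r, 1);   % columns of H' sum to less than one
M = W * [eye(r), Hp];                      % noiseless separable matrix
J = FastSepNMF(M, r);                      % J should recover columns 1..r (up to order)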
\r\n\r\nfunction [J,normM,U] = FastSepNMF(M,r,normalize) \r\n\r\n[m,n] = size(M); \r\nJ = [];\r\n\r\nif nargin <= 2, normalize = 0; end\r\nif normalize == 1\r\n % Normalization of the columns of M so that they sum to one\r\n D = spdiags((sum(M).^(-1))', 0, n, n); M = M*D; \r\nend\r\n\r\nnormM = sum(M.^2); \r\nnM = max(normM); \r\n\r\ni = 1; \r\n% Perform r recursion steps (unless the relative approximation error is \r\n% smaller than 10^-9)\r\nwhile i <= r && max(normM)/nM > 1e-9 \r\n % Select the column of M with largest l2-norm\r\n [a,b] = max(normM); \r\n % Norm of the columns of the input matrix M \r\n if i == 1, normM1 = normM; end \r\n % Check ties up to 1e-6 precision\r\n b = find((a-normM)/a <= 1e-6); \r\n % In case of a tie, select column with largest norm of the input matrix M \r\n if length(b) > 1, [c,d] = max(normM1(b)); b = b(d); end\r\n % Update the index set, and extracted column\r\n J(i) = b; U(:,i) = M(:,b); \r\n \r\n % Compute (I-u_{i-1}u_{i-1}^T)...(I-u_1u_1^T) U(:,i), that is, \r\n % R^(i)(:,J(i)), where R^(i) is the ith residual (with R^(1) = M).\r\n for j = 1 : i-1\r\n U(:,i) = U(:,i) - U(:,j)*(U(:,j)'*U(:,i));\r\n end\r\n % Normalize U(:,i)\r\n U(:,i) = U(:,i)/norm(U(:,i)); \r\n \r\n % Compute v = u_i^T(I-u_{i-1}u_{i-1}^T)...(I-u_1u_1^T)\r\n v = U(:,i); \r\n for j = i-1 : -1 : 1\r\n v = v - (v'*U(:,j))*U(:,j); \r\n end\r\n \r\n % Update the norm of the columns of M after orhogonal projection using\r\n % the formula ||r^(i)_k||^2 = ||r^(i-1)_k||^2 - ( v^T m_k )^2 for all k. \r\n normM = normM - (v'*M).^2; \r\n \r\n i = i + 1; \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "clu2vec.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/clu2vec.m", "size": 302, "source_encoding": "utf_8", "md5": "59890ba3279c8d45d1c01fb24000bdc6", "text": "% Transform a cluster cell to a vector \r\n\r\nfunction IDX = clu2vec(K,m,r); \r\n\r\nif nargin < 3\r\n r = length(K);\r\nend\r\nif nargin < 2\r\n % Compute max entry in K\r\n m = 0; \r\n for i = 1 : r\r\n m = max(0, max(K{i})); \r\n end\r\nend\r\nIDX = zeros(m,1); \r\nfor i = 1 : r \r\n IDX(K{i}) = i; \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "reprvec.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/hierclust2nmf_v2/reprvec.m", "size": 726, "source_encoding": "utf_8", "md5": "d3e05d1279b90585355adc9d6de3c3a7", "text": "% Extract \"most\" representative column from a matrix M as follows: \r\n% \r\n% First, it computes the best rank-one approximation u v^T of M. \r\n% Then, it identifies the column of M minimizing the MRSA with the first\r\n% singular vector u of M. \r\n% \r\n% See Section 4.4.1 of \r\n% Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images \r\n% using Rank-Two Nonnegative Matrix Factorization', arXiv. 
\r\n\r\nfunction [u,s,b] = reprvec(M); \r\n\r\n[u,s,v] = svds(M,1); \r\nu = abs(u); \r\n[m,n] = size(M); \r\n% Exctract the column of M approximating u the best (up to a translation and scaling)\r\nu = u - mean(u); \r\nMm = M - repmat(mean(M),m,1); \r\nerr = acos( (Mm'*u/norm(u))./( sqrt(sum(Mm.^2)) )' ); \r\n[a,b] = min(err); \r\nu = M(:,b); "} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "EEAs.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/Endmember Extraction Algorithms/EEAs.m", "size": 328, "source_encoding": "utf_8", "md5": "b80a0699acd0816f24d73bf3969910ec", "text": "% Different EEA algorithms\r\n\r\nfunction K = EEAs(M,r,algo); \r\n\r\nif algo == 1 \r\n K = FastSepNMF(M,r); \r\nelseif algo == 2 \r\n K = VCA(M,'Endmembers',r,'verbose','off'); \r\nelseif algo == 3\r\n K = FastConicalHull(M,r); \r\nelseif algo == 4\r\n [~, ~, K] = hierclust2nmf(M,r,1,[],0);\r\nelseif algo == 5\r\n K = SNPA(M,r); \r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "RVCA.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/Endmember Extraction Algorithms/RVCA.m", "size": 358, "source_encoding": "utf_8", "md5": "5f0767712f593be37f43c851734d741d", "text": "% Robust VCA \r\n\r\nfunction K = RVCA(M,r,rparam); \r\n\r\nmaxiter = 10; \r\nif nargin <= 2\r\n rparam = 10;\r\nend\r\n\r\nemin = +Inf; \r\n\r\nfor i = 1 : rparam\r\n [A, K] = VCA(M,'Endmembers',r,'verbose','off'); \r\n H = nnlsHALSupdt(M,M(:,K),[],maxiter);\r\n err = norm(M-M(:,K)*H,'fro'); \r\n \r\n if err < emin\r\n Kf = K; \r\n emin = err;\r\n end\r\nend"} +{"plateform": "github", "repo_name": "locatelf/cone-greedy-master", "name": "SimplexProj.m", "ext": ".m", "path": "cone-greedy-master/HyperspectralImaging/Endmember Extraction Algorithms/SimplexProj.m", "size": 1374, "source_encoding": "utf_8", "md5": "f6f156f333432d0897537cbf10fa3595", "text": "function x = SimplexProj(y)\r\n\r\n% Given y, computes its projection x* onto the simplex \r\n% \r\n% Delta = { x | x >= 0 and sum(x) <= 1 }, \r\n% \r\n% that is, x* = argmin_x ||x-y||_2 such that x in Delta. \r\n% \r\n% \r\n% See Appendix A.1 in N. Gillis, Successive Nonnegative Projection \r\n% Algorithm for Robust Nonnegative Blind Source Separation, arXiv, 2013. \r\n% \r\n%\r\n% x = SimplexProj(y)\r\n%\r\n% ****** Input ******\r\n% y : input vector.\r\n%\r\n% ****** Output ******\r\n% x : projection of y onto Delta.\r\n\r\nx = max(y,0); \r\nK = find(sum(x) > 1); \r\nx(:,K) = blockSimplexProj(y(:,K));\r\n\r\nend\r\n\r\n\r\nfunction x = blockSimplexProj(y)\r\n\r\n% Same as function SimplexProj except that sum(max(Y,0)) > 1. 
\r\n[r,m] = size(y); \r\nys = sort(-y); ys = -ys;\r\nindi2 = 1:m; lambda = zeros(1,m); \r\nS(1,indi2) = 0; \r\nfor i = 2 : r\r\n if i == 2\r\n S(i,:) = (ys(1:i-1,:)-repmat(ys(i,:),i-1,1)); \r\n else\r\n S(i,:) = sum(ys(1:i-1,:)-repmat(ys(i,:),i-1,1)); \r\n end\r\n indi1 = find(S(i,indi2) >= 1); \r\n indi1 = indi2(indi1);\r\n indi2 = find(S(i,:) < 1);\r\n if ~isempty(indi1)\r\n if i == 1\r\n lambda(indi1) = -ys(1,indi1)+1;\r\n else\r\n lambda(indi1) = (1-S(i-1,indi1))/(i-1) - ys(i-1,indi1);\r\n end\r\n end\r\n if i == r\r\n lambda(indi2) = (1-S(r,indi2))/r - ys(r,indi2);\r\n end\r\nend\r\nx = max( y + repmat(lambda,r,1), 0); \r\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "fixations_t2.m", "ext": ".m", "path": "LandRate-master/fixations_t2.m", "size": 3218, "source_encoding": "utf_8", "md5": "35b1c3cbde506d34b53f3d7774c7b5f1", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%import fixations list after t1 criterion,fixation id and t2 parameter\n%output:(fixx,fixy,number_t1,number_t2,duration,list_out_points)\n%fixx,fixy: coordinates of the center\n%number_t1: number of points before t2 criterion\n%number_t2:number of points after t2 criterion\n%list_out_points:points which are not used after t2 criterion\nfunction [fixx,fixy,number_t1,number_t2,start_time,end_time,duration,list_out_points]=fixations_t2(fixations,fixation_id,t2) %fixations after t1 criterion\nn=size(fixations);\nn=n(1,1); %number of all points\nfixations_id=zeros(1,4);\n\nfor i=1:n\n if fixations(i,4)==fixation_id\n fixations_id=[fixations_id;fixations(i,:)];\n end\nend\n\nn=size(fixations_id);\nn=n(1,1);\nfixations_id=fixations_id(2:n,:); %list of data points with the defined id\n\n%clustering according to criterion t2\nnumber_t1=size(fixations_id);\nx=fixations_id(:,1);\ny=fixations_id(:,2);\nt=fixations_id(:,3);\nnumber_t1=number_t1(1,1); %number of points before t2\n%initialize mean values of center\nfixx=mean(fixations_id(:,1));\nfixy=mean(fixations_id(:,2));\nd=0; %distance between cluster point and mean point\nfor i=1:number_t1\n d=distance2p(x(i),y(i),fixx,fixy);\n if d>t2\n fixations_id(i,4)=0;\n end\nend\n\n\n%initialize new list (data points according to t2)\nfixations_list_t2=zeros(1,4);\n\n%initialize list of points which are nto used aftere t2 criterion\nlist_out_points=zeros(1,4);\n\nfor i=1:number_t1\n if fixations_id(i,4)>0\n fixations_list_t2=[fixations_list_t2;fixations_id(i,:)];\n else\n list_out_points=[list_out_points;fixations_id(i,:)];\n 
end\nend\nn=size(fixations_list_t2);\nn=n(1,1);\nfixations_list_t2=fixations_list_t2(2:n,:);\n\nnumber_t2=size(fixations_list_t2);\nnumber_t2=number_t2(1,1);\nfixx=mean(fixations_list_t2(:,1));\nfixy=mean(fixations_list_t2(:,2));\n\n\nif number_t2>0\nstart_time=fixations_list_t2(1,3);\nend_time=fixations_list_t2(number_t2,3);\nduration=fixations_list_t2(number_t2,3)-fixations_list_t2(1,3);\nelse\n start_time=0;\n end_time=0;\n duration=0;\nend\n\n\nlist_out_points;\nn_out=size(list_out_points);\nn_out=n_out(1,1);\n\nif n_out>1\n list_out_points=list_out_points(2:n_out,:);\nelse\n list_out_points=[0 0 0 -1];%indicates that there are not points which are not used\nend\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "angle_to_tracker.m", "ext": ".m", "path": "LandRate-master/angle_to_tracker.m", "size": 2653, "source_encoding": "utf_8", "md5": "d6c6e7e24857b7c66791375b69c56293", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%function angle_to_tracker\n%compute distance t in stimuli display from visual range in tracker units\n%input parameters:\n%-theta: visual angle range in degrees\n%-d: distance between subject and stimulus in mm\n%-tmm: the distance in mm which corresponds with 1 unit in tracker values\n%export matlab matrix:\n%-t:the corresponded spatial section in tracker units\n%call function example:\n%angle_to_tracker(8,555,301.1)\nfunction t=angle_to_tracker(theta,d,tmm)\n%transform angle from degrees to rads\ntheta=theta*pi()/180;\n%compute t\nt=(2*d/tmm)*tan(theta/2); %tracker units\n\n\nfprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\nfprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\nfprintf('\\n')\nfprintf(' This program is free software: you can redistribute it and/or modify\\n')\nfprintf(' it under the terms of the GNU General Public License as published by\\n')\nfprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\nfprintf(' (at your option) any later version.\\n')\nfprintf('\\n')\nfprintf(' This program is distributed in the hope that it will be useful,\\n')\nfprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\nfprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\nfprintf(' GNU General Public License for more details.\\n')\nfprintf('\\n')\nfprintf(' You should have received a copy of the GNU General Public License\\n')\nfprintf(' along with this program. 
If not, see .\\n')\nfprintf('\\n')\nfprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "min_duration.m", "ext": ".m", "path": "LandRate-master/min_duration.m", "size": 1455, "source_encoding": "utf_8", "md5": "08bbb2e381ecc7d1eaf94b5d1cd77dde", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%import the list of fixations (Center x, Center y, Number of data points\n%after t1, Number of data points after second criterion, Start time, End\n%time, Duration) and minimum duration\nfunction fixations=min_duration(fixation_list,minDur)\nn=size(fixation_list);\nn=n(1,1);\n\n%initialize new fixation list\nfixations=zeros(1,7);\nfor i=1:n\n if fixation_list(i,7)>minDur\n fixations=[fixations;fixation_list(i,:)];\n end\nend\nn=size(fixations);\nn=n(1,1);\nfixations=fixations(2:n,:);\nend\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "points_in_region.m", "ext": ".m", "path": "LandRate-master/points_in_region.m", "size": 1683, "source_encoding": "utf_8", "md5": "81e57f7705f5cae4ec62c23ff24f6996", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
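A worked numeric check of the angle_to_tracker conversion defined earlier: with the viewing distance (555 mm) and tracker scale (301.1 mm per unit) taken from that file's own example call, a 1-degree visual angle comes out to roughly 0.032 tracker units (the 1-degree value itself is only an illustrative choice).

t = angle_to_tracker(1, 555, 301.1);          % also prints the GPL banner
t_manual = (2*555/301.1) * tan(0.5*pi/180);   % t = (2d/tmm)*tan(theta/2)
fprintf('t = %.4f, manual = %.4f\n', t, t_manual);   % both approx. 0.0322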
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%x:horizontal coord from all records\n%y:vertical coord from all records\nfunction n_records_region=points_in_region(x,y,left_up_edge_x,left_up_edge_y,right_down_edge_x,right_down_edge_y)\n\n%edges of region\nright_up_edge_x=right_down_edge_x;\nright_up_edge_y=left_up_edge_y;\nleft_down_edge_x=left_up_edge_x;\nleft_down_edge_y=right_down_edge_y;\n\n%total number of records\nn=size(x);\nn=n(1,1);\n\n%number of records in region \nn_records_region=0;\nfor i=1:n\n if (x(i)>left_up_edge_x || x(i)==left_up_edge_x) &&( x(i)left_up_edge_y || y(i)==left_up_edge_y) &&( y(i).\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%function ROI_analysis\n%analysis in predefined rectangle region of interest\n%input parameters:\n%-fixation_list: list of fixations(after fixation_detection)\n%-rois: regions of interest file((x_left_up,y_left_up,x_right_down,y_right_down,ROIs ID)\n%-roi_to_analyze: define the analyzed ROI from ROI ID (according to the IDs in rois file)\n%export matlab matrix:\n%-fixations_in_roi: fixations in the analyzed region of interest\n%call function example:\n%ROI_analysis(fixation_list,'ROIS.txt',2);\n%fixation_list:matlab matrix as computed from fixation_detection\n\nfunction fixations_in_roi=ROI_analysis(fixation_list,rois,roi_to_analyze)\nrois=load(rois);\nx_fix=fixation_list(:,1);\ny_fix=fixation_list(:,2);\nduration=fixation_list(:,7);\n%number of ROIs\nn_rois=size(rois);\nn_rois=n_rois(1,1);\n\n%classify fixations in ROis(0:indicates that fixation is out of ROI)\nfixations_classification=zeros(length(x_fix),1);\n\nfor i=1:length(x_fix)\n for j=1:n_rois\n if ((x_fix(i)>=rois(j,1) && x_fix(i)<=rois(j,3)) && (y_fix(i)<=rois(j,2) && y_fix(i)>=rois(j,4)))\n fixations_classification(i)=rois(j,5);\n end\n end\nend\n\nfixation_list(:,8)=fixations_classification;\n\n\n%build the matrix with fixations in selected roi\n%initialize fixations_in_roi(x_fixation,y_fixation,duration)\nfixations_in_roi=zeros(1,3);\nfor i=1:length(x_fix)\n if fixation_list(i,8)==roi_to_analyze\n fixations_in_roi=[fixations_in_roi;x_fix(i),y_fix(i),duration(i)];\n end\nend\nn_fixations=size(fixations_in_roi);\nn_fixations=n_fixations(1,1);\nif n_fixations>1\n fixations_in_roi=fixations_in_roi(2:n_fixations,:);\n \n %compute the number of fixations in roi\n number_fixations_in_roi=size(fixations_in_roi);\n number_fixations_in_roi=number_fixations_in_roi(1,1);\n \n %compute mean duration of fixations in roi\n mean_duration_roi=mean(fixations_in_roi(:,3));\n \n %compute % number of fixations in roi\n fixations_percentage_roi=number_fixations_in_roi/length(x_fix);\n \n %compute % duration of fixations in roi\n duration_percentage_roi=sum(fixations_in_roi(:,3))/sum(duration);\n \n %print results\n fprintf(' ROI Analysis\\n')\n fprintf('\\nID of selected ROI for analysis: %.f\\n',roi_to_analyze)\n fprintf('Number of fixations in selected ROI: %.f\\n',number_fixations_in_roi)\n if n_fixations>1\n fprintf('\\nFixation List in ROI:\\n')\n fprintf(' X_Fixation-Y_Fixation-Duration\\n')\n for i=1:number_fixations_in_roi\n fprintf(' %.4f %.4f %.1f\\n',fixations_in_roi(i,1),fixations_in_roi(i,2),fixations_in_roi(i,3))\n end\n \n fprintf('\\nMean duration of fixations in ROI: %.1f\\n',mean_duration_roi)\n fprintf('Number (%%) of fixations in ROI: %.2f%%\\n',fixations_percentage_roi*100)\n fprintf('Duration (%%) of fixations in ROI: %.2f%%\\n',duration_percentage_roi*100)\n 
end\n \n %plot_ROIs\n plot(x_fix,y_fix,'bo')\n hold on\n for i=1:n_rois\n roi_region=[rois(i,1),rois(i,2);rois(i,3),rois(i,2);rois(i,3),rois(i,4);rois(i,1),rois(i,4);rois(i,1),rois(i,2)];\n if i==roi_to_analyze\n fill(roi_region(:,1),roi_region(:,2),'r')\n alpha(0.7)\n end\n hold on\n end\n \n for i=1:n_rois\n roi_region=[rois(i,1),rois(i,2);rois(i,3),rois(i,2);rois(i,3),rois(i,4);rois(i,1),rois(i,4);rois(i,1),rois(i,2)];\n plot(roi_region(:,1),roi_region(:,2),'g-')\n hold on\n end\n \n \n axis('equal')\n title('Regions of Interest (ROIs) ','Color','k','FontSize',20)\n xlabel('Horizontal Coordinate','Color','k','FontSize',20)\n ylabel('Vertical Coordinate','Color','k','FontSize',20)\n set(gca,'FontSize',20)\n legend('Fixations','Selected ROI','ROIs','Location','SouthEastOutside')\nelse\n number_fixations_in_roi=0;\n fprintf(' ROI Analysis\\n')\n fprintf('\\nID of selected ROI for analysis: %.f\\n',roi_to_analyze)\n fprintf('Number of fixations in selected ROI: %.f\\n',number_fixations_in_roi)\nend\n\n\nfprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\nfprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\nfprintf('\\n')\nfprintf(' This program is free software: you can redistribute it and/or modify\\n')\nfprintf(' it under the terms of the GNU General Public License as published by\\n')\nfprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\nfprintf(' (at your option) any later version.\\n')\nfprintf('\\n')\nfprintf(' This program is distributed in the hope that it will be useful,\\n')\nfprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\nfprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\nfprintf(' GNU General Public License for more details.\\n')\nfprintf('\\n')\nfprintf(' You should have received a copy of the GNU General Public License\\n')\nfprintf(' along with this program. If not, see .\\n')\nfprintf('\\n')\nfprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "heatmap_generator.m", "ext": ".m", "path": "LandRate-master/heatmap_generator.m", "size": 6654, "source_encoding": "utf_8", "md5": "11496414805a2ede9a65bd0da6982178", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
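A hedged usage sketch for the ROI_analysis function above: the ROI file rows follow the (x_left_up, y_left_up, x_right_down, y_right_down, ID) convention stated in its header, while the file name, coordinates and fixation list below are made-up test values.

rois = [0.10 0.90 0.50 0.60 1;       % ROI 1: upper-left rectangle
        0.60 0.40 1.00 0.10 2];      % ROI 2: lower-right rectangle
dlmwrite('ROIS_demo.txt', rois, 'delimiter', ' ');

% fixation list columns: (x, y, n_t1, n_t2, start, end, duration)
fixation_list = [0.20 0.70 10 10  100  350 250;    % inside ROI 1
                 0.80 0.20 12 12  400  800 400;    % inside ROI 2
                 0.55 0.95  8  8  850 1000 150];   % outside both ROIs
fixations_in_roi = ROI_analysis(fixation_list, 'ROIS_demo.txt', 2);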
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%Heatmap generator\n%Generate Heatmap Visualization computed from Raw Data \n%input parameters:\n%-records: list of records(x,y) from all subjects,x,y:in tracker units\n%-scene: stimulus image\n%-spacing: interval to define heatmap density\n%-max_hor: maximum horizontal value of tracker coordinate system\n%-max_ver: maximum vertical value of tracker coordinate system\n%-kernel_size_gaussian: kernel size for gaussian filtering\n%-s_gaussian:sigma for gaussian filtering\n%export matlab matrixes:\n%-heatmapRGB:heatmap values RGB\n%-heatmap:heatmap grayscale\n%call function example:\n%heatmap_generator('data.txt','stimulus.bmp',0.0833,1.25,1.00,5,3);\n\nfunction [heatmapRGB,heatmap]=heatmap_generator(records,scene,spacing,max_hor,max_ver,kernel_size_gaussian,s_gaussian)\n\nrecords=load(records);\nscene=imread(scene);\nx=records(:,1);\ny=records(:,2);\nn_scene=size(scene);\nn_1_scene=n_scene(1,1);\nn_2_scene=n_scene(1,2);\n\n%total number of points\nn=size(records);\nn=n(1,1);\n\n%create heatmap matrix\n%heatmap values: records frequency\n%heatmap dimensions\nheatmap_ver_values=(0:spacing:max_ver);\nheatmap_hor_values=(0:spacing:max_hor);\nheatmap_ver_number=size(heatmap_ver_values);\nheatmap_ver_number=heatmap_ver_number(1,2)-1;%number of vertical elements in heatmap matrix\nheatmap_hor_number=size(heatmap_hor_values);\nheatmap_hor_number=heatmap_hor_number(1,2)-1;%number of vertical elements in heatmap matrix\n%heatmap matrix initialization\nheatmap=zeros(heatmap_ver_number,heatmap_hor_number);\n\n%frequencies\nf=zeros(heatmap_hor_number*heatmap_ver_number,1);\nn_f=size(f);\nn_f=n_f(1,1);\n%initialize counters\nj=0;\ni=1;\nfor l=1:heatmap_ver_number\n for k=1:heatmap_hor_number\n i;\n j=j+1;\n heatmap(i,j)=points_in_region(x,y,heatmap_hor_values(k),heatmap_ver_values(l),heatmap_hor_values(k+1),heatmap_ver_values(l+1));\n end\n i=i+1;\n j=0;\nend\nheatmap_frequencies=heatmap;\n\n%adjust frequenies values to range 0-255\n%255:max frequency\n%0:no frequency\nheatmap=(255/max(max(heatmap)))*heatmap;\nheatmap=floor(heatmap);%integer values\n%take the inverse image\n%heatmap=255-heatmap;\nheatmap=uint8(heatmap);\n\n%resize heatmap to adjust to scene resolution\nheatmap=imresize(heatmap,[n_1_scene n_2_scene],'bicubic');\n\n%create RGB heatmap\nheatmapRGB=zeros(n_1_scene,n_2_scene,3);\n%initialize R,G,B\nR=zeros(n_1_scene,n_2_scene);\nG=zeros(n_1_scene,n_2_scene);\nB=zeros(n_1_scene,n_2_scene);\n\nfor i=1:1024\n for j=1:1280\n if (heatmap(i,j)==0 || heatmap(i,j)>0) && (heatmap(i,j)==25 || heatmap(i,j)<25)\n R(i,j)=10;\n G(i,j)=10;\n B(i,j)=10;\n \n elseif (heatmap(i,j)==26 || heatmap(i,j)>26) && (heatmap(i,j)==50 || heatmap(i,j)<50)\n R(i,j)=60;\n G(i,j)=60;\n B(i,j)=60;\n \n \n elseif (heatmap(i,j)==51 || heatmap(i,j)>51) && (heatmap(i,j)==75 || heatmap(i,j)<75)\n R(i,j)=0;\n G(i,j)=0;\n B(i,j)=255;\n \n elseif (heatmap(i,j)==76 || heatmap(i,j)>76) && (heatmap(i,j)==100 || heatmap(i,j)<100)\n R(i,j)=0;\n G(i,j)=255;\n B(i,j)=210;\n \n elseif (heatmap(i,j)==101 || heatmap(i,j)>101) && (heatmap(i,j)==125 || heatmap(i,j)<125)\n R(i,j)=0;\n G(i,j)=255;\n B(i,j)=75;\n \n elseif (heatmap(i,j)==126 || heatmap(i,j)>126) && (heatmap(i,j)==150 || heatmap(i,j)<150)\n R(i,j)=192;\n G(i,j)=255;\n B(i,j)=0;\n \n elseif (heatmap(i,j)==151 || heatmap(i,j)>151) && (heatmap(i,j)==175 || heatmap(i,j)<175)\n R(i,j)=255;\n G(i,j)=240;\n B(i,j)=0;\n \n elseif (heatmap(i,j)==176 || heatmap(i,j)>176) && 
(heatmap(i,j)==200 || heatmap(i,j)<200)\n R(i,j)=255;\n G(i,j)=192;\n B(i,j)=0;\n \n elseif (heatmap(i,j)==201 || heatmap(i,j)>201) && (heatmap(i,j)==225 || heatmap(i,j)<225)\n R(i,j)=255;\n G(i,j)=150;\n B(i,j)=0;\n \n else\n R(i,j)=255;\n G(i,j)=0;\n B(i,j)=0;\n end\n \n end\nend\nR=uint8(R);\nG=uint8(G);\nB=uint8(B);\nheatmapRGB(:,:,1)=R;\nheatmapRGB(:,:,2)=G;\nheatmapRGB(:,:,3)=B;\nheatmapRGB=uint8(heatmapRGB);\n\n\n%show heatmap after gaussian filtering\nfigure\nimshow(scene)\nhold on\nh = fspecial('gaussian',kernel_size_gaussian,s_gaussian);\nimshow(imfilter(heatmapRGB,h,'replicate'));\ntitle('Heatmap','Color','k','FontSize',14)\nalpha(0.6)\n\nfprintf('\\nHeatmap Visualization is completed successfully\\n')\n\n\n\nfprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\nfprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\nfprintf('\\n')\nfprintf(' This program is free software: you can redistribute it and/or modify\\n')\nfprintf(' it under the terms of the GNU General Public License as published by\\n')\nfprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\nfprintf(' (at your option) any later version.\\n')\nfprintf('\\n')\nfprintf(' This program is distributed in the hope that it will be useful,\\n')\nfprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\nfprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\nfprintf(' GNU General Public License for more details.\\n')\nfprintf('\\n')\nfprintf(' You should have received a copy of the GNU General Public License\\n')\nfprintf(' along with this program. If not, see .\\n')\nfprintf('\\n')\nfprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "heatmap_generator_EyeMMV_modified.m", "ext": ".m", "path": "LandRate-master/heatmap_generator_EyeMMV_modified.m", "size": 6927, "source_encoding": "utf_8", "md5": "5ca3103a40391da6e0a0aaf6652b4ee8", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
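The nested loops in heatmap_generator above call points_in_region once per grid cell; on R2015b or later the same frequency grid can be computed in a single call with histcounts2. This is only a sketch of an alternative, assuming the variables x, y, spacing, max_hor and max_ver defined inside that function, and it matches the loop version up to how samples lying exactly on shared cell edges are assigned.

hor_edges = 0:spacing:max_hor;
ver_edges = 0:spacing:max_ver;
% rows index the vertical bins and columns the horizontal bins,
% mirroring heatmap(i,j) in the loop version
counts = histcounts2(y, x, ver_edges, hor_edges);
scaled = uint8(floor(255 * counts / max(counts(:))));   % same 0-255 scaling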
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%Heatmap generator\n%Generate Heatmap Visualization computed from Raw Data \n%input parameters:\n%-records: list of records(x,y) from all subjects,x,y:in tracker units\n%-scene: stimulus image\n%-spacing: interval to define heatmap density\n%-max_hor: maximum horizontal value of tracker coordinate system\n%-max_ver: maximum vertical value of tracker coordinate system\n%-kernel_size_gaussian: kernel size for gaussian filtering\n%-s_gaussian:sigma for gaussian filtering\n%export matlab matrixes:\n%-heatmapRGB:heatmap values RGB\n%-heatmap:heatmap grayscale\n%call function example:\n%heatmap_generator('data.txt','stimulus.bmp',0.0833,1.25,1.00,5,3);\n\n%new parameters\n%spacing_coefficient\n%stimulus_ID\n%max_hor,max_ver:in pixels\nfunction [heatmapRGB,heatmap]=heatmap_generator(records,scene,spacing_coef,max_hor,max_ver,kernel_size_gaussian,s_gaussian,stimulus_ID)\nfigure ('Name','Heatmap visualization')\nspacing=spacing_coef*max_hor;\n%records=load(records);\nscene=imread(scene);\nx=records(:,1);\ny=records(:,2);\nn_scene=size(scene);\nn_1_scene=n_scene(1,1);\nn_2_scene=n_scene(1,2);\n\n%total number of points\nn=size(records);\nn=n(1,1);\n\n%create heatmap matrix\n%heatmap values: records frequency\n%heatmap dimensions\nheatmap_ver_values=(0:spacing:max_ver);\nheatmap_hor_values=(0:spacing:max_hor);\nheatmap_ver_number=size(heatmap_ver_values);\nheatmap_ver_number=heatmap_ver_number(1,2)-1;%number of vertical elements in heatmap matrix\nheatmap_hor_number=size(heatmap_hor_values);\nheatmap_hor_number=heatmap_hor_number(1,2)-1;%number of vertical elements in heatmap matrix\n%heatmap matrix initialization\nheatmap=zeros(heatmap_ver_number,heatmap_hor_number);\n\n%frequencies\nf=zeros(heatmap_hor_number*heatmap_ver_number,1);\nn_f=size(f);\nn_f=n_f(1,1);\n%initialize counters\nj=0;\ni=1;\nfor l=1:heatmap_ver_number\n for k=1:heatmap_hor_number\n i;\n j=j+1;\n heatmap(i,j)=points_in_region(x,y,heatmap_hor_values(k),heatmap_ver_values(l),heatmap_hor_values(k+1),heatmap_ver_values(l+1));\n end\n i=i+1;\n j=0;\nend\nheatmap_frequencies=heatmap;\n\n%adjust frequenies values to range 0-255\n%255:max frequency\n%0:no frequency\nheatmap=(255/max(max(heatmap)))*heatmap;\nheatmap=floor(heatmap);%integer values\n%take the inverse image\n%heatmap=255-heatmap;\nheatmap=uint8(heatmap);\n\n%resize heatmap to adjust to scene resolution\nheatmap=imresize(heatmap,[n_1_scene n_2_scene],'bicubic');\n\n%create RGB heatmap\nheatmapRGB=zeros(n_1_scene,n_2_scene,3);\n%initialize R,G,B\nR=zeros(n_1_scene,n_2_scene);\nG=zeros(n_1_scene,n_2_scene);\nB=zeros(n_1_scene,n_2_scene);\n\nfor i=1:n_1_scene\n for j=1:n_2_scene\n if (heatmap(i,j)==0 || heatmap(i,j)>0) && (heatmap(i,j)==25 || heatmap(i,j)<25)\n R(i,j)=150;\n G(i,j)=150;\n B(i,j)=150;\n \n elseif (heatmap(i,j)==26 || heatmap(i,j)>26) && (heatmap(i,j)==50 || heatmap(i,j)<50)\n R(i,j)=100;\n G(i,j)=100;\n B(i,j)=100;\n \n \n elseif (heatmap(i,j)==51 || heatmap(i,j)>51) && (heatmap(i,j)==75 || heatmap(i,j)<75)\n R(i,j)=0;\n G(i,j)=0;\n B(i,j)=255;\n \n elseif (heatmap(i,j)==76 || heatmap(i,j)>76) && (heatmap(i,j)==100 || heatmap(i,j)<100)\n R(i,j)=0;\n G(i,j)=255;\n B(i,j)=210;\n \n elseif (heatmap(i,j)==101 || heatmap(i,j)>101) && (heatmap(i,j)==125 || heatmap(i,j)<125)\n R(i,j)=0;\n G(i,j)=255;\n B(i,j)=75;\n \n elseif (heatmap(i,j)==126 || heatmap(i,j)>126) && (heatmap(i,j)==150 || heatmap(i,j)<150)\n R(i,j)=192;\n G(i,j)=255;\n B(i,j)=0;\n \n elseif 
(heatmap(i,j)==151 || heatmap(i,j)>151) && (heatmap(i,j)==175 || heatmap(i,j)<175)\n R(i,j)=255;\n G(i,j)=240;\n B(i,j)=0;\n \n elseif (heatmap(i,j)==176 || heatmap(i,j)>176) && (heatmap(i,j)==200 || heatmap(i,j)<200)\n R(i,j)=255;\n G(i,j)=192;\n B(i,j)=0;\n \n elseif (heatmap(i,j)==201 || heatmap(i,j)>201) && (heatmap(i,j)==225 || heatmap(i,j)<225)\n R(i,j)=255;\n G(i,j)=150;\n B(i,j)=0;\n \n else\n R(i,j)=255;\n G(i,j)=0;\n B(i,j)=0;\n end\n \n end\nend\nR=uint8(R);\nG=uint8(G);\nB=uint8(B);\nheatmapRGB(:,:,1)=R;\nheatmapRGB(:,:,2)=G;\nheatmapRGB(:,:,3)=B;\nheatmapRGB=uint8(heatmapRGB);\n\n\n%show heatmap after gaussian filtering\n\nimshow(scene,'InitialMagnification','fit')\nhold on\nh = fspecial('gaussian',kernel_size_gaussian,s_gaussian);\nimshow(imfilter(heatmapRGB,h,'replicate'));\n\ntitle(['Heatmap visualization [Stimulus: ',num2str(stimulus_ID),']'])\nalpha(0.6)\n\n% fprintf('\\nHeatmap Visualization is completed successfully\\n')\n% \n% \n% \n% fprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\n% fprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\n% fprintf('\\n')\n% fprintf(' This program is free software: you can redistribute it and/or modify\\n')\n% fprintf(' it under the terms of the GNU General Public License as published by\\n')\n% fprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\n% fprintf(' (at your option) any later version.\\n')\n% fprintf('\\n')\n% fprintf(' This program is distributed in the hope that it will be useful,\\n')\n% fprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\n% fprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\n% fprintf(' GNU General Public License for more details.\\n')\n% fprintf('\\n')\n% fprintf(' You should have received a copy of the GNU General Public License\\n')\n% fprintf(' along with this program. If not, see .\\n')\n% fprintf('\\n')\n% fprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "fixations_3s.m", "ext": ".m", "path": "LandRate-master/fixations_3s.m", "size": 2772, "source_encoding": "utf_8", "md5": "dd807d2f0ba54a516cf1196248a718ce", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
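A hypothetical call of the modified generator above. Unlike the original, its first argument is already a numeric matrix of gaze samples; the stimulus file name, pixel resolution, grid coefficient and Gaussian parameters below are placeholders, and the Image Processing Toolbox (imread, imresize, fspecial, imfilter) plus points_in_region.m are assumed to be available.

records = rand(500, 2) * diag([1280 1024]);   % fake gaze samples (x, y) in pixels
[hRGB, hGray] = heatmap_generator_EyeMMV_modified(records, 'stimulus.png', ...
                    0.05, 1280, 1024, 50, 20, 1);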
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%applying 3s criterion\n%import fixations list after t1 criterion, fixation id\n%output:(fixx,fixy,number_t1,number_t2,duration)\n%fixx,fixy: coordinates of the center\n%number_t1: number of points before t2 criterion\n%number_3s:number of points after 3s criterion\nfunction [fixx,fixy,number_t1,number_3s,start_time,end_time,duration]=fixations_3s(fixations,fixation_id) %fixations after t1 criterion\nn=size(fixations);\nn=n(1,1); %number of all points\nfixations_id=zeros(1,4);\n\nfor i=1:n\n if fixations(i,4)==fixation_id\n fixations_id=[fixations_id;fixations(i,:)];\n end\nend\n\nn=size(fixations_id);\nn=n(1,1);\nfixations_id=fixations_id(2:n,:); %list of data points with the defined id\n\n%clustering according to criterion t2\nnumber_t1=size(fixations_id);\nx=fixations_id(:,1);\ny=fixations_id(:,2);\nt=fixations_id(:,3);\nnumber_t1=number_t1(1,1); %number of points before 3s criterion\n%initialize mean values of center\nfixx=mean(fixations_id(:,1));\nfixy=mean(fixations_id(:,2));\n%compute standard deviation of cluster\nsx=std(x);\nsy=std(y);\ns=sqrt((sx^2)+(sy^2));\nd=0; %distance between cluster point and mean point\n\n\nfor i=1:number_t1\n d=distance2p(x(i),y(i),fixx,fixy);\n if d>(3*s)\n fixations_id(i,4)=0;\n end\nend\n%initialize new list (data points according to 3s criterion)\nfixations_list_3s=zeros(1,4);\n\nfor i=1:number_t1\n if fixations_id(i,4)>0\n fixations_list_3s=[fixations_list_3s;fixations_id(i,:)];\n end\nend\n\nn=size(fixations_list_3s);\nn=n(1,1);\nfixations_list_3s=fixations_list_3s(2:n,:);\n\nnumber_3s=size(fixations_list_3s);\nnumber_3s=number_3s(1,1);\nfixx=mean(fixations_list_3s(:,1));\nfixy=mean(fixations_list_3s(:,2));\nstart_time=fixations_list_3s(1,3);\nend_time=fixations_list_3s(number_3s,3);\nduration=fixations_list_3s(number_3s,3)-fixations_list_3s(1,3);\n\nend\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "metrics_analysis.m", "ext": ".m", "path": "LandRate-master/metrics_analysis.m", "size": 10253, "source_encoding": "utf_8", "md5": "b519fe7acad925cc6d0c0035d27752b8", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
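A small synthetic check of the 3-sigma criterion implemented in fixations_3s above: nineteen tightly clustered samples plus one far-away sample, all labelled as fixation 1. The coordinates and timestamps are illustrative only, and fixations_3s.m together with distance2p.m is assumed to be on the path.

rng(4);
pts = [0.50 + 0.002*randn(20,1), 0.50 + 0.002*randn(20,1), (0:19)'*4, ones(20,1)];
pts(10, 1:2) = [0.70 0.70];          % one sample far beyond 3 standard deviations
[fx, fy, n1, n3s, ts, te, dur] = fixations_3s(pts, 1);
fprintf('%d of %d samples kept; center (%.3f, %.3f), duration %.0f\n', ...
        n3s, n1, fx, fy, dur);       % expected: 19 of 20 kept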
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%function metrics_analysis\n%eye movement metrics computations\n%input parameters:\n%-fixation_list: list of fixations(after fixation_detection)\n%-repeat_fixation_threshold: threshold in tracker units for repeat fixations\n%-spacing_scanpath: interval in tracker units to compute scanpath density\n%-spacing_transition: interval in tracker units to compute transition matrix density\n%-maxx: maximum horizontal coordinate in tracker coordinate system\n%-maxy: maximum vertical coordinate in tracker coordinate system\n%export matlab matrixes:\n%-repeat_fixations: list of repeat fixations\n%-saccades: list of saccadic movements\n%-transition_matrix: transition matrix according the giving sapcing\n%call function example:\n%metrics_analysis(fixation_list,0.10,0.25,0.25,1.25,1.00);\n%fixation_list:matlab matrix as computed from fixation_detection\n\nfunction [repeat_fixations,saccades,transition_matrix]=metrics_analysis(fixation_list,repeat_fixation_threshold,spacing_scanpath,spacing_transition,maxx,maxy)\n\n%Fixation analysis\n%compute total number of fixations\ntotal_number_fixations=size(fixation_list);\ntotal_number_fixations=total_number_fixations(1,1);\n\n%compute mean duration of fixations\nmean_durations_fixations=mean(fixation_list(:,7));\n\n%compute time to first fixation\ntime_to_first_fixation=fixation_list(1,5);\n\n%compute repeat fixations\n%initialize distances between fixations(fixation1,fixation2,distance_between_two_fixations)\ndistances_between_fixations=zeros(1,3);\nfor i=1:total_number_fixations\n for j=1:total_number_fixations\n if i~=j && i1\n repeat_fixations=repeat_fixations(2:n_repeat_fixations,:);\nend\n\n%compute total duration of fixations\ntotal_duration_fixations=sum(fixation_list(:,7));\n\n%Saccade analysis\n%build saccades list (x_start_point,y_start_point,x_end_point,y_end_point, duration, amplitude, direction angle,start_fixation,end_fixation)\nif total_number_fixations>2 || total_number_fixations==2\n total_number_saccades=total_number_fixations-1;\n %initialize saccades list\n saccades=zeros(total_number_saccades,9);\n j=2;%pointer\n for i=1:(total_number_fixations-1)\n %import start and end points in list\n saccades(i,1)=fixation_list(i,1);\n saccades(i,2)=fixation_list(i,2);\n saccades(i,3)=fixation_list(j,1);\n saccades(i,4)=fixation_list(j,2);\n \n %compute saccades durations\n saccades(i,5)=fixation_list(j,5)-fixation_list(i,6);\n \n %compute amplitude\n saccades(i,6)=distance2p(saccades(i,1),saccades(i,2),saccades(i,3),saccades(i,4));\n \n %compute direction angle\n saccades(i,7)=direction_angle(saccades(i,1),saccades(i,2),saccades(i,3),saccades(i,4));\n \n %name start and end fixations in each saccade\n saccades(i,8)=i;\n saccades(i,9)=j;\n \n j=j+1;\n end\n \n %compute mean duration of saccades\n mean_durations_saccades=mean(saccades(:,5));\nelse\n total_number_saccades=0;\nend\n\n%Scanpath analysis\n%compute scanpath length\nscanpath_length=sum(saccades(:,6));\n\n%compute scanpath duration\nscanpath_duration=fixation_list(total_number_fixations,6)-fixation_list(1,5);\n\n%compute saccade/fixation ratio\nratio_sf=sum(saccades(:,5))/sum(fixation_list(:,7));\n\n%compute scanpath spatial density\n%build points_areas_matrix(x,y,area_id)\n%initialize\npoints_areas_matrix=zeros(1,3);\nfor i=1:total_number_fixations\n area=find_point_area(fixation_list(i,1),fixation_list(i,2),maxx,maxy,spacing_scanpath);\n points_areas_matrix(i,1)=area(1);\n 
points_areas_matrix(i,2)=area(2);\n points_areas_matrix(i,3)=area(3);\n \nend\nn_scanpath_areas=area(4);\n%compute the number of unique areas between points areas\nnumber_different_areas=length(unique(points_areas_matrix(:,3)));\n%scanpath_density\nscanpath_density=number_different_areas/n_scanpath_areas;\n\n%create transition matrix\n%build fixation_transition_areas(x_fixation,y_fixation,x_center,y_center,transition_area)\n%initialize fixation_transition areas\nfixation_transition_areas=zeros(1,5);\nfor i=1:total_number_fixations\n fixation_transition_areas(i,1)=fixation_list(i,1);\n fixation_transition_areas(i,2)=fixation_list(i,2);\n area=find_point_area(fixation_list(i,1),fixation_list(i,2),maxx,maxy,spacing_transition);\n fixation_transition_areas(i,3:5)=area(1:3);\nend\n%transitions\ntransitions=fixation_transition_areas(:,5);\n%transition dimension corresponds to maximun number of areas\ntransition_dimension=area(4);\n%build transition matrix(>=1:indicates transition, 0: indicates no transition)\n%initialize\ntransition_matrix=zeros(transition_dimension);\n\nfor i=1:(total_number_fixations-1)\n for j=1:transition_dimension\n if j==transitions(i)\n transition_matrix(transitions(i+1),j)=transition_matrix(transitions(i+1),j)+1;\n end\n end\nend\n\n%compute the number of transitions\n%initialize number\nnumber_transitions=0;\nfor i=1:transition_dimension\n for j=1:transition_dimension\n if transition_matrix(i,j)>0\n number_transitions=number_transitions+1;\n end\n end\nend\n\n%compute transition density\ntransition_density=number_transitions/(transition_dimension^2);\n\n\n%print results\nfprintf('Eye Movement metrics analysis\\n\\n')\nfprintf('Input Parameters: \\n')\nfprintf(' Threshold for repeat fixations: %.3f\\n',repeat_fixation_threshold)\nfprintf(' Scanpath spacing (spatial density computation): %.3f\\n',spacing_scanpath)\nfprintf(' Transition matrix spacing: %.3f\\n',spacing_transition)\n\nfprintf('\\nFixation Metrics Analysis:\\n')\nfprintf(' Total number of fixations: %.f\\n',total_number_fixations)\nfprintf(' Mean duration of fixations: %.1f\\n',mean_durations_fixations)\nfprintf(' Time to first fixation: %.1f\\n',time_to_first_fixation)\nif n_repeat_fixations>1\n fprintf(' Repeat Fixations:\\n')\n fprintf(' (Fixation_1_id-Fixation_2_id-Distance)\\n')\n for i=1:(n_repeat_fixations-1)\n fprintf(' %.f %.f %.3f \\n',repeat_fixations(i,1),repeat_fixations(i,2),repeat_fixations(i,3))\n end\nend\nfprintf(' Total duration of all fixations: %.1f\\n',total_duration_fixations)\n\nfprintf('\\nSaccades Analysis:\\n')\nfprintf(' Total number of saccades: %.f\\n',total_number_saccades)\nif total_number_saccades>1\n fprintf(' Saccades list:\\n')\n fprintf(' (ID-X_Start_Point-Y_Start_Point-X_End_Point-Y_End_Point-\\n Duration-Amplitude-Direction_angle-Start_Fixation-End_Fixation)\\n')\n for i=1:total_number_saccades\n fprintf(' %.f %.4f %.4f %.4f %.4f %.1f %.3f %.3f %.f %.f\\n',i,saccades(i,1),saccades(i,2),saccades(i,3),saccades(i,4),saccades(i,5),saccades(i,6),saccades(i,7),saccades(i,8),saccades(i,9))\n end\nend\n\nfprintf('\\nScanpath Analysis:\\n')\nfprintf(' Scanpath length: %.3f\\n',scanpath_length)\nfprintf(' Scanpath duration: %.1f\\n',scanpath_duration)\nfprintf(' Saccades/Fixations Ratio: %.3f\\n',ratio_sf)\nfprintf(' Scanpath Spatial Density: %.3f\\n',scanpath_density)\nfprintf(' Transition Matrix:\\n')\nfprintf(' ')\nfor i=1:transition_dimension\nfprintf('-%.f',i)\nend\nfprintf('\\n')\nfor i=1:transition_dimension\n fprintf(' %.f-',i)\n for j=1:transition_dimension\n fprintf(' 
%.f',transition_matrix(i,j))\n end\n fprintf('\\n')\nend\nfprintf('Transition Density: %.3f\\n',transition_density)\nfprintf('________________________________________________________\\n')\nfprintf('\\nEnd of Metrics Analysis Report\\n')\n\n\nfprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\nfprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\nfprintf('\\n')\nfprintf(' This program is free software: you can redistribute it and/or modify\\n')\nfprintf(' it under the terms of the GNU General Public License as published by\\n')\nfprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\nfprintf(' (at your option) any later version.\\n')\nfprintf('\\n')\nfprintf(' This program is distributed in the hope that it will be useful,\\n')\nfprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\nfprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\nfprintf(' GNU General Public License for more details.\\n')\nfprintf('\\n')\nfprintf(' You should have received a copy of the GNU General Public License\\n')\nfprintf(' along with this program. If not, see .\\n')\nfprintf('\\n')\nfprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "visualizations.m", "ext": ".m", "path": "LandRate-master/visualizations.m", "size": 4618, "source_encoding": "utf_8", "md5": "b86cb5e5590444b6dd3ba5d7f54d2128", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
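The saccade list built in metrics_analysis above derives each saccade from two consecutive fixations. The snippet below reproduces those three quantities for a pair of hypothetical fixation rows; since distance2p and direction_angle live in separate files, hypot and atan2d are used here as stand-ins for the distance and angle computations.

% columns: (x, y, n_t1, n_t2, start_time, end_time, duration)
fixA = [0.40 0.30 12 12 1000 1250 250];
fixB = [0.65 0.55 15 15 1330 1700 370];

sacc_duration  = fixB(5) - fixA(6);                          % 80: inter-fixation gap
sacc_amplitude = hypot(fixB(1)-fixA(1), fixB(2)-fixA(2));    % Euclidean distance
sacc_direction = atan2d(fixB(2)-fixA(2), fixB(1)-fixA(1));   % degrees, stand-in
fprintf('duration %.0f, amplitude %.3f, direction %.1f deg\n', ...
        sacc_duration, sacc_amplitude, sacc_direction);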
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%function visualizations\n%export different visualizations in tracker units\n%input parameters:\n%-data: raw data corresponded to one stimuli\n%-fixation_list: list of fixations(after fixation_detection)\n%-maxr: maximum value of radius in tracker units to represent fixations durations\n%call function example\n%visualizations('data.txt',fixations_list,0.1)\nfunction visualizations(data,fixations_list,maxr)\ndata=load(data);\n%x,y,t\nx=data(:,1);\ny=data(:,2);\nt=data(:,3);\n\n%x_fix,y_fix,duration\nx_fix=fixations_list(:,1);\ny_fix=fixations_list(:,2);\nduration=fixations_list(:,7);\nn_fix=length(x_fix);\n\n%x(t),y(t) visualizations\nfigure\nplot(t,x,'r-')\nhold on\nplot(t,y,'b-')\ntitle('Horizontal and Vertical Coordinates along Time','Color','k','FontSize',14)\nxlabel('Time','Color','k','FontSize',12)\nylabel('Coordinates','Color','k','FontSize',12)\nlegend('x(t)','y(t)','Location','SouthEastOutside')\ngrid on\n\n%raw data visualization\nfigure\nplot(x,y,'r+')\ntitle('Raw Data Distribution','Color','k','FontSize',14)\nxlabel('Horizontal Coordinate','Color','k','FontSize',12)\nylabel('Vertical Coordinate','Color','k','FontSize',12)\nhold on\nplot(x,y,'b--')\naxis('equal')\nlegend('Record Point','Records Trace','Location','SouthEastOutside')\ngrid on\n\n%space-time-cube\nfigure\nplot3(x,y,t,'r','LineWidth',1.5)\nhold on\nplot3(x,y,t,'b+')\ntitle('Space-Time-Cube','Color','k','FontSize',14)\nxlabel('Horizontal Coordinate','Color','k','FontSize',12)\nylabel('Vertical Coordinate','Color','k','FontSize',12)\nzlabel('Time','Color','k','FontSize',12)\nlegend('Records Trace','Record Point','Location','SouthEastOutside')\ngrid on\n\n%Scanpath visualization\nfigure\nplot(x_fix,y_fix,'gs')\nhold on\nplot(x_fix,y_fix,'-b')\ntitle('Scanpath (Fixations Duration & Saccades) ','Color','k','FontSize',14)\naxis('equal')\n\n\n%create circle points\nc=linspace(0,2*pi);\n%compute maxr_par\nmaxr_par=maxr/max(sqrt(duration)); %max r corresponds to max duration\nfor i=1:n_fix\n hold on\n %create circle with duration\n x_center=x_fix(i);\n y_center=y_fix(i);\n xc=(maxr_par*sqrt(duration(i)))*cos(c);\n yc=(maxr_par*sqrt(duration(i)))*sin(c);\n fill(x_center+xc,y_center+yc,'r');\n text(x_center,y_center,num2str(i),'HorizontalAlignment','Left','VerticalAlignment','Bottom','Color','m')\nend\nalpha(0.6)\nlegend('Fixation Center','Saccade','Radius represents the duration','Location','SouthEastOutside')\nxlabel('Horizontal Coordinate','Color','k','FontSize',10)\nylabel('Vertical Coordinate','Color','k','FontSize',10)\nfprintf('\\nVisualizations are plotted successfully\\n')\n\n\nfprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\nfprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\nfprintf('\\n')\nfprintf(' This program is free software: you can redistribute it and/or modify\\n')\nfprintf(' it under the terms of the GNU General Public License as published by\\n')\nfprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\nfprintf(' (at your option) any later version.\\n')\nfprintf('\\n')\nfprintf(' This program is distributed in the hope that it will be useful,\\n')\nfprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\nfprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\\n')\nfprintf(' GNU General Public License for more details.\\n')\nfprintf('\\n')\nfprintf(' You should have received a copy of the GNU General Public License\\n')\nfprintf(' along with this program. If not, see .\\n')\nfprintf('\\n')\nfprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend\n\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "saccade_analysis_EyeMMV_modified.m", "ext": ".m", "path": "LandRate-master/saccade_analysis_EyeMMV_modified.m", "size": 10726, "source_encoding": "utf_8", "md5": "da79190df4a2472a20a0d9e862b846b2", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%function metrics_analysis\n%eye movement metrics computations\n%input parameters:\n%-fixation_list: list of fixations(after fixation_detection)\n%-repeat_fixation_threshold: threshold in tracker units for repeat fixations\n%-spacing_scanpath: interval in tracker units to compute scanpath density\n%-spacing_transition: interval in tracker units to compute transition matrix density\n%-maxx: maximum horizontal coordinate in tracker coordinate system\n%-maxy: maximum vertical coordinate in tracker coordinate system\n%export matlab matrixes:\n%-repeat_fixations: list of repeat fixations\n%-saccades: list of saccadic movements\n%-transition_matrix: transition matrix according the giving sapcing\n%call function example:\n%metrics_analysis(fixation_list,0.10,0.25,0.25,1.25,1.00);\n%fixation_list:matlab matrix as computed from fixation_detection\n\n\n%export saccade_list:[X_Start_Point][Y_Start_Point][X_End_Point][Y_End_Point][Duration][Amplitude][Direction_angle][Start_Fixation][End_Fixation])\nfunction saccades=saccade_analysis_EyeMMV_modified(fixation_list)%,repeat_fixation_threshold,spacing_scanpath,spacing_transition,maxx,maxy)\n\n%Fixation analysis\n%compute total number of fixations\ntotal_number_fixations=size(fixation_list);\ntotal_number_fixations=total_number_fixations(1,1);\n\n% %compute mean duration of fixations\n% mean_durations_fixations=mean(fixation_list(:,7));\n% \n% %compute time to first fixation\n% time_to_first_fixation=fixation_list(1,5);\n\n% %compute repeat fixations\n% %initialize distances between fixations(fixation1,fixation2,distance_between_two_fixations)\n% distances_between_fixations=zeros(1,3);\n% for i=1:total_number_fixations\n% for j=1:total_number_fixations\n% if i~=j && i1\n% repeat_fixations=repeat_fixations(2:n_repeat_fixations,:);\n% end\n% \n% %compute total duration of fixations\n% total_duration_fixations=sum(fixation_list(:,7));\n\n%Saccade analysis\n%build saccades list (x_start_point,y_start_point,x_end_point,y_end_point, duration, 
amplitude, direction angle,start_fixation,end_fixation)\nif total_number_fixations>2 || total_number_fixations==2\n total_number_saccades=total_number_fixations-1;\n %initialize saccades list\n saccades=zeros(total_number_saccades,9);\n j=2;%pointer\n for i=1:(total_number_fixations-1)\n %import start and end points in list\n saccades(i,1)=fixation_list(i,1);\n saccades(i,2)=fixation_list(i,2);\n saccades(i,3)=fixation_list(j,1);\n saccades(i,4)=fixation_list(j,2);\n \n %compute saccades durations\n saccades(i,5)=fixation_list(j,5)-fixation_list(i,6);\n \n %compute amplitude\n saccades(i,6)=distance2p(saccades(i,1),saccades(i,2),saccades(i,3),saccades(i,4));\n \n %compute direction angle\n saccades(i,7)=direction_angle(saccades(i,1),saccades(i,2),saccades(i,3),saccades(i,4));\n \n %name start and end fixations in each saccade\n saccades(i,8)=i;\n saccades(i,9)=j;\n \n j=j+1;\n end\n \n %compute mean duration of saccades\n mean_durations_saccades=mean(saccades(:,5));\nelse\n total_number_saccades=0;\nend\n\n% %Scanpath analysis\n% %compute scanpath length\n% scanpath_length=sum(saccades(:,6));\n% \n% %compute scanpath duration\n% scanpath_duration=fixation_list(total_number_fixations,6)-fixation_list(1,5);\n% \n% %compute saccade/fixation ratio\n% ratio_sf=sum(saccades(:,5))/sum(fixation_list(:,7));\n% \n% %compute scanpath spatial density\n% %build points_areas_matrix(x,y,area_id)\n% %initialize\n% points_areas_matrix=zeros(1,3);\n% for i=1:total_number_fixations\n% area=find_point_area(fixation_list(i,1),fixation_list(i,2),maxx,maxy,spacing_scanpath);\n% points_areas_matrix(i,1)=area(1);\n% points_areas_matrix(i,2)=area(2);\n% points_areas_matrix(i,3)=area(3);\n% \n% end\n% n_scanpath_areas=area(4);\n% %compute the number of unique areas between points areas\n% number_different_areas=length(unique(points_areas_matrix(:,3)));\n% %scanpath_density\n% scanpath_density=number_different_areas/n_scanpath_areas;\n% \n% %create transition matrix\n% %build fixation_transition_areas(x_fixation,y_fixation,x_center,y_center,transition_area)\n% %initialize fixation_transition areas\n% fixation_transition_areas=zeros(1,5);\n% for i=1:total_number_fixations\n% fixation_transition_areas(i,1)=fixation_list(i,1);\n% fixation_transition_areas(i,2)=fixation_list(i,2);\n% area=find_point_area(fixation_list(i,1),fixation_list(i,2),maxx,maxy,spacing_transition);\n% fixation_transition_areas(i,3:5)=area(1:3);\n% end\n% %transitions\n% transitions=fixation_transition_areas(:,5);\n% %transition dimension corresponds to maximun number of areas\n% transition_dimension=area(4);\n% %build transition matrix(>=1:indicates transition, 0: indicates no transition)\n% %initialize\n% transition_matrix=zeros(transition_dimension);\n% \n% for i=1:(total_number_fixations-1)\n% for j=1:transition_dimension\n% if j==transitions(i)\n% transition_matrix(transitions(i+1),j)=transition_matrix(transitions(i+1),j)+1;\n% end\n% end\n% end\n% \n% %compute the number of transitions\n% %initialize number\n% number_transitions=0;\n% for i=1:transition_dimension\n% for j=1:transition_dimension\n% if transition_matrix(i,j)>0\n% number_transitions=number_transitions+1;\n% end\n% end\n% end\n% \n% %compute transition density\n% transition_density=number_transitions/(transition_dimension^2);\n% \n% \n% %print results\n% fprintf('Eye Movement metrics analysis\\n\\n')\n% fprintf('Input Parameters: \\n')\n% fprintf(' Threshold for repeat fixations: %.3f\\n',repeat_fixation_threshold)\n% fprintf(' Scanpath spacing (spatial density computation): 
%.3f\\n',spacing_scanpath)\n% fprintf(' Transition matrix spacing: %.3f\\n',spacing_transition)\n% \n% fprintf('\\nFixation Metrics Analysis:\\n')\n% fprintf(' Total number of fixations: %.f\\n',total_number_fixations)\n% fprintf(' Mean duration of fixations: %.1f\\n',mean_durations_fixations)\n% fprintf(' Time to first fixation: %.1f\\n',time_to_first_fixation)\n% if n_repeat_fixations>1\n% fprintf(' Repeat Fixations:\\n')\n% fprintf(' (Fixation_1_id-Fixation_2_id-Distance)\\n')\n% for i=1:(n_repeat_fixations-1)\n% fprintf(' %.f %.f %.3f \\n',repeat_fixations(i,1),repeat_fixations(i,2),repeat_fixations(i,3))\n% end\n% end\n% fprintf(' Total duration of all fixations: %.1f\\n',total_duration_fixations)\n% \n% fprintf('\\nSaccades Analysis:\\n')\n% fprintf(' Total number of saccades: %.f\\n',total_number_saccades)\n% if total_number_saccades>1\n% fprintf(' Saccades list:\\n')\n% fprintf(' (ID-X_Start_Point-Y_Start_Point-X_End_Point-Y_End_Point-\\n Duration-Amplitude-Direction_angle-Start_Fixation-End_Fixation)\\n')\n% for i=1:total_number_saccades\n% fprintf(' %.f %.4f %.4f %.4f %.4f %.1f %.3f %.3f %.f %.f\\n',i,saccades(i,1),saccades(i,2),saccades(i,3),saccades(i,4),saccades(i,5),saccades(i,6),saccades(i,7),saccades(i,8),saccades(i,9))\n% end\n% end\n% \n% fprintf('\\nScanpath Analysis:\\n')\n% fprintf(' Scanpath length: %.3f\\n',scanpath_length)\n% fprintf(' Scanpath duration: %.1f\\n',scanpath_duration)\n% fprintf(' Saccades/Fixations Ratio: %.3f\\n',ratio_sf)\n% fprintf(' Scanpath Spatial Density: %.3f\\n',scanpath_density)\n% fprintf(' Transition Matrix:\\n')\n% fprintf(' ')\n% for i=1:transition_dimension\n% fprintf('-%.f',i)\n% end\n% fprintf('\\n')\n% for i=1:transition_dimension\n% fprintf(' %.f-',i)\n% for j=1:transition_dimension\n% fprintf(' %.f',transition_matrix(i,j))\n% end\n% fprintf('\\n')\n% end\n% fprintf('Transition Density: %.3f\\n',transition_density)\n% fprintf('________________________________________________________\\n')\n% fprintf('\\nEnd of Metrics Analysis Report\\n')\n% \n% \n% fprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\n% fprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\n% fprintf('\\n')\n% fprintf(' This program is free software: you can redistribute it and/or modify\\n')\n% fprintf(' it under the terms of the GNU General Public License as published by\\n')\n% fprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\n% fprintf(' (at your option) any later version.\\n')\n% fprintf('\\n')\n% fprintf(' This program is distributed in the hope that it will be useful,\\n')\n% fprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\n% fprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\n% fprintf(' GNU General Public License for more details.\\n')\n% fprintf('\\n')\n% fprintf(' You should have received a copy of the GNU General Public License\\n')\n% fprintf(' along with this program. 
If not, see .\\n')\n% fprintf('\\n')\n% fprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "find_point_area.m", "ext": ".m", "path": "LandRate-master/find_point_area.m", "size": 2943, "source_encoding": "utf_8", "md5": "bf1a31dbfba11e00783fb8c1b8ad7345", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%function find_point_area\n%input parameters\n%(x,y): coordinates of point\n%max_hor: maximum horizontal dimension of coordinate system\n%max_ver: maximum vertical dimension of coordinate system\n%spacing: spacing parameter to create transition matrix\n%export Point_area(x_center,y_center,area,maximum_number_of_areas)\n\nfunction Point_area=find_point_area(x,y,max_hor,max_ver,spacing)\n\nhor_spacing=[0:spacing:max_hor];\nver_spacing=[0:spacing:max_ver];\n\n%build matrix with the coordinates of centers of transition matrix squares\n%initialization (x,y,ID)\ncenters=zeros((length(hor_spacing)-1)*(length(ver_spacing)-1),3);\ncenters(1,1)=spacing/2;\ncenters(1,2)=spacing/2;\n\nfor i=2:(length(hor_spacing)-1)\n centers(i,1)=centers(i-1,1)+spacing;\n centers(i,2)=centers(1,2);\nend\n\nfor i=length(hor_spacing):length(hor_spacing)-1:((length(hor_spacing)-1)*(length(ver_spacing)-1))\n centers(i:(i-2+length(hor_spacing)),1)=centers(1:(length(hor_spacing)-1),1);\n centers(i:(i-2+length(hor_spacing)),2)=centers((i-((length(hor_spacing)-1))):(i-1),2)+spacing;\nend\n\n%create areas id\n%the matrix centers contains the square areas (x_center,y_center,id) of transition matrix\nfor i=1:(length(hor_spacing)-1)*(length(ver_spacing)-1)\n centers(i,3)=i;\nend\n\n%compute number of areas\nn_areas=(length(hor_spacing)-1)*(length(ver_spacing)-1);\n\n%find in which area the point is located\n%compute distances between point and centers\n%initialize distances(distance center from point,x_center,y_center,area)\ndistances=zeros(n_areas,4);\nfor i=1:n_areas\n distances(i,1)=distance2p(x,y,centers(i,1),centers(i,2));\n distances(i,2)=centers(i,1);\n distances(i,3)=centers(i,2);\n distances(i,4)=centers(i,3);\nend\n\n%the area corresponds to the minimum value of distance\nmin_value=min(distances(:,1));\n\n%find point corresponded center\nfor i=1:n_areas\n if distances(i,1)==min_value\n Point_area=distances(i,2:4);\n end\nend\n\n%maximun number of areas\nPoint_area=[Point_area,n_areas];\n\nend\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "scan_path_visualization_EyeMMV_modified.m", "ext": ".m", "path": "LandRate-master/scan_path_visualization_EyeMMV_modified.m", "size": 5064, "source_encoding": "utf_8", "md5": 
"b33ae032caf71d2aec57f5b53ebe1667", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%function visualizations\n%export different visualizations in tracker units\n%input parameters:\n%-data: raw data corresponded to one stimuli\n%-fixation_list: list of fixations(after fixation_detection)\n%-maxr: maximum value of radius in tracker units to represent fixations durations\n%call function example\n%visualizations('data.txt',fixations_list,0.1)\nfunction scan_path_visualization_EyeMMV_modified(fixations_list, maxr)\n% data=load(data);\n% %x,y,t\n% x=data(:,1);\n% y=data(:,2);\n% t=data(:,3);\n\n%x_fix,y_fix,duration\nx_fix=fixations_list(:,1);\ny_fix=fixations_list(:,2);\nduration=fixations_list(:,7);\nn_fix=length(x_fix);\n\n% %x(t),y(t) visualizations\n% figure\n% plot(t,x,'r-')\n% hold on\n% plot(t,y,'b-')\n% title('Horizontal and Vertical Coordinates along Time','Color','k','FontSize',14)\n% xlabel('Time','Color','k','FontSize',12)\n% ylabel('Coordinates','Color','k','FontSize',12)\n% legend('x(t)','y(t)','Location','SouthEastOutside')\n% grid on\n% \n% %raw data visualization\n% figure\n% plot(x,y,'r+')\n% title('Raw Data Distribution','Color','k','FontSize',14)\n% xlabel('Horizontal Coordinate','Color','k','FontSize',12)\n% ylabel('Vertical Coordinate','Color','k','FontSize',12)\n% hold on\n% plot(x,y,'b--')\n% axis('equal')\n% legend('Record Point','Records Trace','Location','SouthEastOutside')\n% grid on\n% \n% %space-time-cube\n% figure\n% plot3(x,y,t,'r','LineWidth',1.5)\n% hold on\n% plot3(x,y,t,'b+')\n% title('Space-Time-Cube','Color','k','FontSize',14)\n% xlabel('Horizontal Coordinate','Color','k','FontSize',12)\n% ylabel('Vertical Coordinate','Color','k','FontSize',12)\n% zlabel('Time','Color','k','FontSize',12)\n% legend('Records Trace','Record Point','Location','SouthEastOutside')\n% grid on\n\n\n%imshow(stimulus,'InitialMagnification','fit')\nhold on\n%Scanpath visualization\nplot(x_fix,y_fix,'gs')\nhold on\nplot(x_fix,y_fix,'-b')\n%title('Scanpath (Fixations Duration & Saccades) ','Color','k','FontSize',14)\n\n\n\n%create circle points\nc=linspace(0,2*pi);\n%compute maxr_par\nmaxr_par=maxr/max(sqrt(duration)); %max r corresponds to max duration\nfor i=1:n_fix\n hold on\n %create circle with duration\n x_center=x_fix(i);\n y_center=y_fix(i);\n xc=(maxr_par*sqrt(duration(i)))*cos(c);\n yc=(maxr_par*sqrt(duration(i)))*sin(c);\n fill(x_center+xc,y_center+yc,'r');\n text(x_center,y_center,num2str(duration(i)),'HorizontalAlignment','Left','VerticalAlignment','Bottom','Color','y')\nend\nalpha(0.8)\n\nhold on\n%show start and end fixation 
point\ntext(x_fix(1),y_fix(1),'START','HorizontalAlignment','Left','VerticalAlignment','top','Color','c')\nhold on\ntext(x_fix(n_fix),y_fix(n_fix),'END','HorizontalAlignment','Left','VerticalAlignment','top','Color','c')\n\n\n% legend('Fixation Center','Saccade','Radius represents the duration','Location','SouthEastOutside')\n% xlabel('Horizontal Coordinate','Color','k','FontSize',10)\n% ylabel('Vertical Coordinate','Color','k','FontSize',10)\n% fprintf('\\nVisualizations are plotted successfully\\n')\n\n\n% fprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\n% fprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\n% fprintf('\\n')\n% fprintf(' This program is free software: you can redistribute it and/or modify\\n')\n% fprintf(' it under the terms of the GNU General Public License as published by\\n')\n% fprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\n% fprintf(' (at your option) any later version.\\n')\n% fprintf('\\n')\n% fprintf(' This program is distributed in the hope that it will be useful,\\n')\n% fprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\n% fprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\n% fprintf(' GNU General Public License for more details.\\n')\n% fprintf('\\n')\n% fprintf(' You should have received a copy of the GNU General Public License\\n')\n% fprintf(' along with this program. If not, see .\\n')\n% fprintf('\\n')\n% fprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend\n\n\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "distance2p.m", "ext": ".m", "path": "LandRate-master/distance2p.m", "size": 1124, "source_encoding": "utf_8", "md5": "cd76c27ed4b3f0855a9ff19c2bddc429", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%distance between 2 points\n%point 1(x1,y1)\n%point 2(x2,y2)\nfunction distance2p=distance2p(x1,y1,x2,y2)\ndx=x2-x1;\ndy=y2-y1;\ndistance2p=sqrt((dx^2)+(dy^2));\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "values_normalization.m", "ext": ".m", "path": "LandRate-master/values_normalization.m", "size": 148, "source_encoding": "utf_8", "md5": "dff1706d74406c28b86fb02ac727bfc4", "text": "%Values normalization function between 0 and 1\r\nfunction normalized_values=values_normalization(values)\r\nnormalized_values=values./max(values);\r\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "direction_angle.m", "ext": ".m", "path": "LandRate-master/direction_angle.m", "size": 1372, "source_encoding": "utf_8", "md5": "36805f28c8d3616a761d5db8a6e1dfe7", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%calculate direction angle(grads & degrees)\n%export direction angle in degrees\nfunction a12=direction_angle(x1,y1,x2,y2)\n\ndx=x2-x1;\ndy=y2-y1;\n\na=(200/pi())*atan(abs(dx)/abs(dy));\na=abs(a);\na12=a;\n\nif dx>0 & dy>0\na12=a;\nelseif dx>0 & dy<0\na12=200-a;\nelseif dx<0 & dy<0\na12=200+a;\nelseif dx<0 & dy>0\na12=400-a; \nelse\na12=a;\nend\n\nif a12>400\na12=a12-400;\nend\n\nif a12<0\na12=a12+400;\nend\n\na12=(360/400)*a12;\nend"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "fixation_detection_EyeMMV_modified.m", "ext": ".m", "path": "LandRate-master/fixation_detection_EyeMMV_modified.m", "size": 8663, "source_encoding": "utf_8", "md5": "c5aff6169465d2a52873e3049320b768", "text": "% This is a modified version of EyeMMV's toolbox fixation detection\n% algorithm\n\n\n% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr \n\n%function fixation_detection\n%detect fixations from raw data\n%input parameters:\n%-data:raw data (x y t),x,y: in tracker units(cartesian coordinate system),t: time in ms\n%-t1: spatial parameter t1 in tracker units\n%-t2: spatial parameter t2 in tracker units\n%-minDur: minimum value of fixation duration in ms\n%-maxx: maximum horizontal coordinate in tracker coordinate system\n%-maxy: maximum vertical coordinate in tracker coordinate system\n%export matlab matrixes:\n%-fixation_list_t2:list of fixations info computed with t1,t2,minDur criteria\n%-fixation_list_3s:list of fixations info computed with t1,3s,minDur criteria\n%call function example:\n%fixation_detection('data.txt',0.250,0.100,150,1.25,1.00);\nfunction final_fixation_list=fixation_detection_EyeMMV_modified(data,t1,t2,minDur,maxx,maxy,fixation_method)\n%read data according to the new format\nx=data(:,3);\ny=data(:,4);\nt=data(:,2);\n\n%adapt new format in EyeMMV's required format\ndata=[x y t];\nx=data(:,1);\ny=data(:,2);\nt=data(:,3);\nn=length(t);\n%build initial fixations list\n%categorize each data point to a fixation cluster according to tolerance 1\nfixations=[data,zeros(n,1)];\n%initialize pointers\nfixid=1; %fixation id\nmx=0; %mean coordinate x\nmy=0; %mean coordinate y\nd=0; %dinstance between data point and mean point\nfixpointer=1; %fixations pointer\nfor i=1:n\n mx=mean(x(fixpointer:i));\n my=mean(y(fixpointer:i));\n d=distance2p(mx,my,x(i),y(i));\n if d>t1\n fixid=fixid+1;\n fixpointer=i;\n end\n fixations(i,4)=fixid;\nend\n%end of clustering according to tolerance 1\n%number of fixation after clustering (t1)\nnumber_fixations=fixations(n,4);\n\n%initialize fixations list according spatial criteria\n%(Center x, Center y, Number of data points after t1, Number of data points\n%after second criterion, Start time, End time, Duration)\nfixation_list_t2=zeros(1,7);\nfixation_list_3s=zeros(1,7);\n\n%initialize the list of points which are not participate in fixation analysis\nlist_of_out_points=[0 0 0 -1];\n\n%print fixation list according to spatial criteria\nfor i=1:number_fixations\n [centerx_t2,centery_t2,n_t1_t2,n_t2,t1_t2,t2_t2,d_t2,out_points]=fixations_t2(fixations,i,t2);\n [centerx_3s,centery_3s,n_t1_3s,n_3s,t1_3s,t2_3s,d_3s]=fixations_3s(fixations,i);\n %build list(t2)\n fixation_list_t2(i,1)=centerx_t2;\n fixation_list_t2(i,2)=centery_t2;\n fixation_list_t2(i,3)=n_t1_t2;\n fixation_list_t2(i,4)=n_t2;\n fixation_list_t2(i,5)=t1_t2;\n fixation_list_t2(i,6)=t2_t2;\n fixation_list_t2(i,7)=d_t2;\n %build list(3s)\n fixation_list_3s(i,1)=centerx_3s;\n fixation_list_3s(i,2)=centery_3s;\n fixation_list_3s(i,3)=n_t1_3s;\n fixation_list_3s(i,4)=n_3s;\n fixation_list_3s(i,5)=t1_3s;\n fixation_list_3s(i,6)=t2_3s;\n fixation_list_3s(i,7)=d_3s;\n \n %build list of points which are not used\n list_of_out_points=[list_of_out_points;out_points];\nend\n\n%remove from list of out points the zeros records\nn_out=size(list_of_out_points);\nn_out=n_out(1,1);\nlist=zeros(1,4);\nfor i=1:n_out\n if list_of_out_points(i,4)==0\n list=[list;list_of_out_points(i,:)];\n end\nend\nn_list=size(list);\nn_list=n_list(1,1);\nif n_out>1\n list_of_out_points=list(2:n_list,:);\nelse\n list_of_out_points=0;\nend\n\n%applying duration threshold\nfixation_list_t2=min_duration(fixation_list_t2,minDur);\nfixation_list_3s=min_duration(fixation_list_3s,minDur);\n\n%export 
results\nn_t2=size(fixation_list_t2);\nn_t2=n_t2(1,1);\nn_3s=size(fixation_list_3s);\nn_3s=n_3s(1,1);\n\n\nif fixation_method=='t2'\n final_fixation_list=fixation_list_t2;\n \nelseif fixation_method=='3s'\n final_fixation_list=fixation_list_3s;\n \nend\n% fprintf(' Fixation Detection Report \\n\\n')\n% fprintf('Import Parameters: \\n')\n% fprintf(' Spatial Parameter t1: %.3f\\n',t1)\n% fprintf(' Spatial Parameter t2: %.3f\\n',t2)\n% fprintf(' Minimum Fixation Duration: %.2f\\n',minDur)\n% fprintf(' Maximum Coordinate in Horizontal Dimension: %.2f\\n',maxx)\n% fprintf(' Maximum Coordinate in Vertical Dimension: %.2f\\n\\n',maxy)\n% \n% fprintf('Number of Raw Data: %.f\\n',n)\n% fprintf('Number of Data used in the analysis(t1,t2,minDur): %.f\\n',sum(fixation_list_t2(:,4)))\n% fprintf('Number of Data used in the analysis(t1,3s,minDur): %.f\\n',sum(fixation_list_t2(:,4)))\n% \n% fprintf('\\nFixations: \\n')\n% fprintf(' Total Number of Fixations(t1,t2,minDur): %.f\\n',n_t2)\n% fprintf(' Total Number of Fixations(t1,3s,minDur): %.f\\n',n_3s)\n% \n% fprintf('\\nt1,t2,minDur:\\n')\n% fprintf(' ID-Xcenter-Ycenter-Nt1-Nt2-StartTime-EndTime-Duration\\n')\n% for i=1:n_t2\n% fprintf(' %.f %.4f %.4f %.f %.f %.4f %.4f %.4f\\n',i,fixation_list_t2(i,1),fixation_list_t2(i,2),fixation_list_t2(i,3),fixation_list_t2(i,4),fixation_list_t2(i,5),fixation_list_t2(i,6),fixation_list_t2(i,7))\n% end\n% fprintf('\\n')\n% fprintf('t1,3s,minDur:\\n')\n% fprintf(' ID-Xcenter-Ycenter-Nt1-N3s-StartTime-EndTime-Duration\\n')\n% for i=1:n_3s\n% fprintf(' %.f %.4f %.4f %.f %.f %.4f %.4f %.4f\\n',i,fixation_list_3s(i,1),fixation_list_3s(i,2),fixation_list_3s(i,3),fixation_list_3s(i,4),fixation_list_3s(i,5),fixation_list_3s(i,6),fixation_list_3s(i,7))\n% end\n% \n% \n% %plot records and fixations\n% figure\n% plot(x,y,'co')\n% set(gca,'Fontsize',20)\n% hold on\n% plot(fixation_list_t2(:,1),fixation_list_t2(:,2),'r+')\n% text(fixation_list_t2(:,1),fixation_list_t2(:,2),num2str(fixation_list_t2(:,7)),'HorizontalAlignment','Left','VerticalAlignment','Bottom','FontSize',20,'Color','r')\n% hold on\n% plot(fixation_list_3s(:,1),fixation_list_3s(:,2),'bs')\n% text(fixation_list_3s(:,1),fixation_list_3s(:,2),num2str(fixation_list_3s(:,7)),'HorizontalAlignment','Right','VerticalAlignment','Top','FontSize',20,'Color','b')\n% hold on \n% plot(list_of_out_points(:,1),list_of_out_points(:,2),'go')\n% legend('Raw Data','Fixations (t1, t2, minDur)','Fixations (t1, 3s, minDur)','Points out of analysis (t1,t2,minDur)','Location','Best')\n% title(['Raw Data and Fixations (t1=',num2str(t1),', t2=',num2str(t2),', minDur=',num2str(minDur),')'],'FontSize',20)\n% xlabel('Horizontal Coordinate','Color','k','FontSize',20)\n% ylabel('Vertical Coordinate','Color','k','FontSize',20)\n% axis('equal')\n% %plot screen outline\n% screen=[0,0;maxx,0;maxx,maxy;0,maxy;0,0]; %sreen region\n% hold on\n% plot(screen(:,1),screen(:,2),'-r')\n% \n% fprintf('\\n Raw Data and Fixations are visualized successfully\\n')\n% fprintf('________________________________________________________\\n')\n% \n% fprintf('End of Fixation Detection report\\n')\n% \n% \n% fprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. 
\\n')\n% fprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\n% fprintf('\\n')\n% fprintf(' This program is free software: you can redistribute it and/or modify\\n')\n% fprintf(' it under the terms of the GNU General Public License as published by\\n')\n% fprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\n% fprintf(' (at your option) any later version.\\n')\n% fprintf('\\n')\n% fprintf(' This program is distributed in the hope that it will be useful,\\n')\n% fprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\n% fprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\n% fprintf(' GNU General Public License for more details.\\n')\n% fprintf('\\n')\n% fprintf(' You should have received a copy of the GNU General Public License\\n')\n% fprintf(' along with this program. If not, see .\\n')\n% fprintf('\\n')\n% fprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\n\nend\n"} +{"plateform": "github", "repo_name": "krasvas/LandRate-master", "name": "visualizations_stimulus.m", "ext": ".m", "path": "LandRate-master/visualizations_stimulus.m", "size": 4473, "source_encoding": "utf_8", "md5": "f29efbcf0b0c5b77cde3750f23f30b48", "text": "% EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool.\n% Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens)\n% \n% This program is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% This program is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with this program. 
If not, see .\n% \n% For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\n\n%function visualizations_stimulus\n%export different visualizations with stimulus image\n%input parameters:\n%-data: raw data corresponded to one stimuli\n%-stimulus: stimulus image\n%-fixation_list: list of fixations(after fixation_detection)\n%-maxr: maximum value of radius in pixels to represent fixations durations\n%call function example\n%visualizations_stimulus('data.txt','stimulus.bmp',fixations_list,100)\nfunction visualizations_stimulus(data,stimulus,fixations_list,maxr);\ndata=load(data);\nstimulus=imread(stimulus);\n%x,y,t\nx=data(:,1);\ny=data(:,2);\nt=data(:,3);\n\n%x_fix,y_fix,duration\nx_fix=fixations_list(:,1);\ny_fix=fixations_list(:,2);\nduration=fixations_list(:,7);\nn_fix=length(x_fix);\n\n%stimulus_size\nstimulus_size=size(stimulus);\nstimulus_vertical_size=stimulus_size(1,1);\nstimulus_horizontal_size=stimulus_size(1,2);\n\n%transform data to pixel coordinate system\nx=stimulus_vertical_size*x;\ny=stimulus_vertical_size*(1-y);\nx_fix=stimulus_vertical_size*x_fix;\ny_fix=stimulus_vertical_size*(1-y_fix);\n\n\n%raw data visualization\nfigure\nimshow(stimulus)\nhold on\nplot(x,y,'r+')\ntitle('Raw Data Distribution','Color','k','FontSize',14)\nxlabel('Horizontal Coordinate','Color','k','FontSize',12)\nylabel('Vertical Coordinate','Color','k','FontSize',12)\nhold on\nplot(x,y,'b--')\naxis('equal')\nlegend('Record Point','Records Trace','Location','SouthEastOutside')\n\n%Scanpath visualization\nfigure\nimshow(stimulus)\nhold on\nplot(x_fix,y_fix,'gs')\nhold on\nplot(x_fix,y_fix,'-b')\ntitle('Scanpath (Fixations Duration & Saccades) ','Color','k','FontSize',14)\naxis('equal')\n%create circle points\nc=linspace(0,2*pi);\n%compute maxr_par\nmaxr_par=maxr/max(sqrt(duration)); %max r corresponds to max duration\nfor i=1:n_fix\n hold on\n %create circle with duration\n x_center=x_fix(i);\n y_center=y_fix(i);\n xc=(maxr_par*sqrt(duration(i)))*cos(c);\n yc=(maxr_par*sqrt(duration(i)))*sin(c);\n fill(x_center+xc,y_center+yc,'r');\n text(x_center,y_center,num2str(i),'HorizontalAlignment','Left','VerticalAlignment','Bottom','Color','m')\nend\nalpha(0.6)\nlegend('Fixation Center','Saccade','Radius represents the duration','Location','SouthEastOutside')\nxlabel('Horizontal Coordinate','Color','k','FontSize',10)\nylabel('Vertical Coordinate','Color','k','FontSize',10)\nfprintf('\\nVisualizations are plotted successfully\\n')\nfprintf('\\nVisualizations with stimulus are plotted successfully\\n')\n\n\nfprintf('\\n EyeMMV toolbox (Eye Movements Metrics & Visualizations): An eye movement post-analysis tool. \\n')\nfprintf(' Copyright (C) 2014 Vassilios Krassanakis (National Technical University of Athens) \\n')\nfprintf('\\n')\nfprintf(' This program is free software: you can redistribute it and/or modify\\n')\nfprintf(' it under the terms of the GNU General Public License as published by\\n')\nfprintf(' the Free Software Foundation, either version 3 of the License, or\\n')\nfprintf(' (at your option) any later version.\\n')\nfprintf('\\n')\nfprintf(' This program is distributed in the hope that it will be useful,\\n')\nfprintf(' but WITHOUT ANY WARRANTY; without even the implied warranty of\\n')\nfprintf(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n')\nfprintf(' GNU General Public License for more details.\\n')\nfprintf('\\n')\nfprintf(' You should have received a copy of the GNU General Public License\\n')\nfprintf(' along with this program. 
If not, see .\\n')\nfprintf('\\n')\nfprintf(' For further information, please email me: krasanakisv@gmail.com or krasvas@mail.ntua.gr\\n')\nend\n\n"} +{"plateform": "github", "repo_name": "tomluc/Pinax-camera-model-master", "name": "RayTrace.m", "ext": ".m", "path": "Pinax-camera-model-master/MATLAB/Optimal_d_0/RayTrace.m", "size": 1672, "source_encoding": "utf_8", "md5": "cf6dfb029293b40cc5f65303955b0b07", "text": "%\r\n% Copyright (c) 2017 Jacobs University Robotics Group\r\n% All rights reserved.\r\n%\r\n%\r\n% Unless specified otherwise this code examples are released under \r\n% Creative Commons CC BY-NC-ND 4.0 license (free for non-commercial use). \r\n% Details may be found here: https://creativecommons.org/licenses/by-nc-nd/4.0/\r\n%\r\n%\r\n% If you are interested in using this code commercially, \r\n% please contact us.\r\n%\r\n% THIS SOFTWARE IS PROVIDED BY Jacobs Robotics ``AS IS'' AND ANY\r\n% EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n% DISCLAIMED. IN NO EVENT SHALL Jacobs Robotics BE LIABLE FOR ANY\r\n% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n% (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n% LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n% ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n%\r\n% Contact: robotics@jacobs-university.de\r\n%\r\n\r\nfunction [refPts]=RayTrace (ray0, normal, mu, mu2, d0, d1,zero)\r\n\r\n\tv0=ray0/norm(ray0);\r\n\tnormal=normal/norm(normal);\r\n\tpi=zero+ d0*v0/(v0'*normal);\r\n normal=-normal;\r\n c=-normal'*v0; \r\n rglass=1/mu;\r\n rwater=1/mu2;\r\n v1=rglass*v0+(rglass*c -sqrt(1-rglass^2*(1-c^2)))*normal;\r\n\tv2=rwater*v0+(rwater*c -sqrt(1-rwater^2*(1-c^2)))*normal;\r\n\tnormal=-normal;\r\n po=pi+ d1*v1/(v1'*normal);\r\n\tv1=v1/norm(v1);\r\n v2=v2/norm(v2);\r\n\trefPts=[v0;pi;v1;po;v2];\r\n\t\r\nend"} +{"plateform": "github", "repo_name": "tomluc/Pinax-camera-model-master", "name": "optim_d_0.m", "ext": ".m", "path": "Pinax-camera-model-master/MATLAB/Optimal_d_0/optim_d_0.m", "size": 1848, "source_encoding": "utf_8", "md5": "f357020570d91223d794d7378da9c884", "text": "%\r\n% Copyright (c) 2017 Jacobs University Robotics Group\r\n% All rights reserved.\r\n%\r\n%\r\n% Unless specified otherwise this code examples are released under \r\n% Creative Commons CC BY-NC-ND 4.0 license (free for non-commercial use). \r\n% Details may be found here: https://creativecommons.org/licenses/by-nc-nd/4.0/\r\n%\r\n%\r\n% If you are interested in using this code commercially, \r\n% please contact us.\r\n%\r\n% THIS SOFTWARE IS PROVIDED BY Jacobs Robotics ``AS IS'' AND ANY\r\n% EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n% DISCLAIMED. 
IN NO EVENT SHALL Jacobs Robotics BE LIABLE FOR ANY\r\n% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n% (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n% LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n% ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n%\r\n% Contact: robotics@jacobs-university.de\r\n\r\nfunction [diff] = optim_d_0(x)\r\n\r\nglobal dmax dmin K d1 ng nw dif\r\n\r\nd0=x;\r\n \r\nImgPts=zeros(3,300);\r\n\r\nfor i=1:20\r\n for j=1:15\r\n ImgPts(1,(j-1)*20+i)=50*i;\r\n ImgPts(2,(j-1)*20+i)=50*j;\r\n ImgPts(3,(j-1)*20+i)=1;\r\n end\r\nend\r\n\r\nZeroRays=inv(K)*ImgPts;\r\n\r\nnormal=[0;0;1];\r\n\r\nt=[0;0;0];\r\n\r\n\r\ndmin=9999;\r\ndmax=-9999;\r\n\r\nfor i=1:300\r\n xl= RayTrace (ZeroRays(:,i), normal, ng, nw, d0, d1,t);\r\n xl=xl(:);\r\n po=[xl(10);xl(11);xl(12)];\r\n v2=[xl(13);xl(14);xl(15)];\r\n pom=abs(po(3)-v2(3)*(po(1)/(2*v2(1)) + po(2)/(2*v2(2))));\r\n dmin=min(dmin,pom);\r\n dmax=max(dmax,pom);\r\nend\r\ndif=dmax-dmin;\r\ndiff=dif;\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"} +{"plateform": "github", "repo_name": "tomluc/Pinax-camera-model-master", "name": "SolveForwardProjectionCase3.m", "ext": ".m", "path": "Pinax-camera-model-master/MATLAB/Find_correction_map/SolveForwardProjectionCase3.m", "size": 5661, "source_encoding": "utf_8", "md5": "7d7e2e3c69273bfc11e9f03096a468ec", "text": "\r\n\r\n% Copyright 2009 Mitsubishi Electric Research Laboratories All Rights Reserved.\r\n% \r\n% Permission to use, copy and modify this software and its documentation without fee for educational, research and non-profit purposes, is hereby granted, provided that the above copyright notice and the following three paragraphs appear in all copies.\r\n% \r\n% To request permission to incorporate this software into commercial products contact: Vice President of Marketing and Business Development; Mitsubishi Electric Research Laboratories (MERL), 201 Broadway, Cambridge, MA 02139 or .\r\n% \r\n% IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\r\n% \r\n% MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN \"AS IS\" BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS OR MODIFICATIONS. \r\n% \r\n\r\n\r\n% Solve Forward projection equation for Case 3 (two layer) (Air-Medium1-Medium2)\r\n% d is the distance of medium1 from camera\r\n% d2 is the total distance of medium2 from camera\r\n\r\n% p is given 3D point\r\n% n is the normal\r\n% mu is refractive index vector\r\n\r\n% M is the 3D point on the layer closest to camera where the first\r\n% refraction happens\r\n\r\n\r\n\r\nfunction [M] = SolveForwardProjectionCase3(d,d2,n,mu,p)\r\n\r\n\r\nmu1 = mu(1,1);\r\nmu2 = mu(2,1);\r\n\r\nM = [0;0;1];\r\n\r\n\r\n%find POR the plane of refraction\r\nPOR = cross(n,p);\r\nPOR = POR/norm(POR);\r\n\r\n% [z1,z2] defines a coordinate system on POR\r\n% axis is away from the camera. 
z1 is along the axis\r\nz1 = -n;\r\nz1 = z1/norm(z1);\r\n\r\n% find z2\r\nz2 = cross(POR,z1);\r\nz2 = z2/norm(z2);\r\n\r\n% find the projection of given 3D point on POR\r\nv = p'*z1;\r\nu = p'*z2;\r\n\r\n\r\n\r\n%solve 12thth degree equation\r\ns1 = (mu1^2 - 1)^2*(mu2^2 - 1)^2;\r\n\r\ns2 = (-4)*u*(mu1^2 - 1)^2*(mu2^2 - 1)^2;\r\n\r\ns3 = 4*u^2*(mu1^2 - 1)^2*(mu2^2 - 1)^2 + 2*(mu1^2 - 1)*(mu2^2 - 1)*((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1));\r\n\r\ns4 = - 2*(mu1^2 - 1)*(mu2^2 - 1)*(2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1)) - 4*u*(mu1^2 - 1)*(mu2^2 - 1)*((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1));\r\n\r\ns5 = ((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1))^2 + 2*(mu1^2 - 1)*(mu2^2 - 1)*(d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1)) - 4*(mu1^2 - 1)*(mu2^2 - 1)*(d - d2)^2*(d2 - v)^2 + 4*u*(mu1^2 - 1)*(mu2^2 - 1)*(2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1));\r\n\r\ns6 = -2*(2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1))*((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1)) - 4*u*(mu1^2 - 1)*(mu2^2 - 1)*(d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1)) - 4*d^4*mu1^2*mu2^2*u*(mu1^2 - 1)*(mu2^2 - 1);\r\n\r\ns7 = 2*((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1))*(d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1)) + (2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1))^2 - 4*(d - d2)^2*(d^2*mu1^2*(mu2^2 - 1) + d^2*mu2^2*(mu1^2 - 1))*(d2 - v)^2 + 10*d^4*mu1^2*mu2^2*u^2*(mu1^2 - 1)*(mu2^2 - 1);\r\n\r\ns8 = -2*(2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1))*(d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1)) - 4*d^4*mu1^2*mu2^2*u*((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1)) - 4*d^4*mu1^2*mu2^2*u^3*(mu1^2 - 1)*(mu2^2 - 1);\r\n\r\ns9 = (d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1))^2 + 2*d^4*mu1^2*mu2^2*u^2*((mu2^2 - 1)*(u^2*(mu1^2 - 1) + d^2*mu1^2) - (mu2^2 - 1)*(d - d2)^2 - (mu1^2 - 1)*(d2 - v)^2 + d^2*mu2^2*(mu1^2 - 1)) - 4*d^4*mu1^2*mu2^2*(d - d2)^2*(d2 - v)^2 + 4*d^4*mu1^2*mu2^2*u*(2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1));\r\n\r\ns10 = - 2*d^4*mu1^2*mu2^2*u^2*(2*d^2*mu1^2*u*(mu2^2 - 1) + 2*d^2*mu2^2*u*(mu1^2 - 1)) - 4*d^4*mu1^2*mu2^2*u*(d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1));\r\n\r\ns11 = 4*d^8*mu1^4*mu2^4*u^2 + 2*d^4*mu1^2*mu2^2*u^2*(d^2*mu2^2*(u^2*(mu1^2 - 1) + d^2*mu1^2) - d^2*mu2^2*(d - d2)^2 - d^2*mu1^2*(d2 - v)^2 + d^2*mu1^2*u^2*(mu2^2 - 1));\r\n\r\ns12 = (-4)*d^8*mu1^4*mu2^4*u^3;\r\n\r\ns13 = d^8*mu1^4*mu2^4*u^4;\r\n\r\n\r\n\r\n%[s1;s2;s3;s4;s5;s6;s7;s8;s9;s10;s11;s12;s13]\r\n\r\n\r\nsol = roots([s1;s2;s3;s4;s5;s6;s7;s8;s9;s10;s11;s12;s13]);\r\n\r\nidx = find(abs(imag(sol)) < 1e-6);\r\nif(isempty(idx))\r\n disp('no solution');\r\n return\r\nend\r\n\r\nsol1 = sol(idx);\r\nnn = size(sol1,1);\r\n\r\n\r\nNormal = 
[0;-1];\r\n\r\nfor ii = 1:nn\r\n \r\n x = sol1(ii,1);\r\n vi = [x;d];\r\n \r\n v2 = RefractedRay(vi,Normal,1,mu1);\r\n q2 = vi + (d-d2)*v2/(v2'*Normal);\r\n \r\n \r\n v3 = RefractedRay(v2,Normal,mu1,mu2);\r\n \r\n vrd = [u;v] - q2;\r\n \r\n e = abs(vrd(1)*v3(2) - vrd(2)*v3(1));\r\n \r\n if(e < 1e-4)\r\n M = x*z2 + d*z1;\r\n return\r\n end\r\nend\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"} +{"plateform": "github", "repo_name": "tomluc/Pinax-camera-model-master", "name": "RayTrace.m", "ext": ".m", "path": "Pinax-camera-model-master/MATLAB/Find_correction_map/RayTrace.m", "size": 1681, "source_encoding": "utf_8", "md5": "9a53429738aea5dd3b07e1a43324ac47", "text": "%\r\n% Copyright (c) 2017 Jacobs University Robotics Group\r\n% All rights reserved.\r\n%\r\n%\r\n% Unless specified otherwise this code examples are released under \r\n% Creative Commons CC BY-NC-ND 4.0 license (free for non-commercial use). \r\n% Details may be found here: https://creativecommons.org/licenses/by-nc-nd/4.0/\r\n%\r\n%\r\n% If you are interested in using this code commercially, \r\n% please contact us.\r\n%\r\n% THIS SOFTWARE IS PROVIDED BY Jacobs Robotics ``AS IS'' AND ANY\r\n% EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n% DISCLAIMED. IN NO EVENT SHALL Jacobs Robotics BE LIABLE FOR ANY\r\n% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n% (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n% LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n% ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n%\r\n% Contact: robotics@jacobs-university.de\r\n%\r\n\r\nfunction [refPts]=RayTrace (ray0, normal, mu, mu2, d0, d1,zero)\r\n\r\n\tv0=ray0/norm(ray0);\r\n\tnormal=normal/norm(normal);\r\n\tpi=zero+ d0*v0/(v0'*normal);\r\n \r\n normal=-normal;\r\n c=-normal'*v0; \r\n rglass=1/mu;\r\n rwater=1/mu2;\r\n v1=rglass*v0+(rglass*c -sqrt(1-rglass^2*(1-c^2)))*normal;\r\n\tv2=rwater*v0+(rwater*c -sqrt(1-rwater^2*(1-c^2)))*normal;\r\n\tnormal=-normal;\r\n po=pi+ d1*v1/(v1'*normal);\r\n\tv1=v1/norm(v1);\r\n v2=v2/norm(v2);\r\n\trefPts=[v0;pi;v1;po;v2];\r\n\t\r\n\t\r\nend"} +{"plateform": "github", "repo_name": "tomluc/Pinax-camera-model-master", "name": "RefractedRay.m", "ext": ".m", "path": "Pinax-camera-model-master/MATLAB/Find_correction_map/RefractedRay.m", "size": 614, "source_encoding": "utf_8", "md5": "603b41ff3d8a155e43e94de324e462b9", "text": "\r\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n% Copyright (c) MERL 2012\r\n% CVPR 2012 Paper Title: A Theory of Multi-Layer Flat Refractive Geometry\r\n% Author: Amit Agrawal\r\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n\r\n% Compute refracted ray direction at a refraction boundary\r\n\r\n\r\nfunction [vr,a,b,tir] = RefractedRay(vi,n,mu1,mu2)\r\n\r\ntir = 0;\r\n\r\n\r\nn = n/norm(n);\r\n\r\nkk = mu1^2*(vi'*n)^2 - (mu1^2-mu2^2)*(vi'*vi);\r\n\r\nif(kk < 0)\r\n % total internal reflection\r\n tir = 1;\r\n a = 0;\r\n b = 0;\r\n vr = zeros(3,1);\r\n return\r\nend\r\n\r\na = mu1/mu2;\r\nb = -mu1*(vi'*n) - sqrt(kk);\r\nb = b/mu2;\r\n\r\nvr = a*vi + b*n;\r\n\r\n\r\n\r\n\r\n\r\n"} +{"plateform": "github", "repo_name": "V170SC/ESN-homeokinesis-master", "name": "mackeyglass_rk4.m", "ext": ".m", "path": 
"ESN-homeokinesis-master/ESNConceptorsTest/MackeyGlassGenerator/mackeyglass_rk4.m", "size": 1157, "source_encoding": "utf_8", "md5": "aab166beab25f407a8396d0149f4f480", "text": "%% mackeyglass_rk4\n% This function computes the numerical solution of the Mackey-Glass\n% delayed differential equation using the 4-th order Runge-Kutta method\n\n\n%%\n% $$k_1=\\Delta t \\cdot mackeyglass\\_eq(x(t), x(t-\\tau), a, b)$$\n%%\n% $$k_2=\\Delta t \\cdot mackeyglass\\_eq(x(t+\\frac{1}{2}k_1), x(t-\\tau), a, b)$$\n%%\n% $$k_3=\\Delta t \\cdot mackeyglass\\_eq(x(t+\\frac{1}{2}k_2), x(t-\\tau), a, b)$$\n%%\n% $$k_4=\\Delta t \\cdot mackeyglass\\_eq(x(t+k_3), x(t-\\tau), a, b)$$\n%%\n% $$x(t+\\Delta t) = x(t) + \\frac{k_1}{6}+ \\frac{k_2}{3} + \\frac{k_3}{6} + \\frac{k_4}{6}$$\n\n%%\n% Here is the code for , \n% the Mackey-Glass delayed differential equation\n\n\n%%\n% *Matlab code:*\nfunction x_t_plus_deltat = mackeyglass_rk4(x_t, x_t_minus_tau, deltat, a, b, n)\n k1 = deltat*mackeyglass_eq(x_t, x_t_minus_tau, a, b, n);\n k2 = deltat*mackeyglass_eq(x_t+0.5*k1, x_t_minus_tau, a, b, n);\n k3 = deltat*mackeyglass_eq(x_t+0.5*k2, x_t_minus_tau, a, b, n);\n k4 = deltat*mackeyglass_eq(x_t+k3, x_t_minus_tau, a, b, n);\n x_t_plus_deltat = (x_t + k1/6 + k2/3 + k3/3 + k4/6);\nend\n\n\n%%\n%\n% \n"} +{"plateform": "github", "repo_name": "V170SC/ESN-homeokinesis-master", "name": "mackeyglass_eq.m", "ext": ".m", "path": "ESN-homeokinesis-master/ESNConceptorsTest/MackeyGlassGenerator/mackeyglass_eq.m", "size": 357, "source_encoding": "utf_8", "md5": "7ee7c2d23205ea0883eaa806816697e7", "text": "%% makeyglass_eq\n% This function returns dx/dt of Mackey-Glass delayed differential equation\n\n%%\n% \n% $$\\frac{dx(t)}{dt}=\\frac{ax(t-\\tau)}{1+x(t-\\tau)^{10}}-bx(t)$$\n% \n\n%%\n% *Matlab code:*\nfunction x_dot = mackeyglass_eq(x_t, x_t_minus_tau, a, b, n)\n x_dot = -b*x_t + a*x_t_minus_tau/(1 + x_t_minus_tau^n);\nend\n\n\n%%\n%\n% \n"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "Post_multifluid_energy.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/Post_multifluid_energy.m", "size": 4422, "source_encoding": "utf_8", "md5": "02c1a46f09d25f338dddf38fd7a2d0c0", "text": "%% Function to calculate the energy of each fluid in a multi-fluid system\r\n\r\nfunction [KE_alpha, KE_beta, PE_alpha, PE_beta] = Post_multifluid_energy()\r\n\r\n\r\n%% initialisation\r\nclear particles\r\n%close all\r\nclc\r\n\r\ntime = save_pos_t;\r\nn = length(time);\r\n\r\n\r\nE_kin_alpha = zeros(n,1);\r\nE_kin_beta = zeros(n,1);\r\nE_pot_alpha = zeros(n,1);\r\nE_pot_beta = zeros(n,1);\r\nE_pV_alpha = zeros(n,1);\r\nE_pV_beta = zeros(n,1);\r\nE_tot1_alpha = zeros(n,1);\r\nE_tot2_alpha = zeros(n,1);\r\nE_tot1_beta = zeros(n,1);\r\nE_tot2_beta = zeros(n,1);\r\n\r\n% particles = []\r\n%% loop over timesteps\r\nfor t = 2:n\r\n \r\n % particle data of current timestep\r\n particles(:,:) = save_pos(int_fluid,:,t);\r\n \r\n % kinetic Energy\r\n mass = particles(:,4);\r\n vel_sq = (particles(:,6).^2 + particles(:,7).^2); % v^2\r\n kin = mass .* vel_sq / 2;\r\n kin(isnan(kin)) = 0;\r\n \r\n % summing the KE of all the particles of each fluid\r\n % E_kin_alpha is going to rewrite the value at t for every iteration in\r\n % this loop\r\n i=1;\r\n for i = 1:int_fluid\r\n if particles(i,3) == 2\r\n E_kin_alpha(t) = E_kin_alpha(t) + kin(i);\r\n elseif particles(i,3) == 3\r\n E_kin_beta(t) = E_kin_beta(t) + kin(i);\r\n end\r\n end\r\n \r\n if (time(t) - t_damp) <= 1e-3\r\n % transient gravity force\r\n g 
= 0.5 * (sin((-0.5 + time(t)/t_damp) * pi) + 1 ) * abs(G(2));\r\n \r\n % potential Energy\r\n pot = mass .* g .* particles(:,2);\r\n pot(isnan(pot)) = 0;\r\n % Summing PE of all particles of each fluid at this timestep\r\n i=1;\r\n for i = 1:int_fluid\r\n if particles(i,3) == 2\r\n E_pot_alpha(t) = E_pot_alpha(t) + pot(i);\r\n elseif particles(i,3) == 3\r\n E_pot_beta(t) = E_pot_beta(t) + pot(i);\r\n end\r\n end\r\n% E_pot(t) = sum(pot);\r\n% E_pot_ref = E_pot(t);\r\n\r\n % pressure volume work\r\n volume = particles(:,4)./ particles(:,5);\r\n rho = particles(:,5);\r\n volume_0 = dx * dx;\r\n pV = mass .* c_0(2).^2./(7.*(7-1)).*((rho./rho_0(2)).^(7-1) + (7-1).*rho_0(2)./rho - 7);\r\n %pV = volume .* particles(:,8);\r\n %pV = p_0(2) ./ (1-gamma(2)) .* volume_0.^(gamma(2)) .* ( volume.^(1-gamma(2)) - volume_0.^(1-gamma(2))) + (Xi(2) - p_0(2)) .* (volume - volume_0); \r\n pV(isnan(pV)) = 0;\r\n i=1;\r\n for i = 1:int_fluid\r\n if particles(i,3) == 2\r\n E_pV_alpha(t) = E_pV_alpha(t) + pV(i);\r\n elseif particles(i,3) == 3\r\n E_pV_beta(t) = E_pV_beta(t) + pV(i);\r\n end\r\n end\r\n% E_pV(t) = sum(pV);\r\n% E_pV_ref = E_pV(t); \r\n \r\n elseif time(t) >= t_damp\r\n g = abs(G(2)); \r\n \r\n % potential Energy\r\n pot = mass .* g .* particles(:,2);\r\n pot(isnan(pot)) = 0;\r\n % Summing PE of all particles of each fluid at this timestep\r\n j=0;\r\n for j = 1:int_fluid\r\n if particles(j,3) == 2\r\n E_pot_alpha(t) = E_pot_alpha(t) + pot(j);\r\n elseif particles(j,3) == 3\r\n E_pot_beta(t) = E_pot_beta(t) + pot(j);\r\n end\r\n end\r\n% E_pot(t) = sum(pot) - E_pot_ref;\r\n \r\n % pressure volume work\r\n volume = particles(:,4)./ particles(:,5);\r\n rho = particles(:,5);\r\n volume_0 = dx * dx;\r\n pV = mass .* c_0(2).^2./(7.*(7-1)).*((rho./rho_0(2)).^(7-1) + (7-1).*rho_0(2)./rho - 7);\r\n %pV = volume .* particles(:,8);\r\n %pV = p_0(2) ./ (1-gamma(2)) .* volume_0.^(gamma(2)) .* ( volume.^(1-gamma(2)) - volume_0.^(1-gamma(2))) + (Xi(2) - p_0(2)) .* (volume - volume_0); \r\n pV(isnan(pV)) = 0;\r\n % Summing PE of all particles of each fluid at this timestep\r\n i=1;\r\n for i = 1:int_fluid\r\n if particles(i,3) == 2\r\n E_pV_alpha(t) = E_pV_alpha(t) + pV(i);\r\n elseif particles(i,3) == 3\r\n E_pV_beta(t) = E_pV_beta(t) + pV(i);\r\n end\r\n end\r\n% E_pV(t) = sum(pV) - E_pV_ref;\r\n end\r\n \r\n % total Energy\r\n E_tot1_alpha(t) = E_kin_alpha(t) + E_pot_alpha(t);\r\n E_tot2_alpha(t) = E_kin_alpha(t) + E_pot_alpha(t) + E_pV_alpha(t);\r\n E_tot1_beta(t) = E_kin_beta(t) + E_pot_beta(t);\r\n E_tot2_beta(t) = E_kin_beta(t) + E_pot_beta(t) + E_pV_beta(t); \r\n \r\nend\r\n\r\nend\r\n\r\n"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "Free_Bubble.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/Free_Bubble.m", "size": 3971, "source_encoding": "utf_8", "md5": "2017ecdee885e0b5d75c900aa7fa6c45", "text": "%% Bubble Formation Dynamic Test Case\r\n% Roger Gonzalez\r\n% 12/06/17\r\n\r\nfunction [particles, rho_0,gamma,c_0,p_0,Xi,my,alpha, a_wall, int_fluid, int_boundary] = Free_Bubble(kernel, dx, d, v_max, alpha)\r\n\r\n % Square domain\r\n origin = [0 0];\r\n % first fluid phase\r\n % specify coordinates of edges for fluid\r\n f_lowleft = [-0.1 0.0]+origin;\r\n f_lowright = [ 0.1 0.0]+origin;\r\n f_upleft = [-0.1 0.2]+origin;\r\n f_upright = [ 0.1 0.2]+origin;\r\n % properties of fluid (will be stored)\r\n flag = 2;\r\n rho_0(flag) = 1; % density \r\n gamma(flag) = 7; % pressure exponent\r\n % artificial speed of sound, c_0 = 10 * v_max = 10 * 
sqrt(g*H)\r\n c_0(flag) = 10*v_max;\r\n p_0(flag) = rho_0(flag) * c_0(flag)^2 / gamma(flag);% reference pressure \r\n Xi(flag) = 0.0 * p_0(flag); % background pressure\r\n my(flag) = 0.01; % viscosity\r\n alpha(flag) = 0.02; % artificial visc factor\r\n % initial velocities\r\n vel = [0,0];\r\n % create particle matrix\r\n fluid = create_fluid(dx, f_lowleft, f_lowright, f_upleft, f_upright); \r\n particles = [];\r\n [particles, int_f1] = initialisation(particles, fluid, flag, rho_0(flag), dx, d, vel);\r\n\r\n %% second fluid phase\r\n multi_ph = 0;\r\n if multi_ph == 1\r\n f_lowleft = [-0.1 0.0]+origin;\r\n f_lowright = [ 0.1 0.0]+origin;\r\n f_upleft = [-0.1 0.2]+origin;\r\n f_upright = [ 0.1 0.2]+origin;\r\n % properties of fluid (will be stored)\r\n flag = 3;\r\n rho_0(flag) = 1; % density\r\n gamma(flag) = 7; % pressure exponent\r\n % artificial speed of sound, c_0 = 10 * v_max = 10 * sqrt(g*H)\r\n c_0(flag) = 10*v_max;\r\n p_0(flag) = rho_0(flag) * c_0(flag)^2 / gamma(flag); % reference pressure \r\n Xi(flag) = 0 * p_0(flag); % background pressure\r\n my(flag) = 0.01; % viscosity\r\n alpha(flag) = 0.02; % artificial visc factor\r\n % initial velocities\r\n vel = [0,0];\r\n % create particle matrix\r\n %fluid = create_fluid(dx, f_lowleft, f_lowright, f_upleft, f_upright);\r\n fluid = create_boundary(3, dx, f_lowleft, f_lowright, f_upleft, f_upright);\r\n [particles, int_f2] = initialisation(particles, fluid, flag, rho_0(flag), dx, d, vel);\r\n \r\n % integer of fluid particles in matrix\r\n int_fluid = 1:max(int_f2);\r\n else\r\n int_fluid = 1:max(int_f1);\r\n end\r\n \r\n %% specify coordinates of edges for boundary\r\n% b_lowleft = [-0.25 0.0]+origin;\r\n% b_lowright = [ 0.25 0.0]+origin;\r\n% b_upleft = [-0.25 0.5]+origin;\r\n% b_upright = [ 0.25 0.5]+origin;\r\n %properties\r\n v_wall = [0,0]; % prescribed wall velocity\r\n a_wall = [0,0]; % wall acceleration\r\n int_boundary = 0;\r\n% flag = 1; % for boundary\r\n% %set artificial viscosity of boundary to 0\r\n% alpha(flag) = 0;\r\n% % create boundary matrix\r\n% boundary = create_boundary(kernel, dx, b_lowleft, b_lowright, b_upleft, b_upright);\r\n% [particles, int_boundary] = initialisation(particles, boundary, flag, 0, dx, d, [0,0]);\r\n% particles(int_boundary,4) = v_wall(1);\r\n% particles(int_boundary,5) = v_wall(2);\r\n% \r\n %% plot initial positions of particles\r\n figure(1)\r\n hold on \r\n plot(particles(int_f1,1), particles(int_f1,2), '.')\r\n axis([-.4 0.4 -0.2 0.6])\r\n if multi_ph == 1\r\n plot(particles(int_f2,1), particles(int_f2,2), 'g.')\r\n end\r\n% plot(particles(int_boundary,1), particles(int_boundary,2), 'r.')\r\n% plot([b_upleft(1,1), b_lowleft(1,1), b_lowright(1,1), b_upright(1,1)],...\r\n% [b_upleft(1,2), b_lowleft(1,2), b_lowright(1,2), b_upright(1,2)], 'r', 'linewidth', 2)\r\n axis('equal')\r\n \r\n \r\nend"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "der_color_field.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/der_color_field.m", "size": 1449, "source_encoding": "utf_8", "md5": "5ce0f648cfe22f24743494652a876eb1", "text": "%% Calculating the gradient of the smoothed color field\r\n%part of the Surface area minimization in Akinci Surface tension model\r\n%\r\n% Roger Gonzalez\r\n% 04/07/2017\r\n\r\nfunction [boundary_der_color_field] = der_color_field(particles, a, b, r_c, h, rho_0, p_0, Xi, gamma, eqn_of_state, range)\r\n%\r\n% a - fluid particle wrt which boundary values will be calculated\r\n% b - particle in question\r\n% r_c - 
kernel sampling range\r\n% h - initial distance between particles = dx\r\n\r\n\r\nboundary_der_color_field = 0;\r\n\r\n for k = range{b}(2:end)\r\n \r\n if particles(k,3) == 1\r\n rho_b = boundary_rho(particles, a, rho_0, p_0, Xi, gamma,eqn_of_state);\r\n m_b = rho_0(particles(a,3)) * h*h;\r\n else\r\n rho_b = particles(k,5);\r\n m_b = particles(k,4);\r\n end\r\n %distance between particles\r\n drx = particles(b,1) - particles(k,1);\r\n dry = particles(b,2) - particles(k,2);\r\n rad = sqrt(drx^2 + dry^2);\r\n W_der = kernel_der(1, 2, h, rad)/h;\r\n\r\n boundary_der_color_field = boundary_der_color_field + (m_b/rho_b) *W_der;\r\n end\r\n\r\n % Serves to make the term scale independent\r\n boundary_der_color_field = r_c * boundary_der_color_field;\r\n \r\n %Warning Signs\r\n if isnan(boundary_der_color_field)\r\n fprintf('Mass \\n',m_b);\r\n fprintf('desity \\n',rho_b);\r\n error('derivative of the color field is NaN')\r\n end\r\nend"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "Meniscus.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/Meniscus.m", "size": 3808, "source_encoding": "utf_8", "md5": "78b1db669f26941672c04687dafe35fd", "text": "%% Surface Tension Force model\r\n% Roger Gonzalez\r\n% 21/06/17\r\n\r\nfunction [particles, rho_0,gamma,c_0,p_0,Xi,my,alpha, a_wall, int_fluid, int_boundary] = Meniscus(kernel, dx, d, v_max, alpha)\r\n\r\n % rectangular domain\r\n origin = [0 0];\r\n % first fluid phase\r\n % specify coordinates of edges for fluid\r\n f_lowleft = [-1.5 0.0]+origin;\r\n f_lowright = [ 1.5 0.0]+origin;\r\n f_upleft = [-1.5 0.5]+origin;\r\n f_upright = [ 1.5 0.5]+origin;\r\n % properties of fluid (will be stored)\r\n flag = 2;\r\n rho_0(flag) = 1000; % density \r\n gamma(flag) = 7; % pressure exponent\r\n % artificial speed of sound, c_0 = 10 * v_max = 10 * sqrt(g*H)\r\n c_0(flag) = 10*v_max;\r\n p_0(flag) = rho_0(flag) * c_0(flag)^2 / gamma(flag);% reference pressure \r\n Xi(flag) = 0.0 * p_0(flag); % background pressure\r\n my(flag) = 0.01; % viscosity\r\n alpha(flag) = 0.02; % artificial visc factor\r\n % initial velocities\r\n vel = [0,0];\r\n % create particle matrix\r\n fluid = create_fluid(dx, f_lowleft, f_lowright, f_upleft, f_upright); \r\n particles = [];\r\n [particles, int_f1] = initialisation(particles, fluid, flag, rho_0(flag), dx, d, vel);\r\n\r\n %% second fluid phase\r\n multi_ph = 0;\r\n if multi_ph == 1\r\n f_lowleft = [-0.2 0.0]+origin;\r\n f_lowright = [ 0.0 0.0]+origin;\r\n f_upleft = [-0.2 1.2]+origin;\r\n f_upright = [ 0.0 1.2]+origin;\r\n % properties of fluid (will be stored)\r\n flag = 3;\r\n rho_0(flag) = 1; % density\r\n gamma(flag) = 7; % pressure exponent\r\n % artificial speed of sound, c_0 = 10 * v_max = 10 * sqrt(g*H)\r\n c_0(flag) = 10*v_max;\r\n p_0(flag) = rho_0(flag) * c_0(flag)^2 / gamma(flag); % reference pressure \r\n Xi(flag) = 0 * p_0(flag); % background pressure\r\n my(flag) = 0.01; % viscosity\r\n alpha(flag) = alpha; % artificial visc factor\r\n % initial velocities\r\n vel = [0,0];\r\n % create particle matrix\r\n fluid = create_fluid(dx, f_lowleft, f_lowright, f_upleft, f_upright);\r\n [particles, int_f2] = initialisation(particles, fluid, flag, rho_0(flag), dx, d, vel);\r\n \r\n % integer of fluid particles in matrix\r\n int_fluid = 1:max(int_f2);\r\n else\r\n int_fluid = 1:max(int_f1);\r\n end\r\n \r\n %% specify coordinates of edges for boundary\r\n b_lowleft = [-1.5 0.0]+origin;\r\n b_lowright = [ 1.5 0.0]+origin;\r\n b_upleft = [-1.5 1.0]+origin;\r\n 
b_upright = [ 1.5 1.0]+origin;\r\n % properties\r\n v_wall = [0,0]; % prescribed wall velocity\r\n a_wall = [0,0]; % wall acceleration\r\n flag = 1; % for boundary\r\n %set artificial viscosity of boundary to 0\r\n alpha(flag) = 0;\r\n % create boundary matrix\r\n boundary = create_boundary(kernel, dx, b_lowleft, b_lowright, b_upleft, b_upright);\r\n [particles, int_boundary] = initialisation(particles, boundary, flag, 0, dx, d, [0,0]);\r\n particles(int_boundary,4) = v_wall(1);\r\n particles(int_boundary,5) = v_wall(2);\r\n \r\n %% plot initial positions of particles\r\n figure(1)\r\n hold on \r\n plot(particles(int_f1,1), particles(int_f1,2), '.')\r\n if multi_ph == 1\r\n plot(particles(int_f2,1), particles(int_f2,2), 'g.')\r\n end\r\n plot(particles(int_boundary,1), particles(int_boundary,2), 'r.')\r\n plot([b_upleft(1,1), b_lowleft(1,1), b_lowright(1,1), b_upright(1,1)],...\r\n [b_upleft(1,2), b_lowleft(1,2), b_lowright(1,2), b_upright(1,2)], 'r', 'linewidth', 2)\r\n axis('equal')\r\n \r\n \r\nend\r\n\r\n\r\n"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "PairwiseForce.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/PairwiseForce.m", "size": 5508, "source_encoding": "utf_8", "md5": "79c32e9db906e42ecb4c0644cbf4e43a", "text": "%% Surface Tension Force model\r\n% Roger Gonzalez\r\n% 30/05/17\r\n\r\nfunction [PF, Virial_Pressure, adhesion_F, der_color_field_i] = PairwiseForce(ST_model, particles, h, dist, a, b, domain, rho_0, p_0, r_c, rho_b, m_b, Xi, gamma, eqn_of_state, int_boundary,idx_all)\r\n% Have to individually import mass and density for the boundary particle\r\n\r\n\r\n%% Cosine Pairwise Force\r\nif ST_model == 1\r\n phase_flag = abs(particles(a,3) - particles(b,3));\r\n %Calibration constants for surface tension\r\n if length(domain{a}) < 27 || phase_flag ~= 0\r\n S_ab = (16^2)*0.00001; %different fluid phases or surface particle\r\n VP_switch = 0;\r\n elseif phase_flag == 0 \r\n S_ab = (16^2)*0.1; %same fluid phase or bulk particle\r\n VP_switch = 1; % To ensure virial pressure only adds when all particles are in same phase\r\n end\r\n\r\n if dist < h\r\n PF = -100*S_ab*cos(1.5*pi*dist / (3*h));\r\n Virial_Pressure = 0;\r\n else\r\n PF = 0;\r\n xi = (h^3) *(-8 +9*pi^2) / (27*pi^2);\r\n Virial_Pressure = - xi*(particles(a,5)^2)*S_ab * VP_switch;\r\n end\r\n %Specific energy due to the pairwise force model\r\n% PF_spec_E = (8/81) * ((h^4)/(pi^4)) * (9/4*pi^3 - 6*pi -4) * particles(a,5) * particles(b,5) * S_ab;\r\n% assignin(ws, 'PF_spec_E', PF_spec_E);\r\nend\r\n\r\n%% PF-3\r\nif ST_model ==3\r\n phase_flag = abs(particles(a,3) - particles(b,3));\r\n if length(domain{a}) < 27 || phase_flag ~= 0 \r\n S_ab = (16^2)*2; %same fluid phase\r\n VP_switch = 1; % To ensure virial pressure only adds when all particles are in same phase\r\n else\r\n S_ab = (16^2)*0.00001; %different fluid phases\r\n VP_switch = 0;\r\n end\r\n\r\n if dist <= r_c\r\n eps=r_c/3.5;\r\n eps_0 = eps/2;\r\n A = (eps/eps_0)^3;\r\n psi_eps = exp(-(dist^2)/(2*(eps^2)));\r\n psi_eps_0 = exp(-(dist^2)/(2*(eps_0^2)));\r\n PF = S_ab*dist*(-A*psi_eps_0 + psi_eps);\r\n Virial_Pressure = 0;\r\n else\r\n PF = 0;\r\n xi = (r_c^3) *(-8 +9*pi^2) / (27*pi^2);\r\n Virial_Pressure = - xi*(particles(a,5)^2)*S_ab * VP_switch;\r\n end\r\n \r\nend\r\n\r\n%% Kernel Weighted Attraction (Becker & Teschner)\r\n% - Upon further consideration this model is pointless\r\n% - It is not useful with the kernel that we are using and less accurate\r\n% than Cohesion Force 
(Akinci)\r\n\r\nif ST_model == 4\r\n \r\n W = kernel_fct(1, 2, h, dist/h);\r\n PF = -( particles(b,4) / particles(a,4) ) * W;\r\n \r\n Virial_Pressure = 0;\r\n PF_spec_E = 0;\r\n \r\n if dist < 0.5*r_c\r\n PF=0;\r\n end\r\nend\r\n\r\n%% Cohesion Force model\r\n\r\nif ST_model == 5\r\n \r\n% NS = particles(:,1:2); %List of all particles\r\n% idx_all = rangesearch(NS,NS,r_c); %List of all particles within range of each fluid particle\r\n \r\n cohesion_spline = 0;\r\n adhesion_spline = 0;\r\n adhesion_F = 0;\r\n \r\n %Cohesion Force\r\n if dist <= r_c && dist > 0.5*r_c\r\n cohesion_spline = ((r_c-dist)^3)*(dist^3); \r\n elseif dist >= 0 && dist <= 0.5*r_c\r\n cohesion_spline = 2*((r_c-dist)^3)*(dist^3) - (r_c^6)/64;\r\n end \r\n cohesion_spline = (32/(pi*r_c^9))*cohesion_spline;\r\n cohesion_F = -particles(a,4) * particles(b,4) * cohesion_spline;\r\n \r\n %Surface Area Minimization\r\n %derivative of the color field for a fluid particle: particle in\r\n %question\r\n der_color_field_i = der_color_field(particles, a, a, r_c, h, rho_0, p_0, Xi, gamma, eqn_of_state, idx_all);\r\n \r\n % derivative of the color field of second particle\r\n der_color_field_j = der_color_field(particles, a, b, r_c, h, rho_0, p_0, Xi, gamma, eqn_of_state, idx_all);\r\n \r\n surf_area_min_F = -particles(a,4) * (der_color_field_i - der_color_field_j);\r\n \r\n if isnan(surf_area_min_F) || isnan(der_color_field_i) || isnan(der_color_field_i)\r\n fprintf('mass of fluid particle: %d \\n ',particles(a,4));\r\n fprintf('derivative of color field of a: %d \\n', der_color_field_i);\r\n fprintf('derivative of color field of b: %d \\n', der_color_field_j);\r\n error('Surface Area Minimization force is NaN')\r\n end\r\n \r\n %Adhesion Force\r\n if particles(b,3) == 1\r\n sum_boundary_vol = Boundary_Sampling_density(particles, 2, h, r_c, int_boundary, 1);\r\n temp_index = b - int_boundary(1)+1;\r\n BSD = sum_boundary_vol(temp_index);\r\n if dist > 0.5*r_c && dist <= r_c\r\n adhesion_spline = (0.007/(r_c^3.25)) * (-(4*dist^2)/r_c + 6*dist - 2*r_c)^(1/4);\r\n end\r\n adhesion_F = - particles(a,4) * rho_0(particles(a,3)) * BSD * adhesion_spline;\r\n end\r\n \r\n % Correction Factor to amplify forces of the particles with\r\n % neighbourhood deficiency\r\n corr_factor=(2*rho_0(particles(a,3))) / ( particles(a,5) + particles(b,5) );\r\n contact_angle = 120;\r\n gamma = 1-0.75*cosd(contact_angle);\r\n beta = 1+abs(0.5*cosd(contact_angle));\r\n% gamma=0.1;\r\n% beta=1.5;\r\n PF = corr_factor * gamma * (cohesion_F) + beta * adhesion_F;\r\n\r\n% % virial Pressure\r\n% Cosine Force Virial Pressure\r\n% eps = (h^3 *(9*(pi^2)-8))/(27*(pi^2));\r\n% Virial_Pressure = -eps * (particles(a,5)^2) * gamma\r\n\r\n% %Derived Virial Pressure Term\r\n% if dist <= r_c && dist > 0.5*r_c\r\n% eps = 1/1260; \r\n% elseif dist >= 0 && dist <= 0.5*r_c\r\n% eps = 61/645120;\r\n% end \r\n% Virial_Pressure = - pi * particles(a,5)^2 * gamma * particles(a,4) * particles(b,4) * r_c^3 * eps;\r\nend\r\n Virial_Pressure=0;\r\n\r\nend"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "save_vtu.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/save_vtu.m", "size": 4555, "source_encoding": "utf_8", "md5": "20ab9ee84fcea2f326bfb92a45538c04", "text": "% Script to plot data from SPH slosh simulation\r\n\r\nfunction [] = save_vtu(particles,n_save, dirname)\r\n\r\n% specify n_save = 0 for boundary particles\r\n\r\n%% saving directory\r\n% check for existence of paraviewfiles/vtu directory. 
this is the directory where\r\n% the .vtu files will be stored. if it does not exist create it\r\n% dirname='VTU_Results';\r\ndirstatus=exist(dirname,'dir');\r\nif(dirstatus==0)\r\n mkdir(dirname)\r\nend\r\n\r\n\t\r\n%% test vtu output (ascii)\r\ni = n_save;\r\n%for i = 1:size(save_pos,3)\r\n\t% specify file name \r\n if n_save ~= 0\r\n name = strcat('slosh',num2str(i,'%3.3d'),'.dat');\r\n fid=eval(['fopen(''',dirname,'/PART' num2str(i,'%3.3d') '.vtu' ''',''w'' );']);\r\n else % for boundary particles\r\n %name = strcat('slosh',num2str(i,'%3.3d'),'.dat');\r\n fid=eval(['fopen(''',dirname,'/BOUND' num2str(i,'%3.3d') '.vtu' ''',''w'' );']);\r\n i = 1;\r\n end\r\n \r\n % specify data to store/ output\r\n % particles(:,:) = save_pos(:,:,i);\r\n np = size(particles,1);\r\n xp=particles(:,1); % position\r\n zp=particles(:,2);\r\n up=particles(:,6); % velocity\r\n wp=particles(:,7);\r\n rhop=particles(:,5); % density\r\n P=particles(:,8); % pressure\r\n\r\n % numbering\r\n idiv=2;\r\n nbeg1=1;\r\n nb=0;\r\n nend1=nb;\r\n nbeg2=nb+1;\r\n nend2=np;\r\n\r\n % output to file in vtu format\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n',np,np);\r\n\r\n % write in pressure data\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n fprintf(fid,'%f\\t',P(ii));\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n\r\n % write density data\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n fprintf(fid,'%f\\t',rhop(ii));\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n\r\n % this section is used to color different particles based the input idiv specified above.\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:idiv\r\n eval(['nbeg=nbeg' int2str(ii) ';'])\r\n eval(['nend=nend' int2str(ii) ';'])\r\n for jj=nbeg:nend\r\n fprintf(fid,'%f\\t',ii);\r\n fprintf(fid,'\\n');\r\n end\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n\r\n % write velocity data\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n vel=[up(ii) 0 wp(ii)];\r\n fprintf(fid,'%f\\t %f\\t %f\\t',vel);\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n\r\n % write particle position data\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n pos=[xp(ii) 0 zp(ii)];\r\n fprintf(fid,'%f\\t %f\\t %f\\t',pos);\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n\r\n % write cell data. 
cell is of type vertex.\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n fprintf(fid,'%d\\t',ii-1);\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n fprintf(fid,'%d\\t',ii);\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n for ii=1:np\r\n fprintf(fid,'%d\\t',1);\r\n fprintf(fid,'\\n');\r\n end\r\n fprintf(fid,'\\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,' \\r\\n');\r\n fprintf(fid,'');\r\n fclose(fid);\r\n\r\n%end\r\n\r\n\t"} +{"plateform": "github", "repo_name": "Regon94/Smooth-Particle-Hydrodynamics-master", "name": "Drop.m", "ext": ".m", "path": "Smooth-Particle-Hydrodynamics-master/Drop.m", "size": 3806, "source_encoding": "utf_8", "md5": "e800b4320755500092ff36b0628c2c6c", "text": "%% Surface Tension Force model\r\n% Roger Gonzalez\r\n% 03/07/2017\r\n\r\nfunction [particles, rho_0,gamma,c_0,p_0,Xi,my,alpha, a_wall, int_fluid, int_boundary] = Drop(kernel, dx, d, v_max, alpha)\r\n\r\n % rectangular domain\r\n origin = [0 0];\r\n % first fluid phase\r\n % specify coordinates of edges for fluid\r\n f_lowleft = [-0.1 0.0]+origin;\r\n f_lowright = [ 0.1 0.0]+origin;\r\n f_upleft = [-0.1 0.1]+origin;\r\n f_upright = [ 0.1 0.1]+origin;\r\n % properties of fluid (will be stored)\r\n flag = 2;\r\n rho_0(flag) = 1000; % density \r\n gamma(flag) = 7; % pressure exponent\r\n % artificial speed of sound, c_0 = 10 * v_max = 10 * sqrt(g*H)\r\n c_0(flag) = 10*v_max;\r\n p_0(flag) = rho_0(flag) * c_0(flag)^2 / gamma(flag);% reference pressure \r\n Xi(flag) = 0.0 * p_0(flag); % background pressure\r\n my(flag) = 0.01; % viscosity\r\n alpha(flag) = 0.02; % artificial visc factor\r\n % initial velocities\r\n vel = [0,0];\r\n % create particle matrix\r\n fluid = create_fluid(dx, f_lowleft, f_lowright, f_upleft, f_upright); \r\n particles = [];\r\n [particles, int_f1] = initialisation(particles, fluid, flag, rho_0(flag), dx, d, vel);\r\n\r\n %% second fluid phase\r\n multi_ph = 0;\r\n if multi_ph == 1\r\n f_lowleft = [-0.2 0.0]+origin;\r\n f_lowright = [ 0.0 0.0]+origin;\r\n f_upleft = [-0.2 1.2]+origin;\r\n f_upright = [ 0.0 1.2]+origin;\r\n % properties of fluid (will be stored)\r\n flag = 3;\r\n rho_0(flag) = 1; % density\r\n gamma(flag) = 7; % pressure exponent\r\n % artificial speed of sound, c_0 = 10 * v_max = 10 * sqrt(g*H)\r\n c_0(flag) = 10*v_max;\r\n p_0(flag) = rho_0(flag) * c_0(flag)^2 / gamma(flag); % reference pressure \r\n Xi(flag) = 0 * p_0(flag); % background pressure\r\n my(flag) = 0.01; % viscosity\r\n alpha(flag) = alpha; % artificial visc factor\r\n % initial velocities\r\n vel = [0,0];\r\n % create particle matrix\r\n fluid = create_fluid(dx, f_lowleft, f_lowright, f_upleft, f_upright);\r\n [particles, int_f2] = initialisation(particles, fluid, flag, rho_0(flag), dx, d, vel);\r\n \r\n % integer of fluid particles in matrix\r\n int_fluid = 1:max(int_f2);\r\n else\r\n int_fluid = 1:max(int_f1);\r\n end\r\n \r\n %% specify coordinates of edges for boundary\r\n b_lowleft = [-0.2 0.0]+origin;\r\n b_lowright = [ 0.2 0.0]+origin;\r\n b_upleft = [-0.2 0.0]+origin;\r\n b_upright = [ 0.2 0.0]+origin;\r\n % properties\r\n v_wall = [0,0]; % prescribed wall velocity\r\n a_wall = [0,0]; % wall acceleration\r\n flag = 1; % for boundary\r\n %set artificial viscosity of boundary to 0\r\n alpha(flag) = 
0;\r\n % create boundary matrix\r\n boundary = create_boundary(kernel, dx, b_lowleft, b_lowright, b_upleft, b_upright);\r\n [particles, int_boundary] = initialisation(particles, boundary, flag, 0, dx, d, [0,0]);\r\n particles(int_boundary,4) = v_wall(1);\r\n particles(int_boundary,5) = v_wall(2);\r\n \r\n %% plot initial positions of particles\r\n figure(1)\r\n hold on \r\n plot(particles(int_f1,1), particles(int_f1,2), '.')\r\n if multi_ph == 1\r\n plot(particles(int_f2,1), particles(int_f2,2), 'g.')\r\n end\r\n plot(particles(int_boundary,1), particles(int_boundary,2), 'r.')\r\n plot([b_upleft(1,1), b_lowleft(1,1), b_lowright(1,1), b_upright(1,1)],...\r\n [b_upleft(1,2), b_lowleft(1,2), b_lowright(1,2), b_upright(1,2)], 'r', 'linewidth', 2)\r\n axis('equal')\r\n \r\n \r\nend\r\n\r\n\r\n"} +{"plateform": "github", "repo_name": "fan9193/exercise2-master", "name": "trandn.m", "ext": ".m", "path": "exercise2-master/trandn.m", "size": 3549, "source_encoding": "utf_8", "md5": "307219e197d890623614a7c90eb2bef8", "text": "function x=trandn(l,u)\r\n%% truncated normal generator\r\n% * efficient generator of a vector of length(l)=length(u)\r\n% from the standard multivariate normal distribution,\r\n% truncated over the region [l,u];\r\n% infinite values for 'u' and 'l' are accepted;\r\n% * Remark:\r\n% If you wish to simulate a random variable\r\n% 'Z' from the non-standard Gaussian N(m,s^2)\r\n% conditional on la;\r\nif any(I)\r\n tl=l(I); tu=u(I); x(I)=ntail(tl,tu);\r\nend\r\n% case 2: l0 and\r\n% l and u are column vectors;\r\n% uses acceptance-rejection from Rayleigh distr. \r\n% similar to Marsaglia (1964);\r\nc=l.^2/2; n=length(l); f=expm1(c-u.^2/2);\r\nx=c-reallog(1+rand(n,1).*f); % sample using Rayleigh\r\n% keep list of rejected\r\nI=find(rand(n,1).^2.*x>c); d=length(I);\r\nwhile d>0 % while there are rejections\r\n cy=c(I); % find the thresholds of rejected\r\n y=cy-reallog(1+rand(d,1).*f(I));\r\n idx=rand(d,1).^2.*ytol, uses accept-reject from randn\r\nI=abs(u-l)>tol; x=l;\r\nif any(I)\r\n tl=l(I); tu=u(I); x(I)=trnd(tl,tu);\r\nend\r\n% case: abs(u-l)u); d=length(I);\r\nwhile d>0 % while there are rejections\r\n ly=l(I); % find the thresholds of rejected\r\n uy=u(I);\r\n y=randn(size(ly));\r\n idx=y>ly&y fwfilt && b > fwfilt && a < sz_sm(1)-100 && b < sz_sm(2)-100; % For FW, use larger window to eliminate erroneous urban misclassifications\r\n\t\tC = file(a-fwfilt:a+fwfilt,b-fwfilt:b+fwfilt);\r\n\t\tidx = find(C == 0); % If any pixels in C are shadows, they are not included in the mode function\r\n\t\tC(idx) = [];\r\n\t\tmod = mode(mode(C)); % Mode of window (by definition, lower value is selected if more than one mode value)\r\n\t\tif isnan(mod) == 1;\r\n\t\t\tdt_filt(a,b) = 0; % If NaN, assign zero because Arc won't load DT tiffs w/ NaNs\r\n\t\telseif mod == FW; % Check if mode FW is actually urban tree shadow\r\n\t\t\tidxfor = find(C == dev | C == FU); % Find upland forest, grass, and developed nearby\r\n\t\t\tif size(idxfor,1)>0.10*size(C,1)*size(C,2); % > 10% is upland, grass, or developed\r\n\t\t\t\tdt_filt(a,b) = FU; % Assumed to be non-wetland forest\r\n\t\t\telse dt_filt(a,b) = FW;\r\n\t\t\tend\r\n\t\telse dt_filt(a,b) = FU;\r\n\t\tend\r\n elseif isnan(file(a,b)) == 0;\r\n\t\tC = file(a-filt:a+filt,b-filt:b+filt);\r\n idx = find(C == 0); % If any pixels in C are shadows, they are not included in the mode function\r\n C(idx) = [];\r\n mod = mode(mode(C)); % Identify most common value (if more than one value, lower value is selected automatically)\r\n if isnan(mod) == 1; % Check 
if mode of box is NaN (redundancy)\r\n dt_filt(a,b) = 0; % If NaN, assign zero (Arc won't load DT tiffs w/ NaNs)\r\n\t\t elseif mod == FW;\r\n\t\t\tD = dt_filt(a-filt:a,b-filt:b);\r\n\t\t\tmodD = mode(mode(D));\r\n\t\t\tdt_filt(a,b) = modD;\r\n else dt_filt(a,b) = mod; % If mod is upland, marsh, water or bare/developed, assign it as such\r\n end\r\n else\r\n dt_filt(a,b) = 0;\r\n end\r\n end\r\n end\r\nend\r\n"} +{"plateform": "github", "repo_name": "USF-IMARS/wv-land-cover-master", "name": "wv_classify.m", "ext": ".m", "path": "wv-land-cover-master/3d_wetlands/wv_classify.m", "size": 37674, "source_encoding": "utf_8", "md5": "a528dc999ab6d7c2903872339d10beb3", "text": "%% WV2 Processing\r\n% Loads TIFF WorldView-2 image files preprocessed through Polar Geospatial\r\n% Laboratory python code, which orthorectifies and projects .NTF files and outputs as\r\n% TIFF files\r\n% Radiometrically calibrates digital count data\r\n% Atmospherically corrects images by subtracting Rayleigh Path Radiance\r\n% Converts image to surface reflectance by accounting for Earth-Sun\r\n% distance, solar zenith angle, and average spectral irradiance\r\n% Tests and optionally corrects for sunglint\r\n% Corrects for water column attenuation\r\n% Runs Decision Tree classification on each image\r\n% Optionally smooths results through moving-window filter\r\n% Outputs images as GEOTIFF files with geospatial information.\r\n\r\nfunction dt_filt = WV_Processing(images,id,met,crd_sys,dt,filt,loc,idnumber,rrs_out,class_out);\r\n\r\ntic\r\nd_t = str2num(dt);\r\nn = num2str(idnumber);\r\nid\r\nmet\r\ncoor_sys = crd_sys; % Change coordinate system code here\r\nfilter = str2num(filt);\r\nloc_out = rrs_out;\r\n\r\n% Assign constants for all images\r\nebw1 = 0.001*[47.3 54.3 63.0 37.4 57.4 39.3 98.9 99.6]; % Effective Bandwidth per WV2 band (nm converted to um units; from IMD metadata files)\r\nebw2 = 0.001*[40.5 54.0 61.8 38.1 58.5 38.7 100.4 88.9]; % WV3\r\nirr1 = [1758.2229 1974.2416 1856.4104 1738.4791 1559.4555 1342.0695 1069.7302 861.2866]; % Band-averaged Solar Spectral Irradiance (W/m2/um units)\r\nirr2 = [1757.89 2004.61 1830.18 1712.07 1535.33 1348.08 1055.94 858.77]; % WV3 (from Radiometric Use of WorldView-3 Imagery, Thuiller 2003 column Table 3)\r\ncw1 = [.4273 .4779 .5462 .6078 .6588 .7237 .8313 .9080]; % Center wavelength (used for Rayleigh correction; from Radiometric Use of WorldView-2 Imagery)\r\ncw2 = [.4274 .4819 .5471 .6043 .6601 .7227 .8240 .9136]; % WV3\r\ngamma = 0.01*[1.499 1.471 1.442 1.413 1.413 1.413 1.384 1.384]; % Factor used in Rayleigh Phase Function equation (Bucholtz 1995)\r\n\r\n [A, R] = geotiffread(images);\r\n szA = size(A);\r\n s = xml2struct(met);\r\n% save XMLtest.mat s\r\n % Extract calibration factors and acquisition time from metadata for each band\r\n if isfield(s,'IMD') == 1\r\n szB(1) = str2num(s.IMD.SOURCE_IMD.IMD.NUMROWS.Text); %#ok<*ST2NM>\r\n szB(2) = str2num(s.IMD.SOURCE_IMD.IMD.NUMCOLUMNS.Text);\r\n \t kf(1,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_C.ABSCALFACTOR.Text);\r\n \t kf(2,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_B.ABSCALFACTOR.Text);\r\n\t kf(3,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_G.ABSCALFACTOR.Text);\r\n\t kf(4,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_Y.ABSCALFACTOR.Text);\r\n\t kf(5,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_R.ABSCALFACTOR.Text);\r\n\t kf(6,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_RE.ABSCALFACTOR.Text);\r\n\t kf(7,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_N.ABSCALFACTOR.Text);\r\n\t kf(8,1) = str2num(s.IMD.SOURCE_IMD.IMD.BAND_N2.ABSCALFACTOR.Text);\r\n\t 
aqyear = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.FIRSTLINETIME.Text(12:15)); % Extract Acquisition Time from metadata\r\n\t aqmonth = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.FIRSTLINETIME.Text(17:18)); % Extract Acquisition Time from metadata\r\n \t \t aqday = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.FIRSTLINETIME.Text(20:21)); % Extract Acquisition Time from metadata\r\n \t aqhour = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.FIRSTLINETIME.Text(23:24)); % Extract Acquisition Time from metadata\r\n aqminute = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.FIRSTLINETIME.Text(26:27)); % Extract Acquisition Time from metadata\r\n\t \t aqsecond = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.FIRSTLINETIME.Text(29:37)); % Extract Acquisition Time from metadata\r\n\t \t sunel = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.MEANSUNEL.Text); % Extract Mean Sun Elevation angle from metadata\r\n satview = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.MEANOFFNADIRVIEWANGLE.Text); % Extract Mean Off Nadir View angle from metadata\r\n\t sunaz = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.MEANSUNAZ.Text);\r\n sensaz = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.MEANSATAZ.Text);\r\n satel = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.MEANSATEL.Text);\r\n cl_cov = str2num(s.IMD.SOURCE_IMD.IMD.IMAGE.CLOUDCOVER.Text);\r\n\r\n\telseif isfield(s,'isd') == 1\r\n\t\t szB(1) = str2num(s.isd.IMD.NUMROWS.Text);\r\n szB(2) = str2num(s.isd.IMD.NUMCOLUMNS.Text);\r\n \t kf(1,1) = str2num(s.isd.IMD.BAND_C.ABSCALFACTOR.Text);\r\n \t kf(2,1) = str2num(s.isd.IMD.BAND_B.ABSCALFACTOR.Text);\r\n\t kf(3,1) = str2num(s.isd.IMD.BAND_G.ABSCALFACTOR.Text);\r\n\t kf(4,1) = str2num(s.isd.IMD.BAND_Y.ABSCALFACTOR.Text);\r\n\t kf(5,1) = str2num(s.isd.IMD.BAND_R.ABSCALFACTOR.Text);\r\n\t kf(6,1) = str2num(s.isd.IMD.BAND_RE.ABSCALFACTOR.Text);\r\n\t kf(7,1) = str2num(s.isd.IMD.BAND_N.ABSCALFACTOR.Text);\r\n\t kf(8,1) = str2num(s.isd.IMD.BAND_N2.ABSCALFACTOR.Text);\r\n\t aqyear = str2num(s.isd.IMD.IMAGE.FIRSTLINETIME.Text(1:4)); % Extract Acquisition Time from metadata\r\n\t aqmonth = str2num(s.isd.IMD.IMAGE.FIRSTLINETIME.Text(6:7)); % Extract Acquisition Time from metadata\r\n \t \t aqday = str2num(s.isd.IMD.IMAGE.FIRSTLINETIME.Text(9:10)); % Extract Acquisition Time from metadata\r\n \t aqhour = str2num(s.isd.IMD.IMAGE.FIRSTLINETIME.Text(12:13)); % Extract Acquisition Time from metadata\r\n aqminute = str2num(s.isd.IMD.IMAGE.FIRSTLINETIME.Text(15:16)); % Extract Acquisition Time from metadata\r\n\t \t aqsecond = str2num(s.isd.IMD.IMAGE.FIRSTLINETIME.Text(18:26)); % Extract Acquisition Time from metadata\r\n\t \t sunel = str2num(s.isd.IMD.IMAGE.MEANSUNEL.Text); % Extract Mean Sun Elevation angle from metadata\r\n satview = str2num(s.isd.IMD.IMAGE.MEANOFFNADIRVIEWANGLE.Text); % Extract Mean Off Nadir View angle from metadata\r\n\t sunaz = str2num(s.isd.IMD.IMAGE.MEANSUNAZ.Text);\r\n sensaz = str2num(s.isd.IMD.IMAGE.MEANSATAZ.Text);\r\n satel = str2num(s.isd.IMD.IMAGE.MEANSATEL.Text);\r\n cl_cov = str2num(s.isd.IMD.IMAGE.CLOUDCOVER.Text);\r\n\telse\r\n c = struct2cell(s.Children(2).Children(:));\r\n\t idx{1} = strfind(c(1,:),'NUMROWS');\r\n idx{2} = strfind(c(1,:),'NUMCOLUMNS');\r\n idx{3} = strfind(c(1,:),'BAND_C');\r\n idx{4} = strfind(c(1,:),'BAND_B');\r\n \t idx{5} = strfind(c(1,:),'BAND_G');\r\n \t idx{6} = strfind(c(1,:),'BAND_Y');\r\n \t idx{7} = strfind(c(1,:),'BAND_R');\r\n \t idx{8} = strfind(c(1,:),'BAND_RE');\r\n \t idx{9} = strfind(c(1,:),'BAND_N');\r\n \t idx{10} = strfind(c(1,:),'BAND_N2');\r\n idx{11} = strfind(c(1,:),'IMAGE');\r\n for i = 1:11;\r\n idxb(i,1:2) = find(not(cellfun('isempty',idx{i})));\r\n end\r\n 
szB(1) = str2num(s.Children(2).Children(idxb(1)).Children.Data);\r\n \t szB(2) = str2num(s.Children(2).Children(idxb(2)).Children.Data);\r\n\t kf(1,1) = str2num(s.Children(2).Children(idxb(3)).Children(26).Children.Data);\r\n\t kf(2,1) = str2num(s.Children(2).Children(idxb(4)).Children(26).Children.Data);\r\n\t kf(3,1) = str2num(s.Children(2).Children(idxb(5)).Children(26).Children.Data);\r\n\t kf(4,1) = str2num(s.Children(2).Children(idxb(6)).Children(26).Children.Data);\r\n\t kf(5,1) = str2num(s.Children(2).Children(idxb(7,1)).Children(26).Children.Data);\r\n\t kf(6,1) = str2num(s.Children(2).Children(idxb(8)).Children(26).Children.Data);\r\n\t kf(7,1) = str2num(s.Children(2).Children(idxb(9,1)).Children(26).Children.Data);\r\n\t kf(8,1) = str2num(s.Children(2).Children(idxb(10)).Children(26).Children.Data);\r\n\t aqyear = str2num(s.Children(2).Children(idxb(11,2)).Children(16).Children.Data(1:4));\r\n\t aqmonth = str2num(s.Children(2).Children(idxb(11,2)).Children(16).Children.Data(6:7));\r\n\t aqday = str2num(s.Children(2).Children(idxb(11,2)).Children(16).Children.Data(9:10));\r\n\t aqhour = str2num(s.Children(2).Children(idxb(11,2)).Children(16).Children.Data(12:13));\r\n\t aqminute = str2num(s.Children(2).Children(idxb(11,2)).Children(16).Children.Data(15:16));\r\n\t aqsecond = str2num(s.Children(2).Children(idxb(11,2)).Children(16).Children.Data(18:26));\r\n\t sunel = str2num(s.Children(2).Children(idxb(11,2)).Children(56).Children.Data);\r\n\t sunaz = str2num(s.Children(2).Children(idxb(11,2)).Children(50).Children.Data);\r\n\t satview = str2num(s.Children(2).Children(idxb(11,2)).Children(86).Children.Data);\r\n\t sensaz = str2num(s.Children(2).Children(idxb(11,2)).Children(62).Children.Data);\r\n\t satel = str2num(s.Children(2).Children(idxb(11,2)).Children(68).Children.Data);\r\n cl_cov = str2num(s.Children(2).Children(idxb(11,2)).Children(90).Children.Data);\r\n end\r\n \r\n szB(3) = 8;\r\n\r\n % Assign WV2 vs WV3 constant calibration factors\r\n\tif id(4) == '3'\r\n \t\t ebw = ebw2;\r\n \t\t irr = irr2;\r\n\t cw = cw2;\r\n\telse ebw = ebw1;\r\n\t irr = irr1;\r\n \t cw = cw1;\r\n\tend\r\n\r\n\t% Identify growing season vs senesced\r\n\tif aqmonth == 11 || aqmonth == 12 || aqmonth == 1 || aqmonth == 2\r\n\t\tseason = 0;\r\n\telse season = 1;\r\n\tend\r\n\t %% Calculate Earth-Sun distance and relevant geometry\r\n\t if aqmonth == 1 || aqmonth == 2;\r\n\t year = aqyear -1;\r\n\t month = aqmonth + 12;\r\n\t else year = aqyear;\r\n\t month = aqmonth;\r\n\t end\r\n\t UT = aqhour + (aqminute/60.0) + (aqsecond/3600.0); % Convert time to UT\r\n\t B1 = int64(year/100);\r\n\t B2 = 2-B1+int64(B1/4);\r\n\t JD = (int64(365.25*(year+4716)) +int64(30.6001*(month+1)) + aqday + UT/24.0 + B2 - 1524.5); % Julian date\r\n\t D = JD - 2451545.0;\r\n\t degs = double(357.529 + 0.98560028*D); % Degrees\r\n\t ESd = 1.00014 - 0.01671*cosd(degs) - 0.00014*cosd(2*degs); % Earth-Sun distance at given date (should be between 0.983 and 1.017)\r\n\t\r\n\t inc_ang = 90.0 - sunel;\r\n\t TZ = cosd(inc_ang); % Atmospheric spectral transmittance in solar path with solar zenith angle\r\n\t TV = cosd(satview); % Atmospheric spectral transmittance in view path with satellite view angle\r\n\r\n\t %% Calculate Rayleigh Path Radiance (Dash et al. 
2012 and references therein)\r\n\t if sunaz > 180 % For the following equations, azimuths should be between -180 and +180 degrees\r\n\t sunaz = sunaz - 360;\r\n\t end\r\n\t if sensaz > 180\r\n\t sensaz = sensaz - 360;\r\n\t end\r\n\t \r\n\t az = abs(sensaz - 180 - sunaz); % Relative azimuth angle\r\n\t thetaplus = acosd(cosd(90-sunel)*cosd(90-satel) - sind(90-sunel)*sind(90-satel)*cosd(az)); % Scattering angles\r\n\r\n for d = 1:8;\r\n Pr(d) = (3/(4*(1+2*gamma(d))))*((1+3*gamma(d))+(1-gamma(d))*cosd(thetaplus)^2); % Rayleigh scattering phase function (described in Bucholtz 1995)\r\n end\r\n \r\n\t for d = 1:8;\r\n\t tau(d) =(0.008569*(cw(d)^-4)*(1 + 0.0113*(cw(d)^-2) + 0.00013*cw(d)^-4)); % Rayleigh optical thickness (assume standard pressure of 1013.25 mb)\r\n\t end\r\n\t \r\n\t % Rayleigh calculation (Dash et al., 2012)\r\n\t for d = 1:8;\r\n\t ray_rad{1,1}(d) = ((irr(1,d)/ESd)*1*tau(d)*Pr(d))/(4*pi*cosd(90-satel)); % Assume standard pressure (1013.25 mb)\r\n\t end\r\n\r\n\t % rrs constant calculation (Kerr et al. 2018 and Mobley 1994)\r\n\tG = single(1.7); % constant Li et al. 2019\r\n na = 1.00029; % Refractive index of air\r\n nw = 1.34; % Refractive index seawater\r\n inc_ang2 = real(asind(sind(90-satel)*nw/na)); % Incident angle for water-air from Snell's Law\r\n trans_aw = real(asind(sind(inc_ang)*na/nw)); % Transmission angle for air-water incident light from Snell's Law\r\n\t trans_wa = 90-satel; % Transmission angle for water-air incident light from Snell's Law\r\n\t pf1 = real(0.5*((sind(inc_ang - trans_aw)/(sind(inc_ang + trans_aw)))^2 + (tand(inc_ang - trans_aw)/(tand(inc_ang + trans_aw)))^2)); % Fresnel reflectance for air-water incident light (Mobley 1994)\r\n\t pf2 = real(0.5*((sind(inc_ang2 - trans_wa)/(sind(inc_ang2 + trans_wa)))^2 + (tand(inc_ang2 - trans_wa)/(tand(inc_ang2 + trans_wa)))^2));\r\n\t zeta = real(single((1-pf1)*(1-pf2)/(nw^2))); % rrs constant (~0.52) from Mobley 1994\r\n\r\n\r\n % Adjust file size: Input file (A) warped may contain more or fewer columns/rows than original NITF file, and some may be corrupt.\r\n sz(1) = min(szA(1),szB(1));\r\n sz(2) = min(szA(2),szB(2));\r\n sz(3) = 8;\r\n\r\n %% Assign NaN to no-data pixels and radiometrically calibrate and convert to Rrs\r\n Rrs = single(zeros(szA(1),szA(2),8)); % Create empty matrix for Rrs output\r\n for j = 1:sz(1); % Assign NaN to pixels of no data\r\n for k = 1:sz(2); % If a pixel contains data values other than \"zero\" or \"two thousand and forty seven\" in any band, it is calibrated; otherwise, it is considered \"no-data\" - this avoids a problem created during the orthorectification process wherein reprojecting the image may resample data\r\n if (A(j,k,1)) ~= 0 && (A(j,k,1)) ~= 2047 || (A(j,k,2)) ~= 0 && (A(j,k,2)) ~= 2047 || (A(j,k,3)) ~= 0 && (A(j,k,3)) ~= 2047 || (A(j,k,4)) ~= 0 && (A(j,k,4)) ~= 2047 || (A(j,k,5)) ~= 0 && (A(j,k,5)) ~= 2047 || (A(j,k,6)) ~= 0 && (A(j,k,6)) ~= 2047 || (A(j,k,7)) ~= 0 && (A(j,k,7)) ~= 2047 || (A(j,k,8)) ~= 0 && (A(j,k,8)) ~= 2047;\r\n for d = 1:8;\r\n Rrs(j,k,d) = single((pi*((single(A(j,k,d))*kf(d,1)/ebw(1,d)) - ray_rad{1,1}(1,d))*ESd^2)/(irr(1,d)*TZ*TV)); % Radiometrically calibrate and convert to Rrs (adapted from Radiometric Use of WorldView-2 Imagery(\r\n end\r\n else Rrs(j,k,:) = NaN;\r\n end\r\n end\r\n end\r\n\r\n clear A\r\n\r\n %% Output reflectance image\r\n% if Rrs_write == 1;\r\n%\t\tif id(4) == '3'\r\n%\t\t\tinfo = geotiffinfo(images);\r\n%\t\t\tgeoTags = info.GeoTIFFTags.GeoKeyDirectoryTag;\r\n%\t\t\ttiffTags = 
struct('TileLength',1024,'TileWidth',1024);\r\n%\t\t\tZ = [loc_out,id,'_',loc,'_RrsBT']\r\n%\t\t\tgeotiffwrite(Z,Rrs,R(1,1),'GeoKeyDirectoryTag',geoTags,'TiffType','bigtiff','TiffTags',tiffTags);\r\n%\t\telse\r\n%\t Z = [loc_out,id,'_',loc,'_Rrs']\r\n%\t geotiffwrite(Z,Rrs,R(1,1),'CoordRefSysCode',coor_sys);\r\n%\t\tend\r\n% end\r\n\r\n\tif d_t > 0; % Run DT and/or rrs conversion; otherwise end\r\n\r\n\t%% Setup for Deglint, Bathymetry, and Decision Tree\r\n\tb = 1;\r\n t = 1;\r\n\t u = 1;\r\n y = 0;\r\n\t v = 0;\r\n\t num_pix = 0;\r\n\t sum_SD(b) = 0;\r\n\t sum_veg(t) = 0;\r\n\t sum_veg2(t) = 0;\r\n\t dead_veg(t) = 0;\r\n\t sum_water_rrs(u) = 0;\r\n\t sz_ar = sz(1)*sz(2);\r\n\t water = zeros(sz_ar,9);\r\n\t for j = 1:sz(1);\r\n\t for k = 1:sz(2);\r\n\t if isnan(Rrs(j,k,1)) == 0\r\n num_pix = num_pix +1; % Count number of non-NaN pixels\r\n c_val(num_pix) = Rrs(j,k,1); % Record coastal band value for use in cloud mask prediction\r\n if (Rrs(j,k,7) - Rrs(j,k,2))/(Rrs(j,k,7) + Rrs(j,k,2)) < 0.65 && Rrs(j,k,5) > Rrs(j,k,4) && Rrs(j,k,4) > Rrs(j,k,3) % Sand & Developed\r\n sum_SD(b) = sum(Rrs(j,k,6:8));\r\n b = b+1;\r\n elseif (Rrs(j,k,8) - Rrs(j,k,5))/(Rrs(j,k,8) + Rrs(j,k,5)) > 0.65 && Rrs(j,k,7) > Rrs(j,k,3); % Identify vegetation (excluding grass)\r\n if ((Rrs(j,k,7) - Rrs(j,k,2))/(Rrs(j,k,7) + Rrs(j,k,2))) > 0.20; % Shadow filter\r\n\t sum_veg(t) = sum(Rrs(j,k,3:5)); % Sum bands 3-5 for selected veg to distinguish wetland from upland\r\n\t\t\t sum_veg2(t) = sum(Rrs(j,k,7:8));\r\n dead_veg(t) = (((Rrs(j,k,7) - Rrs(j,k,4))/3) + Rrs(j,k,4)) - Rrs(j,k,5); % Compute difference of predicted B5 value from actual valute\r\n\t t = t+1;\r\n end\r\n \t\t elseif Rrs(j,k,8) < 0.11 && Rrs(j,k,1) > 0 && Rrs(j,k,2) > 0 && Rrs(j,k,3) > 0 && Rrs(j,k,4) > 0 && Rrs(j,k,5) > 0 && Rrs(j,k,6) > 0 && Rrs(j,k,7) > 0 && Rrs(j,k,8) > 0; % Identify glint-free water\r\n water(u,1:8) = double(Rrs(j,k,:));\r\n\t\t\twater_rrs(1:6) = Rrs(j,k,1:6)./(zeta + G.*Rrs(j,k,1:6));\r\n\t\t\tif water_rrs(4) > water_rrs(2) && water_rrs(4) < 0.12 && water_rrs(5) < water_rrs(3)\r\n\t\t\t\tsum_water_rrs(u) = sum(water_rrs(3:5));\r\n\t\t\tend\r\n u = u+1;\r\n \t\tif Rrs(j,k,8)Rrs(j,k,7) && Rrs(j,k,6)>Rrs(j,k,7) && Rrs(j,k,6)>Rrs(j,k,5) && Rrs(j,k,4)>Rrs(j,k,5) && Rrs(j,k,4)>Rrs(j,k,3)\r\n v = v+1;\r\n water(u,9) = 3; % Mark array2>array1 glinted pixels\r\n else water(u,9) = 1; % Mark records of glint-free water\r\n end\r\n elseif Rrs(j,k,8)Rrs(j,k,7) && Rrs(j,k,6)>Rrs(j,k,7) && Rrs(j,k,6)>Rrs(j,k,5) && Rrs(j,k,4)>Rrs(j,k,5) && Rrs(j,k,4)>Rrs(j,k,3)\r\n water(u,9) = 3; % Mark array2>array1 glinted pixels\r\n water(u,1:8) = double(Rrs(j,k,:));\r\n u = u+1;\r\n \t\t\tv = v+1;\r\n% elseif (Rrs(j,k,4)-Rrs(j,k,8))/(Rrs(j,k,4)+Rrs(j,k,8)) < 0.55 && Rrs(j,k,8) < 0.2 && (Rrs(j,k,7)-Rrs(j,k,2))/(Rrs(j,k,7)+Rrs(j,k,2)) < 0.1 && (Rrs(j,k,8)-Rrs(j,k,5))/(Rrs(j,k,8)+Rrs(j,k,5)) < 0.3 && Rrs(j,k,1) > 0 && Rrs(j,k,2) > 0 && Rrs(j,k,3) > 0 && Rrs(j,k,4) > 0 && Rrs(j,k,5) > 0 && Rrs(j,k,6) > 0 && Rrs(j,k,7) > 0 && Rrs(j,k,8) > 0; % Identify glinted water\r\n% water(u,1:8) = double(Rrs(j,k,:));\r\n% u = u+1;\r\n% \t\t\t v = v+1;\r\n\r\n end\r\n\t end\r\n\t end\r\n\t end\r\n\t\tn_water = u; % Number of water pixels used to derive E_glint relationships\r\n\t\tn_glinted = v; % Number of glinted water pixels\r\n\r\n\t\tidx = find(water(:,1) == 0);\r\n\t\twater(idx,:) = [];\r\n water7 = water(:,7);\r\n water8 = water(:,8);\r\n mnNIR1 = min(water7(water7>0)); % Positive minimum Band 7 value used for deglinting\r\n mnNIR2 = min(water8(water8>0)); % Positive 
minimum Band 8 value used for deglinting\r\n \r\n idx_gf = find(water(:,9)==1); % Glint-free water\r\n\twater_gf = water(idx_gf,1:8);\r\n\r\n% Identify optically deep water average spectrum\r\n bn = 7; % Band number\r\n pctl_l = 5; % Percentile (5th percentile value of glint-free water n-band values chosen based on visual analysis of density slicing of Rrs image)\r\n pctl_u = 15;\r\n clear water_gfidx water_odw m0 m1\r\n water_gfidx = find(water_gf(:,bn) == prctile(water_gf(:,bn),pctl_l) & water_gf(:,bn) <= prctile(water_gf(:,bn),pctl_u));\r\n water_odw(:,1:8) = (water_gf(water_gfidx(1:end),1:8)); % Li et al. Dove BGR corresponds to WV2 BGY center wavelengths\r\n\r\n% Equations from Li et al. 2019 & Hu et al. 2012\r\n for h = 1:size(water_odw,1)\r\n% w1(h) = water_odw(h,3) - (water_odw(h,1) + (546-427)/(659-427)*(water_odw(h,5) - water_odw(h,1))); % Hu et al. 2012\r\n w2(h) = water_odw(h,3) - 0.46*water_odw(h,4) - 0.54*water_odw(h,1); % Li et al. 2019\r\n end\r\n\r\n\tif exist('w2')==1 \r\n\t w = median(w2(w2<0));\r\n\telse w = 0;\r\n\tend\r\n\r\n if w > -0.0005\r\n m0 = 0;\r\n m1 = 0;\r\n Update = 'Too Turbid for Benthic Mapping'\r\n else\r\n chla = 10^(-0.4909 + 191.659*w) % Hu et al. 2012 (Kerr limited chla to 1.0mg/m3; 0.1 mg/m3 WV Cay Sal most accurate value used)\r\n m0 = 52.083*exp(2.711*chla) % Revised from Li et al. 2019 with exponential scalar derived from Kerr FK WV image field data tuning parameters\r\n m1 = 50.156*exp(2.711*chla) % TARGET: 64.3 +/- 0.5 & 62.6 +/- 0.5, Predicted: 67.2 & 64.7\r\n end\r\n\r\n\tKd = [0.036 0.037 0.075 0.25 0.415]; %1.416]; %(Based on Kerr 2018 Fig 7a chl-conc 0.1 mg/m3 i.e. lowest RMSE water-depth predictor values)\r\n\r\n\t\tif v > 0.25*u\r\n\t\t\tUpdate = 'Deglinting'\r\n \t\tid2 = 'deglinted';\r\n \tfor b = 1:6 %% Calculate linear fitting of all MS bands vs NIR1 & NIR2 for deglinting in DT (Hedley et al. 2005)\r\n \tif b == 1 || b == 4 || b == 6\r\n slope1 = water(:,b)\\water(:,8);\r\n \t\t else slope1 = water(:,b)\\water(:,7);\r\n \tend\r\n \tE_glint(1,b) = single(slope1);\r\n \tend\r\n\t\t\tE_glint % = [0.8075 0.7356 0.8697 0.7236 0.9482 0.7902]\r\n\t\telse Update = 'Glint-free'\r\n id2 = 'glintfree';\r\n end\r\n \r\n %% Edge Detection via Morphological Index (improved over Huang & Zhang 2011, Ma et al. 
2019)\r\n waterind = uint16((Rrs(:,:,3)-Rrs(:,:,8))./(Rrs(:,:,3)+Rrs(:,:,8)) > 0.15);\r\n img_sub2 = Rrs(:,:,2);\r\n img_sub5 = Rrs(:,:,5);\r\n img_sub7 = Rrs(:,:,7);\r\n\r\n Rrs_cloud = img_sub2./img_sub7;\r\n Rrs_cl2 = Rrs_cloud;\r\n Rrs_cl3 = Rrs_cloud;\r\n Rrs_cl2(Rrs_cloud >= 0.7) = 1;\r\n Rrs_cl2(Rrs_cloud < 0.7) = 0;\r\n Rrs_cl3(Rrs_cloud <= 0.9) = 1;\r\n Rrs_cl3(Rrs_cloud > 0.9) = 0;\r\n Rrs_clf = Rrs_cl2 + Rrs_cl3;\r\n Rrs_clf(Rrs_clf < 2) = 0;\r\n CLrrs = imbinarize(Rrs_clf);\r\n CL1 = uint16(imtophat(CLrrs,strel('disk',100))) - waterind;\r\n CL1(CL1<0) = 0;\r\n CLe = imerode(CL1,strel('disk',20));\r\n CLed = imdilate(CLe,strel('disk',150));\r\n Cloud = imfill(CLed,'holes');\r\n\tclear Rrs_cl2 Rrs_cl3 Rrs_clf CLrrs CL1 CLe CLed\r\n\r\n\tRrs_sh1 = Rrs_cloud;\r\n\tRrs_sh2 = Rrs_cloud;\r\n\tRrs_sh1(Rrs_cloud >= 1.3) = 1;\r\n\tRrs_sh1(Rrs_cloud < 1.3) = 0;\r\n\tRrs_sh2(Rrs_cloud <= 1.7) = 1;\r\n\tRrs_sh2(Rrs_cloud > 1.7) = 0;\r\n\tRrs_shf = Rrs_sh1 + Rrs_sh2;\r\n\tRrs_shf(Rrs_shf < 2) = 0;\r\n\tSHrrs = imbinarize(Rrs_shf);\r\n\tShadow = uint16(imtophat(SHrrs,strel('square',20)));\r\n\tclear Rrs_sh1 Rrs_sh2 Rrs_shf SHrrs\r\n\r\n Rrs_map = img_sub5./img_sub7;\r\n\tRrs_map2 = Rrs_map;\r\n\tRrs_map3 = Rrs_map;\r\n\tRrs_map2(Rrs_map >= 0.7) = 1;\r\n\tRrs_map2(Rrs_map < 0.7) = 0;\r\n\tRrs_map3(Rrs_map <= 1.1) = 1;\r\n\tRrs_map3(Rrs_map > 1.1) = 0;\r\n\tRrs_mapf = Rrs_map2 + Rrs_map3;\r\n\tRrs_mapf(Rrs_mapf < 2) = 0;\r\n BWrrs = imbinarize(Rrs_mapf);\r\n\r\n BW1 = uint16(imtophat(BWrrs,strel('square',30))) - waterind;\r\n\tBW1 = imdilate(BW1,strel('square',5)); % Expand developed to include shadows\r\n\tBW1(BW1<0) = 0;\r\n\r\n Cloud = Cloud - BW1;\r\n Cloud(Cloud<0) = 0;\r\n cld_idx = 0;\r\n if size(find(Cloud ==1),1) > 0.060*szA(1)*szA(2)\r\n cld_idx = 1;\r\n end\r\n\r\n\r\n%\tns = 2000;\r\n%\tBW = uint16(imtophat(BWrrs,strel('square',ns)));\r\n%\tCC = bwconncomp(BW);\r\n%\tnumPixels = cellfun(@numel,CC.PixelIdxList);\r\n%\tBW1idx = find(numPixels > 1000);\r\n%\tCC.PixelIdxList = CC.PixelIdxList(BW1idx);\r\n%\tCC.NumObjects = size(BW1idx,2);\r\n%\tBW3 = uint16(labelmatrix(CC));\r\n%\tBW3(BW3>0) = 1;\r\n%\tBW3e = uint16(imerode(BW3,strel('disk',100)));\r\n%\tBW3ed = uint16(imdilate(BW3e,strel('square',200)));\r\n%\tBW4 = imfill(BW3ed,'holes');\r\n\r\n\tBAI = (img_sub2 - img_sub7)./(img_sub2 + img_sub7); % Built Area Index\r\n\tBAI = BAI * -1; % Dev & soil negative, soil more negative (water high positive)\r\n\tBAI = imbinarize(BAI);\r\n\tBAI = imerode(BAI,strel('square',5));\r\n\r\n\tclear BW3 BW3e BW3ed BW2 BWrrs BWnew BWnewe BW1idx\r\n\r\n\tZtest = [loc_out,id,'_',loc,'_BW1']\r\n geotiffwrite(Ztest,BW1,R(1,1),'CoordRefSysCode',coor_sys);\r\n Ztest = [loc_out,id,'_',loc,'_BAI']\r\n geotiffwrite(Ztest,BAI,R(1,1),'CoordRefSysCode',coor_sys);\r\n\r\n\r\n\r\n %% Determine Rrs-infinite from glint-free water pixels\r\n rrs_inf = [0.00512 0.00686 0.008898 0.002553 0.001506 0.000403]; % Derived from Rrs_Kd_Model.xlsx for Default values\r\n\r\n %% Calculate target class metrics\r\n avg_SD_sum = mean(sum_SD(:));\r\n stdev_SD_sum = std(sum_SD(:));\r\n\tavg_veg_sum = mean(sum_veg(:))\r\n\tavg_dead_veg = mean(dead_veg(:));\r\n\tavg_mang_sum = mean(sum_veg2(:));\r\n\tidx_water2 = find(sum_water_rrs==0);\r\n\tsum_water_rrs(idx_water2) = [];\r\n\tavg_water_sum = mean(sum_water_rrs(:));\r\n\r\n\t if cl_cov > 0\r\n\t\t num_cld_pix = round(num_pix*cl_cov*0.01); % Number of cloud pixels (rounded down to nearest integer) based on metadata-reported percent cloud cover\r\n\t\t srt_c = sort(c_val,'descend'); % 
Sort all pixel blue-values in descending order. Cloud mask threshold will be num_cld_pix'th highest value\r\n\t\t cld_mask = srt_c(num_cld_pix); % Set cloud mask threshold\r\n\t else cld_mask = max(c_val)+1;\r\n\t end\r\n\r\n\r\n\t Bathy = single(zeros(szA(1),szA(2))); % Preallocate for Bathymetry\r\n\t Rrs_deglint = single(zeros(5,1)); % Preallocate for deglinted Rrs\r\n\t Rrs_0 = single(zeros(5,1)); %Preallocation for water-column corrected Rrs\r\n \t map = zeros(szA(1),szA(2),'uint8'); % Create empty matrix for classification output\r\n\r\n\tif d_t == 1; % Execute Deglinting, rrs, Bathymetry\r\n if v > u*0.25\r\n for j = 1:szA(1)\r\n for k = 1:szA(2)\r\n if isnan(Rrs(j,k,1)) == 0 && Rrs(j,k,8)<0.2\r\n % Deglint equation\r\n Rrs_deglint(1,1) = (Rrs(j,k,1) - (E_glint(1)*(Rrs(j,k,8) - mnNIR2)));\r\n Rrs_deglint(2,1) = (Rrs(j,k,2) - (E_glint(2)*(Rrs(j,k,7) - mnNIR1)));\r\n Rrs_deglint(3,1) = (Rrs(j,k,3) - (E_glint(3)*(Rrs(j,k,7) - mnNIR1)));\r\n Rrs_deglint(4,1) = (Rrs(j,k,4) - (E_glint(4)*(Rrs(j,k,8) - mnNIR2)));\r\n Rrs_deglint(5,1) = (Rrs(j,k,5) - (E_glint(5)*(Rrs(j,k,7) - mnNIR1)));\r\n Rrs_deglint(6,1) = (Rrs(j,k,6) - (E_glint(6)*(Rrs(j,k,8) - mnNIR2)));\r\n\r\n % Convert above-surface Rrs to below-surface rrs (Kerr et al. 2018)\r\n Rrs_0(1:5) = Rrs_deglint(j,k,1:5)./(zeta + G.*Rrs_deglint(j,k,1:5)); % Convert above-surface Rrs to subsurface rrs (Kerr et al. 2018, Lee et al. 1998)\r\n b1 = 63.6; % Turning parameters (Kerr 2018)\r\n b0 = -60.25;\r\n dp = b1*real(log(1000*Rrs_0(2))/log(1000*Rrs_0(3))) + b0; % Calculate depth (Stumpf 2003 ratio transform with Kerr et al. 2018 coefficients)\r\n if dp < 15 && dp > 0 % Parameters based on Kerr 2018 RMSE-based recommended constraints (depths greater than 15m inaccurate)\r\n Bathy(j,k) = dp;\r\n end\r\n for d = 1:5\r\n Rrs(j,k,d) = real(((Rrs_0(d)-rrs_inf(d))/exp(-2*Kd(1,d)*dp))+rrs_inf(d)); % Calculate water-column corrected benthic reflectance (Traganos 2017 & Maritorena 1994)\r\n end\r\n end\r\n end\r\n end\r\n else % For glint-free/low-glint images\r\n for j = 1:szA(1)\r\n for k = 1:szA(2)\r\n if isnan(Rrs(j,k,1)) == 0 && Rrs(j,k,8)<0.2\r\n Rrs_0(1:5) = Rrs(j,k,1:5)./(zeta + G.*Rrs(j,k,1:5)); % Convert above-surface Rrs to subsurface rrs (Kerr et al. 2018, Lee et al. 1998)\r\n b1 = 63.6; % Turning parameters (Kerr 2018 Table 6 average of 2 forward-modeling WorldView-2 results)\r\n b0 = -60.25;\r\n dp = b1*real(log(1000*Rrs_0(2))/log(1000*Rrs_0(3))) + b0; % Calculate depth (Stumpf 2003 ratio transform with Kerr et al. 
2018 coefficients)\r\n if dp < 15 && dp > 0 % Parameters based on Kerr 2018 RMSE-based recommended constraints (depths greater than 15m inaccurate)\r\n Bathy(j,k) = dp;\r\n else dp = 0;\r\n end\r\n for d = 1:5\r\n Rrs(j,k,d) = real(((Rrs_0(d)-rrs_inf(d))/exp(-2*Kd(1,d)*dp))+rrs_inf(d)); % Calculate water-column corrected benthic reflectance (Traganos 2017 & Maritorena 1994)\r\n end\r\n end\r\n end\r\n end\r\n end\r\n\telseif d_t == 2; % Only run for Deglinted Rrs and Bathymetry, not Decision Tree\r\n\t update = 'Running DT'\r\n\tBS = 2;\r\n\tWA = 3;\r\n\tDG = 5;\r\n\tMA = 6;\r\n\tSC = 7;\r\n\tFW = 10;\r\n\tFU = 9;\r\n\tUG = 8;\r\n\tdev = 11;\r\n p = 1;\r\n for j = 1:szA(1)\r\n for k = 1:szA(2)\r\n if isnan(Rrs(j,k,1)) == 0\r\n %% Cloud Cover\r\n\t\t if Cloud(j,k) == 1 && BW1(j,k) ~= 1\r\n map(j,k) = 1; % Cloud\r\n %% Vegetation\r\n elseif (Rrs(j,k,7) - Rrs(j,k,5))/(Rrs(j,k,7) + Rrs(j,k,5)) > 0.20 && Rrs(j,k,7) > Rrs(j,k,3) % Vegetation pixels (NDVI)\r\n\t\t\t if ((Rrs(j,k,7) - Rrs(j,k,2))/(Rrs(j,k,7) + Rrs(j,k,2))) < 0.20 && (Rrs(j,k,7) - Rrs(j,k,8))/(Rrs(j,k,7) + Rrs(j,k,8)) > 0.01; % Shadowed-vegetation filter (B7/B8 ratio excludes marsh, which tends to have very similar values here)\r\n \tmap(j,k) = 0; % Shadow\r\n elseif sum(Rrs(j,k,3:5)) < avg_veg_sum\r\n if (Rrs(j,k,3) - Rrs(j,k,8))/(Rrs(j,k,3) + Rrs(j,k,8)) > -0.75 % ML\r\n if (Rrs(j,k,7) - Rrs(j,k,5))/(Rrs(j,k,7) + Rrs(j,k,5)) > 0.75 % M\r\n\t map(j,k) = FW; % Forested Wetland\r\n elseif sum(Rrs(j,k,3:5)) > 0.12 && sum(Rrs(j,k,7:8)) > 0.45 % ML\r\n map(j,k) = FU; % FORESTED UPLAND\r\n elseif (Rrs(j,k,7) - Rrs(j,k,5))/(Rrs(j,k,7) + Rrs(j,k,5)) > 0.60\r\n map(j,k) = FW; % Forested Wetland\r\n elseif Rrs(j,k,7) < 0.3 && sum(Rrs(j,k,7:8)) > 0.25\r\n\t\t\t\t\tif (Rrs(j,k,5) - Rrs(j,k,3))/(Rrs(j,k,5) + Rrs(j,k,3)) > 0.1\r\n\t\t\t\t\t\tmap(j,k) = DG; % Dead Grass\r\n\t\t\t\t\telseif Rrs(j,k,7) < 0.27 && sum(Rrs(j,k,7:8)) < 0.5\r\n\t\t\t\t\t\tmap(j,k) = MA; % Marsh\r\n\t\t\t\t\telse map(j,k) = FU; % Forested Upland\r\n\t\t\t\t\tend\r\n end\r\n\t\t\t\telseif (Rrs(j,k,4) - Rrs(j,k,5))/(Rrs(j,k,4) + Rrs(j,k,5)) > 0.08\r\n\t\t\t\t\tmap(j,k) = 6; % Marsh (was algal flat)\r\n\t\t\t\telse map(j,k) = FU; % Forested Upland\r\n end\r\n\t\t\t elseif (Rrs(j,k,8) - Rrs(j,k,5))/(Rrs(j,k,8) + Rrs(j,k,5)) > 0.65\r\n\t\t\t\tmap(j,k) = FU; % Forested Upland\r\n elseif Rrs(j,k,7) < 0.4 % Marsh, Scrub, Grass, Dead Veg\r\n\t\t\t\tif (Rrs(j,k,4) - Rrs(j,k,5))/(Rrs(j,k,4) + Rrs(j,k,5)) > 0.08\r\n\t\t\t\t\tmap(j,k) = 6; % Marsh (was algal flat)\r\n\t\t\t\telseif (Rrs(j,k,5) - Rrs(j,k,3))/(Rrs(j,k,5) + Rrs(j,k,3)) > 0.05 %&& Rrs(j,k,7) < 0.27 % Agriculture or senesced veg/grass\r\n\t\t\t\t\tmap(j,k) = DG; % Dead veg\r\n\t\t\t\telse map(j,k) = UG; % Grass\r\n\t\t\t\tend\r\n%\t\t\t elseif sum(Rrs(j,k,7:8)) < 0.8 && sum(Rrs(j,k,7:8)) > 0.65 % Live grass high, dead grass low\r\n%\t\t\t\tmap(j,k) = 10; % Upland Forest\r\n\t\t\t else map(j,k) = SC; % Scrub/shrub\r\n end\r\n %% Developed and Soil\r\n elseif (Rrs(j,k,7) - Rrs(j,k,2))/(Rrs(j,k,7) + Rrs(j,k,2)) < 0.60 && Rrs(j,k,5) > Rrs(j,k,4) && waterind(j,k) == 0 %Rrs(j,k,8) > 0.1 % && Rrs(j,k,4) > Rrs(j,k,3)\r\n if Rrs(j,k,5)/Rrs(j,k,7) > 0.7 && Rrs(j,k,5)/Rrs(j,k,7) < 1.1\r\n\t\t\t if BAI(j,k) == 0 && BW1(j,k) == 1 %BW4(j,k) == 1\r\n\t\t\t\t\tmap(j,k) = dev; %Developed. 
Was: BS; % Soil (fallow field)\r\n\t\t\t elseif BAI(j,k) == 1 && BW1(j,k) == 0\r\n\t\t\t\t\tmap(j,k) = BS; % Soil\r\n elseif BW1(j,k) == 1\r\n if sum(Rrs(j,k,1:2))<0.35\r\n if sum(Rrs(j,k,6:8)) < 0.85%avg_SD_sum\r\n map(j,k) = dev; % Developed\r\n else map(j,k) = BS; % Soil\r\n end\r\n elseif sum(Rrs(j,k,1:2)) > 0.6\r\n map(j,k) = dev;\r\n\t\t\t\telse map(j,k) = dev;\r\n end\r\n elseif sum(Rrs(j,k,6:8)) < avg_SD_sum\r\n map(j,k) = dev;\r\n else map(j,k) = dev; % Developed\r\n end\r\n else map(j,k) = BS; % Soil\r\n end\r\n %% Water\r\n elseif Rrs(j,k,8)<0.2 && Rrs(j,k,8)>0|| Rrs(j,k,8)0 || Rrs(j,k,8)>Rrs(j,k,7) && Rrs(j,k,6)>Rrs(j,k,7) && Rrs(j,k,6)>Rrs(j,k,5) && Rrs(j,k,4)>Rrs(j,k,5) && Rrs(j,k,4)>Rrs(j,k,3) && Rrs(j,k,8)>0% Identify all water (glinted and glint-free)\r\n if v > u*0.25 && u>0.1*num_pix\r\n % Deglint equation\r\n Rrs_deglint(1,1) = (Rrs(j,k,1) - (E_glint(1)*(Rrs(j,k,8) - mnNIR2)));\r\n Rrs_deglint(2,1) = (Rrs(j,k,2) - (E_glint(2)*(Rrs(j,k,7) - mnNIR1)));\r\n Rrs_deglint(3,1) = (Rrs(j,k,3) - (E_glint(3)*(Rrs(j,k,7) - mnNIR1)));\r\n Rrs_deglint(4,1) = (Rrs(j,k,4) - (E_glint(4)*(Rrs(j,k,8) - mnNIR2)));\r\n Rrs_deglint(5,1) = (Rrs(j,k,5) - (E_glint(5)*(Rrs(j,k,7) - mnNIR1)));\r\n Rrs_deglint(6,1) = (Rrs(j,k,6) - (E_glint(6)*(Rrs(j,k,8) - mnNIR2)));\r\n\r\n % Convert above-surface Rrs to below-surface rrs (Kerr et al. 2018)\r\n Rrs_0(1:5) = Rrs_deglint(1:5)./(zeta + G.*Rrs_deglint(1:5)); % Was Rrs_0=\r\n % Relative depth estimate\r\n dp = m0*real(log(1000*Rrs_0(1))/log(1000*Rrs_0(3))) - m1; % Calculate depth (Stumpf 2003 ratio transform with Kerr et al. 2018 coefficients)\r\n\r\n if dp < 15 && dp > 0 % Parameters based on Kerr 2018 RMSE-based recommended constraints (depths greater than 15m inaccurate)\r\n Bathy(j,k) = dp;\r\n else dp = 0;\r\n end\r\n\r\n% for d = 1:5\r\n% Rrs(j,k,d) = real(((Rrs_0(d)-rrs_inf(d))/exp(-2*Kd(1,d)*dp))+rrs_inf(d)); % Calculate water-column corrected benthic reflectance (Traganos 2017 & Maritorena 1994)\r\n% end\r\n\r\n %% DT\r\n if Shadow(j,k) == 1 && max(Rrs(j,k,:)) == Rrs(j,k,2) % Max band3-6 = turbid/shallow water\r\n map(j,k) = 0; % Shadow\r\n else map(j,k) = WA; % Deep water\r\n end\r\n else % For glint-free/low-glint images\r\n Rrs_0(1:5) = Rrs(j,k,1:5)./(zeta + G.*Rrs(j,k,1:5)); % Convert above-surface Rrs to subsurface rrs (Kerr et al. 2018, Lee et al. 1998)\r\n dp = m0*real(log(1000*Rrs_0(2))/log(1000*Rrs_0(3))) - m1; % Calculate depth (Stumpf 2003 ratio transform with Kerr et al. 
2018 coefficients)\r\n if dp < 15 && dp > 0 % Parameters based on Kerr 2018 RMSE-based recommended constraints (depths greater than 15m inaccurate)\r\n Bathy(j,k) = dp;\r\n else dp = 0;\r\n end\r\n %% DT\r\n if Shadow(j,k) == 1 && max(Rrs(j,k,:)) == Rrs(j,k,2) % Max band3-6 = turbid/shallow water\r\n map(j,k) = 0; % Shadow/Unclassified\r\n else map(j,k) = WA; % Deep water\r\n% end\r\n end\r\n end % if v>u\r\n end % If water/land\r\n end % If isnan\r\n end % k\r\n\r\n end % j\r\n end\r\n\r\n\r\n%% DT Filter\r\n if filter > 0\r\n update = 'Filtering'\r\n dt_filt = DT_Filter(map,filter,sz(1),sz(2),dev,FW,FU,UG,WA);\r\n if cld_idx == 1\r\n AA = [loc_out,id,'_',loc,'_SOALCHI_filt_',num2str(filter),'_Cloudy'];\r\n else AA = [loc_out,id,'_',loc,'_SOALCHI_filt_',num2str(filter)];\r\n end\r\n geotiffwrite(AA,dt_filt,R(1,1),'CoordRefSysCode',coor_sys);\r\n else\r\n Z1 = [loc_out,id,'_',loc,'_Map_nofilt'];\r\n geotiffwrite(Z1,map,R(1,1),'CoordRefSysCode',coor_sys);\r\n end\r\n\r\n% TP(z,1) = m0;\r\n% TP(z,2) = m1;\r\n% TP(z,3) = chla;\r\n\r\n %% Output images\r\n% Z = [loc_out,id,'_',loc,'_Bathy_MAv1'];\r\n% geotiffwrite(Z,Bathy,R(1,1),'CoordRefSysCode',coor_sys);\r\n\r\n% Z2 = [Rrs_out,id,'_',loc,'_Rrs']; % last=52\r\n% geotiffwrite(Z2,Rrs,R(1,1),'CoordRefSysCode',coor_sys);\r\n%\r\n end % If dt>0\r\n\r\n\r\n wtime = toc;\r\n time_min = wtime/60;\r\n fprintf(1,'Matlab CPU time (minutes) = %f\\n', time_min);\r\n\r\nend\r\n\r\n"} +{"plateform": "github", "repo_name": "ruihou/caffe-3d-master", "name": "classification_demo.m", "ext": ".m", "path": "caffe-3d-master/matlab/demo/classification_demo.m", "size": 5466, "source_encoding": "utf_8", "md5": "45745fb7cfe37ef723c307dfa06f1b97", "text": "function [scores, maxlabel] = classification_demo(im, use_gpu)\n% [scores, maxlabel] = classification_demo(im, use_gpu)\n%\n% Image classification demo using BVLC CaffeNet.\n%\n% IMPORTANT: before you run this demo, you should download BVLC CaffeNet\n% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)\n%\n% ****************************************************************************\n% For detailed documentation and usage on Caffe's Matlab interface, please\n% refer to the Caffe Interface Tutorial at\n% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab\n% ****************************************************************************\n%\n% input\n% im color image as uint8 HxWx3\n% use_gpu 1 to use the GPU, 0 to use the CPU\n%\n% output\n% scores 1000-dimensional ILSVRC score vector\n% maxlabel the label of the highest score\n%\n% You may need to do the following before you start matlab:\n% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64\n% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6\n% Or the equivalent based on where things are installed on your system\n% and what versions are installed.\n%\n% Usage:\n% im = imread('../../examples/images/cat.jpg');\n% scores = classification_demo(im, 1);\n% [score, class] = max(scores);\n% Five things to be aware of:\n% caffe uses row-major order\n% matlab uses column-major order\n% caffe uses BGR color channel order\n% matlab uses RGB color channel order\n% images need to have the data mean subtracted\n\n% Data coming in from matlab needs to be in the order\n% [width, height, channels, images]\n% where width is the fastest dimension.\n% Here is the rough matlab code for putting image data into the correct\n% format in W x H x C with BGR channels:\n% % permute channels from RGB to BGR\n% im_data = im(:, :, [3, 
2, 1]);\n% % flip width and height to make width the fastest dimension\n% im_data = permute(im_data, [2, 1, 3]);\n% % convert from uint8 to single\n% im_data = single(im_data);\n% % reshape to a fixed size (e.g., 227x227).\n% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');\n% % subtract mean_data (already in W x H x C with BGR channels)\n% im_data = im_data - mean_data;\n\n% If you have multiple images, cat them with cat(4, ...)\n\n% Add caffe/matlab to your Matlab search PATH in order to use matcaffe\nif exist('../+caffe', 'dir')\n addpath('..');\nelse\n error('Please run this demo from caffe/matlab/demo');\nend\n\n% Set caffe mode\nif exist('use_gpu', 'var') && use_gpu\n caffe.set_mode_gpu();\n gpu_id = 0; % we will use the first gpu in this demo\n caffe.set_device(gpu_id);\nelse\n caffe.set_mode_cpu();\nend\n\n% Initialize the network using BVLC CaffeNet for image classification\n% Weights (parameter) file needs to be downloaded from Model Zoo.\nmodel_dir = '../../models/bvlc_reference_caffenet/';\nnet_model = [model_dir 'deploy.prototxt'];\nnet_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];\nphase = 'test'; % run with phase test (so that dropout isn't applied)\nif ~exist(net_weights, 'file')\n error('Please download CaffeNet from Model Zoo before you run this demo');\nend\n\n% Initialize a network\nnet = caffe.Net(net_model, net_weights, phase);\n\nif nargin < 1\n % For demo purposes we will use the cat image\n fprintf('using caffe/examples/images/cat.jpg as input image\\n');\n im = imread('../../examples/images/cat.jpg');\nend\n\n% prepare oversampled input\n% input_data is Height x Width x Channel x Num\ntic;\ninput_data = {prepare_image(im)};\ntoc;\n\n% do forward pass to get scores\n% scores are now Channels x Num, where Channels == 1000\ntic;\n% The net forward function. 
It takes in a cell array of N-D arrays\n% (where N == 4 here) containing data of input blob(s) and outputs a cell\n% array containing data from output blob(s)\nscores = net.forward(input_data);\ntoc;\n\nscores = scores{1};\nscores = mean(scores, 2); % take average scores over 10 crops\n\n[~, maxlabel] = max(scores);\n\n% call caffe.reset_all() to reset caffe\ncaffe.reset_all();\n\n% ------------------------------------------------------------------------\nfunction crops_data = prepare_image(im)\n% ------------------------------------------------------------------------\n% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that\n% is already in W x H x C with BGR channels\nd = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');\nmean_data = d.mean_data;\nIMAGE_DIM = 256;\nCROPPED_DIM = 227;\n\n% Convert an image returned by Matlab's imread to im_data in caffe's data\n% format: W x H x C with BGR channels\nim_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR\nim_data = permute(im_data, [2, 1, 3]); % flip width and height\nim_data = single(im_data); % convert from uint8 to single\nim_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data\nim_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)\n\n% oversample (4 corners, center, and their x-axis flips)\ncrops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');\nindices = [0 IMAGE_DIM-CROPPED_DIM] + 1;\nn = 1;\nfor i = indices\n for j = indices\n crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);\n crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);\n n = n + 1;\n end\nend\ncenter = floor(indices(2) / 2) + 1;\ncrops_data(:,:,:,5) = ...\n im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);\ncrops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "bipartite_modularity_diag_coupling.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/bipartite_modularity_diag_coupling.m", "size": 2782, "source_encoding": "utf_8", "md5": "9feea2fb0a17cc601ebb4f1774696b75", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\n\n% This function lays out the B matirx which can be fed to the Louvain\n% algorithm to calculate Q. It is programmed SPECIFICALLY for a a multilayer \n% network with 2 bipartite layers connected via one common set of nodes\n% (diagonal coupling).\n \nfunction [B_multilayer,twomu] = bipartite_modularity_diag_coupling(A1,A2,gamma,omega,show_Bmax)\n twomu=0; % Initialize twomu\n %==== Set up the modularity matrix for layer 1: B_1ayer1\n % This is simply modularity for a bipartite single-layer network\n %====\n [m,n]=size(A1);\n N1=m+n;\n k=sum(A1,2);\n d=sum(A1,1);\n mm=sum(k);\n B_bip=A1-gamma*k*d/mm;\n B=zeros(N1,N1);\n % One should be VERY careful in how the B matrix is composed. 
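    % For reference, the two assignments below are equivalent to the block
    % construction B = [zeros(m,m), B_bip; B_bip', zeros(n,n)]: rows and
    % columns 1..m index the first node set, m+1..N1 index the second, and
    % the bipartite modularity matrix B_bip fills the off-diagonal blocks.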
\n B(1:m,m+1:N1)=B_bip;\n B(m+1:N1,1:m)=B_bip';\n B_layer1=B;\n twomu=twomu+2*mm; % Add the 2*mm contributions:\n\n %==== Set up the modularity matrix for layer 2: B_1ayer2\n % This is simply modularity for a bipartite single-layer network\n %====\n [p,q]=size(A2);\n N2=p+q;\n k=sum(A2,2);\n d=sum(A2,1);\n mm=sum(k);\n B_bip=A2-gamma*k*d/mm;\n B=zeros(N2,N2);\n B(1:p,p+1:N2)=B_bip;\n B(p+1:N2,1:p)=B_bip';\n B_layer2=B;\n twomu=twomu+2*mm; % Add the 2*mm contributions:\n\n %The second term in twomu should be the total weight of all interlayer\n %edges, i.e.,since we only have interlayer edges between nodes in set1,\n %which is on the matrices rows, we use m:\n twomu=twomu+2*omega*m;\n\n %==== Now set up the B matrix of the multilayer\n % by combining the individual B matrices\n %====\n B_multilayer=zeros(N1+N2,N1+N2); % create the matrix, filled with zeros\n B_multilayer(1:N1,1:N1)=B_layer1; % Modularity matrix of layer 1 (set1 and set2)\n B_multilayer(N1+1:N1+N2,N1+1:N1+N2)=B_layer2; % Modularity matrix of layer 2 (set1 and set3)\n B_multilayer(N1+1:N1+m,1:m)=omega*diag(ones(m,1),0); % Interlayer interactions between the nodes which are in the rows (and hence the use of m)\n B_multilayer(1:m,N1+1:N1+m)=omega*diag(ones(m,1),0); % Interlayer interactions between the nodes which are in the rows (and hence the use of m)\n if ~issymmetric(B_multilayer)\n disp('The modularity matrix is NOT symmetric!!')\n end\n \n % The value above which there will be an abrupt drop in intra-layer\n % merges (see Bazzi et al 2015, pg. 32):\n if show_Bmax==1\n m1=max(max(B_layer1));\n m2=max(max(B_layer2));\n disp(max(m1,m2));\n end\nend\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "modularity_weighted_multilayer_null2.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/modularity_weighted_multilayer_null2.m", "size": 4423, "source_encoding": "utf_8", "md5": "a439e7f70710149743c202654e640b2b", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\n\n% This function is for the second null model from the publication\n% (reshuffling node identities)\n\n% The function takes as input if to permute the order of rows (=hosts) or\n% columns(=parasites) by entering 1 or 2, respectively. It also receives\n% the number of realizations to perform and the output folder to where\n% results are written. It writes as output a vector (Q) with the maximum\n% modularity values calculated by the function and a matrix (S) with module\n% affiliations of the state nodes. The number of columns in S corresponds\n% to the number of realizations (reshuffled networks) and the number of\n% rows is of length num_layers*num_nodes. The post processing of the output\n% is done in R and expalined in the R code. 
The number of modules is\n% calculated from S during the post processing.\n\nfunction []=modularity_weighted_multilayer_null2(hosts_or_parasites,runs,outputfolder)\n%% initialize\ngamma=1;\nQ_runs=[];\nS_runs=[];\n\nfiles=dir('host_parasite_abundance_weighted_layer_*.csv');\nomega=importdata('interlayer_relative_abundance_matrix.csv');\n\n%% Run main loop\nfor r=1:runs\n\tr\n %% Load layers and permute\n A=cell(1,length(files));\n for i = 1:length(files)\n bip_data=importdata(files(i).name);\n [p,q]=size(bip_data);\n %% Reshuffle the node order\n % The \"nodal\" null model (sensu Bassett et al., 2011 PNAS)\n % reshuffles the interlayer edges between nodes and their\n % counterparts in two consecutive layers by permuting the node\n % labels separately in each layer so that node identity is not\n % preserved. In the bipartite networks, this is akin to permuting\n % the order of rows (or columns) for every given layer. You can\n % permute by rows for hosts or by columsns for parasites by\n % commenting/uncommenting the following lines:\n \n if hosts_or_parasites==1\n rowPermutations=randperm(p);\n bip_data=bip_data(rowPermutations,:);\n end\n if hosts_or_parasites==2\n colPermutations=randperm(q);\n bip_data=bip_data(:,colPermutations);\n end\n \n onemode=zeros(p+q,p+q); \n onemode(1:p, (p+1):(p+q))=bip_data;\n onemode((p+1):(p+q), 1:p)=bip_data';\n A{i}=onemode;\n if ~issymmetric(A{i})\n disp(['layer ',num2str(i),' is NOT symmetric'])\n end\n end\n N=length(A{1});\n T=length(A);\n %% Create the modularity matrix\n B=spalloc(N*T,N*T,N*N*T+2*N*T);\n twomu=0;\n for s=1:T\n k=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n k=k(1:p); % This is just the first set\n d=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n d=d((p+1):(p+q)); % This is just the 2nd set\n m=sum(k); % Note the m instead of twom as in unipartite\n twomu=twomu+m; % Note that we add m and not 2m to the mu. In the unipartite version this is twomu=twomu+twom\n indx=[1:N]+(s-1)*N;\n % This calculates the matrix of probabilities accroding to eq. 15 in\n % Barber 2007\n P_ij=zeros(p,q);\n for i=1:p\n for j=1:q\n P_ij(i,j)=k(i)*d(j);\n end\n end\n % Here again we have to create a symmetric adjacency matrix out of the\n % bipartite.\n onemode=zeros(p+q,p+q);\n onemode(1:p, (p+1):(p+q))=P_ij;\n onemode((p+1):(p+q), 1:p)=P_ij';\n P_ij=onemode;\n B(indx,indx)=A{s}-gamma*P_ij/m; % Note the P_ij instead of k*k' as in the unipartite version. also the m in stead of 2m\n end\n twomu=twomu+2*sum(sum(omega)); \n %% Run modularity\n [S,Q] = genlouvain(B,10000,0,1,1);\n Q_runs = [Q_runs Q/twomu];\n S_runs = [S_runs S];\nend\n%% Write results\nif hosts_or_parasites==1\n dlmwrite([outputfolder,'/Q_null2_hosts.csv'],full(Q_runs)',',');\n dlmwrite([outputfolder,'/S_null2_hosts.csv'],S_runs,',');\nend\nif hosts_or_parasites==2\n dlmwrite([outputfolder,'/Q_null2_parasites.csv'],full(Q_runs)',',');\n dlmwrite([outputfolder,'/S_null2_parasites.csv'],S_runs,',');\nend\n \nend\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "modularity_interlayer_infinity.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/modularity_interlayer_infinity.m", "size": 4086, "source_encoding": "utf_8", "md5": "1fa8825e8c7b6600a5d00d009e204c44", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. 
Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\n\n% This function analyses the modular structure of the observed temporal\n% multilayer networkwhen interlayer edges are infinity.\n\n% The input is the number of realizations (runs) the function should\n% perform due to the heuristic nature of the general Louvain algorithm, and\n% the folder to which to write the results. It writes as output a vector\n% (Q) with the maximum modularity values calculated by the function and a\n% matrix (S) with module affiliations of the state nodes. The number of\n% columns in S corresponds to the number of realizations and the number of\n% rows is of length num_layers*num_nodes. The post processing of the output\n% is done in R and expalined in the R code. The number of modules is\n% calculated from S during the post processing.\n\nfunction []=modularity_interlayer_infinity(runs,outputfolder)\n\n% initialize\ngamma=1;\nQ_runs=[];\nS_runs=[];\n\nfiles=dir('host_parasite_abundance_weighted_layer_*.csv'); % These are the different layers of the network, produced with the R code supplied.\n\n% Bipartite networks have to be transformed to unipartite (square matrix).\nA=cell(1,length(files));\nfor i = 1:length(files)\n bip_data=importdata(files(i).name);\n % Transform the pxq matrix into (p+q)x(p+q)\n [p,q]=size(bip_data);\n onemode=zeros(p+q,p+q); \n onemode(1:p, (p+1):(p+q))=bip_data;\n onemode((p+1):(p+q), 1:p)=bip_data';\n A{i}=onemode;\n if ~issymmetric(A{i})\n disp(['layer ',num2str(i),' is NOT symmetric'])\n end\nend\n\nN=length(A{1}); % Number of nodes (hosts+parasites)\nT=length(A); % Number of layers\n\nfor r=1:runs\n r\n B=spalloc(N*T,N*T,N*N*T+2*N*T); % create an empty modularity matrix\n twomu=0;\n for s=1:T\n %%%%%% BIPARTITE:\n % In case of unipartite undirected networks the probability P of an edge exisiting between two nodes \n % is proportional to the product of node degrees. This is\n % k_is*k_js/2m_s in eq. 1 in the SI of the paper. Note the 2*m_s because it is an\n % undirected unipartite network.\n % In the bipartite case, P is k_is*d_js/m_s. and the division is over\n % the number of edges m (see Barber 2007, eq. 15 for reasoning).\n % When networks are weighted this explanation refers to the strength\n % instead of degrees.\n\n k=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n k=k(1:p); % This is just the first set\n d=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n d=d((p+1):(p+q)); % This is just the 2nd set\n m=sum(k); % Note the m instead of twom as in unipartite\n twomu=twomu+m; % Note that we add m and not 2m to the mu. In the unipartite version this is twomu=twomu+twom\n indx=[1:N]+(s-1)*N;\n % This calculates the matrix of probabilities accroding to eq. 15 in\n % Barber 2007\n P_ij=zeros(p,q);\n for i=1:p\n for j=1:q\n P_ij(i,j)=k(i)*d(j);\n end\n end\n % Here again we have to create a smymetric adjacency matrix out of the bipartite.\n onemode=zeros(p+q,p+q);\n onemode(1:p, (p+1):(p+q))=P_ij;\n onemode((p+1):(p+q), 1:p)=P_ij';\n P_ij=onemode;\n B(indx,indx)=A{s}-gamma*P_ij/m; % Note the P_ij instead of k*k' as in the unipartite version. 
also the m instead of 2m\n end\n \n % This makes all the interlayer edges with a value of 10000 which\n % mimics infinity\n omega=10000;\n twomu=twomu+2*omega*N*(T-1);\n B = B + omega*spdiags(ones(N*T,2),[-N,N],N*T,N*T);\n\n [S,Q] = genlouvain(B,10000,0,1,1);\n Q_runs = [Q_runs Q/twomu];\n S_runs = [S_runs S];\nend\ndlmwrite([outputfolder,'/Q_obs_infinity.csv'],full(Q_runs)',',');\ndlmwrite([outputfolder,'/S_obs_infinity.csv'],S_runs,',');\n\nend\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "modularity_weighted_multilayer_obs.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/modularity_weighted_multilayer_obs.m", "size": 4156, "source_encoding": "utf_8", "md5": "266904c5c388af9b8be6e1a43d0732ab", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\n\n% This function analyses the modular structure of the observed temporal\n% multilayer network with quantitative interlayer AND intralyer edges. The\n% input is the number of realizations (runs) the function should perform\n% due to the heuristic nature of the general Louvain algorithm, and the\n% folder to which to write the results. It writes as output a vector (Q)\n% with the maximum modularity values calculated by the function and a\n% matrix (S) with module affiliations of the state nodes. The number of\n% columns in S corresponds to the number of realizations and the number of\n% rows is of length num_layers*num_nodes. The post processing of the output\n% is done in R and expalined in the R code. The number of modules is\n% calculated from S during the post processing.\n\nfunction []=modularity_weighted_multilayer_obs(runs,outputfolder)\n\n% initialize\ngamma=1;\nQ_runs=[];\nS_runs=[];\n\nfiles=dir('host_parasite_abundance_weighted_layer_*.csv'); % These are the different layers of the network, produced with the R code supplied.\n\n% Bipartite networks have to be transformed to unipartite (square matrix).\nA=cell(1,length(files));\nfor i = 1:length(files)\n bip_data=importdata(files(i).name);\n % Transform the pxq matrix into (p+q)x(p+q)\n [p,q]=size(bip_data);\n onemode=zeros(p+q,p+q); \n onemode(1:p, (p+1):(p+q))=bip_data;\n onemode((p+1):(p+q), 1:p)=bip_data';\n A{i}=onemode;\n if ~issymmetric(A{i})\n disp(['layer ',num2str(i),' is NOT symmetric'])\n end\nend\n\nN=length(A{1}); % Number of nodes (hosts+parasites)\nT=length(A); % Number of layers\n\nfor r=1:runs\n r\n B=spalloc(N*T,N*T,N*N*T+2*N*T); % create an empty modularity matrix\n twomu=0;\n for s=1:T\n %%%%%% BIPARTITE:\n % In case of unipartite undirected networks the probability P of an edge exisiting between two nodes \n % is proportional to the product of node degrees. This is\n % k_is*k_js/2m_s in eq. 1 in the SI of the paper. Note the 2*m_s because it is an\n % undirected unipartite network.\n % In the bipartite case, P is k_is*d_js/m_s. and the division is over\n % the number of edges m (see Barber 2007, eq. 
15 for reasoning).\n % When networks are weighted this explanation refers to the strength\n % instead of degrees.\n\n k=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n k=k(1:p); % This is just the first set\n d=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n d=d((p+1):(p+q)); % This is just the 2nd set\n m=sum(k); % Note the m instead of twom as in unipartite\n twomu=twomu+m; % Note that we add m and not 2m to the mu. In the unipartite version this is twomu=twomu+twom\n indx=[1:N]+(s-1)*N;\n % This calculates the matrix of probabilities accroding to eq. 15 in\n % Barber 2007\n P_ij=zeros(p,q);\n for i=1:p\n for j=1:q\n P_ij(i,j)=k(i)*d(j);\n end\n end\n % Here again we have to create a smymetric adjacency matrix out of the bipartite.\n onemode=zeros(p+q,p+q);\n onemode(1:p, (p+1):(p+q))=P_ij;\n onemode((p+1):(p+q), 1:p)=P_ij';\n P_ij=onemode;\n B(indx,indx)=A{s}-gamma*P_ij/m; % Note the P_ij instead of k*k' as in the unipartite version. also the m instead of 2m\n end\n \n % Because all interlayer edges have different values, OMEGA IS A MATRIX.\n % This matrix was produced using the accompanying R code.\n omega=importdata('interlayer_relative_abundance_matrix.csv');\n twomu=twomu+2*sum(sum(omega)); \n B = B+omega; \n \n [S,Q] = genlouvain(B,10000,0,1,1);\n Q_runs = [Q_runs Q/twomu];\n S_runs = [S_runs S];\nend\ndlmwrite([outputfolder,'/Q_obs.csv'],full(Q_runs)',',');\ndlmwrite([outputfolder,'/S_obs.csv'],S_runs,',');\n\nend\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "modularity_weighted_multilayer_null1.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/modularity_weighted_multilayer_null1.m", "size": 3654, "source_encoding": "utf_8", "md5": "a48fbfce9c1c4c393c423628e0ccad8c", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\n\n% This function analyses the modular structure of the temporal multilaer\n% networks reshuffled according to the first null model (reshuffling\n% intralayer interactions). The set of reshuffled network layers was\n% produced using the R code accompanying this publication.\n\n% The function takes as input the number of realizations to perform, the\n% folder where the reshuffled network layers are stored and the output\n% folder to where results are written. It writes as output a vector (Q)\n% with the maximum modularity values calculated by the function and a\n% matrix (S) with module affiliations of the state nodes. The number of\n% columns in S corresponds to the number of realizations (reshuffled\n% networks) and the number of rows is of length num_layers*num_nodes. 
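% A self-contained toy showing one plausible layout of the quantitative
% interlayer matrix omega that is added to the modularity matrix above
% (B = B + omega). The real matrix is produced by the accompanying R code
% from relative abundances; the weights w and the ordinal
% (consecutive-layer) coupling pattern below are illustrative assumptions.
N = 4; T = 3;                                 % toy numbers of nodes and layers
w = [0.2 0.5; 0.1 0.3; 0.4 0.4; 0.6 0.2];     % made-up N x (T-1) interlayer weights
omega = sparse(N*T, N*T);
for s = 1:(T-1)
    rows = (1:N) + (s-1)*N;                   % state nodes of layer s
    cols = (1:N) + s*N;                       % replicas of the same nodes in layer s+1
    omega(rows, cols) = spdiags(w(:,s), 0, N, N);
    omega(cols, rows) = spdiags(w(:,s), 0, N, N);
end
coupling_total = full(sum(sum(omega)));       % the quantity that enters twomu above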
The\n% post processing of the output is done in R and expalined in the R code.\n% The number of modules is calculated from S during the post processing.\n\nfunction []=modularity_weighted_multilayer_null1(runs,inputfolder,outputfolder)\n\n% initialize\ngamma=1;\nQ_runs=[];\nS_runs=[];\n\nfor r=1:runs\n r\n files=dir([inputfolder,'/network_',num2str(r),'_*.csv']);\n A=cell(1,length(files));\n for i = 1:length(files)\n bip_data=importdata([inputfolder,'/',files(i).name]);\n % Transform the pxq matrix into (p+q)x(p+q)\n [p,q]=size(bip_data);\n onemode=zeros(p+q,p+q); \n onemode(1:p, (p+1):(p+q))=bip_data;\n onemode((p+1):(p+q), 1:p)=bip_data';\n A{i}=onemode;\n if ~issymmetric(A{i})\n error(['layer ',num2str(i),' is NOT symmetric'])\n end\n end\n\n N=length(A{1});\n T=length(A);\n B=spalloc(N*T,N*T,N*N*T+2*N*T);\n twomu=0;\n for s=1:T\n k=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n k=k(1:p); % This is just the first set\n d=sum(A{s}); % this is the sum of degrees of the nodes in the two sets\n d=d((p+1):(p+q)); % This is just the 2nd set\n m=sum(k); % Note the m instead of twom as in unipartite\n twomu=twomu+m; % Note that we add m and not 2m to the mu. In the unipartite version this is twomu=twomu+twom\n indx=[1:N]+(s-1)*N;\n % This calculates the matrix of probabilities accroding to eq. 15 in\n % Barber 2007\n P_ij=zeros(p,q);\n for i=1:p\n for j=1:q\n P_ij(i,j)=k(i)*d(j);\n end\n end\n % Here again we have to create a symetric adjacency matrix out of the\n % bipartite.\n onemode=zeros(p+q,p+q);\n onemode(1:p, (p+1):(p+q))=P_ij;\n onemode((p+1):(p+q), 1:p)=P_ij';\n P_ij=onemode;\n B(indx,indx)=A{s}-gamma*P_ij/m; % Note the P_ij instead of k*k' as in the unipartite version. also the m in stead of 2m\n end\n % This is if all interlayer edges have different values. OMEGA IS A\n % MATRIX. Note that it is the same matrix used for the observed network\n % because the null model only reshuffls the intralayer interactions.\n omega=importdata('interlayer_relative_abundance_matrix.csv');\n twomu=twomu+2*sum(sum(omega)); \n B = B+omega; \n [S,Q] = genlouvain(B,10000,0,1,1);\n Q_runs = [Q_runs Q/twomu];\n S_runs = [S_runs S];\nend\n\nfull(Q_runs)\ndlmwrite([outputfolder,'/Q_null1.csv'],full(Q_runs)',',');\ndlmwrite([outputfolder,'/S_null1.csv'],S_runs,',');\n\nend\n\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "modularity_weighted_multilayer_null3.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/modularity_weighted_multilayer_null3.m", "size": 3373, "source_encoding": "utf_8", "md5": "aa4169e5f07c13619ac8715f8a34ae15", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\n\n% This function is for the third null model from the publication\n% (permuting the order of layers).\n\n% The function takes as input the number of realizations to perform and the\n% output folder to where results are written. It writes as output a vector\n% (Q) with the maximum modularity values calculated by the function and a\n% matrix (S) with module affiliations of the state nodes. The number of\n% columns in S corresponds to the number of realizations (reshuffled\n% networks) and the number of rows is of length num_layers*num_nodes. 
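% A sketch of how the functions in this set are typically invoked.
% The folder names, the working directory and the number of realizations
% are hypothetical; the functions expect the layer CSVs,
% interlayer_relative_abundance_matrix.csv and genlouvain.m to be
% reachable from the current directory / search path.
cd('/path/to/network_csvs');                              % hypothetical data location
modularity_weighted_multilayer_obs(100, 'results');       % observed network, 100 realizations
modularity_interlayer_infinity(100, 'results');           % interlayer edges set to "infinity"
modularity_weighted_multilayer_null1(100, 'null1_layers', 'results');
modularity_weighted_multilayer_null2(1, 100, 'results');  % 1 = permute hosts, 2 = parasites
modularity_weighted_multilayer_null3(100, 'results');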
The\n% post processing of the output is done in R and expalined in the R code.\n% The number of modules is calculated from S during the post processing.\n\nfunction []=modularity_weighted_multilayer_null3(runs,outputfolder)\n\n%% initialize\ngamma=1;\nQ_runs=[];\nS_runs=[];\n\nfiles=dir('host_parasite_abundance_weighted_layer_*.csv');\nomega=importdata('interlayer_relative_abundance_matrix.csv');\n\nA=cell(1,length(files));\nfor i = 1:length(files)\n bip_data=importdata(files(i).name);\n % Transform the pxq matrix into (p+q)x(p+q)\n [p,q]=size(bip_data);\n onemode=zeros(p+q,p+q); \n onemode(1:p, (p+1):(p+q))=bip_data;\n onemode((p+1):(p+q), 1:p)=bip_data';\n A{i}=onemode;\n if ~issymmetric(A{i})\n disp(['layer ',num2str(i),' is NOT symmetric'])\n end\nend\nN=length(A{1});\nT=length(A);\n\n%% Main loop\nfor r=1:runs\n r\n %% Here we permute the order of the layers\n layerPermutations=randperm(T);\n A_perm=cell(T,1);\n for x=1:T\n A_perm{x}=A{layerPermutations(x)};\n end\n \n %% Calculate modularity matrix\n B=spalloc(N*T,N*T,N*N*T+2*N*T);\n twomu=0;\n for s=1:T\n k=sum(A_perm{s}); % this is the sum of degrees of the nodes in the two sets\n k=k(1:p); % This is just the first set\n d=sum(A_perm{s}); % this is the sum of degrees of the nodes in the two sets\n d=d((p+1):(p+q)); % This is just the 2nd set\n m=sum(k); % Note the m instead of twom as in unipartite\n twomu=twomu+m; % Note that we add m and not 2m to the mu. In the unipartite version this is twomu=twomu+twom\n indx=[1:N]+(s-1)*N;\n % This calculates the matrix of probabilities accroding to eq. 15 in\n % Barber 2007\n P_ij=zeros(p,q);\n for i=1:p\n for j=1:q\n P_ij(i,j)=k(i)*d(j);\n end\n end\n % Here again we have to create a symmetric adjacency matrix out of the\n % bipartite.\n onemode=zeros(p+q,p+q);\n onemode(1:p, (p+1):(p+q))=P_ij;\n onemode((p+1):(p+q), 1:p)=P_ij';\n P_ij=onemode;\n B(indx,indx)=A_perm{s}-gamma*P_ij/m; % Note the P_ij instead of k*k' as in the unipartite version. also the m in stead of 2m\n end\n \n twomu=twomu+2*sum(sum(omega)); \n B = B+omega;\n \n %% Run modularity\n [S,Q] = genlouvain(B,10000,0,1,1);\n Q_runs = [Q_runs Q/twomu];\n S_runs = [S_runs S];\nend\n%% Write results\ndlmwrite([outputfolder,'/Q_null3.csv'],full(Q_runs)',',');\ndlmwrite([outputfolder,'/S_null3.csv'],S_runs,',');\n\nend\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "genlouvain.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/genlouvain.m", "size": 11697, "source_encoding": "utf_8", "md5": "92c2a8f309fb987f9c38da6c4be282ac", "text": "function [S,Q] = genlouvain(B,limit,verbose,randord,randmove)\r\n%GENLOUVAIN Louvain-like community detection, specified quality function.\r\n% Version 2.0 (July 2014)\r\n%\r\n% [S,Q] = GENLOUVAIN(B) with matrix B implements a Louvain-like greedy\r\n% community detection method using the modularity/quality matrix B that\r\n% encodes the quality function Q, defined by summing over all elements\r\n% B(i,j) such that nodes i and j are placed in the same community.\r\n% Following Blondel et al. 2008, the algorithm proceeds in two phases\r\n% repeated iteratively: quality is optimized by moving one node at a time\r\n% until no such moves improve quality; the communities found to that\r\n% point are then aggregated to build a new network where each node\r\n% represents a community. The output vector S encodes the obtained\r\n% community assignments, with S(i) identifying the community to which\r\n% node i has been assigned. 
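% A self-contained toy of the layer-permutation step used by the third
% null model above: intralayer matrices are kept intact and only their
% order is shuffled, while omega is left untouched. The layers are made up.
A_toy = { [0 1; 1 0], [0 2; 2 0], [0 3; 3 0] };   % three toy 2x2 layers
T_toy = numel(A_toy);
perm = randperm(T_toy);                            % random layer order
A_perm = A_toy(perm);                              % same layers, permuted positions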
The output Q gives the quality of the\r\n% resulting partition of the network.\r\n%\r\n% [S,Q] = GENLOUVAIN(B) with function handle B such that B(i) returns\r\n% the ith column of the modularity/quality matrix uses this function\r\n% handle (to reduce the memory footprint for large networks) until the\r\n% number of groups is less than 10000 and then builds the B matrix\r\n% corresponding to the new aggregated network in subsequent passes. Use\r\n% [S,Q] = GENLOUVAIN(B,limit) to change this default=10000 limit.\r\n%\r\n% [S,Q] = GENLOUVAIN(B,limit,0) suppresses displayed text output.\r\n%\r\n% [S,Q] = GENLOUVAIN(B,limit,verbose,0) forces index-ordered (cf.\r\n% randperm-ordered) consideration of nodes, for deterministic results.\r\n%\r\n% [S,Q]=GENLOUVAIN(B,limit,verbose,randord,1) enables additional \r\n% randomization to obtain a broader sample of the quality function landscape \r\n% and mitigates some undesirable behavior for \"multislice\" modularity with \r\n% ordinal coupling. Without 'randmove' enabled, the algorithm exhibits an \r\n% abrupt change in behavior when the strength of the interslice coupling \r\n% approaches the maximum value of the intraslice modularity matrices. With \r\n% 'randmove' enabled, the algorithm moves the node under consideration to a \r\n% community chosen uniformly at random from all moves that increase the \r\n% quality function, instead of choosing the move that maximally increases the \r\n% quality function.\r\n%\r\n% Example (using adjacency matrix A)\r\n% k = full(sum(A));\r\n% twom = sum(k); \r\n% B = @(v) A(:,v) - k'*k(v)/twom;\r\n% [S,Q] = genlouvain(B); \r\n% Q = Q/twom;\r\n% finds community assignments for the undirected network encoded by the\r\n% symmetric adjacency matrix A. For small networks, one may obtain\r\n% reasonably efficient results even more simply by handling the full\r\n% modularity/quality matrix\r\n% B = A - k'*k/twom;\r\n% instead of the function handle. Intended use also includes the\r\n% \"multislice\" network quality function of Mucha et al. 2010, where B\r\n% encodes the interactions as an equivalent matrix (see examples posted\r\n% online at http://netwiki.amath.unc.edu/GenLouvain).\r\n%\r\n% Notes:\r\n% The matrix represented by B must be both symmetric and square. This\r\n% condition is not checked thoroughly if B is a function handle, but is\r\n% essential to the proper use of this routine.\r\n%\r\n% Under default options, this routine can return different results from\r\n% run to run because it considers nodes in pseudorandom (randperm)\r\n% order. Because of the potentially large number of nearly-optimal\r\n% partitions (Good et al. 2010), one is encouraged to investigate\r\n% results of repeated applications of this code (and, if possible, of\r\n% other computational heuristics). To force deterministic behavior,\r\n% ordering nodes by their index, pass zero as the fourth input:\r\n% GENLOUVAIN(B,limit,verbose,0).\r\n%\r\n% This algorithm is only \"Louvain-like\" in the sense that the two\r\n% phases are used iteratively in the same manner as in the Louvain\r\n% algorithm (Blondel et al. 2008). Because it operates on a general\r\n% quality/modularity matrix B, it does not include any analytical\r\n% formulas for quickly identifying the change in modularity from a\r\n% proposed move nor any improved efficiency obtained by their use. 
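% A runnable version of the usage pattern given in the help text above,
% with a concrete toy adjacency matrix (two 3-node cliques joined by a
% single bridge edge). Results can differ between runs because nodes are
% visited in randomized order, as noted above.
A = zeros(6);
A(1:3,1:3) = 1; A(4:6,4:6) = 1;        % two cliques of three nodes each
A = A - diag(diag(A));                  % remove self-loops
A(3,4) = 1; A(4,3) = 1;                 % bridge edge between the cliques
k = full(sum(A));
twom = sum(k);
B = @(v) A(:,v) - k'*k(v)/twom;         % function-handle form of the modularity matrix
[S,Q] = genlouvain(B);
Q = Q/twom;                             % rescale so that Q <= 1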
If\r\n% your problem uses one of the well-used null models included in other\r\n% codes, those codes should be much faster for your task.\r\n%\r\n% Past versions had a problem where accumulated subtraction error might\r\n% lead to an infinite loop with each pass oscillating between two or\r\n% more partitions yet incorrectly identifying increases in quality. We\r\n% believe this problem has been corrected by the relative change checks\r\n% in lines 178 and 269. If you encounter a similar problem, notify\r\n% Peter Mucha (mucha@unc.edu).\r\n%\r\n% The output Q provides the sum over the appropriate elements of B\r\n% without any rescaling. As such, we have rescaled Q in the example\r\n% above by 2m = sum(k) so that Q <= 1.\r\n%\r\n% The '~' for ignoring function returns (used for \"max\" below) are not\r\n% supported prior to R2009b. Replace (e.g. 'dummy') for pre-2009b.\r\n%\r\n% By using this code, the user implicitly acknowledges that the authors\r\n% accept no liability associated with that use. (What are you doing\r\n% with it anyway that might cause there to be a potential liability?!?)\r\n%\r\n% References:\r\n% Blondel, Vincent D., Jean-Loup Guillaume, Renaud Lambiotte, and\r\n% Etienne Lefebvre, \"Fast unfolding of communities in large networks,\"\r\n% Journal of Statistical Mechanics: Theory and Experiment, P10008\r\n% (2008).\r\n%\r\n% Fortunato, Santo, \"Community detection in graphs,\" Physics Reports\r\n% 486, 75-174 (2010).\r\n%\r\n% Mucha, Peter J., Thomas Richardson, Kevin Macon, Mason A. Porter, and\r\n% Jukka-Pekka Onnela. \"Community Structure in Time-Dependent,\r\n% Multiscale, and Multiplex Networks,\" Science 328, 876-878 (2010).\r\n%\r\n% Porter, M. A., J. P. Onnela, and P. J. Mucha, \"Communities in\r\n% networks,\" Notices of the American Mathematical Society 56, 1082-1097\r\n% & 1164-1166 (2009).\r\n%\r\n% Acknowledgments:\r\n% A special thank you to Stephen Reid, whose greedy.m code was the\r\n% original version that has over time developed into the present code, \r\n% and Marya Bazzi for noticing the problematic behavior of genlouvain for\r\n% ordinal interslice coupling and contributing code that developed into the \r\n% 'randmove' option.\r\n% Thank you also to Dani Bassett, Jesse Blocher, Mason Porter and Simi\r\n% Wang for inspiring improvements to the code.\r\n%\r\n% Citation: If you use this code, please cite as\r\n% Inderjit S. Jutla, Lucas G. S. Jeub, and Peter J. Mucha,\r\n% \"A generalized Louvain method for community detection implemented\r\n% in MATLAB,\" http://netwiki.amath.unc.edu/GenLouvain (2011-2014).\r\n\r\n%set default for maximum size of modularity matrix\r\nif nargin<2||isempty(limit)\r\n limit = 10000;\r\nend\r\n\r\n%set level of reported/displayed text output\r\nif nargin<3||isempty(verbose)\r\n verbose = 1;\r\nend\r\nif verbose\r\n mydisp = @(s) disp(s);\r\nelse\r\n mydisp = @(s) disp('');\r\nend\r\n\r\n%set randperm- v. 
index-ordered\r\nif nargin<4||isempty(randord)\r\n randord = 1;\r\nend\r\nif randord\r\n myord = @(n) randperm(n);\r\nelse\r\n myord = @(n) 1:n;\r\nend\r\n\r\n%set move function (maximal (original Louvain) or random improvement)\r\nif nargin<5||isempty(randmove)\r\n randmove=false;\r\nend\r\nif randmove\r\n movefunction='moverand';\r\nelse\r\n movefunction='move';\r\nend\r\n\r\n%initialise variables and do symmetry check\r\nif isa(B,'function_handle')\r\n n=length(B(1));\r\n S=(1:n)';\r\n M=B;\r\n it(:,1)=M(1);\r\n ii=find(it(2:end)>0,3)+1;\r\n ii=[1,ii'];\r\n for i=2:length(ii),\r\n it(:,i)=M(ii(i));\r\n end\r\n it=it(ii,:);\r\n if norm(it-it')>2*eps\r\n error('Function handle does not correspond to a symmetric matrix')\r\n end\r\nelse\r\n n = length(B);\r\n S = (1:n)';\r\n \r\n if nnz(B-B'),\r\n B=(B+B')/2;\r\n disp('WARNING: Forced symmetric B matrix')\r\n end\r\n M=B;\r\nend\r\n\r\ndtot=0; %keeps track of total change in modularity\r\n\r\n%Run using function handle, if provided\r\nwhile (isa(M,'function_handle')) %loop around each \"pass\" (in language of Blondel et al) with B function handle\r\n \r\n y = unique(S); %unique also puts elements in ascending order\r\n \r\n Sb=S; \r\n \r\n \r\n clocktime=clock;\r\n mydisp(['Merging ',num2str(length(y)),' communities ',num2str(clocktime(4:6))]);\r\n \r\n dstep=1;\t%keeps track of change in modularity in pass\r\n yb=[];\r\n while (~isequal(yb,y))&&(dstep/dtot>2*eps) %This is the loop around Blondel et al's \"first phase\"\r\n yb = y;\r\n dstep=0;\r\n group_handler('assign',y);\r\n for i=myord(length(M(1)))\r\n di=group_handler(movefunction,i,M(i));\r\n dstep=dstep+di;\r\n end\r\n \r\n dtot=dtot+dstep;\r\n y=group_handler('return');\r\n mydisp([num2str(length(unique(y))),' change: ',num2str(dstep),...\r\n ' total: ',num2str(dtot),' relative: ',num2str(dstep/dtot)]);\r\n \r\n end\r\n \r\n %group_handler implements tidyconfig\r\n for i=1:length(y)\r\n S(S==i)=y(i);\r\n end\r\n \r\n %calculate modularity and return if converged\r\n if isequal(Sb,S)\r\n Q=0;\r\n P=sparse(y,1:length(y),1);\r\n for i=1:length(M(1))\r\n Q=Q+(P*M(i))'*P(:,i);\r\n end\r\n return\r\n end\r\n \r\n %check wether #groups < limit \r\n t = length(unique(S));\r\n if (t>limit)\r\n metanetwork_reduce('assign',S); %inputs group information to metanetwork_reduce\r\n M=@(i) metanetwork_i(B,t,i); %use function handle if #groups>limit \r\n else\r\n metanetwork_reduce('assign',S);\r\n J = zeros(t); %convert to matrix if #groups small enough\r\n for c=1:t\r\n J(:,c)=metanetwork_i(B,t,c);\r\n end\r\n B = J;\r\n M=B;\r\n end\r\n \r\nend\r\n \r\n\r\nS2 = (1:length(B))';\r\nSb = [];\r\nwhile ~isequal(Sb,S2) %loop around each \"pass\" (in language of Blondel et al) with B matrix\r\n \r\n y = unique(S2); %unique also puts elements in ascending order\r\n Sb = S2;\r\n\r\n clocktime=clock;\r\n mydisp(['Merging ',num2str(length(y)),' communities ',num2str(clocktime(4:6))]);\r\n\r\n yb = [];\r\n \r\n dstep=1;\r\n \r\n while (~isequal(yb,y)) && (dstep/dtot>2*eps) %This is the loop around Blondel et al's \"first phase\"\r\n \r\n % mydisp([num2str(length(unique(y))),' ',num2str(Q)])\r\n yb = y;\r\n dstep=0;\r\n group_handler('assign',y);\r\n for i = myord(length(M))\r\n di=group_handler(movefunction,i,M(:,i));\r\n dstep=dstep+di;\r\n end\r\n dtot=dtot+dstep;\r\n y=group_handler('return');\r\n end\r\n \r\n for i = 1:length(y)\r\n S(S==i) = y(i);\r\n S2(S2==i) = y(i);\r\n end\r\n \r\n if isequal(Sb,S2)\r\n \tP=sparse(y,1:length(y),1);\r\n \tQ=sum(sum((P*M).*P));\r\n \treturn\r\n end\r\n \r\n M = 
metanetwork(B,S2); \r\nend\r\n\r\n%-----%\r\nfunction M = metanetwork(J,S)\r\n%Computes new aggregated network (communities --> nodes)\r\n PP = sparse(1:length(S),S,1);\r\n M = PP'*J*PP;\r\n M=full(M);\r\n\r\n\r\n%-----%\r\nfunction Mi = metanetwork_i(J,t,i) \r\n%ith column of metanetwork (used to create function handle)\r\n%J is a function handle\r\n Mi=zeros(t,1);\r\n ind=metanetwork_reduce('nodes',i);\r\n for j=ind\r\n Jj=J(j);\r\n P=metanetwork_reduce('reduce',Jj);\r\n Mi=Mi+P;\r\n end\r\n \r\n\r\n\r\n \r\n \r\n \r\n\r\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "single_layer_bipartite_B_matrix.m", "ext": ".m", "path": "EMLN-master/NEE2017/Modularity/single_layer_bipartite_B_matrix.m", "size": 554, "source_encoding": "utf_8", "md5": "ce18365cfc84fce465315d920e40b606", "text": "% NOTE!!! This file accompanies the following publication and can\n% only be understood by reading the details in the manuscript and its\n% SI. Please cite the original publication if using this code.\n% \n% Pilosof S, Porter MA, Pascual M, Kefi S.\n% The multilayer nature of ecological networks.\n% Nature Ecology & Evolution (2017).\n\nfunction [B,mm]=single_layer_bipartite_B_matrix(A,gamma) \n [m,n]=size(A);\n N=m+n;\n k=sum(A,2);\n d=sum(A,1);\n mm=sum(k);\n B1=A-gamma*k*d/mm;\n B=zeros(N,N);\n B(1:m,m+1:N)=B1;\n B(m+1:N,1:m)=B1';\nend\n"} +{"plateform": "github", "repo_name": "shainova/EMLN-master", "name": "muxOctaveLib.m", "ext": ".m", "path": "EMLN-master/NEE2017/Reducibility/muxOctaveLib.m", "size": 58399, "source_encoding": "utf_8", "md5": "410cc4713aa4cfb0d67441e66f48983c", "text": "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% MuxNetLib: Octave library for Multiplex Network Analysis in muxViz\n%\n% Version: 0.1\n% Last update: Nov 2015\n% Authors: Manlio De Domenico\n%\n% History:\n%\n% May 2014: First release, including part of muxNet\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n1;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [A,N] = loadNetworkFromFile(fileName,Flags,Nodes=0,FirstNode=0)\n A = load(fileName);\n \n %if the first node is numbered by zero shift of 1 unit to relabel\n if FirstNode==0\n A(:,1) = A(:,1) + 1; \n A(:,2) = A(:,2) + 1; \n endif\n \n if Nodes==0\n N = max(max(A(:,1)),max(A(:,2)));\n else\n N = Nodes;\n endif\n \n if max(ismember(Flags, 'W'))==0\n %input is assumed to be an unweighted edge list\n %add a column of ones as weight for connected nodes\n %if there are more columns, we have to remove them\n \n if columns(A)>2\n A = A(:,1:2);\n endif\n\n A = [A 1.0*ones(size(A,1),1)];\n else\n if columns(A)>3\n A = A(:,1:3);\n endif\n endif\n\n A = spconvert(A);\n\n A(size(A,1)+1:N,size(A,2)+1:N) = 0;\n\n if ismember(\"D\",Flags) && ismember(\"U\",Flags)\n %input is undirected but provided in directed shape, we need to sum the transpose\n fprintf(2,\"#Input is undirected but in directed shape\\n\");\n A = A + A' - diag(diag(A));\n #the -diag() is to avoid counting twice the self-loops\n endif\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [NodesTensor,Layers,Nodes] = BuildMultiplexFromFileList(LayersList,Flags,Nodes=0,FirstNode=0) \n % for input from different folders and put the full path in the LayerList\n % Flags must be a cell array defining the network type: D(irected), W(eighted), U(ndirected with)D(irected input)\n \n NodesTensor = {};\n Ni = {};\n\n Layers = length(LayersList);\n for i = 1:Layers\n if strcmp(LayersList{i},\"NULL\")\n if Nodes == 0\n 
fprintf(2,\"\\tBuildMultiplexFromFileList: ERROR! You requested a null layer, without specifying the number of nodes. Aborting process.\\n\");\n exit\n end\n \n Ni{i} = Nodes;\n NodesTensor{i} = sparse(Nodes,Nodes);\n\n Ei = 0;\n file = \"Null layer\";\n printf(\"#Layer %d: %d Nodes %d Edges (file: %s)\\n\",i,Ni{i},Ei,file);\n else\n [NodesTensor{i},Ni{i}] = loadNetworkFromFile(LayersList{i},Flags,Nodes,FirstNode);\n\n path = cellstr(strsplit (LayersList{i}, \"/\"));\n file = path{length(path)};\n Ei = sum(sum(NodesTensor{i}>0));\n printf(\"#Layer %d: %d Nodes %d Edges (file: %s)\\n\",i,Ni{i},Ei,file);\n end\n \n \n %check that the number of nodes in each layer is the same\n if i>1\n if Ni{i} != Ni{i-1}\n error(\" BuildMultiplexFromFileList: ERROR! The number of nodes mismatches: %d (layer %d) vs %d (layer %d)\\n\",Ni{i},i,Ni{i-1},i-1);\n %pause;\n end\n end\n end \n\n printf(\"#Flags: %s\\n\",Flags);\n\n % if everything is fine, we assign the number of nodes\n Nodes = Ni{1};\n\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [M,L,N] = BuildSupraAdjacencyMatrixFromFile(fileName,Flags,Nodes=0,FirstNode=0)\n %auto-detection of number of layers and nodes (if not forced with Nodes parameter)\n %input is expected to be a weighted edge list with 5 columns:\n %node layer node layer [weight]\n %if flag W is not specified, weight = 1 is assumed\n \n A = load(fileName);\n \n %if the first node is numbered by zero shift of 1 unit to relabel\n if FirstNode==0\n A(:,1) = A(:,1) + 1; \n A(:,3) = A(:,3) + 1; \n endif\n\n %number of layers, assuming they are numbered starting from 1\n L = max(max(A(:,2)),max(A(:,4)));\n\n if Nodes==0\n N = max(max(A(:,1)),max(A(:,3)));\n else\n N = Nodes;\n endif\n \n if max(ismember(Flags, 'W'))==0\n if size(A)(2) == 5\n %input is weighted, but the W flag is missing: reset weights to 1\n A(:,5) = 1;\n else\n %input is assumed to be an unweighted edge list\n %add a column of ones as weight for connected nodes\n A = [A 1.0*ones(size(A,1),1)];\n endif\n endif\n \n M = [A(:,1) + (A(:,2)-1)*N A(:,3) + (A(:,4)-1)*N A(:,5)];\n\n M = spconvert(M);\n %M = full(M)\n %we work with sparse matrices\n \n M(size(M,1)+1:N*L,size(M,2)+1:N*L) = 0;\n\n if ismember(\"D\",Flags) && ismember(\"U\",Flags)\n %input is undirected but provided in directed shape, we need to sum the transpose\n fprintf(2,\"#Input is undirected but in directed shape\\n\");\n M = M + M' - diag(diag(M));\n #the -diag() is to avoid counting twice the self-loops\n endif\n\n printf(\"#%d Layers %d Nodes %d Edges (file: %s)\\n\",L,N,sum(sum(M>0)),fileName);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction LayersTensor = BuildLayersTensor(Layers,Nodes,OmegaParameter,MultisliceType)\n %Build the network of layers used to build the multiplex\n if Layers>1\n if strcmp(MultisliceType,\"ordered\")\n LayersTensor = (diag(ones(1,Layers-1),1) + diag(ones(1,Layers-1),-1))*OmegaParameter;\n end\n if strcmp(MultisliceType, \"categorical\")\n LayersTensor = ones(Layers,Layers)*OmegaParameter;\n LayersTensor = LayersTensor - diag(diag(LayersTensor));\n end\n else\n LayersTensor = 0;\n fprintf(2,\"--> I will proceed with algorithm for one layer\\n\");\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction SupraAdjacencyMatrix = BuildSupraAdjacencyMatrix(NodesTensor,LayersTensor,Layers,Nodes)\n Identity = speye(Nodes);\n \n %simple and easy 
way, probably the correct one\n SupraAdjacencyMatrix = sparse(blkdiag(NodesTensor{}) + kron(LayersTensor,Identity));\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [QMatrix,LMatrix,Eigenvalues] = SolveEigenvalueProblem(Matrix)\n [QMatrix,LMatrix] = eig(Matrix);\n Eigenvalues=sort(diag(LMatrix));\nendfunction\n\nfunction [QMatrix,LMatrix] = GetLargestEigenv(Matrix)\n %This flag force the library to use methods to find approximated leading eigenvalue/vector\n %However, empirical evidence shows that such methods (coming with octave) are not so\n %stable, therefore if the final output looks \"weird\", a full exact method for the calculation\n %should be used. Unfortunately, the exact method will heavily slow down the computation\n %Use with care at your own risk\n UseFastMethodLargestEigenvalue = 0;\n\n %we must distinguish between symmetric and nonsymmetric matrices to have correct results\n\n if !UseFastMethodLargestEigenvalue\n [QMatrix,LMatrix] = eig(Matrix);\n [LambdaVector,IdxVector]=sort(diag(LMatrix));\n Idx = length(LambdaVector);\n LeadingEigenvalue = LambdaVector(Idx);\n LeadingEigenvector = QMatrix(:,IdxVector(Idx));\n \n QMatrix = LeadingEigenvector;\n LMatrix = LeadingEigenvalue;\n else\n if all(all(Matrix == Matrix'))\n %symmetric\n [QMatrix,LMatrix] = eigs(Matrix,1,'la');\n else\n %asymmetric\n [QMatrix,LMatrix] = eigs(Matrix,1,'lr');\n endif\n \n %check if the eigenvector has all negative components.. in that case we change the sign\n %first, set to zero everything that is so small that can create problems even if it compatible with zero\n QMatrix(find(QMatrix>-1e-12 & QMatrix<1e-12)) = 0;\n %now verify that all components are negative and change sign\n if all(floor(QMatrix<0) + floor(QMatrix==0))\n QMatrix = -QMatrix;\n endif\n\n endif\n\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction [NodesTensor] = SupraAdjacencyToNodesTensor(SupraAdjacencyMatrix,Layers,Nodes)\n % create the nodes tensor from a supradajcency matrix, ie, extracts diagonal blocks\n \n NodesTensor = {};\n\n for i = 1:Layers\n NodesTensor{i} = sparse(SupraAdjacencyMatrix(1+ (i-1)*Nodes:i*Nodes,1+ (i-1)*Nodes:i*Nodes));\n end\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [BlockTensor] = SupraAdjacencyToBlockTensor(SupraAdjacencyMatrix,Layers,Nodes)\n % create the nodes tensor from a supradajcency matrix, ie, extracts all blocks\n \n BlockTensor = {};\n\n for i = 1:Layers\n for j = 1:Layers\n %BlockTensor{(i-1)*Layers + j} = SupraAdjacencyMatrix(1+ (i-1)*Nodes:i*Nodes,1+ (j-1)*Nodes:j*Nodes);\n BlockTensor{i,j} = sparse(SupraAdjacencyMatrix(1+ (i-1)*Nodes:i*Nodes,1+ (j-1)*Nodes:j*Nodes));\n endfor\n endfor\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction Aggregate = GetAggregateMatrix(NodesTensor,Layers,Nodes) \n Aggregate = NodesTensor{1};\n \n for alpha = 2:Layers\n Aggregate += NodesTensor{alpha};\n endfor\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%% Reducibility of Multilayer Networks \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction LaplacianMatrix = GetLaplacianMatrix(AdjacencyMatrix)\n %Calculate the laplacian matrix from an adjacency matrix\n \n N = length(AdjacencyMatrix);\n u = ones(N,1);\n\n %laplacian\n LaplacianMatrix = 
diag(AdjacencyMatrix*u) - AdjacencyMatrix;\n \n %check\n if sum(LaplacianMatrix*u) > 1.e-8\n error(\"ERROR! The Laplacian matrix has rows that don't sum to 0. Aborting process.\\n\");\n sum(LaplacianMatrix*u)\n %pause;\n endif\nendfunction\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction DensityMatrix = BuildDensityMatrix(AdjacencyMatrix)\n %Calculate the density matrix from an adjacency matrix\n % References: \n % S. L. Braunstein, S. Ghosh, S. Severini, Annals of Combinatorics 10, No 3, (2006)\n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n \n DensityMatrix = GetLaplacianMatrix(AdjacencyMatrix);\n\n %normalize to degree sum\n DensityMatrix = DensityMatrix/(trace(DensityMatrix));\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction Eigenvalues = GetEigenvaluesOfDensityMatrix(DensityMatrix)\n %Calculate the eigenvalues of a density matrix\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n Eigenvalues = eig(DensityMatrix);\n\n %check that eigenvalues sum to 1\n if abs(sum(Eigenvalues)-1)>1e-8\n error(\"ERROR! Eigenvalues dont sum to 1! Aborting process.\");\n endif\nendfunction \n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction Eigenvalues = GetEigenvaluesOfDensityMatrixFromAdjacencyMatrix(AdjacencyMatrix)\n DensityMatrix = BuildDensityMatrix(AdjacencyMatrix);\n Eigenvalues = GetEigenvaluesOfDensityMatrix(DensityMatrix);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [RenyiEntropy] = GetRenyiEntropyFromAdjacencyMatrix(AdjacencyMatrix, q)\n %Calculate the quantum Renyi entropy of a network\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n % M. De Domenico, V. Nicosia, A. Arenas, V. Latora, Nature Communications 6, 6864 (2015)\n\n Eigenvalues = GetEigenvaluesOfDensityMatrixFromAdjacencyMatrix(AdjacencyMatrix);\n\n if q==1.\n %Von Neuman quantum entropy\n RenyiEntropy = -sum(Eigenvalues(Eigenvalues>0).*log(Eigenvalues(Eigenvalues>0)));\n else\n %Renyi quantum entropy\n RenyiEntropy = (1 - sum(Eigenvalues(Eigenvalues>0).^q))/(q-1);\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [JSD] = GetJensenShannonDivergence(AdjacencyMatrix1,AdjacencyMatrix2,VNEntropy1,VNEntropy2)\n %Calculate the Jensen-Shannon Divergence of two networks\n % References: \n % M. De Domenico, V. Nicosia, A. Arenas, V. 
Latora, Nature Communications 6, 6864 (2015)\n\n\n %M = 0.5 * (RHO + SIGMA)\n %JSD: 0.5 * DKL( RHO || M ) + 0.5 * DKL( SIGMA || M )\n %DKL( A || B ) = tr[ A log A - A log B ] = -entropy(A) - tr[ A log B ]\n %\n %JSD: 0.5 * ( -entropy(RHO) - entropy(SIGMA) - tr[ RHO log M ] - tr[ SIGMA log M ] )\n % -0.5 * [ entropy(RHO) + entropy(SIGMA) ] - tr[ M log M ] )\n % -0.5 * [ entropy(RHO) + entropy(SIGMA) ] + entropy(M) \n\n DensityMatrix1 = BuildDensityMatrix(AdjacencyMatrix1);\n DensityMatrix2 = BuildDensityMatrix(AdjacencyMatrix2);\n DensityMatrixM = (DensityMatrix1 +DensityMatrix2)/2.;\n \n EigenvaluesM = eig(DensityMatrixM);\n CrossEntropyM = -sum(EigenvaluesM(EigenvaluesM>0).*log(EigenvaluesM(EigenvaluesM>0)));\n \n JSD = CrossEntropyM - 0.5*(VNEntropy1 + VNEntropy2);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%% Topological Descriptors of Multilayer Networks \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction [AvGlobOverl] = GetAverageGlobalOverlapping(SupraAdjacencyMatrix,Layers,Nodes)\n % References: \n % M. De Domenico, V. Nicosia, A. Arenas, V. Latora, Nature Communications 6, 6864 (2015)\n\n if Layers==1\n fprintf(2,\"GetAverageGlobalOverlapping:ERROR! At least two layers required. Aborting process.\\n\");\n exit;\n endif\n\n NodesTensor = SupraAdjacencyToNodesTensor(SupraAdjacencyMatrix,Layers,Nodes);\n\n O = min(NodesTensor{1},NodesTensor{2});\n NormTotal = sum(sum(NodesTensor{1}));\n \n %assuming that LayerTensor is an undirected clique\n for l = 2:Layers\n O = min(O,NodesTensor{l});\n NormTotal = NormTotal + sum(sum(NodesTensor{l}));\n end\n\n AvGlobOverl = Layers*sum(sum(O))/NormTotal;\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction [AvGlobOverlMatrix] = GetAverageGlobalNodeOverlappingMatrix(SupraAdjacencyMatrix,Layers,Nodes)\n % References: \n % M. De Domenico, V. Nicosia, A. Arenas, V. Latora, Nature Communications 6, 6864 (2015)\n\n if Layers==1\n fprintf(2,\"GetAverageGlobalNodeOverlappingMatrix:ERROR! At least two layers required. Aborting process.\\n\");\n exit;\n endif\n\n NodesTensor = SupraAdjacencyToNodesTensor(SupraAdjacencyMatrix,Layers,Nodes);\n\n existingNodes = {};\n \n for l = 1:Layers\n #find rows where sum by column is > zero to identify existing nodes. Apply modulus Nodes\n col = mod(find(sum(NodesTensor{l},2)!=0 ),Nodes);\n #Impose that where modulus give 0, there should be the largest ID (= Nodes)\n col(col==0) = Nodes;\n #same with cols\n row = mod(find(sum(NodesTensor{l},1)!=0 ),Nodes)';\n row(row==0) = Nodes;\n \n #merge the two (this approach is necessary to deal also with directed networks)\n existingNodes{l} = union(col, row);\n end\n\n \n for l1 = 1:Layers\n AvGlobOverlMatrix(l1,l1) = 1;\n for l2 = (l1+1):Layers\n AvGlobOverlMatrix(l1,l2) = length(intersect( existingNodes{l1}, existingNodes{l2} ))/Nodes;\n AvGlobOverlMatrix(l2,l1) = AvGlobOverlMatrix(l1,l2);\n end\n end\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction [AvGlobOverlMatrix] = GetAverageGlobalOverlappingMatrix(SupraAdjacencyMatrix,Layers,Nodes)\n % References: \n % M. De Domenico, V. Nicosia, A. Arenas, V. Latora, Nature Communications 6, 6864 (2015)\n\n if Layers==1\n fprintf(2,\"GetAverageGlobalOverlappingMatrix:ERROR! At least two layers required. 
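% A self-contained toy of the pairwise edge-overlap measure computed by
% the overlapping functions above: the elementwise minimum of two layers,
% normalised by their total weights. The two 3x3 layers are made up.
L1 = [0 1 1; 1 0 0; 1 0 0];
L2 = [0 1 0; 1 0 1; 0 1 0];
O = min(L1, L2);
overlap_12 = 2*sum(sum(O)) / (sum(sum(L1)) + sum(sum(L2)));   % = 0.5 for this toy pair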
Aborting process.\\n\");\n exit;\n endif\n\n NodesTensor = SupraAdjacencyToNodesTensor(SupraAdjacencyMatrix,Layers,Nodes);\n\n for l1 = 1:Layers\n AvGlobOverlMatrix(l1,l1) = 1;\n Norm1 = sum(sum(NodesTensor{l1}));\n for l2 = (l1+1):Layers\n O = min(NodesTensor{l1},NodesTensor{l2});\n AvGlobOverlMatrix(l1,l2) = 2*sum(sum(O))/(Norm1 + sum(sum(NodesTensor{l2})));\n AvGlobOverlMatrix(l2,l1) = AvGlobOverlMatrix(l1,l2);\n end\n end\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [InterPearson,InterSpearman] = GetInterAssortativityTensor(SupraAdjacencyMatrix,Layers,Nodes,Flags,Type)\n\n if Layers==1\n fprintf(2,\"GetInterAssortativityTensor: ERROR! At least two layers required. Aborting process.\\n\");\n exit;\n endif\n\n NodesTensor = SupraAdjacencyToNodesTensor(SupraAdjacencyMatrix,Layers,Nodes);\n\n InterPearson = sparse(Layers,Layers);\n InterSpearman = sparse(Layers,Layers);\n\n if strcmp(Type,\"IO\") || strcmp(Type,\"OI\")\n InDegree = {};\n OutDegree = {};\n\n for l = 1:Layers\n InDegree{l} = GetMultiInDegree(NodesTensor{l},1,Nodes,Flags);\n OutDegree{l} = GetMultiOutDegree(NodesTensor{l},1,Nodes,Flags);\n end\n\n for l1 = 1:Layers\n for l2 = 1:Layers\n InterPearson(l1,l2) = corr(InDegree{l1},OutDegree{l2});\n InterSpearman(l1,l2) = spearman(InDegree{l1},OutDegree{l2});\n end\n end\n\n if strcmp(Type,\"OI\")\n InterPearson = InterPearson';\n InterSpearman = InterSpearman';\n endif\n else\n Degree = {};\n \n if strcmp(Type,\"OO\")\n for l = 1:Layers\n Degree{l} = GetMultiOutDegree(NodesTensor{l},1,Nodes,Flags);\n end\n endif\n \n if strcmp(Type,\"II\")\n for l = 1:Layers\n Degree{l} = GetMultiInDegree(NodesTensor{l},1,Nodes,Flags);\n end\n endif\n \n if strcmp(Type,\"TT\")\n for l = 1:Layers\n Degree{l} = GetMultiDegree(NodesTensor{l},1,Nodes,Flags);\n end\n endif\n \n \n for l1 = 1:Layers\n InterPearson(l1,l1) = 1;\n InterSpearman(l1,l1) = 1;\n for l2 = (l1+1):Layers\n InterPearson(l1,l2) = corr(Degree{l1},Degree{l2});\n InterSpearman(l1,l2) = spearman(Degree{l1},Degree{l2});\n \n InterPearson(l2,l1) = InterPearson(l1,l2);\n InterSpearman(l2,l1) = InterSpearman(l1,l2);\n end\n end\n endif\n\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction BinaryMatrix = binarizeMatrix(Matrix)\n BinaryMatrix = double(Matrix|Matrix);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiDegreeVector = GetMultiDegree(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n \n MultiInDegreeVector = GetMultiInDegree(binarizeMatrix(SupraAdjacencyMatrix),Layers,Nodes,Flags);\n MultiOutDegreeVector = GetMultiOutDegree(binarizeMatrix(SupraAdjacencyMatrix),Layers,Nodes,Flags);\n \n if ismember(\"U\",Flags)\n MultiDegreeVector = (MultiInDegreeVector + MultiOutDegreeVector)/2;\n else\n MultiDegreeVector = MultiInDegreeVector + MultiOutDegreeVector;\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiOutDegreeVector = GetMultiOutDegree(SupraAdjacencyMatrix,Layers,Nodes,Flags) \n % References: \n % M. De Domenico et al, Phys. Rev. 
X 3, 041022 (2013)\n\n \n if ismember(\"U\",Flags) \n %we proceed by considering the interlayers separately\n BlockTensor = SupraAdjacencyToBlockTensor(binarizeMatrix(SupraAdjacencyMatrix),Layers,Nodes);\n \n MultiOutDegreeVector = sparse(Nodes,1);\n\n %with the matrix U we reweight interlinks corresponding to same replicas\n U = ones(Nodes,Nodes);\n U(logical(speye(size(U)))) = 1/2;\n\n for i = 1:Layers\n for j = 1:Layers\n if i==j\n MultiOutDegreeVector += (BlockTensor{i,j}-diag(diag(BlockTensor{i,j})))*ones(Nodes,1) + diag(BlockTensor{i,j})*0.5;\n else\n MultiOutDegreeVector += (BlockTensor{i,j} .* U)*ones(Nodes,1);\n endif\n endfor\n endfor\n else\n SupraDegree = binarizeMatrix(SupraAdjacencyMatrix)*ones(Nodes*Layers,1); \n\n MultiOutDegreeVector = sum(reshape(SupraDegree,Nodes,Layers),2);\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiInDegreeVector = GetMultiInDegree(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n \n if ismember(\"U\",Flags)\n %using the following would consider multiple times the interlinks\n %SupraDegree = (binarizeMatrix(SupraAdjacencyMatrix)*ones(Nodes*Layers,1) + (ones(1,Nodes*Layers)*binarizeMatrix(SupraAdjacencyMatrix))')/2;\n \n %we proceed by considering the interlayers separately\n BlockTensor = SupraAdjacencyToBlockTensor(binarizeMatrix(SupraAdjacencyMatrix),Layers,Nodes);\n \n MultiInDegreeVector = sparse(Nodes,1);\n\n %with the matrix U we reweight interlinks corresponding to same replicas\n U = ones(Nodes,Nodes);\n U(logical(speye(size(U)))) = 1/2;\n\n for i = 1:Layers\n for j = 1:Layers\n if i==j\n MultiInDegreeVector += (ones(1,Nodes)*(BlockTensor{i,j}-diag(diag(BlockTensor{i,j}))))' + diag(BlockTensor{i,j})*0.5;\n else\n MultiInDegreeVector += (ones(1,Nodes)*(BlockTensor{i,j} .* U))';\n endif\n endfor\n endfor\n else\n SupraDegree = (ones(1,Nodes*Layers)*binarizeMatrix(SupraAdjacencyMatrix))';\n\n MultiInDegreeVector = sum(reshape(SupraDegree,Nodes,Layers),2);\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiDegreeVector = GetMultiDegreeSum(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n MultiInDegreeVector = GetMultiInDegreeSum(binarizeMatrix(SupraAdjacencyMatrix),Layers,Nodes,Flags);\n MultiOutDegreeVector = GetMultiOutDegreeSum(binarizeMatrix(SupraAdjacencyMatrix),Layers,Nodes,Flags);\n \n if ismember(\"U\",Flags)\n MultiDegreeVector = (MultiInDegreeVector + MultiOutDegreeVector)/2;\n else\n MultiDegreeVector = MultiInDegreeVector + MultiOutDegreeVector;\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiOutDegreeVector = GetMultiOutDegreeSum(SupraAdjacencyMatrix,Layers,Nodes,Flags) \n %this degree include multiple times the interlinks\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n\n SupraDegree = binarizeMatrix(SupraAdjacencyMatrix)*ones(Nodes*Layers,1); \n MultiOutDegreeVector = sum(reshape(SupraDegree,Nodes,Layers),2);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiInDegreeVector = GetMultiInDegreeSum(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n %this degree include multiple times the interlinks\n % References: \n % M. De Domenico et al, Phys. Rev. 
X 3, 041022 (2013)\n\n \n SupraDegree = (ones(1,Nodes*Layers)*binarizeMatrix(SupraAdjacencyMatrix))'; \n MultiInDegreeVector = sum(reshape(SupraDegree,Nodes,Layers),2);\nendfunction\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiStrengthVector = GetMultiStrength(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n \n MultiInStrengthVector = GetMultiInStrength(SupraAdjacencyMatrix,Layers,Nodes,Flags);\n MultiOutStrengthVector = GetMultiOutStrength(SupraAdjacencyMatrix,Layers,Nodes,Flags);\n \n if ismember(\"U\",Flags)\n MultiStrengthVector = (MultiInStrengthVector + MultiOutStrengthVector)/2;\n else\n MultiStrengthVector = MultiInStrengthVector + MultiOutStrengthVector;\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiOutStrengthVector = GetMultiOutStrength(SupraAdjacencyMatrix,Layers,Nodes,Flags) \n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n \n if ismember(\"U\",Flags) \n %we proceed by considering the interlayers separately\n BlockTensor = SupraAdjacencyToBlockTensor(SupraAdjacencyMatrix,Layers,Nodes);\n \n MultiOutStrengthVector = sparse(Nodes,1);\n\n %with the matrix U we reweight interlinks corresponding to same replicas\n U = ones(Nodes,Nodes);\n U(logical(speye(size(U)))) = 1/2;\n\n for i = 1:Layers\n for j = 1:Layers\n if i==j\n MultiOutStrengthVector += (BlockTensor{i,j}-diag(diag(BlockTensor{i,j})))*ones(Nodes,1) + diag(BlockTensor{i,j})*0.5;\n else\n MultiOutStrengthVector += (BlockTensor{i,j} .* U)*ones(Nodes,1);\n endif\n endfor\n endfor\n else\n SupraStrength = SupraAdjacencyMatrix*ones(Nodes*Layers,1); \n\n MultiOutStrengthVector = sum(reshape(SupraStrength,Nodes,Layers),2);\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiInStrengthVector = GetMultiInStrength(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n \n if ismember(\"U\",Flags)\n %using the following would consider multiple times the interlinks\n %SupraStrength = (SupraAdjacencyMatrix*ones(Nodes*Layers,1) + (ones(1,Nodes*Layers)*SupraAdjacencyMatrix)')/2;\n \n %we proceed by considering the interlayers separately\n BlockTensor = SupraAdjacencyToBlockTensor(SupraAdjacencyMatrix,Layers,Nodes);\n \n MultiInStrengthVector = sparse(Nodes,1);\n\n %with the matrix U we reweight interlinks corresponding to same replicas\n U = ones(Nodes,Nodes);\n U(logical(speye(size(U)))) = 1/2;\n\n for i = 1:Layers\n for j = 1:Layers\n if i==j\n MultiInStrengthVector += (ones(1,Nodes)*(BlockTensor{i,j}-diag(diag(BlockTensor{i,j}))))' + diag(BlockTensor{i,j})*0.5;\n else\n MultiInStrengthVector += (ones(1,Nodes)*(BlockTensor{i,j} .* U))';\n endif\n endfor\n endfor\n else\n SupraStrength = (ones(1,Nodes*Layers)*SupraAdjacencyMatrix)';\n\n MultiInStrengthVector = sum(reshape(SupraStrength,Nodes,Layers),2);\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiStrengthVector = GetMultiStrengthSum(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n % References: \n % M. De Domenico et al, Phys. Rev. 
X 3, 041022 (2013)\n\n MultiInStrengthVector = GetMultiInStrengthSum(SupraAdjacencyMatrix,Layers,Nodes,Flags);\n MultiOutStrengthVector = GetMultiOutStrengthSum(SupraAdjacencyMatrix,Layers,Nodes,Flags);\n \n if ismember(\"U\",Flags)\n MultiStrengthVector = (MultiInStrengthVector + MultiOutStrengthVector)/2;\n else\n MultiStrengthVector = MultiInStrengthVector + MultiOutStrengthVector;\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiOutStrengthVector = GetMultiOutStrengthSum(SupraAdjacencyMatrix,Layers,Nodes,Flags) \n %this Strength include multiple times the interlinks\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n\n SupraStrength = SupraAdjacencyMatrix*ones(Nodes*Layers,1); \n MultiOutStrengthVector = sum(reshape(SupraStrength,Nodes,Layers),2);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction MultiInStrengthVector = GetMultiInStrengthSum(SupraAdjacencyMatrix,Layers,Nodes,Flags)\n %this Strength include multiple times the interlinks\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n\n \n SupraStrength = (ones(1,Nodes*Layers)*SupraAdjacencyMatrix)'; \n MultiInStrengthVector = sum(reshape(SupraStrength,Nodes,Layers),2);\nendfunction\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction CentralityVector = GetOverallKatzCentrality(SupraAdjacencyMatrix,Layers,Nodes)\n %we pass the transpose of the transition matrix to get the left eigenvectors\n % References: \n % M. De Domenico, A. Sole-Ribalta, E. Omodei, S. Gomez, A. Arenas, Nature Communications 6, 6868 (2015)\n \n [QMatrix,LMatrix] = GetLargestEigenv(SupraAdjacencyMatrix');\n LeadingEigenvalue = LMatrix;\n \n %Katz kernel tensor\n deltaTensor = kron(speye(Nodes,Nodes),speye(Layers,Layers));\n \n %this ensures convergence of the Katz kernel tensor\n a = 0.99999/abs(LeadingEigenvalue);\n\n KatzKernelTensor = inv(deltaTensor - a*SupraAdjacencyMatrix);\n\n KatzCentralitySupraVector = KatzKernelTensor * ones(Nodes*Layers,1);\n CentralityVector = sum(reshape(KatzCentralitySupraVector,Nodes,Layers),2); \n CentralityVector = CentralityVector/max(CentralityVector);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction CentralityVector = GetOverallEigenvectorCentrality(SupraAdjacencyMatrix,Layers,Nodes)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n % M. De Domenico, A. Sole-Ribalta, E. Omodei, S. Gomez, A. Arenas, Nature Communications 6, 6868 (2015)\n \n %we pass the transpose of the transition matrix to get the left eigenvectors\n [LeadingEigenvector,LeadingEigenvalue] = GetLargestEigenv(SupraAdjacencyMatrix');\n \n %LeadingEigenvector = LeadingEigenvector / sum(LeadingEigenvector);\n CentralityVector = sum(reshape(LeadingEigenvector,Nodes,Layers),2);\n CentralityVector = CentralityVector/max(CentralityVector);\n\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction CentralityVector = GetOverallHubCentrality(SupraAdjacencyMatrix,Layers,Nodes)\n %see review http://arxiv.org/pdf/0805.3322v2.pdf\n % References: \n % M. De Domenico, A. Sole-Ribalta, E. Omodei, S. Gomez, A. 
Arenas, Nature Communications 6, 6868 (2015)\n \n %build the A A'\n SupraMatrix = SupraAdjacencyMatrix*(SupraAdjacencyMatrix');\n\n %we pass the matrix to get the right eigenvectors\n %to deal with the possible degeneracy of the leading eigenvalue, we add an eps to the matrix\n %this ensures that we can apply the Perron-Frobenius theorem to say that there is a unique\n %leading eigenvector. Here we add eps, a very very small number (<1e-8, generally)\n [LeadingEigenvector,LeadingEigenvalue] = GetLargestEigenv(SupraMatrix+eps);\n\n %LeadingEigenvector = LeadingEigenvector / sum(LeadingEigenvector);\n CentralityVector = sum(reshape(LeadingEigenvector,Nodes,Layers),2);\n CentralityVector = CentralityVector/max(CentralityVector);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction CentralityVector = GetOverallAuthCentrality(SupraAdjacencyMatrix,Layers,Nodes)\n %see review http://arxiv.org/pdf/0805.3322v2.pdf\n % References: \n % M. De Domenico, A. Sole-Ribalta, E. Omodei, S. Gomez, A. Arenas, Nature Communications 6, 6868 (2015)\n\n\n %build the A' A\n SupraMatrix = (SupraAdjacencyMatrix') * SupraAdjacencyMatrix;\n\n %we pass the matrix to get the right eigenvectors\n %to deal with the possible degeneracy of the leading eigenvalue, we add an eps to the matrix\n %this ensures that we can apply the Perron-Frobenius theorem to say that there is a unique\n %leading eigenvector. Here we add eps, a very very small number (<1e-8, generally)\n [LeadingEigenvector,LeadingEigenvalue] = GetLargestEigenv(SupraMatrix+eps);\n\n CentralityVector = sum(reshape(LeadingEigenvector,Nodes,Layers),2);\n CentralityVector = CentralityVector/max(CentralityVector);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction CentralityVector = GetOverallMultiplexityCentrality(SupraAdjacencyMatrix,Layers,Nodes)\n #build the block tensor\n BlockTensor = SupraAdjacencyToBlockTensor(SupraAdjacencyMatrix,Layers,Nodes);\n existingNodes = {};\n nodeMultiplexity = zeros(1,Nodes);\n \n for l = 1:Layers\n #find rows where sum by column is > zero to identify existing nodes. Apply modulus Nodes\n col = mod(find(sum(BlockTensor{l,l},2)!=0 ),Nodes);\n #Impose that where modulus give 0, there should be the largest ID (= Nodes)\n col(col==0) = Nodes;\n #same with cols\n row = mod(find(sum(BlockTensor{l,l},1)!=0 ),Nodes)';\n row(row==0) = Nodes;\n \n #merge the two (this approach is necessary to deal also with directed networks)\n existingNodes{l} = union(col, row);\n for n = 1:Nodes\n nodeMultiplexity(n) += length(find(existingNodes{l}==n));\n end\n end\n \n CentralityVector = nodeMultiplexity'/Layers;\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [S,Q] = GetMultisliceCommunityGeneralizedLouvain(NodesTensor, Layers, Nodes, GammaParameter, OmegaParameter, Type)\n % This function is an interface between the genlouvain (see the corresponding function\n % for reference and license) and muxNet (see the above functions for reference and\n % license)\n \n % Type can be: \"ordered\" or \"categorical\"\n % See http://netwiki.amath.unc.edu/GenLouvain/GenLouvain\n \n % Ordered Multislice Matrix\n % Define the cell array A of square symmetric NxN matrices of equal size each representing \n % one of the layers ordered, undirected network \"slices\". 
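% A sketch of calling the multislice community-detection wrapper defined
% here, on two made-up 4-node undirected layers with ordinal coupling.
% gamma = 1 and omega = 0.5 are illustrative parameter choices.
A1 = [0 1 1 0; 1 0 1 0; 1 1 0 0; 0 0 0 0];
A2 = [0 0 0 0; 0 0 1 1; 0 1 0 1; 0 1 1 0];
[S,Q] = GetMultisliceCommunityGeneralizedLouvain({A1, A2}, 2, 4, 1, 0.5, "ordered");
% S is returned as a Nodes x Layers matrix of module labels and Q is
% already normalised by twomu inside the wrapper.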
\n \n % Categorical Multislice Matrix\n % The distinction between ordered slices and categorical slices manifests in the presence \n % of all-to-all identity arcs between slices.\n \n if strcmp(Type,\"categorical\")\r\n B = spalloc(Nodes*Layers,Nodes*Layers,(Nodes+Layers)*Nodes*Layers);\n end\n if strcmp(Type, \"ordered\")\n B = spalloc(Nodes*Layers,Nodes*Layers,Nodes*Nodes*Layers+2*Nodes*Layers);\n end\n\n twomu = 0;\n \n for s = 1:Layers\n k = sum(NodesTensor{s});\n twom = sum(k);\n twomu = twomu + twom;\n indx = [1:Nodes] + (s-1)*Nodes;\n B(indx,indx) = NodesTensor{s} - GammaParameter* k' * k /twom;\n end\n\n if strcmp(Type,\"categorical\")\r\n twomu = twomu + Layers*OmegaParameter*Nodes*(Layers-1);\n all2all = Nodes*[(-Layers+1):-1,1:(Layers-1)];\n B = B + OmegaParameter*spdiags(ones(Nodes*Layers,2*Layers-2),all2all,Nodes*Layers,Nodes*Layers);\n end\n if strcmp(Type, \"ordered\")\n twomu = twomu + 2*OmegaParameter*Nodes*(Layers-1);\n B = B + OmegaParameter*spdiags(ones(Nodes*Layers,2),[-Nodes,Nodes],Nodes*Layers,Nodes*Layers);\n end\n \n [S,Q] = genlouvain(B);\n Q = Q/twomu;\n Q = full(Q);\n S = reshape(S,Nodes,Layers);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%% Random Walk in Multilayer Networks\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction SupraTransitionMatrix = BuildSupraTransitionMatrixFromSupraAdjacencyMatrix(SupraAdjacencyMatrix,Layers,Nodes)\n % References: \n % M. De Domenico et al, Phys. Rev. X 3, 041022 (2013)\n % M. De Domenico, A. Sole-Ribalta, S. Gomez, A. Arenas, PNAS 11, 8351 (2014) \n\n \n Order = Layers*Nodes;\n #SupraUnitVector = ones(Order,1);\n\n SupraAdjacencyMatrix = sparse(SupraAdjacencyMatrix);\n SupraStrengthMatrix = sum(SupraAdjacencyMatrix,2);\n DisconnectedNodes = size(SupraStrengthMatrix(SupraStrengthMatrix==0),1);\n SupraStrengthMatrix = sparse(diag(SupraStrengthMatrix));\n \n if DisconnectedNodes>0\n fprintf(2,\"#Trapping nodes (no outgoing-links): %d\\n\",DisconnectedNodes);\n endif\n \n SupraStrengthMatrix(SupraStrengthMatrix(:)~=0) = 1./SupraStrengthMatrix(SupraStrengthMatrix(:)~=0);\n SupraTransitionMatrix = SupraStrengthMatrix*SupraAdjacencyMatrix;\n\n alpha = 0.85; \n %to normalize correctly in the case of nodes with no outgoing links:\n SupraTransitionMatrix(find(sum(SupraTransitionMatrix,2)==0),:) = 1/Order;\n SupraTransitionMatrix(find(sum(SupraTransitionMatrix,2)!=0),:) = alpha * SupraTransitionMatrix(find(sum(SupraTransitionMatrix,2)!=0),:) + (1-alpha)/Order;\n \n %no more disconnected nodes\n DisconnectedNodes = 0;\n \n %check\n if abs(sum(sum(SupraTransitionMatrix,2))-Order+DisconnectedNodes) > 1e-6\n error(\" BuildSupraTransitionMatrixFromSupraAdjacencyMatrix: ERROR! Problems in building the supra-transition matrix -> %f. Aborting process.\",abs(sum(sum(SupraTransitionMatrix,2))-Order+DisconnectedNodes));\n %pause;\n endif\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction CentralityVector = GetOverallPageRankCentrality(SupraTransitionMatrix,Layers,Nodes)\n % References: \n % M. De Domenico, A. Sole-Ribalta, E. Omodei, S. Gomez, A. Arenas, Nature Communications 6, 6868 (2015)\n \n %we pass the transpose of the transition matrix to get the left eigenvectors\n [LeadingEigenvector,LeadingEigenvalue] = GetLargestEigenv(SupraTransitionMatrix');\n \n if abs(LeadingEigenvalue-1)>1e-6\n error(\"\\tGetRWOverallOccupationProbability: ERROR! 
Expected leading eigenvalue equal to 1, obtained %f\\n\",LeadingEigenvalue);\n %exit\n endif\n \n LeadingEigenvector = LeadingEigenvector / sum(LeadingEigenvector);\n CentralityVector = sum(reshape(LeadingEigenvector,Nodes,Layers),2);\n CentralityVector = CentralityVector/max(CentralityVector);\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%% Community Detection in Multislice Networks\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%GENLOUVAIN Louvain-like community detection, specified quality function.\r\n% Version 1.2 (July 2012)\r\n%\r\n% [S,Q] = GENLOUVAIN(B) with matrix B implements a Louvain-like greedy\r\n% community detection method using the modularity/quality matrix B that\r\n% encodes the quality function Q, defined by summing over all elements\r\n% B(i,j) such that nodes i and j are placed in the same community.\r\n% Following Blondel et al. 2008, the algorithm proceeds in two phases\r\n% repeated iteratively: quality is optimized by moving one node at a time\r\n% until no such moves improve quality; the communities found to that\r\n% point are then aggregated to build a new network where each node\r\n% represents a community. The output vector S encodes the obtained\r\n% community assignments, with S(i) identifying the community to which\r\n% node i has been assigned. The output Q gives the quality of the\r\n% resulting partition of the network.\r\n%\r\n% [S,Q] = GENLOUVAIN(B) with function handle B such that B(i) returns\r\n% the ith column of the modularity/quality matrix uses this function\r\n% handle (to reduce the memory footprint for large networks) until the\r\n% number of groups is less than 10000 and then builds the B matrix\r\n% corresponding to the new aggregated network in subsequent passes. Use\r\n% [S,Q] = GENLOUVAIN(B,limit) to change this default=10000 limit.\r\n%\r\n% [S,Q] = GENLOUVAIN(B,limit,0) suppresses displayed text output.\r\n%\r\n% [S,Q] = GENLOUVAIN(B,limit,verbose,0) forces index-ordered (cf.\r\n% randperm-ordered) consideration of nodes, for deterministic results.\r\n%\r\n% Example (using adjacency matrix A)\r\n% k = full(sum(A));\r\n% twom = sum(k); \r\n% B = @(v) A(:,v) - k'*k(v)/twom;\r\n% [S,Q] = genlouvain(B); \r\n% Q = Q/twom;\r\n% finds community assignments for the undirected network encoded by the\r\n% symmetric adjacency matrix A. For small networks, one may obtain\r\n% reasonably efficient results even more simply by handling the full\r\n% modularity/quality matrix\r\n% B = A - k'*k/twom;\r\n% instead of the function handle. Intended use also includes the\r\n% \"multislice\" network quality function of Mucha et al. 2010, where B\r\n% encodes the interactions as an equivalent matrix (see examples posted\r\n% online at http://netwiki.amath.unc.edu/GenLouvain).\r\n%\r\n% Notes:\r\n% The matrix represented by B must be both symmetric and square. This\r\n% condition is not checked thoroughly if B is a function handle, but is\r\n% essential to the proper use of this routine.\r\n%\r\n% Under default options, this routine can return different results from\r\n% run to run because it considers nodes in pseudorandom (randperm)\r\n% order. Because of the potentially large number of nearly-optimal\r\n% partitions (Good et al. 2010), one is encouraged to investigate\r\n% results of repeated applications of this code (and, if possible, of\r\n% other computational heuristics). 
To force deterministic behavior,\r\n% ordering nodes by their index, pass zero as the fourth input:\r\n% GENLOUVAIN(B,limit,verbose,0).\r\n%\r\n% This algorithm is only \"Louvain-like\" in the sense that the two\r\n% phases are used iteratively in the same manner as in the Louvain\r\n% algorithm (Blondel et al. 2008). Because it operates on a general\r\n% quality/modularity matrix B, it does not include any analytical\r\n% formulas for quickly identifying the change in modularity from a\r\n% proposed move nor any improved efficiency obtained by their use. If\r\n% your problem uses one of the well-used null models included in other\r\n% codes, those codes should be much faster for your task.\r\n%\r\n% Past versions had a problem where accumulated subtraction error might\r\n% lead to an infinite loop with each pass oscillating between two or\r\n% more partitions yet incorrectly identifying increases in quality. We\r\n% believe this problem has been corrected by the relative change checks\r\n% in lines 178 and 273. If you encounter a similar problem, notify\r\n% Peter Mucha (mucha@unc.edu).\r\n%\r\n% The output Q provides the sum over the appropriate elements of B\r\n% without any rescaling. As such, we have rescaled Q in the example\r\n% above by 2m = sum(k) so that Q <= 1.\r\n%\r\n% The '~' for ignoring function returns (used for \"max\" below) are not\r\n% supported prior to R2009b. Replace (e.g. 'dummy') for pre-2009b.\r\n%\r\n% By using this code, the user implicitly acknowledges that the authors\r\n% accept no liability associated with that use. (What are you doing\r\n% with it anyway that might cause there to be a potential liability?!?)\r\n%\r\n% References:\r\n% Blondel, Vincent D., Jean-Loup Guillaume, Renaud Lambiotte, and\r\n% Etienne Lefebvre, \"Fast unfolding of communities in large networks,\"\r\n% Journal of Statistical Mechanics: Theory and Experiment, P10008\r\n% (2008).\r\n%\r\n% Fortunato, Santo, \"Community detection in graphs,\" Physics Reports\r\n% 486, 75-174 (2010).\r\n%\r\n% Mucha, Peter J., Thomas Richardson, Kevin Macon, Mason A. Porter, and\r\n% Jukka-Pekka Onnela. \"Community Structure in Time-Dependent,\r\n% Multiscale, and Multiplex Networks,\" Science 328, 876-878 (2010).\r\n%\r\n% Porter, M. A., J. P. Onnela, and P. J. Mucha, \"Communities in\r\n% networks,\" Notices of the American Mathematical Society 56, 1082-1097\r\n% & 1164-1166 (2009).\r\n%\r\n% Acknowledgments:\r\n% A special thank you to Stephen Reid, whose greedy.m code was the\r\n% original version that has over time developed into the present code.\r\n% Thank you also to Dani Bassett, Jesse Blocher, Mason Porter and Simi\r\n% Wang for inspiring improvements to the code.\r\n%\r\n% Citation: If you use this code, please cite as\r\n% Inderjit S. Jutla, Lucas G. S. Jeub, and Peter J. Mucha,\r\n% \"A generalized Louvain method for community detection implemented\r\n% in MATLAB,\" http://netwiki.amath.unc.edu/GenLouvain (2011-2012).\r\n\nfunction [S,Q] = genlouvain(B,limit,verbose,randord) \r\n %set default for maximum size of modularity matrix\r\n if nargin<2\r\n limit = 10000;\r\n end\r\n \r\n %set level of reported/displayed text output\r\n if nargin<3\r\n verbose = 1;\r\n end\r\n if verbose\r\n mydisp = @(s) disp(s);\r\n else\r\n mydisp = @(s) disp('');\r\n end\r\n \r\n %set randperm- v. 
index-ordered\r\n if nargin<4\r\n randord = 1;\r\n end\r\n if randord\r\n myord = @(n) randperm(n);\r\n else\r\n myord = @(n) 1:n;\r\n end\r\n \r\n %initialise variables and do symmetry check\r\n if isa(B,'function_handle')\r\n n=length(B(1));\r\n S=(1:n)';\r\n M=B;\r\n it(:,1)=M(1);\r\n ii=find(it(2:end)>0,3)+1;\r\n ii=[1,ii'];\r\n for i=2:length(ii),\r\n it(:,i)=M(ii(i));\r\n end\r\n it=it(ii,:);\r\n if nnz(it-it'),\r\n disp('WARNING: Function handle does not correspond to a symmetric matrix')\r\n end\r\n else\r\n n = length(B);\r\n S = (1:n)';\r\n M=B;\r\n if nnz(M-M'),\r\n B=(B+B')/2; disp('WARNING: Forced symmetric B matrix')\r\n end\r\n end\r\n \r\n dtot=0; %keeps track of total change in modularity\r\n \r\n %Run using function handle, if provided\r\n while (isa(M,'function_handle')) %loop around each \"pass\" (in language of Blondel et al) with B function handle\r\n \r\n y = unique(S); %unique also puts elements in ascending order\r\n Sb=S; \r\n yb = [];\r\n \r\n clocktime=clock;\r\n mydisp(['Merging ',num2str(length(y)),' communities ',num2str(clocktime(4:6))]);\r\n \r\n dstep=1;\t%keeps track of change in modularity in pass\r\n \r\n while (~isequal(yb,y))&&(dstep/dtot>2*eps) %This is the loop around Blondel et al's \"first phase\"\r\n % Q = 0;\r\n % %improves performance considerably if one doesn't compute modularity\r\n % %for the first pass (for display purposes only)\r\n % P = sparse(y,1:length(y),1); %Modularity Calculation\r\n % for i = 1:length(M(1))\r\n % Q = Q + (P*M(i))'*P(:,i);\r\n % end\r\n % mydisp([num2str(length(unique(y))),' ',num2str(Q)])\r\n yb = y;\r\n G=sparse(1:length(y),y,1); %no-mex version\r\n dstep=0;\r\n \r\n for i = myord(length(M(1))) %loop over nodes in pseudorandom order \r\n \r\n Mi = M(i);\r\n \r\n u = unique([y(i);y(Mi>0)]);\r\n \r\n dH=Mi'*G(:,u); %no-mex version\r\n %dH=modchange_y(Mi,y,u);\r\n \r\n yi=find(u==y(i));\r\n dH(yi) = dH(yi) - Mi(i);\r\n \r\n [~, k] = max(dH);\r\n \r\n %only move to different group if it is more optimized than\r\n %staying in same group (up to error with double precision)\r\n if(dH(k)>(dH(yi)))\r\n \tdtot=dtot+dH(k)-dH(yi);\r\n \tdstep=dstep+dH(k)-dH(yi);\r\n G(i,y(i))=0; %no-mex version\r\n G(i,u(k))=1; %no-mex version\r\n y(i) = u(k); \r\n end\r\n \r\n end\r\n \r\n mydisp([num2str(length(unique(y))),' change: ',num2str(dstep),...\r\n ' total: ',num2str(dtot),' relative: ',num2str(dstep/dtot)]);\r\n end\r\n \r\n %[S,y] = tidyconfig_c(S,y); %note tidyconfig reorders along node numbers\r\n y = tidyconfig(y); %no-mex version\r\n for i = 1:length(y) %no-mex version\r\n S(S==i) = y(i); %no-mex version\r\n end %no-mex version\r\n \r\n %calculate modularity and return if converged\r\n if isequal(Sb,S)\r\n Q=0;\r\n P=sparse(y,1:length(y),1);\r\n for i=1:length(M(1))\r\n Q=Q+(P*M(i))'*P(:,i);\r\n end\r\n return\r\n end\r\n \r\n %check wether #groups < limit \r\n t = length(unique(S));\r\n if (t>limit)\r\n M=@(i) metanetwork_i(B,S,t,i); %use function handle if #groups>limit \r\n else\r\n J = zeros(t); %convert to matrix if #groups small enough\r\n for c=1:t\r\n J(:,c)=metanetwork_i(B,S,t,c);\r\n end\r\n B = J;\r\n M=B;\r\n end\r\n \r\n end\r\n \r\n \r\n S2 = (1:length(B))';\r\n Sb = [];\r\n while ~isequal(Sb,S2) %loop around each \"pass\" (in language of Blondel et al) with B matrix\r\n \r\n y = unique(S2); %unique also puts elements in ascending order\r\n Sb = S2;\r\n \r\n clocktime=clock;\r\n mydisp(['Merging ',num2str(length(y)),' communities ',num2str(clocktime(4:6))]);\r\n \r\n yb = [];\r\n \r\n 
G=sparse(1:length(y),y,1);\r\n \r\n dstep=1;\r\n \r\n % P = G';\r\n % Q = sum(sum((P*M).*(P)));\r\n % Qb = -inf;\r\n \r\n while (~isequal(yb,y)) && (dstep/dtot>2*eps) %This is the loop around Blondel et al's \"first phase\"\r\n \r\n % mydisp([num2str(length(unique(y))),' ',num2str(Q)])\r\n yb = y;\r\n % Qb=Q;\r\n \r\n dstep=0;\r\n \r\n for i = myord(length(M))\r\n u = unique([y(i);y(M(:,i)>0)]);\r\n % dH = modchange_y(M(:,i),y,u); %relative changes in modularities\r\n dH = (M(:,i)'*G(:,u));\r\n \r\n yi=find(u==y(i));\r\n dH(yi) = dH(yi) - M(i,i);\r\n [~, k] = max(dH);\r\n %%only move to different group if it is more optimized than\r\n %%staying in same group (up to error with double precision)\r\n if(dH(k)>(dH(yi)))\r\n \tdtot=dtot+dH(k)-dH(yi);\r\n \tdstep=dstep+dH(k)-dH(yi);\r\n \tG(i,y(i))=0;\r\n \tG(i,u(k))=1;\r\n y(i) = u(k);\r\n end\r\n end\r\n \r\n % P=sparse(y,1:length(y),1);\r\n % Q = sum(sum((P*M).*(P)));\r\n \r\n end\r\n \r\n y = tidyconfig(y); %note tidyconfig reorders along node numbers\r\n for i = 1:length(y)\r\n S(S==i) = y(i);\r\n S2(S2==i) = y(i);\r\n end\r\n \r\n if isequal(Sb,S2)\r\n \tP=G';\r\n \tQ=sum(sum((P*M).*P));\r\n \treturn\r\n end\r\n \r\n M = metanetwork(B,S2); \r\n end\r\nendfunction\n\r\n%-----%\r\nfunction M = metanetwork(J,S)\r\n %Computes new aggregated network (communities --> nodes)\r\n if(issparse(J))\r\n m=max(S);\r\n [i,j,v]=find(J);\r\n M = sparse(S(i),S(j),v,m,m);\r\n else\r\n PP = sparse(1:length(S),S,1);\r\n M = PP'*J*PP;\r\n end\r\nendfunction\n\r\n%-----%\r\nfunction Mi = metanetwork_i(J,S,t,i) \r\n %ith column of metanetwork (used to create function handle)\r\n %J is a function handle\r\n \r\n Mi=sparse([],[],[],t,1);\r\n for j=find(S==i)'\r\n Jj=J(j);\r\n [ii,k,v]=find(Jj);\r\n Mi=Mi+sparse(S(ii),k,v,t,1);\r\n end\r\nendfunction\n\r\n%-----%\r\nfunction S = tidyconfig(S)\r\n %This function remains almost identical to that originally written by\r\n %Stephen Reid for his greedy.m code.\r\n % tidy up S i.e. S = [2 4 2 6] -> S = [1 2 1 3]\r\n T = zeros(length(S),1);\r\n for i = 1:length(S)\r\n if T(i) == 0\r\n T(S==S(i)) = max(T) + 1;\r\n end\r\n end\r\n S = T;\r\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%% Connected Components in Multilayer Networks\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\nfunction [Components,ComponentsSize] = GetConnectedComponentsExtended(SupraAdjacencyMatrix,Layers,Nodes)\n%\n% Returns the components of an undirected graph specified by the binary and \n% undirected adjacency matrix adj. Components and their constitutent nodes are \n% assigned the same index and stored in the vector, comps. The vector, comp_sizes,\n% contains the number of nodes beloning to each component.\n%\n% Note: disconnected nodes will appear as components with a component\n% size of 1\n%\n% J Goni, University of Navarra and Indiana University, 2009/2011\n%\n% This extended version treats each node in each layer as a independent entity\n% Manlio De Domenico, Universitat Rovira i Virgili, 2014\n\n\n#if ~any(SupraAdjacencyMatrix-triu(SupraAdjacencyMatrix))\n SupraAdjacencyMatrix = SupraAdjacencyMatrix | SupraAdjacencyMatrix';\n#end\n\n%if main diagonal of adj do not contain all ones, i.e. 
autoloops\nif sum(diag(SupraAdjacencyMatrix))~=size(SupraAdjacencyMatrix,1)\n %the main diagonal is set to ones\n SupraAdjacencyMatrix = SupraAdjacencyMatrix | speye(size(SupraAdjacencyMatrix));\nend\n\n%Dulmage-Mendelsohn decomposition\n[useless1,p,useless2,r] = dmperm(SupraAdjacencyMatrix);\n\n%p indicates a permutation (along rows and columns)\n%r is a vector indicating the component boundaries\n\n% List including the number of nodes of each component. ith entry is r(i+1)-r(i)\nComponentsSize = diff(r);\n\n% Number of components found.\nnum_comps = numel(ComponentsSize);\n\n% initialization\nComponents = sparse(1,Nodes*Layers); \n\n% first position of each component is set to one\nComponents(r(1:num_comps)) = ones(1,num_comps); \n\n% cumulative sum produces a label for each component (in a consecutive way)\nComponents = cumsum(Components); \n\n%re-order component labels according to adj.\nComponents(p) = Components; \n\nif sum(ComponentsSize) != Nodes*Layers\n printf(\"ERROR! The sum of components size is not equal to the number of nodes x layers. Aborting process.\\n\")\n exit\nend\n\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [Components,ComponentsSize] = GetConnectedComponentsSimple(SupraAdjacencyMatrix,Layers,Nodes)\n\n%as the extended, but each node and its replicas are treated as a unique entity\n\n[Components,ComponentsSize] = GetConnectedComponentsExtended(SupraAdjacencyMatrix,Layers,Nodes);\n\n%first we have to check if the same entity is assigned to the same component\n%eg, if node A in layer 1 is assigned to component 1 and node A in layer 2 is assigned to component 2\n%then it makes no sense to collapse the information: if they are a unique entity, the nodes\n%should be assigned to the same component, and this happens if they are interconnected or\n%if some of the replicas are isolated components while the others are interconnected\n\nif Layers > 1\n newComponents = sparse(1,Nodes);\n for n = 1:Nodes\n c = Components(n); %the component assigned to n in layer 1\n newComponents(n) = c;\n \n for l = 2:Layers\n ctmp = Components( (l-1)*Nodes + n );\n if ctmp != c\n %check if it is isolated\n if ComponentsSize(ctmp)!=1 && ComponentsSize(c)!=1 \n printf(\"Impossible to find meaningful connected components\\n\")\n printf(\"Node %d in layer 1 is in component %d (size %d) while\\n\",n,c,ComponentsSize(c))\n printf(\"Node %d (abs id: %d) in layer %d is in component %d (size %d)\\n\",n,(l-1)*Nodes + n,l,ctmp,ComponentsSize(ctmp))\n printf(\"Aborting process.\\n\")\n exit\n endif\n endif\n end\n end\nend\n\nComponents = sparse(1,Nodes);\ncomps = unique(newComponents);\n\n%readjust the components label\nfor i = 1:length(comps)\n c = comps(i);\n find(newComponents==c);\n Components( find(newComponents==c) ) = i;\nend\n\n%readjust the components size\nComponentsSize = sparse(1,full(max(Components)));\nfor c = 1:max(Components)\n ComponentsSize(c) = sum( Components==c );\nend\n\nif sum(ComponentsSize) != Nodes\n printf(\"ERROR! The sum of components size is not equal to the number of nodes. 
Aborting process.\\n\")\n exit\nend\n\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [GCC,GCCSize] = GetGiantConnectedComponentExtended(SupraAdjacencyMatrix,Layers,Nodes)\n %compute the giant connected component, each node in each layer is a independent entity\n\n [Components,ComponentsSize] = GetConnectedComponentsExtended(SupraAdjacencyMatrix,Layers,Nodes);\n\n %if there are multiple components with the same size, we choose the first one\n [value,cID] = max(ComponentsSize);\n GCC = find(Components==cID);\n GCCSize = value;\nendfunction\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [GCC,GCCSize] = GetGiantConnectedComponentSimple(SupraAdjacencyMatrix,Layers,Nodes)\n %as the extended, but each node and its replicas are treated as a unique entity\n \n [Components,ComponentsSize] = GetConnectedComponentsSimple(SupraAdjacencyMatrix,Layers,Nodes);\n \n %if there are multiple components with the same size, we choose the first one\n [value,cID] = max(ComponentsSize);\n GCC = find(Components==cID);\n GCCSize = value;\nendfunction"} +{"plateform": "github", "repo_name": "LiHeUA/FastESC-master", "name": "demo_EBMM.m", "ext": ".m", "path": "FastESC-master/EBMM_Release/demo_EBMM.m", "size": 4568, "source_encoding": "utf_8", "md5": "5e8cf84542aa9a2863c390e4c0f1bdd7", "text": "function demo_EBMM\n% Demot of Extended Basic Matrix Multiplication algorithm. \n% Select cT columns (or rows) from A (or B) to form C (or R) so that\n% AB\\approx CR. \n% Also verify Theorem 1 in [1].\n%\n% Details of this algorithm can be found in Alg. 2 in [1].\n% \n% [1] Li He, Nilanjan Ray and Hong Zhang, Fast Large-Scale Spectral \n% Clustering via Explicit Feature Mapping, submitted to IEEE Trans.\n% Cybernetics.\n%\n% Parameter:\n% A p*NT matrix A\n% B NT*q matrix B\n% N \tscalar choose c from N\n% T scalar # of submatrices in A and B\n% c scalar choose c from N\n%\n% Notation: \n% A^(t): the t-th column in matrix A\n% B_(t): the t-th row in matrix B\n% \n% Notice:\n% A should be structured as A = [A[1], A[2], ..., A[T]], where A[i] is a\n% p*N matrix. And \n% [B[1]]\n% B = [B[2]]\n% ...\n% [B[T]]\n% where B[i] is an N*q matrix.\n%\n% Main idea:\n%\n% 1. Split A into T submatrices, titled A[1], A[2],..., A[T], \n% A = [A[1], A[2], ..., A[T]]\n% and\n% [B[1]]\n% B = [B[2]]\n% ...\n% [B[T]]\n%\n% 2. Randomly with replacement pick the t-th index i_t \\in {1,...,N} with\n% probability Prob[i_t=k] = p_k, k=1,...,N.\n%\n% 3. For t=1,...,c, if i_t==k, then select the k-th columns in A[1],\n% A[2],...,A[T], scale by 1/sqrt(c*p_k) and form a new matrix C[t],\n% C[t]=[A[1]^(k), A[2]^(k),...,A[T]^(k)]/sqrt(c*p_k). And\n% [B[1]_(k)]\n% R[t] = [B[2]_(k)] /sqrt(c*p_k)\n% ...\n% [B[T]_(k)]\n%\n% 4. Build C=[C[1],C[2],...,C[T]], and \n% [R[1]]\n% R = [R[2]]\n% ...\n% [R[T]]\n%\n% 5. Then, E[CR]=AB. \n% \n% 6. For i=1,...,N, define \n%\n% H[i] = A[1]^(i)*B[1]_(i) + A[2]^(i)*B_(i) +...+ A[T]^(i)*B_(i)\n% \n% If\n%\n% p_i = ||H[i]||_F/sum(||H[i']||_F)\n%\n% Then, E[||AB-CR||_F^2] is minimal. \n%\n% Li He, heli@gdut.edu.cn\n\n%% 0. Initialization\nclc\n\nN = 6;\nT = 2;\nc = 3;\n% randomly generate A and B\nA = rand(100,N*T);\nB = rand(N*T,200);\n\np = size(A,1);\nq = size(B,2);\n\n% randomly generate the sampling probabilities; for arbitraty prob_col.,\n% E(CR)=AB should hold true\nprob_col = rand(1,N);\nprob_col = prob_col/sum(prob_col);\n\n%% 1. 
Verification of E(CR)=AB with Arbitrary Probabilities\n% we have in total N^c possible C (and R); exhaustively calculate all\n\ndisp('Exp 1: E(CR)=AB with arbitrary sampling probabilities')\n\n% generate all N^c possible indices\nindices = nchoosek_with_replacement(N,c);\n\n% probability of one C (or R) to appear\nprob_matrix = prod( prob_col(indices), 2 );\n\n% build C and R, and brute-forcely check E(CR)\n\n% ECR = sum( prob_matrix*C*R )\nECR = zeros(p,q);\n% E[||AB-CR||_F^2]\nECRF = 0;\n% ground truth AB\nAB = A*B;\n\nC = zeros(p,c*T);\nR = zeros(c*T,q);\nfor i=1:N^c\n % chosen columns (rows) among N^c possible choices\n index = indices(i,:);\n \n % build C and R\n for t=1:c\n ind = index(t); % index of one chosen column\n C(:,t:c:end) = A(:,ind:N:end)/sqrt(c*prob_col(ind));\n R(t:c:end,:) = B(ind:N:end,:)/sqrt(c*prob_col(ind));\n end\n \n % E(CR)\n ECR = ECR + prob_matrix(i)*C*R;\n % E(|AB-CR|_F^2)\n ECRF = ECRF + prob_matrix(i)*norm(C*R-AB,'fro')^2;\nend\n\ndisp(['||E(CR) - AB||_F = ' num2str(norm(ECR-AB,'fro'))])\n\n%% 2. Optimal Sapmling\n% if using the optimal sampling, then E[||AB-CR||_F^2] should be minimum\ndisp(' ');\ndisp('Exp 2: the optimal sampling will minimize E(|AB-CR|_F^2)')\n\n% get the optimal sampling probabilities\nprob_opt = EBMM_OptProb(A, B, N, T);\n\n% probability of one C (or R) to appear\nprob_matrix_opt = prod( prob_opt(indices), 2 );\n% ECR = sum( prob_matrix*C*R )\nECR_opt = zeros(p,q);\n% E[||AB-CR||_F^2]\nECRF_opt = 0;\n\n\nC = zeros(p,c*T);\nR = zeros(c*T,q);\nfor i=1:N^c\n % chosen columns (rows) among N^c possible choices\n index = indices(i,:);\n \n % build C and R\n for t=1:c\n ind = index(t); % index of one chosen column\n C(:,t:c:end) = A(:,ind:N:end)/sqrt(c*prob_opt(ind));\n R(t:c:end,:) = B(ind:N:end,:)/sqrt(c*prob_opt(ind));\n end\n \n ECR_opt = ECR_opt + prob_matrix_opt(i)*C*R;\n ECRF_opt = ECRF_opt + prob_matrix_opt(i)*norm(C*R-AB,'fro')^2;\nend\n\ndisp(['||E(CR_opt) - AB||_F = ' num2str(norm(ECR_opt-AB,'fro'))])\n\n% compare the F-norm error of CR_optimal with the CR in Experiment 1\ndisp(['E[||CR_opt - AB||_F^2 = ' num2str(ECRF_opt) ', E[||CR_Exp1 - AB||_F^2 = ' num2str(ECRF)])\n\n\n\nfunction indices = nchoosek_with_replacement(n,k)\nindices = cell(1,k);\n[indices{:}] = ndgrid(1:n);\nindices = indices(end:-1:1);\nindices = cat(k+1, indices{:});\nindices = reshape(indices, [n^k, k]);"} +{"plateform": "github", "repo_name": "mirtaheri/Grid-visualization-in-Matlab-master", "name": "plotCustMark.m", "ext": ".m", "path": "Grid-visualization-in-Matlab-master/funcs/plotCustMark.m", "size": 1239, "source_encoding": "utf_8", "md5": "2a019f8cd68d58ea1aae70790ecba3d0", "text": "\r\nfunction patchHndl = plotCustMark(xData,yData,markerDataX,markerDataY,markerSize, lineThick, face_color)\r\n\r\n% this function uses codes from: https://it.mathworks.com/matlabcentral/fileexchange/39487-custom-marker-plot\r\n\r\nxData = reshape(xData,length(xData),1) ;\r\nyData = reshape(yData,length(yData),1) ;\r\nmarkerDataX = markerSize * reshape(markerDataX,1,length(markerDataX)) ;\r\nmarkerDataY = markerSize * reshape(markerDataY,1,length(markerDataY)) ;\r\n\r\n%% prepare and plot the patches\r\nmarkerEdgeColor = [0 0 0] ;\r\nmarkerFaceColor = face_color ;\r\n% ------\r\nvertX = repmat(markerDataX,length(xData),1) ; vertX = vertX(:) ;\r\nvertY = repmat(markerDataY,length(yData),1) ; vertY = vertY(:) ;\r\n% ------\r\nvertX = repmat(xData,length(markerDataX),1) + vertX ;\r\nvertY = repmat(yData,length(markerDataY),1) + vertY ;\r\n% ------\r\nfaces = 
0:length(xData):length(xData)*(length(markerDataY)-1) ;\r\nfaces = repmat(faces,length(xData),1) ;\r\nfaces = repmat((1:length(xData))',1,length(markerDataY)) + faces ;\r\n% ------\r\npatchHndl = patch('Faces',faces,'Vertices',[vertX vertY]);\r\nset(patchHndl,'FaceColor',markerFaceColor,'LineWidth', lineThick, 'EdgeColor',markerEdgeColor) ;\r\nhold on\r\n% -------------------------------------------------------------\r\n\r\n"} +{"plateform": "github", "repo_name": "leonid-pishchulin/poseval-master", "name": "savejson.m", "ext": ".m", "path": "poseval-master/matlab/external/jsonlab/savejson.m", "size": 18981, "source_encoding": "utf_8", "md5": "63859e6bc24eb998f433f53d5880015b", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id$\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array,\n% class instance).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.SingletArray [0|1]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.SingletCell [1|0]: if 1, always enclose a cell with \"[]\" \n% even it has only one element; if 0, brackets\n% are ignored when a cell has only 1 element.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. 
For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... \n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD License, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('filename',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nif(isfield(opt,'norowbracket'))\n warning('Option ''NoRowBracket'' is depreciated, please use ''SingletArray'' and set its value to not(NoRowBracket)');\n if(~isfield(opt,'singletarray'))\n opt.singletarray=not(opt.norowbracket);\n end\nend\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || ...\n iscell(obj) || isobject(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nfilename=jsonopt('FileName','',opt);\nif(~isempty(filename))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(filename, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(filename, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n 
txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelseif(isobject(item)) \n txt=matlabobject2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt={};\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nbracketlevel=~jsonopt('singletcell',1,varargin{:});\nif(len>bracketlevel)\n if(~isempty(name))\n txt={padding0, '\"', checkname(name,varargin{:}),'\": [', nl}; name=''; \n else\n txt={padding0, '[', nl};\n end\nelseif(len==0)\n if(~isempty(name))\n txt={padding0, '\"' checkname(name,varargin{:}) '\": []'}; name=''; \n else\n txt={padding0, '[]'};\n end\nend\nfor i=1:dim(1)\n if(dim(1)>1)\n txt(end+1:end+3)={padding2,'[',nl};\n end\n for j=1:dim(2)\n txt{end+1}=obj2json(name,item{i,j},level+(dim(1)>1)+(len>bracketlevel),varargin{:});\n if(j1)\n txt(end+1:end+3)={nl,padding2,']'};\n end\n if(ibracketlevel)\n txt(end+1:end+3)={nl,padding0,']'};\nend\ntxt = sprintf('%s',txt{:});\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt={};\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nforcearray= (len>1 || (jsonopt('SingletArray',0,varargin{:})==1 && level>0));\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+forcearray);\nnl=ws.newline;\n\nif(isempty(item)) \n if(~isempty(name)) \n txt={padding0, '\"', checkname(name,varargin{:}),'\": []'};\n else\n txt={padding0, '[]'};\n end\n return;\nend\nif(~isempty(name)) \n if(forcearray)\n txt={padding0, '\"', checkname(name,varargin{:}),'\": [', nl};\n end\nelse\n if(forcearray)\n txt={padding0, '[', nl};\n end\nend\nfor j=1:dim(2)\n if(dim(1)>1)\n txt(end+1:end+3)={padding2,'[',nl};\n end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1 && ~forcearray)\n txt(end+1:end+5)={padding1, '\"', checkname(name,varargin{:}),'\": {', nl};\n else\n txt(end+1:end+3)={padding1, '{', nl};\n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt{end+1}=obj2json(names{e},item(i,j).(names{e}),...\n level+(dim(1)>1)+1+forcearray,varargin{:});\n if(e1)\n txt(end+1:end+3)={nl,padding2,']'};\n end\n if(j1)\n txt={padding1, '\"', checkname(name,varargin{:}),'\": [', nl};\n end\nelse\n if(len>1)\n txt={padding1, '[', nl};\n end\nend\nfor e=1:len\n val=escapejsonstring(item(e,:));\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n if(isempty(name))\n obj=['\"',val,'\"'];\n end\n txt(end+1:end+2)={padding1, obj};\n else\n txt(end+1:end+4)={padding0,'\"',val,'\"'};\n end\n if(e==len)\n sep='';\n end\n txt{end+1}=sep;\nend\nif(len>1)\n 
txt(end+1:end+3)={nl,padding1,']'};\nend\ntxt = sprintf('%s',txt{:});\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n (isempty(item) && any(size(item))) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('SingletArray',0,varargin{:})==0 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('SingletArray',0,varargin{:})==0)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matlabobject2json(name,item,level,varargin)\nif numel(item) == 0 %empty object\n st = struct();\nelse\n % \"st = struct(item);\" would produce an inmutable warning, because it\n % make the protected and private properties visible. 
Instead we get the\n % visible properties\n propertynames = properties(item);\n for p = 1:numel(propertynames)\n for o = numel(item):-1:1 % aray of objects\n st(o).(propertynames{p}) = item(o).(propertynames{p});\n end\n end\nend\ntxt=struct2json(name,st,level,varargin{:});\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='[]';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos))\n return;\n end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8)\n isoct=0;\n end\nend\nif(isoct)\n escapechars={'\\\\','\\\"','\\/','\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\n newstr=regexprep(newstr,'\\\\\\\\(u[0-9a-fA-F]{4}[^0-9a-fA-F]*)','\\$1');\nelse\n escapechars={'\\\\','\\\"','\\/','\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\n newstr=regexprep(newstr,'\\\\\\\\(u[0-9a-fA-F]{4}[^0-9a-fA-F]*)','\\\\$1');\nend\n"} +{"plateform": "github", "repo_name": "leonid-pishchulin/poseval-master", "name": "loadjson.m", "ext": ".m", "path": 
"poseval-master/matlab/external/jsonlab/loadjson.m", "size": 16145, "source_encoding": "ibm852", "md5": "7582071c5bd7f5e5f74806ce191a9078", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id$\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] 
are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD License, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'^\\s*(?:\\[.+\\])|(?:\\{.+\\})\\s*$','once'))\n string=fname;\nelseif(exist(fname,'file'))\n try\n string = fileread(fname);\n catch\n try\n string = urlread(['file://',fname]);\n catch\n string = urlread(['file://',fullfile(pwd,fname)]);\n end\n end\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n object.(valid_field(str))=val;\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n if(isstruct(object))\n object=struct2jdata(object);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=-1;\n if(isfield(varargin{1},'progressbar_'))\n pbar=varargin{1}.progressbar_;\n end\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 
2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ismatrix(object))\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n pos=skip_whitespace(pos,inStr,len);\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n pos=skip_whitespace(pos,inStr,len);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n pos=skip_whitespace(pos,inStr,len);\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction newpos=skip_whitespace(pos,inStr,len)\n newpos=pos;\n while newpos <= len && isspace(inStr(newpos))\n newpos = newpos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str);\n switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = 
inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos);\n keyboard;\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr isoct\n currstr=inStr(pos:min(pos+30,end));\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n \n if(isfield(varargin{1},'progressbar_'))\n waitbar(pos/len,varargin{1}.progressbar_,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' )))\n return;\n end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos))\n return;\n end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation 
mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r))\n e1r=bpos(pos);\n end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l))\n e1l=bpos(pos);\n end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n"} +{"plateform": "github", "repo_name": "leonid-pishchulin/poseval-master", "name": "loadubjson.m", "ext": ".m", "path": "poseval-master/matlab/external/jsonlab/loadubjson.m", "size": 13300, "source_encoding": "utf_8", "md5": "b15e959f758c5c2efa2711aa79c443fc", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id$\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n% opt.NameIsString [0|1]: for UBJSON Specification Draft 8 or \n% earlier versions (JSONLab 1.0 final or earlier), \n% the \"name\" tag is treated as a string. To load \n% these UBJSON data, you need to manually set this \n% flag to 1.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] 
are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD License, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n if(jsonopt('NameIsString',0,varargin{:}))\n str = parseStr(varargin{:});\n else\n str = parse_name(varargin{:});\n end\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n object.(valid_field(str))=val;\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n if(isstruct(object))\n object=struct2jdata(object);\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data, adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written 
in row-major order\nglobal pos inStr\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object, adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object, adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ismatrix(object))\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parse_name(varargin)\n global pos inStr\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of name');\n end\n%%-------------------------------------------------------------------------\n\nfunction str = parseStr(varargin)\n global pos inStr\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + 
bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' )))\n return;\n end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos))\n return;\n end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r))\n e1r=bpos(pos);\n end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l))\n e1l=bpos(pos);\n end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "leonid-pishchulin/poseval-master", "name": "saveubjson.m", "ext": ".m", "path": "poseval-master/matlab/external/jsonlab/saveubjson.m", "size": 17723, "source_encoding": "utf_8", "md5": "3414421172c05225dfbd4a9c8c76e6b3", "text": "function json=saveubjson(rootname,obj,varargin)\n%\n% 
json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id$\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array,\n% class instance)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.SingletArray [0|1]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.SingletCell [1|0]: if 1, always enclose a cell with \"[]\" \n% even it has only one element; if 0, brackets\n% are ignored when a cell has only 1 element.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD License, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('filename',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nif(isfield(opt,'norowbracket'))\n warning('Option ''NoRowBracket'' is depreciated, please use ''SingletArray'' and set its value to not(NoRowBracket)');\n if(~isfield(opt,'singletarray'))\n opt.singletarray=not(opt.norowbracket);\n end\nend\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || ...\n iscell(obj) || isobject(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nfilename=jsonopt('FileName','',opt);\nif(~isempty(filename))\n fid = fopen(filename, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelseif(isobject(item)) \n txt=matlabobject2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nbracketlevel=~jsonopt('singletcell',1,varargin{:});\nlen=numel(item); % let's handle 1D cell first\nif(len>bracketlevel) \n if(~isempty(name))\n txt=[N_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[N_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1)\n txt=[txt '['];\n end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>bracketlevel),varargin{:})];\n end\n if(dim(1)>1)\n txt=[txt ']'];\n end\nend\nif(len>bracketlevel)\n txt=[txt ']'];\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a 
struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nforcearray= (len>1 || (jsonopt('SingletArray',0,varargin{:})==1 && level>0));\n\nif(~isempty(name)) \n if(forcearray)\n txt=[N_(checkname(name,varargin{:})) '['];\n end\nelse\n if(forcearray)\n txt='[';\n end\nend\nfor j=1:dim(2)\n if(dim(1)>1)\n txt=[txt '['];\n end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1 && ~forcearray)\n txt=[txt N_(checkname(name,varargin{:})) '{']; \n else\n txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},item(i,j).(names{e}),...\n level+(dim(1)>1)+1+forcearray,varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1)\n txt=[txt ']'];\n end\nend\nif(forcearray)\n txt=[txt ']'];\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1)\n txt=[N_(checkname(name,varargin{:})) '['];\n end\nelse\n if(len>1)\n txt='[';\n end\nend\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=[N_(checkname(name,varargin{:})) '' '',S_(val),''];\n if(isempty(name))\n obj=['',S_(val),''];\n end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1)\n txt=[txt ']'];\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n (isempty(item) && any(size(item))) ||jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' N_('_ArrayType_'),S_(class(item)),N_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[N_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[N_(checkname(name,varargin{:})),'{',N_('_ArrayType_'),S_(class(item)),N_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('SingletArray',0,varargin{:})==0)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[N_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[N_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,N_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,N_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,N_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,N_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,N_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n 
txt=[txt,N_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,N_('_ArrayIsComplex_'),'T'];\n txt=[txt,N_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matlabobject2ubjson(name,item,level,varargin)\nif numel(item) == 0 %empty object\n st = struct();\nelse\n % \"st = struct(item);\" would produce an inmutable warning, because it\n % make the protected and private properties visible. Instead we get the\n % visible properties\n propertynames = properties(item);\n for p = 1:numel(propertynames)\n for o = numel(item):-1:1 % aray of objects\n st(o).(propertynames{p}) = item(o).(propertynames{p});\n end\n end\nend\ntxt=struct2ubjson(name,st,level,varargin{:});\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(id~=0))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(id~=0);\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos))\n return;\n end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=N_(str)\nval=[I_(int32(length(str))) str];\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' 
data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "lhmRyan/dual-purpose-hashing-DPH-master", "name": "classification_demo.m", "ext": ".m", "path": "dual-purpose-hashing-DPH-master/matlab/demo/classification_demo.m", "size": 5466, "source_encoding": "utf_8", "md5": "45745fb7cfe37ef723c307dfa06f1b97", "text": "function [scores, maxlabel] = classification_demo(im, use_gpu)\n% [scores, maxlabel] = classification_demo(im, use_gpu)\n%\n% Image classification demo using BVLC CaffeNet.\n%\n% IMPORTANT: before you run this demo, you should download BVLC CaffeNet\n% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)\n%\n% ****************************************************************************\n% For detailed documentation and usage on Caffe's Matlab interface, please\n% refer to the Caffe 
Interface Tutorial at\n% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab\n% ****************************************************************************\n%\n% input\n% im color image as uint8 HxWx3\n% use_gpu 1 to use the GPU, 0 to use the CPU\n%\n% output\n% scores 1000-dimensional ILSVRC score vector\n% maxlabel the label of the highest score\n%\n% You may need to do the following before you start matlab:\n% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64\n% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6\n% Or the equivalent based on where things are installed on your system\n% and what versions are installed.\n%\n% Usage:\n% im = imread('../../examples/images/cat.jpg');\n% scores = classification_demo(im, 1);\n% [score, class] = max(scores);\n% Five things to be aware of:\n% caffe uses row-major order\n% matlab uses column-major order\n% caffe uses BGR color channel order\n% matlab uses RGB color channel order\n% images need to have the data mean subtracted\n\n% Data coming in from matlab needs to be in the order\n% [width, height, channels, images]\n% where width is the fastest dimension.\n% Here is the rough matlab code for putting image data into the correct\n% format in W x H x C with BGR channels:\n% % permute channels from RGB to BGR\n% im_data = im(:, :, [3, 2, 1]);\n% % flip width and height to make width the fastest dimension\n% im_data = permute(im_data, [2, 1, 3]);\n% % convert from uint8 to single\n% im_data = single(im_data);\n% % reshape to a fixed size (e.g., 227x227).\n% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');\n% % subtract mean_data (already in W x H x C with BGR channels)\n% im_data = im_data - mean_data;\n\n% If you have multiple images, cat them with cat(4, ...)\n\n% Add caffe/matlab to your Matlab search PATH in order to use matcaffe\nif exist('../+caffe', 'dir')\n addpath('..');\nelse\n error('Please run this demo from caffe/matlab/demo');\nend\n\n% Set caffe mode\nif exist('use_gpu', 'var') && use_gpu\n caffe.set_mode_gpu();\n gpu_id = 0; % we will use the first gpu in this demo\n caffe.set_device(gpu_id);\nelse\n caffe.set_mode_cpu();\nend\n\n% Initialize the network using BVLC CaffeNet for image classification\n% Weights (parameter) file needs to be downloaded from Model Zoo.\nmodel_dir = '../../models/bvlc_reference_caffenet/';\nnet_model = [model_dir 'deploy.prototxt'];\nnet_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];\nphase = 'test'; % run with phase test (so that dropout isn't applied)\nif ~exist(net_weights, 'file')\n error('Please download CaffeNet from Model Zoo before you run this demo');\nend\n\n% Initialize a network\nnet = caffe.Net(net_model, net_weights, phase);\n\nif nargin < 1\n % For demo purposes we will use the cat image\n fprintf('using caffe/examples/images/cat.jpg as input image\\n');\n im = imread('../../examples/images/cat.jpg');\nend\n\n% prepare oversampled input\n% input_data is Height x Width x Channel x Num\ntic;\ninput_data = {prepare_image(im)};\ntoc;\n\n% do forward pass to get scores\n% scores are now Channels x Num, where Channels == 1000\ntic;\n% The net forward function. 
It takes in a cell array of N-D arrays\n% (where N == 4 here) containing data of input blob(s) and outputs a cell\n% array containing data from output blob(s)\nscores = net.forward(input_data);\ntoc;\n\nscores = scores{1};\nscores = mean(scores, 2); % take average scores over 10 crops\n\n[~, maxlabel] = max(scores);\n\n% call caffe.reset_all() to reset caffe\ncaffe.reset_all();\n\n% ------------------------------------------------------------------------\nfunction crops_data = prepare_image(im)\n% ------------------------------------------------------------------------\n% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that\n% is already in W x H x C with BGR channels\nd = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');\nmean_data = d.mean_data;\nIMAGE_DIM = 256;\nCROPPED_DIM = 227;\n\n% Convert an image returned by Matlab's imread to im_data in caffe's data\n% format: W x H x C with BGR channels\nim_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR\nim_data = permute(im_data, [2, 1, 3]); % flip width and height\nim_data = single(im_data); % convert from uint8 to single\nim_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data\nim_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)\n\n% oversample (4 corners, center, and their x-axis flips)\ncrops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');\nindices = [0 IMAGE_DIM-CROPPED_DIM] + 1;\nn = 1;\nfor i = indices\n for j = indices\n crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);\n crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);\n n = n + 1;\n end\nend\ncenter = floor(indices(2) / 2) + 1;\ncrops_data(:,:,:,5) = ...\n im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);\ncrops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);\n"} +{"plateform": "github", "repo_name": "yuqingtong1990/webrtc_vs2015-master", "name": "apmtest.m", "ext": ".m", "path": "webrtc_vs2015-master/webrtc/modules/audio_processing/test/apmtest.m", "size": 9470, "source_encoding": "utf_8", "md5": "ad72111888b4bb4b7c4605d0bf79d572", "text": "function apmtest(task, testname, filepath, casenumber, legacy)\n%APMTEST is a tool to process APM file sets and easily display the output.\n% APMTEST(TASK, TESTNAME, CASENUMBER) performs one of several TASKs:\n% 'test' Processes the files to produce test output.\n% 'list' Prints a list of cases in the test set, preceded by their\n% CASENUMBERs.\n% 'show' Uses spclab to show the test case specified by the\n% CASENUMBER parameter.\n%\n% using a set of test files determined by TESTNAME:\n% 'all' All tests.\n% 'apm' The standard APM test set (default).\n% 'apmm' The mobile APM test set.\n% 'aec' The AEC test set.\n% 'aecm' The AECM test set.\n% 'agc' The AGC test set.\n% 'ns' The NS test set.\n% 'vad' The VAD test set.\n%\n% FILEPATH specifies the path to the test data files.\n%\n% CASENUMBER can be used to select a single test case. 
Omit CASENUMBER,\n% or set to zero, to use all test cases.\n%\n\nif nargin < 5 || isempty(legacy)\n % Set to true to run old VQE recordings.\n legacy = false;\nend\n\nif nargin < 4 || isempty(casenumber)\n casenumber = 0;\nend\n\nif nargin < 3 || isempty(filepath)\n filepath = 'data/';\nend\n\nif nargin < 2 || isempty(testname)\n testname = 'all';\nend\n\nif nargin < 1 || isempty(task)\n task = 'test';\nend\n\nif ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')\n error(['TASK ' task ' is not recognized']);\nend\n\nif casenumber == 0 && strcmp(task, 'show')\n error(['CASENUMBER must be specified for TASK ' task]);\nend\n\ninpath = [filepath 'input/'];\noutpath = [filepath 'output/'];\nrefpath = [filepath 'reference/'];\n\nif strcmp(testname, 'all')\n tests = {'apm','apmm','aec','aecm','agc','ns','vad'};\nelse\n tests = {testname};\nend\n\nif legacy\n progname = './test';\nelse\n progname = './process_test';\nend\n\nglobal farFile;\nglobal nearFile;\nglobal eventFile;\nglobal delayFile;\nglobal driftFile;\n\nif legacy\n farFile = 'vqeFar.pcm';\n nearFile = 'vqeNear.pcm';\n eventFile = 'vqeEvent.dat';\n delayFile = 'vqeBuf.dat';\n driftFile = 'vqeDrift.dat';\nelse\n farFile = 'apm_far.pcm';\n nearFile = 'apm_near.pcm';\n eventFile = 'apm_event.dat';\n delayFile = 'apm_delay.dat';\n driftFile = 'apm_drift.dat';\nend\n\nsimulateMode = false;\nnErr = 0;\nnCases = 0;\nfor i=1:length(tests)\n simulateMode = false;\n\n if strcmp(tests{i}, 'apm')\n testdir = ['apm/'];\n outfile = ['out'];\n if legacy\n opt = ['-ec 1 -agc 2 -nc 2 -vad 3'];\n else\n opt = ['--no_progress -hpf' ...\n ' -aec --drift_compensation -agc --fixed_digital' ...\n ' -ns --ns_moderate -vad'];\n end\n\n elseif strcmp(tests{i}, 'apm-swb')\n simulateMode = true;\n testdir = ['apm-swb/'];\n outfile = ['out'];\n if legacy\n opt = ['-fs 32000 -ec 1 -agc 2 -nc 2'];\n else\n opt = ['--no_progress -fs 32000 -hpf' ...\n ' -aec --drift_compensation -agc --adaptive_digital' ...\n ' -ns --ns_moderate -vad'];\n end\n elseif strcmp(tests{i}, 'apmm')\n testdir = ['apmm/'];\n outfile = ['out'];\n opt = ['-aec --drift_compensation -agc --fixed_digital -hpf -ns ' ...\n '--ns_moderate'];\n\n else\n error(['TESTNAME ' tests{i} ' is not recognized']);\n end\n\n inpathtest = [inpath testdir];\n outpathtest = [outpath testdir];\n refpathtest = [refpath testdir];\n\n if ~exist(inpathtest,'dir')\n error(['Input directory ' inpathtest ' does not exist']);\n end\n\n if ~exist(refpathtest,'dir')\n warning(['Reference directory ' refpathtest ' does not exist']);\n end\n\n [status, errMsg] = mkdir(outpathtest);\n if (status == 0)\n error(errMsg);\n end\n\n [nErr, nCases] = recurseDir(inpathtest, outpathtest, refpathtest, outfile, ...\n progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);\n\n if strcmp(task, 'test') || strcmp(task, 'show')\n system(['rm ' farFile]);\n system(['rm ' nearFile]);\n if simulateMode == false\n system(['rm ' eventFile]);\n system(['rm ' delayFile]);\n system(['rm ' driftFile]);\n end\n end\nend\n\nif ~strcmp(task, 'list')\n if nErr == 0\n fprintf(1, '\\nAll files are bit-exact to reference\\n', nErr);\n else\n fprintf(1, '\\n%d files are NOT bit-exact to reference\\n', nErr);\n end\nend\n\n\nfunction [nErrOut, nCases] = recurseDir(inpath, outpath, refpath, ...\n outfile, progname, opt, simulateMode, nErr, nCases, task, casenumber, ...\n legacy)\n\nglobal farFile;\nglobal nearFile;\nglobal eventFile;\nglobal delayFile;\nglobal driftFile;\n\ndirs = dir(inpath);\nnDirs = 0;\nnErrOut = nErr;\nfor 
i=3:length(dirs) % skip . and ..\n nDirs = nDirs + dirs(i).isdir;\nend\n\n\nif nDirs == 0\n nCases = nCases + 1;\n\n if casenumber == nCases || casenumber == 0\n\n if strcmp(task, 'list')\n fprintf([num2str(nCases) '. ' outfile '\\n'])\n else\n vadoutfile = ['vad_' outfile '.dat'];\n outfile = [outfile '.pcm'];\n\n % Check for VAD test\n vadTest = 0;\n if ~isempty(findstr(opt, '-vad'))\n vadTest = 1;\n if legacy\n opt = [opt ' ' outpath vadoutfile];\n else\n opt = [opt ' --vad_out_file ' outpath vadoutfile];\n end\n end\n\n if exist([inpath 'vqeFar.pcm'])\n system(['ln -s -f ' inpath 'vqeFar.pcm ' farFile]);\n elseif exist([inpath 'apm_far.pcm'])\n system(['ln -s -f ' inpath 'apm_far.pcm ' farFile]);\n end\n\n if exist([inpath 'vqeNear.pcm'])\n system(['ln -s -f ' inpath 'vqeNear.pcm ' nearFile]);\n elseif exist([inpath 'apm_near.pcm'])\n system(['ln -s -f ' inpath 'apm_near.pcm ' nearFile]);\n end\n\n if exist([inpath 'vqeEvent.dat'])\n system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);\n elseif exist([inpath 'apm_event.dat'])\n system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);\n end\n\n if exist([inpath 'vqeBuf.dat'])\n system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);\n elseif exist([inpath 'apm_delay.dat'])\n system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);\n end\n\n if exist([inpath 'vqeSkew.dat'])\n system(['ln -s -f ' inpath 'vqeSkew.dat ' driftFile]);\n elseif exist([inpath 'vqeDrift.dat'])\n system(['ln -s -f ' inpath 'vqeDrift.dat ' driftFile]);\n elseif exist([inpath 'apm_drift.dat'])\n system(['ln -s -f ' inpath 'apm_drift.dat ' driftFile]);\n end\n\n if simulateMode == false\n command = [progname ' -o ' outpath outfile ' ' opt];\n else\n if legacy\n inputCmd = [' -in ' nearFile];\n else\n inputCmd = [' -i ' nearFile];\n end\n\n if exist([farFile])\n if legacy\n inputCmd = [' -if ' farFile inputCmd];\n else\n inputCmd = [' -ir ' farFile inputCmd];\n end\n end\n command = [progname inputCmd ' -o ' outpath outfile ' ' opt];\n end\n % This prevents MATLAB from using its own C libraries.\n shellcmd = ['bash -c \"unset LD_LIBRARY_PATH;'];\n fprintf([command '\\n']);\n [status, result] = system([shellcmd command '\"']);\n fprintf(result);\n\n fprintf(['Reference file: ' refpath outfile '\\n']);\n\n if vadTest == 1\n equal_to_ref = are_files_equal([outpath vadoutfile], ...\n [refpath vadoutfile], ...\n 'int8');\n if ~equal_to_ref\n nErr = nErr + 1;\n end\n end\n\n [equal_to_ref, diffvector] = are_files_equal([outpath outfile], ...\n [refpath outfile], ...\n 'int16');\n if ~equal_to_ref\n nErr = nErr + 1;\n end\n\n if strcmp(task, 'show')\n % Assume the last init gives the sample rate of interest.\n str_idx = strfind(result, 'Sample rate:');\n fs = str2num(result(str_idx(end) + 13:str_idx(end) + 17));\n fprintf('Using %d Hz\\n', fs);\n\n if exist([farFile])\n spclab(fs, farFile, nearFile, [refpath outfile], ...\n [outpath outfile], diffvector);\n %spclab(fs, diffvector);\n else\n spclab(fs, nearFile, [refpath outfile], [outpath outfile], ...\n diffvector);\n %spclab(fs, diffvector);\n end\n end\n end\n end\nelse\n\n for i=3:length(dirs)\n if dirs(i).isdir\n [nErr, nCases] = recurseDir([inpath dirs(i).name '/'], outpath, ...\n refpath,[outfile '_' dirs(i).name], progname, opt, ...\n simulateMode, nErr, nCases, task, casenumber, legacy);\n end\n end\nend\nnErrOut = nErr;\n\nfunction [are_equal, diffvector] = ...\n are_files_equal(newfile, reffile, precision, diffvector)\n\nare_equal = false;\ndiffvector = 0;\nif ~exist(newfile,'file')\n warning(['Output file ' 
newfile ' does not exist']); \n return\nend\n\nif ~exist(reffile,'file')\n warning(['Reference file ' reffile ' does not exist']); \n return\nend\n\nfid = fopen(newfile,'rb');\nnew = fread(fid,inf,precision);\nfclose(fid);\n\nfid = fopen(reffile,'rb');\nref = fread(fid,inf,precision);\nfclose(fid);\n\nif length(new) ~= length(ref)\n warning('Reference is not the same length as output');\n minlength = min(length(new), length(ref));\n new = new(1:minlength);\n ref = ref(1:minlength);\nend\ndiffvector = new - ref;\n\nif isequal(new, ref)\n fprintf([newfile ' is bit-exact to reference\\n']);\n are_equal = true;\nelse\n if isempty(new)\n warning([newfile ' is empty']);\n return\n end\n snr = snrseg(new,ref,80);\n fprintf('\\n');\n are_equal = false;\nend\n"} +{"plateform": "github", "repo_name": "yuqingtong1990/webrtc_vs2015-master", "name": "plot_neteq_delay.m", "ext": ".m", "path": "webrtc_vs2015-master/webrtc/modules/audio_coding/neteq/test/delay_tool/plot_neteq_delay.m", "size": 5563, "source_encoding": "utf_8", "md5": "8b6a66813477863da513b1e6971dbc97", "text": "function [delay_struct, delayvalues] = plot_neteq_delay(delayfile, varargin)\n\n% InfoStruct = plot_neteq_delay(delayfile)\n% InfoStruct = plot_neteq_delay(delayfile, 'skipdelay', skip_seconds)\n%\n% Henrik Lundin, 2006-11-17\n% Henrik Lundin, 2011-05-17\n%\n\ntry\n s = parse_delay_file(delayfile);\ncatch\n error(lasterr);\nend\n\ndelayskip=0;\nnoplot=0;\narg_ptr=1;\ndelaypoints=[];\n\ns.sn=unwrap_seqno(s.sn);\n\nwhile arg_ptr+1 <= nargin\n switch lower(varargin{arg_ptr})\n case {'skipdelay', 'delayskip'}\n % skip a number of seconds in the beginning when calculating delays\n delayskip = varargin{arg_ptr+1};\n arg_ptr = arg_ptr + 2;\n case 'noplot'\n noplot=1;\n arg_ptr = arg_ptr + 1;\n case {'get_delay', 'getdelay'}\n % return a vector of delay values for the points in the given vector\n delaypoints = varargin{arg_ptr+1};\n arg_ptr = arg_ptr + 2;\n otherwise\n warning('Unknown switch %s\\n', varargin{arg_ptr});\n arg_ptr = arg_ptr + 1;\n end\nend\n\n% find lost frames that were covered by one-descriptor decoding\none_desc_ix=find(isnan(s.arrival));\nfor k=1:length(one_desc_ix)\n ix=find(s.ts==max(s.ts(s.ts(one_desc_ix(k))>s.ts)));\n s.sn(one_desc_ix(k))=s.sn(ix)+1;\n s.pt(one_desc_ix(k))=s.pt(ix);\n s.arrival(one_desc_ix(k))=s.arrival(ix)+s.decode(one_desc_ix(k))-s.decode(ix);\nend\n\n% remove duplicate received frames that were never decoded (RED codec)\nif length(unique(s.ts(isfinite(s.ts)))) < length(s.ts(isfinite(s.ts)))\n ix=find(isfinite(s.decode));\n s.sn=s.sn(ix);\n s.ts=s.ts(ix);\n s.arrival=s.arrival(ix);\n s.playout_delay=s.playout_delay(ix);\n s.pt=s.pt(ix);\n s.optbuf=s.optbuf(ix);\n plen=plen(ix);\n s.decode=s.decode(ix);\nend\n\n% find non-unique sequence numbers\n[~,un_ix]=unique(s.sn);\nnonun_ix=setdiff(1:length(s.sn),un_ix);\nif ~isempty(nonun_ix)\n warning('RTP sequence numbers are in error');\nend\n \n% sort vectors\n[s.sn,sort_ix]=sort(s.sn);\ns.ts=s.ts(sort_ix);\ns.arrival=s.arrival(sort_ix);\ns.decode=s.decode(sort_ix);\ns.playout_delay=s.playout_delay(sort_ix);\ns.pt=s.pt(sort_ix);\n\nsend_t=s.ts-s.ts(1);\nif length(s.fs)<1\n warning('No info about sample rate found in file. 
Using default 8000.');\n s.fs(1)=8000;\n s.fschange_ts(1)=min(s.ts);\nelseif s.fschange_ts(1)>min(s.ts)\n s.fschange_ts(1)=min(s.ts);\nend\n\nend_ix=length(send_t);\nfor k=length(s.fs):-1:1\n start_ix=find(s.ts==s.fschange_ts(k));\n send_t(start_ix:end_ix)=send_t(start_ix:end_ix)/s.fs(k)*1000;\n s.playout_delay(start_ix:end_ix)=s.playout_delay(start_ix:end_ix)/s.fs(k)*1000;\n s.optbuf(start_ix:end_ix)=s.optbuf(start_ix:end_ix)/s.fs(k)*1000;\n end_ix=start_ix-1;\nend\n\ntot_time=max(send_t)-min(send_t);\n\nseq_ix=s.sn-min(s.sn)+1;\nsend_t=send_t+max(min(s.arrival-send_t),0);\n\nplot_send_t=nan*ones(max(seq_ix),1);\nplot_send_t(seq_ix)=send_t;\nplot_nw_delay=nan*ones(max(seq_ix),1);\nplot_nw_delay(seq_ix)=s.arrival-send_t;\n\ncng_ix=find(s.pt~=13); % find those packets that are not CNG/SID\n \nif noplot==0\n h=plot(plot_send_t/1000,plot_nw_delay);\n set(h,'color',0.75*[1 1 1]);\n hold on\n if any(s.optbuf~=0)\n peak_ix=find(s.optbuf(cng_ix)<0); % peak mode is labeled with negative values\n no_peak_ix=find(s.optbuf(cng_ix)>0); %setdiff(1:length(cng_ix),peak_ix);\n h1=plot(send_t(cng_ix(peak_ix))/1000,...\n s.arrival(cng_ix(peak_ix))+abs(s.optbuf(cng_ix(peak_ix)))-send_t(cng_ix(peak_ix)),...\n 'r.');\n h2=plot(send_t(cng_ix(no_peak_ix))/1000,...\n s.arrival(cng_ix(no_peak_ix))+abs(s.optbuf(cng_ix(no_peak_ix)))-send_t(cng_ix(no_peak_ix)),...\n 'g.');\n set([h1, h2],'markersize',1)\n end\n %h=plot(send_t(seq_ix)/1000,s.decode+s.playout_delay-send_t(seq_ix));\n h=plot(send_t(cng_ix)/1000,s.decode(cng_ix)+s.playout_delay(cng_ix)-send_t(cng_ix));\n set(h,'linew',1.5);\n hold off\n ax1=axis;\n axis tight\n ax2=axis;\n axis([ax2(1:3) ax1(4)])\nend\n\n\n% calculate delays and other parameters\n\ndelayskip_ix = find(send_t-send_t(1)>=delayskip*1000, 1 );\n\nuse_ix = intersect(cng_ix,... % use those that are not CNG/SID frames...\n intersect(find(isfinite(s.decode)),... % ... that did arrive ...\n (delayskip_ix:length(s.decode))')); % ... 
and are sent after delayskip seconds\n\nmean_delay = mean(s.decode(use_ix)+s.playout_delay(use_ix)-send_t(use_ix));\nneteq_delay = mean(s.decode(use_ix)+s.playout_delay(use_ix)-s.arrival(use_ix));\n\nNpack=max(s.sn(delayskip_ix:end))-min(s.sn(delayskip_ix:end))+1;\nnw_lossrate=(Npack-length(s.sn(delayskip_ix:end)))/Npack;\nneteq_lossrate=(length(s.sn(delayskip_ix:end))-length(use_ix))/Npack;\n\ndelay_struct=struct('mean_delay',mean_delay,'neteq_delay',neteq_delay,...\n 'nw_lossrate',nw_lossrate,'neteq_lossrate',neteq_lossrate,...\n 'tot_expand',round(s.tot_expand),'tot_accelerate',round(s.tot_accelerate),...\n 'tot_preemptive',round(s.tot_preemptive),'tot_time',tot_time,...\n 'filename',delayfile,'units','ms','fs',unique(s.fs));\n \nif not(isempty(delaypoints))\n delayvalues=interp1(send_t(cng_ix),...\n s.decode(cng_ix)+s.playout_delay(cng_ix)-send_t(cng_ix),...\n delaypoints,'nearest',NaN);\nelse\n delayvalues=[];\nend\n\n\n\n% SUBFUNCTIONS %\n\nfunction y=unwrap_seqno(x)\n\njumps=find(abs((diff(x)-1))>65000);\n\nwhile ~isempty(jumps)\n n=jumps(1);\n if x(n+1)-x(n) < 0\n % negative jump\n x(n+1:end)=x(n+1:end)+65536;\n else\n % positive jump\n x(n+1:end)=x(n+1:end)-65536;\n end\n \n jumps=find(abs((diff(x(n+1:end))-1))>65000);\nend\n\ny=x;\n\nreturn;\n"} +{"plateform": "github", "repo_name": "kalov/ShapePFCN-master", "name": "classification_demo.m", "ext": ".m", "path": "ShapePFCN-master/caffe-ours/matlab/demo/classification_demo.m", "size": 5412, "source_encoding": "utf_8", "md5": "8f46deabe6cde287c4759f3bc8b7f819", "text": "function [scores, maxlabel] = classification_demo(im, use_gpu)\n% [scores, maxlabel] = classification_demo(im, use_gpu)\n%\n% Image classification demo using BVLC CaffeNet.\n%\n% IMPORTANT: before you run this demo, you should download BVLC CaffeNet\n% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)\n%\n% ****************************************************************************\n% For detailed documentation and usage on Caffe's Matlab interface, please\n% refer to Caffe Interface Tutorial at\n% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab\n% ****************************************************************************\n%\n% input\n% im color image as uint8 HxWx3\n% use_gpu 1 to use the GPU, 0 to use the CPU\n%\n% output\n% scores 1000-dimensional ILSVRC score vector\n% maxlabel the label of the highest score\n%\n% You may need to do the following before you start matlab:\n% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64\n% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6\n% Or the equivalent based on where things are installed on your system\n%\n% Usage:\n% im = imread('../../examples/images/cat.jpg');\n% scores = classification_demo(im, 1);\n% [score, class] = max(scores);\n% Five things to be aware of:\n% caffe uses row-major order\n% matlab uses column-major order\n% caffe uses BGR color channel order\n% matlab uses RGB color channel order\n% images need to have the data mean subtracted\n\n% Data coming in from matlab needs to be in the order\n% [width, height, channels, images]\n% where width is the fastest dimension.\n% Here is the rough matlab for putting image data into the correct\n% format in W x H x C with BGR channels:\n% % permute channels from RGB to BGR\n% im_data = im(:, :, [3, 2, 1]);\n% % flip width and height to make width the fastest dimension\n% im_data = permute(im_data, [2, 1, 3]);\n% % convert from uint8 to single\n% im_data = single(im_data);\n% % reshape to a 
fixed size (e.g., 227x227).\n% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');\n% % subtract mean_data (already in W x H x C with BGR channels)\n% im_data = im_data - mean_data;\n\n% If you have multiple images, cat them with cat(4, ...)\n\n% Add caffe/matlab to you Matlab search PATH to use matcaffe\nif exist('../+caffe', 'dir')\n addpath('..');\nelse\n error('Please run this demo from caffe/matlab/demo');\nend\n\n% Set caffe mode\nif exist('use_gpu', 'var') && use_gpu\n caffe.set_mode_gpu();\n gpu_id = 0; % we will use the first gpu in this demo\n caffe.set_device(gpu_id);\nelse\n caffe.set_mode_cpu();\nend\n\n% Initialize the network using BVLC CaffeNet for image classification\n% Weights (parameter) file needs to be downloaded from Model Zoo.\nmodel_dir = '../../models/bvlc_reference_caffenet/';\nnet_model = [model_dir 'deploy.prototxt'];\nnet_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];\nphase = 'test'; % run with phase test (so that dropout isn't applied)\nif ~exist(net_weights, 'file')\n error('Please download CaffeNet from Model Zoo before you run this demo');\nend\n\n% Initialize a network\nnet = caffe.Net(net_model, net_weights, phase);\n\nif nargin < 1\n % For demo purposes we will use the cat image\n fprintf('using caffe/examples/images/cat.jpg as input image\\n');\n im = imread('../../examples/images/cat.jpg');\nend\n\n% prepare oversampled input\n% input_data is Height x Width x Channel x Num\ntic;\ninput_data = {prepare_image(im)};\ntoc;\n\n% do forward pass to get scores\n% scores are now Channels x Num, where Channels == 1000\ntic;\n% The net forward function. It takes in a cell array of N-D arrays\n% (where N == 4 here) containing data of input blob(s) and outputs a cell\n% array containing data from output blob(s)\nscores = net.forward(input_data);\ntoc;\n\nscores = scores{1};\nscores = mean(scores, 2); % take average scores over 10 crops\n\n[~, maxlabel] = max(scores);\n\n% call caffe.reset_all() to reset caffe\ncaffe.reset_all();\n\n% ------------------------------------------------------------------------\nfunction crops_data = prepare_image(im)\n% ------------------------------------------------------------------------\n% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that\n% is already in W x H x C with BGR channels\nd = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');\nmean_data = d.mean_data;\nIMAGE_DIM = 256;\nCROPPED_DIM = 227;\n\n% Convert an image returned by Matlab's imread to im_data in caffe's data\n% format: W x H x C with BGR channels\nim_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR\nim_data = permute(im_data, [2, 1, 3]); % flip width and height\nim_data = single(im_data); % convert from uint8 to single\nim_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data\nim_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)\n\n% oversample (4 corners, center, and their x-axis flips)\ncrops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');\nindices = [0 IMAGE_DIM-CROPPED_DIM] + 1;\nn = 1;\nfor i = indices\n for j = indices\n crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);\n crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);\n n = n + 1;\n end\nend\ncenter = floor(indices(2) / 2) + 1;\ncrops_data(:,:,:,5) = ...\n im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);\ncrops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);\n"} +{"plateform": "github", "repo_name": "usgs/landslides-mLS-master", 
"name": "mLS.m", "ext": ".m", "path": "landslides-mLS-master/mLS.m", "size": 7878, "source_encoding": "utf_8", "md5": "5fc99d5ed047ae1cd3d70caf7f13cc6c", "text": "% This script is provided as a supplementary material of a paper\r\n% published in Earth Surface Processes and Landforms. The details of the \r\n% method followed in the given script is described in the corresponding \r\n% paper. If you publish use this script or a its modified version please \r\n% cite the following paper:\r\n% Tanyas, H., K.E. Allstadt, and C.J. van Westen, 2018, \r\n% An updated method for estimating landslide-event magnitude, Earth Surface \r\n% Processes and Landforms. DOI: 10.1002/esp.4359\r\n\r\n% The Pupose Of The Script And The Input Parametes \r\n% This script is provided for the accurate estimation of landslide-event\r\n% magnitude. We used Matlab R2015b to test the given script. It basically\r\n% requires three input parameters to estimate landslide-event magnitude: \r\n% cutoff smallest area that follows power law)and beta (power-law exponent)\r\n% values of frequency-size distribution (FAD) of landslides, and a horizontal\r\n% array (Area) with landslide sizes. This script can also calculate the\r\n% uncertainty in landslide-event magnitude if uncertainties in cutoff and\r\n% beta values are given as input parameters.\r\n\r\n% The power-law distribution can be captured in both cumulative and \r\n% non-cumulative FADs, and the power-law exponent (beta) for a non-cumulative \r\n% FAD can be transferred to its cumulative equivalent, alpha, using the relation \r\n% alpha=beta-1 (Guzzetti et al., 2002). In this code, the calculations are carried \r\n% out on the non-cumulative FAD.\r\n\r\n% The cutoff is the landslide size where frequency-size distribution curve\r\n% diverges form the power-law. In this code its unit is meter square.\r\n\r\n% Beta value resfers to the slope of the frequency-size distribution. It also\r\n% called as power-law exponent (scaling parameter, beta). For most landslide\r\n% inventories, non-cumulative power-law exponents occur in the range of \r\n% 1.4–3.4, with a central tendency of 2.3–2.5 (Stark and Guzzetti, 2009;\r\n% Van Den Eeckhaut et al., 2007).\r\n\r\n% The unit of landslide sizes are in meter square.\r\n\r\n% To obtain the cutoff & beta values and thier uncertainties the method\r\n% suggested by Clauset et al.(2009) can be used. The original scripts \r\n% (plfit.m and plvar.m) of Clauset et al. (2009) can be downloaded from the\r\n% following link to calculate these parameters: \r\n% http://www.santafe.edu/~aaronc/powerlaws/ \r\n\r\n% The Output Of The Script\r\n% When you run the mLS function, for the given sample data, the corresponding \r\n% mLS (landslide-event magnitude) will be obtained. If the uncertainties in \r\n% cutoff (cutoff_error) and beta (beta_error) values are provided, the\r\n% uncertainty in mLS (error) is also estimated by the script. As an output\r\n% of this code, a plot showing the frequency-area distribution of the given \r\n% landslides and the corresponding power-law fit are also obtained.\r\n% mLS does not have any unit.\r\n\r\nfunction [mLS,error]=mLS(Area,cutoff,beta,beta_error,cutoff_error)\r\n\r\n% In the following lines, the bins are defined with an array. We took 2 as \r\n% the minimum bin size and we used increasing bin sizes. We increase the \r\n% bin widths while the landslide size increases, so that bin widths become\r\n% approximately equal in logarithmic coordinates. 
To create a long array for \r\n% the bins we tentatively pick 120 for the size of x1 vector defined below. \r\n\r\nx1(1,1)=2;\r\nfor i=2:120\r\n x1(1,i)=x1(1,i-1)*1.2;\r\nend\r\nx2=log10(x1);\r\n\r\nFreq=histc(Area,x1); %Frequency values are calculated for each bin \r\ns=size(x1);\r\ns=s(1,2);\r\ninternal=zeros(1,s);\r\n\r\nfor i=2:s\r\n internal(1,i)=x1(1,i)-x1(1,i-1);\r\nend\r\ninternal(1,1)=min(x1);\r\nFD=Freq./internal;\r\n\r\nx1_rev = abs(x1-cutoff); % the index of value that is closest to cutoff value is identified along the x1 array \r\n[indexMidpoint indexMidpoint] = min(x1_rev);\r\n\r\nx=x1(indexMidpoint:end); % the x (size bines) array for the frequeny-size distribution is defined \r\ny=FD(indexMidpoint:end); % the y (frequency densities) array for the frequeny-size distribution is defined \r\n\r\nif beta>0 % beta value have to be negative\r\n beta=-1*beta;\r\nend\r\nbeta_stored=beta;\r\n\r\nconstant=y(1,1)/cutoff^beta; % The c constant is calculated along the power-low where x=cutoff\r\nfit_y=constant*x1.^beta; % Frequency-density values calculated for the defined power-law fit\r\nfit_y_stored=fit_y; \r\n\r\nmidx=10^((log10(max(Area))+(log10(cutoff)))/2); % The x and y values at mid-point location is read along the power-law fit\r\nmidy=constant*midx^beta;\r\n\r\nRefmidx=4.876599623713225e+04; % X value for the mid point of the Northridge (reference point) inventory\r\nRefmidy=8.364725347860417e-04; % Y value for the mid point of the Northridge (reference point) inventory\r\nac=Refmidy/(11111*Refmidx^beta); % the c' constant (as) is calculated here for the mid-point of \r\n % the Northridge inventory as a reference point where mLS=log(11111) \r\n \r\nmLS=log10((midy/(ac*midx^(beta)))); % mLS is calculated in this line\r\nmLS_stored=mLS;\r\n\r\n% Uncertainty in mLS will be calculated if the required inputs are given\r\n% To do that Monte-Carlo simulation is run 10,000 times \r\n\r\nif exist('beta_error')==1 && exist('cutoff_error')==1 \r\n beta_interval_N=((beta+beta_error)-(beta-beta_error))/(499); %Number of elements to create an array including 500 uniformly distributed beta values is defined \r\n beta_interval=(beta-beta_error):beta_interval_N:(beta+beta_error); %An array including 500 uniformly distributed beta values is defined \r\n\r\n cutoff_min=(cutoff-cutoff_error); \r\n if cutoff_min<=0\r\n cutoff_min=2;\r\n else\r\n cutoff_min=cutoff_min;\r\n end\r\n cutoff_max=(cutoff+cutoff_error);\r\n cutoff_interval_N=((cutoff_max)-(cutoff_min))/(499); %Number of elements to create an array including 500 uniformly distributed cutoff values is defined \r\n cutoff_interval=(cutoff_min):cutoff_interval_N:(cutoff_max); %An array including 500 uniformly distributed cutoff values is defined \r\n\r\n beta_mean=mean(beta_interval); %Mean of beta values is identified\r\n beta_std=std(beta_interval); %Standard deviation of beta vaues is identified\r\n cutoff_mean=mean(cutoff_interval); %Mean of cutoff values is identified\r\n cutoff_std=std(cutoff_interval); %Standard deviation of cutoff values is identified\r\n \r\n for i=1:10000 %mLS values are calculated for randomly sampled beta and cutoff values below\r\n cutoff=normrnd(cutoff_mean,cutoff_std);\r\n beta=normrnd(beta_mean,beta_std);\r\n constant=y(1,1)/cutoff^beta;\r\n fit_y=constant*x1.^beta; \r\n midx=10^((log10(max(Area))+(log10(cutoff)))/2);\r\n ac=Refmidy/(11111*Refmidx^beta);\r\n mLS_array(i,1)=log10((midy/(ac*midx^(beta))));\r\n end\r\n\r\n mLS_array=mLS_array(all(~isinf(mLS_array),2),:); % \"Inf\" cells are removed from the 
array \r\n error=std(mLS_array(:)); %Uncertainty of mLS calcultated as a first standard deviation of mLS values\r\nelse\r\n disp('Uncertainty in mLS will not be calculated because the variable \"cutoff_error\" and \"beta_error\" is missing')\r\n error='?'\r\nend\r\n\r\n% A graph showing the frequency-area distribution of the given landslides \r\n% and the corresponding power-law fit are plotted.\r\nloglog(x1,fit_y_stored,'-','LineWidth',2,'Color','r');hold on\r\nloglog(x1,FD,'ok','MarkerSize',8,'MarkerFaceColor','b','MarkerEdgeColor','k')\r\naxis([1 1.E+7 1.E-6 1000])\r\nset(get(gca,'Xlabel'),'string','Landslide Area (m^2)','FontSize',12, 'FontUnits','points','FontWeight','normal')\r\nset(get(gca,'Ylabel'),'string','Frequency Density (m^-^2)','FontSize',12, 'FontUnits','points','FontWeight','normal')\r\nstr={['\\beta = ',num2str(beta_stored)];['mLS = ',num2str(mLS_stored),(char(177)),num2str(error)]};\r\ntext(x1(1,1),(min(FD(FD>0)*10)),str,'FontSize',12) \r\nend\r\n"} +{"plateform": "github", "repo_name": "WenbingLv/NPC-radiomics-master", "name": "getGLCM_Symmetric.m", "ext": ".m", "path": "NPC-radiomics-master/getGLCM_Symmetric.m", "size": 5813, "source_encoding": "utf_8", "md5": "5c7c1e015f2cfab250ac285142fb04c5", "text": "function coocMat = getGLCM_Symmetric(varargin)\r\n%inputStr = {TumorVolume,'Distance',[],'Direction',[],'numgray',levelsM+1};\r\n%\r\n%ljlubme@gmail.com\r\n%Southern Medical University\r\n%\r\n%Default settings\r\ncoocMat= NaN;\r\ndistance = [1;2;4;8];\r\nnumLevels = 16;\r\n\r\noffSet = [1 0 0; 1 1 0; 0 1 0; -1 1 0]; %2D Co-Occurrence directions 0,45,90,135 degrees\r\n%the additional 9 directions of 3D volume\r\ndimension3 = [0 0 1; 1 0 1; -1 0 1; 0,1,1; 0 -1 1; 1 1 1; -1 1 1; 1 1 -1; 1 1 -1]; \r\noffSet = cat(1,offSet,dimension3);%13 directions\r\n\r\n%checking inputs\r\ndata = varargin{1};\r\ntemp = size(data);\r\nif size(temp)<3\r\n disp('Error: This program is designed for 3 dimensional data')\r\n return;\r\nend\r\nnumInput = size(varargin,2);\r\nfor inputs =2:numInput\r\n temp = varargin{1,inputs};\r\n if ~ischar(temp)\r\n continue;\r\n end\r\n temp = upper(temp);\r\n switch (temp)\r\n \r\n case 'DIRECTION'\r\n temp2 = int8(varargin{1,inputs+1});\r\n if size(size(temp2),2) ~=2\r\n disp('Error: Direction input is formatted poorly')\r\n return;\r\n end\r\n if size(temp2,2) ~=3\r\n disp(['Error: Incorrect number of columns in ' ... 
\r\n 'direction variable'])\r\n return;\r\n end\r\n if max(max(temp2))>1 | min(min(temp2))<-1\r\n disp('Error: Direction values can only be {-1,0,1}')\r\n return;\r\n end\r\n offSet = temp2;\r\n \r\n case 'DISTANCE'\r\n temp2 = int8(varargin{1,inputs+1});\r\n if size(size(temp2)) ~= 2\r\n disp('Error: Incorrect formatting of distance variable')\r\n return;\r\n end\r\n \r\n if sum(sum(size(temp2))) ~= max(size(temp2)+1)\r\n disp(['Error: Distance variable is to be a one ' ...\r\n 'dimensional array'])\r\n return;\r\n end\r\n \r\n distance = temp2;\r\n \r\n case 'NUMGRAY'\r\n temp2 = varargin{1,inputs+1};\r\n if temp2<1\r\n disp('The number of graylevels must be positive')\r\n return;\r\n end\r\n numLevels = uint16(temp2);\r\n end\r\nend\r\n\r\nnoDirections = size(offSet,1); %number of directions, currently 13\r\ncoocMat = zeros(numLevels, numLevels, noDirections, size(distance,2));\r\n\r\nfor dist =1:size(distance,2) %distance\r\n [coocMat(:,:,:,dist)] = graycooc3d(data(:,:,:),distance(dist),numLevels,offSet); \r\nend\r\nreturn\r\n\r\nfunction [new_coMat]= graycooc3d(I,distance,numLevels,offSet)\r\n%I = the 3D image matrix\r\n%distance = a vector of the distances to analyze in\r\n%numLevels = the number of graylevels to be used\r\n%offSet = a matrix of the directions to analyze in\r\n%coMat the Co-Occurrence matrices produced%%\r\n%**************Variable initialization/Declaration**********************\r\n%harMat =0;\r\nnoDirections = size(offSet,1); %number of directions, currently 13\r\ncoMat = zeros(numLevels,numLevels,noDirections);\r\n%**************************Beginning analysis*************************\r\n%Order of loops: Direction, slice, graylevel, graylevel locations\r\nfor direction =1:noDirections %currently 13 (for the 3d image)\r\n\r\n tempMat = zeros(numLevels,numLevels,size(I,3));\r\n for slicej =1:size(I,3)\r\n for j=1:numLevels %graylevel\r\n \r\n %find all the instances of that graylevel\r\n [rowj,colj] = find(I(:,:,slicej)==j); \r\n\r\n %populating the Cooc matrix.\r\n for tempCount = 1:size(rowj,1) \r\n rowT = rowj(tempCount) + distance*offSet(direction,1);\r\n colT = colj(tempCount) + distance*offSet(direction,2);\r\n sliceT = slicej + distance*offSet(direction,3);\r\n rowTnegative = rowj(tempCount) - distance*offSet(direction,1);%the symmetry of GLCM. \r\n colTnegative = colj(tempCount) - distance*offSet(direction,2);%the symmetry of GLCM.\r\n sliceTnegative = slicej - distance*offSet(direction,3);%the symmetry of GLCM. 
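% A small stand-alone illustration, on a hypothetical 2-D image, of why
% counting both the +offset and -offset neighbour (as above) yields a
% symmetric GLCM: it equals the one-directional matrix plus its transpose.
Idemo = [1 1 2; 2 2 3; 3 1 1];           % tiny 3-level image
nLdemo = 3; colStep = 1;                 % offset: one step to the right
Gone = zeros(nLdemo);                    % one-directional counts
for r = 1:size(Idemo,1)
    for c = 1:size(Idemo,2)-colStep
        Gone(Idemo(r,c), Idemo(r,c+colStep)) = Gone(Idemo(r,c), Idemo(r,c+colStep)) + 1;
    end
end
Gsym = Gone + Gone.';                    % same counts as scanning each pixel and
                                         % tallying both its +d and -d neighbour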
\r\n [I1, I2, I3] = size(I); \r\n if rowT <= I1 && colT <= I2 && sliceT <= I3\r\n if rowT > 0 && colT > 0 && sliceT > 0\r\n %Error checking for NANs and Infinite numbers\r\n IIntensity = I(rowT,colT,sliceT);\r\n \r\n if ~isnan(IIntensity)\r\n if ~isinf(IIntensity)\r\n tempMat(j,IIntensity,slicej)= tempMat...\r\n (j,IIntensity,slicej)+1;\r\n end\r\n end\r\n end\r\n end\r\n if rowTnegative <= I1 && colTnegative <= I2 && sliceTnegative <= I3\r\n if rowTnegative > 0 && colTnegative > 0 && sliceTnegative > 0\r\n \r\n %Error checking for NANs and Infinite numbers\r\n \r\n IIntensitynegative = I(rowTnegative,colTnegative,sliceTnegative);% added by\r\n if ~isnan(IIntensitynegative)\r\n if ~isinf(IIntensitynegative)\r\n tempMat(j,IIntensitynegative,slicej)= tempMat...\r\n (j,IIntensitynegative,slicej)+1;\r\n \r\n end\r\n end\r\n end\r\n end\r\n end\r\n \r\n end\r\n\r\n end\r\n for slicej =1:size(I,3)\r\n coMat(:,:,direction)= coMat(:,:,direction)+tempMat(:,:,slicej);\r\n end \r\nend\r\nnew_coMat=coMat(:,:,:);\r\nreturn\r\n\r\n\r\n\r\n"} +{"plateform": "github", "repo_name": "WenbingLv/NPC-radiomics-master", "name": "getGLCM_Asymmetric.m", "ext": ".m", "path": "NPC-radiomics-master/getGLCM_Asymmetric.m", "size": 5891, "source_encoding": "utf_8", "md5": "d600aef8916ea9dd698c1241d56050d9", "text": "function coocMat = getGLCM_Asymmetric(varargin)\r\n%inputStr = {TumorVolume,'Distance',[],'Direction',[],'numgray',levelsM+1};\r\n%\r\n%ljlubme@gmail.com\r\n%Southern Medical University\r\n%\r\n\r\n%Default settings\r\ncoocMat= NaN;\r\ndistance = [1;2;4;8]; \r\nnumLevels = 16;\r\noffSet = [1 0 0; 1 1 0; 0 1 0; -1 1 0]; %2D Co-Occurrence directions 0,45,90,135 degrees\r\n%the additional 9 directions of 3D volume\r\ndimension3 = [0 0 1; 1 0 1; -1 0 1; 0,1,1; 0 -1 1; 1 1 1; -1 1 1; 1 1 -1; 1 1 -1];\r\noffSet = cat(1,offSet,dimension3);%13 directions\r\n\r\n%checking inputs\r\ndata = varargin{1};\r\ntemp = size(data);\r\nif size(temp)<3\r\n disp('Error: This program is designed for 3 dimensional data')\r\n return;\r\nend\r\nnumInput = size(varargin,2);\r\nfor inputs =2:numInput\r\n temp = varargin{1,inputs};\r\n if ~ischar(temp)\r\n continue;\r\n end\r\n temp = upper(temp);\r\n switch (temp)\r\n \r\n case 'DIRECTION'\r\n temp2 = int8(varargin{1,inputs+1});\r\n if size(size(temp2),2) ~=2\r\n disp('Error: Direction input is formatted poorly')\r\n return;\r\n end\r\n if size(temp2,2) ~=3\r\n disp(['Error: Incorrect number of columns in ' ... 
\r\n 'direction variable'])\r\n return;\r\n end\r\n if max(max(temp2))>1 | min(min(temp2))<-1\r\n disp('Error: Direction values can only be {-1,0,1}')\r\n return;\r\n end\r\n offSet = temp2;\r\n \r\n case 'DISTANCE'\r\n temp2 = int8(varargin{1,inputs+1});\r\n if size(size(temp2)) ~= 2\r\n disp('Error: Incorrect formatting of distance variable')\r\n return;\r\n end\r\n \r\n if sum(sum(size(temp2))) ~= max(size(temp2)+1)\r\n disp(['Error: Distance variable is to be a one ' ...\r\n 'dimensional array'])\r\n return;\r\n end\r\n \r\n distance = temp2;\r\n \r\n case 'NUMGRAY'\r\n temp2 = varargin{1,inputs+1};\r\n if temp2<1\r\n disp('The number of graylevels must be positive')\r\n return;\r\n end\r\n numLevels = uint16(temp2);\r\n end\r\nend\r\n\r\nnoDirections = size(offSet,1); %number of directions, currently 13\r\ncoocMat = zeros(numLevels, numLevels, noDirections, size(distance,2));\r\n\r\nfor dist =1:size(distance,2) %distance\r\n [coocMat(:,:,:,dist)] = graycooc3d(data(:,:,:),distance(dist),numLevels,offSet); \r\nend\r\nreturn\r\n\r\nfunction [new_coMat]= graycooc3d(I,distance,numLevels,offSet)\r\n%I = the 3D image matrix\r\n%distance = a vector of the distances to analyze in\r\n%numLevels = the number of graylevels to be used\r\n%offSet = a matrix of the directions to analyze in\r\n%coMat the Co-Occurrence matrices produced\r\n\r\n%**************Variable initialization/Declaration**********************\r\nnoDirections = size(offSet,1); %number of directions, currently 13\r\ncoMat = zeros(numLevels,numLevels,noDirections);\r\n\r\n%**************************Beginning analysis*************************\r\n%Order of loops: Direction, slice, graylevel, graylevel locations\r\nfor direction =1:noDirections %currently 13 (for the 3d image)\r\n\r\n tempMat = zeros(numLevels,numLevels,size(I,3));\r\n for slicej =1:size(I,3)\r\n for j=1:numLevels %graylevel\r\n %find all the instances of that graylevel\r\n [rowj,colj] = find(I(:,:,slicej)==j); \r\n %populating the Cooc matrix.\r\n for tempCount = 1:size(rowj,1) \r\n rowT = rowj(tempCount) + distance*offSet(direction,1);\r\n colT = colj(tempCount) + distance*offSet(direction,2);\r\n sliceT = slicej + distance*offSet(direction,3);\r\n rowTnegative = rowj(tempCount) - distance*offSet(direction,1);%the symmetry of GLCM. \r\n colTnegative = colj(tempCount) - distance*offSet(direction,2);%the symmetry of GLCM.\r\n sliceTnegative = slicej - distance*offSet(direction,3);%the symmetry of GLCM. 
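% Whichever variant is used, Haralick-type features are normally computed
% from the co-occurrence matrix after normalising it to a joint probability
% table. A short stand-alone sketch with hypothetical counts (this is not
% called by the function itself):
coocDemo = [2 1 0; 1 4 1; 0 1 2];                   % example 3x3 count matrix
Pdemo    = coocDemo ./ sum(coocDemo(:));            % counts -> probabilities
[iIdx, jIdx] = ndgrid(1:size(Pdemo,1), 1:size(Pdemo,2));
contrastDemo = sum(sum((iIdx - jIdx).^2 .* Pdemo)); % GLCM contrast
energyDemo   = sum(Pdemo(:).^2);                    % GLCM energy (angular second moment)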
\r\n [I1, I2, I3] = size(I); \r\n if rowT <= I1 && colT <= I2 && sliceT <= I3\r\n if rowT > 0 && colT > 0 && sliceT > 0\r\n %Error checking for NANs and Infinite numbers\r\n IIntensity = I(rowT,colT,sliceT); \r\n if ~isnan(IIntensity)\r\n if ~isinf(IIntensity)\r\n tempMat(j,IIntensity,slicej)= tempMat...\r\n (j,IIntensity,slicej)+1;\r\n end\r\n end\r\n end\r\n end\r\n% if rowTnegative <= I1 && colTnegative <= I2 && sliceTnegative <= I3\r\n% if rowTnegative > 0 && colTnegative > 0 && sliceTnegative > 0\r\n% \r\n% %Error checking for NANs and Infinite numbers\r\n% \r\n% IIntensitynegative = I(rowTnegative,colTnegative,sliceTnegative);% added by \r\n% if ~isnan(IIntensitynegative)\r\n% if ~isinf(IIntensitynegative)\r\n% %Matlab doesn't have a ++ operator.\r\n% tempMat(j,IIntensitynegative,slicej)= tempMat...\r\n% (j,IIntensitynegative,slicej)+1;\r\n% \r\n% end\r\n% end\r\n% end\r\n% end\r\n end\r\n \r\n end\r\n\r\n end\r\n for slicej =1:size(I,3)\r\n coMat(:,:,direction)= coMat(:,:,direction)+tempMat(:,:,slicej);\r\n end \r\nend\r\nnew_coMat=coMat(:,:,:);\r\nreturn\r\n\r\n"} +{"plateform": "github", "repo_name": "WenbingLv/NPC-radiomics-master", "name": "computeBoundingBox.m", "ext": ".m", "path": "NPC-radiomics-master/computeBoundingBox.m", "size": 3017, "source_encoding": "utf_8", "md5": "71c3aad5fb0ffd5ca96a185b0ee529e2", "text": "function [boxBound] = computeBoundingBox(mask)\n% -------------------------------------------------------------------------\n% function [boxBound] = computeBoundingBox(mask)\n% -------------------------------------------------------------------------\n% DESCRIPTION: \n% This function computes the smallest box containing the whole region of \n% interest (ROI). It is adapted from the function compute_boundingbox.m\n% of CERR .\n% -------------------------------------------------------------------------\n% INPUTS:\n% - mask: 3D array, with 1's inside the ROI, and 0's outside the ROI.\n% -------------------------------------------------------------------------\n% OUTPUTS:\n% - boxBound: Bounds of the smallest box containing the ROI. \n% Format: [minRow, maxRow;\n% minColumn, maxColumns;\n% minSlice, maxSlice]\n% -------------------------------------------------------------------------\n% AUTHOR(S): \n% - Martin Vallieres \n% - CERR development team \n% -------------------------------------------------------------------------\n% HISTORY:\n% - Creation: May 2015\n%--------------------------------------------------------------------------\n% STATEMENT:\n% This file is part of , \n% a package providing MATLAB programming tools for radiomics analysis.\n% --> Copyright (C) 2015 Martin Vallieres\n% --> Copyright 2010, Joseph O. Deasy, on behalf of the CERR development team\n% \n% This package is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% This package is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with this package. 
If not, see .\n% -------------------------------------------------------------------------\n\n[iV,jV,kV] = find3d(mask);\nboxBound(1,1) = min(iV);\nboxBound(1,2) = max(iV);\nboxBound(2,1) = min(jV);\nboxBound(2,2) = max(jV);\nboxBound(3,1) = min(kV);\nboxBound(3,2) = max(kV);\n\nend\n\n\n% CERR UTILITY FUNCTIONS (can be found at: https://github.com/adityaapte/CERR)\nfunction [iV,jV,kV] = find3d(mask3M)\nindV = find(mask3M(:));\n[iV,jV,kV] = fastind2sub(size(mask3M),indV);\niV = iV';\njV = jV';\nkV = kV';\nend\n\nfunction varargout = fastind2sub(siz,ndx)\nnout = max(nargout,1);\nif length(siz)<=nout,\n siz = [siz ones(1,nout-length(siz))];\nelse\n siz = [siz(1:nout-1) prod(siz(nout:end))];\nend\nn = length(siz);\nk = [1 cumprod(siz(1:end-1))];\nndx = ndx - 1;\nfor i = n:-1:1,\n varargout{i} = floor(ndx/k(i)) + 1;\n ndx = ndx - (varargout{i}-1) * k(i);\nend\nend"} +{"plateform": "github", "repo_name": "PerfXLab/caffe_perfdnn-master", "name": "classification_demo.m", "ext": ".m", "path": "caffe_perfdnn-master/matlab/demo/classification_demo.m", "size": 5466, "source_encoding": "utf_8", "md5": "45745fb7cfe37ef723c307dfa06f1b97", "text": "function [scores, maxlabel] = classification_demo(im, use_gpu)\n% [scores, maxlabel] = classification_demo(im, use_gpu)\n%\n% Image classification demo using BVLC CaffeNet.\n%\n% IMPORTANT: before you run this demo, you should download BVLC CaffeNet\n% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)\n%\n% ****************************************************************************\n% For detailed documentation and usage on Caffe's Matlab interface, please\n% refer to the Caffe Interface Tutorial at\n% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab\n% ****************************************************************************\n%\n% input\n% im color image as uint8 HxWx3\n% use_gpu 1 to use the GPU, 0 to use the CPU\n%\n% output\n% scores 1000-dimensional ILSVRC score vector\n% maxlabel the label of the highest score\n%\n% You may need to do the following before you start matlab:\n% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64\n% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6\n% Or the equivalent based on where things are installed on your system\n% and what versions are installed.\n%\n% Usage:\n% im = imread('../../examples/images/cat.jpg');\n% scores = classification_demo(im, 1);\n% [score, class] = max(scores);\n% Five things to be aware of:\n% caffe uses row-major order\n% matlab uses column-major order\n% caffe uses BGR color channel order\n% matlab uses RGB color channel order\n% images need to have the data mean subtracted\n\n% Data coming in from matlab needs to be in the order\n% [width, height, channels, images]\n% where width is the fastest dimension.\n% Here is the rough matlab code for putting image data into the correct\n% format in W x H x C with BGR channels:\n% % permute channels from RGB to BGR\n% im_data = im(:, :, [3, 2, 1]);\n% % flip width and height to make width the fastest dimension\n% im_data = permute(im_data, [2, 1, 3]);\n% % convert from uint8 to single\n% im_data = single(im_data);\n% % reshape to a fixed size (e.g., 227x227).\n% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');\n% % subtract mean_data (already in W x H x C with BGR channels)\n% im_data = im_data - mean_data;\n\n% If you have multiple images, cat them with cat(4, ...)\n\n% Add caffe/matlab to your Matlab search PATH in order to use matcaffe\nif exist('../+caffe', 'dir')\n 
addpath('..');\nelse\n error('Please run this demo from caffe/matlab/demo');\nend\n\n% Set caffe mode\nif exist('use_gpu', 'var') && use_gpu\n caffe.set_mode_gpu();\n gpu_id = 0; % we will use the first gpu in this demo\n caffe.set_device(gpu_id);\nelse\n caffe.set_mode_cpu();\nend\n\n% Initialize the network using BVLC CaffeNet for image classification\n% Weights (parameter) file needs to be downloaded from Model Zoo.\nmodel_dir = '../../models/bvlc_reference_caffenet/';\nnet_model = [model_dir 'deploy.prototxt'];\nnet_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];\nphase = 'test'; % run with phase test (so that dropout isn't applied)\nif ~exist(net_weights, 'file')\n error('Please download CaffeNet from Model Zoo before you run this demo');\nend\n\n% Initialize a network\nnet = caffe.Net(net_model, net_weights, phase);\n\nif nargin < 1\n % For demo purposes we will use the cat image\n fprintf('using caffe/examples/images/cat.jpg as input image\\n');\n im = imread('../../examples/images/cat.jpg');\nend\n\n% prepare oversampled input\n% input_data is Height x Width x Channel x Num\ntic;\ninput_data = {prepare_image(im)};\ntoc;\n\n% do forward pass to get scores\n% scores are now Channels x Num, where Channels == 1000\ntic;\n% The net forward function. It takes in a cell array of N-D arrays\n% (where N == 4 here) containing data of input blob(s) and outputs a cell\n% array containing data from output blob(s)\nscores = net.forward(input_data);\ntoc;\n\nscores = scores{1};\nscores = mean(scores, 2); % take average scores over 10 crops\n\n[~, maxlabel] = max(scores);\n\n% call caffe.reset_all() to reset caffe\ncaffe.reset_all();\n\n% ------------------------------------------------------------------------\nfunction crops_data = prepare_image(im)\n% ------------------------------------------------------------------------\n% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that\n% is already in W x H x C with BGR channels\nd = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');\nmean_data = d.mean_data;\nIMAGE_DIM = 256;\nCROPPED_DIM = 227;\n\n% Convert an image returned by Matlab's imread to im_data in caffe's data\n% format: W x H x C with BGR channels\nim_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR\nim_data = permute(im_data, [2, 1, 3]); % flip width and height\nim_data = single(im_data); % convert from uint8 to single\nim_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data\nim_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)\n\n% oversample (4 corners, center, and their x-axis flips)\ncrops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');\nindices = [0 IMAGE_DIM-CROPPED_DIM] + 1;\nn = 1;\nfor i = indices\n for j = indices\n crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);\n crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);\n n = n + 1;\n end\nend\ncenter = floor(indices(2) / 2) + 1;\ncrops_data(:,:,:,5) = ...\n im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);\ncrops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "initialiseProcedure.m", "ext": ".m", "path": "TeachingCode-master/ArduinoAnomaloscope/xxxContributed/arduinoHFP/initialiseProcedure.m", "size": 1144, "source_encoding": "utf_8", "md5": "86882fe4db83c69d7a985d7d13165b79", "text": "\r\nfunction [increaseKey, decreaseKey, deltaKey, finishKey, ...\r\n increaseInputs, decreaseInputs, 
deltaIndex, rDeltas]=initialiseProcedure\r\nincreaseKey=KbName('up'); % key code for increasing red intensity\r\ndecreaseKey=KbName('down'); % key code for decreaseing red intensity\r\ndeltaKey=KbName('space'); % key code for changing red delta\r\nfinishKey=KbName('q'); % key code for finishing procedure and recording data\r\n\r\n% THIS IS IMPORTANT: increase/decrease inputs are the messages telling Arduino\r\n% how to change red intensity. increaseInput{i} and decreaseInput{i} have\r\n% the same absolute value, but opposite sign (e.g., increaseInput{1}=20,\r\n% decreaseInput{1}=-20. so that a single index can change the delta of\r\n% both. in the current arduino code, 'q'/'r' change the intensity by 20\r\n% bytes (0-255), 'w'/'t' by 5, 'e', 'y' by 1. To change how much each\r\n% affects the code, you need to change the arduino FlickeringLight code.\r\n% (by changing the if statements for each input signal)\r\nincreaseInputs={'q', 'w', 'e'}; \r\ndecreaseInputs={'r', 't', 'y'};\r\ndeltaIndex=1;\r\nrDeltas=[20, 5, 1];\r\n\r\nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "ArduinoMethodOfAdjustmentHFP.m", "ext": ".m", "path": "TeachingCode-master/ArduinoAnomaloscope/xxxContributed/arduinoHFP/ArduinoMethodOfAdjustmentHFP.m", "size": 3327, "source_encoding": "utf_8", "md5": "2afbe378afee5e39e4b9675765af3179", "text": "\r\n% if the code doesn't work, check that the arduino port (written in\r\n% ConstantsHFP) is the right one (for windows, check Device Manager->ports)\r\n\r\nfunction ArduinoImplementedHFP\r\n\r\n% clear everything before starting program\r\ndelete(instrfindall)\r\nclear\r\naddpath('C:\\Users\\mediaworld\\Documents\\MATLAB\\internship\\HFP_Code\\HFP_Code');\r\n\r\n% call arduino object\r\nserialObj=serialport(ConstantsHFP.serialPort, 9600);\r\n\r\n% create variables\r\n[increaseKey, decreaseKey, deltaKey,... \r\n finishKey, ...\r\n increaseInputs, decreaseInputs, ...\r\n deltaIndex, rDeltas]=initialiseProcedure;\r\n\r\n\r\n% adjust flicker settings\r\nwhile true\r\n fopen(serialObj);\r\n \r\n ListenChar(0)\r\n DoYouWantToStart=input('would you like to run a new trial? (yes or no) ', 's');\r\n if strcmp(DoYouWantToStart, 'yes')==0\r\n disp('aborting mission')\r\n break;\r\n end\r\n ListenChar(2)\r\n\r\n\r\n % setup random initial flicker setting ('s' is the message, in the \r\n % FlickeringLight.ino code that stands for randomised start)\r\n fprintf(serialObj, 's');\r\n \r\n\r\n % Record red and green initial values for final save\r\n rInit=read(serialObj, 6, \"char\");\r\n gInit=read(serialObj, 6, \"char\");\r\n\r\n pause(2)\r\n disp('starting new trial');\r\n \r\n % run trial\r\n while true\r\n % get keyboard input\r\n [secs, keyCode, deltaSecs]=KbPressWait();\r\n\r\n \r\n if keyCode(increaseKey)\r\n\r\n % if user asks to increase light, select increase amount\r\n % corresponding to current delta. See initialiseProcedure.m for more info\r\n arduinoInput=increaseInputs{deltaIndex};\r\n\r\n fprintf(serialObj, arduinoInput);\r\n\r\n elseif keyCode(decreaseKey)\r\n \r\n % decrease intensity. for more info look at\r\n % initialiseProcedure.m\r\n arduinoInput=decreaseInputs{deltaIndex};\r\n\r\n fprintf(serialObj, arduinoInput);\r\n \r\n elseif keyCode(deltaKey)\r\n deltaIndex=deltaIndex+1;\r\n if deltaIndex>length(rDeltas)\r\n deltaIndex=1;\r\n end\r\n \r\n elseif keyCode(finishKey)\r\n\r\n disp('Printing final results... 
please wait')\r\n % print initial and final red value\r\n \r\n fprintf(serialObj, 'f');\r\n \r\n\r\n %in case you want to eliminate one of these \"read\" commands,\r\n %remember to cancel the correspondent part in the \"f\" if\r\n %statement in the FlickeringLight arduino code\r\n initialRed=read(serialObj, 6, \"char\");\r\n finalRed=read(serialObj, 6, \"char\");\r\n fprintf(\"Initial Red Value = %d,\\n\", str2num(initialRed));\r\n fprintf(\"Final Red Value = %d \\n\", str2num(finalRed));\r\n\r\n % save everything\r\n ListenChar(0)\r\n WantToSave=input(\"Would you like to save these data? (yes or no) \", 's');\r\n if strcmp(WantToSave, 'yes')\r\n disp(\"Saving results...\");\r\n SaveHFPResultsTable(finalRed, rInit, gInit);\r\n \r\n else\r\n disp(\"Results not saved\");\r\n end\r\n ListenChar(2)\r\n break;\r\n else\r\n continue;\r\n end\r\n end\r\ndelete(instrfindall);\r\n\r\n%save data\r\n\r\nend\r\nListenChar(0)"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "RenderSpectrumOnMonitorTutorial.m", "ext": ".m", "path": "TeachingCode-master/ICVS2020Tutorials/RenderSpectrumOnMonitorTutorial.m", "size": 11826, "source_encoding": "utf_8", "md5": "749776d43b6f5da2a72aa5cc8f8786df", "text": "% RenderSpectrumOnMonitorTutorial\n%\n% Exercise to learn about rendering metamers on a monitor.\n%\n% This tutorial is available in the github repository\n% https://github.com/BrainardLab/TeachingCode\n% You can either clone the respository or just download a copy from\n% that page (see green \"Code\" button).\n%\n% To run this, you will need both the Psychophysics Toolbox (PsychToolbox)\n% and the BrainardLabToolbox on your path. You can get the PsychToolbox\n% from\n% psychtoolbox.org\n% You can get the BrainardLabToolbox from\n% https://github.com/BrainardLab/BrainardLabToolbox\n%\n% If you use the ToolboxToolbox (https://github.com/toolboxhub/toolboxtoolbox)\n% and install the TeachingCode repository in your projects folder, you can\n% install the dependencies by using\n% tbUseProject('TeachingCode')\n% at the Matlab prompt.\n%\n% You also need the calibration file NEC_MultisyncPA241W.mat, which is in\n% the same directory as this tutorial in the github respository.\n%\n% There is a video that goes through this script and unpacks the\n% calculations. 
It may be streamed from this link\n% https://www.dropbox.com/s/v0ylynxteh7jc2j/RenderASpectrum.Orig.mp4?dl=0\n% and downloaded from this link\n% https://www.dropbox.com/s/v0ylynxteh7jc2j/RenderASpectrum.Orig.mp4?dl=1\n% The downloaded version will play at higher resolution.\n%\n% A video lecture on using matrix-vector representations in colorimetric\n% calculations is available here\n% Stream - https://www.dropbox.com/s/lvtr3r60olmho3d/ConeFundamentalsLinearTransform.Orig.mp4?dl=0\n% Download - https://www.dropbox.com/s/lvtr3r60olmho3d/ConeFundamentalsLinearTransform.Orig.mp4?dl=1\n% As with the video above, the downloaded version will play at higher resolution.\n%\n% See also: RenderSpectrumOnMonitorForDogTutorial, RenderImageOnMonitorForDogTutorial\n\n% History:\n% 08/01/2020 dhb Wrote for ICVS from other tutorials that weren't quite\n% what we wanted.\n\n%% Clear\nclear; close all;\n\n%% Load and examine a test calibration file\n%\n% These are measurements from an LCD monitor, with data stored in a\n% structure that describes key monitor properties.\ncalData = load('NEC_MultisyncPA241W');\ncal = calData.cals{end};\n\n% Get wavelength sampling of functions in cal file.\nS = cal.rawData.S;\nwls = SToWls(S);\n\n% For simplicity, let's assume that no light comes off the monitor when the\n% input is set to zero. This isn't true for real monitors, but we don't\n% need to fuss with that aspect at the start. \ncal.processedData.P_ambient = zeros(size(cal.processedData.P_ambient));\n\n%% Plot the spectra of the three monitor primaries.\n%\n% For this monitor each primary is determined by the emission spectra of\n% one of the phosphors on its faceplate, but that's a detail. Whwat we care\n% about are the spectra, not how they were instrumented physically.\n%\n% Each primary spectrum is in a separate column of the matrix cal.processedData.P_device.\n% In MATLAB,can use the : operator to help extract various pieces of a\n% matrix. So:\nredPhosphor = cal.processedData.P_device(:,1);\ngreenPhosphor = cal.processedData.P_device(:,2);\nbluePhosphor = cal.processedData.P_device(:,3);\nfigure(1);clf; hold on\nset(gca,'FontName','Helvetica','FontSize',18);\nplot(wls,redPhosphor,'r','LineWidth',3);\nplot(wls,greenPhosphor,'g','LineWidth',3);\nplot(wls,bluePhosphor,'b','LineWidth',3);\ntitle( 'Monitor channel spectra','FontSize',24);\nxlabel( 'Wavelength [ nm ]','FontSize',24); ylabel( 'Radiance [ W / m^2 / sr / wlbin ]','FontSize',24);\nhold off\n\n%% Get human cone spectral sensitivities\n%\n% Here we use the Stockman-Sharpe 2-degree fundamentals, which are also the\n% CIE fundamentals. They are stored as a .mat file in the Psychophysics\n% Toolbox. See \"help PsychColorimetricMatFiles'.\n%\n% By convention in the Psychtoolbox, we store sensitivities as the rows of\n% a matrix. Spline the wavelength sampling to match that in the calibration\n% file.\nload T_cones_ss2\nT_cones = SplineCmf(S_cones_ss2,T_cones_ss2,S);\n\n% Make a plot\nfigure(2); clf; hold on\nset(gca,'FontName','Helvetica','FontSize',18);\nplot(wls,T_cones(1,:),'r','LineWidth',3);\nplot(wls,T_cones(2,:),'g','LineWidth',3);;\nplot(wls,T_cones(3,:),'b','LineWidth',3);\ntitle( 'LMS Cone Fundamentals','FontSize',24);\nxlabel( 'Wavelength','FontSize',24); ylabel( 'Sensitivity','FontSize',24);\nhold off\n\n%% Get a spectrum to render\n%\n% We want to render a spectrum on the monitor so that the light coming off the monitor has\n% the same effect on the human cones as the spectrum would have. So we\n% need a spectrum. 
We'll CIE daylight D65, since we have it available.\n%\n% The spectrum we read in is too bright to render on our monitor, so we\n% also scale it down into a more reasonable range so we don't have to worry\n% about that below.\nload spd_D65\nspectrumToRender = SplineSpd(S_D65,spd_D65,S)/0.75e4;\n\n% If you want a different spectrum, this is operation on the D65\n% produces a spectrum with more long wavelenght power and that renders\n% pinkish.\n% spectrumToRender = 1.5*max(spectrumToRender(:))*ones(size(spectrumToRender))-spectrumToRender;\n\n% Make a plot of the spectrum to render\nfigure(3); clf; hold on\nplot(wls,spectrumToRender,'k','LineWidth',3);\ntitle('Metamers','FontSize',24);\nxlabel('Wavelength','FontSize',24); ylabel( 'Power','FontSize',24);\n\n%% Compute the cone excitations from the spectrum we want to render\n%\n% This turns out to be a simple matrix multiply in Matlab. The\n% sensitivities are in the rows of T_cones and the spectral radiance is in\n% the column vector spectrumToRender. For each row of T_cones, the matrix\n% multiple consists of weighting the spectrum by the sensitivity at each\n% wavelength and adding them up.\n%\n% Implicit here is that the units of spectrum give power per wavelength\n% sampling bin, another important detail you'd want to think about to get\n% units right for a real application and that we won't worry about here.\nLMSToRender = T_cones*spectrumToRender;\n\n%% We want to find a mixture of the monitor primaries that produces the same excitations\n%\n% Let's use the column vector [r g b]' to denote the amount of each primary\n% we'll ultimately want in the mixture. By convention we'll think of r, g,\n% and b as proportions relative to the maximum amount of each phosphor\n% available on the monitor.\n%\n% It might be useful to make a plot of the example spectrum and see that it\n% indeed looks like a mixture of the primary spectra.\nrgbExample = [0.2 0.5 0.9]';\nmonitorSpectrumExample = rgbExample(1)*redPhosphor + rgbExample(2)*greenPhosphor + rgbExample(3)*bluePhosphor;\n\n% We can also compute the spectrum coming off the monitor for any choice of r,\n% g, and b using a matrix multiply. In this case, think of the\n% multiplication as weighting each of the columns of cal.processedData.P_device (which\n% are the primary spectra) and then summing them. You can verify that this\n% gives the same answer as the expanded form just above.\nmonitorSpectrumExampleCheck = cal.processedData.P_device*rgbExample;\n\n% We can also compute the LMS cone excitations for this example. This is\n% just a column vector of three numbers.\nmonitorLMSExample = T_cones*monitorSpectrumExample;\n\n% Now note that we can combine the two steps above, precomputing the matrix\n% that maps between the rgb vector and the LMS excitations that result.\n%\n% You can verify that monitorLMSExample and monitorLMSExampleCheck are the\n% same as each other.\nrgbToLMSMatrix = T_cones*cal.processedData.P_device;\nmonitorLMSExampleCheck = rgbToLMSMatrix*rgbExample;\n\n% We want to go the other way, starting with LMSToRender and obtaining an\n% rgb vector that produces it. This is basically inverting the relation\n% above, which is easy in Matlab.\nLMSTorgbMatrix = inv(rgbToLMSMatrix);\nrgbThatRender = LMSTorgbMatrix*LMSToRender;\n\n% Let's check that it worked. The check values here should be the same as\n% LMSToRender.\nrenderedSpectrum = cal.processedData.P_device*rgbThatRender;\nLMSToRenderCheck = T_cones*renderedSpectrum;\n\n% Add rendered spectrum to plot of target spectrum. 
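% The same three-unknown solve can also be written with the backslash
% operator instead of an explicit inverse, which is the usual numerical
% advice. A stand-alone sketch with made-up numbers, not the calibration
% data used here:
Mdemo = [0.60 0.30 0.02; ...
         0.20 0.70 0.05; ...
         0.02 0.05 0.90];          % hypothetical rgb -> LMS matrix
LMSdemo = [0.40 0.35 0.20]';       % hypothetical target cone excitations
rgbDemo = Mdemo \ LMSdemo;         % equivalent to inv(Mdemo)*LMSdemo, but more stable
% Entries outside [0 1] would mean the target lies outside the monitor's
% gamut and cannot be matched without scaling it down first.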
You can see that they\n% are of the same overall scale but differ in relative spectra. These two\n% spectra are metamers - they produce the same excitations in the cones and\n% will look the same to a human observer.\nfigure(3);\nplot(wls,renderedSpectrum,'k:','LineWidth',3);\n\n%% Make an image that shows the color\n%\n% We know the proportions of each of the monitor primaries required to\n% produce a metamer to the spectrum we wanted to render. Now we'd like to\n% look at this rendered spectrum. We have to assume that the properties of\n% the monitor we're using are the same as the one in the calibration file,\n% which isn't exactly true but will be close enough for illustrative\n% purposes.\n\n% What we need to do is find RGB values to put in the image so that we get\n% the desired rgb propotions in the mixture that comes off. This is a\n% little tricky, because the relation between the RGB values we put into an\n% image and the rgb values that come off is non-linear. This non-linearity\n% is called the gamma curve of the monitor, and we have to correct for it,\n% a process known as gamma correction.\n\n% As part of the monitor calibration, we meausured the gamma curves of our\n% monitor, and they are in the calibration structure. Let's have a look\nfigure(4); clf; hold on\nset(gca,'FontName','Helvetica','FontSize',18);\ngammaInput = cal.processedData.gammaInput;\nredGamma = cal.processedData.gammaTable(:,1);\ngreenGamma = cal.processedData.gammaTable(:,2);\nblueGamma = cal.processedData.gammaTable(:,3);\nplot(gammaInput,redGamma,'r','LineWidth',3);\nplot(gammaInput,greenGamma,'g','LineWidth',3);\nplot(gammaInput,blueGamma,'b','LineWidth',3);\ntitle( 'Monitor gamma curves','FontSize',24);\nxlabel( 'Input RGB','FontSize',24); ylabel( 'Mixture rgb','FontSize',24);\n\n% We need to invert this curve - for each of our desired rgb values we need\n% to find the corresponding RGB. That's not too hard, we can just do\n% exhasutive search. This is done here in a little subfunction called\n% SimpleGammaCorrection at the bottom of this file.\nnLevels = length(gammaInput);\nR = SimpleGammaCorrection(gammaInput,redGamma,rgbThatRender(1));\nG = SimpleGammaCorrection(gammaInput,greenGamma,rgbThatRender(2));\nB = SimpleGammaCorrection(gammaInput,blueGamma,rgbThatRender(3)); \nRGBThatRender = [R G B]';\n\n% Make an and show the color image. We get (on my Apple Display) a slightly\n% bluish gray, which is about right for D65 given that we aren't using a\n% calibration of this display.\nnPixels = 256;\ntheImage = zeros(nPixels,nPixels,3);\nfor ii = 1:nPixels\n for jj = 1:nPixels\n theImage(ii,jj,:) = RGBThatRender;\n end\nend\nfigure(5);\nimshow(theImage);\n\n%% Go from RGB back to the spectrum coming off the monitor\n%\n% There will be a very small difference between rgbFromRGB and\n% rgbThatRender because the gamma correction quantizes the RGB\n% values to discrete levels. \nrgbFromRGB(1) = SimpleGammaCorrection(redGamma,gammaInput,RGBThatRender(1));\nrgbFromRGB(2) = SimpleGammaCorrection(greenGamma,gammaInput,RGBThatRender(2));\nrgbFromRGB(3) = SimpleGammaCorrection(blueGamma,gammaInput,RGBThatRender(3));\nrgbFromRGB = rgbFromRGB';\nspectrumFromRGB = cal.processedData.P_device*rgbFromRGB;\nfigure(3);\nplot(wls,spectrumFromRGB,'r:','LineWidth',2);\n\nfunction output = SimpleGammaCorrection(gammaInput,gamma,input)\n% output = SimpleGammaCorrection(gammaInput,gamma,input)\n%\n% Perform gamma correction by exhaustive search. 
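% The exhaustive search can also be written as a vectorised nearest-neighbour
% lookup that returns the same table entry. A stand-alone sketch with a
% hypothetical five-entry gamma table (assumed power-law response):
gammaInputDemo = linspace(0, 1, 5)';           % input settings 0 .. 1
gammaDemo      = gammaInputDemo.^2.2;          % measured (here: assumed) output
desiredOutput  = 0.30;                         % output level we want
[~, idxDemo]   = min(abs(gammaDemo - desiredOutput));
settingDemo    = gammaInputDemo(idxDemo);      % setting whose output is closest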
Just to show idea,\n% not worried about efficiency.\n%\n% 9/14/08 ijk Wrote it.\n% 12/2/09 dhb Update for [0,1] input table.\n% 08/01/20 dhb Get rid of extraneous input variable\n\nmin_diff = Inf;\nfor i=1:length(gammaInput)\n currentdiff = abs(gamma(i)-input);\n if(currentdiff < min_diff)\n min_diff = currentdiff;\n output = i;\n end\nend\noutput = gammaInput(output);\nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "ColourCamouflageImageTutorial.m", "ext": ".m", "path": "TeachingCode-master/ICVS2020Tutorials/ColourCamouflageImageTutorial.m", "size": 8062, "source_encoding": "utf_8", "md5": "f7e8af66320136d261df52d4cfeecef3", "text": "% ColourCamouflageImageTutorial\n%\n% Example code to colour a 3-colour image with dichromat confusion colours \n% for use in camouflage example.\n%\n% To run this, you will need both the Psychophysics Toolbox (PsychToolbox)\n% and the BrainardLabToolbox on your path. You can get the PsychToolbox\n% from\n% psychtoolbox.org\n% You can get the BrainardLabToolbox from\n% https://github.com/BrainardLab/BrainardLabToolbox\n%\n% You also need the kmeans_fast_Color function from Matlab Central\n% https://uk.mathworks.com/matlabcentral/fileexchange/44598-fast-kmeans-algorithm-code?s_tid=mwa_osa_a\n%\n% If you use the ToolboxToolbox (https://github.com/toolboxhub/toolboxtoolbox)\n% and install the TeachingCode repository in your projects folder, you can\n% install the above dependencies by using\n% tbUseProject('TeachingCode')\n% at the Matlab prompt.\n%\n% You also need the calibration file NEC_MultisyncPA241W.mat and the image\n% animal-silhouette-squirrel.jpg which are in the same directory as this\n% tutorial in the TeachingCode github respository.\n%\n% Before running this, you may want to become familiar with\n% RenderSpectrumOnMonitorTutorial.\n%\n% See also: RenderSpectrumOnMonitorTutorial.\n\n% History:\n% Written 04/08/2020 (that's 08/04/2020 for Americans) by Hannah Smithson\n% using example code from the RenderSpectrumOnMonitorTutorial\n% 08/06/20 dhb Some commenting.\n\n%% Clear old variables, and close figure windows\nclear; close all;\n\n%% Set some key parameters\nrgbBackground = [0.2 0.2 0.0]'; % for a 'natural looking' image, choose a brown background\nconeContrastsForTarget = [1.4 1.4 1.4]; % triplet specifying multiplier on the background LMS (e.g. [1.2, 1.0, 1.0] is 20% L-cone contrast)\nconeContrastsForCamouflage = [1.3 0.7 1.0]; % triplet specifying multiplier on the background LMS (e.g. [1.2, 1.0, 1.0] is 20% L-cone contrast) \n\nnumberOfCamoBlobs = 700; % specify the number of \"camouflage\" blobs\nnumberOfIntensityBlobs = 300; % specify the number of blobs used to add intensity variation\n\nsizeScaleFactorCamo = 0.03; % size of blobs (fraction of image width)\nsizeScaleFactorIntensity = 0.05;\n\n%% Load and a test calibration file\n%\n% These are measurements from an LCD monitor, with data stored in a\n% structure that describes key monitor properties.\ncalData = load('NEC_MultisyncPA241W');\ncal = calData.cals{end};\n\n% Get wavelength sampling of functions in cal file.\nS = cal.rawData.S;\nwls = SToWls(S);\n\n% For simplicity, let's assume that no light comes off the monitor when the\n% input is set to zero. This isn't true for real monitors, but we don't\n% need to fuss with that aspect at the start. 
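% If the ambient light were kept rather than zeroed, the device model would
% be LMS = M*rgb + LMSambient with M = T_cones*P_device, so the inverse
% solve just subtracts the ambient's cone excitations first. A stand-alone
% sketch with made-up numbers (not this monitor's calibration):
Mamb           = [0.60 0.30 0.02; 0.20 0.70 0.05; 0.02 0.05 0.90];
LMSambientDemo = [0.010 0.012 0.008]';    % cone excitations of the ambient light
LMStargetDemo  = [0.40 0.35 0.20]';
rgbWithAmbient = Mamb \ (LMStargetDemo - LMSambientDemo);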
\ncal.processedData.P_ambient = zeros(size(cal.processedData.P_ambient));\n\n%% Get human cone spectral sensitivities\n%\n% Here we use the Stockman-Sharpe 2-degree fundamentals, which are also the\n% CIE fundamentals. They are stored as a .mat file in the Psychophysics\n% Toolbox. See \"help PsychColorimetricMatFiles'.\n%\n% By convention in the Psychtoolbox, we store sensitivities as the rows of\n% a matrix. Spline the wavelength sampling to match that in the calibration\n% file.\nload T_cones_ss2\nT_cones = SplineCmf(S_cones_ss2,T_cones_ss2,S);\n\n%% We want to find a mixture of the monitor primaries that produces a given LMS excitation\n%\n% Use the calibration data to generate the rgbToLMSMatrix\n% (more info available in RenderSpectrumOnMonitor tutorial)\n%\n% We use the column vector [r g b]' to denote the amount of each primary\n% we'll ultimately want in the mixture. By convention we'll think of r, g,\n% and b as proportions relative to the maximum amount of each phosphor\n% available on the monitor.\nrgbToLMSMatrix = T_cones*cal.processedData.P_device;\nlmsBackground = rgbToLMSMatrix*rgbBackground;\n\n% Now set an LMS triplet for the \"figure\", which differs from the ground\n% only in L-cone excitation - a 20% increase in L-cone excitation\nlmsTarget = coneContrastsForTarget' .* lmsBackground;\nlmsCamouflage = coneContrastsForCamouflage' .* lmsBackground;\n\n% We want to go the other way, starting with lmsFigure and obtaining an\n% rgb vector that produces it. This is basically inverting the relation\n% above, which is easy in Matlab.\nLMSTorgbMatrix = inv(rgbToLMSMatrix);\nrgbTarget = LMSTorgbMatrix*lmsTarget;\nrgbCamouflage = LMSTorgbMatrix*lmsCamouflage;\n\n%% Make an image that shows the three colors - background, target and camouflage\n%\n% What we need to do is find RGB values to put in the image so that we get\n% the desired rgb propotions in the mixture that comes off. This is a\n% little tricky, because the relation between the RGB values we put into an\n% image and the rgb values that come off is non-linear. 
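% The multiplier triplets above relate to Weber cone contrast simply by
% contrast = multiplier - 1 (e.g. [1.2 1.0 1.0] is a 20% L-cone increment).
% A stand-alone sketch with a hypothetical background:
lmsBgDemo  = [0.20 0.15 0.05]';
multDemo   = [1.2 1.0 1.0]';
lmsTgtDemo = multDemo .* lmsBgDemo;
weberDemo  = (lmsTgtDemo - lmsBgDemo) ./ lmsBgDemo;   % ~[0.2 0 0]': an L-cone-isolating increment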
This non-linearity\n% is called the gamma curve of the monitor, and we have to correct for it,\n% a process known as gamma correction.\nRGBBackground = GammaCorrectionForTriplet(rgbBackground, cal);\nRGBTarget = GammaCorrectionForTriplet(rgbTarget, cal);\nRGBCamouflage = GammaCorrectionForTriplet(rgbCamouflage, cal);\n\nnPixels = 256;\ntheImageBackground = cat(3, ones(nPixels)*RGBBackground(1), ones(nPixels)*RGBBackground(2), ones(nPixels)*RGBBackground(3));\ntheImageTarget = cat(3, ones(nPixels)*RGBTarget(1), ones(nPixels)*RGBTarget(2), ones(nPixels)*RGBTarget(3));\ntheImageCamouflage = cat(3, ones(nPixels)*RGBCamouflage(1), ones(nPixels)*RGBCamouflage(2), ones(nPixels)*RGBCamouflage(3));\nfigure;\nimshow(cat(1, theImageBackground, theImageTarget, theImageCamouflage));\n\n%% Make a more naturalistic image of camouflaged targets\n%\n% Read in a binary black and white image\nA = imread('animal-silhouette-squirrel.jpg'); % white = background; black = target\nimW = size(A, 1);\nimH = size(A, 2);\n\n% Show the original image\nfigure\nimagesc(A)\n\n%% Convert the white to grey and add camo blobs\nA = 0.5 * A; \nfor i = 1:numberOfCamoBlobs\n A = insertShape(A,'FilledCircle',[imH*rand(1), imW*rand(1), sizeScaleFactorCamo*imW*rand(1)], 'Color', 'white','Opacity',1.0);\nend\n\n% Show the grey scale image with blobs\nfigure\nimshow(A)\n\n% If the original image has smoothing or compression, cluster 3 pixel values\n[threeColImage,vec_mean] = kmeans_fast_Color(A, 3);\n\n% Make a colour map from the colours we defined\nmap = cat(1, RGBTarget, RGBBackground, RGBCamouflage); \nfigure\nimshow(threeColImage, map);\n\n% Add intensity noise to true colour image\nA = ind2rgb(threeColImage, map);\nfor i = 1:numberOfIntensityBlobs\n A = insertShape(A,'FilledCircle',[imH*rand(1), imW*rand(1), sizeScaleFactorIntensity*imW*rand(1)], 'Color', 'black','Opacity',0.1);\nend\nfigure\nimshow(A)\n\n\nfunction useRGB = GammaCorrectionForTriplet(desiredrgb, cal)\n\n% As part of the monitor calibration, we meausured the gamma curves of our\n% monitor, and they are in the calibration structure. Let's have a look\ngammaInput = cal.processedData.gammaInput;\nredGamma = cal.processedData.gammaTable(:,1);\ngreenGamma = cal.processedData.gammaTable(:,2);\nblueGamma = cal.processedData.gammaTable(:,3);\n\n% We need to invert this gamma curve - for each of our desired rgb values we need\n% to find the corresponding RGB. That's not too hard, we can just do\n% exhasutive search. This is done here in a little subfunction called\n% SimpleGammaCorrection at the bottom of this file.\nR = SimpleGammaCorrection(gammaInput,redGamma,desiredrgb(1));\nG = SimpleGammaCorrection(gammaInput,greenGamma,desiredrgb(2));\nB = SimpleGammaCorrection(gammaInput,blueGamma,desiredrgb(3)); \nuseRGB = [R G B];\n\nend\n\nfunction output = SimpleGammaCorrection(gammaInput,gamma,input)\n% output = SimpleGammaCorrection(gammaInput,gamma,input)\n%\n% Perform gamma correction by exhaustive search. 
Just to show idea,\n% not worried about efficiency.\n%\n% 9/14/08 ijk Wrote it.\n% 12/2/09 dhb Update for [0,1] input table.\n% 08/01/20 dhb Get rid of extraneous input variable\n\nmin_diff = Inf;\nfor i=1:length(gammaInput)\n currentdiff = abs(gamma(i)-input);\n if(currentdiff < min_diff)\n min_diff = currentdiff;\n output = i;\n end\nend\noutput = gammaInput(output);\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "RenderSpectrumOnMonitorForDogTutorial.m", "ext": ".m", "path": "TeachingCode-master/ICVS2020Tutorials/RenderSpectrumOnMonitorForDogTutorial.m", "size": 11021, "source_encoding": "utf_8", "md5": "0e4da36aa7791abdabd1128cfd429255", "text": "% RenderSpectrumOnMonitorForDogTutorial\n%\n% Exercise to learn about rendering metamers on a monitor. This version is\n% for a dichromat. As an example, we'll use the cone spectral\n% sensitivities of the dog.\n%\n% Before working through this tutorial, you should work through the\n% tutorial RenderSpectrumOnMonitorTutorial. After you understand this one,\n% you can look at RenderImageOnMonitorForDogTutorial, which applies the\n% idea to render images rather than a single spectrum.\n%\n% This tutorial is available in the github repository\n% https://github.com/BrainardLab/TeachingCode\n% You can either clone the respository or just download a copy from\n% that page (see green \"Code\" button).\n%\n% To run this, you will need both the Psychophysics Toolbox (PsychToolbox)\n% and the BrainardLabToolbox on your path. You can get the PsychToolbox\n% from\n% psychtoolbox.org\n% You can get the BrainardLabToolbox from\n% https://github.com/BrainardLab/BrainardLabToolbox\n%\n% If you use the ToolboxToolbox (https://github.com/toolboxhub/toolboxtoolbox)\n% and install the TeachingCode repository in your projects folder, you can\n% install the dependencies by using\n% tbUseProject('TeachingCode')\n% at the Matlab prompt.\n%\n% You also need the calibration file NEC_MultisyncPA241W.mat, which is in\n% the same directory as this tutorial in the github respository.\n%\n% See also: RenderSpectrumOnMonitorTutorial, RenderImageOnMonitorForDogTutorial\n\n% History:\n% 08/02/2020 dhb Wrote for ICVS from other tutorials that weren't quite\n% what we wanted.\n\n%% Clear\nclear; close all;\n\n%% Load and examine a test calibration file\n%\n% These are measurements from an LCD monitor, with data stored in a\n% structure that describes key monitor properties.\ncalData = load('NEC_MultisyncPA241W');\ncal = calData.cals{end};\n\n% Get wavelength sampling of functions in cal file.\nS = cal.rawData.S;\nwls = SToWls(S);\n\n% For simplicity, let's assume that no light comes off the monitor when the\n% input is set to zero. This isn't true for real monitors, but we don't\n% need to fuss with that aspect at the start. \ncal.processedData.P_ambient = zeros(size(cal.processedData.P_ambient));\n\n%% Plot the spectra of the three monitor primaries.\n%\n% For this monitor each primary is determined by the emission spectra of\n% one of the phosphors on its faceplate, but that's a detail. Whwat we care\n% about are the spectra, not how they were instrumented physically.\n%\n% Each primary spectrum is in a separate column of the matrix cal.processedData.P_device.\n% In MATLAB,can use the : operator to help extract various pieces of a\n% matrix. 
So:\nredPhosphor = cal.processedData.P_device(:,1);\ngreenPhosphor = cal.processedData.P_device(:,2);\nbluePhosphor = cal.processedData.P_device(:,3);\nfigure(1);clf; hold on\nset(gca,'FontName','Helvetica','FontSize',18);\nplot(wls,redPhosphor,'r','LineWidth',3);\nplot(wls,greenPhosphor,'g','LineWidth',3);\nplot(wls,bluePhosphor,'b','LineWidth',3);\ntitle( 'Monitor channel spectra','FontSize',24);\nxlabel( 'Wavelength [ nm ]','FontSize',24); ylabel( 'Radiance [ W / m^2 / sr / wlbin ]','FontSize',24);\nhold off\n\n%% Get animal spectral sensitivities\n%\n% Here we use the dog, a dichromat.\n%\n% By convention in the Psychtoolbox, we store sensitivities as the rows of\n% a matrix. Spline the wavelength sampling to match that in the calibration\n% file.\n%\n% T_dogrec has the dog L cone, dog S cone, and dog rod in its three\n% rows. We only want the cones for high light level viewing.\nload T_dogrec\nT_cones = SplineCmf(S_dogrec,T_dogrec([1,2],:),S);\nload T_cones_ss2\nT_cones = SplineCmf(S_cones_ss2,T_cones_ss2,S);\nT_cones = T_cones([1,3],:);\n\n% Make a plot\nfigure(2); clf; hold on\nset(gca,'FontName','Helvetica','FontSize',18);\nplot(wls,T_cones(1,:),'r','LineWidth',3);\nplot(wls,T_cones(2,:),'b','LineWidth',3);\ntitle( 'LS Cone Fundamentals','FontSize',24);\nxlabel( 'Wavelength','FontSize',24); ylabel( 'Sensitivity','FontSize',24);\nhold off\n\n%% Get a spectrum to render\n%\n% We want to render a spectrum on the monitor so that the light coming off\n% the monitor has the same effect on the human cones as the spectrum would\n% have. So we need a spectrum. We'll use a spectrum computed from CIE D65\n% that renders pinkish for a human. (See alternate spectrum in\n% RenderSpectrumOnMonitorTutorial).\nload spd_D65\nspectrumToRender = SplineSpd(S_D65,spd_D65,S)/0.75e4;\nspectrumToRender = 1.5*max(spectrumToRender(:))*ones(size(spectrumToRender))-spectrumToRender;\n\n% Make a plot of the spectrum to render\nfigure(3); clf; hold on\nplot(wls,spectrumToRender,'k','LineWidth',3);\ntitle('Metamers','FontSize',24);\nxlabel('Wavelength','FontSize',24); ylabel( 'Power','FontSize',24);\n\n%% Compute the cone excitations from the spectrum we want to render\n%\n% This turns out to be a simple matrix multiply in Matlab. The\n% sensitivities are in the rows of T_cones and the spectral radiance is in\n% the column vector spectrumToRender. For each row of T_cones, the matrix\n% multiple consists of weighting the spectrum by the sensitivity at each\n% wavelength and adding them up.\n%\n% Implicit here is that the units of spectrum give power per wavelength\n% sampling bin, another important detail you'd want to think about to get\n% units right for a real application and that we won't worry about here.\nLSToRender = T_cones*spectrumToRender;\n\n%% We want to find a mixture of the monitor primaries that produces the same excitations\n%\n% Since there are only two primaries needed, we'll average the red and green\n% and treat that as a yellow primary.\n%\n% Let's use the column vector [y b]' to denote the amount of each primary\n% we'll ultimately want in the mixture. 
By convention we'll think of y\n% and b as proportions relative to the maximum amount of each primary\n% available on the monitor.\n%\n% It might be useful to make a plot of the example spectrum and see that it\n% indeed looks like a mixture of the primary spectra.\nybExample = [0.2 0.5]';\nmonitorSpectrumExample = ybExample(1)*(redPhosphor+greenPhosphor)/2 + ybExample(2)*bluePhosphor;\n\n% We can also compute the spectrum coming off the monitor for any choice of\n% y and b using a matrix multiply. In this case, think of the\n% multiplication as weighting each of the columns of monitorBasis (defined\n% below) and then summing them. You can verify that this gives the same\n% answer as the expanded form just above.\nmonitorBasis = [(redPhosphor+greenPhosphor)/2 bluePhosphor];\nmonitorSpectrumExampleCheck = monitorBasis*ybExample;\n\n% We can also compute the LMS cone excitations for this example. This is\n% just a column vector of three numbers.\nmonitorLMSExample = T_cones*monitorSpectrumExample;\n\n% Now note that we can combine the two steps above, precomputing the matrix\n% that maps between the rgb vector and the LS excitations that result.\n%\n% You can verify that monitorLMSExample and monitorLMSExampleCheck are the\n% same as each other.\nybToLSMatrix = T_cones*monitorBasis;\nmonitorLMSExampleCheck = ybToLSMatrix*ybExample;\n\n% We want to go the other way, starting with LSToRender and obtaining an\n% rgb vector that produces it. This is basically inverting the relation\n% above, which is easy in Matlab.\nLSTorgbMatrix = inv(ybToLSMatrix);\nybThatRender = LSTorgbMatrix*LSToRender;\n\n% Let's check that it worked. The check values here should be the same as\n% LMSToRender.\nrenderedSpectrum = monitorBasis*ybThatRender;\nLSToRenderCheck = T_cones*renderedSpectrum;\n\n% Add rendered spectrum to plot of target spectrum. You can see that they\n% are of the same overall scale but differ in relative spectra. These two\n% spectra are metamers - they produce the same excitations in the cones and\n% will look the same to a human observer.\nfigure(3);\nplot(wls,renderedSpectrum,'k:','LineWidth',3);\n\n%% Make an image that shows the color\n%\n% We know the proportions of each of the monitor primaries required to\n% produce a metamer to the spectrum we wanted to render. Now we'd like to\n% look at this rendered spectrum. We have to assume that the properties of\n% the monitor we're using are the same as the one in the calibration file,\n% which isn't exactly true but will be close enough for illustrative\n% purposes.\n\n% What we need to do is find RGB values to put in the image so that we get\n% the desired rgb propotions in the mixture that comes off. This is a\n% little tricky, because the relation between the RGB values we put into an\n% image and the rgb values that come off is non-linear. This non-linearity\n% is called the gamma curve of the monitor, and we have to correct for it,\n% a process known as gamma correction.\n\n% As part of the monitor calibration, we meausured the gamma curves of our\n% monitor, and they are in the calibration structure. 
Let's have a look\nfigure(4); clf; hold on\nset(gca,'FontName','Helvetica','FontSize',18);\ngammaInput = cal.processedData.gammaInput;\nredGamma = cal.processedData.gammaTable(:,1);\ngreenGamma = cal.processedData.gammaTable(:,2);\nblueGamma = cal.processedData.gammaTable(:,3);\nplot(gammaInput,redGamma,'r','LineWidth',3);\nplot(gammaInput,greenGamma,'g','LineWidth',3);\nplot(gammaInput,blueGamma,'b','LineWidth',3);\ntitle( 'Monitor gamma curves','FontSize',24);\nxlabel( 'Input RGB','FontSize',24); ylabel( 'Mixture rgb','FontSize',24);\n\n% We need to convert our yb values to rgb values at this point. But that's\n% easy r = g = y/2.\nrgbThatRender = [ybThatRender(1)/2 ybThatRender(1)/2 ybThatRender(2)]';\n\n% Check that this rgb does what we want\nrenderedSpectrum1 = cal.processedData.P_device*rgbThatRender;\nLSToRenderCheck1 = T_cones*renderedSpectrum1;\n\n% Then we invert the RGB gamma curve - for each of our desired rgb values we need\n% to find the corresponding RGB. That's not too hard, we can just do\n% exhasutive search. This is done here in a little subfunction called\n% SimpleGammaCorrection at the bottom of this file.\nnLevels = length(gammaInput);\nR = SimpleGammaCorrection(gammaInput,redGamma,rgbThatRender(1));\nG = SimpleGammaCorrection(gammaInput,greenGamma,rgbThatRender(2));\nB = SimpleGammaCorrection(gammaInput,blueGamma,rgbThatRender(3)); \nRGBThatRender = [R G B]';\n\n% Make an and show the color image. We get (on my Apple Display) a slightly\n% bluish gray, which is about right for D65 given that we aren't using a\n% calibration of this display.\nnPixels = 256;\ntheImage = zeros(nPixels,nPixels,3);\nfor ii = 1:nPixels\n for jj = 1:nPixels\n theImage(ii,jj,:) = RGBThatRender;\n end\nend\nfigure(5);\nimshow(theImage);\n\nfunction output = SimpleGammaCorrection(gammaInput,gamma,input)\n% output = SimpleGammaCorrection(gammaInput,gamma,input)\n%\n% Perform gamma correction by exhaustive search. Just to show idea,\n% not worried about efficiency.\n%\n% 9/14/08 ijk Wrote it.\n% 12/2/09 dhb Update for [0,1] input table.\n% 08/01/20 dhb Get rid of extraneous input variable\n\nmin_diff = Inf;\nfor i=1:length(gammaInput)\n currentdiff = abs(gamma(i)-input);\n if(currentdiff < min_diff)\n min_diff = currentdiff;\n output = i;\n end\nend\noutput = gammaInput(output);\nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "RenderImageOnMonitorForDogTutorial.m", "ext": ".m", "path": "TeachingCode-master/ICVS2020Tutorials/RenderImageOnMonitorForDogTutorial.m", "size": 5976, "source_encoding": "utf_8", "md5": "3d978efe6447db441c9fa1acd1786e2c", "text": "% RenderImageOnMonitorForDogTutorial\n%\n% Render an RGB image as a metamer for a dichromat. This tutorial builds\n% on the ideas introduced in RenderSpectrumOnMonitorTutorial and\n% RenderSpectrumOnMonitorForDogTutorial.\n%\n% In this version, you can control the metameric image you produce by\n% changing the parameter lambda near the top.\n%\n% This tutorial is available in the github repository\n% https://github.com/BrainardLab/TeachingCode\n% You can either clone the respository or just download a copy from\n% that page (see green \"Code\" button).\n%\n% To run this, you will need both the Psychophysics Toolbox (PsychToolbox)\n% and the BrainardLabToolbox on your path. 
You can get the PsychToolbox\n% from\n% psychtoolbox.org\n% You can get the BrainardLabToolbox from\n% https://github.com/BrainardLab/BrainardLabToolbox\n%\n% If you use the ToolboxToolbox (https://github.com/toolboxhub/toolboxtoolbox)\n% and install the TeachingCode repository in your projects folder, you can\n% install the dependencies by using\n% tbUseProject('TeachingCode')\n% at the Matlab prompt.\n%\n% You also need the calibration file NEC_MultisyncPA241W.mat, which is in\n% the same directory as this script in the github respository.\n%\n% See also: RenderSpectrumOnMonitorTutorial, RenderSpectrumOnMonitorForDogTutorial\n\n% History\n% 08/03/2020 dhb Wrote it.\n\n%% Clear\nclear; close all;\n\n%% Parameters\n%\n% The lambda parameter governs how the red and green primary spectra are\n% mixed to produce the \"yellow\" primary for the simulated two primary\n% device. Varying this will change the metameric image you produce. This\n% variable should stay between 0 and 1.\nlambda = 0.7;\n\n%% Load and examine a test calibration file\n%\n% These are measurements from an LCD monitor, with data stored in a\n% structure that describes key monitor properties.\ncalData = load('NEC_MultisyncPA241W');\ncal = calData.cals{end};\nredPhosphor = cal.processedData.P_device(:,1);\ngreenPhosphor = cal.processedData.P_device(:,2);\nbluePhosphor = cal.processedData.P_device(:,3);\n\n% Get wavelength sampling of functions in cal file.\nS = cal.rawData.S;\nwls = SToWls(S);\n\n% For simplicity, let's assume that no light comes off the monitor when the\n% input is set to zero. This isn't true for real monitors, but we don't\n% need to fuss with that aspect at the start. \ncal.processedData.P_ambient = zeros(size(cal.processedData.P_ambient));\n\n%% Load human cone spectral sensitivities\nload T_cones_ss2\nT_conesTrichrom = SplineCmf(S_cones_ss2,T_cones_ss2,S);\n\n%% Get animal spectral sensitivities\n%\n% Here we use the dog, a dichromat.\n%\n% By convention in the Psychtoolbox, we store sensitivities as the rows of\n% a matrix. Spline the wavelength sampling to match that in the calibration\n% file.\n%\n% T_dogrec has the dog L cone, dog S cone, and dog rod in its three\n% rows. We only want the cones for high light level viewing.\nload T_dogrec\nT_conesDichrom = SplineCmf(S_dogrec,T_dogrec([1,2],:),S);\n\n% If you want ground squirrel instead comment in these lines. You could\n% also set T_conesDichrom to some pair of the human LMS cones to generate\n% metameric image for human dichromats.\n% load T_ground\n% T_conesDichrom = SplineCmf(S_dogrec,T_dogrec([1,2],:),S);\n\n%% Get an image to render\n%\n% This one comes with Matlab, just need to map through the color lookup\n% table in variable map to produce a full color image.\nload mandrill\nRGBImage = zeros(size(X,1),size(X,2),3);\nfor ii = 1:size(X,1)\n for jj = 1:size(X,2)\n RGBImage(ii,jj,:) = map(X(ii,jj),:);\n end\nend\nfigure(1); imshow(RGBImage);\n\n%% Ungamma correct the image\n%\n% Cal format strings out each pixel as a column in a 3 by n*m matrix.\n% Convenient for color transformations. 
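% Conceptually the cal format is just the m-by-n-by-3 image rearranged so
% that each pixel becomes one 3-element column. A stand-alone reshape/permute
% sketch (the toolbox ImageToCalFormat/CalFormatToImage pair is what is
% actually used below, and its column ordering may differ):
imgDemo = rand(4, 5, 3);                                   % hypothetical image
[mD, nD, ~] = size(imgDemo);
calDemo = reshape(permute(imgDemo, [3 1 2]), 3, mD*nD);    % 3 x (m*n)
imgBack = permute(reshape(calDemo, [3 mD nD]), [2 3 1]);   % back to m x n x 3
% max(abs(imgBack(:) - imgDemo(:))) is 0: the round trip is lossless.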
Put the image in cal format.\n[RGBCal,m,n] = ImageToCalFormat(RGBImage);\n\n% Inverse gamma correction to get rgb from RGB\nrgbCal = zeros(size(RGBCal));\nfor ii = 1:m*n\n for cc = 1:3\n rgbCal(cc,ii) = SimpleGammaCorrection(cal.processedData.gammaTable(:,cc),cal.processedData.gammaInput,RGBCal(cc,ii));\n end\nend\n\n%% Get spectrum and LS coordinates from rgb\ntheSpectrumCal = cal.processedData.P_device*rgbCal;\ntheLSCal = T_conesDichrom*theSpectrumCal;\n\n%% Make virtual two primary monitor and find yb values that produce metamers\n%\n% Use a controlable mixture of red and green, with parameter lambda\n% determining how much of each.\nmonitorBasis = [lambda*redPhosphor+(1-lambda)*greenPhosphor bluePhosphor];\nybToLSMatrix = T_conesDichrom*monitorBasis;\nLSToybMatrix = inv(ybToLSMatrix);\nybCal = LSToybMatrix*theLSCal;\n\n%% Promote yb to rgb using our knowledge of how we built the primaries\n% \n% Use lambda to determine ratio of red to green, to match the way we set up\n% the combined phosphor.\nrgbMetamerCal = [lambda*ybCal(1,:) ; (1-lambda)*ybCal(1,:) ; ybCal(2,:)];\n\n%% Check that we get the desired LS excitations ro numerical precision\ntheLSCalCheck = T_conesDichrom*cal.processedData.P_device*rgbMetamerCal;\nif (max(abs(theLSCal(:)-theLSCalCheck(:))) > 1e-10)\n error('Do not get desired LS values');\nend\n\n%% Gamma correct to get RGB for the metamer, convert back to image format, and display\nRGBMetamerCal = zeros(size(RGBCal));\nfor ii = 1:m*n\n for cc = 1:3\n RGBMetamerCal(cc,ii) = SimpleGammaCorrection(cal.processedData.gammaInput,cal.processedData.gammaTable(:,cc),rgbMetamerCal(cc,ii));\n end\nend\nRGBMetamerImage = CalFormatToImage(RGBMetamerCal,m,n);\nfigure(2);\nimshow(RGBMetamerImage);\n\nfunction output = SimpleGammaCorrection(gammaInput,gamma,input)\n% output = SimpleGammaCorrection(gammaInput,gamma,input)\n%\n% Perform gamma correction by exhaustive search. Just to show idea,\n% not worried about efficiency.\n%\n% 9/14/08 ijk Wrote it.\n% 12/2/09 dhb Update for [0,1] input table.\n% 08/01/20 dhb Get rid of extraneous input variable\n\nmin_diff = Inf;\nfor i=1:length(gammaInput)\n currentdiff = abs(gamma(i)-input);\n if(currentdiff < min_diff)\n min_diff = currentdiff;\n output = i;\n end\nend\noutput = gammaInput(output);\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "GLW_CircularApertureStimulus.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_CircularApertureStimulus.m", "size": 3239, "source_encoding": "utf_8", "md5": "4a08aa08a3f6335936cb7c5f2a94a11e", "text": "function GLW_CircularApertureStimulus()\n% GLW_CircularApertureStimulus() \n%\n% Demonstrate how to generate a noise stimulus with a circular aperture using\n% GLWindow.\n%\n% The program terminates when the user presses the'q' key.\n% \n%\n\n% 12/3/13 npc Wrote it.\n\n % Generate 256x256 noise stimulus\n imageSize = 256;\n stimMatrix = rand(imageSize, imageSize)-0.5;\n \n % Generate circular aperture\n mask = GenerateSoftCircularAperture(imageSize);\n \n % Apply the mask to the stimulus\n imageMatrix = 0.5 + (stimMatrix .* mask);\n \n % Create an RGB version for display by GLWindow\n imageMatrixRGB = repmat(imageMatrix, [1 1 3]);\n \n % Get information about the displays attached to our system.\n displayInfo = mglDescribeDisplays;\n\n % We will present everything to the last display. 
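% Small illustrative check (not part of the original demo; "Ex" names are
% hypothetical). The 0.5 offset above keeps the masked noise displayable:
% stimMatrix lies in [-0.5, 0.5] and the aperture mask in [0, 1], so
% 0.5 + stimMatrix.*mask stays within [0, 1] and blends into the
% [0.5 0.5 0.5] gray background wherever the aperture falls to zero.
noiseEx = rand(16,16) - 0.5;                  % hypothetical noise patch
maskEx = ones(16,16); maskEx(:,9:end) = 0;    % hypothetical hard-edged aperture
windowedEx = 0.5 + noiseEx.*maskEx;
fprintf('masked stimulus range: [%0.2f, %0.2f]\n', min(windowedEx(:)), max(windowedEx(:)));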
Get its ID.\n lastDisplay = length(displayInfo);\n\n % Get the screen size\n screenSizeInPixels = displayInfo(lastDisplay).screenSizePixel;\n \n win = [];\n try \n % Create a full-screen GLWindow object\n win = GLWindow( 'SceneDimensions', screenSizeInPixels, ...\n 'BackgroundColor', [0.5 0.5 0.5],...\n 'windowID', lastDisplay);\n\n % Open the window \n win.open;\n \n % Add stimulus image to the GLWindow\n centerPosition = [0 0];\n win.addImage(centerPosition, size(imageMatrix), ...\n imageMatrixRGB, 'Name', 'stimulus');\n \n % Render the scene\n win.draw;\n \n % Wait for a character keypress.\n ListenChar(2);\n FlushEvents;\n \n disp('Press q to exit');\n Speak('Press q to exit', 'Alex');\n \n keepLooping = true;\n while (keepLooping)\n \n if CharAvail\n % Get the key\n theKey = GetChar;\n \n if (theKey == 'q')\n keepLooping = false;\n end \n end\n end\n \n % Close the window.\n win.close;\n ListenChar(0);\n\n catch e\n disp('An exception was raised');\n\n % Disable character listening.\n ListenChar(0);\n\n % Close the window if it was succesfully created.\n if ~isempty(win)\n win.close;\n end\n\n % Send the error back to the Matlab command window.\n rethrow(e);\n\n end % try\n \nend\n\n\nfunction aperture = GenerateSoftCircularAperture(imageSize)\n% aperture = GenerateSoftCircularAperture(imageSize)\n%\n% This function generates a soft circular aperture that is used to window the test image.\n%\n% 12/10/12 npc Wrote it.\n% 12/13/12 npc Changed computation of soft border to decrease the width of\n% the transition area, and thus display more of the image\n \n x = [-imageSize/2:imageSize/2-1] + 0.5;\n [X,Y] = meshgrid(x,x);\n \n radius = sqrt(X.^2 + Y.^2);\n softRadius = (imageSize/2)*0.9;\n softSigma = (imageSize/2 - softRadius) / 3.0;\n delta = radius - softRadius;\n \n aperture = ones(size(delta));\n indices = find(delta > 0);\n aperture(indices) = exp(-0.5*(delta(indices)/softSigma).^2);\n \nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "GLW_DriftingGrating.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_DriftingGrating.m", "size": 5238, "source_encoding": "utf_8", "md5": "29ff1094bb6efc08206567a0e2b1a378", "text": "function GLW_DriftingGrating\n% GLW_DriftingGrating Demonstrates how to drift a grating in GLWindow.\n%\n% Syntax:\n% GLW_DriftingGrating\n%\n% Description:\n% The function drifts a grating. Might not be completely done\n%\n% Press - 'd' to dump image of window into a file\n% - 'q' to quit\n\n% 12/5/12 dhb Wrote it from code lying around, in part due to Adam Gifford.\n% 11/xx/20 dhb Drifting version.\n\ntry \n % Choose the last attached screen as our target screen, and figure out its\n % screen dimensions in pixels. Using these to open the GLWindow keeps\n % the aspect ratio of stuff correct.\n d = mglDescribeDisplays;\n screenDims = d(end).screenSizePixel;\n \n % Open the window.\n win = GLWindow('SceneDimensions', screenDims,'windowId',length(d));\n win.open;\n \n % Load a calibration file for gamma correction. 
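% Worked numbers for GenerateSoftCircularAperture above (illustrative only,
% not part of this grating demo; "Ex" names are hypothetical). With
% imageSize = 256 the soft edge starts at softRadius = 0.9*128 = 115.2 pixels
% and rolls off as a Gaussian with softSigma = (128 - 115.2)/3 ~= 4.27 pixels,
% so by a radius of 128 pixels (the edge of the image along its midlines)
% transmission has dropped to roughly exp(-0.5*3^2), about 1%.
imageSizeEx = 256;
softRadiusEx = (imageSizeEx/2)*0.9;
softSigmaEx = (imageSizeEx/2 - softRadiusEx)/3;
edgeValueEx = exp(-0.5*((imageSizeEx/2 - softRadiusEx)/softSigmaEx)^2);   % ~0.011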
Put\n % your calibration file here.\n calFile = 'PTB3TestCal';\n S = WlsToS((380:4:780)');\n load T_xyz1931\n T_xyz = SplineCmf(S_xyz1931,683*T_xyz1931,S);\n igertCalSV = LoadCalFile(calFile);\n igertCalSV = SetGammaMethod(igertCalSV,0);\n igertCalSV = SetSensorColorSpace(igertCalSV,T_xyz,S);\n \n % Draw a neutral background at roughly half the\n % dispaly maximum luminance.\n bgrgb = [0.5 0.5 0.5]';\n bgRGB1 = PrimaryToSettings(igertCalSV,bgrgb);\n win.BackgroundColor = bgRGB1';\n win.draw;\n \n % Create gabor patches with specified parameters\n sine = false;\n pixelSize = min(screenDims);\n contrast = 0.9;\n sf = 1;\n sigma = 0.5;\n theta = 0;\n nPhases = 100;\n phases = linspace(0,360,nPhases);\n xdist = 0;\n ydist = 0;\n for ii = 1:nPhases\n % Make gabor in each phase\n gaborrgb{ii} = createGabor(pixelSize,contrast,sf,theta,phases(ii),sigma);\n \n if (~sine)\n gaborrgb{ii}(gaborrgb{ii} > 0.5) = 1;\n gaborrgb{ii}(gaborrgb{ii} < 0.5) = 0;\n end\n \n % Gamma correct\n [calForm1 c1 r1] = ImageToCalFormat(gaborrgb{ii});\n [RGB] = PrimaryToSettings(igertCalSV,calForm1);\n gaborRGB{ii} = CalFormatToImage(RGB,c1,r1);\n \n win.addImage([xdist ydist], [pixelSize pixelSize], gaborRGB{ii}, 'Name',sprintf('theGabor%d',ii));\n end\n \n % Temporal params\n hz = 0.5;\n frameRate = d.refreshRate;\n framesPerPhase = round((frameRate/hz)/nPhases);\n \n % Wait for a key to quit\n ListenChar(2);\n FlushEvents;\n whichPhase = 1;\n whichFrame = 1;\n oldPhase = nPhases;\n flicker = true;\n win.enableObject(sprintf('theGabor%d',nPhases));\n while true\n if (whichFrame == 1)\n if (flicker)\n win.disableObject(sprintf('theGabor%d',oldPhase));\n win.enableObject(sprintf('theGabor%d',whichPhase));\n oldPhase = whichPhase;\n whichPhase = whichPhase + 1;\n if (whichPhase > nPhases)\n whichPhase = 1;\n end\n end\n end\n win.draw;\n whichFrame = whichFrame + 1;\n if (whichFrame > framesPerPhase)\n whichFrame = 1;\n end\n \n key = 'z';\n if (CharAvail)\n key = GetChar; \n end\n switch key\n % Quit\n case 'q'\n break;\n case 'u'\n flicker = false;\n win.disableObject(sprintf('theGabor%d',oldPhase));\n case 'f'\n flicker = true;\n otherwise\n end\n end\n \n % Clean up and exit\n win.close;\n ListenChar(0);\n \n % Error handler\ncatch e\n ListenChar(0);\n if ~isempty(win)\n win.close;\n end\n rethrow(e);\nend\n\n\nfunction theGabor = createGabor(meshSize,contrast,sf,theta,phase,sigma)\n%\n% Input\n% meshSize: size of meshgrid (and ultimately size of image).\n% Must be an even integer\n% contrast: contrast on a 0-1 scale\n% sf: spatial frequency in cycles/image\n% cycles/pixel = sf/meshSize\n% theta: gabor orientation in degrees, clockwise relative to positive x axis.\n% theta = 0 means horizontal grating\n% phase: gabor phase in degrees.\n% phase = 0 means sin phase at center, 90 means cosine phase at center\n% sigma: standard deviation of the gaussian filter expressed as fraction of image\n%\n% Output\n% theGabor: the gabor patch as rgb primary (not gamma corrected) image\n\n\n% Create a mesh on which to compute the gabor\nif rem(meshSize,2) ~= 0\n error('meshSize must be an even integer');\nend\nres = [meshSize meshSize];\nxCenter=res(1)/2;\nyCenter=res(2)/2;\n[gab_x gab_y] = meshgrid(0:(res(1)-1), 0:(res(2)-1));\n\n% Compute the oriented sinusoidal grating\na=cos(deg2rad(theta));\nb=sin(deg2rad(theta));\nsinWave=sin((2*pi/meshSize)*sf*(b*(gab_x - xCenter) - a*(gab_y - yCenter)) + deg2rad(phase));\n\n% Compute the Gaussian 
window\nx_factor=-1*(gab_x-xCenter).^2;\ny_factor=-1*(gab_y-yCenter).^2;\nvarScale=2*(sigma*meshSize)^2;\ngaussianWindow = exp(x_factor/varScale+y_factor/varScale);\n\n% Compute gabor. Numbers here run from -1 to 1.\ntheGabor=gaussianWindow.*sinWave;\n\n% Convert to contrast\ntheGabor = (0.5+0.5*contrast*theGabor);\n\n% Convert single plane to rgb\ntheGabor = repmat(theGabor,[1 1 3]);\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "GLW_Gabor.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_Gabor.m", "size": 4810, "source_encoding": "utf_8", "md5": "1d932035a25694b0cc0366af7fae8500", "text": "function GLW_Gabor\n% GLW_Gabor Demonstrates how to show a gabor patch in GLWindow.\n%\n% Syntax:\n% GLW_Gabor\n%\n% Description:\n% The function createGabor at the end does the work of\n% creating the gabor patch. \n% \n% Also demonstrated is how to use the PTB calibration routines\n% to gamma correct the gabor.\n%\n% Press - 'd' to dump image of window into a file\n% - 'q' to quit\n\n% 12/5/12 dhb Wrote it from code lying around, in part due to Adam Gifford.\n\ntry \n % Choose the last attached screen as our target screen, and figure out its\n % screen dimensions in pixels. Using these to open the GLWindow keeps\n % the aspect ratio of stuff correct.\n d = mglDescribeDisplays;\n screenDims = d(end).screenSizePixel;\n \n % Open the window.\n win = GLWindow('SceneDimensions', screenDims,'windowId',length(d));\n win.open;\n \n % Load a calibration file for gamma correction. Put\n % your calibration file here.\n calFile = 'PTB3TestCal';\n S = WlsToS((380:4:780)');\n load T_xyz1931\n T_xyz = SplineCmf(S_xyz1931,683*T_xyz1931,S);\n igertCalSV = LoadCalFile(calFile);\n igertCalSV = SetGammaMethod(igertCalSV,0);\n igertCalSV = SetSensorColorSpace(igertCalSV,T_xyz,S);\n \n % Draw a neutral background at roughly half the\n % dispaly maximum luminance.\n bgrgb = [0.5 0.5 0.5]';\n bgRGB1 = PrimaryToSettings(igertCalSV,bgrgb);\n win.BackgroundColor = bgRGB1';\n win.draw;\n \n % Add central fixation cross, just for fun\n win.addLine([-20 0], [20 0], 3, [1 1 1],'Name','fixHorz');\n win.addLine([0 20], [0 -20], 3, [1 1 1],'Name', 'fixVert');\n \n % Create two gabor patches with specified parameters\n pixelSize = 400;\n contrast1 = 0.75;\n contrast2 = 0.25;\n sf1 = 6;\n sf2 = 3;\n sigma1 = 0.1;\n sigma2 = 0.2;\n theta1 = 0;\n theta2 = 75;\n phase1 = 90;\n phase2 = 0;\n xdist = 400;\n ydist = 0;\n gabor1rgb = createGabor(pixelSize,contrast1,sf1,theta1,phase1,sigma1);\n gabor2rgb = createGabor(pixelSize,contrast2,sf2,theta2,phase2,sigma2);\n \n % Gamma correct\n [calForm1 c1 r1] = ImageToCalFormat(gabor1rgb);\n [calForm2 c2 r2] = ImageToCalFormat(gabor2rgb);\n [RGB1] = PrimaryToSettings(igertCalSV,calForm1);\n [RGB2] = PrimaryToSettings(igertCalSV,calForm2);\n gabor1RGB = CalFormatToImage(RGB1,c1,r1);\n gabor2RGB = CalFormatToImage(RGB2,c2,r2);\n \n % Add to display and draw. 
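% Illustrative check (not part of the original demo; "Ex" names are
% hypothetical). createGabor maps the unit-amplitude gabor g in [-1, 1] to
% 0.5 + 0.5*contrast*g, so for a profile that actually reaches +/-1 the
% requested contrast equals the Michelson contrast of the linear-primary patch
% around the 0.5 gray background.
gEx = linspace(-1,1,101);             % stand-in for a unit-amplitude gabor profile
contrastEx = 0.75;                    % same value as contrast1 above
patchEx = 0.5 + 0.5*contrastEx*gEx;
michelsonEx = (max(patchEx)-min(patchEx))/(max(patchEx)+min(patchEx));   % = 0.75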
One is on left and the other on the right.\n win.addImage([-xdist ydist], [pixelSize pixelSize], gabor1RGB, 'Name','leftGabor');\n win.addImage([xdist ydist], [pixelSize pixelSize], gabor2RGB, 'Name','rightGabor');\n win.enableObject('leftGabor');\n win.enableObject('rightGabor');\n win.draw;\n \n % Wait for a key to quit\n ListenChar(2);\n FlushEvents;\n while true\n win.draw;\n key = GetChar;\n \n switch key\n % Quit\n case 'q'\n break;\n case 'd'\n win.dumpSceneToTiff('GLGabors.tif');\n otherwise\n break;\n end\n end\n \n % Clean up and exit\n win.close;\n ListenChar(0);\n \n % Error handler\ncatch e\n ListenChar(0);\n if ~isempty(win)\n win.close;\n end\n rethrow(e);\nend\n\n\nfunction theGabor = createGabor(meshSize,contrast,sf,theta,phase,sigma)\n%\n% Input\n% meshSize: size of meshgrid (and ultimately size of image).\n% Must be an even integer\n% contrast: contrast on a 0-1 scale\n% sf: spatial frequency in cycles/image\n% cycles/pixel = sf/meshSize\n% theta: gabor orientation in degrees, clockwise relative to positive x axis.\n% theta = 0 means horizontal grating\n% phase: gabor phase in degrees.\n% phase = 0 means sin phase at center, 90 means cosine phase at center\n% sigma: standard deviation of the gaussian filter expressed as fraction of image\n%\n% Output\n% theGabor: the gabor patch as rgb primary (not gamma corrected) image\n\n\n% Create a mesh on which to compute the gabor\nif rem(meshSize,2) ~= 0\n error('meshSize must be an even integer');\nend\nres = [meshSize meshSize];\nxCenter=res(1)/2;\nyCenter=res(2)/2;\n[gab_x gab_y] = meshgrid(0:(res(1)-1), 0:(res(2)-1));\n\n% Compute the oriented sinusoidal grating\na=cos(deg2rad(theta));\nb=sin(deg2rad(theta));\nsinWave=sin((2*pi/meshSize)*sf*(b*(gab_x - xCenter) - a*(gab_y - yCenter)) + deg2rad(phase));\n\n% Compute the Gaussian window\nx_factor=-1*(gab_x-xCenter).^2;\ny_factor=-1*(gab_y-yCenter).^2;\nvarScale=2*(sigma*meshSize)^2;\ngaussianWindow = exp(x_factor/varScale+y_factor/varScale);\n\n% Compute gabor. Numbers here run from -1 to 1.\ntheGabor=gaussianWindow.*sinWave;\n\n% Convert to contrast\ntheGabor = (0.5+0.5*contrast*theGabor);\n\n% Convert single plane to rgb\ntheGabor = repmat(theGabor,[1 1 3]);\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "GLW_Mouse.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_Mouse.m", "size": 5836, "source_encoding": "utf_8", "md5": "fa2bd80d702af2e7d6a36896e5e0834e", "text": "function GLW_Mouse(fullScreen)\n% GLW_Mouse Shows how to capture/set the mouse with GLWindow.\n%\n% Syntax:\n% GLW_Mouse\n% GLW_Mouse(false)\n%\n% Description:\n% Demonstrates how to capture mouse position and button clicks and how to\n% set the mouse position while using GLWindow. Mouse functionality is\n% provided by the MGL libraries. At the beginning of the program the mouse\n% is forced to the middle of the display. Clicking the mouse prints out the\n% RGB value of the pixel that was clicked if using the system mouse cursor.\n%\n% If the rendered cursor is enabled, detection of the pixel values isn't\n% possible without doing some geometry calculations or reading non RGB pixel\n% data, which this example doesn't get into.\n%\n% Bring the cursor back into the Matlab command window and hit 'q' to\n% quit.\n%\n% Input:\n% fullScreen (logical) - Toggles fullscreen mode on/off. 
Defaults to true.\n\n% This global lets us access some low level OpenGL values.\nglobal GL;\n\nif nargin == 0\n\tfullScreen = true;\nend\n\n% Dimensions of our GLWindow scene.\nscreenDimsCm = [48 30];\n\n% If we want to display the cursor as a dot instead of the system cursor,\n% enable this flag. The consequence of not using the system cursor and instead\n% using a rendered cursor is that it's harder to get the underlying RGB values\n% and we don't do that here.\nuseSystemCursor = true;\n\n% Create the GLWindow object.\nwin = GLWindow('FullScreen', logical(fullScreen), 'SceneDimensions', screenDimsCm, ...\n\t'HideCursor', ~useSystemCursor);\n\ntry\n\t% Add a blue oval.\n\twin.addOval([0 0], [5 5], [0 0 1], 'Name', 'square');\n\t\n\tif ~useSystemCursor\n\t\t% Add a small oval to represent our mouse position. Add this last\n\t\t% to your GLWindow so that it is always on top of your other\n\t\t% objects.\n\t\twin.addOval([0 0], [0.25 0.25], [1 0 0], 'Name', 'mouse');\n\tend\n\t\n\t% Open up the display.\n\twin.open;\n\t\n\t% Store the pixel dimensions of the display. We'll use this later to\n\t% convert pixel values into SceneDimensions values.\n\tscreenDimsPx = win.DisplayInfo(win.WindowID).screenSizePixel;\n\t\n\t% Enable keyboard capture.\n\tListenChar(2);\n\tFlushEvents;\n\t\n\t% Flag to keep track of mouse button state.\n\talreadyPressed = false;\n\t\n\t% Force the mouse to the center of the screen. The mouse coordinate\n\t% system is in pixels, with lower left as 0,0 and increasing to the\n\t% right and upwards. This coordinate system differs from that used to\n % draw objects, which we agree is irritating.\n\tfprintf('- Moving mouse to center of the display.\\n');\n\tmglSetMousePosition(screenDimsPx(1)/2, screenDimsPx(2)/2, win.WindowID);\n\t\n\t% Loop continuously until 'q' is pressed.\n\tkeepLooping = true;\n\twhile keepLooping\n\t\tif CharAvail\n\t\t\tswitch GetChar\n\t\t\t\tcase 'q'\n\t\t\t\t\tkeepLooping = false;\n\t\t\tend\n\t\telse\n\t\t\t% Get the current mouse state. The mouse state has 3 fields:\n\t\t\t% buttons, x, y. x and y will give you the horizontal and\n\t\t\t% vertical pixel position of the mouse relative to the\n\t\t\t% specified screen where (0,0) is the bottom left corner of the\n\t\t\t% display. The GLWindow object contains a property 'WindowID'\n\t\t\t% that gives us the target screen for mglGetMouse.\n\t\t\tmouseInfo = mglGetMouse(win.WindowID);\n\t\t\t\n\t\t\t% Look to see if the user is pressing a button. We keep track\n\t\t\t% of button state so that we don't register the same button\n\t\t\t% press multiple times.\n\t\t\tif mouseInfo.buttons > 0 && ~alreadyPressed\n\t\t\t\tif useSystemCursor\n\t\t\t\t\t% Print out the RGB value of the pixel the mouse was on.\n\t\t\t\t\t% To do this we make a low level OpenGL call to read pixels\n\t\t\t\t\t% straight from the framebuffer. 
This call also returns\n\t\t\t\t\t% the alpha (transparency) value as the 4th value in the\n % return vector.\n if (mouseInfo.x > 0 & mouseInfo.y > 0)\n glReadBuffer(GL.FRONT);\n pxRGBA = squeeze(glReadPixels(mouseInfo.x, mouseInfo.y, 1, 1, GL.RGB, GL.UNSIGNED_BYTE)) / 255;\n fprintf('- Pixel at position %0.1f, %0.1f in RGB: [%g, %g, %g]\\n', mouseInfo.x, mouseInfo.y ,pxRGBA(1), pxRGBA(2), pxRGBA(3));\n glReadBuffer(GL.BACK);\n end\n\t\t\t\telse\n\t\t\t\t\tfprintf('- Mouse clicked at pixel (%d, %d)\\n', mouseInfo.x, mouseInfo.y);\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\t% Toggle that the button is being pressed.\n\t\t\t\talreadyPressed = true;\n\t\t\telseif mouseInfo.buttons == 0\n\t\t\t\t% If the button isn't currently being pressed we can turn\n\t\t\t\t% off the alreadyPressed flag.\n\t\t\t\talreadyPressed = false;\n\t\t\tend\n\t\t\t\n\t\t\tif ~useSystemCursor\n\t\t\t\t% Move our circle to the position of the mouse so it looks like\n\t\t\t\t% we're moving around a cursor. We first need to put the\n\t\t\t\t% mouse pixel coordinates into the same units as\n\t\t\t\t% SceneDimensions in our GLWindow object. There's a short\n\t\t\t\t% function at the bottom of this file that does this.\n\t\t\t\tmousePos = px2cm([mouseInfo.x mouseInfo.y], screenDimsPx, screenDimsCm);\n\t\t\t\twin.setObjectProperty('mouse', 'Center', mousePos);\n\t\t\tend\n\t\t\t\n\t\t\t% Render the scene.\n\t\t\twin.draw;\n\t\tend\n\tend\n\t\n\t% Clean up.\n\tListenChar(0);\n\twin.close;\ncatch e\n\tListenChar(0);\n\twin.close;\n\trethrow(e);\nend\n\n\nfunction cmCoords = px2cm(pxCoords, screenDimsPx, screenDimsCm)\n% px2cm - Converts a position in pixels to centimeters.\n%\n% Syntax:\n% cmCoords = px2cm(pxCoords, screenDimsPx, screenDimsCm)\n%\n% Input:\n% pxCoords (Mx2) - Pixel coordinates.\n% screenDimsPx (1x2) - Screen dimensions in pixels.\n% screenDimsCm (1x2) - Screen dimensions in centimeters.\n%\n% Output:\n% cmCoords (Mx2) - Coordinates in centimeters.\n\nif nargin ~= 3\n\terror('Usage: EyeTracker.px2cm(pxCoords, screenDimsPx, screenDimsCm)');\nend\n\ncmCoords = zeros(size(pxCoords));\ncmCoords(:,1) = pxCoords(:,1) * screenDimsCm(1) / screenDimsPx(1) - screenDimsCm(1)/2;\ncmCoords(:,2) = -screenDimsCm(2)/2 + pxCoords(:,2) * screenDimsCm(2) / screenDimsPx(2);\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "GLW_Text.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_Text.m", "size": 3299, "source_encoding": "utf_8", "md5": "2c62be36a85ef53bcb2a78bba3193d3d", "text": "function GLW_Text(fullScreen)\n% GLW_Text Demonstrates how to show text with GLWindow\n%\n% Syntax:\n% GLW_Text\n% GLW_Text(fullScreen)\n%\n% Description:\n% Opens a window and shows the string 'red' on the screen.\n%\n% Press - 'r' to change the word\n% - 'c' to change the color of the text\n% - 'e' to enable the text (i.e. display it)\n% - 'd' to disable the text (i.e. hide it)\n% - 'q' to quit\n%\n% Input:\n% fullScreen (logical) - If true, the last screen attached to the computer\n% will be opened in fullscreen mode. If false, a regular window is opened\n% on the main screen. 
Defaults to true.\n\nerror(nargchk(0, 1, nargin));\n\nif ~exist('fullScreen', 'var')\n\tfullScreen = true;\nend\n\n% We can set the coordinate range of our OpenGL window.\nsceneDimensions = [50 30];\n\n% Background color (RGB) of the window.\nbgRGB = [0 0 0];\n\n% Create the GLWindow object.\nwin = GLWindow('FullScreen', fullScreen, ...\n\t\t\t 'SceneDimensions', sceneDimensions, ...\n\t\t\t 'BackgroundColor', bgRGB);\n\t\t \ntry\t\n\t% Add some text. At minimum, we have to pass the text to display to\n\t% 'addText', but there are other parameters we can set including its\n\t% location, color, and font size.\n txtString = 'red';\n enableState = true;\n\twin.addText(txtString, ... % Text to display\n\t\t 'Center', [0 0], ... % Where to center the text. (x,y)\n\t\t\t\t'FontSize', 100, ... % Font size\n\t\t\t\t'Color', [1 0 0], ... % RGB color\n\t\t\t\t'Name', 'myText'); % Identifier for the object.\n\t\n\t% Open the window.\n\twin.open;\n\t\n\t% Initialize our keyboard capture.\n\tListenChar(2);\n\tFlushEvents;\n\t\n\twhile true\n\t\t% Draw the scene.\n\t\twin.draw;\n\t\t\n\t\t% Wait for a keypress.\n\t\tswitch GetChar\n\t\t\t% Quit\n\t\t\tcase 'q'\n\t\t\t\tbreak;\n\t\t\t\n\t\t\t% Randomly change the text.\n\t\t\tcase 'r'\n\t\t\t\tswitch ceil(rand*6)\n\t\t\t\t\tcase 1\n\t\t\t\t\t\ttxtString = 'red';\n\t\t\t\t\tcase 2\n\t\t\t\t\t\ttxtString = 'green';\n\t\t\t\t\tcase 3\n\t\t\t\t\t\ttxtString = 'blue';\n\t\t\t\t\tcase 4\n\t\t\t\t\t\ttxtString = 'yellow';\n\t\t\t\t\tcase 5\n\t\t\t\t\t\ttxtString = 'brown';\n\t\t\t\t\tcase 6\n\t\t\t\t\t\ttxtString = 'pink';\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\t% This will replace whatever text was shown before.\n\t\t\t\twin.setText('myText', txtString);\n \n % Change color\n case 'c'\n % For technical reasons, you can't change the color\n % of a text object directly. But, you can 're-add'\n % it with the same name and it will replace the \n % previous version. This this code acts to change\n % the color of the currently displayed string.\n win.addText(txtString, ... % Text to display\n\t\t 'Center', [0 0], ... % Where to center the text. (x,y)\n\t\t\t\t'FontSize', 100, ... % Font size\n\t\t\t\t'Color', rand(1,3), ... % RGB color\n 'Enabled',enableState, ... 
% Preseve enable/disable\n\t\t\t\t'Name', 'myText'); % Identifier for the object.\n \n % Enable\n case 'e'\n enableState = true;\n win.enableObject('myText'); \n \n case 'd'\n enableState = false;\n win.disableObject('myText');\n\t\tend\n\tend\n\t\n\tcleanup(win);\ncatch e\n\tcleanup(win);\n\trethrow(e);\nend\n\n\nfunction cleanup(win)\nif ~isempty(win)\n\twin.close;\nend\nListenChar(0);\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "GetTheResponse.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_PhaseDistort/GetTheResponse.m", "size": 3680, "source_encoding": "utf_8", "md5": "6a5242c995de8001a5ccd02583cac7e8", "text": "function [answerIsCorrect, quitExp] = GetTheResponse(win, imageSize, whichSide, leftImagePosition, rightImagePosition)\n% [answerInCorrect, quitExp] = GetTheResponse(win, imageSize, whichSide, leftImagePosition, rightImagePosition)\n%\n% This function positions the mouse on the center of the screen and waits\n% for the user to click on one or the two images that are displayed on the\n% experimental window.\n%\n% If the user clicks on the correct/wrong image (specified by parameter\n% whichSide), the returned parameter 'answerIsCorrect' is set to true/false.\n%\n% If the user clicks outside of either image, we remain in the loop until\n% the mouse click occurs within one of the two image areas.\n%\n% If the user enter a 'q' key, the loop is terminated and the returned\n% parameter 'quitExp' is set to true.\n% \n%\n% 12/10/12 npc Wrote it.\n%\n % move cursor to center of screen\n screenSizeInPixels = win.DisplayInfo(win.WindowID).screenSizePixel;\n mouseHomeX = screenSizeInPixels(1)/2;\n mouseHomeY = screenSizeInPixels(2)/2;\n mglSetMousePosition(mouseHomeX, mouseHomeY,win.WindowID);\n \n % compute bounding rects for left and right image\n rect0 = SetRect(0,0, imageSize, imageSize);\n leftImageRect = CenterRectOnPointd(rect0, leftImagePosition(1) + mouseHomeX, ...\n leftImagePosition(2) + mouseHomeY);\n rightImageRect = CenterRectOnPointd(rect0, rightImagePosition(1) + mouseHomeX, ...\n rightImagePosition(2) + mouseHomeY);\n \n quitExp = false;\n keepLooping = true;\n answerIsCorrect = false;\n \n while (keepLooping)\n \n if CharAvail\n % Get the key\n theKey = GetChar;\n \n if (theKey == 'q')\n keepLooping = false;\n quitExp = true;\n end \n else \n % Get the mouse state \n mouseInfo = mglGetMouse(win.WindowID);\n \n % Check to see if a mouse button was pressed\n if (mouseInfo.buttons > 0) \n [keepLooping, answerIsCorrect] = ...\n CheckWhichImageWasSelected(mouseInfo.x, mouseInfo.y, leftImageRect, rightImageRect, whichSide); \n end\n end\n \n end % while keepLooping\n \n if (~quitExp)\n GiveFeedback(answerIsCorrect); \n end\n \nend\n \nfunction [keepLooping, answerIsCorrect] = CheckWhichImageWasSelected(mouseX, mouseY, leftImageRect, rightImageRect, whichSide)\n% [keepLooping, answerIsCorrect] = CheckWhichImageWasSelected(win, mouseX, mouseY, leftImageRect, rightImageRect, whichSide)\n% \n% Determine if the mouse click occurred within the left or the right image\n% and determine whether the anser is correct or wrong. If the mouse click was\n% outside of both image areas, remain in the polling loop.\n%\n% 12/10/12 npc Wrote it.\n%\n answerIsCorrect = false;\n \n % If the user did not click on the left or the right image, remain in the polling loop\n if ((~IsInRect(mouseX, mouseY, leftImageRect))&&(~IsInRect(mouseX, mouseY, rightImageRect)))\n keepLooping = true;\n return;\n end\n \n % Ok, we have a hit. 
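% Illustrative aside (use the Psychtoolbox helpers in real code; the numbers
% here are hypothetical). A PTB rect is a 1x4 vector, conventionally
% [left top right bottom], so the containment test IsInRect performs for the
% hit detection above amounts to the comparisons below.
rectEx = [100 200 356 456];      % e.g. a 256x256 rect from CenterRectOnPointd
pxEx = 150; pyEx = 300;          % hypothetical mouse position in pixels
hitEx = pxEx >= rectEx(1) && pxEx <= rectEx(3) && ...
        pyEx >= rectEx(2) && pyEx <= rectEx(4);   % true for these values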
Exit mouse/keyboard polling loop \n keepLooping = false;\n \n % Determine if the mouse click was on the correct image\n if (IsInRect(mouseX, mouseY, leftImageRect)) \n % mouse click on LEFT image\n if (whichSide == 0)\n answerIsCorrect = true;\n end\n elseif (IsInRect(mouseX, mouseY, rightImageRect))\n % mouse click on RIGHT image\n if (whichSide == 1)\n answerIsCorrect = true;\n end\n end\n \nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "LoadImagesAndComputeTheirSpectra.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_PhaseDistort/LoadImagesAndComputeTheirSpectra.m", "size": 3523, "source_encoding": "utf_8", "md5": "c04e731ea8750c9a0e2e9608650c1853", "text": "function [image1Struct, image2Struct, imageSize] = LoadImagesAndComputeTheirSpectra(imageResizingFactor)\n% [image1struct, image2struct, imageSize] = LoadImagesAndComputeTheirSpectra(imageResizingFactor)\n%\n% Load images, resize them according to parameter imageResizingFactor\n% (via bicubic interpolation) and perform Fourier analysis on them. \n% Convention assumes that images are in MAT files with image name \n% equal to file name. Images are also assumed scaled in range 0-255.\n%\n% The returned image structs contain the following fields:\n% - Name : a string with the name of the image\n% - ImageMatrix : the image data, i.e, a [rows x cols] matrix\n% - Amplitude : the amplitude spectrum of the image, i.e., a [rows x cols] matrix\n% - Phase : the phase spectrum of the image, i.e., a [rows x cols] matrix\n% - RGBdata : the RGB version of the image data, i.e, a [rows x cols x 3] matrix \n% \n% 12/10/12 npc Wrote it.\n%\n\n image1Struct = loadAndAnalyze('reagan128', imageResizingFactor);\n image2Struct = loadAndAnalyze('einstein128', imageResizingFactor);\n \n if (isempty(image1Struct) || isempty(image2Struct))\n imageSize = 0;\n else\n [imageSize,~] = size(image1Struct.ImageMatrix);\n end\n \nend\n\nfunction imageStruct = loadAndAnalyze(imageName, imageResizingFactor)\n% Load image, interpolate and compute its amplitude/phase spectra\n%\n if (exist(sprintf('%s.mat', imageName)) == 2) \n % load mat file with image\n load(imageName, '-mat');\n eval(['imageMatrix = ' imageName ';']);\n \n % interpolate imageMatrix by imageResizingFactor\n if (imageResizingFactor > 1)\n imageMatrix = intepolateImage(imageMatrix, imageResizingFactor);\n end\n \n % flip image upside-down and normalize it\n imageMatrix = flipud(imageMatrix) / 255;\n \n % compute spectral analysis\n imageFT = fft2(imageMatrix);\n \n % generate imageStruct\n imageStruct = struct; \n imageStruct.Name = imageName;\n imageStruct.ImageMatrix = imageMatrix;\n imageStruct.Amplitude = abs(imageFT);\n imageStruct.Phase = angle(imageFT);\n imageStruct.RGBdata = repmat(imageStruct.ImageMatrix, [1 1 3]);\n else \n % file does not exist. 
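% Illustrative sketch (not part of the original function; "Ex" names are
% hypothetical). The amplitude and phase spectra stored above are sufficient
% to rebuild the image, which is what the phase-distortion demo relies on:
testImEx = rand(8,8);                       % small hypothetical image
FEx = fft2(testImEx);
reconEx = real(ifft2(abs(FEx).*exp(1i*angle(FEx))));
assert(max(abs(reconEx(:) - testImEx(:))) < 1e-10);
% Keeping abs(FEx) fixed while substituting a different (e.g. randomized)
% phase matrix is how phase-scrambled versions of an image are produced.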
Print message\n fprintf('Did not find image %s', sprintf('%s.mat', imageName));\n imageStruct = [];\n end\nend\n\nfunction newImage = intepolateImage(image, factor)\n% Intepolate image by the given factor\n%\n % make sure newImageSize is an even number\n newImageSize = ceil(size(image,1) * factor);\n if (mod(newImageSize,2) == 1)\n newImageSize = newImageSize-1;\n end\n \n % compute original and interpolated indices\n x = [1:size(image,1)]; \n xi = 1+[0:newImageSize-1]/newImageSize*size(image,1);\n [X,Y] = meshgrid(x,x); \n [XI,YI] = meshgrid(xi,xi);\n\n % do the interpolation\n newImage = interp2(X,Y, image, XI, YI, 'cubic*');\n \n % take care of any nan values\n newImage(isnan(newImage)) = 0;\n \n % enable this flag to generate a figure showing the original and intepolated images\n displayIntepolatedPhotos = false;\n \n if (displayIntepolatedPhotos)\n figure(2); clf;\n subplot(1,2,1);\n imagesc(image); axis square\n set(gca, 'CLim', [0 255]);\n subplot(1,2,2);\n imagesc(newImage); axis square\n set(gca, 'CLim', [0 255]);\n colormap(gray);\n end \nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "PhaseDistortDemoGUI.m", "ext": ".m", "path": "TeachingCode-master/GLWindowExamples/GLW_PhaseDistort/PhaseDistortDemoGUI.m", "size": 5375, "source_encoding": "utf_8", "md5": "e5f168581d0225e06a5760f0e0990ec2", "text": "function varargout = PhaseDistortDemoGUI(varargin)\n\n % Begin initialization code - DO NOT EDIT\n gui_Singleton = 1;\n gui_State = struct('gui_Name', mfilename, ...\n 'gui_Singleton', gui_Singleton, ...\n 'gui_OpeningFcn', @PhaseDistortDemoGUI_OpeningFcn, ...\n 'gui_OutputFcn', @PhaseDistortDemoGUI_OutputFcn, ...\n 'gui_LayoutFcn', [] , ...\n 'gui_Callback', []);\n if nargin && ischar(varargin{1})\n gui_State.gui_Callback = str2func(varargin{1});\n end\n\n if nargout\n [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});\n else\n gui_mainfcn(gui_State, varargin{:});\n end\n\n % End initialization code - DO NOT EDIT\nend\n\n% --- Executes just before PhaseDistortDemoGUI is made visible.\nfunction PhaseDistortDemoGUI_OpeningFcn(hObject, eventdata, handles, varargin)\n % This function has no output args, see OutputFcn.\n % hObject handle to figure\n % eventdata reserved - to be defined in a future version of MATLAB\n % handles structure with handles and user data (see GUIDATA)\n % varargin command line arguments to PhaseDistortDemoGUI (see VARARGIN)\n\n % Choose default command line output for PhaseDistortDemoGUI\n handles.output = hObject;\n\n experimentParams = struct;\n experimentParams.generatePhaseFieldFromPinkNoise = false;\n experimentParams.useSoftCircularAperture = false;\n experimentParams.imageResizingFactor = 1.5;\n experimentParams.questTrials = 60;\n \n handles.experimentParams = experimentParams;\n \n set(handles.WindowImagesCheckbox, 'Value', handles.experimentParams.useSoftCircularAperture);\n if (experimentParams.generatePhaseFieldFromPinkNoise)\n set(handles.Noise1fMethodButton, 'Value', 1);\n set(handles.DavidMethodButton, 'Value', 0);\n else\n set(handles.Noise1fMethodButton, 'Value', 0);\n set(handles.DavidMethodButton, 'Value', 1);\n end\n \n set(handles.ResizingFactor, 'String', sprintf('%2.2f', handles.experimentParams.imageResizingFactor));\n set(handles.QuestTrials, 'String', sprintf('%2.0f', handles.experimentParams.questTrials));\n \n % Update handles structure\n guidata(hObject, handles);\n \n % UIWAIT makes PhaseDistortDemoGUI wait for user response (see UIRESUME)\n % uiwait(handles.figure1);\nend\n\n% --- 
Outputs from this function are returned to the command line.\nfunction varargout = PhaseDistortDemoGUI_OutputFcn(hObject, eventdata, handles) \n varargout{1} = handles.output;\nend\n\n% --- Executes on button press in StartExperiment.\nfunction StartExperiment_Callback(hObject, eventdata, handles)\n \n handles.experimentParams.imageResizingFactor = str2double(get(handles.ResizingFactor, 'String'));\n handles.experimentParams.questTrials = str2double(get(handles.QuestTrials, 'String'));\n handles.experimentParams.useSoftCircularAperture = get(handles.WindowImagesCheckbox, 'Value');\n handles.experimentParams\n \n PhaseDistortDemo(handles.experimentParams.generatePhaseFieldFromPinkNoise, ...\n handles.experimentParams.useSoftCircularAperture, ...\n handles.experimentParams.imageResizingFactor, ...\n handles.experimentParams.questTrials, ...\n handles.ResultsAxes); \nend\n\nfunction WindowImagesCheckbox_Callback(hObject, eventdata, handles)\n handles.experimentParams.useSoftCircularAperture = get(hObject,'Value');\nend\n\n\nfunction ResizingFactor_Callback(hObject, eventdata, handles)\n handles.experimentParams.imageResizingFactor = str2double(get(hObject,'String'));\nend\n\nfunction QuestTrials_Callback(hObject, eventdata, handles)\n handles.experimentParams.questTrials = str2double(get(hObject,'String'));\nend\n\nfunction ResizingFactor_CreateFcn(hObject, eventdata, handles)\n if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))\n set(hObject,'BackgroundColor','white');\n end\nend\n\nfunction edit2_Callback(hObject, eventdata, handles)\n\nend\n\nfunction edit2_CreateFcn(hObject, eventdata, handles)\n if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))\n set(hObject,'BackgroundColor','white');\n end\nend\n\nfunction RandomPhaseGenerationSelector_SelectionChangeFcn(hObject, eventdata, handles)\n\n switch get(eventdata.NewValue,'Tag') % Get Tag of selected object.\n case 'DavidMethodButton'\n handles.experimentParams.generatePhaseFieldFromPinkNoise = false;\n case 'Noise1fMethodButton'\n handles.experimentParams.generatePhaseFieldFromPinkNoise = true;\n otherwise\n % Code for when there is no match.\n end\n \nend\n\nfunction figure1_CreateFcn(hObject, eventdata, handles)\nend\n\nfunction QuestTrials_CreateFcn(hObject, eventdata, handles)\n if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))\n set(hObject,'BackgroundColor','white');\n end\nend\n\nfunction edit4_Callback(hObject, eventdata, handles)\nend\n\nfunction edit4_CreateFcn(hObject, eventdata, handles)\n if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))\n set(hObject,'BackgroundColor','white');\n end\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "backpropTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/backpropTutorial.m", "size": 5769, "source_encoding": "utf_8", "md5": "6899dc4f89555b0feb61209b349d76d9", "text": "function backpropTutorial\n% backpropTutorial.m\n%\n% Illustrate backprop, by trying to use it to fit a function with a two\n% layer network. The initial idea was to set this up to reproduce some\n% of the fits shown in Figure 4.12 of Bishop, using the backpropagation\n% algorithm described later in the chapter. \n%\n% The line example works perfectly. 
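% Illustrative gradient check (not part of the original tutorial; all names
% here are hypothetical). Because the delta-rule updates implemented later in
% this tutorial are hand-derived, a standard sanity check is to compare the
% analytic gradients with finite differences on a tiny network: one input,
% two tanh hidden units, a linear output, and squared error.
WinEx = randn(2,2);              % rows = hidden units, cols = [bias, x]
WoutEx = randn(1,3);             % [bias, h1, h2]
xEx = 0.3; tEx = 0.7;            % one hypothetical training pair
xinEx = [1; xEx];
zEx = WinEx*xinEx;  hEx = tanh(zEx);
yEx = WoutEx*[1; hEx];
Eex = 0.5*(yEx - tEx)^2;
% Analytic gradients implied by the delta rule
gOutEx = (yEx - tEx)*[1; hEx]';                                   % 1x3
gInEx = ((yEx - tEx)*WoutEx(2:3)'.*(1 - tanh(zEx).^2))*xinEx';    % 2x2
% Finite-difference comparison for one entry of each weight matrix
stepEx = 1e-6;
WinP = WinEx;  WinP(1,2) = WinP(1,2) + stepEx;
EinP = 0.5*(WoutEx*[1; tanh(WinP*xinEx)] - tEx)^2;
WoutP = WoutEx; WoutP(2) = WoutP(2) + stepEx;
EoutP = 0.5*(WoutP*[1; hEx] - tEx)^2;
fprintf('dE/dWin(1,2): analytic %g, numeric %g\n', gInEx(1,2), (EinP-Eex)/stepEx);
fprintf('dE/dWout(2):  analytic %g, numeric %g\n', gOutEx(2), (EoutP-Eex)/stepEx);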
For the parabola, dualramp, and step\n% functions, the network does approximately the right thing, but the quality of fit\n% is markedly worse than shown in Figure 4.12. The sinusoid is not fit \n% at all well.\n%\n% The example in Bishop uses a different algorithm to set the weights, and\n% this may be the problem. Or we may still have a bug in the backprop\n% implementation here.\n%\n% For some of the test functions, the training can get stuck in a local\n% minima. Whether it does this or not is pretty sensitive to the initial\n% weights, learning rate, etc. For the most part, the current parameters\n% seem to work pretty well for the functions being fit (except for the\n% sine).\n%\n% 8/27/08 dhb Wrote it.\n% 8/27/08 dhb and others Squashed several bugs during reading group meeting.\n% 8/27/08 dhb Store previous weights from output layer for hidder layer update.\n% dhb Get tanh derivative correct.\n% dhb Variable learning rate.\n% dhb More functions to try to fit.\n\n%% Clear and close\nclear; close all;\n\n%% Define network dimension and initialize weights. The number of weights\n% includes the additive term.\nnInputUnits = 2;\nnHiddenUnits = 5;\n inputWeights = rand(nHiddenUnits-1,nInputUnits);\n outputWeights = rand(1,nHiddenUnits);\n\n%% Define function to try to fit. Change the string\n% to one of the options in the case statement to try\n% different functions.\ninputFunction = 'dualramp';\nnTrainingPoints = 1000;\nx = 2*rand(nInputUnits-1,nTrainingPoints)-1;\nswitch(inputFunction)\n case 'parabola'\n t = x.^2;\n case 'line'\n t = x;\n case 'step'\n t = sign(x);\n case 'dualramp'\n t = abs(x);\n case 'sine'\n t = sin(2*pi*x);\nend\n \n%% Plot of target function\nfunPlot = figure; clf; hold on\nset(gca,'FontName','Helvetica','FontSize',14);\nplot(x,t,'ro','MarkerSize',2,'MarkerFaceColor','r');\n\n%% Add plot of the initial network response\ny0 = ComputeNetwork(x,inputWeights,outputWeights);\nplot(x,y0,'go','MarkerSize',2,'MarkerFaceColor','g');\ndrawnow;\n\n%% Set up learning and error tracking parameters\nn0 = 0.2;\ndecayExponent = 0.01;\nerrIndex = 1;\nerr(errIndex) = sum((t-y0).^2);\nerrPlot = figure; clf;\nset(gca,'FontName','Helvetica','FontSize',14);\nplotEvery = 100;\n\n%% Train the network, using the backprop algorithm\nnTrainingIterations = 5000;\nfor i = 1:nTrainingIterations\n % Print and plot of incremental error\n if (rem(i,100) == 0)\n yNow = ComputeNetwork(x,inputWeights,outputWeights);\n errIndex = errIndex+1;\n err(errIndex) = sum((t-yNow).^2);\n figure(errPlot); hold on\n plot(plotEvery*((1:errIndex)-1),err(1:errIndex),'k');\n end\n \n % Choose a training value from training set\n randomObservationIndices = randperm(nTrainingPoints);\n randomObservationIndex = randomObservationIndices(1);\n xTrain = x(:,randomObservationIndex);\n xTrainOnes = [ones(1,size(xTrain,2)) ; xTrain];\n tTrain = t(randomObservationIndex);\n \n % Compute network values for this training exemplar\n [yCurrent,yCurrentLinear,hiddenCurrent,hiddenCurrentLinear] = ComputeNetwork(xTrain,inputWeights,outputWeights);\n \n % Update learning rate\n n = n0/(i^decayExponent);\n \n % Update output weights\n deltaOut = (yCurrent-tTrain);\n outputWeights0 = outputWeights;\n for j = 1:nHiddenUnits\n outputWeights(1,j) = outputWeights(1,j) - n*deltaOut*hiddenCurrent(j);\n end\n \n % Backprop to input weights\n for j = 2:nHiddenUnits\n deltaHidden = nonlinderiv(hiddenCurrentLinear(j-1))*deltaOut*outputWeights0(1,j);\n for k = 1:nInputUnits\n inputWeights(j-1,k) = inputWeights(j-1,k) - 
n*deltaHidden*xTrainOnes(k); \n end\n end\nend\n\n% Labels for error plot\nfigure(errPlot);\nxlabel('Iteration','FontName','Helvetica','FontSize',18);\nylabel('Summed Squared Error','FontName','Helvetica','FontSize',18);\n\n%% Add plot final network response, in black\nfigure(funPlot);\ny = ComputeNetwork(x,inputWeights,outputWeights);\nplot(x,y,'ko','MarkerSize',2,'MarkerFaceColor','k');\nxlim([-1 1.5]);\nxlabel('X','FontName','Helvetica','FontSize',18);\nylabel('Y','FontName','Helvetica','FontSize',18);\nlegend('target','initial','final');\n\n%% Done\nend\n\n\n%% Forward network computation. Linear output layer and non-linearity on\n% output of hidden units.\nfunction [response,responseLinear,hiddenResponse,hiddenResponseLinear] = ComputeNetwork(x,inputWeights,outputWeights)\n % Compute response of hidden units\n x = [ones(1,size(x,2)) ; x];\n hiddenResponseLinear = inputWeights*x;\n hiddenResponse = nonlin(hiddenResponseLinear);\n \n % Compute output response\n hiddenResponse = [ones(1,size(hiddenResponse,2)) ; hiddenResponse];\n responseLinear = outputWeights*hiddenResponse;\n response = responseLinear; \nend\n\n%% Nonlinear function. Can change this and the corresponding derivative\n% function if you want to use another non-linearity (e.g. logistic).\nfunction y = nonlin(x)\n y = tanh(x);\nend\n\n%% Nonlinear function derivative.\nfunction y = nonlinderiv(x)\n y = tanhderiv(x);\nend\n\n%% Derivative of hyperbolic tangent.\nfunction y = tanhderiv(x)\n y = 1-tanh(x).^2;\nend\n\n% Logistic function\nfunction y = logit(x)\n y = 1./(1+exp(-x));\nend\n\n% Logistic derivative\nfunction y = logitderiv(x)\n y = logit(x).*(1-logit(x));\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "crossvalTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/crossvalTutorial.m", "size": 2991, "source_encoding": "utf_8", "md5": "3fcbfceda64bb678c4eb5f1e92ce3200", "text": "function crossvalTutorial\n% crossvalTutorial\n%\n% Quick little tutorial to show how to cross-validate some data.\n%\n% 12/16/16 dhb, ar Wrote the skeleton.\n\n%% Clear\nclear; close all;\n\n%% Parameters\nnIndependentValues = 10;\nnReplications = 100;\nnoiseSd = 10;\nnFolds = 8;\nc1 = 5;\nc2 = -3;\n\n%% Let's generate a dataset of random numbers that are described by a quadratic\nxVals = repmat(linspace(0,10,nIndependentValues),nReplications,1);\nyObserved = zeros(size(xVals));\nfor jj = 1:nReplications\n xMatTemp = [xVals(jj,:) ; xVals(jj,:).^ 2];\n yTemp = [c1 c2]*xMatTemp;\n yObserved(jj,:) = yTemp + normrnd(0,noiseSd,1,size(yObserved,2));\nend\n\n%% Plot the simulated data\nfigure; clf; hold on\nfor jj = 1:size(yObserved,2) \n plot(xVals(jj,:),yObserved(jj,:),'ro','MarkerSize',8,'MarkerFaceColor','r');\nend\n\n%% Do a cross-validated fit, using the crossval function\n%\n% We'll do both linear and quadratic fits\n%\n% Linear\nlinearCrossValErr = crossval(@linearFit,xVals,yObserved,'KFold',nFolds);\nmeanLinearCrossValErr = mean(linearCrossValErr);\n\n% Quadratic\nquadraticCrossValErr = crossval(@quadraticFit,xVals,yObserved,'KFold',nFolds);\nmeanQuadraticCrossValErr = mean(quadraticCrossValErr);\n\n% Report who won\nif (quadraticCrossValErr < linearCrossValErr)\n fprintf('Crossval method: Correctly identified that it was quadratic\\n');\nelse\n fprintf('Crossval method: Incorrectly think it is linear\\n');\nend\n\n%% Now we'll do the same thing using the cvpartition class.\n%\n% This is a bit less slick for this simple example, but much\n% more flexible when the fit 
functions need to be more complicated.\nc = cvpartition(nReplications,'Kfold',nFolds);\nfor kk = 1:nFolds\n % Get indices for kkth fold\n trainingIndex = c.training(kk);\n testIndex = c.test(kk);\n check = trainingIndex + testIndex;\n if (any(check ~= 1))\n error('We do not understand cvparitiion''s kFold indexing scheme');\n end\n \n % Get linear and quadratic error for this fold\n linearCrossValErr(kk) = linearFit(xVals(trainingIndex,:),yObserved(trainingIndex,:),xVals(testIndex,:),yObserved(testIndex,:));\n quadraticCrossValErr(kk) = quadraticFit(xVals(trainingIndex,:),yObserved(trainingIndex,:),xVals(testIndex,:),yObserved(testIndex,:));\nend\n\n% Get mean error for two types of it\nmeanQuadraticCrossValErr = mean(quadraticCrossValErr);\nmeanLinearCrossValErr = mean(linearCrossValErr);\n\n% Report who won\nif (quadraticCrossValErr < linearCrossValErr)\n fprintf('CVParitition method: Correctly identified that it was quadratic\\n');\nelse\n fprintf('CVParitition method: Incorrectly think it is linear\\n');\nend\n\nend\n\nfunction testVal = linearFit(xTrain,yTrain,xTest,yTest)\n c = xTrain(:)\\yTrain(:);\n yPred = xTest(:)*c;\n yDiff = yPred(:)-yTest(:);\n testVal = sum(yDiff.^2);\nend\n\nfunction testVal = quadraticFit(xTrain,yTrain,xTest,yTest)\n c = [xTrain(:) xTrain(:).^2]\\yTrain(:);\n yPred = [xTest(:) xTest(:).^2]*c;\n yDiff = yPred(:)-yTest(:);\n testVal = sum(yDiff.^2);\nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "fourierFitTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/fourierFitTutorial.m", "size": 6908, "source_encoding": "utf_8", "md5": "a67ecd1ea0d9e92848a1435151551bbe", "text": "function fourierFitTutorial\n% fourierFitTutorial\n%\n% Demonstrate how to fit fourier functions to data, using optimization\n% toolbox. Both unconstrained and constrained. Shows fmincon in action.\n%\n% 4/21/09 dhb Started on it.\n% 7/15/09 dhb Check optim version and handle inconsistences in options.\n\n%% Clear\nclear; close all;\n\n%% Generate a test data set with three elements in the data set. 
Make shape similar but not identical.\nnoisesd = 0.2;\nfitorder = 2;\ntestHues = 1:40;\nxset{1} = (testHues-1)/length(testHues+1);\nxset{2} = xset{1};\nxset{3} = xset{1};\ncoeffstrue{1} = [2 1 1 0.4 0.25 0.1 0.2];\nyset{1} = ComputeFourierModel(coeffstrue{1},xset{1}) + normrnd(0,noisesd,size(xset{1}));\ncoeffstrue{2} = [1 0.3 1.2 0.5 0.25 -0.1 0.0];\nyset{2} = ComputeFourierModel(coeffstrue{2},xset{2}) + normrnd(0,noisesd,size(xset{1}));\ncoeffstrue{3} = [1.5 0.5 0.9 0.3 0.35 0 0.2];\nyset{3} = ComputeFourierModel(coeffstrue{3},xset{3}) + normrnd(0,noisesd,size(xset{1}));\n\n%% Fit the dataset, unconstrained\n[coeffsunset,ypredunset,errorunset] = FitUnconstrainedModel(xset,yset,fitorder);\n\n%% Fit the dataset, constrained\n[coeffsconset,ypredconset,errorconset] = FitConstrainedModel(xset,yset,fitorder);\n\n%% Report what happened\nfigure; clf;\nsubplot(3,1,1); hold on\nplot(xset{1},yset{1},'ro','MarkerFaceColor','r','MarkerSize',6);\nplot(xset{1},ypredunset{1},'r');\nplot(xset{1},ypredconset{1},'b');\nylim([0 5]);\nsubplot(3,1,2); hold on\nplot(xset{2},yset{2},'ro','MarkerFaceColor','r','MarkerSize',6);\nplot(xset{2},ypredunset{2},'r');\nplot(xset{2},ypredconset{2},'b');\nylim([0 5]);\nsubplot(3,1,3); hold on\nplot(xset{3},yset{3},'ro','MarkerFaceColor','r','MarkerSize',6);\nplot(xset{3},ypredunset{3},'r');\nplot(xset{3},ypredconset{3},'b');\nylim([0 5]);\n\nend\n\nfunction [coeffsset,ypredset,errorset] = FitUnconstrainedModel(xset,yset,order)\n% [coeffset,ypred] = FitUnconstrainedModel(x,y,order)\n%\n% Fit the fourier model of given order separately to each data set in the\n% passed cell arrays xset and yset. Return cell arrays giving fit coefficients,\n% predictions, and errors.\n%\n% 4/21/09 dhb Wrote it.\n\n% Optimization options\noptions = optimset('fmincon');\nif (verLessThan('optim','4.1'))\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off');\nelse\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off','Algorithm','active-set');\nend\n\n% Define length of coefficients from order\ncoeffs00 = zeros(1,3+2*(order-1));\n\n% Do each set separately\nnSets = length(xset);\nfor i = 1:nSets\n x = xset{i};\n y = yset{i};\n\n % Initialize guess and set bounds, based loosely on data.\n coeffs0 = coeffs00;\n coeffs0(1) = mean(y);\n coeffs0(2) = mean(y);\n lb = [min(y) -10*max(abs(y))*ones(1,length(coeffs0(2:end)))];\n ub = [max(y) 10*max(abs(y))*ones(1,length(coeffs0(2:end)))];\n \n % Do the fit.\n coeffsset{i} = fmincon(@FitUnconstrainedFun,coeffs0,[],[],[],[],...\n lb,ub,[],options,x,y);\n\n % Get final prediction and error for return\n ypredset{i} = ComputeFourierModel(coeffsset{i},x);\n errorset{i} = EvaluateModelFit(y,ypredset{i});\nend\n\nend\n\nfunction f = FitUnconstrainedFun(coeffs,x,y)\n% f = FitUnconstrainedFun(coeffs,x,y)\n%\n% Error function for unconstrained model fit.\n%\n% 4/21/09 dhb Wrote it.\n\nypred = ComputeFourierModel(coeffs,x);\nf = EvaluateModelFit(y,ypred);\nend\n\nfunction [coeffsset,ypredset,errorset] = FitConstrainedModel(xset,yset,order,guesscoeffs)\n% [coeffset,ypred] = FitConstrainedModel(x,y,order,guesscoeffs)\n%\n% Fit the fourier model of given order separately to each data set in the\n% passed cell arrays xset and yset. Return cell arrays giving fit coefficients,\n% predictions, and errors. 
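% Illustrative aside (not part of the original code; the "Toy" names are
% hypothetical). FitUnconstrainedModel above passes x and y to the error
% function as trailing arguments after the options structure, an older
% Optimization Toolbox calling convention. The same pattern is commonly
% written with an anonymous function that captures the data, as in this
% self-contained toy fit of a straight line:
xToy = linspace(0,1,20);
yToy = 2*xToy + 0.5 + 0.05*randn(size(xToy));
rmsErrToy = @(c,x,y) sqrt(mean((y - (c(1) + c(2)*x)).^2));
optsToy = optimset(optimset('fmincon'),'Display','off','Diagnostics','off');
cToy = fmincon(@(c) rmsErrToy(c,xToy,yToy),[0 0],[],[],[],[],[-10 -10],[10 10],[],optsToy);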
The fit is constrained so that each element of the\n% dataset has the same modulation shape, but the modulation mean and depth can\n% vary.\n%\n% 4/21/09 dhb Wrote it.\n\n% Optimization options\noptions = optimset('fmincon');\nif (verLessThan('optim','4.1'))\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off');\nelse\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off','Algorithm','active-set');\nend\n\n% Grab number of sets\nnSets = length(xset);\n\n% Initialize guess and set bounds, based loosely on data.\nconcoeffs0 = [zeros(1,2*nSets) zeros(1,1+2*(order-1))];\nminy = Inf;\nmaxy = -Inf;\nindex = 1;\nfor i = 1:nSets\n concoeffs0(2*(i-1)+1) = mean(yset{i});\n concoeffs0(2*(i-1)+2) = mean(yset{i});\n index = index+2;\n if (min(yset{i}) < miny)\n miny = min(yset{i});\n end\n if (max(yset{i}) > maxy)\n maxy = max(yset{i});\n end\nend\nlb = [-10*max(abs([miny maxy]))*ones(1,length(concoeffs0))];\nub = [10*max(abs([miny maxy]))*ones(1,length(concoeffs0))];\nfor i = 1:nSets\n lb(2*(i-1)+1) = miny;\n ub(2*(i-1)+1) = maxy;\nend\n\n% Do the numerical fit\nconcoeffs = fmincon(@FitConstrainedFun,concoeffs0,[],[],[],[],...\n lb,ub,[],options,xset,yset);\n\n% Get final prediction and error for return\ncoeffsset = UnpackConCoeffs(concoeffs,nSets);\nfor i = 1:nSets\n ypredset{i} = ComputeFourierModel(coeffsset{i},xset{i});\n errorset{i} = EvaluateModelFit(yset{i},ypredset{i});\nend\nend\n\nfunction f = FitConstrainedFun(concoeffs,xset,yset)\n% f = FitUnconstrainedFun(coeffs,xset,yset)\n%\n% Error function for constrained model fit.\n%\n% 4/21/09 dhb Wrote it.\n\n% Unpack constrained coefficients\nnSets = length(xset);\ncoeffsset = UnpackConCoeffs(concoeffs,nSets);\n\n% Get error for each set and sum\nf = 0;\nfor i = 1:nSets\n ypred = ComputeFourierModel(coeffsset{i},xset{i});\n f = f + EvaluateModelFit(yset{i},ypred);\nend\nend\n\nfunction coeffsset = UnpackConCoeffs(concoeffs,nSets)\n% coeffsset = UnpackConCoeffs(concoeffs,nSets)\n%\n% Unpack array of constrained coefficients into cell array\n% in form to evaluate each component separately.\n%\n% 4/21/09 dhb Wrote it.\n\nindex = 1;\nfor i = 1:nSets\n coeffsset{i}(1) = concoeffs(index);\n index = index+1;\n coeffsset{i}(2) = concoeffs(index);\n index = index+1;\nend\nfor i = 1:nSets\n coeffsset{i}(3:3+length(concoeffs(index:end))-1) = concoeffs(index:end);\nend\n \nend\n\nfunction ypred = ComputeFourierModel(coeffs,x)\n% ypred = ComputeFourierModel(coeffs,x)\n%\n% ypred = coeffs(1) + coeffs(2)*(sin(2*pi*x) + coeffs(3)*cos(2*pi*x) + coeffs(4)*sin(2*pi*2*x) + coeffs(5)*cos(2*pi*2*x) + ...\n%\n% The order of the equation is determined from the length of coeffs.\n% The input x is assumed to be in the range [0-1].\n%\n% 4/21/09 dhb Wrote it.\n\n% Modulation\na = coeffs(1);\nb = coeffs(2);\n\nmodulation = sin(2*pi*x) + coeffs(3)*cos(2*pi*x);\nfor i = 1:length(coeffs(4:end))/2\n modulation = modulation + coeffs(2*(i-1)+4)*sin(2*pi*(i+1)*x) + coeffs(2*(i-1)+5)*cos(2*pi*(i+1)*x);\nend\nypred = a + b*modulation;\n\nend\n\nfunction f = EvaluateModelFit(y,ypred)\n% f = EvaluateModelFit(y,ypred)\n%\n% 4/21/09 dhb Wrote it.\n\nresid = y-ypred;\nf = sqrt(sum(resid.^2)/length(resid));\n\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "crossContextMLDScalingTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/crossContextMLDScalingTutorial.m", "size": 5968, "source_encoding": "utf_8", "md5": "6678160b25bded3b8fd03551f9351930", "text": 
"function CrossContextMLDSScalingTutorial\n% CrossContextMLDSScalingTutorial\n%\n% Suppose we have cross-context data of the form, see stimulus\n% X, seen in context 1, and choose which of two alternatives, Y1 and Y2,\n% seen in context 2, that is most like X. \n% \n% We want to take a bunch of data of this form, where Y1 and Y2 vary\n% trial-to-trial, and find the value of Y that is the best match to X. \n%\n% We assume that the Y's live in an N dimensional perceptual space.\n% This seems like an MDS setup, with triad data. As discussed\n% in Maloney and Yang (2003), triads are a special case of \n% the MLDS stimuli, with one stimulus repeated twice. \n%\n% The other difference here is the change of context, but I don't think\n% that is fundamental.\n%\n% If we're willing to take the scales as one-dimensional and assume that\n% the scale in one context maps into the other in some parametric\n% way, we could find the parametric transformation.\n%\n% This code simulates out the basic MLDS analysis for a case like the above,\n% to make sure nothing goes too wonky.\n%\n% NOTE: We how have a toolbox, BrainardLabToolbox/MLDSColorSelection, that\n% implements much of what is here. This should be modified to call through\n% that, and the two should cross-reference each other.\n%\n% 1/10/12 dhb Wrote it.\n\n%% Clear and close\nclear; close all;\n\n%% Parameters\nsigma = 0.10;\nnY = 10;\nnSimulatePerPair = 100;\n\n%% Generate a list of X and Y stimuli\nx = 0.55;\nyOfX = MapXToY(x);\nlinY = logspace(log10(0.5),log10(0.6),nY);\ny = MapXToY(linY);\n\n%% Simulate out probabilities for pairwise trials\nthePairs = nchoosek(1:nY,2);\nnPairs = size(thePairs,1);\ntheSimResponse1 = zeros(nPairs,1);\ntheTheoryResponse1 = zeros(nPairs,1);\nfor i = 1:nPairs\n n1 = 0;\n for j = 1:nSimulatePerPair\n if (SimulateResponse1(x,y(thePairs(i,1)),y(thePairs(i,2)),sigma,@MapXToY))\n n1 = n1 + 1;\n end\n end\n theSimResponse1(i) = n1;\n theTheoryProb1(i) = ComputeProb1(x,y(thePairs(i,1)),y(thePairs(i,2)),sigma,@MapXToY);\nend\ntheSimProb1 = theSimResponse1/nSimulatePerPair;\n\n%% Make sure that simulated data matches the theoretical model. Nothing\n% else is going to work if this isn't done correctly\nfigure; clf; hold on\nplot(theTheoryProb1,theSimProb1,'ro','MarkerSize',4,'MarkerFaceColor','r');\nxlabel('Theory'); ylabel('Simulation');\naxis('square'); axis([0 1 0 1]);\nplot([0 1],[0 1],'k');\n\n%% Find the maximum likelihood solution for x and the y's. 
We\n% fix the origin at y(1) and the scale via sigma, which are thus\n% assumed known.\n\n% Compute log likelihood of actual simulated data as a baseline\nlogLikelyTrue = ComputeLogLikelihood(thePairs,theSimResponse1,nSimulatePerPair,MapXToY(x),y,sigma); \n\n% Search to find the best solution\noptions = optimset('fmincon');\noptions = optimset(options,'Diagnostics','off','Display','iter','LargeScale','off','Algorithm','active-set');\ny1 = y(1);\nx0 = [x linY(2:end)];\nvlb = -10*max(x0)*ones(size(x0));\nvub = 10*max(x0)*ones(size(x0));\nfitX = fmincon(@(x)FitContextFCScalingFun(x,y1,thePairs,theSimResponse1,nSimulatePerPair,sigma),x0,[],[],[],[],vlb,vub,[],options);\nxFit = fitX(1);\nyFit = [y1 fitX(2:end)];\nlogLikelyFit = ComputeLogLikelihood(thePairs,theSimResponse1,nSimulatePerPair,xFit,yFit,sigma); \nfprintf('Log likelihood true: %g, fit: %g\\n',logLikelyTrue,logLikelyFit);\n\n%% Plot the recovered configuration\nfigure; clf; hold on\nplot(y,yFit,'ro','MarkerSize',4,'MarkerFaceColor','r');\nplot(MapXToY(x),xFit,'bo','MarkerSize',4,'MarkerFaceColor','b');\nxlabel('Simulated'); ylabel('Fit');\naxis('square');\nminVal = min([y yFit]);\nmaxVal = max([y yFit]);\naxis([minVal maxVal minVal maxVal]);\nplot([minVal maxVal],[minVal maxVal],'k');\n\nend\n\n%% f = FitContextFCScalingFun(x,y1,thePairs,theResponse1,nTrials,sigma)\n% \n% Error function for the numerical search.\nfunction f = FitContextFCScalingFun(x,y1,thePairs,theResponse1,nTrials,sigma)\n\nif (any(isnan(x)))\n error('Entry of x is Nan');\nend\nxFit = x(1);\nyFit = [y1 x(2:end)];\nlogLikely = ComputeLogLikelihood(thePairs,theResponse1,nTrials,xFit,yFit,sigma);\nf = -logLikely;\n\nend\n\n%% logLikely = ComputeLogLikelihood(thePairs,theResponse1,nTrials,xFit,yFit,sigma)\n%\n% Compute likelihood of data for any configuration\nfunction logLikely = ComputeLogLikelihood(thePairs,theResponse1,nTrials,xFit,yFit,sigma)\n\nnPairs = size(thePairs,1);\nlogLikely = 0;\nfor i = 1:nPairs\n theTheoryProb1 = ComputeProb1(xFit,yFit(thePairs(i,1)),yFit(thePairs(i,2)),sigma,@IdentityMap);\n if (isnan(theTheoryProb1))\n error('Returned probability is NaN');\n end\n if (isinf(theTheoryProb1))\n error('Returend probability is Inf');\n end\n logLikely = logLikely + theResponse1(i)*log10(theTheoryProb1) + (nTrials-theResponse1(i))*log10(1-theTheoryProb1);\nend\nif (isnan(logLikely))\n error('Returned likelihood is NaN');\nend\n\nend\n\n%% p1 = ComputeProb1(x,y1,y2,sigma,mapFunction)\n%\n% Compute probability of responding 1 given target and pair.\n% The passed mapFunction simulates the effect of context change \n% between x domain and y domain\nfunction p1 = ComputeProb1(x,y1,y2,sigma,mapFunction)\n\nyOfX = mapFunction(x);\ndiff1 = y1-yOfX;\ndiff2 = y2-yOfX;\ndiffDiff = abs(diff1)-abs(diff2);\np1 = normcdf(-diffDiff,0,sigma);\nif (p1 == 0)\n p1 = 0.0001;\nelseif (p1 == 1)\n p1 = 0.9999;\nend\n\nend\n\n%% response1 = SimulateResponse1(x,y1,y2,sigma,mapFunction)\n%\n% Simulate a trial given target and pair.\n% The passed mapFunction simulates the effect of context change \n% between x domain and y domain\nfunction response1 = SimulateResponse1(x,y1,y2,sigma,mapFunction)\n\nyOfX = mapFunction(x);\ndiff1 = y1-yOfX;\ndiff2 = y2-yOfX;\nif (abs(diff1)-abs(diff2) + normrnd(0,sigma) <= 0)\n response1 = 1;\nelse\n response1 = 0;\nend\n\nend\n\n%% yOfX = MapXToY(x)\n%\n% Example map function\nfunction yOfX = MapXToY(x)\n\nyOfX = x.^0.8 - 0.1;\n\nend\n\n%% yOfX = IdentityMap(x)\n%\n% Identity map function. 
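% Worked example of the decision model (illustrative only, not part of the
% original code; "Ex" names are hypothetical). ComputeProb1 and
% SimulateResponse1 above implement the rule "respond 1 when
% |y1 - f(x)| - |y2 - f(x)| plus zero-mean Gaussian noise of standard
% deviation sigma is <= 0", so the predicted probability of choosing y1 is
% normcdf(-(d1 - d2), 0, sigma) with d1 = |y1 - f(x)| and d2 = |y2 - f(x)|.
sigmaEx = 0.10;
d1Ex = 0.03;                                  % hypothetical |y1 - f(x)|
d2Ex = 0.08;                                  % hypothetical |y2 - f(x)|
p1Ex = normcdf(-(d1Ex - d2Ex),0,sigmaEx);     % ~0.69: y1 is the closer match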
When simulating fit\n% we use this.\nfunction yOfX = IdentityMap(x)\n \nyOfX = x;\n \nend"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "glmTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/glmTutorial.m", "size": 4724, "source_encoding": "utf_8", "md5": "e130c3872af22a7d5482aa8d8594dce6", "text": "function glmTutorial\n%\n% Demonstrate how to use Matlab's Statistics Toolbox glm routines\n% to fit data.\n%\n% This is right basic idea, but needs a little fixing up still.\n%\n% Need to:\n% a) Add better comments.\n% b) Show how to wrap a parameter search around the parameters of\n% the linking function.\n% c) Worry about the regime where the linking function is such that\n% the glm routines return NaN because of ill-conditioning.\n%\n% 9/21/13 dhb Wrote it.\n\n%% Clear\nclear; close all;\n\n%% Parameters.\n%\n% We construct a linear function of some\n% random numbers, bTrue gives the weights.\nbTrue = [2 3 5]';\nxDim = length(bTrue);\nnObservations = 100;\nnoiseSd = 0.01;\n\n%% Link function and its parameters.\n%\n% We assume that the observed data are a Naka-Rushton function\n% of the linear values. The way the glm stuff works,\n% this means that the linking function is the inverse of the\n% Naka-Rushton function.\nglobal linkParams\nlinkParams.type = 'AffinePower';\nswitch (linkParams.type)\n case 'InverseNakaRushton'\n linkParams.params(1) = 10;\n linkParams.params(2) = 3;\n linkParams.params(3) = 2;\n linkS.Link = @ForwardLink;\n linkS.Derivative = @DerivativeLink;\n linkS.Inverse = @InverseLink;\n case 'AffinePower'\n linkParams.params(1) = 1;\n linkParams.params(2) = 0.5;\n linkS.Link = @ForwardLink;\n linkS.Derivative = @DerivativeLink;\n linkS.Inverse = @InverseLink;\n case 'Power'\n linkParams.params(1) = 2;\n linkS.Link = @ForwardLink;\n linkS.Derivative = @DerivativeLink;\n linkS.Inverse = @InverseLink;\n otherwise\n error('Unknown link function type');\nend\n\n%% X variables\nX = rand(nObservations,xDim);\n\n%% Linear y is a linear function of X\nyLinear = X*bTrue;\nyNonLinear = InverseLink(yLinear);\nyObserved = yNonLinear + noiseSd*randn(size(yNonLinear));\n\n%% Figure\n[~,index] = sort(yLinear);\ntheFig = figure; clf;\nhold on\nplot(yLinear(index),yObserved(index),'ro','MarkerSize',8,'MarkerFaceColor','r');\nplot(yLinear(index),yNonLinear(index),'r');\n\n%% GLM fit\nwarnState = warning('off','stats:glmfit:IterationLimit');\nGLM = GeneralizedLinearModel.fit(X,yObserved,'Distribution','normal','Link',linkS,'Intercept',false);\nwarning(warnState);\nbFit = GLM.Coefficients.Estimate\nyLinearPred = X*bFit;\nyNonLinearPred = InverseLink(yLinearPred);\nfigure(theFig);\nplot(yLinear(index),yNonLinearPred(index),'b');\nxlabel('True linear value');\nylabel('Obs/Predicted nonlinear value');\n\n%% This is just a check that the PTB NakaRushton function properly\n% inverts itself.\nswitch (linkParams.type)\n case 'NakaRushton'\n linearInvertCheck = ForwardLink(yNonLinearPred);\n if (any(abs(yLinearPred-linearInvertCheck) > 1e-7))\n error('Naka-Rushton inversion error');\n end\n \n % A little more testing\n figure; clf;\n derivCheck = DerivativeLink(yNonLinearPred);\n subplot(2,1,1); hold on\n plot(yNonLinearPred(index),linearInvertCheck(index),'r');\n subplot(2,1,2); hold on\n plot(yNonLinearPred(index),derivCheck(index),'r');\nend\n\n\nend\n\nfunction out = ForwardLink(in)\n\nglobal linkParams\n\nswitch (linkParams.type)\n case 'InverseNakaRushton'\n in(in < 0) = 0;\n in(in > linkParams.params(1)) = linkParams.params(1);\n out = 
InvertNakaRushton(linkParams.params,in);\n case 'AffinePower'\n in(in < 0) = 0;\n out = linkParams.params(1) + in.^linkParams.params(2);\n case 'Power'\n in(in < 0) = 0;\n out = in.^linkParams.params(1);\n otherwise\n error('Unknown link function type');\nend\n\nend\n\nfunction out = DerivativeLink(in)\n\nglobal linkParams\n\nswitch (linkParams.type)\n case 'InverseNakaRushton'\n in(in < 0) = 0;\n in(in > linkParams.params(1)) = linkParams.params(1);\n out = DerivativeInvertNakaRushton(linkParams.params,in);\n case 'AffinePower'\n in(in < 0) = 0;\n out = linkParams.params(2)*in.^(linkParams.params(2)-1);\n case 'Power'\n in(in < 0) = 0;\n out = linkParams.params(2)*in.^(linkParams.params(2)-1);\n otherwise\n error('Unknown link function type');\nend\n\nend\n\nfunction out = InverseLink(in)\n\nglobal linkParams\n\nswitch (linkParams.type)\n case 'InverseNakaRushton'\n % Force input into required range\n in(in < 0) = 0;\n out = ComputeNakaRushton(linkParams.params,in);\n case 'AffinePower'\n in(in < 0) = 0;\n tmp = in - linkParams.params(1);\n tmp(tmp < 0) = 0;\n out = tmp.^(1/linkParams.params(2));\n case 'Power'\n in(in < 0) = 0;\n out = in.^(1/linkParams.params(2));\n otherwise\n error('Unknown link function type');\nend\n\nend\n\n\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "TestPredictNRAffineMatchesAnaIndividual.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/lightessModelsTutorial/TestPredictNRAffineMatchesAnaIndividual.m", "size": 29899, "source_encoding": "utf_8", "md5": "d4db10f878711848957271786b92778a", "text": "function TestPredictNRAffineMatches\n% TestPredictNRAffineMatches\n%\n% Work out what the little model does for various choices of input\n%\n% 12/4/10 dhb Wrote it.\n% 4/20/11 dhb Lot's of little changes. Switch polarity of data plots\n\n%% Clear\nclear; close all;\n\n% Define relevant directories. 
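% A minimal, self-contained sketch of the custom-link pattern used by
% glmTutorial.m above: the 'Link' argument can be a struct whose Link,
% Derivative and Inverse fields are function handles. Here a simple power
% link (eta = mu^2, mu = sqrt(eta)) stands in for the tutorial's
% Naka-Rushton-style links, fitglm is used as the newer name for
% GeneralizedLinearModel.fit, and the data are made up for illustration.
rng(1);
bDemo = [2 3]';                          % assumed true weights
Xdemo = 0.2 + 0.8*rand(200, 2);          % keep the linear predictor positive
etaDemo = Xdemo * bDemo;                 % linear predictor eta = X*b
yDemo = sqrt(etaDemo) + 0.01*randn(200,1);   % observed = inverse link of eta, plus noise
powerLink.Link       = @(mu) mu.^2;      % eta = g(mu)
powerLink.Derivative = @(mu) 2*mu;       % g'(mu)
powerLink.Inverse    = @(eta) sqrt(max(eta, 0));
glmDemo = fitglm(Xdemo, yDemo, 'Distribution', 'normal', ...
    'Link', powerLink, 'Intercept', false);
disp(glmDemo.Coefficients.Estimate);     % should be close to bDemo
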
\ncurrentDir = pwd; \ndataDir = '/Users/Shared/Matlab/Experiments/HDRExperiments/HDRAna'; \nwhichDataSet = 3; \n%% Choose model parameters and generate predictions, plot.\n% Let's one explore what the model can do.\n%DO_PRELIM_STUFF = 0;\n% if (DO_PRELIM_STUFF)\n% yRef = logspace(-2,4);\n% \n% %% Set up parameters\n% params0.rmaxRef = 1.0;\n% params0.gainRef = 10e-4;\n% params0.offsetRef = 0;\n% params0.expRef = 2.5;\n% params0.rmax = 1.01;\n% params0.gain = 0.5*10e-4;\n% params0.offset = 1;\n% params0.exp = 1;\n% \n% %% Plot effect of mucking with exponent\n% figure; clf; hold on\n% exponents = [1.5 2 2.5];\n% for i = 1:length(exponents)\n% params0.exp = exponents(i);\n% yMatch{i} = PredictNRAffineMatches(yRef,params0);\n% \n% % Plot\n% plot(log10(yMatch{i}),log10(yRef),'k','LineWidth',3);\n% xlim([-3 5]); ylim([-3 5]);\n% xlabel('Log10 Target Lum');\n% ylabel('Log10 Standard Lum/Refl');\n% end\n% end\npaletteGlossy = [-2.0458e+00 -1.8447e+00 -1.6840e+00 -1.4881e+00 -1.3251e+00 -1.1838e+00 -1.0424e+00 -9.2046e-01 -8.1417e-01 -7.1175e-01 ...\n -6.2160e-01 -5.3180e-01 -4.5087e-01 -3.7192e-01 -2.9654e-01 -2.2746e-01 -1.6488e-01 -1.0768e-01 -4.3064e-02];\n\npaletteMatte = [ NaN NaN NaN -1.4660 -1.3279 -1.1379 -1.0155 -0.8726 -0.7791 -0.6807 -0.6038 ...\n -0.5046 -0.4332 -0.3622 -0.3050 -0.2280 -0.1671 -0.0941 -0.0472];\npaletteGlossyTruncated = [NaN NaN NaN paletteGlossy(:,4:end)];\n\n%% Fit some matching data\nSARAH_TEST_DATA = 0;\nif (SARAH_TEST_DATA)\n \nelse\n switch whichDataSet\n case 1\n conditionList = {'FullGlossyfull','FullGlossytruncated', 'FullMattetruncated', 'WhiteGlossyfull','WhiteGlossytruncated', 'WhiteMattetruncated'};\n subjectList = {'bam', 'cly', 'flv', 'lta' ,'ncd', 'rpd', 'stg', 'tfm'};\n keepSomeData = [\n [ -1.3802 -1.2294 -0.9707 -0.7100 -0.3612 0.0249 0.3078 0.5715 0.7699 0.9778 1.0846 1.3089 1.4555 1.5644 1.7196 1.9207 2.0673 2.2239 2.3914];\n \n [ NaN NaN NaN -0.9522 -0.6050 -0.2339 0.1047 0.4402 0.6977 0.8384 1.0023 1.2422 1.4114 1.5759 1.7508 1.9397 2.0528 2.2656 2.4112];\n \n [ NaN NaN NaN -1.0312 -0.7059 -0.2358 0.1020 0.4629 0.6309 0.8030 1.0495 1.1958 1.3473 1.5350 1.6646 1.8909 2.0447 2.2378 2.3792];\n \n [ NaN -0.0746 0.0318 0.2519 0.6330 0.9303 1.1223 1.3450 1.5182 1.7405 1.8143 2.0090 1.9072 2.2261 NaN 2.4317 NaN NaN NaN];\n \n [ NaN NaN NaN -0.0043 0.4266 0.6850 0.9769 1.2355 1.4810 1.6573 1.7507 1.9130 2.0836 2.1939 NaN 2.4042 NaN NaN NaN];\n \n [ NaN NaN NaN 0.0356 0.3936 0.7326 0.9986 1.2143 1.4117 1.5808 1.7714 1.9066 2.0828 2.1358 2.3293 2.3833 NaN NaN NaN];]; \n case 2\n conditionList = {'FullGlossyfull2','FullMattetruncated2', 'Full30Glossyfull2', 'Full30Mattetruncated2', 'White30Glossyfull','White30Mattetruncated', 'Gray30Glossyfull', 'Gray30Mattetruncated'};\n subjectList = {'bam', 'cly', 'ncd', 'stg', 'tfm'};\n \n keepSomeData = [\n [ -1.0849 -0.9622 -0.8648 -0.6281 -0.3869 -0.1036 0.2128 0.4567 0.7647 0.9488 1.1614 ...\n 1.3723 1.5101 1.6622 1.7990 1.9214 2.0359 2.2311 2.3199];\n \n [ NaN NaN NaN -0.8392 -0.5873 -0.1825 0.1171 0.4655 0.7031 0.9464 1.1034 ...\n 1.3530 1.4774 1.6270 1.8231 1.9047 2.1262 2.2451 2.2713]\n \n [ NaN NaN NaN NaN NaN 1.2849 1.2088 1.2595 1.3693 1.4580 1.5826 ...\n 1.7126 1.8139 1.9713 2.1010 2.1720 2.3164 2.4515 2.5170];\n \n [ NaN NaN NaN NaN NaN NaN 1.1963 1.2787 1.3236 1.4529 1.5473 ...\n 1.6579 1.8419 1.8985 2.0625 2.1901 2.3280 2.4015 2.5215];\n \n [ NaN NaN NaN NaN NaN 1.2475 1.2973 1.3907 1.6027 1.7238 1.8637...\n 1.9849 2.0992 2.2393 2.3454 2.4286 2.5200 2.5425 2.6196];\n \n [NaN NaN NaN NaN NaN 1.2689 1.2883 
1.4364 1.5560 1.7329 1.8336...\n 1.9731 2.1075 2.2431 2.3248 2.4025 2.5065 2.5691 2.6030];\n \n [ NaN NaN NaN -0.0572 -0.0016 0.0921 0.2724 0.4301 0.5819 0.7071 0.7965 ...\n 0.9089 1.0300 1.0749 1.1734 1.2381 1.2656 NaN NaN];\n \n [NaN NaN NaN NaN -0.0920 0.0354 0.2398 0.4029 0.5708 0.6956 0.7910...\n 0.9222 1.0426 1.0873 1.1855 1.2379 NaN NaN NaN];\n ]\n case 3\n subjectList={'ajh', 'arp', 'kjn', 'orp' ,'rvn'};\n \n conditionList = {'FullGlossyfull3', 'FullGlossyfull4','Full30Glossyfull3','Full30Glossyfull4'...\n , 'Full1000Glossyfull3', 'FullGray30Glossyfull', 'FullGray1000Glossyfull', ...\n 'FullMeanPlusGlossyfull', 'Full30MeanPlusGlossyfull', 'FullGray30MeanPlusGlossyfull', 'FullMeanPlusGlossyfull2', 'FullMeanMinusGlossyfull2'};\n keepSomeData = [[-1.2469 -1.0194 -0.6968 -0.3888 -0.1960 0.1387 0.3627 0.6095 0.8034 0.8159 ...\n 0.9833 1.1952 1.3942 1.5388 1.5710 1.7905 2.0848 2.0719 2.3168]; % full3\n [ -1.4012 -0.9285 -0.7862 -0.4952 -0.2476 0.0172 0.2259 0.4565 0.5586 0.7049 ...\n 0.8431 1.0677 1.1933 1.3972 1.6246 1.7266 1.8868 2.1460 2.2618]; % full4\n \n [NaN NaN NaN NaN 1.1345 1.1445 1.2867 1.3138 1.3704 1.5017 ...\n 1.5732 1.6708 1.7791 1.8904 1.9778 2.0832 2.2022 2.3184 2.4071]; % full30 3\n \n [NaN NaN NaN NaN NaN NaN 1.1932 1.2166 1.2841 1.4061 ...\n 1.4842 1.6065 1.6801 1.8158 1.9317 2.0486 2.1714 2.2893 2.4259]; % full30 4\n \n [ -0.3140 -0.2027 -0.0686 0.0819 0.2873 0.4310 0.5986 0.7905 0.8960 1.0847 ...\n 1.2355 1.3290 1.4651 1.6356 1.7116 1.8833 1.9983 2.1780 2.3949]; % full 1000\n \n [ NaN NaN NaN NaN 0.4098 0.4786 0.6039 0.7330 0.8416 0.8923 ...\n 0.9797 1.1226 1.1993 1.3123 1.4279 1.5174 1.6544 1.6851 NaN]; % full Gray30\n \n [ -1.0961 -0.8952 -0.7221 -0.4952 -0.3652 -0.1803 -0.0603 0.0522 0.3139 0.3222 ...\n 0.4816 0.6810 0.8161 0.9925 1.1563 1.3792 1.5010 1.6713 1.7328]; % full gray 1000;\n \n [-1.2028 -0.9204 -0.6084 -0.2414 -0.0021 0.0723 0.2916 0.5297 0.6825 0.8876 ...\n 0.9969 1.2277 1.2544 1.4292 1.6247 1.8370 2.0001 2.1447 2.2880]; % full mean plus;\n \n [NaN NaN NaN NaN NaN NaN 1.1726 1.2939 1.3940 1.5356 ...\n 1.5940 1.7435 1.8141 1.9606 2.0642 2.1749 2.3042 2.3794 2.4674]; % full30 mean plus;\n \n [NaN NaN NaN NaN 0.4270 0.4158 0.5322 0.6765 0.7749 0.8527 ...\n 0.9992 1.1176 1.2819 1.3642 1.4917 1.6065 1.6876 NaN NaN]; % fullgray30\n \n [-7.5486e-01 -6.3016e-01 -3.7002e-01 -7.5043e-02 2.5521e-01 4.1869e-01 6.5650e-01 8.2140e-01 9.3936e-01 ...\n 1.1518e+00 1.3266e+00 1.3894e+00 1.4861e+00 1.7282e+00 1.8061e+00 1.9940e+00 2.1053e+00 2.2826e+00 2.3641e+00]; % fullmeanplus2\n \n [-8.8795e-01 -7.5641e-01 -5.6947e-01 -4.3999e-01 -2.9319e-01 -1.1604e-01 5.8502e-02 2.5986e-01 3.7464e-01 ...\n 5.2778e-01 6.5286e-01 8.2851e-01 9.8959e-01 1.1379e+00 1.4417e+00 1.6340e+00 1.7811e+00 2.0558e+00 2.1961e+00]; % fullmeanminus\n ];\n end\n \n \n for i = 1:length(conditionList)\n for j=1:length(subjectList)\n luminanceMatchesPerChip{i,j} = load(fullfile(dataDir,'data',conditionList{i},subjectList{j},[conditionList{i} '-' subjectList{j} '-TestAverages.txt']));\n luminanceMatchesPerChip{i,j} = luminanceMatchesPerChip{i,j}(:, 2:end);\n % averageLumMatchesPerSubject{j}(:,i) = nanmean(luminanceMatchesPerChip{i,j},2); \n averageLumMatchesPerSubject{i}(:,j) = nanmean(luminanceMatchesPerChip{i,j},2);\n end\n end\n \n averageLumMatchesPerSubjectAll{i} = averageLumMatchesPerSubject{i};\n %% See how many matches per subject are made. \n % If less than 3 subjects assigned anything to this chip, drop them. 
\n for g = 1: length(averageLumMatchesPerSubject)\n nSubjectMatches{g} = sum(isnan(averageLumMatchesPerSubject{g}),2); \n end\n \n for g = 1: length(nSubjectMatches)\n for r = 1:length(nSubjectMatches{g})\n if (nSubjectMatches{g}(r) > 2)\n averageLumMatchesPerSubject{g}(r,:) = nan(1, size(averageLumMatchesPerSubject{g},2));\n end\n end\n end\n cd(currentDir); \n xDataLim = [-2 3];\n yDataLim = [-3 0];\n \n fid = fopen(['ANA_TEST_DATA/ParamDump_All' num2str(whichDataSet) '_ALL.txt'],'w');\n figPrefix = 'ANA_TEST_DATA/';\n RESPONSE_REMAP = 0;\nend\n\n\n\n% Optional search to find reference parameters that put the responses rougly equally spaced on the y-axis. This isn't theoretically critical, but\n% seems as good an idea as any.\nFITREF = 0;\nif (FITREF)\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','iter','LargeScale','off','Algorithm','active-set');\n options = optimset(options,'MaxFunEvals',1200);\n targetRespRef = linspace(critValue,1-critValue,length(someDataRef));\n conTolRef = (targetRespRef(2)-targetRespRef(1));\n x0 = ParamsToListRef(params0);\n vlb = [x0(1)/100 x0(2) -100*mean(someDataRef) 1];\n vub = [x0(1)*100 x0(2) 100*mean(someDataRef) 4];\n x1 = fmincon(@InlineMinFunctionRef,x0,[],[],[],[],vlb,vub,@InlineConFunctionRef,options);\n params0 = ListToParamsRef(x1,params0);\nend\n\n%% Now do the fitting wrt to the reference paramters\n% rangeFig = figure;\n dataFig = figure;\n %position = get(gcf,'Position');\n %position(3) = 1000; position(4) = 400;\n %set(gcf,'Position',position);\n \n\nallData = cell(1,length(conditionList)); \nfor i = 1:length(conditionList)\n temptemp = [];\n for j = 1:length(subjectList)\n temptemp = [temptemp; [averageLumMatchesPerSubject{i}(:,j)']];\n end\n %allDataMean(i,:) = nanmean(temptemp, 1);\n acrossSubjectLumAverages{i} = NaN(size(paletteGlossy',1),1);\n for g = 1:size(paletteGlossy',1)\n okindex = ~isnan(averageLumMatchesPerSubject{i}(g,:)');\n tt=mean(averageLumMatchesPerSubject{i}(g,okindex))';\n acrossSubjectLumAverages{i}(g,1)=tt;\n end\n temptemp = [temptemp; acrossSubjectLumAverages{i}(:)'] ; \n allData{i} = temptemp; \n clear temptemp\nend\n\n%% for debugging purposes.\nfor i = 1:length(conditionList)\ncheck = keepSomeData(i,:) - acrossSubjectLumAverages{i}(:)'; \nend\n%%\nfor i = 1: length(conditionList)\n figure(dataFig); clf;\n someData = allData{i};\n for whichData = 1:(size(someData,1))\n \n switch(conditionList{i})\n case {'FullGlossyfull','WhiteGlossyfull', 'FullGlossyfull2','Full30Glossyfull2', 'White30Glossyfull','Gray30Glossyfull',...\n 'FullGlossyfull3', 'Full30Glossyfull3', 'Full1000Glossyfull3', 'FullGray30Glossyfull'...\n 'FullGlossyfull4', 'Full30Glossyfull4', 'FullGray1000Glossyfull', 'FullGray30MeanPlusGlossyfull'...\n 'FullMeanPlusGlossyfull', 'Full30MeanPlusGlossyfull', 'FullMeanPlusGlossyfull2', 'FullMeanMinusGlossyfull2'}\n someDataRef = 10.^[paletteGlossy(1,:)];\n case {'FullGlossytruncated','WhiteGlossytruncated'}\n someDataRef = 10.^paletteMatte(1,:);\n case {'FullMattetruncated','WhiteMattetruncated', 'FullMattetruncated2','Full30Mattetruncated2','White30Mattetruncated','Gray30Mattetruncated'}\n someDataRef = 10.^paletteMatte(1,:);\n end\n %% Initialize parameters. Set reference rmax to 1 and exponent to 2. 
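% Illustrative sketch of the response model set up just below, assuming
% the conventional Naka-Rushton form R = Rmax*y^n/(y^n + s^n) with
% parameter vector [Rmax s n]; this matches how ComputeNakaRushton and
% InvertNakaRushton are called in this file (s = 1), but the Psychtoolbox
% functions remain the authoritative implementations. The reference range
% below is an assumed example value.
nrFwd = @(p, y) p(1) .* y.^p(3) ./ (y.^p(3) + p(2).^p(3));
nrInv = @(p, R) p(2) .* (R ./ (p(1) - R)).^(1 ./ p(3));
pDemo = [1 1 3];                          % rmaxRef = 1, s = 1, expRef = 3
crit = 0.01;
minResp = nrInv(pDemo, crit);             % input giving response crit
maxResp = nrInv(pDemo, 1 - crit);         % input giving response 1 - crit
refRange = [0.01 1];                      % assumed reference luminance range
gainDemo = (maxResp - minResp) / (refRange(2) - refRange(1));
offsetDemo = refRange(1) - minResp / gainDemo;
% The affine front end g*(L - L0) then maps the reference range onto the
% near-full response range:
rCheck = nrFwd(pDemo, gainDemo * (refRange - offsetDemo));
fprintf('Responses at range ends: %0.3f %0.3f\n', rCheck(1), rCheck(2));
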
Find\n % gain and offset that map the luminances across the central\n % portion\n % of the response range.\n useDataRef = someDataRef;\n clear params0\n params0.rmaxRef = 1.0;\n params0.expRef = 3;\n critValue = 0.01;\n minResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],critValue);\n maxResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],1-critValue);\n minRef = min(someDataRef);\n maxRef = max(someDataRef);\n params0.gainRef = (maxResp-minResp)/(maxRef-minRef);\n params0.offsetRef = minRef-minResp/params0.gainRef;\n paramsRefNoFit = params0;\n lumVals = logspace(log10(someDataRef(1)),log10(someDataRef(end)),1000);\n lumVals = logspace(-3,0,1000);\n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefForRemap = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n respRefRemapped = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefForRemap)/params0.gainRef+params0.offsetRef);\n %remapFig = figure; clf; hold on\n %plot(respRefForRemap,respRefRemapped,'r','LineWidth',2);\n %xlim([0 1]);\n %ylim(yDataLim);\n %xlabel('Visual Response');\n %ylabel('Predicted Reflectance Match');\n% cd(figPrefix);\n% \n% \n% if SARAH_TEST_DATA;\n% \n% else\n% savefig(['ResponseRemapping' num2str(whichDataSet) '.pdf'],remapFig,'pdf');\n% save(['ResponseRemappingData' num2str(whichDataSet)],'respRefForRemap','respRefRemapped');\n% \n% end\n% cd ..\n% \n \n \n someDataMatch = 10.^[someData(whichData,:)];\n \n \n \n okIndex = find(~isnan(someDataMatch));\n useDataMatch = someDataMatch(okIndex);\n useDataRef = someDataRef(okIndex);\n \n if whichDataSet == 1\n subplot(3,3,whichData,'FontSize', 4); hold on\n else\n subplot(3,2,whichData,'FontSize', 4); hold on\n end\n plot(log10(useDataMatch),log10(useDataRef),'bo','MarkerFaceColor','b','MarkerSize',4);\n \n xlabel('Log10 Target Lum', 'FontSize',6);\n ylabel('Log10 Standard Lum/Refl', 'FontSize',6);\n \n setAverage = size(someData,1); \n if whichData == setAverage\n title(['Average' conditionList{i}], 'FontSize',6);\n else\n title([subjectList{whichData} ' ' conditionList{i}], 'FontSize',6);\n end\n % Parameter search options\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. 
Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off','Algorithm','active-set');\n \n % Initialize match parameters in same way\n endPointWeight = 0;\n params0.rmax = params0.rmaxRef;\n params0.exp = params0.expRef;\n params0.gain = params0.gainRef;\n params0.offset = params0.offsetRef;\n someDataPred0 = PredictNRAffineMatches(someDataRef,params0);\n %plot(log10(someDataRef),log10(someDataPred0),'y','LineWidth',1);\n %params0\n if whichData == setAverage\n fprintf(fid,['Dataset ' conditionList{i} ' Average ' '\\n']);\n else\n fprintf(fid,['Dataset ' conditionList{i} ' ' subjectList{whichData} '\\n']);\n end\n fprintf(fid,'\\tReference params: gain = %0.2g, offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',params0.gainRef,params0.offsetRef,params0.rmaxRef,params0.expRef);\n \n % Fit, first just gain\n x0 = ParamsToList(params0);\n vlb = [x0(1) x0(2)/100 x0(3:end)];\n vub = [x0(1) x0(2)*100 x0(3:end)];\n x1 = fmincon(@InlineMinFunction,x0,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x1,params0);\n someDataPred1 = PredictNRAffineMatches(someDataRef,params0);\n %plot(log10(someDataPred1),log10(someDataRef),'b','LineWidth',1);\n %params0\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain only model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Fit, gain and offset\n vlb = [x1(1) x1(2)/100 x1(3) -100*abs(x1(4)) x1(5)];\n vub = [x1(1) x1(2)*100 x1(3) 100*abs(x1(4)) x1(5)];\n x2 = fmincon(@InlineMinFunction,x1,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x2,params0);\n someDataPred2 = PredictNRAffineMatches(someDataRef,params0);\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain/Offset model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n paramsGainOffset = params0;\n %params0\n \n % Exp\n FITEXP = 1;\n if (FITEXP)\n vlb = [x2(1) x2(2)/100 x2(3) -100*abs(x2(4)) 0.5];\n vub = [x2(1) x2(2)*100 x2(3) 100*abs(x2(4)) 4];\n endPointWeight = 10;\n x3 = fmincon(@InlineMinFunction,x2,[],[],[],[],vlb,vub,[],options);\n endPointWeight = 0;\n x3 = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x3,params0);\n someDataPred3 = PredictNRAffineMatches(someDataRef,params0);\n else\n x3 = x2;\n someDataPred3 = PredictNRAffineMatches(someDataRef,params0);\n end\n fprintf(fid,'\\tGain/Offset/Exp model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Let rMax vary too. This doesn't add much if exponent varies.. Tp the fits, so I\n % uncluttered plots by removing. 
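% Side note in sketch form: the staged fits above hold parameters fixed in
% fmincon simply by setting a parameter's lower and upper bounds equal to
% its starting value. A toy example of the same trick (the objective and
% numbers are made up for illustration):
objDemo = @(p) sum((p - [1 2 3]).^2);     % toy objective with minimum at [1 2 3]
p0 = [0 0 0];
vlbDemo = [-10  0  0];                    % free only p(1); bounds on p(2), p(3)
vubDemo = [ 10  0  0];                    % equal their starting value of 0
optsDemo = optimset('fmincon');
optsDemo = optimset(optsDemo, 'Display', 'off', 'Algorithm', 'active-set');
pStage1 = fmincon(objDemo, p0, [], [], [], [], vlbDemo, vubDemo, [], optsDemo);
% Then release the remaining parameters, warm-starting from the stage-1 fit:
pStage2 = fmincon(objDemo, pStage1, [], [], [], [], ...
    -10*ones(1,3), 10*ones(1,3), [], optsDemo);
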
Have not looked at whether varying\n % rMax can be substituted for varying the exponent.\n FITMAX = 0;\n if (FITMAX)\n vlb = [x3(1) x3(2)/100 0.5 -100*abs(x3(4)) x3(5)];\n vub = [x3(1) x3(2)*100 2 100*abs(x3(4)) x3(5)];\n x = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x,params0);\n someDataPred = PredictNRAffineMatches(someDataRef,params0);\n plot(log10(someDataPred3),log10(someDataRef),'k','LineWidth',1.5);\n %params0\n else\n x = x3;\n someDataPred = PredictNRAffineMatches(someDataRef,params0);\n end\n \n % Dump of interesting parameters\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tPredicted (actual) black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(1),someDataMatch(1),someDataPred(end),someDataMatch(end));\n fprintf(fid,'\\tOne-in predicted black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(2),someDataMatch(2),someDataPred(end-1),someDataMatch(end-1));\n \n % Plot stuff of interest\n plot(log10(someDataPred),log10(someDataRef),'r','LineWidth',2);\n plot(log10(someDataPred2),log10(someDataRef),'g','LineWidth',1);\n xlim(xDataLim); ylim(yDataLim);\n \n % Add plot of response functions for ref and match\n % Subtract the old offset, and truncate below 0 to zero.\n % We allow an optional remapping of the response back to the\n % luminance/reflectance space of the reference matches. This\n % mapping is static across contexts. This turns out not to\n % terribly interesting.\n lumVals = logspace(-2,3,1000);\n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefSmooth = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gainRef*(someDataRef-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRef = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRef)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(lumVals-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatchSmooth = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatchSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatchSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(someDataMatch-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatch = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatch = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatch)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsGainOffset.gain*(lumVals-paramsGainOffset.offset);\n ySub(ySub <= 0) = 0+eps;\n respGainOffsetSmooth = ComputeNakaRushton([paramsGainOffset.rmax 1 paramsGainOffset.exp],ySub);\n if (RESPONSE_REMAP)\n respGainOffsetSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respGainOffsetSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsRefNoFit.gainRef*(lumVals-paramsRefNoFit.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefNoFitSmooth = ComputeNakaRushton([paramsRefNoFit.rmaxRef 1 paramsRefNoFit.expRef],ySub);\n% if (RESPONSE_REMAP)\n% respRefNoFitSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefNoFitSmooth)/params0.gainRef+params0.offsetRef);\n% end\n% \n% % subplot(1,2,2); 
hold on\n% % plot(log10(someDataRef),respRef,'ko','MarkerFaceColor','k','MarkerSize',6);\n% % plot(log10(lumVals),respRefSmooth,'k:','LineWidth',2);\n% % %plot(log10(lumVals),respRefNoFitSmooth,'b','LineWidth',1);\n% %\n% % plot(log10(someDataMatch),respMatch,'bo','MarkerFaceColor','b','MarkerSize',8);\n% % plot(log10(lumVals),respMatchSmooth,'r','LineWidth',2);\n% % plot(log10(lumVals),respGainOffsetSmooth,'g','LineWidth',1);\n% %\n% xlim(xDataLim);\n% if (RESPONSE_REMAP)\n% ylim(yDataLim);\n% ylabel('Remapped Response');\n% else\n% ylim([0 1.2]);\n% ylabel('Response');\n% end\n% xlabel('Log10 luminance');\n% \n% % Save figure\n cd(figPrefix);\n savefig(['TestFit_' num2str(whichDataSet) ' ' conditionList{i} 'ALL.pdf'],dataFig,'pdf');\n cd('..');\n \n fprintf(fid,'\\n');\n \n % %% Fill output summary structure\n % if (SARAH_TEST_DATA)\n %\n % else\n % summaryStructs(whichData).whitePoint = someDataPred(end);\n % summaryStructs(whichData).blackPoint = someDataPred(1);\n % summaryStructs(whichData).range = someDataPred(end) - someDataPred(1);\n % summaryStructs(whichData).exp = params0.exp;\n % predictExpFromWB(whichData,1) = summaryStructs(whichData).whitePoint;\n % predictExpFromWB(whichData,2) = log10(summaryStructs(whichData).range);\n % expVals(whichData,1) = summaryStructs(whichData).exp;\n % %% Range versus exp figure\n % figure(rangeFig)\n % subplot(1,2,1); hold on\n % plot(summaryStructs(whichData).range,summaryStructs(whichData).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n % xlabel('Range'); ylabel('Exponent');\n % xlim([0 300]); ylim([0 4]);\n % subplot(1,2,2); hold on\n % plot(summaryStructs(whichData).whitePoint,summaryStructs(whichData).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n % xlabel('White Point'); ylabel('Exponent');\n % xlim([0 300]); ylim([0 4]);\n % end\n \n end\nend\nfclose(fid);\n\n%% Try to predict exponents\n% expRegCoefs = predictExpFromWB\\expVals;\n% predictedExpVals = predictExpFromWB*expRegCoefs;\n% expPredFig = figure; clf; hold on\n% plot(expVals,predictedExpVals,'ro','MarkerSize',8,'MarkerFaceColor','r');\n% plot([0 4],[0 4],'k');\n% xlim([0 4]); ylim([0 4]);\n% xlabel('Exponent'); ylabel('Predicted Exponent');\n% axis('square');\n\n%% Write out summary structs\n% cd(figPrefix);\n% if (SARAH_TEST_DATA)\n% \n% else\n% WriteStructsToText(['SummaryDataAll', num2str(whichDataSet), '.txt'],summaryStructs);\n% end\n% cd('..');\n\n%% Save plot of exponent versus range\n% cd(figPrefix);\n% if (SARAH_TEST_DATA)\n% \n% else\n% savefig(['ExpVersusRange', num2str(whichDataSet),'.pdf'],rangeFig,'pdf');\n% end\n% cd('..');\n\n%% INLINE FUNCTION TO BE USED FOR CTF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. 
This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunction(x)\n paramsInline = ListToParams(x,params0);\n yPred = PredictNRAffineMatches(useDataRef,paramsInline);\n yPred(yPred <= 0) = 0 + eps;\n yDiff = log10(useDataMatch)-log10(yPred);\n f = sum(yDiff.^2) + endPointWeight*yDiff(1).^2 + endPointWeight*yDiff(end).^2;\n end\n\n%% INLINE FUNCTION TO BE USED FOR REF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n f = sum(abs(yDiff));\n %f = sum(yDiff.^2);\n end\n function [g,geq] = InlineConFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n g = max(abs(yDiff))-conTolRef;\n geq = 0;\n end\nend\n\n%% Param translation\nfunction params = ListToParams(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.gain = x(2);\nparams.rmax = x(3);\nparams.offset = x(4);\nparams.exp = x(5);\n\nend\n\nfunction x = ParamsToList(params)\n\nx = [params.gainRef params.gain params.rmax params.offset params.exp];\n\nend\n\nfunction params = ListToParamsRef(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.rmaxRef = x(2);\nparams.offsetRef = x(3);\nparams.expRef = x(4);\n\nend\n\nfunction x = ParamsToListRef(params)\n\nx = [params.gainRef params.rmaxRef params.offsetRef params.expRef];\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "TestPredictNRAffineMatchesAna.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/lightessModelsTutorial/TestPredictNRAffineMatchesAna.m", "size": 29372, "source_encoding": "utf_8", "md5": "b34c4e02b1fa332bece2bf4d9fc26526", "text": "function TestNRAPredictMatches\n% TestNRAPredictMatches\n%\n% Work out what the little model does for various choices of input\n%\n% 12/4/10 dhb Wrote it.\n% 4/20/11 dhb Lot's of little changes. Switch polarity of data plots\n\n%% Clear\nclear; close all;\n\n% Define relevant directories. 
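% Minimal sketch of the workspace sharing that the "inline" (nested)
% functions defined just above rely on: a function nested inside another
% function can read and write variables of the enclosing function, which
% is how InlineMinFunction sees useDataRef, params0 and endPointWeight
% without taking them as arguments. The names below are made up for
% illustration only.
function NestedScopeSketch
    sharedScale = 2;                      % lives in the parent workspace
    vals = arrayfun(@ScaleIt, 1:3);       % nested function sees sharedScale
    fprintf('%g %g %g\n', vals);
    function out = ScaleIt(in)
        out = sharedScale * in;           % inherited from the parent workspace
    end
end
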
\ncurrentDir = pwd; \ndataDir = '/Users/Shared/Matlab/Experiments/HDRExperiments/HDRAna'; \nwhichDataSet = 2; \n%% Choose model parameters and generate predictions, plot.\n% Let's one explore what the model can do.\n%DO_PRELIM_STUFF = 0;\n% if (DO_PRELIM_STUFF)\n% yRef = logspace(-2,4);\n% \n% %% Set up parameters\n% params0.rmaxRef = 1.0;\n% params0.gainRef = 10e-4;\n% params0.offsetRef = 0;\n% params0.expRef = 2.5;\n% params0.rmax = 1.01;\n% params0.gain = 0.5*10e-4;\n% params0.offset = 1;\n% params0.exp = 1;\n% \n% %% Plot effect of mucking with exponent\n% figure; clf; hold on\n% exponents = [1.5 2 2.5];\n% for i = 1:length(exponents)\n% params0.exp = exponents(i);\n% yMatch{i} = NRAPredictMatches(yRef,params0);\n% \n% % Plot\n% plot(log10(yMatch{i}),log10(yRef),'k','LineWidth',3);\n% xlim([-3 5]); ylim([-3 5]);\n% xlabel('Log10 Target Lum');\n% ylabel('Log10 Standard Lum/Refl');\n% end\n% end\npaletteGlossy = [-2.0458e+00 -1.8447e+00 -1.6840e+00 -1.4881e+00 -1.3251e+00 -1.1838e+00 -1.0424e+00 -9.2046e-01 -8.1417e-01 -7.1175e-01 ...\n -6.2160e-01 -5.3180e-01 -4.5087e-01 -3.7192e-01 -2.9654e-01 -2.2746e-01 -1.6488e-01 -1.0768e-01 -4.3064e-02];\n\npaletteMatte = [ NaN NaN NaN -1.4660 -1.3279 -1.1379 -1.0155 -0.8726 -0.7791 -0.6807 -0.6038 ...\n -0.5046 -0.4332 -0.3622 -0.3050 -0.2280 -0.1671 -0.0941 -0.0472];\npaletteGlossyTruncated = [NaN NaN NaN paletteGlossy(:,4:end)];\n\n%% Fit some matching data\nSARAH_TEST_DATA = 0;\nif (SARAH_TEST_DATA)\n \nelse\n switch whichDataSet\n case 1\n conditionList = {'FullGlossyfull','FullGlossytruncated', 'FullMattetruncated', 'WhiteGlossyfull','WhiteGlossytruncated', 'WhiteMattetruncated'};\n subjectList = {'bam', 'cly', 'flv', 'lta' ,'ncd', 'rpd', 'stg', 'tfm'};\n keepSomeData = [\n [ -1.3802 -1.2294 -0.9707 -0.7100 -0.3612 0.0249 0.3078 0.5715 0.7699 0.9778 1.0846 1.3089 1.4555 1.5644 1.7196 1.9207 2.0673 2.2239 2.3914];\n \n [ NaN NaN NaN -0.9522 -0.6050 -0.2339 0.1047 0.4402 0.6977 0.8384 1.0023 1.2422 1.4114 1.5759 1.7508 1.9397 2.0528 2.2656 2.4112];\n \n [ NaN NaN NaN -1.0312 -0.7059 -0.2358 0.1020 0.4629 0.6309 0.8030 1.0495 1.1958 1.3473 1.5350 1.6646 1.8909 2.0447 2.2378 2.3792];\n \n [ NaN -0.0746 0.0318 0.2519 0.6330 0.9303 1.1223 1.3450 1.5182 1.7405 1.8143 2.0090 1.9072 2.2261 NaN 2.4317 NaN NaN NaN];\n \n [ NaN NaN NaN -0.0043 0.4266 0.6850 0.9769 1.2355 1.4810 1.6573 1.7507 1.9130 2.0836 2.1939 NaN 2.4042 NaN NaN NaN];\n \n [ NaN NaN NaN 0.0356 0.3936 0.7326 0.9986 1.2143 1.4117 1.5808 1.7714 1.9066 2.0828 2.1358 2.3293 2.3833 NaN NaN NaN];]; \n case 2\n conditionList = {'FullGlossyfull2','FullMattetruncated2', 'Full30Glossyfull2', 'Full30Mattetruncated2', 'White30Glossyfull','White30Mattetruncated', 'Gray30Glossyfull', 'Gray30Mattetruncated'};\n subjectList = {'bam', 'cly', 'ncd', 'stg', 'tfm'};\n \n keepSomeData = [\n [ -1.0849 -0.9622 -0.8648 -0.6281 -0.3869 -0.1036 0.2128 0.4567 0.7647 0.9488 1.1614 ...\n 1.3723 1.5101 1.6622 1.7990 1.9214 2.0359 2.2311 2.3199];\n \n [ NaN NaN NaN -0.8392 -0.5873 -0.1825 0.1171 0.4655 0.7031 0.9464 1.1034 ...\n 1.3530 1.4774 1.6270 1.8231 1.9047 2.1262 2.2451 2.2713]\n \n [ NaN NaN NaN NaN NaN 1.2849 1.2088 1.2595 1.3693 1.4580 1.5826 ...\n 1.7126 1.8139 1.9713 2.1010 2.1720 2.3164 2.4515 2.5170];\n \n [ NaN NaN NaN NaN NaN NaN 1.1963 1.2787 1.3236 1.4529 1.5473 ...\n 1.6579 1.8419 1.8985 2.0625 2.1901 2.3280 2.4015 2.5215];\n \n [ NaN NaN NaN NaN NaN 1.2475 1.2973 1.3907 1.6027 1.7238 1.8637...\n 1.9849 2.0992 2.2393 2.3454 2.4286 2.5200 2.5425 2.6196];\n \n [NaN NaN NaN NaN NaN 1.2689 1.2883 
1.4364 1.5560 1.7329 1.8336...\n 1.9731 2.1075 2.2431 2.3248 2.4025 2.5065 2.5691 2.6030];\n \n [ NaN NaN NaN -0.0572 -0.0016 0.0921 0.2724 0.4301 0.5819 0.7071 0.7965 ...\n 0.9089 1.0300 1.0749 1.1734 1.2381 1.2656 NaN NaN];\n \n [NaN NaN NaN NaN -0.0920 0.0354 0.2398 0.4029 0.5708 0.6956 0.7910...\n 0.9222 1.0426 1.0873 1.1855 1.2379 NaN NaN NaN];]; \n case 3\n subjectList={'ajh', 'arp', 'kjn', 'orp' ,'rvn'};\n \n conditionList = {'FullGlossyfull3', 'FullGlossyfull4','Full30Glossyfull3','Full30Glossyfull4'...\n , 'Full1000Glossyfull3', 'FullGray30Glossyfull', 'FullGray1000Glossyfull', ...\n 'FullMeanPlusGlossyfull', 'Full30MeanPlusGlossyfull', 'FullGray30MeanPlusGlossyfull', 'FullMeanPlusGlossyfull2', 'FullMeanMinusGlossyfull2'};\n keepSomeData = [[-1.2469 -1.0194 -0.6968 -0.3888 -0.1960 0.1387 0.3627 0.6095 0.8034 0.8159 ...\n 0.9833 1.1952 1.3942 1.5388 1.5710 1.7905 2.0848 2.0719 2.3168]; % full3\n [ -1.4012 -0.9285 -0.7862 -0.4952 -0.2476 0.0172 0.2259 0.4565 0.5586 0.7049 ...\n 0.8431 1.0677 1.1933 1.3972 1.6246 1.7266 1.8868 2.1460 2.2618]; % full4\n \n [NaN NaN NaN NaN 1.1345 1.1445 1.2867 1.3138 1.3704 1.5017 ...\n 1.5732 1.6708 1.7791 1.8904 1.9778 2.0832 2.2022 2.3184 2.4071]; % full30 3\n \n [NaN NaN NaN NaN NaN NaN 1.1932 1.2166 1.2841 1.4061 ...\n 1.4842 1.6065 1.6801 1.8158 1.9317 2.0486 2.1714 2.2893 2.4259]; % full30 4\n \n [ -0.3140 -0.2027 -0.0686 0.0819 0.2873 0.4310 0.5986 0.7905 0.8960 1.0847 ...\n 1.2355 1.3290 1.4651 1.6356 1.7116 1.8833 1.9983 2.1780 2.3949]; % full 1000\n \n [ NaN NaN NaN NaN 0.4098 0.4786 0.6039 0.7330 0.8416 0.8923 ...\n 0.9797 1.1226 1.1993 1.3123 1.4279 1.5174 1.6544 1.6851 NaN]; % full Gray30\n \n [ -1.0961 -0.8952 -0.7221 -0.4952 -0.3652 -0.1803 -0.0603 0.0522 0.3139 0.3222 ...\n 0.4816 0.6810 0.8161 0.9925 1.1563 1.3792 1.5010 1.6713 1.7328]; % full gray 1000;\n \n [-1.2028 -0.9204 -0.6084 -0.2414 -0.0021 0.0723 0.2916 0.5297 0.6825 0.8876 ...\n 0.9969 1.2277 1.2544 1.4292 1.6247 1.8370 2.0001 2.1447 2.2880]; % full mean plus;\n \n [NaN NaN NaN NaN NaN NaN 1.1726 1.2939 1.3940 1.5356 ...\n 1.5940 1.7435 1.8141 1.9606 2.0642 2.1749 2.3042 2.3794 2.4674]; % full30 mean plus;\n \n [NaN NaN NaN NaN 0.4270 0.4158 0.5322 0.6765 0.7749 0.8527 ...\n 0.9992 1.1176 1.2819 1.3642 1.4917 1.6065 1.6876 NaN NaN]; % fullgray30\n \n [-7.5486e-01 -6.3016e-01 -3.7002e-01 -7.5043e-02 2.5521e-01 4.1869e-01 6.5650e-01 8.2140e-01 9.3936e-01 ...\n 1.1518e+00 1.3266e+00 1.3894e+00 1.4861e+00 1.7282e+00 1.8061e+00 1.9940e+00 2.1053e+00 2.2826e+00 2.3641e+00]; % fullmeanplus2\n \n [-8.8795e-01 -7.5641e-01 -5.6947e-01 -4.3999e-01 -2.9319e-01 -1.1604e-01 5.8502e-02 2.5986e-01 3.7464e-01 ...\n 5.2778e-01 6.5286e-01 8.2851e-01 9.8959e-01 1.1379e+00 1.4417e+00 1.6340e+00 1.7811e+00 2.0558e+00 2.1961e+00]; % fullmeanminus\n ];\n end\n \n \n for i = 1:length(conditionList)\n for j=1:length(subjectList)\n luminanceMatchesPerChip{i,j} = load(fullfile(dataDir,'data',conditionList{i},subjectList{j},[conditionList{i} '-' subjectList{j} '-TestAverages.txt']));\n luminanceMatchesPerChip{i,j} = luminanceMatchesPerChip{i,j}(:, 2:end);\n % averageLumMatchesPerSubject{j}(:,i) = nanmean(luminanceMatchesPerChip{i,j},2); \n averageLumMatchesPerSubject{i}(:,j) = nanmean(luminanceMatchesPerChip{i,j},2);\n end\n end\n \n averageLumMatchesPerSubjectAll{i} = averageLumMatchesPerSubject{i};\n %% See how many matches per subject are made. \n % If less than 3 subjects assigned anything to this chip, drop them. 
\n for g = 1: length(averageLumMatchesPerSubject)\n nSubjectMatches{g} = sum(isnan(averageLumMatchesPerSubject{g}),2); \n end\n \n for g = 1: length(nSubjectMatches)\n for r = 1:length(nSubjectMatches{g})\n if (nSubjectMatches{g}(r) > 2)\n averageLumMatchesPerSubject{g}(r,:) = nan(1, size(averageLumMatchesPerSubject{g},2));\n end\n end\n end\n cd(currentDir); \n xDataLim = [-2 3];\n yDataLim = [-3 0];\n \n fid = fopen(['ANA_TEST_DATA/ParamDump_All' num2str(whichDataSet) '.txt'],'w');\n figPrefix = 'ANA_TEST_DATA/';\n RESPONSE_REMAP = 0;\nend\n\n\n\n% Optional search to find reference parameters that put the responses rougly equally spaced on the y-axis. This isn't theoretically critical, but\n% seems as good an idea as any.\nFITREF = 0;\nif (FITREF)\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','iter','LargeScale','off','Algorithm','active-set');\n options = optimset(options,'MaxFunEvals',1200);\n targetRespRef = linspace(critValue,1-critValue,length(someDataRef));\n conTolRef = (targetRespRef(2)-targetRespRef(1));\n x0 = ParamsToListRef(params0);\n vlb = [x0(1)/100 x0(2) -100*mean(someDataRef) 1];\n vub = [x0(1)*100 x0(2) 100*mean(someDataRef) 4];\n x1 = fmincon(@InlineMinFunctionRef,x0,[],[],[],[],vlb,vub,@InlineConFunctionRef,options);\n params0 = ListToParamsRef(x1,params0);\nend\n\n%% Now do the fitting wrt to the reference paramters\n% rangeFig = figure;\nrangeFig = figure; \ndataFig = figure;\n position = get(gcf,'Position');\n position(3) = 1000; position(4) = 400;\n set(gcf,'Position',position);\n\nallData = cell(1,length(conditionList)); \nfor i = 1:length(conditionList)\n temptemp = [];\n for j = 1:length(subjectList)\n temptemp = [temptemp; [averageLumMatchesPerSubject{i}(:,j)']];\n end\n %allDataMean(i,:) = nanmean(temptemp, 1);\n acrossSubjectLumAverages{i} = NaN(size(paletteGlossy',1),1);\n for g = 1:size(paletteGlossy',1)\n okindex = ~isnan(averageLumMatchesPerSubject{i}(g,:)');\n tt=mean(averageLumMatchesPerSubject{i}(g,okindex))';\n acrossSubjectLumAverages{i}(g,1)=tt;\n end\n temptemp = [temptemp; acrossSubjectLumAverages{i}(:)'] ; \n allData{i} = temptemp; \n clear temptemp\nend\nclear okindex; \n%% for debugging purposes.\nfor i = 1:length(conditionList)\ncheck = keepSomeData(i,:) - acrossSubjectLumAverages{i}(:)'; \nend\n%%\nsomeData = []; \nfor i = 1:length(conditionList)\n someData = [someData; acrossSubjectLumAverages{i}(:)'];\nend\n\ncheck = keepSomeData - someData\nfor whichData = 1:size(someData,1)\n \n switch (whichDataSet)\n case 1\n if whichData == 1 || whichData == 4\n someDataRef = 10.^[paletteGlossy(1,:)];\n elseif whichData == 2 || whichData == 5\n someDataRef = 10.^[paletteMatte(1,:)];\n elseif whichData == 3 || whichData == 6\n someDataRef = 10.^[paletteMatte(1,:)];\n end\n case 2\n if whichData == 1 || whichData == 3 || whichData == 5 || whichData == 7\n someDataRef = 10.^[paletteGlossy(1,:)];\n elseif whichData == 2 || whichData == 4 || whichData == 6 || whichData == 8\n someDataRef = 10.^[paletteMatte(1,:)];\n \n end\n case 3\n someDataRef = 10.^[paletteGlossy(1,:)];\n \n end\n %% Initialize parameters. Set reference rmax to 1 and exponent to 2. 
Find\n % gain and offset that map the luminances across the central portion\n % of the response range.\n useDataRef = someDataRef;\n clear params0\n params0.rmaxRef = 1.0;\n params0.expRef = 3;\n critValue = 0.01;\n minResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],critValue);\n maxResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],1-critValue);\n minRef = min(someDataRef);\n maxRef = max(someDataRef);\n params0.gainRef = (maxResp-minResp)/(maxRef-minRef);\n params0.offsetRef = minRef-minResp/params0.gainRef;\n paramsRefNoFit = params0;\n \n if whichData == 1\n %% Plot of remapping between response and reference log10\n % luminance/reflectance\n lumVals = logspace(log10(someDataRef(1)),log10(someDataRef(end)),1000);\n lumVals = logspace(-3,0,1000);\n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefForRemap = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n respRefRemapped = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefForRemap)/params0.gainRef+params0.offsetRef);\n remapFig = figure; clf; hold on\n plot(respRefForRemap,respRefRemapped,'r','LineWidth',2);\n xlim([0 1]);\n ylim(yDataLim);\n xlabel('Visual Response');\n ylabel('Predicted Reflectance Match');\n cd(figPrefix);\n %savefig('ResponseRemapping.pdf',remapFig,'pdf');\n %save('ResponseRemappingData','respRefForRemap','respRefRemapped');\n savefig(['ResponseRemapping' num2str(whichDataSet) '.pdf'],remapFig,'pdf');\n save(['ResponseRemappingData' num2str(whichDataSet)],'respRefForRemap','respRefRemapped');\n cd ..\n end\n \n \n \n someDataMatch = 10.^[someData(whichData,:)]\n \n \n \n okIndex = find(~isnan(someDataMatch));\n useDataMatch = someDataMatch(okIndex);\n useDataRef = someDataRef(okIndex);\n \n figure(dataFig); clf;\n subplot(1,2,1); hold on\n plot(log10(useDataMatch),log10(useDataRef),'bo','MarkerFaceColor','b','MarkerSize',8);\n xlabel('Log10 Target Lum');\n ylabel('Log10 Standard Lum/Refl');\n \n % Parameter search options\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. 
Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off','Algorithm','active-set');\n \n % Initialize match parameters in same way\n endPointWeight = 0;\n params0.rmax = params0.rmaxRef;\n params0.exp = params0.expRef;\n params0.gain = params0.gainRef;\n params0.offset = params0.offsetRef;\n someDataPred0 = NRAPredictMatches(someDataRef,params0);\n %plot(log10(someDataRef),log10(someDataPred0),'y','LineWidth',1);\n %params0\n fprintf(fid,['Dataset ' conditionList{whichData} '\\n']);\n fprintf(fid,'\\tReference params: gain = %0.2g, offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',params0.gainRef,params0.offsetRef,params0.rmaxRef,params0.expRef);\n \n % Fit, first just gain\n x0 = ParamsToList(params0);\n vlb = [x0(1) x0(2)/100 x0(3:end)];\n vub = [x0(1) x0(2)*100 x0(3:end)];\n x1 = fmincon(@InlineMinFunction,x0,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x1,params0);\n someDataPred1 = NRAPredictMatches(someDataRef,params0);\n %plot(log10(someDataPred1),log10(someDataRef),'b','LineWidth',1);\n %params0\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain only model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Fit, gain and offset\n vlb = [x1(1) x1(2)/100 x1(3) -100*abs(x1(4)) x1(5)];\n vub = [x1(1) x1(2)*100 x1(3) 100*abs(x1(4)) x1(5)];\n x2 = fmincon(@InlineMinFunction,x1,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x2,params0);\n someDataPred2 = NRAPredictMatches(someDataRef,params0);\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain/Offset model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n paramsGainOffset = params0;\n %params0\n \n % Exp\n FITEXP = 1;\n if (FITEXP)\n vlb = [x2(1) x2(2)/100 x2(3) -100*abs(x2(4)) 0.5];\n vub = [x2(1) x2(2)*100 x2(3) 100*abs(x2(4)) 4];\n endPointWeight = 10;\n x3 = fmincon(@InlineMinFunction,x2,[],[],[],[],vlb,vub,[],options);\n endPointWeight = 0;\n x3 = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x3,params0);\n someDataPred3 = NRAPredictMatches(someDataRef,params0);\n else\n x3 = x2;\n someDataPred3 = NRAPredictMatches(someDataRef,params0);\n end\n fprintf(fid,'\\tGain/Offset/Exp model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Let rMax vary too. This doesn't add much if exponent varies.. Tp the fits, so I\n % uncluttered plots by removing. 
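% Sketch of the RESPONSE_REMAP transformation used in this file: a match
% response r is pushed back through the *reference* Naka-Rushton function
% and affine front end to get a predicted reference luminance,
% log10(invNR(r)/gainRef + offsetRef). The inverse below assumes the
% conventional form R = Rmax*y^n/(y^n + s^n) with params [Rmax s n]; the
% gain, offset and response values are made up for illustration.
invNR = @(p, R) p(2) .* (R ./ (p(1) - R)).^(1 ./ p(3));
pRefDemo = [1 1 3];                       % [rmaxRef 1 expRef]
gainRefDemo = 4.5; offsetRefDemo = -0.04; % assumed affine parameters
rDemo = 0.25;                             % a match response in (0, 1)
lumPredicted = log10(invNR(pRefDemo, rDemo) / gainRefDemo + offsetRefDemo);
fprintf('Response %0.2f maps to log10 luminance %0.3f\n', rDemo, lumPredicted);
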
Have not looked at whether varying\n % rMax can be substituted for varying the exponent.\n FITMAX = 0;\n if (FITMAX)\n vlb = [x3(1) x3(2)/100 0.5 -100*abs(x3(4)) x3(5)];\n vub = [x3(1) x3(2)*100 2 100*abs(x3(4)) x3(5)];\n x = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x,params0);\n someDataPred = NRAPredictMatches(someDataRef,params0);\n plot(log10(someDataPred3),log10(someDataRef),'k','LineWidth',1.5);\n %params0\n else\n x = x3;\n someDataPred = NRAPredictMatches(someDataRef,params0);\n end\n \n % Dump of interesting parameters\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tPredicted (actual) black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(1),someDataMatch(1),someDataPred(end),log10(someDataMatch(end)));\n fprintf(fid,'\\tOne-in predicted black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(2),someDataMatch(2),someDataPred(end-1),log10(someDataMatch(end-1)));\n \n % Plot stuff of interest\n plot(log10(someDataPred),log10(someDataRef),'r','LineWidth',3);\n plot(log10(someDataPred2),log10(someDataRef),'g','LineWidth',1);\n xlim(xDataLim); ylim(yDataLim);\n \n % Add plot of response functions for ref and match\n % Subtract the old offset, and truncate below 0 to zero.\n % We allow an optional remapping of the response back to the\n % luminance/reflectance space of the reference matches. This\n % mapping is static across contexts. This turns out not to\n % terribly interesting.\n lumVals = logspace(-2,3,1000);\n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefSmooth = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gainRef*(someDataRef-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRef = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRef)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(lumVals-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatchSmooth = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatchSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatchSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(someDataMatch-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatch = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatch = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatch)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsGainOffset.gain*(lumVals-paramsGainOffset.offset);\n ySub(ySub <= 0) = 0+eps;\n respGainOffsetSmooth = ComputeNakaRushton([paramsGainOffset.rmax 1 paramsGainOffset.exp],ySub);\n if (RESPONSE_REMAP)\n respGainOffsetSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respGainOffsetSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsRefNoFit.gainRef*(lumVals-paramsRefNoFit.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefNoFitSmooth = ComputeNakaRushton([paramsRefNoFit.rmaxRef 1 paramsRefNoFit.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefNoFitSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefNoFitSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n subplot(1,2,2); 
hold on\n plot(log10(someDataRef),respRef,'ko','MarkerFaceColor','k','MarkerSize',6);\n plot(log10(lumVals),respRefSmooth,'k:','LineWidth',2);\n %plot(log10(lumVals),respRefNoFitSmooth,'b','LineWidth',1);\n \n plot(log10(someDataMatch),respMatch,'bo','MarkerFaceColor','b','MarkerSize',8);\n plot(log10(lumVals),respMatchSmooth,'r','LineWidth',2);\n plot(log10(lumVals),respGainOffsetSmooth,'g','LineWidth',1);\n \n xlim(xDataLim);\n if (RESPONSE_REMAP)\n ylim(yDataLim);\n ylabel('Remapped Response');\n else\n ylim([0 1.2]);\n ylabel('Response');\n end\n xlabel('Log10 luminance');\n \n % Save figure\n cd(figPrefix);\n savefig(['TestFit_' num2str(whichDataSet) ' ' conditionList{whichData} '.pdf'],dataFig,'pdf');\n cd('..');\n \n fprintf(fid,'\\n');\n \n %% Fill output summary structure\n if (SARAH_TEST_DATA)\n summaryStructs(whichData-1).whitePoint = someDataPred(end);\n summaryStructs(whichData-1).blackPoint = someDataPred(1);\n summaryStructs(whichData-1).range = someDataPred(end) - someDataPred(1);\n summaryStructs(whichData-1).exp = params0.exp;\n predictExpFromWB(whichData-1,1) = summaryStructs(whichData-1).whitePoint;\n predictExpFromWB(whichData-1,2) = log10(summaryStructs(whichData-1).range);\n expVals(whichData-1,1) = summaryStructs(whichData-1).exp;\n %% Range versus exp figure\n figure(rangeFig)\n subplot(1,2,1); hold on\n plot(summaryStructs(whichData-1).range,summaryStructs(whichData-1).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('Range'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n subplot(1,2,2); hold on\n plot(summaryStructs(whichData-1).whitePoint,summaryStructs(whichData-1).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('White Point'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n else\n summaryStructs(whichData).whitePoint = someDataPred(end);\n summaryStructs(whichData).blackPoint = someDataPred(1);\n summaryStructs(whichData).range = someDataPred(end) - someDataPred(1);\n summaryStructs(whichData).exp = params0.exp;\n predictExpFromWB(whichData,1) = summaryStructs(whichData).whitePoint;\n predictExpFromWB(whichData,2) = log10(summaryStructs(whichData).range);\n expVals(whichData,1) = summaryStructs(whichData).exp;\n %% Range versus exp figure\n figure(rangeFig)\n subplot(1,2,1); hold on\n plot(summaryStructs(whichData).range,summaryStructs(whichData).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('Range'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n subplot(1,2,2); hold on\n plot(summaryStructs(whichData).whitePoint,summaryStructs(whichData).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('White Point'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n end\n \nend\nfclose(fid);\n\n%% Try to predict exponents\n% expRegCoefs = predictExpFromWB\\expVals;\n% predictedExpVals = predictExpFromWB*expRegCoefs;\n% expPredFig = figure; clf; hold on\n% plot(expVals,predictedExpVals,'ro','MarkerSize',8,'MarkerFaceColor','r');\n% plot([0 4],[0 4],'k');\n% xlim([0 4]); ylim([0 4]);\n% xlabel('Exponent'); ylabel('Predicted Exponent');\n% axis('square');\n\n%% Write out summary structs\ncd(figPrefix);\nif (SARAH_TEST_DATA)\n WriteStructsToText('SummaryData.txt',summaryStructs);\nelse\n WriteStructsToText(['SummaryData', num2str(whichDataSet), '.txt'],summaryStructs);\nend\ncd('..');\n\n%% Save plot of exponent versus range\ncd(figPrefix);\nif (SARAH_TEST_DATA)\n savefig(['ExpVersusRange.pdf'],rangeFig,'pdf');\nelse\n savefig(['ExpVersusRange', num2str(whichDataSet),'.pdf'],rangeFig,'pdf');\nend\ncd('..');\n\n%% INLINE 
FUNCTION TO BE USED FOR CTF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunction(x)\n paramsInline = ListToParams(x,params0);\n yPred = NRAPredictMatches(useDataRef,paramsInline);\n yPred(yPred <= 0) = 0 + eps;\n yDiff = log10(useDataMatch)-log10(yPred);\n f = sum(yDiff.^2) + endPointWeight*yDiff(1).^2 + endPointWeight*yDiff(end).^2;\n end\n\n%% INLINE FUNCTION TO BE USED FOR REF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n f = sum(abs(yDiff));\n %f = sum(yDiff.^2);\n end\n function [g,geq] = InlineConFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n g = max(abs(yDiff))-conTolRef;\n geq = 0;\n end\nend\n\n%% Param translation\nfunction params = ListToParams(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.gain = x(2);\nparams.rmax = x(3);\nparams.offset = x(4);\nparams.exp = x(5);\n\nend\n\nfunction x = ParamsToList(params)\n\nx = [params.gainRef params.gain params.rmax params.offset params.exp];\n\nend\n\nfunction params = ListToParamsRef(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.rmaxRef = x(2);\nparams.offsetRef = x(3);\nparams.expRef = x(4);\n\nend\n\nfunction x = ParamsToListRef(params)\n\nx = [params.gainRef params.rmaxRef params.offsetRef params.expRef];\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "TestPredictNRAffineMatchesContol.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/lightessModelsTutorial/TestPredictNRAffineMatchesContol.m", "size": 21771, "source_encoding": "utf_8", "md5": "3fb8e64c27a74be3bd8a49675a9464b0", "text": "function TestPredictNRAffineMatchesContol\n% TestPredictNRAffineMatchesControl\n%\n% Fit the model through the control conditions. \n%\n% 05/20/11 ar Adapded it in order to Model bunch of old controls previously done by Sarah. \n\n%% Clear\nclear; close all;\n\n% Define relevant directories. 
\ncurrentDir = pwd; \ndataDir = '/Users/Shared/Matlab/Experiments/HDRExperiments/HDRAna'; \nwhichDataSet = 1; \n%% Choose model parameters and generate predictions, plot.\n% Let's one explore what the model can do.\n%DO_PRELIM_STUFF = 0;\n% if (DO_PRELIM_STUFF)\n% yRef = logspace(-2,4);\n% \n% %% Set up parameters\n% params0.rmaxRef = 1.0;\n% params0.gainRef = 10e-4;\n% params0.offsetRef = 0;\n% params0.expRef = 2.5;\n% params0.rmax = 1.01;\n% params0.gain = 0.5*10e-4;\n% params0.offset = 1;\n% params0.exp = 1;\n% \n% %% Plot effect of mucking with exponent\n% figure; clf; hold on\n% exponents = [1.5 2 2.5];\n% for i = 1:length(exponents)\n% params0.exp = exponents(i);\n% yMatch{i} = NRAPredictMatches(yRef,params0);\n% \n% % Plot\n% plot(log10(yMatch{i}),log10(yRef),'k','LineWidth',3);\n% xlim([-3 5]); ylim([-3 5]);\n% xlabel('Log10 Target Lum');\n% ylabel('Log10 Standard Lum/Refl');\n% end\n% end\n\n\n%% Palette Values from Sarah's old experiments. \npaletteGlossy = [ -2.1460 -1.8211 -1.7097 -1.5066 -1.3266 -1.1874 -1.0410 -0.9402 -0.8007 -0.7012 -0.6070 ...\n -0.5109 -0.4395 -0.3639 -0.3139 -0.2053 -0.1452 -0.0880 -0.0307];\n\npaletteMatte = [ NaN NaN NaN -1.3619 -1.3298 -1.1938 -0.9993 -0.9174 -0.8027 -0.7206 -0.6117 ...\n -0.5474 -0.4374 -0.3707 -0.2806 -0.2226 -0.1525 -0.1009 -0.0419 ];\n\n%% Fit some matching data\nSARAH_TEST_DATA = 0;\nif (SARAH_TEST_DATA)\n \nelse\n switch whichDataSet\n case 1\n conditionList = {'SIonGlossy','SIoffGlossy', 'SIoldMatte', 'fMonGlossy','fMoffGlossy', 'fMoldMatte',};\n % subjectList = {'bam', 'cly', 'flv', 'lta' ,'ncd', 'rpd', 'stg', 'tfm'};\n someData = [[-1.1820 -0.7709 -0.5714 -0.2102 -0.0518 0.1396 0.3406 0.4518 0.6188 0.8408 0.9734 1.1930 1.2922 1.4202 1.5267 1.6482 1.7918 2.0094 2.2232 ]; \n [ -1.2288 -0.7391 -0.3809 -0.2950 -0.1586 0.1467 0.2993 0.4713 0.6734 0.7679 0.9406 1.0131 1.2507 1.3145 1.4443 1.6137 1.7227 2.0653 2.1752]; \n [ NaN NaN NaN -0.5785 -0.2625 -0.0359 0.2090 0.4355 0.5728 0.7679 0.9699 1.1320 1.2803 1.4505 1.6429 1.7727 1.8928 2.0566 2.1480]; \n [-1.1133 -0.7398 -0.4359 -0.1763 0.0267 0.3351 0.5433 0.7359 0.8915 1.0952 1.2348 1.3061 1.4766 1.6375 1.7163 1.9082 1.9294 2.1959 2.1790]; \n [-1.2353 -0.4408 -0.3123 -0.1259 0.1121 0.2837 0.4506 0.7405 0.9927 0.9704 1.2497 1.4010 1.4841 1.6258 1.6844 1.7980 2.0851 2.1959 2.2573]; \n [ NaN NaN NaN -0.6949 -0.2629 0.0446 0.3898 0.5828 0.8021 1.0371 1.0989 1.3152 1.4972 1.6377 1.7715 1.9127 2.0254 2.1249 2.2717]; ] \n \n \n \n \n end\n \n cd(currentDir); \n xDataLim = [-2 3];\n yDataLim = [-3 0];\n \n fid = fopen(['ANA_TEST_DATA/ParamDump_Control' num2str(whichDataSet) '.txt'],'w');\n figPrefix = 'ANA_TEST_DATA/';\n RESPONSE_REMAP = 0;\nend\n\n\n\n% Optional search to find reference parameters that put the responses rougly equally spaced on the y-axis. This isn't theoretically critical, but\n% seems as good an idea as any.\nFITREF = 0;\nif (FITREF)\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. 
Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','iter','LargeScale','off','Algorithm','active-set');\n options = optimset(options,'MaxFunEvals',1200);\n targetRespRef = linspace(critValue,1-critValue,length(someDataRef));\n conTolRef = (targetRespRef(2)-targetRespRef(1));\n x0 = ParamsToListRef(params0);\n vlb = [x0(1)/100 x0(2) -100*mean(someDataRef) 1];\n vub = [x0(1)*100 x0(2) 100*mean(someDataRef) 4];\n x1 = fmincon(@InlineMinFunctionRef,x0,[],[],[],[],vlb,vub,@InlineConFunctionRef,options);\n params0 = ListToParamsRef(x1,params0);\nend\n\n%% Now do the fitting wrt to the reference paramters\n% rangeFig = figure;\nrangeFig = figure; \ndataFig = figure;\n position = get(gcf,'Position');\n position(3) = 1000; position(4) = 400;\n set(gcf,'Position',position);\n \n\n% allData = cell(1,length(conditionList)); \n% for i = 1:length(conditionList)\n% temptemp = [];\n% for j = 1:length(subjectList)\n% temptemp = [temptemp; [averageLumMatchesPerSubject{i}(:,j)']];\n% end\n% %allDataMean(i,:) = nanmean(temptemp, 1);\n% acrossSubjectLumAverages{i} = NaN(size(paletteGlossy',1),1);\n% for g = 1:size(paletteGlossy',1)\n% okindex = ~isnan(averageLumMatchesPerSubject{i}(g,:)');\n% tt=mean(averageLumMatchesPerSubject{i}(g,okindex))';\n% acrossSubjectLumAverages{i}(g,1)=tt;\n% end\n% temptemp = [temptemp; acrossSubjectLumAverages{i}(:)'] ; \n% allData{i} = temptemp; \n% clear temptemp\n% end\n% clear okindex; \n% %% for debugging purposes.\n% for i = 1:length(conditionList)\n% check = keepSomeData(i,:) - acrossSubjectLumAverages{i}(:)'; \n% end\n% %%\n% someData = []; \n% for i = 1:length(conditionList)\n% someData = [someData; acrossSubjectLumAverages{i}(:)'];\n% end\nfor whichData = 1:size(someData,1)\n \n switch (whichDataSet)\n case 1\n if whichData == 1 || whichData == 2 || whichData == 4 || whichData == 5\n someDataRef = 10.^[paletteGlossy(1,:)];\n elseif whichData == 3 || whichData == 6\n someDataRef = 10.^[paletteMatte(1,:)];\n end\n \n end\n %% Initialize parameters. Set reference rmax to 1 and exponent to 2. 
Find\n % gain and offset that map the luminances across the central portion\n % of the response range.\n useDataRef = someDataRef;\n clear params0\n params0.rmaxRef = 1.0;\n params0.expRef = 3;\n critValue = 0.01;\n minResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],critValue);\n maxResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],1-critValue);\n minRef = min(someDataRef);\n maxRef = max(someDataRef);\n params0.gainRef = (maxResp-minResp)/(maxRef-minRef);\n params0.offsetRef = minRef-minResp/params0.gainRef;\n paramsRefNoFit = params0;\n \n if whichData == 1\n %% Plot of remapping between response and reference log10\n % luminance/reflectance\n lumVals = logspace(log10(someDataRef(1)),log10(someDataRef(end)),1000);\n lumVals = logspace(-3,0,1000);\n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefForRemap = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n respRefRemapped = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefForRemap)/params0.gainRef+params0.offsetRef);\n remapFig = figure; clf; hold on\n plot(respRefForRemap,respRefRemapped,'r','LineWidth',2);\n xlim([0 1]);\n ylim(yDataLim);\n xlabel('Visual Response');\n ylabel('Predicted Reflectance Match');\n cd(figPrefix);\n %savefig('ResponseRemapping.pdf',remapFig,'pdf');\n %save('ResponseRemappingData','respRefForRemap','respRefRemapped');\n savefig(['ResponseRemappingControl' num2str(whichDataSet) '.pdf'],remapFig,'pdf');\n save(['ResponseRemappingDataControl' num2str(whichDataSet)],'respRefForRemap','respRefRemapped');\n cd ..\n end\n \n \n \n someDataMatch = 10.^[someData(whichData,:)];\n \n \n \n okIndex = find(~isnan(someDataMatch));\n useDataMatch = someDataMatch(okIndex);\n useDataRef = someDataRef(okIndex);\n \n figure(dataFig); clf;\n subplot(1,2,1); hold on\n plot(log10(useDataMatch),log10(useDataRef),'bo','MarkerFaceColor','b','MarkerSize',8);\n xlabel('Log10 Target Lum');\n ylabel('Log10 Standard Lum/Refl');\n \n % Parameter search options\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. 
Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off','Algorithm','active-set');\n \n % Initialize match parameters in same way\n endPointWeight = 0;\n params0.rmax = params0.rmaxRef;\n params0.exp = params0.expRef;\n params0.gain = params0.gainRef;\n params0.offset = params0.offsetRef;\n someDataPred0 = NRAPredictMatches(someDataRef,params0);\n %plot(log10(someDataRef),log10(someDataPred0),'y','LineWidth',1);\n %params0\n fprintf(fid,['Dataset ' conditionList{whichData} '\\n']);\n fprintf(fid,'\\tReference params: gain = %0.2g, offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',params0.gainRef,params0.offsetRef,params0.rmaxRef,params0.expRef);\n \n % Fit, first just gain\n x0 = ParamsToList(params0);\n vlb = [x0(1) x0(2)/100 x0(3:end)];\n vub = [x0(1) x0(2)*100 x0(3:end)];\n x1 = fmincon(@InlineMinFunction,x0,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x1,params0);\n someDataPred1 = NRAPredictMatches(someDataRef,params0);\n %plot(log10(someDataPred1),log10(someDataRef),'b','LineWidth',1);\n %params0\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain only model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Fit, gain and offset\n vlb = [x1(1) x1(2)/100 x1(3) -100*abs(x1(4)) x1(5)];\n vub = [x1(1) x1(2)*100 x1(3) 100*abs(x1(4)) x1(5)];\n x2 = fmincon(@InlineMinFunction,x1,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x2,params0);\n someDataPred2 = NRAPredictMatches(someDataRef,params0);\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain/Offset model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n paramsGainOffset = params0;\n %params0\n \n % Exp\n FITEXP = 1;\n if (FITEXP)\n vlb = [x2(1) x2(2)/100 x2(3) -100*abs(x2(4)) 0.5];\n vub = [x2(1) x2(2)*100 x2(3) 100*abs(x2(4)) 4];\n endPointWeight = 10;\n x3 = fmincon(@InlineMinFunction,x2,[],[],[],[],vlb,vub,[],options);\n endPointWeight = 0;\n x3 = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x3,params0);\n someDataPred3 = NRAPredictMatches(someDataRef,params0);\n else\n x3 = x2;\n someDataPred3 = NRAPredictMatches(someDataRef,params0);\n end\n fprintf(fid,'\\tGain/Offset/Exp model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Let rMax vary too. This doesn't add much if exponent varies.. Tp the fits, so I\n % uncluttered plots by removing. 
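% A minimal sketch (data and model invented for illustration; requires the
% Optimization Toolbox) of the idiom used in the staged fits above: a
% parameter is held fixed in fmincon simply by giving it identical lower and
% upper bounds, then freed in the next stage while warm-starting from the
% previous solution.
xGrid = linspace(-1, 1, 25);
yObs  = 3*xGrid.^2 + 0.5 + 0.1*randn(size(xGrid));
obj   = @(p) sum((yObs - (p(1)*xGrid.^2 + p(2))).^2);

opts = optimset('fmincon');
opts = optimset(opts, 'Display', 'off');

% Stage 1: fit only the gain p(1); the offset p(2) is frozen at its starting
% value because vlb(2) == vub(2).
p0  = [1 0];
vlb = [p0(1)/100  p0(2)];
vub = [p0(1)*100  p0(2)];
p1  = fmincon(obj, p0, [], [], [], [], vlb, vub, [], opts);

% Stage 2: free the offset too, starting from the stage-1 solution.
vlb = [p1(1)/100  -10];
vub = [p1(1)*100   10];
p2  = fmincon(obj, p1, [], [], [], [], vlb, vub, [], opts);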
Have not looked at whether varying\n % rMax can be substituted for varying the exponent.\n FITMAX = 0;\n if (FITMAX)\n vlb = [x3(1) x3(2)/100 0.5 -100*abs(x3(4)) x3(5)];\n vub = [x3(1) x3(2)*100 2 100*abs(x3(4)) x3(5)];\n x = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x,params0);\n someDataPred = NRAPredictMatches(someDataRef,params0);\n plot(log10(someDataPred3),log10(someDataRef),'k','LineWidth',1.5);\n %params0\n else\n x = x3;\n someDataPred = NRAPredictMatches(someDataRef,params0);\n end\n \n % Dump of interesting parameters\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tPredicted (actual) black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(1),someDataMatch(1),someDataPred(end),someDataMatch(end));\n fprintf(fid,'\\tOne-in predicted black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(2),someDataMatch(2),someDataPred(end-1),someDataMatch(end-1));\n \n % Plot stuff of interest\n plot(log10(someDataPred),log10(someDataRef),'r','LineWidth',3);\n plot(log10(someDataPred2),log10(someDataRef),'g','LineWidth',1);\n xlim(xDataLim); ylim(yDataLim);\n \n % Add plot of response functions for ref and match\n % Subtract the old offset, and truncate below 0 to zero.\n % We allow an optional remapping of the response back to the\n % luminance/reflectance space of the reference matches. This\n % mapping is static across contexts. This turns out not to\n % terribly interesting.\n lumVals = logspace(-2,3,1000);\n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefSmooth = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gainRef*(someDataRef-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRef = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRef)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(lumVals-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatchSmooth = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatchSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatchSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(someDataMatch-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatch = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatch = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatch)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsGainOffset.gain*(lumVals-paramsGainOffset.offset);\n ySub(ySub <= 0) = 0+eps;\n respGainOffsetSmooth = ComputeNakaRushton([paramsGainOffset.rmax 1 paramsGainOffset.exp],ySub);\n if (RESPONSE_REMAP)\n respGainOffsetSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respGainOffsetSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsRefNoFit.gainRef*(lumVals-paramsRefNoFit.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefNoFitSmooth = ComputeNakaRushton([paramsRefNoFit.rmaxRef 1 paramsRefNoFit.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefNoFitSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefNoFitSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n subplot(1,2,2); hold on\n 
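% Self-contained stand-in for the response computation used just above: an
% affine front end (gain and offset, truncated at zero) followed by a
% Naka-Rushton nonlinearity. The three-parameter [rmax sigma exponent] form
% is assumed from the calls above; the actual ComputeNakaRushton and
% InvertNakaRushton helpers come from the toolbox this script depends on and
% are not reproduced here. Parameter values are illustrative, not fitted.
gain   = 5;
offset = 0.01;
rmax   = 1;  sigma = 1;  expon = 3;

lum  = logspace(-3, 1, 200);
ySub = gain*(lum - offset);
ySub(ySub <= 0) = eps;                                   % truncate below zero
resp = rmax*(ySub.^expon)./(ySub.^expon + sigma^expon);  % Naka-Rushton

% Inverting the nonlinearity (response back to the front-end variable):
% y = sigma*(r/(rmax - r))^(1/expon), valid for 0 < r < rmax.
r    = 0.5;
yInv = sigma*(r/(rmax - r))^(1/expon);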
plot(log10(someDataRef),respRef,'ko','MarkerFaceColor','k','MarkerSize',6);\n plot(log10(lumVals),respRefSmooth,'k:','LineWidth',2);\n %plot(log10(lumVals),respRefNoFitSmooth,'b','LineWidth',1);\n \n plot(log10(someDataMatch),respMatch,'bo','MarkerFaceColor','b','MarkerSize',8);\n plot(log10(lumVals),respMatchSmooth,'r','LineWidth',2);\n plot(log10(lumVals),respGainOffsetSmooth,'g','LineWidth',1);\n \n xlim(xDataLim);\n if (RESPONSE_REMAP)\n ylim(yDataLim);\n ylabel('Remapped Response');\n else\n ylim([0 1.2]);\n ylabel('Response');\n end\n xlabel('Log10 luminance');\n \n % Save figure\n cd(figPrefix);\n savefig(['TestFit_Control' num2str(whichDataSet) ' ' conditionList{whichData} '.pdf'],dataFig,'pdf');\n cd('..');\n \n fprintf(fid,'\\n');\n \n %% Fill output summary structure\n if (SARAH_TEST_DATA)\n summaryStructs(whichData-1).whitePoint = someDataPred(end);\n summaryStructs(whichData-1).blackPoint = someDataPred(1);\n summaryStructs(whichData-1).range = someDataPred(end) - someDataPred(1);\n summaryStructs(whichData-1).exp = params0.exp;\n predictExpFromWB(whichData-1,1) = summaryStructs(whichData-1).whitePoint;\n predictExpFromWB(whichData-1,2) = log10(summaryStructs(whichData-1).range);\n expVals(whichData-1,1) = summaryStructs(whichData-1).exp;\n %% Range versus exp figure\n figure(rangeFig)\n subplot(1,2,1); hold on\n plot(summaryStructs(whichData-1).range,summaryStructs(whichData-1).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('Range'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n subplot(1,2,2); hold on\n plot(summaryStructs(whichData-1).whitePoint,summaryStructs(whichData-1).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('White Point'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n else\n summaryStructs(whichData).whitePoint = someDataPred(end);\n summaryStructs(whichData).blackPoint = someDataPred(1);\n summaryStructs(whichData).range = someDataPred(end) - someDataPred(1);\n summaryStructs(whichData).exp = params0.exp;\n predictExpFromWB(whichData,1) = summaryStructs(whichData).whitePoint;\n predictExpFromWB(whichData,2) = log10(summaryStructs(whichData).range);\n expVals(whichData,1) = summaryStructs(whichData).exp;\n %% Range versus exp figure\n figure(rangeFig)\n subplot(1,2,1); hold on\n plot(summaryStructs(whichData).range,summaryStructs(whichData).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('Range'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n subplot(1,2,2); hold on\n plot(summaryStructs(whichData).whitePoint,summaryStructs(whichData).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('White Point'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n end\n \nend\nfclose(fid);\n\n%% Try to predict exponents\nexpRegCoefs = predictExpFromWB\\expVals;\npredictedExpVals = predictExpFromWB*expRegCoefs;\nexpPredFig = figure; clf; hold on\nplot(expVals,predictedExpVals,'ro','MarkerSize',8,'MarkerFaceColor','r');\nplot([0 4],[0 4],'k');\nxlim([0 4]); ylim([0 4]);\nxlabel('Exponent'); ylabel('Predicted Exponent');\naxis('square');\n\n%% Write out summary structs\ncd(figPrefix);\nif (SARAH_TEST_DATA)\n WriteStructsToText('SummaryDataControl.txt',summaryStructs);\nelse\n WriteStructsToText(['SummaryDataControl', num2str(whichDataSet), '.txt'],summaryStructs);\nend\ncd('..');\n\n%% Save plot of exponent versus range\ncd(figPrefix);\nif (SARAH_TEST_DATA)\n savefig(['ExpVersusRangeControl.pdf'],rangeFig,'pdf');\nelse\n savefig(['ExpVersusRangeControl', num2str(whichDataSet),'.pdf'],rangeFig,'pdf');\nend\ncd('..');\n\n%% 
INLINE FUNCTION TO BE USED FOR CTF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunction(x)\n paramsInline = ListToParams(x,params0);\n yPred = NRAPredictMatches(useDataRef,paramsInline);\n yPred(yPred <= 0) = 0 + eps;\n yDiff = log10(useDataMatch)-log10(yPred);\n f = sum(yDiff.^2) + endPointWeight*yDiff(1).^2 + endPointWeight*yDiff(end).^2;\n end\n\n%% INLINE FUNCTION TO BE USED FOR REF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n f = sum(abs(yDiff));\n %f = sum(yDiff.^2);\n end\n function [g,geq] = InlineConFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n g = max(abs(yDiff))-conTolRef;\n geq = 0;\n end\nend\n\n%% Param translation\nfunction params = ListToParams(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.gain = x(2);\nparams.rmax = x(3);\nparams.offset = x(4);\nparams.exp = x(5);\n\nend\n\nfunction x = ParamsToList(params)\n\nx = [params.gainRef params.gain params.rmax params.offset params.exp];\n\nend\n\nfunction params = ListToParamsRef(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.rmaxRef = x(2);\nparams.offsetRef = x(3);\nparams.expRef = x(4);\n\nend\n\nfunction x = ParamsToListRef(params)\n\nx = [params.gainRef params.rmaxRef params.offsetRef params.expRef];\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "TestPredictNRAffineMatches.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/lightessModelsTutorial/TestPredictNRAffineMatches.m", "size": 21578, "source_encoding": "utf_8", "md5": "ebf3f7a8b2bbe30f79780c97aeb9448a", "text": "function TestPredictNRAffineMatches\n% TestPredictNRAffineMatches\n%\n% Work out what the little model does for various choices of input\n%\n% 12/4/10 dhb Wrote it.\n% 4/20/11 dhb Lot's of little changes. 
Switch polarity of data plots\n\n%% Clear\nclear; close all;\n\n%% Choose model parameters and generate predictions, plot.\n% Let's one explore what the model can do.\nDO_PRELIM_STUFF = 0;\nif (DO_PRELIM_STUFF)\n yRef = logspace(-2,4);\n \n %% Set up parameters\n params0.rmaxRef = 1.0;\n params0.gainRef = 10e-4;\n params0.offsetRef = 0;\n params0.expRef = 2.5;\n params0.rmax = 1.01;\n params0.gain = 0.5*10e-4;\n params0.offset = 1;\n params0.exp = 1;\n \n %% Plot effect of mucking with exponent\n figure; clf; hold on\n exponents = [1.5 2 2.5];\n for i = 1:length(exponents)\n params0.exp = exponents(i);\n yMatch{i} = PredictNRAffineMatches(yRef,params0);\n \n % Plot\n plot(log10(yMatch{i}),log10(yRef),'k','LineWidth',3);\n xlim([-3 5]); ylim([-3 5]);\n xlabel('Log10 Target Lum');\n ylabel('Log10 Standard Lum/Refl');\n end\nend\n\n%% Fit some matching data\nSARAH_TEST_DATA = 0;\nif (SARAH_TEST_DATA)\n someData = [ ...\n %[-1.4666 -1.3279 -1.1376 -1.0155 -0.8727 -0.7791 -0.6807 -0.6039 -0.5046 -0.4332 -0.3622 -0.3050 -0.2280 -0.1671 -0.0941 -0.0472]; ...\n -0.3425 -0.1963 0.0211 0.2595 0.4734 0.5964 0.8080 0.9776 1.1431 1.3214 1.4757 1.6494 1.8117 1.9341 2.0687 2.1320;...\n NaN -0.7632 -0.6575 -0.4410 -0.3869 -0.2870 -0.0619 -0.0495 0.1377 0.3261 0.5331 0.8137 1.0949 1.2788 1.4755 1.7163;...\n -0.8488 -0.6492 -0.3507 -0.1405 0.0320 0.1059 0.5055 0.5369 0.6712 0.9123 1.1550 1.4602 1.6382 1.7404 1.9184 2.0872;...\n -0.7557 -0.5774 -0.3305 -0.0248 0.1117 0.4900 0.5283 0.7715 0.8772 1.0994 1.3277 1.4880 1.7048 1.7955 2.0763 2.1066;...\n -0.6644 -0.3730 -0.2039 -0.0068 0.2048 0.4702 0.6319 0.8008 0.9775 1.1454 1.4122 1.5620 1.6963 1.8275 1.8847 2.1487;...\n -0.4542 -0.1567 0.0871 0.3464 0.5848 0.7929 1.0680 1.1379 1.2462 1.4850 1.6129 1.7910 1.9263 1.9863 2.2012 NaN;...\n -0.3636 -0.1035 0.2051 0.4746 0.7457 0.9043 1.1863 1.2533 1.4154 1.6568 1.7909 1.9330 2.0616 2.0808 2.1693 2.2539;...\n -0.1974 0.0098 0.2721 0.6128 0.8488 1.0728 1.2161 1.3985 1.4178 1.6738 1.7163 1.9348 1.9615 2.1480 2.1840 2.2982;...\n -0.2089 0.1448 0.4346 0.7253 0.9232 1.1556 1.3072 1.4958 1.5687 1.7282 1.8244 2.0339 2.0361 2.1448 2.2066 NaN];\n \n someDataRef = 10.^[someData(1,:)];\n xDataLim = [-2 3];\n yDataLim = [-2 3];\n fid = fopen('SARAH_TEST_DATA/ParamDump.txt','w');\n figPrefix = 'SARAH_TEST_DATA/';\n RESPONSE_REMAP = 0;\nelse\n someData = [ ...\n [-2.0458e+00 -1.8447e+00 -1.6840e+00 -1.4881e+00 -1.3251e+00 -1.1838e+00 -1.0424e+00 -9.2046e-01 -8.1417e-01 -7.1175e-01 ...\n -6.2160e-01 -5.3180e-01 -4.5087e-01 -3.7192e-01 -2.9654e-01 -2.2746e-01 -1.6488e-01 -1.0768e-01 -4.3064e-02]; %palette\n \n [-1.4012 -0.9285 -0.7862 -0.4952 -0.2476 0.0172 0.2259 0.4565 0.5586 0.7049 ...\n 0.8431 1.0677 1.1933 1.3972 1.6246 1.7266 1.8868 2.1460 2.2618]; % full4\n \n [-1.2469 -1.0194 -0.6968 -0.3888 -0.1960 0.1387 0.3627 0.6095 0.8034 0.8159 ...\n 0.9833 1.1952 1.3942 1.5388 1.5710 1.7905 2.0848 2.0719 2.3168]; % full3\n \n [NaN NaN NaN NaN 1.1345 1.1445 1.2867 1.3138 1.3704 1.5017 ...\n 1.5732 1.6708 1.7791 1.8904 1.9778 2.0832 2.2022 2.3184 2.4071]; % full30 3\n \n [NaN NaN NaN NaN NaN NaN 1.1932 1.2166 1.2841 1.4061 ...\n 1.4842 1.6065 1.6801 1.8158 1.9317 2.0486 2.1714 2.2893 2.4259]; % full30 4\n \n [ -0.3140 -0.2027 -0.0686 0.0819 0.2873 0.4310 0.5986 0.7905 0.8960 1.0847 ...\n 1.2355 1.3290 1.4651 1.6356 1.7116 1.8833 1.9983 2.1780 2.3949]; % full 1000\n \n [ NaN NaN NaN NaN 0.4098 0.4786 0.6039 0.7330 0.8416 0.8923 ...\n 0.9797 1.1226 1.1993 1.3123 1.4279 1.5174 1.6544 1.6851 NaN]; % full Gray30\n \n [ -1.0961 -0.8952 
-0.7221 -0.4952 -0.3652 -0.1803 -0.0603 0.0522 0.3139 0.3222 ...\n 0.4816 0.6810 0.8161 0.9925 1.1563 1.3792 1.5010 1.6713 1.7328]; % full gray 1000;\n \n [-1.2028 -0.9204 -0.6084 -0.2414 -0.0021 0.0723 0.2916 0.5297 0.6825 0.8876 ...\n 0.9969 1.2277 1.2544 1.4292 1.6247 1.8370 2.0001 2.1447 2.2880]; % full mean plus;\n \n [NaN NaN NaN NaN NaN NaN 1.1726 1.2939 1.3940 1.5356 ...\n 1.5940 1.7435 1.8141 1.9606 2.0642 2.1749 2.3042 2.3794 2.4674]; % full30 mean plus;\n \n [NaN NaN NaN NaN 0.4270 0.4158 0.5322 0.6765 0.7749 0.8527 ...\n 0.9992 1.1176 1.2819 1.3642 1.4917 1.6065 1.6876 NaN NaN]; % fullgray30\n \n [-7.5486e-01 -6.3016e-01 -3.7002e-01 -7.5043e-02 2.5521e-01 4.1869e-01 6.5650e-01 8.2140e-01 9.3936e-01 ...\n 1.1518e+00 1.3266e+00 1.3894e+00 1.4861e+00 1.7282e+00 1.8061e+00 1.9940e+00 2.1053e+00 2.2826e+00 2.3641e+00]; % fullmeanplus2\n \n [-8.8795e-01 -7.5641e-01 -5.6947e-01 -4.3999e-01 -2.9319e-01 -1.1604e-01 5.8502e-02 2.5986e-01 3.7464e-01 ...\n 5.2778e-01 6.5286e-01 8.2851e-01 9.8959e-01 1.1379e+00 1.4417e+00 1.6340e+00 1.7811e+00 2.0558e+00 2.1961e+00]; % fullmeanminus\n ];\n \n \n someDataRef = 10.^[someData(1,:)];\n xDataLim = [-2 3];\n yDataLim = [-3 0];\n fid = fopen('ANA_TEST_DATA/ParamDump.txt','w');\n figPrefix = 'ANA_TEST_DATA/';\n RESPONSE_REMAP = 0;\nend\n\n%% Initialize parameters. Set reference rmax to 1 and exponent to 2. Find\n% gain and offset that map the luminances across the central portion\n% of the response range.\nuseDataRef = someDataRef;\nclear params0\nparams0.rmaxRef = 1.0;\nparams0.expRef = 3;\ncritValue = 0.01;\nminResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],critValue);\nmaxResp = InvertNakaRushton([params0.rmaxRef 1 params0.expRef],1-critValue);\nminRef = min(someDataRef);\nmaxRef = max(someDataRef);\nparams0.gainRef = (maxResp-minResp)/(maxRef-minRef);\nparams0.offsetRef = minRef-minResp/params0.gainRef;\nparamsRefNoFit = params0;\n\n% Optional search to find reference parameters that put the responses rougly equally spaced on the y-axis. This isn't theoretically critical, but\n% seems as good an idea as any.\nFITREF = 0;\nif (FITREF)\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. 
Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','iter','LargeScale','off','Algorithm','active-set');\n options = optimset(options,'MaxFunEvals',1200);\n targetRespRef = linspace(critValue,1-critValue,length(someDataRef));\n conTolRef = (targetRespRef(2)-targetRespRef(1));\n x0 = ParamsToListRef(params0);\n vlb = [x0(1)/100 x0(2) -100*mean(someDataRef) 1];\n vub = [x0(1)*100 x0(2) 100*mean(someDataRef) 4];\n x1 = fmincon(@InlineMinFunctionRef,x0,[],[],[],[],vlb,vub,@InlineConFunctionRef,options);\n params0 = ListToParamsRef(x1,params0);\nend\n\n%% Plot of remapping between response and reference log10\n% luminance/reflectance\nlumVals = logspace(log10(someDataRef(1)),log10(someDataRef(end)),1000);\nlumVals = logspace(-3,0,1000);\nySub = params0.gainRef*(lumVals-params0.offsetRef);\nySub(ySub <= 0) = 0+eps;\nrespRefForRemap = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\nrespRefRemapped = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefForRemap)/params0.gainRef+params0.offsetRef);\nremapFig = figure; clf; hold on\nplot(respRefForRemap,respRefRemapped,'r','LineWidth',2);\nxlim([0 1]);\nylim(yDataLim);\nxlabel('Visual Response');\nylabel('Predicted Reflectance Match');\ncd(figPrefix);\nsavefig('ResponseRemapping.pdf',remapFig,'pdf');\nsave('ResponseRemappingData','respRefForRemap','respRefRemapped');\ncd ..\n\n%% Now do the fitting wrt to the reference paramters\nrangeFig = figure;\ndataFig = figure;\nposition = get(gcf,'Position');\nposition(3) = 1000; position(4) = 400;\nset(gcf,'Position',position);\nfor whichData = 2:size(someData,1)\n %clear params0\n someDataMatch = 10.^[someData(whichData,:)];\n \n okIndex = find(~isnan(someDataMatch));\n useDataMatch = someDataMatch(okIndex);\n useDataRef = someDataRef(okIndex);\n \n figure(dataFig); clf;\n subplot(1,2,1); hold on\n plot(log10(useDataMatch),log10(useDataRef),'bo','MarkerFaceColor','b','MarkerSize',8);\n xlabel('Log10 Target Lum');\n ylabel('Log10 Standard Lum/Refl');\n \n % Parameter search options\n if (verLessThan('optim','4.1'))\n error('Your version of the optimization toolbox is too old. 
Update it.');\n end\n options = optimset('fmincon');\n options = optimset(options,'Diagnostics','off','Display','off','LargeScale','off','Algorithm','active-set');\n \n % Initialize match parameters in same way\n endPointWeight = 0;\n params0.rmax = params0.rmaxRef;\n params0.exp = params0.expRef;\n params0.gain = params0.gainRef;\n params0.offset = params0.offsetRef;\n someDataPred0 = PredictNRAffineMatches(someDataRef,params0);\n %plot(log10(someDataRef),log10(someDataPred0),'y','LineWidth',1);\n %params0\n fprintf(fid,'Dataset %d\\n',whichData);\n fprintf(fid,'\\tReference params: gain = %0.2g, offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',params0.gainRef,params0.offsetRef,params0.rmaxRef,params0.expRef);\n \n % Fit, first just gain\n x0 = ParamsToList(params0);\n vlb = [x0(1) x0(2)/100 x0(3:end)];\n vub = [x0(1) x0(2)*100 x0(3:end)];\n x1 = fmincon(@InlineMinFunction,x0,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x1,params0);\n someDataPred1 = PredictNRAffineMatches(someDataRef,params0);\n %plot(log10(someDataPred1),log10(someDataRef),'b','LineWidth',1);\n %params0\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain only model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Fit, gain and offset\n vlb = [x1(1) x1(2)/100 x1(3) -100*abs(x1(4)) x1(5)];\n vub = [x1(1) x1(2)*100 x1(3) 100*abs(x1(4)) x1(5)];\n x2 = fmincon(@InlineMinFunction,x1,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x2,params0);\n someDataPred2 = PredictNRAffineMatches(someDataRef,params0);\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tGain/Offset model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n paramsGainOffset = params0;\n %params0\n \n % Exp\n FITEXP = 1;\n if (FITEXP)\n vlb = [x2(1) x2(2)/100 x2(3) -100*abs(x2(4)) 0.5];\n vub = [x2(1) x2(2)*100 x2(3) 100*abs(x2(4)) 4];\n endPointWeight = 10;\n x3 = fmincon(@InlineMinFunction,x2,[],[],[],[],vlb,vub,[],options);\n endPointWeight = 0;\n x3 = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x3,params0);\n someDataPred3 = PredictNRAffineMatches(someDataRef,params0);\n else\n x3 = x2;\n someDataPred3 = PredictNRAffineMatches(someDataRef,params0);\n end\n fprintf(fid,'\\tGain/Offset/Exp model: gain = %0.2g, offset = %0.2g, log10 gain change = %0.2g, log10 effective offset = %0.2g, rmax = %0.5g, exp = %0.2g\\n',...\n params0.gain,params0.offset,log10(g),log10(l0),params0.rmax,params0.exp);\n \n % Let rMax vary too. This doesn't add much if exponent varies.. Tp the fits, so I\n % uncluttered plots by removing. 
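% Sketch of the two-pass endpoint weighting used in the exponent fit above
% (model and data invented; only the weighting pattern is taken from the
% script): pass one up-weights the first and last residuals to pin down the
% black and white points, pass two restarts from that solution with the
% extra weight removed.
xg      = linspace(0.1, 1, 15);
yObs    = xg.^2 + 0.02*randn(size(xg));
lossFun = @(p, w) sum((yObs - xg.^p).^2) + ...
          w*((yObs(1) - xg(1)^p)^2 + (yObs(end) - xg(end)^p)^2);

p0 = 1;
pA = fminsearch(@(p) lossFun(p, 10), p0);   % endpoints weighted
pB = fminsearch(@(p) lossFun(p, 0),  pA);   % plain fit, warm start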
Have not looked at whether varying\n % rMax can be substituted for varying the exponent.\n FITMAX = 0;\n if (FITMAX)\n vlb = [x3(1) x3(2)/100 0.5 -100*abs(x3(4)) x3(5)];\n vub = [x3(1) x3(2)*100 2 100*abs(x3(4)) x3(5)];\n x = fmincon(@InlineMinFunction,x3,[],[],[],[],vlb,vub,[],options);\n params0 = ListToParams(x,params0);\n someDataPred = PredictNRAffineMatches(someDataRef,params0);\n plot(log10(someDataPred3),log10(someDataRef),'k','LineWidth',1.5);\n %params0\n else\n x = x3;\n someDataPred = PredictNRAffineMatches(someDataRef,params0);\n end\n \n % Dump of interesting parameters\n g = params0.gainRef/params0.gain;\n l0 = -params0.offsetRef + params0.offset/g;\n fprintf(fid,'\\tPredicted (actual) black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(1),someDataMatch(1),someDataPred(end),someDataMatch(end));\n fprintf(fid,'\\tOne-in predicted black point %0.2g (%0.2g); white point %0.2g (%0.2g)\\n',someDataPred(2),someDataMatch(2),someDataPred(end-1),someDataMatch(end-1));\n \n % Plot stuff of interest\n plot(log10(someDataPred),log10(someDataRef),'r','LineWidth',3);\n plot(log10(someDataPred2),log10(someDataRef),'g','LineWidth',1);\n xlim(xDataLim); ylim(yDataLim);\n\n % Add plot of response functions for ref and match\n % Subtract the old offset, and truncate below 0 to zero.\n % We allow an optional remapping of the response back to the\n % luminance/reflectance space of the reference matches. This\n % mapping is static across contexts. This turns out not to\n % terribly interesting.\n lumVals = logspace(-2,3,1000); \n ySub = params0.gainRef*(lumVals-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefSmooth = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gainRef*(someDataRef-params0.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([params0.rmaxRef 1 params0.expRef],ySub);\n if (RESPONSE_REMAP)\n respRef = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRef)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(lumVals-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatchSmooth = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatchSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatchSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = params0.gain*(someDataMatch-params0.offset);\n ySub(ySub <= 0) = 0+eps;\n respMatch = ComputeNakaRushton([params0.rmax 1 params0.exp],ySub);\n if (RESPONSE_REMAP)\n respMatch = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respMatch)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsGainOffset.gain*(lumVals-paramsGainOffset.offset);\n ySub(ySub <= 0) = 0+eps;\n respGainOffsetSmooth = ComputeNakaRushton([paramsGainOffset.rmax 1 paramsGainOffset.exp],ySub);\n if (RESPONSE_REMAP)\n respGainOffsetSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respGainOffsetSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n ySub = paramsRefNoFit.gainRef*(lumVals-paramsRefNoFit.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRefNoFitSmooth = ComputeNakaRushton([paramsRefNoFit.rmaxRef 1 paramsRefNoFit.expRef],ySub);\n if (RESPONSE_REMAP)\n respRefNoFitSmooth = log10(InvertNakaRushton([params0.rmaxRef 1 params0.expRef],respRefNoFitSmooth)/params0.gainRef+params0.offsetRef);\n end\n \n subplot(1,2,2); hold 
on\n plot(log10(someDataRef),respRef,'ko','MarkerFaceColor','k','MarkerSize',6);\n plot(log10(lumVals),respRefSmooth,'k:','LineWidth',2);\n %plot(log10(lumVals),respRefNoFitSmooth,'b','LineWidth',1);\n \n plot(log10(someDataMatch),respMatch,'bo','MarkerFaceColor','b','MarkerSize',8);\n plot(log10(lumVals),respMatchSmooth,'r','LineWidth',2);\n plot(log10(lumVals),respGainOffsetSmooth,'g','LineWidth',1);\n \n xlim(xDataLim);\n if (RESPONSE_REMAP)\n ylim(yDataLim);\n ylabel('Remapped Response');\n else\n ylim([0 1.2]);\n ylabel('Response');\n end\n xlabel('Log10 luminance');\n \n % Save figure\n cd(figPrefix);\n savefig(['TestFit_' num2str(whichData) '.pdf'],dataFig,'pdf');\n cd('..');\n \n fprintf(fid,'\\n');\n \n %% Fill output summary structure\n summaryStructs(whichData-1).whitePoint = someDataPred(end);\n summaryStructs(whichData-1).blackPoint = someDataPred(1);\n summaryStructs(whichData-1).range = someDataPred(end) - someDataPred(1);\n summaryStructs(whichData-1).exp = params0.exp;\n predictExpFromWB(whichData-1,1) = summaryStructs(whichData-1).whitePoint;\n predictExpFromWB(whichData-1,2) = log10(summaryStructs(whichData-1).range);\n expVals(whichData-1,1) = summaryStructs(whichData-1).exp;\n \n %% Range versus exp figure\n figure(rangeFig)\n subplot(1,2,1); hold on\n plot(summaryStructs(whichData-1).range,summaryStructs(whichData-1).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('Range'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\n subplot(1,2,2); hold on\n plot(summaryStructs(whichData-1).whitePoint,summaryStructs(whichData-1).exp,'ro','MarkerFaceColor','r','MarkerSize',8);\n xlabel('White Point'); ylabel('Exponent');\n xlim([0 300]); ylim([0 4]);\nend\nfclose(fid);\n\n%% Try to predict exponents\nexpRegCoefs = predictExpFromWB\\expVals;\npredictedExpVals = predictExpFromWB*expRegCoefs;\nexpPredFig = figure; clf; hold on\nplot(expVals,predictedExpVals,'ro','MarkerSize',8,'MarkerFaceColor','r');\nplot([0 4],[0 4],'k');\nxlim([0 4]); ylim([0 4]);\nxlabel('Exponent'); ylabel('Predicted Exponent');\naxis('square');\n\n%% Write out summary structs\ncd(figPrefix);\nWriteStructsToText('SummaryData.txt',summaryStructs);\ncd('..');\n\n%% Save plot of exponent versus range\ncd(figPrefix);\nsavefig(['ExpVersusRange.pdf'],rangeFig,'pdf');\ncd('..');\n \n%% INLINE FUNCTION TO BE USED FOR CTF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunction(x)\n paramsInline = ListToParams(x,params0);\n yPred = PredictNRAffineMatches(useDataRef,paramsInline);\n yPred(yPred <= 0) = 0 + eps;\n yDiff = log10(useDataMatch)-log10(yPred);\n f = sum(yDiff.^2) + endPointWeight*yDiff(1).^2 + endPointWeight*yDiff(end).^2;\n end\n\n%% INLINE FUNCTION TO BE USED FOR REF MINIMIZATION.\n% Inline functions have the feature that any variable they use that is\n% not defined in the function has its value inherited\n% from the workspace of wherever they were invoked.\n%\n% Variables set here are also in the base workspace, and can change the values of\n% variables with the same name there. 
This can produce all sorts of problems,\n% so be careful.\n function f = InlineMinFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n f = sum(abs(yDiff));\n %f = sum(yDiff.^2);\n end\n function [g,geq] = InlineConFunctionRef(x)\n paramsInline = ListToParamsRef(x,params0);\n \n % Subtract the old offset, and truncate below 0 to zero\n ySub = paramsInline.gainRef*(useDataRef-paramsInline.offsetRef);\n ySub(ySub <= 0) = 0+eps;\n respRef = ComputeNakaRushton([paramsInline.rmaxRef 1 paramsInline.expRef],ySub);\n yDiff = targetRespRef-respRef;\n g = max(abs(yDiff))-conTolRef;\n geq = 0;\n end\nend\n\n%% Param translation\nfunction params = ListToParams(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.gain = x(2);\nparams.rmax = x(3);\nparams.offset = x(4);\nparams.exp = x(5);\n\nend\n\nfunction x = ParamsToList(params)\n\nx = [params.gainRef params.gain params.rmax params.offset params.exp];\n\nend\n\nfunction params = ListToParamsRef(x,params0)\n\nparams = params0;\nparams.gainRef = x(1);\nparams.rmaxRef = x(2);\nparams.offsetRef = x(3);\nparams.expRef = x(4);\n\nend\n\nfunction x = ParamsToListRef(params)\n\nx = [params.gainRef params.rmaxRef params.offsetRef params.expRef];\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "rayleighMatchPittDiagramTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/rayleighMatchPittDiagramTutorial/rayleighMatchPittDiagramTutorial.m", "size": 12692, "source_encoding": "utf_8", "md5": "8f45f8a84d49fb70384e5e670eaac090", "text": "% Illustrate how Rayleigh matches and Pitt diagram work\n%\n% Description:\n% Simulate Rayleigh match performance and plot in the form of what\n% I think is called a Pitt diagram. Illustrates the principles of\n% color vision testing by anomaloscope.\n%\n% The simulated anomaloscope allows adjustment of a monochromatic test\n% and the ratio of two monochromatic primaries in the match. The routine\n% computes the cone responses to the test and match and from these a\n% color difference. Matches are predicted for test intensity and mixing\n% ratio parameters where the color difference is below a criterion.\n%\n% The locus of matches is plotted in a Pitt diagram, where the x-axis is\n% the mixing ratio and the y-axis is the test intensity. The output\n% diagram reproduces the qualitative features of the one that came in the\n% manual for our anamoloscope.\n%\n% The color difference model is very simple and is briefly described in\n% the header comments for routine ComputeConfusions, which is at the\n% bottom of this file.\n%\n% You can play around with the modeled observers and the properties of\n% the simulated anomaloscope by adjusting parameters.\n\n% History\n% 07/03/19 dhb Wrote it.\n% 09/03/19 dhb, dce Modified to use Asano et al. individual difference\n% parameters, but in the end does the same thing.\n% However, and enterprising person can now examine\n% the effect of changing photopigment density.\n\n%% Clear\nclear; close all;\n\n%% Parameters\n%\n\n% Cone lambda max. Set two of them to be the same\n% to create a dichromat, etc.\nlambdaMaxes = [ ...\n [558.9 550 420.7]' ... % Deuteranomalous\n [538 530.3 420.7]' ... % Protanomalous\n [558.9 530.3 420.7]' ... 
% Normal trichromat\n [558.9 530.3 420.7]' ... % Normal trichromat\n [558.9 530.3 420.7]']; % Normal trichromat\n \n% We actually specify the cones as a shift relative to a \n% nomogram generated lambda max. These base values are given\n% here. If you make this match the above, then all shifts\n% end up as zero. But you can specify deviations, and control\n% what the shift is relative to.\nbaseLambdaMaxes = [ ...\n [558.9 550 420.7]' ... \n [538 530.3 420.7]' ... \n [558.9 530.3 420.7]' ... \n [558.9 530.3 420.7]' ... \n [558.9 530.3 420.7]']; \n\n% You can also allow the specified photopigment density to\n% vary. Enter these as percent changes relative to nominal\n% values. Can be positive or negative.\ndphotopigments = [ ...\n [0 0 0]' ... \n [0 0 0]' ... \n [0 0 0]' ... \n [-90 0 0]' ... \n [0 90 0]'];\n\ntheColors = [ 'r' 'g' 'k' 'b' 'y'];\ntheLegend = {'DA' 'PA' 'N' 'LDen' 'MDen' };\n\n% Convert specified lambda max values to shifts from the nominal CIE\n% standard values.\nnominalLambdaMax = [558.9 530.3 420.7];\nfor ii = 1:size(lambdaMaxes,2)\n indDiffParams(ii).dlens= 0;\n indDiffParams(ii).dmac = 0;\n indDiffParams(ii).dphotopigment = dphotopigments(:,ii)';\n indDiffParams(ii).lambdaMaxShift = lambdaMaxes(:,ii)' - baseLambdaMaxes(:,ii)';\n indDiffParams(ii).shiftType = 'linear';\nend\n\n% Threshold difference below which it is a match\n% Fussed with this by hand to adjust plot to taste.\nthresholdVal = 0.12;\n\n% Apparatus range parameters\n%\n% Mixing ratio of zero is all green primary, 1 is all red\n% (assuming shorter primary is first one specified in routine \n% below.)\ntestIntensityRange = 0.01:0.001:0.4;\nmixingRatioRange = 0.1:0.001:1;\n\n%% Loop to calculate matching locus for each set of cones\n%\n% For each set of specified cone pigment lambda max, this\n% computes the matching range and adds to the Pitt diagram\n% plot.\ntheFigure = figure; clf; hold on\nfor kk = 1:size(lambdaMaxes,2)\n \n %lambdaMax = lambdaMaxes(:,kk);\n\n \n % Function below does the work, based on lambdaMax.\n % Most operating parameters are set in the function itself.\n [testIntensity{kk},mixingRatio{kk},matchDiff{kk}] = ComputeConfusions(baseLambdaMaxes(:,kk),indDiffParams(kk),testIntensityRange,mixingRatioRange);\n \n % This plot will show the color difference as a function of mixing ratio\n % and test intensity, one plot per set of lambda max values. 
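% A tiny sketch of the thresholding step described above: the plotted match
% locus is just every (mixingRatio, testIntensity) cell whose color
% difference falls below the criterion. The quadratic surface here is an
% invented stand-in for matchDiff; only the find-and-plot pattern mirrors
% the script.
[mrGrid, tiGrid] = meshgrid(0.1:0.01:1, 0.01:0.01:0.4);
fakeDiff = (mrGrid - 0.55).^2 + 4*(tiGrid - 0.2).^2;
thresholdSketch = 0.01;

locusIndex = find(fakeDiff < thresholdSketch);
figure; hold on
plot(mrGrid(locusIndex), tiGrid(locusIndex), 'ko', 'MarkerFaceColor', 'k');
xlabel('Mixing Ratio (0 -> green; 1 -> red)');
ylabel('Test Intensity');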
I found these \n % useful for development but not all that instructive in the end, so\n % they conditional and off by default.\n diffPlots = false;\n if (diffPlots)\n figure; clf; hold on\n mesh(mixingRatio{kk},testIntensity{kk},matchDiff{kk});\n colormap(winter)\n view([2 14]);\n zlim([0 2]);\n xlim([min(mixingRatioRange) max(mixingRatioRange)]);\n ylim([min(testIntensityRange) max(testIntensityRange)]);\n xlabel(' Mixing Ratio (0 -> green; 1 -> red)');\n ylabel('Test Intensity');\n zlabel('Color Difference');\n end\n \n figure(theFigure);\n index = find(matchDiff{kk} < thresholdVal);\n plot(mixingRatio{kk}(index),testIntensity{kk}(index),[theColors(kk) 'o'],'MarkerFaceColor',theColors(kk));\n \nend\n\n% Finish off the plot\nfigure(theFigure);\nxlim([min(mixingRatioRange) max(mixingRatioRange)]);\nylim([min(testIntensityRange) max(testIntensityRange)]);\nxlabel(' Mixing Ratio (0 -> green; 1 -> red)');\nylabel('Test Intensity');\naxis('square')\nlegend(theLegend);\ntitle('Pitt Diagram')\nFigureSave('pittDiagram.pdf',theFigure,'pdf');\n \n% Compute locus of confusions in intensity-ratio plot\n%\n% Syntax:\n% [testIntensity,mixingRatio,matchDiff] = ComputeConfusions(lambdaMax,indDiffParams,testIntensityRange,mixingRatioRange)\n%\n% Description:\n% Take lambdaMax values and generate receptor fundamentals. Then loop\n% over all test intensities and mixing ratio combinations and compute a\n% measure of color difference between test and corresponding match.\n%\n% Many key parameters are specified within this routine rather than\n% passed, because this is a tutorial script. These include primary\n% wavelengths, matching primary intensities, parameters describing color\n% difference calculation, etc.\n%\n% The color difference is computed based on vector length in an\n% post-receptoral contrast space, with different weights applied to the\n% different post-receptoral contrast directions. It is a very rough and\n% ready calculation, but this aspect is not key to demonstrate the\n% principles we are interested in here.\n%\n% Inputs:\n% lambdaMax Column vector of three receptor photopigment lambda\n% max (wavelength of peak sensitivity) values, in nm.\n% indDiffParams Passed to ComputeCIEConeFundamentals.\n% Ignored if empty. If you pass this\n% structure, then lambdaMax should be empty,\n% and vice-versa. That is, only adjust the\n% fundamentals using one of the two available\n% methods.\n% testIntensityRange Row vector of test intensities. Arbitrary\n% units. Values between 0 and 1 are about\n% right given the way the other parameters are\n% set.\n% mixingRatioRange Row vector of g/r mixing ratios. 0 means all\n% green primary, 1 means all red. 
Here green\n% and red are really defined by the\n% wavelengths of the two matching primaries\n% defined in the parameters for this routine.\n%\n% Outputs:\n% testIntensity Matrix where entry i,j is the test intensity\n% given by the ith intensity in testIntensityRange,\n% and j indexes the mixing ratios.\n% mixingRatio Matrix where entry i,j is the mixingRatio\n% given by the jth tentry of mixingRatioRange,\n% and i indexes the test intensities\n% matchDiff Matrix of color differences, where entry i,j\n% corresponds to the test intensity and mixing\n% ratio in entry i,j of matrices testIntensity\n% and mixingRatio.\n\n% History:\n% 07/04/19 dhb Made this its own routine.\n\nfunction [testIntensity,mixingRatio,matchDiff] = ComputeConfusions(lambdaMax,indDiffParams,testIntensityRange,mixingRatioRange)\n\n% Check\n% if (~isempty(indDiffParams) & ~isempty(lambdaMax))\n% error('Don''t risk using two different ways to adjust cone fundamentals.');\n% end\n\n% Observer parameters\nfieldSizeDegs = 2;\nobserverAge = 32;\npupilDiameterMM = 3;\n\n% Wavelength sampling. Life is easiest at 1 nm sampling.\nS = [380 1 401];\nwls = SToWls(S);\n\n% Apparatus parameters. These match the Nagel in wavelengths.\ntestWavelength = 589;\nmatchWavelength1 = 545;\nmatchWavelength2 = 670;\n\n% I fussed with these to rotate the D line to be horizontal in the plot.\n% In real life, they are parameters of the apparatus.\nmatchIntensity1 = 0.12;\nmatchIntensity2 = 2.5;\n\n% Compute indices so that we can set spectra below\ntestIndex = find(wls == testWavelength);\nmatchIndex1 = find(wls == matchWavelength1);\nmatchIndex2 = find(wls == matchWavelength2);\n\n% Color difference computation parameters.\n% I fussed with these to make the uncertainty\n% regions look a bit like those in our device's\n% diagram.\nLMRatio = 2;\nlumWeight = 4;\nrgWeight = 2;\nsWeight = 0.5;\n\n% Act like we have an added background that suppresses S cone signals.\n% Otherwise small S cone differences explode when we compute contrast,\n% because of small denominator.\naddedBackgroundCones = [0 0 1]';\n\n% Generate match spectra before application of mixing ratio\nmatchSpectrum1 = zeros(size(wls)); matchSpectrum1(matchIndex1) = matchIntensity1;\nmatchSpectrum2 = zeros(size(wls)); matchSpectrum2(matchIndex2) = matchIntensity2;\n\n% Generate the cones\n%\n% The weird looking call around the CompueCIEConeFundamentals has the net\n% effect of putting the cone fundamentals into energy units, and then we\n% normalize each to a peak of one.\n%\n% See ComputeCIEConeFundamentals for more info, and for other ways to shift\n% individual difference parameters.\nT_cones = EnergyToQuanta(S, ...\n ComputeCIEConeFundamentals(S,fieldSizeDegs,observerAge,pupilDiameterMM,lambdaMax, ...\n [],[],[],[],[],indDiffParams)')';\n\nfor ii = 1:size(T_cones,1)\n T_cones(ii,:) = T_cones(ii,:)/max(T_cones(ii,:));\nend\n\n% Make diagnostic plot of cone fundamentals?\nFUNDAMENTAL_PLOTS = false;\nfigure; clf; hold on;\nplot(SToWls(S),T_cones(1,:),'r','LineWidth',2);\nplot(SToWls(S),T_cones(2,:),'g','LineWidth',2);\nplot(SToWls(S),T_cones(3,:),'b','LineWidth',2);\nxlabel('Wavelength');\nylabel('Fundamental');\n\n% Compute cone respones to test and match\n%\n% We just do this for all possible test intensities and match ratios, as\n% specified in the parameters section.\n\n% Construct each test and compute cone responses\nfor ii = 1:length(testIntensityRange)\n testIntensity = testIntensityRange(ii);\n testSpectrum{ii} = zeros(size(wls)); testSpectrum{ii}(testIndex) = testIntensity;\n 
testCones{ii} = T_cones*testSpectrum{ii};\nend\n\n% Construct each match and compute cone responses\nfor jj = 1:length(mixingRatioRange)\n mixingRatio = mixingRatioRange(jj);\n matchSpectrum{jj} = (1-mixingRatio)*matchSpectrum1 + (mixingRatio)*matchSpectrum2;\n matchCones{jj} = T_cones*matchSpectrum{jj};\nend\n\n% Compute a measure of color difference for each test/match pairing\n%\n% We'll take the test as contributing to the adapting background and compute difference as\n% cone contrast with respect to that plus the added background as specfied\n% above.\nfor ii = 1:length(testIntensityRange)\n for jj = 1:length(mixingRatioRange)\n effectiveBackgroundCones{ii} = testCones{ii} + addedBackgroundCones;\n coneContrastDiff = (testCones{ii}-matchCones{jj})./effectiveBackgroundCones{ii};\n \n % Approximate three post-receptoral constrasts\n lumContrast(ii,jj) = (LMRatio*coneContrastDiff(1)+coneContrastDiff(2))/(LMRatio+1);\n rgContrast(ii,jj) = coneContrastDiff(1)-coneContrastDiff(2);\n sContrast(ii,jj) = coneContrastDiff(3);\n \n % Take weighted sum of squares. I'm making weights up on grounds\n % that rg is most sensitive, lum next, and s last. Very back of\n % envelope and may not be right for uniform fields.\n testIntensity(ii,jj) = testIntensityRange(ii);\n mixingRatio(ii,jj) = mixingRatioRange(jj);\n matchDiff(ii,jj) = sqrt((lumWeight*lumContrast(ii,jj))^2 + (rgWeight*rgContrast(ii,jj))^2 + (sWeight*sContrast(ii,jj))^2);\n end\nend\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "rayleighMatchPittDiagramTutorialDensity.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/rayleighMatchPittDiagramTutorial/rayleighMatchPittDiagramTutorialDensity.m", "size": 12691, "source_encoding": "utf_8", "md5": "b6b9153e08cc556256ce82415e7128ba", "text": "% Illustrate how Rayleigh matches and Pitt diagram work\n%\n% Description:\n% Simulate Rayleigh match performance and plot in the form of what\n% I think is called a Pitt diagram. Illustrates the principles of\n% color vision testing by anomaloscope.\n%\n% The simulated anomaloscope allows adjustment of a monochromatic test\n% and the ratio of two monochromatic primaries in the match. The routine\n% computes the cone responses to the test and match and from these a\n% color difference. Matches are predicted for test intensity and mixing\n% ratio parameters where the color difference is below a criterion.\n%\n% The locus of matches is plotted in a Pitt diagram, where the x-axis is\n% the mixing ratio and the y-axis is the test intensity. The output\n% diagram reproduces the qualitative features of the one that came in the\n% manual for our anamoloscope.\n%\n% The color difference model is very simple and is briefly described in\n% the header comments for routine ComputeConfusions, which is at the\n% bottom of this file.\n%\n% You can play around with the modeled observers and the properties of\n% the simulated anomaloscope by adjusting parameters.\n\n% History\n% 07/03/19 dhb Wrote it.\n% 09/03/19 dhb, dce Modified to use Asano et al. individual difference\n% parameters, but in the end does the same thing.\n% However, and enterprising person can now examine\n% the effect of changing photopigment density.\n\n%% Clear\nclear; close all;\n\n%% Parameters\n%\n\n% Cone lambda max. Set two of them to be the same\n% to create a dichromat, etc.\nlambdaMaxes = [ ...\n [558.9 550 420.7]' ... % Deuteranomalous\n [538 530.3 420.7]' ... % Protanomalous\n [558.9 530.3 420.7]' ... 
% Normal trichromat\n [558.9 530.3 420.7]' ... % Normal trichromat\n [558.9 530.3 420.7]']; % Normal trichromat\n \n% We actually specify the cones as a shift relative to a \n% nomogram generated lambda max. These base values are given\n% here. If you make this match the above, then all shifts\n% end up as zero. But you can specify deviations, and control\n% what the shift is relative to.\nbaseLambdaMaxes = [ ...\n [558.9 550 420.7]' ... \n [538 530.3 420.7]' ... \n [558.9 530.3 420.7]' ... \n [558.9 530.3 420.7]' ... \n [558.9 530.3 420.7]']; \n\n% You can also allow the specified photopigment density to\n% vary. Enter these as percent changes relative to nominal\n% values. Can be positive or negative.\ndphotopigments = [ ...\n [0 0 0]' ... \n [0 0 0]' ... \n [0 0 0]' ... \n [-90 0 0]' ... \n [0 90 0]'];\n\ntheColors = [ 'r' 'g' 'k' 'b' 'y'];\ntheLegend = {'DA' 'PA' 'N' 'LDen' 'MDen' };\n\n% Convert specified lambda max values to shifts from the nominal CIE\n% standard values.\nnominalLambdaMax = [558.9 530.3 420.7];\nfor ii = 1:size(lambdaMaxes,2)\n indDiffParams(ii).dlens= 0;\n indDiffParams(ii).dmac = 0;\n indDiffParams(ii).dphotopigment = dphotopigments(:,ii)';\n indDiffParams(ii).lambdaMaxShift = lambdaMaxes(:,ii)' - baseLambdaMaxes(:,ii)';\n indDiffParams(ii).shiftType = 'linear';\nend\n\n% Threshold difference below which it is a match\n% Fussed with this by hand to adjust plot to taste.\nthresholdVal = 0.12;\n\n% Apparatus range parameters\n%\n% Mixing ratio of zero is all green primary, 1 is all red\n% (assuming shorter primary is first one specified in routine \n% below.)\ntestIntensityRange = 0.01:0.001:0.4;\nmixingRatioRange = 0.1:0.001:1;\n\n%% Loop to calculate matching locus for each set of cones\n%\n% For each set of specified cone pigment lambda max, this\n% computes the matching range and adds to the Pitt diagram\n% plot.\ntheFigure = figure; clf; hold on\nfor kk = 1:size(lambdaMaxes,2)\n \n %lambdaMax = lambdaMaxes(:,kk);\n\n \n % Function below does the work, based on lambdaMax.\n % Most operating parameters are set in the function itself.\n [testIntensity{kk},mixingRatio{kk},matchDiff{kk}] = ComputeConfusions(baseLambdaMaxes(:,kk),indDiffParams(kk),testIntensityRange,mixingRatioRange);\n \n % This plot will show the color difference as a function of mixing ratio\n % and test intensity, one plot per set of lambda max values. 
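% Sketch of the cone-contrast arithmetic behind the color-difference model
% described in the header above and carried out in ComputeConfusions further
% down: differences are divided by the test cone signals plus a fixed added
% background, which keeps the S-cone denominator away from zero, and the
% three post-receptoral contrasts are combined with unequal weights. The
% cone values here are invented; the weights mirror the routine below.
testConesEx  = [0.30 0.25 0.002]';     % L, M, S responses to the test
matchConesEx = [0.32 0.24 0.001]';     % responses to one candidate match
addedBackgroundConesEx = [0 0 1]';     % tames the S-cone denominator

effectiveBackground = testConesEx + addedBackgroundConesEx;
coneContrastDiff = (testConesEx - matchConesEx)./effectiveBackground;

LMRatio = 2; lumWeight = 4; rgWeight = 2; sWeight = 0.5;
lumC = (LMRatio*coneContrastDiff(1) + coneContrastDiff(2))/(LMRatio + 1);
rgC  = coneContrastDiff(1) - coneContrastDiff(2);
sC   = coneContrastDiff(3);
diffEx = sqrt((lumWeight*lumC)^2 + (rgWeight*rgC)^2 + (sWeight*sC)^2);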
I found these \n % useful for development but not all that instructive in the end, so\n % they conditional and off by default.\n diffPlots = false;\n if (diffPlots)\n figure; clf; hold on\n mesh(mixingRatio{kk},testIntensity{kk},matchDiff{kk});\n colormap(winter)\n view([2 14]);\n zlim([0 2]);\n xlim([min(mixingRatioRange) max(mixingRatioRange)]);\n ylim([min(testIntensityRange) max(testIntensityRange)]);\n xlabel(' Mixing Ratio (0 -> green; 1 -> red)');\n ylabel('Test Intensity');\n zlabel('Color Difference');\n end\n \n figure(theFigure);\n index = find(matchDiff{kk} < thresholdVal);\n plot(mixingRatio{kk}(index),testIntensity{kk}(index),[theColors(kk) 'o'],'MarkerFaceColor',theColors(kk));\n \nend\n\n% Finish off the plot\nfigure(theFigure);\nxlim([min(mixingRatioRange) max(mixingRatioRange)]);\nylim([min(testIntensityRange) max(testIntensityRange)]);\nxlabel(' Mixing Ratio (0 -> green; 1 -> red)');\nylabel('Test Intensity');\naxis('square')\nlegend(theLegend);\ntitle('Pitt Diagram')\nFigureSave('pittDiagram.pdf',theFigure,'pdf');\n \n% Compute locus of confusions in intensity-ratio plot\n%\n% Syntax:\n% [testIntensity,mixingRatio,matchDiff] = ComputeConfusions(lambdaMax,indDiffParams,testIntensityRange,mixingRatioRange)\n%\n% Description:\n% Take lambdaMax values and generate receptor fundamentals. Then loop\n% over all test intensities and mixing ratio combinations and compute a\n% measure of color difference between test and corresponding match.\n%\n% Many key parameters are specified within this routine rather than\n% passed, because this is a tutorial script. These include primary\n% wavelengths, matching primary intensities, parameters describing color\n% difference calculation, etc.\n%\n% The color difference is computed based on vector length in an\n% post-receptoral contrast space, with different weights applied to the\n% different post-receptoral contrast directions. It is a very rough and\n% ready calculation, but this aspect is not key to demonstrate the\n% principles we are interested in here.\n%\n% Inputs:\n% lambdaMax Column vector of three receptor photopigment lambda\n% max (wavelength of peak sensitivity) values, in nm.\n% indDiffParams Passed to ComputeCIEConeFundamentals.\n% Ignored if empty. If you pass this\n% structure, then lambdaMax should be empty,\n% and vice-versa. That is, only adjust the\n% fundamentals using one of the two available\n% methods.\n% testIntensityRange Row vector of test intensities. Arbitrary\n% units. Values between 0 and 1 are about\n% right given the way the other parameters are\n% set.\n% mixingRatioRange Row vector of g/r mixing ratios. 0 means all\n% green primary, 1 means all red. 
Here green\n% and red are really defined by the\n% wavelengths of the two matching primaries\n% defined in the parameters for this routine.\n%\n% Outputs:\n% testIntensity Matrix where entry i,j is the test intensity\n% given by the ith intensity in testIntensityRange,\n% and j indexes the mixing ratios.\n% mixingRatio Matrix where entry i,j is the mixingRatio\n% given by the jth tentry of mixingRatioRange,\n% and i indexes the test intensities\n% matchDiff Matrix of color differences, where entry i,j\n% corresponds to the test intensity and mixing\n% ratio in entry i,j of matrices testIntensity\n% and mixingRatio.\n\n% History:\n% 07/04/19 dhb Made this its own routine.\n\nfunction [testIntensity,mixingRatio,matchDiff] = ComputeConfusions(lambdaMax,indDiffParams,testIntensityRange,mixingRatioRange)\n\n% Check\n% if (~isempty(indDiffParams) & ~isempty(lambdaMax))\n% error('Don''t risk using two different ways to adjust cone fundamentals.');\n% end\n\n% Observer parameters\nfieldSizeDegs = 2;\nobserverAge = 32;\npupilDiameterMM = 3;\n\n% Wavelength sampling. Life is easiest at 1 nm sampling.\nS = [380 1 401];\nwls = SToWls(S);\n\n% Apparatus parameters. These match the Nagel in wavelengths.\ntestWavelength = 589;\nmatchWavelength1 = 545;\nmatchWavelength2 = 670;\n\n% I fussed with these to rotate the D line to be horizontal in the plot.\n% In real life, they are parameters of the apparatus.\nmatchIntensity1 = 0.12;\nmatchIntensity2 = 2.5;\n\n% Compute indices so that we can set spectra below\ntestIndex = find(wls == testWavelength);\nmatchIndex1 = find(wls == matchWavelength1);\nmatchIndex2 = find(wls == matchWavelength2);\n\n% Color difference computation parameters.\n% I fussed with these to make the uncertainty\n% regions look a bit like those in our device's\n% diagram.\nLMRatio = 2;\nlumWeight = 4;\nrgWeight = 2;\nsWeight = 0.5;\n\n% Act like we have an added background that suppresses S cone signals.\n% Otherwise small S cone differences explode when we compute contrast,\n% because of small denominator.\naddedBackgroundCones = [0 0 1]';\n\n% Generate match spectra before application of mixing ratio\nmatchSpectrum1 = zeros(size(wls)); matchSpectrum1(matchIndex1) = matchIntensity1;\nmatchSpectrum2 = zeros(size(wls)); matchSpectrum2(matchIndex2) = matchIntensity2;\n\n% Generate the cones\n%\n% The weird looking call around the CompueCIEConeFundamentals has the net\n% effect of putting the cone fundamentals into energy units, and then we\n% normalize each to a peak of one.\n%\n% See ComputeCIEConeFundamentals for more info, and for other ways to shift\n% individual difference parameters.\nT_cones = EnergyToQuanta(S, ...\n ComputeCIEConeFundamentals(S,fieldSizeDegs,observerAge,pupilDiameterMM,lambdaMax, ...\n [],[],[],[],[],indDiffParams)')';\n\nfor ii = 1:size(T_cones,1)\n T_cones(ii,:) = T_cones(ii,:)/max(T_cones(ii,:));\nend\n\n% Make diagnostic plot of cone fundamentals?\nFUNDAMENTAL_PLOTS = true;\nfigure; clf; hold on;\nplot(SToWls(S),T_cones(1,:),'r','LineWidth',2);\nplot(SToWls(S),T_cones(2,:),'g','LineWidth',2);\nplot(SToWls(S),T_cones(3,:),'b','LineWidth',2);\nxlabel('Wavelength');\nylabel('Fundamental');\n\n% Compute cone respones to test and match\n%\n% We just do this for all possible test intensities and match ratios, as\n% specified in the parameters section.\n\n% Construct each test and compute cone responses\nfor ii = 1:length(testIntensityRange)\n testIntensity = testIntensityRange(ii);\n testSpectrum{ii} = zeros(size(wls)); testSpectrum{ii}(testIndex) = testIntensity;\n 
testCones{ii} = T_cones*testSpectrum{ii};\nend\n\n% Construct each match and compute cone responses\nfor jj = 1:length(mixingRatioRange)\n mixingRatio = mixingRatioRange(jj);\n matchSpectrum{jj} = (1-mixingRatio)*matchSpectrum1 + (mixingRatio)*matchSpectrum2;\n matchCones{jj} = T_cones*matchSpectrum{jj};\nend\n\n% Compute a measure of color difference for each test/match pairing\n%\n% We'll take the test as contributing to the adapting background and compute difference as\n% cone contrast with respect to that plus the added background as specfied\n% above.\nfor ii = 1:length(testIntensityRange)\n for jj = 1:length(mixingRatioRange)\n effectiveBackgroundCones{ii} = testCones{ii} + addedBackgroundCones;\n coneContrastDiff = (testCones{ii}-matchCones{jj})./effectiveBackgroundCones{ii};\n \n % Approximate three post-receptoral constrasts\n lumContrast(ii,jj) = (LMRatio*coneContrastDiff(1)+coneContrastDiff(2))/(LMRatio+1);\n rgContrast(ii,jj) = coneContrastDiff(1)-coneContrastDiff(2);\n sContrast(ii,jj) = coneContrastDiff(3);\n \n % Take weighted sum of squares. I'm making weights up on grounds\n % that rg is most sensitive, lum next, and s last. Very back of\n % envelope and may not be right for uniform fields.\n testIntensity(ii,jj) = testIntensityRange(ii);\n mixingRatio(ii,jj) = mixingRatioRange(jj);\n matchDiff(ii,jj) = sqrt((lumWeight*lumContrast(ii,jj))^2 + (rgWeight*rgContrast(ii,jj))^2 + (sWeight*sContrast(ii,jj))^2);\n end\nend\n\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "exploreMemBiasTutorial.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/exploreMemBiasTutorial/exploreMemBiasTutorial.m", "size": 10221, "source_encoding": "utf_8", "md5": "53a869d8f12366bbce514f93cab2eb50", "text": "function exploreMemBiasTutorial\n% exploreMemBiasTutorial\n%\n% Work out predictions of a very simple memory model. The idea is to see\n% what the predictions are if we start with the ideas that\n% a) there is a non-linear transduction between the stimulus variable and perceptual response.\n% b) noise is added in the perceptual domain\n% c) noise can have different variance depending on delay.\n%\n% This is worked out for the circular stimulus variable hue, but nothing\n% much would change in the model for an intensive variable.\n%\n% 4/25/09 dhb Started on it.\n\n%% Clear\nclear; close all;\n\n%% Parameters\n% Specify precision as noise of a Gaussian variable. There is a separate\n% noise for the test stimulus and the comparison stimulus. This is\n% because we want to model both simultaneous presentation and delayed, and we'll\n% do this by mucking with the variances.\ntestSd = 0.06;\ncomparisonSd = 0.06;\nscaleBase = 2*max([testSd comparisonSd]);\ntestHueRawIndex = 300;\nnComparison = 100;\nnMatchSimulate = 10000;\nnPsychoSimulate = 1000;\nnFitSimulate = nPsychoSimulate;\nnStimulusHues = 600;\nrespType = 'naka';\nresponseOrder = 2;\nresponseCoeefs = 0.1*[0 1 1 0.4 0.25 0.5 0.2];\nfittype = 'c';\n\n%% Generate the non-linear response function. 
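% (Added note, not in the original script.) When respType = 'naka' below, the
% response function is a Naka-Rushton saturating nonlinearity,
% R(x) = x.^n ./ (x.^n + s.^n), with exponent n = 6 and semisaturation
% constant s = 0.4, so the response is exactly 0.5 at x = s; the 'fourier'
% option described in the next comment instead adds sinusoids to a veridical
% response. A minimal standalone check of the semisaturation property:
nakaN = 6; nakaS = 0.4;
nakaFun = @(x) (x.^nakaN)./(x.^nakaN + nakaS.^nakaN);
assert(abs(nakaFun(nakaS) - 0.5) < 1e-12);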
This is veridical plus a sum of some\n% sinusoids.\nstimulusHues = linspace(0,1,nStimulusHues);\nstimulusHues = [stimulusHues];\nswitch (respType)\n case 'fourier'\n responseFun = stimulusHues + ComputeFourierModel(responseCoeefs,stimulusHues);\n case 'naka'\n responseFun = (stimulusHues.^6)./(stimulusHues.^6 + 0.4.^6);\nend\n\n\nsumPlot = figure('WindowStyle','docked'); clf;\nsubplot(4,1,1); hold on\nplot(stimulusHues,responseFun,'r','LineWidth',2);\nxlim([0 1]); ylim([0 1]);\nxlabel('Hue','FontSize',16);\nylabel('Response','FontSize',16);\ntitle('Underlying Psychophysical Function','FontSize',16);\n\n%% Compute psychometric function through a specified test hue. Response will be 1 (aka \"yes\")\n% if subjects thinks comparison presentation is of higher perceptual hue, 0 (aka \"no\") otherwise.\ntestHueIndex = testHueRawIndex;\ntestHue = stimulusHues(testHueIndex);\nmeanResponseTest = responseFun(testHueIndex);\ncomparisonIndices = testHueIndex-nComparison:testHueIndex+nComparison;\ncomparisonHues = stimulusHues(comparisonIndices);\nfor i = 1:length(comparisonIndices)\n meanResponseComparison = responseFun(comparisonIndices(i));\n probYes(i) = SimulateProbYes(meanResponseTest,meanResponseComparison,testSd,comparisonSd,nPsychoSimulate); %#ok\nend\n\n% Fit simulated data\npfitdata = [comparisonHues', probYes', nFitSimulate*ones(size(probYes'))];\npfitstruct = pfit(pfitdata,'no plot','matrix_format','xyn', ...\n 'shape', fittype, 'n_intervals', 1, 'runs',0, 'sens',0, ...\n 'compute_stats', 0, 'cuts', [0.5], 'verbose', 0);\nprobYesFit = psigpsi(fittype, pfitstruct.params.est, comparisonHues');\npse = findthreshold(fittype,pfitstruct.params.est,0.5,'performance');\nthresh = findthreshold(fittype,pfitstruct.params.est,0.75,'performance') - ...\n findthreshold(fittype,pfitstruct.params.est,0.25,'performance');\n\n% Little plot\nfigure('WindowStyle','docked'); clf; hold on\nplot(comparisonHues,probYes,'ko','MarkerSize',2,'MarkerFaceColor','k');\nplot([testHue testHue],[0 0.5],'b');\nplot([comparisonHues(1) testHue],[0.5 0.5],'b');\nplot(comparisonHues,probYesFit,'r','LineWidth',2);\nplot([pse pse],[0 0.5],'g');\nxlabel('Comparison Hue','FontSize',16);\nylabel('Prob \"Yes\"','FontSize',16);\ntitle(sprintf('Psychometric function, test hue %g',testHue),'FontSize',16);\nxlim([comparisonHues(1) comparisonHues(end)])\nylim([0 1]);\n\n%% Find average match for each test hue\nnPrint = 25;\ntheMatchedStimuli = zeros(nMatchSimulate,nStimulusHues-2*nComparison-1);\nmatchProgPlot = figure('WindowStyle','docked');\nfor t = 1:nStimulusHues-2*nComparison-1\n testHueIndex = nComparison+t;\n testHuesMatch(t) = stimulusHues(testHueIndex);\n meanResponseTest = responseFun(testHueIndex);\n noiseDrawsTest = normrnd(0,testSd,nMatchSimulate,1);\n for i = 1:nMatchSimulate\n theResponse = meanResponseTest + noiseDrawsTest(i);\n noiseDrawsComparison = normrnd(0,comparisonSd,size(responseFun));\n comparisonResponses = responseFun + noiseDrawsComparison;\n [nil,index] = min(abs(comparisonResponses-theResponse));\n theMatchedStimuli(i,t) = stimulusHues(index(1));\n end\n meanMatch(t) = mean(theMatchedStimuli(:,t));\n medianMatch(t) = median(theMatchedStimuli(:,t));\n \n % Diagnostic plot if desired\n if (rem(t,nPrint) == 0)\n figure(matchProgPlot); clf; hold on\n [n,x] = hist(theMatchedStimuli(:,t),25);\n bar(x,n);\n plot([testHuesMatch(t) testHuesMatch(t)],[0 1.2*max(n)],'k','LineWidth',2);\n plot([meanMatch(t) meanMatch(t)],[0 1.2*max(n)],'r','LineWidth',2);\n %plot([medianMatch(t) medianMatch(t)],[0 
1.2*max(n)],'g','LineWidth',2);\n xlabel('Matches','FontSize',16);\n ylabel('Count','FontSize',16);\n xlim([0 1]);\n ylim([0 1.2*max(n)]);\n title(sprintf('Match distribution, test hue %0.2g',testHuesMatch(t)),'FontSize',16);\n drawnow;\n saveas(matchProgPlot,sprintf('Matches_%g_%g_%0.2g.png',testSd,comparisonSd,testHuesMatch(t)),'png');\n fprintf('Computing mean match for test hue %d of %d\\n',t,nStimulusHues-2*nComparison-1);\n end \nend\n\n% Plot of simulated matches\nfigure(sumPlot);\nsubplot(4,1,3); hold on\nplot(testHuesMatch,meanMatch-testHuesMatch,'r','LineWidth',2);\n%plot(testHuesMatch,medianMatch-testHuesMatch,'b','LineWidth',2);\nxlim([0 1]);\nylim([-scaleBase scaleBase]);\nxlabel('Test Hue','FontSize',16);\nylabel('Match Bias','FontSize',16);\n\n%% Now compute out PSE as a function of test hue, as well as threshold\nnPrint = 25;\nprogPlot = figure('WindowStyle','docked');\nfor t = 1:nStimulusHues-2*nComparison-1\n testHueIndex = nComparison+t;\n testHues(t) = stimulusHues(testHueIndex);\n meanResponseTest = responseFun(testHueIndex);\n comparisonIndices = testHueIndex-nComparison:testHueIndex+nComparison;\n comparisonHues = stimulusHues(comparisonIndices);\n for i = 1:length(comparisonIndices)\n meanResponseComparison = responseFun(comparisonIndices(i));\n probYes(i) = SimulateProbYes(meanResponseTest,meanResponseComparison,testSd,comparisonSd,nPsychoSimulate); %#ok\n end\n \n % Fit\n pfitdata = [comparisonHues', probYes', nFitSimulate*ones(size(probYes'))];\n pfitstruct = pfit(pfitdata,'no plot','matrix_format','xyn', ...\n \t'shape', fittype, 'n_intervals', 1, 'runs',0, 'sens',0, ...\n \t'compute_stats', 0, 'cuts', [0.5], 'verbose', 0);\n probYesFit = psigpsi(fittype, pfitstruct.params.est, comparisonHues');\n pses(t) = findthreshold(fittype,pfitstruct.params.est,0.5,'performance');\n threshs(t) = findthreshold(fittype,pfitstruct.params.est,0.75,'performance') - ...\n findthreshold(fittype,pfitstruct.params.est,0.25,'performance');\n \n % Fit central part\n centralIndex = nComparison-10:nComparison+10;\n pfitdata = [comparisonHues(centralIndex)', probYes(centralIndex)', nFitSimulate*ones(size(centralIndex'))];\n pfitstruct1 = pfit(pfitdata,'no plot','matrix_format','xyn', ...\n \t'shape', fittype, 'n_intervals', 1, 'runs',0, 'sens',0, ...\n \t'compute_stats', 0, 'cuts', [0.5], 'verbose', 0);\n probYesFit1 = psigpsi(fittype, pfitstruct1.params.est, comparisonHues(centralIndex)');\n pses1(t) = findthreshold(fittype,pfitstruct1.params.est,0.5,'performance');\n\n % Diagnostic plot if desired\n if (rem(t,nPrint) == 0)\n figure(progPlot); clf; hold on\n plot(comparisonHues,probYes,'ko','MarkerSize',2,'MarkerFaceColor','k');\n plot(comparisonHues,probYesFit,'r','LineWidth',2);\n plot(comparisonHues(centralIndex),probYesFit1,'b','LineWidth',2);\n plot([testHues(t) testHues(t)],[0 1],'b');\n plot([pses(t) pses(t)],[0 1],'g');\n xlabel('Comparison Hue','FontSize',16);\n ylabel('Prob \"Yes\"','FontSize',16);\n title(sprintf('Psychometric function, test hue %0.2g',testHues(t)),'FontSize',16);\n xlim([comparisonHues(1) comparisonHues(end)])\n xlim([0 1]);\n ylim([0 1]);\n drawnow;\n saveas(progPlot,sprintf('Psycho_%g_%g_%0.2g.png',testSd,comparisonSd,testHues(t)),'png');\n fprintf('Computing PSE for test hue %d of %d\\n',t,nStimulusHues-2*nComparison-1);\n end \nend\n\n% Plot of simulated PSEs\nfigure(sumPlot);\nsubplot(4,1,2); hold on\nplot(testHues,1./threshs,'r','LineWidth',2);\nxlim([0 1]);\n%ylim([0 scaleBase]);\nxlabel('Test Hue','FontSize',16);\nylabel('Inverse 
Threshold','FontSize',16);\nsubplot(4,1,4); hold on\nplot(testHues,pses-testHues,'r','LineWidth',2);\nplot(testHues,pses1-testHues,'b','LineWidth',2);\nxlim([0 1]);\nylim([-scaleBase scaleBase]);\nxlabel('Test Hue','FontSize',16);\nylabel('PSE Bias','FontSize',16);\nsaveas(sumPlot,sprintf('Summary_%g_%g.png',testSd,comparisonSd),'png');\n\nend\n\n%% Subfunctions\n\nfunction probYes = SimulateProbYes(responseTest,responseComparison,testSd,comparisonSd,nSimulate)\n% probYes = SimulateProbYes(responseTest,responseComparison,testSd,comparisonSd,nSimulate)\n%\n% Simulate out the number of times that the comparison is judged as larger on the response variable\n% than the test. I'm sure there is an analytic solution, but it's a little tricky because we\n% allow different standard deviations for the test and comparison noise.\n%\n% 4/25/09 dhb Wrote it.\n\ndiffNoise = normrnd(0,comparisonSd,nSimulate,1)-normrnd(0,testSd,nSimulate,1);\nnYes = length(find(responseComparison-responseTest+diffNoise > 0));\nprobYes = nYes/nSimulate;\nend\n\nfunction ypred = ComputeFourierModel(coeffs,x)\n% ypred = ComputeFourierModel(coeffs,x)\n%\n% ypred = coeffs(1) + coeffs(2)*(sin(2*pi*x) + coeffs(3)*cos(2*pi*x) + coeffs(4)*sin(2*pi*2*x) + coeffs(5)*cos(2*pi*2*x) + ...\n%\n% The order of the equation is determined from the length of coeffs.\n% The input x is assumed to be in the range [0-1].\n%\n% 4/21/09 dhb Wrote it.\n\n% Modulation\na = coeffs(1);\nb = coeffs(2);\n\nmodulation = sin(2*pi*x) + coeffs(3)*cos(2*pi*x);\nfor i = 1:length(coeffs(4:end))/2\n modulation = modulation + coeffs(2*(i-1)+4)*sin(2*pi*(i+1)*x) + coeffs(2*(i-1)+5)*cos(2*pi*(i+1)*x);\nend\nypred = a + b*modulation;\n\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "psychofitTutorialYN.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/psychofitTutorial/psychofitTutorialYN.m", "size": 6771, "source_encoding": "utf_8", "md5": "875ba9d234f3d08d50560f4097522ab0", "text": "function psychofitTutorialYN\n% psychofitTutorialYN\n%\n% Show basic use Palamedes toolboxe to simulate and\n% fit psychophysical data. This one for Y/N method of constant stimuli.\n%\n% You need both the psignifit and Palamedes toolboxes on your path, as well\n% as the Brainard lab staircase class and the Psychtoolbox.\n%\n% 4/30/09 dhb Wrote it.\n% 10/18/09 dhb Add some fits with Palamedes, just for grins\n% 10/19/09 dhb Added TAFC example as well as Y/N. Cleaned up and added comments.\n% 10/19/09 dhb Use staircase class for a TAFC staircase example.\n% 5/6/11 dhb Fix initial guess of slope for Palamedes. This was inverted, but worked by luck previously.\n% 10/31/12 dhb Fix what is printed out for Y/N staircase threshold.\n% dhb Y/N thresh defined as 75% point minus 50% point.\n% dhb Save figures, and a few more lines on the figs.\n% dhb Add option to simulate adapting bias.\n% 11/14/13 dhb Tune up a bunch of little things.\n% 10/21/14 dhb Added better comments for staircasing stuff.\n% 11/22/17 dhb Strip down to Y/N Palemedes and ver 1.8.2 of Palemedes.\n\n%% Clear\nclear; close all;\n\n%% Specify precision as noise of a Gaussian variable\n%\n% Simulated Y/N experiment is for test bigger or less than \n% comparison.\nnoiseSd = 0.06;\ntestStimulus = 100;\nnComparisonFit = 100;\nadaptingBias = 0;\nnComparison = 10;\nnSimulate = 40;\n\n%% Simulate Y/N psychometric function and fit. 
The cumulative normal is a pretty natural choice\n% for y/n psychometric data, and that's what's shown here.\n%\n% There are lots of variants to the fitting that could be used, in the sense that we could\n% allow for lapse rates, etc. But this form should work pretty well for most purposes. It's\n% always a good idea to plot the fit against the actual data and make sure it is reasonable.\n% For staircase data, this requires some binning (not demonstrated here.)\ncomparisonStimuli = linspace(testStimulus-4*noiseSd,testStimulus+4*noiseSd,nComparison);\ncomparisonStimuliFit = linspace(testStimulus-4*noiseSd,testStimulus+4*noiseSd,nComparisonFit);\nfor i = 1:nComparison\n nYes(i) = SimulateProbYes(testStimulus,comparisonStimuli(i),0,noiseSd,nSimulate,adaptingBias); %#ok\nend\n\n% PALAMEDES\n\n% Psychometric function form\nPF = @PAL_CumulativeNormal; % Alternatives: PAL_Gumbel, PAL_Weibull, PAL_CumulativeNormal, PAL_HyperbolicSecant\n\n% The first two parameters of the psychometric function define its position and shape.\n%\n% The third is the guess rate, which determines the value the function\n% takes on at low values of x. For a perfect subject this would be 0,\n% but there might be lapses (see below) for small x as well as high x.\n%\n% The fourth parameter is the lapse rate - the asymptotic performance at \n% high values of x. For a perfect subject, this would be 0, but sometimes\n% subjects have a \"lapse\" and get the answer wrong even when the stimulus\n% is easy to see. We can search over this, but shouldn't allow it to take\n% on unreasonable values. 0.05 as an upper limit isn't crazy.\n%\n% paramsFree is a boolean vector that determins what parameters get\n% searched over. 1: free parameter, 0: fixed parameter\nparamsFree = [1 1 1 1]; \n\n% Initial guess. Setting the first parameter to the middle of the stimulus\n% range and the second to 1 puts things into a reasonable ballpark here.\nparamsValues0 = [mean(comparisonStimuli') 1/((max(comparisonStimuli')-min(comparisonStimuli'))/4) 0 0];\n\n% This puts limits on the range of the lapse rate. And we pass an option\n% to the fit function that forces the guess and lapse rates to be equal,\n% which is reasonable for this case.\nlapseLimits = [0 0.05];\n\n% Set up standard options for Palamedes search\noptions = PAL_minimize('options');\n\n% Fit with Palemedes Toolbox. The parameter constraints match the psignifit parameters above. Some thinking is\n% required to initialize the parameters sensibly. We know that the mean of the cumulative normal should be \n% roughly within the range of the comparison stimuli, so we initialize this to the mean. The standard deviation\n% should be some moderate fraction of the range of the stimuli, so again this is used as the initializer.\n[paramsValues] = PAL_PFML_Fit(...\n comparisonStimuli',nYes',nSimulate*ones(size(nYes')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options, ...\n 'lapseLimits',lapseLimits,'gammaEQlambda',true);\nprobYesFitPal = PF(paramsValues,comparisonStimuliFit');\npsePal = PF(paramsValues,0.5,'inverse');\nthreshPal = PF(paramsValues,0.75,'inverse')-psePal;\n\n% Plot of Y/N simulation. 
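% --- Added sanity check (not part of the original tutorial) ---
% The Monte Carlo in SimulateProbYes (defined at the end of this file) has a
% closed form: the difference of the two independent Gaussian noises is itself
% Gaussian with SD sqrt(testSd^2 + comparisonSd^2), so
%   probYes = normcdf(responseComparison - adaptingBias - responseTest, 0, ...
%                     sqrt(testSd^2 + comparisonSd^2))
% With the values used above (testSd = 0, comparisonSd = noiseSd,
% adaptingBias = 0) the true psychometric function is a cumulative normal
% centered on testStimulus with SD noiseSd, so the fit should recover a pse
% near testStimulus and a 75%-minus-50% threshold near norminv(0.75)*noiseSd
% (about 0.674*noiseSd). Uses norminv from the Statistics Toolbox, which the
% tutorial already needs for normrnd.
fprintf('Analytic pse: %g, thresh: %g (sanity check, not a fit)\n', ...
    testStimulus, norminv(0.75)*noiseSd);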
When the red and green overlap (which they do in all my tests), it\n% means that psignfit and Palamedes agree.\nfigure; clf; hold on\nplot(comparisonStimuli,nYes/nSimulate,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot([testStimulus testStimulus],[0 1],'b');\nplot(comparisonStimuliFit,probYesFitPal,'g','LineWidth',1);\nplot([psePal psePal],[0 1],'g','LineWidth',1);\nplot([psePal psePal+threshPal],[0.75 0.75],'g','LineWidth',1);\nxlabel('Comparison','FontSize',16);\nylabel('Prob \"Yes\"','FontSize',16);\ntitle(sprintf('Y/N psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1) comparisonStimuli(end)])\nylim([0 1]);\nif (exist('FigureSave','file'))\n FigureSave('PsychoYN',gcf,'pdf');\nelse\n saveas('gcf','PsychoYN','pdf');\nend\n\n% Printout of interesting parameters.\nfprintf('Y/N simulated data\\n');\nfprintf('Palamedes pse: %g, thresh: %g\\n',psePal,threshPal);\nfprintf('\\n');\n\nend\n\n%% Subfunctions for simulating observer\n\nfunction nYes = SimulateProbYes(responseTest,responseComparison,testSd,comparisonSd,nSimulate,adaptingBias)\n% probYes = SimulateProbYes(responseTest,responseComparison,testSd,comparisonSd,nSimulate,adaptingBias)\n%\n% Simulate out the number of times that the comparison is judged as larger on the response variable\n% than the test. I'm sure there is an analytic solution, but it's a little tricky because we\n% allow different standard deviations for the test and comparison noise.\n%\n% Assume experiment is based on comparison of noisy draws from underlying comparison and test \n% distributions. You can also think of responseTest as a criterion. Passing testSd = 0 makes\n% the criterion noise free, and other testSd may be thought of as criterial noise.\n%\n% The parameter adaptingBias is expressed in the same units as the internal response, and is subtracted\n% from the comparison response before the decision. It simulates an adaptive effect. Typically passed\n% as zero. It could also be regarded as a criterion that is shifted from the standard, if you are\n% thinking in TSD terms.\n%\n% 4/25/09 dhb Wrote it.\n\ndiffNoise = normrnd(0,comparisonSd,nSimulate,1)-normrnd(0,testSd,nSimulate,1);\nnYes = length(find(responseComparison-adaptingBias-responseTest+diffNoise > 0));\nend\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "psychofitTutorialTAFCStaircase.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/psychofitTutorial/psychofitTutorialTAFCStaircase.m", "size": 8302, "source_encoding": "utf_8", "md5": "493bbea8ee0a6593d8f99f7e9e661094", "text": "function psychofitTutorialTAFCStaircase\n% psychofitTutorialTAFCStaircase\n%\n% Show a staircase procedure and illustrate how to aggregate data and fit.\n%\n% You need the Palamedes toolboxe (1.8.2) and BrainardLabToolbox for this to work.\n\n% 10/30/17 dhb Separated out and updated.\n\n%% Clear\nclear; close all;\n\n%% Specify precision as noise of a Gaussian variable\n%\n% Simulation parameters\nnoiseSd = 0.06;\ntestStimulus = 100;\nnComparisonFit = 100;\nnComparison = 10;\nnSimulate = 40;\nnComparisonSds = 4;\nthresholdCriterionCorrect = 0.75;\nbaseStepSize = 0.10;\n\n%% Set up stimulus range\ncomparisonStimuli = linspace(testStimulus,testStimulus+nComparisonSds*noiseSd,nComparison);\ncomparisonStimuliFit = linspace(testStimulus,testStimulus+nComparisonSds*noiseSd,nComparisonFit);\n\n%% Staircase type. You can specify either 'quest' or 'standard'. \nstaircaseType = 'standard';\n\n%% Do a staircase for a TAFC experiment. 
Uses our Staircase class.\n% The code below runs either 1 or 3 interleaved staircases.\n% The use of 1 or 3 is hard-coded, with parameters for each\n% set in the switch statement below\nnInterleavedStaircases = 3;\nmaxDelta = max(comparisonStimuli)-testStimulus;\nminDelta = 0.01;\n\n% Initialize staircases. Initialization is slightly different for 'standard'\n% and 'quest' versions. All parameters other than 'MaxValue' and 'MinValue'\n% are required, and this is enforced by the class constructor function.\n%\n% The logic for TAFC staircases is similar to Y/N, but we want to set \n% ups/downs or criterionCorr to aim above 50%, whereas in Y/N we typically\n% aim at 50%.\nfor k = 1:nInterleavedStaircases\n % Set starting value for the staircase at a random level between\n % min and max.\n initialDelta = (maxDelta-minDelta)*3*rand(1)+minDelta;\n switch(staircaseType)\n case 'standard'\n stepSizes = [2*baseStepSize baseStepSize baseStepSize/4];\n switch (nInterleavedStaircases)\n case 1\n % Parameters for just one staircase\n numTrialsPerStaircase = 50;\n nUps = [2];\n nDowns = [1];\n case 3\n % Parameters for three interleaved\n % Can also make the up/down rule vary\n % across the staircases, to spread trials\n % a little more.\n numTrialsPerStaircase = 30; \n nUps = [2 2 2];\n nDowns = [1 1 1];\n otherwise\n error('Don''t know how to deal with specified number of staircases');\n end\n st{k} = Staircase(staircaseType,initialDelta, ...\n 'StepSizes', stepSizes, 'NUp', nUps(k), 'NDown', nDowns(k), ...\n 'MaxValue', maxDelta, 'MinValue', minDelta);\n otherwise\n error('Unknown staircase type specified');\n end\nend\n\n% Simulate interleaved staircases\nfor i = 1:numTrialsPerStaircase\n order = Shuffle(1:nInterleavedStaircases);\n for k = 1:nInterleavedStaircases\n comparisonDelta = getCurrentValue(st{order(k)});\n response = SimulateTAFC(testStimulus,testStimulus+comparisonDelta,noiseSd,noiseSd,1);\n st{order(k)} = updateForTrial(st{order(k)},comparisonDelta,response);\n end\nend\n\n% Analyze staircase data\nvaluesStair = []; responsesStair = [];\nfor k = 1:nInterleavedStaircases\n threshStair(k) = getThresholdEstimate(st{k});\n [valuesSingleStair{k},responsesSingleStair{k}] = getTrials(st{k});\n valuesStair = [valuesStair valuesSingleStair{k}];\n responsesStair = [responsesStair responsesSingleStair{k}];\nend\n[meanValues,nCorrectStair,nTrialsStair] = GetAggregatedStairTrials(valuesStair,responsesStair,10);\n\n% Palamedes fit\n%\n% Fit with Palemedes Toolbox. The parameter constraints match the psignifit parameters above. Again, some\n% thought is required to initialize reasonably. The threshold parameter is reasonably taken to be in the\n% range of the comparison stimuli, where here 0 means that the comparison is the same as the test. The \n% second parameter should be on the order of 1/2, so we just hard code that. As with Y/N, really want to \n% plot the fit against the data to make sure it is reasonable in practice.\n\n% Define what psychometric functional form to fit.\n%\n% Alternatives: PAL_Gumbel, PAL_Weibull, PAL_CumulativeNormal, PAL_HyperbolicSecant\nPF = @PAL_Weibull; \n\n% The first two parameters of the Weibull define its shape.\n%\n% The third is the guess rate, which determines the value the function\n% takes on at x = 0. For TAFC, this should be locked at 0.5.\n%\n% The fourth parameter is the lapse rate - the asymptotic performance at \n% high values of x. 
For a perfect subject, this would be 0, but sometimes\n% subjects have a \"lapse\" and get the answer wrong even when the stimulus\n% is easy to see. We can search over this, but shouldn't allow it to take\n% on unreasonable values. 0.05 as an upper limit isn't crazy.\n%\n% paramsFree is a boolean vector that determins what parameters get\n% searched over. 1: free parameter, 0: fixed parameter\nparamsFree = [1 1 0 1]; \n\n% Initial guess. Setting the first parameter to the middle of the stimulus\n% range and the second to 1 puts things into a reasonable ballpark here.\nparamsValues0 = [mean(comparisonStimuli'-testStimulus) 1 0.5 0.01];\n\n% This puts limits on the range of the lapse rate\nlapseLimits = [0 0.05];\n\n% Set up standard options for Palamedes search\noptions = PAL_minimize('options');\n\n% Do the search to get the parameters\n[paramsValues] = PAL_PFML_Fit(...\n valuesStair',responsesStair',ones(size(responsesStair')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options,'lapseLimits',lapseLimits);\n\nprobCorrFitStair = PF(paramsValues,comparisonStimuliFit'-testStimulus);\nthreshPalStair = PF(paramsValues,thresholdCriterionCorrect,'inverse');\n\n% Figure\nstairFig = figure; clf;\ncolors = ['r' 'g' 'b' 'k' 'y' 'c'];\nsubplot(1,2,1); hold on\nfor k = 1:nInterleavedStaircases\n xvalues = 1:numTrialsPerStaircase;\n index = find(responsesSingleStair{k} == 0);\n plot(xvalues,valuesSingleStair{k},[colors(k) '-']);\n plot(xvalues,valuesSingleStair{k},[colors(k) 'o'],'MarkerFaceColor',colors(k),'MarkerSize',6);\n if (~isempty(index))\n plot(xvalues(index),valuesSingleStair{k}(index),[colors(k) 'o'],'MarkerFaceColor','w','MarkerSize',6);\n end\n plot(xvalues,threshStair(k)*ones(1,numTrialsPerStaircase),colors(k));\nend\nxlabel('Trial Number','FontSize',16);\nylabel('Level','FontSize',16);\ntitle(sprintf('TAFC staircase plot'),'FontSize',16);\n\nsubplot(1,2,2); hold on\nplot(meanValues,nCorrectStair./nTrialsStair,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot(comparisonStimuliFit-testStimulus,probCorrFitStair,'r','LineWidth',2);\nplot([threshPalStair threshPalStair],[0 thresholdCriterionCorrect],'r','LineWidth',2);\nxlabel('Delta Stimulus','FontSize',16);\nylabel('Prob Correct','FontSize',16);\ntitle(sprintf('TAFC staircase psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1)-testStimulus comparisonStimuli(end)-testStimulus])\nylim([0 1]);\nif (exist('FigureSave','file'))\n FigureSave('StaircaseFC',gcf','pdf');\nelse\n saveas(gcf','StaircaseFC','pdf');\nend\n\nfprintf('Staircase simulated data\\n');\nfor k = 1:nInterleavedStaircases\n fprintf('\\tTAFC staircase %d threshold estimate: %g\\n',k,threshStair(k));\nend\nfprintf('Palamedes''s threshold estimate from staircase data: %g\\n',threshPalStair);\n\nend\n\n\n%% Subfunctions for simulating observer\n\nfunction nCorrect = SimulateTAFC(responseTest,responseComparison,testSd,comparisonSd,nSimulate)\n% nCorrect = SimulateTAFC(responseTest,responseComparison,testSd,comparisonSd,nSimulate)\n%\n% Simulate out the number of times that a TAFC task is done correctly, with judgment greater\n% corresponding to greater noisy response. 
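% An analytic counterpart (added note, not in the original): as called in this
% tutorial the comparison is always above the test, and the two noises are
% independent Gaussians, so the expected proportion correct is
%   normcdf(responseComparison - responseTest, 0, sqrt(testSd^2 + comparisonSd^2)),
% which is 0.5 at zero difference and approaches 1 for large differences,
% consistent with the Weibull guess rate being locked at 0.5 in the fit above.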
\n%\n% 4/25/09 dhb Wrote it.\n\nnCorrect = 0;\nfor i = 1:nSimulate\n responseTestNoise = responseTest+normrnd(0,testSd,1,1);\n responseComparisonNoise = responseComparison+normrnd(0,comparisonSd,1,1);\n \n if (responseComparison > responseTest & responseComparisonNoise > responseTestNoise)\n nCorrect = nCorrect+1;\n elseif (responseComparison <= responseTest & responseComparisonNoise <= responseTestNoise)\n nCorrect = nCorrect+1;\n end\nend\nend\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "psychofitTutorial2014.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/psychofitTutorial/psychofitTutorial2014.m", "size": 22190, "source_encoding": "utf_8", "md5": "0385fcfe145686c5be6fb0f57f0c1129", "text": "function psychofitTutorial\n% psychofitTutorial2014\n%\n% Show basic use of psignifit and Palamedes toolboxes to simulate and\n% fit psychophysical data. Has cases for Y/N and TAFC, and shows \n% both method of constant stimuli and staircase procedures.\n%\n% This is set up for our local version of psignifit, where the function psi has been\n% renamed psigpsi to avoid a name conflict with MATLAB's own psi function.\n%\n% You need both the psignifit and Palamedes toolboxes on your path, as well\n% as the Brainard lab staircase class and the Psychtoolbox.\n%\n% * [NOTE: DHB - This is a somewhat outdated version, as both psignifit and\n% Palamedes have changed since this was written. And, it does not use\n% mQUESTPlus. Starting to update parts in a new version today, \n\n% 4/30/09 dhb Wrote it.\n% 10/18/09 dhb Add some fits with Palamedes, just for grins\n% 10/19/09 dhb Added TAFC example as well as Y/N. Cleaned up and added comments.\n% 10/19/09 dhb Use staircase class for a TAFC staircase example.\n% 5/6/11 dhb Fix initial guess of slope for Palamedes. This was inverted, but worked by luck previously.\n% 10/31/12 dhb Fix what is printed out for Y/N staircase threshold.\n% dhb Y/N thresh defined as 75% point minus 50% point.\n% dhb Save figures, and a few more lines on the figs.\n% dhb Add option to simulate adapting bias.\n% 11/14/13 dhb Tune up a bunch of little things.\n% 10/21/14 dhb Added better comments for staircasing stuff.\n\n%% Clear\nclear; close all; clear classes;\n\n%% Specify precision as noise of a Gaussian variable\n%\n% Simulated Y/N experiment is for test bigger or less than \n% comparison.\nnoiseSd = 0.06;\ntestStimulus = 100;\nnComparisonFit = 100;\nadaptingBias = 0;\nnComparison = 10;\nnSimulate = 40;\n\n%% Staircase type. You can specify either 'quest' or 'standard'. \nstaircaseType = 'standard';\n\n%% Simulate Y/N psychometric function and fit. The cumulative normal is a pretty natural choice\n% for y/n psychometric data, and that's what's shown here.\n%\n% There are lots of variants to the fitting that could be used, in the sense that we could\n% allow for lapse rates, etc. But this form should work pretty well for most purposes. It's\n% always a good idea to plot the fit against the actual data and make sure it is reasonable.\n% For staircase data, this requires some binning (not demonstrated here.)\ncomparisonStimuli = linspace(testStimulus-4*noiseSd,testStimulus+4*noiseSd,nComparison);\ncomparisonStimuliFit = linspace(testStimulus-4*noiseSd,testStimulus+4*noiseSd,nComparisonFit);\nfor i = 1:nComparison\n nYes(i) = SimulateProbYes(testStimulus,comparisonStimuli(i),0,noiseSd,nSimulate,adaptingBias); %#ok\nend\n\n% PSIGNIFIT\n% Fit simulated data, psignifit. These parameters do a one interval (y/n) fit. 
Both lambda (lapse rate) and\n% gamma (value for -Inf input) are locked at 0.\nfittype = 'c';\npfitdata = [comparisonStimuli', nYes', nSimulate*ones(size(nYes'))];\npfitstruct = pfit(pfitdata,'no plot','matrix_format','xrn', ...\n 'shape', fittype, 'n_intervals', 1, 'runs', 0, 'sens', 0, ...\n 'compute_stats', 0, 'cuts', [0.5], 'verbose', 0, 'fix_lambda',0,'fix_gamma',0);\nprobYesFitPsig = psigpsi(fittype, pfitstruct.params.est, comparisonStimuliFit');\npsePsig = findthreshold(fittype,pfitstruct.params.est,0.5,'performance');\nthreshPsig = findthreshold(fittype,pfitstruct.params.est,0.75,'performance') - ...\n findthreshold(fittype,pfitstruct.params.est,0.5,'performance');\n\n% PALAMEDES\n% Fit with Palemedes Toolbox. The parameter constraints match the psignifit parameters above. Some thinking is\n% required to initialize the parameters sensibly. We know that the mean of the cumulative normal should be \n% roughly within the range of the comparison stimuli, so we initialize this to the mean. The standard deviation\n% should be some moderate fraction of the range of the stimuli, so again this is used as the initializer.\nPF = @PAL_CumulativeNormal; % Alternatives: PAL_Gumbel, PAL_Weibull, PAL_CumulativeNormal, PAL_HyperbolicSecant\nPFI = @PAL_inverseCumulativeNormal;\nparamsFree = [1 1 0 0]; % 1: free parameter, 0: fixed parameter\nparamsValues0 = [mean(comparisonStimuli') 1/((max(comparisonStimuli')-min(comparisonStimuli'))/4) 0 0];\noptions = optimset('fminsearch'); % Type help optimset\noptions.TolFun = 1e-09; % Increase required precision on LL\noptions.Display = 'off'; % Suppress fminsearch messages\nlapseLimits = [0 1]; % Limit range for lambda\n[paramsValues] = PAL_PFML_Fit(...\n comparisonStimuli',nYes',nSimulate*ones(size(nYes')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options, ...\n 'lapseLimits',lapseLimits);\nprobYesFitPal = PF(paramsValues,comparisonStimuliFit');\npsePal = PFI(paramsValues,0.5);\nthreshPal = PFI(paramsValues,0.75)-PFI(paramsValues,0.5);\n\n% Plot of Y/N simulation. When the red and green overlap (which they do in all my tests), it\n% means that psignfit and Palamedes agree.\nfigure; clf; hold on\nplot(comparisonStimuli,nYes/nSimulate,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot([testStimulus testStimulus],[0 1],'b');\nplot(comparisonStimuliFit,probYesFitPsig,'r','LineWidth',2);\nplot(comparisonStimuliFit,probYesFitPal,'g','LineWidth',1);\nplot([psePsig psePsig],[0 1],'r','LineWidth',2);\nplot([psePal psePal],[0 1],'g','LineWidth',1);\nplot([psePsig psePsig+threshPsig],[0.75 0.75],'r','LineWidth',2);\nplot([psePal psePal+threshPal],[0.75 0.75],'g','LineWidth',1);\nxlabel('Comparison','FontSize',16);\nylabel('Prob \"Yes\"','FontSize',16);\ntitle(sprintf('Y/N psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1) comparisonStimuli(end)])\nylim([0 1]);\nif (exist('FigureSave','file'))\n FigureSave('PsychoYN',gcf,'pdf');\nelse\n saveas('gcf','PsychoYN','pdf');\nend\n\n% Printout of interesting parameters.\nfprintf('Y/N simulated data\\n');\nfprintf('Psignifit pse: %g, thresh: %g\\n',psePsig,threshPsig);\nfprintf('Palamedes pse: %g, thresh: %g\\n',psePal,threshPal);\nfprintf('\\n');\n\n%% Do a staircase for a Y/N experiment. 
Uses our Staircase class.\n% The code below runs three interleaved staircases.\n% For 'quest', three different criterion percent correct values are used.\n% For 'standard', three different up/down rules are used.\n% The use of 3 is hard-coded, in the sense that the vector lengths of the\n% criterion/up-down vectors must match this number.\n%\n% The variables maxDelta and minDelta below represent the range of trial values\n% that the staircase will range between.\nnumTrialsPerStaircase = 50;\nmaxDelta = max(comparisonStimuli)-testStimulus;\nminDelta = -maxDelta;\n\n% Initialize staircases. Initialization is slightly different for 'standard'\n% and 'quest' versions. All parameters other than 'MaxValue' and 'MinValue'\n% are required, and this is enforced by the class constructor function.\nnInterleavedStaircases = 3;\nfor k = 1:nInterleavedStaircases\n % Set starting value for the staircase at a random level between\n % min and max.\n initialDelta = (maxDelta-minDelta)*rand(1)+minDelta;\n \n switch(staircaseType)\n case 'standard'\n % The staircase starts at the largest step size and decreases with\n % each reversal. When it gets to the minimum value in the list, it\n % stays there.\n stepSizes = [maxDelta/2 maxDelta/4 maxDelta/8];\n \n % Set the up/dow rule for each staircase. N-Up, M-Down means (counterintuitively)\n % that it requires N positive responses to decrease the level and M negative responses\n % to decrease it. The choices shown here tend to spread the values around the 50-50\n % response point.\n nUps = [1 1 2];\n nDowns = [2 1 1];\n st{k} = Staircase(staircaseType,initialDelta, ...\n 'StepSizes', stepSizes, 'NUp', nUps(k), 'NDown', nDowns(k), ...\n 'MaxValue', maxDelta, 'MinValue', minDelta);\n\n case 'quest'\n criterionCorrs = [.4 .5 .6];\n st{k} = Staircase(staircaseType,initialDelta, ...\n 'Beta', 2, 'Delta', 0.01, 'PriorSD',1000, ...\n 'TargetThreshold', criterionCorrs(k), 'Gamma', 0, ...\n 'MaxValue', maxDelta, 'MinValue', minDelta);\n end\nend\n\n% Simulate interleaved staircases\nfor i = 1:numTrialsPerStaircase\n order = Shuffle(1:nInterleavedStaircases);\n for k = 1:nInterleavedStaircases\n comparisonDelta = getCurrentValue(st{order(k)});\n response = SimulateProbYes(testStimulus,testStimulus+comparisonDelta,0,noiseSd,1,adaptingBias);\n st{order(k)} = updateForTrial(st{order(k)},comparisonDelta,response);\n end\nend\n\n% Analyze staircase data\nvaluesStair = []; responsesStair = [];\nfor k = 1:nInterleavedStaircases\n pseStair(k) = getThresholdEstimate(st{k});\n [valuesSingleStair{k},responsesSingleStair{k}] = getTrials(st{k});\n valuesStair = [valuesStair valuesSingleStair{k}];\n responsesStair = [responsesStair responsesSingleStair{k}];\nend\n[meanValues,nCorrectStair,nTrialsStair] = GetAggregatedStairTrials(valuesStair,responsesStair,10);\n\n% Fit staircase data using Palamedes\nparamsValues0(1) = 0;\nparamsValues0(2) = 1/((max(valuesStair) - min(valuesStair))/4); \n[paramsValuesStair] = PAL_PFML_Fit(...\n valuesStair',responsesStair',ones(size(responsesStair')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options, ...\n 'lapseLimits',lapseLimits);\nprobYesFitStair = PF(paramsValuesStair,comparisonStimuliFit'-testStimulus);\npsePalStair = PFI(paramsValuesStair,0.5);\nthreshPalStair = PFI(paramsValuesStair,0.75)-PFI(paramsValuesStair,0.5);\n\n% Figure\nstairFig = figure; clf;\ncolors = ['r' 'g' 'b' 'k' 'y' 'c'];\nsubplot(1,2,1); hold on\nfor k = 1:nInterleavedStaircases\n xvalues = 1:numTrialsPerStaircase;\n index = find(responsesSingleStair{k} == 0);\n 
plot(xvalues,valuesSingleStair{k},[colors(k) '-']);\n plot(xvalues,valuesSingleStair{k},[colors(k) 'o'],'MarkerFaceColor',colors(k),'MarkerSize',6);\n if (~isempty(index))\n plot(xvalues(index),valuesSingleStair{k}(index),[colors(k) 'o'],'MarkerFaceColor','w','MarkerSize',6);\n end\n plot(xvalues,pseStair(k)*ones(1,numTrialsPerStaircase),colors(k));\nend\nxlabel('Trial Number','FontSize',16);\nylabel('Level','FontSize',16);\ntitle(sprintf('Y/N staircase plot'),'FontSize',16);\n\nsubplot(1,2,2); hold on\nplot(meanValues,nCorrectStair./nTrialsStair,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot(comparisonStimuliFit-testStimulus,probYesFitStair,'r','LineWidth',2);\nplot([psePalStair psePalStair],[0 0.5],'r','LineWidth',2);\nplot([psePalStair psePalStair+threshPalStair],[0.75 0.75],'g','LineWidth',2);\nxlabel('Delta Stimulus','FontSize',16);\nylabel('Prob Yes','FontSize',16);\ntitle(sprintf('Y/N staircase psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1)-testStimulus comparisonStimuli(end)-testStimulus])\nylim([0 1]);\nif (exist('FigureSave','file'))\n FigureSave('StaircaseYN',gcf','pdf');\nelse\n saveas(gcf,'StaircaseYN','pdf');\nend\n\nfprintf('Staircase simulated data\\n');\nfor k = 1:nInterleavedStaircases\n fprintf('\\tY/N staircase %d threshold estimate: %g\\n',k,pseStair(k));\nend\nfprintf('Palamedes''s threshold estimate from staircase data: %g\\n',threshPalStair);\nfprintf('\\n');\n\n%% Simulate TAFC psychometric function and fit. Here the Weibull is a more natural functional\n% form, and we show its use for both toolboxes.\n%\n% Unlike Y/N, the most natural x axis for TAFC is the increment of the comparison relative to\n% the test, so that a 0 comparison corresponds to chance performance.\n%\n% As with Y/N simulation above, we don't allow for a lapse rate in this demo. \ncomparisonStimuli = linspace(testStimulus,testStimulus+6*noiseSd,nComparison);\ncomparisonStimuliFit = linspace(testStimulus,testStimulus+6*noiseSd,nComparisonFit);\nfor i = 1:nComparison\n nCorrect(i) = SimulateTAFC(testStimulus,comparisonStimuli(i),noiseSd,noiseSd,nSimulate,adaptingBias); %#ok\nend\n\n% PSIGNIFIT\n% Fit simulated data, psignifit. These parameters do a one interval (y/n) fit. Both lambda (lapse rate) and\n% gamma (value for -Inf input) are locked at 0.\ncriterionCorr = 0.82;\nfittype = 'w';\npfitdata = [comparisonStimuli'-testStimulus, nCorrect', nSimulate*ones(size(nCorrect'))];\npfitstruct = pfit(pfitdata,'no plot','matrix_format','xrn', ...\n 'shape', fittype, 'n_intervals', 2, 'runs', 0, 'sens', 0, ...\n 'compute_stats', 0, 'cuts', [0.5], 'verbose', 0, 'fix_lambda',0,'fix_gamma',0.5);\nprobCorrFitPsig = psigpsi(fittype, pfitstruct.params.est, comparisonStimuliFit'-testStimulus);\nthreshPsig = findthreshold(fittype,pfitstruct.params.est,criterionCorr,'performance');\n\n% PALAMEDES\n% Fit with Palemedes Toolbox. The parameter constraints match the psignifit parameters above. Again, some\n% thought is required to initialize reasonably. The threshold parameter is reasonably taken to be in the\n% range of the comparison stimuli, where here 0 means that the comparison is the same as the test. The \n% second parameter should be on the order of 1/2, so we just hard code that. 
As with Y/N, really want to \n% plot the fit against the data to make sure it is reasonable in practice.\nPF = @PAL_Weibull; % Alternatives: PAL_Gumbel, PAL_Weibull, PAL_CumulativeNormal, PAL_HyperbolicSecant\nPFI = @PAL_inverseWeibull;\nparamsFree = [1 1 0 0]; % 1: free parameter, 0: fixed parameter\nparamsValues0 = [mean(comparisonStimuli'-testStimulus) 1/2 0.5 0];\noptions = optimset('fminsearch'); % Type help optimset\noptions.TolFun = 1e-09; % Increase required precision on LL\noptions.Display = 'off'; % Suppress fminsearch messages\nlapseLimits = [0 1]; % Limit range for lambda\n[paramsValues] = PAL_PFML_Fit(...\n comparisonStimuli'-testStimulus,nCorrect',nSimulate*ones(size(nYes')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options, ...\n 'lapseLimits',lapseLimits);\nprobCorrFitPal = PF(paramsValues,comparisonStimuliFit'-testStimulus);\nthreshPal = PFI(paramsValues,criterionCorr);\n\n% Plot of TAFC simulation. When the red and green overlap (which they do in all my tests), it\n% means that psignfit and Palamedes agree.\nfigure; clf; hold on\nplot(comparisonStimuli'-testStimulus,nCorrect/nSimulate,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot(comparisonStimuliFit-testStimulus,probCorrFitPsig,'r','LineWidth',2);\nplot(comparisonStimuliFit-testStimulus,probCorrFitPal,'g','LineWidth',1);\nplot([threshPsig threshPsig],[0 criterionCorr],'r','LineWidth',2);\nplot([threshPal threshPal],[0 criterionCorr],'g','LineWidth',1);\nxlabel('Delta Stimulus','FontSize',16);\nylabel('Prob Correct','FontSize',16);\ntitle(sprintf('TAFC psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1)-testStimulus comparisonStimuli(end)-testStimulus])\nylim([0 1]);\nif (exist('FigureSave','file'))\n\tFigureSave('PsychoFC',gcf,'pdf');\nelse\n saveas(gcf,'PsychFC','pdf');\nend\n\n% Printout\nfprintf('TAFC simulated data\\n');\nfprintf('Psignifit thresh: %g\\n',threshPsig);\nfprintf('Palamedes thresh: %g\\n',threshPal);\nfprintf('\\n');\n\n%% Do a staircase for a TAFC experiment. Uses our Staircase class.\n% The code below runs three interleaved staircases.\n% For 'quest', three different criterion percent correct values are used.\n% For 'standard', three different up/down rules are used.\n% The use of 3 is hard-coded, in the sense that the vector lengths of the\n% criterion/up-down vectors must match this number.\nnumTrialsPerStaircase = 30;\nmaxDelta = max(comparisonStimuli)-testStimulus;\nminDelta = 0.01;\n\n% Initialize staircases. Initialization is slightly different for 'standard'\n% and 'quest' versions. 
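% (Added sanity check, not part of the original tutorial.) For the TAFC
% simulation above -- SimulateTAFC with independent Gaussian noise of SD
% noiseSd on both test and comparison, and adaptingBias = 0 -- the true
% proportion correct at stimulus difference delta is
% normcdf(delta, 0, sqrt(2)*noiseSd), so the criterion threshold should come
% out near sqrt(2)*noiseSd*norminv(criterionCorr), roughly 0.078 for
% noiseSd = 0.06 and criterionCorr = 0.82. The psignifit and Palamedes
% estimates printed above can be checked against that value.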
All parameters other than 'MaxValue' and 'MinValue'\n% are required, and this is enforced by the class constructor function.\n%\n% The logic for TAFC staircases is similar to Y/N, but we want to set \n% ups/downs or criterionCorr to aim above 50%, whereas in Y/N we typically\n% aim at 50%.\nnInterleavedStaircases = 3;\nfor k = 1:nInterleavedStaircases\n % Set starting value for the staircase at a random level between\n % min and max.\n initialDelta = (maxDelta-minDelta)*rand(1)+minDelta;\n switch(staircaseType)\n case 'standard'\n stepSizes = [2*threshPal threshPal threshPal/4];\n nUps = [3 2 3];\n nDowns = [1 1 2];\n st{k} = Staircase(staircaseType,initialDelta, ...\n 'StepSizes', stepSizes, 'NUp', nUps(k), 'NDown', nDowns(k), ...\n 'MaxValue', maxDelta, 'MinValue', minDelta);\n\n case 'quest'\n criterionCorrs = [criterionCorr-0.08 criterionCorr criterionCorr+0.08];\n st{k} = Staircase(staircaseType,initialDelta, ...\n 'Beta', 2, 'Delta', 0.01, 'PriorSD',1000, ...\n 'TargetThreshold', criterionCorrs(k),'Gamma', 0.5, ...\n 'MaxValue', maxDelta, 'MinValue', minDelta);\n end\nend\n\n% Simulate interleaved staircases\nfor i = 1:numTrialsPerStaircase\n order = Shuffle(1:nInterleavedStaircases);\n for k = 1:nInterleavedStaircases\n comparisonDelta = getCurrentValue(st{order(k)});\n response = SimulateTAFC(testStimulus,testStimulus+comparisonDelta,noiseSd,noiseSd,1,adaptingBias);\n st{order(k)} = updateForTrial(st{order(k)},comparisonDelta,response);\n end\nend\n\n% Analyze staircase data\nvaluesStair = []; responsesStair = [];\nfor k = 1:nInterleavedStaircases\n threshStair(k) = getThresholdEstimate(st{k});\n [valuesSingleStair{k},responsesSingleStair{k}] = getTrials(st{k});\n valuesStair = [valuesStair valuesSingleStair{k}];\n responsesStair = [responsesStair responsesSingleStair{k}];\nend\n[meanValues,nCorrectStair,nTrialsStair] = GetAggregatedStairTrials(valuesStair,responsesStair,10);\n\n% Fit staircase data using Palamedes\n[paramsValuesStair] = PAL_PFML_Fit(...\n valuesStair',responsesStair',ones(size(responsesStair')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options, ...\n 'lapseLimits',lapseLimits);\nprobCorrFitStair = PF(paramsValuesStair,comparisonStimuliFit'-testStimulus);\nthreshPalStair = PFI(paramsValuesStair,criterionCorr);\n\n% Figure\nstairFig = figure; clf;\ncolors = ['r' 'g' 'b' 'k' 'y' 'c'];\nsubplot(1,2,1); hold on\nfor k = 1:nInterleavedStaircases\n xvalues = 1:numTrialsPerStaircase;\n index = find(responsesSingleStair{k} == 0);\n plot(xvalues,valuesSingleStair{k},[colors(k) '-']);\n plot(xvalues,valuesSingleStair{k},[colors(k) 'o'],'MarkerFaceColor',colors(k),'MarkerSize',6);\n if (~isempty(index))\n plot(xvalues(index),valuesSingleStair{k}(index),[colors(k) 'o'],'MarkerFaceColor','w','MarkerSize',6);\n end\n plot(xvalues,threshStair(k)*ones(1,numTrialsPerStaircase),colors(k));\nend\nxlabel('Trial Number','FontSize',16);\nylabel('Level','FontSize',16);\ntitle(sprintf('TAFC staircase plot'),'FontSize',16);\n\nsubplot(1,2,2); hold on\nplot(meanValues,nCorrectStair./nTrialsStair,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot(comparisonStimuliFit-testStimulus,probCorrFitStair,'r','LineWidth',2);\nplot([threshPalStair threshPalStair],[0 criterionCorr],'r','LineWidth',2);\nxlabel('Delta Stimulus','FontSize',16);\nylabel('Prob Correct','FontSize',16);\ntitle(sprintf('TAFC staircase psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1)-testStimulus comparisonStimuli(end)-testStimulus])\nylim([0 1]);\nif (exist('FigureSave','file'))\n 
FigureSave('StaircaseFC',gcf','pdf');\nelse\n saveas(gcf','StaircaseFC','pdf');\nend\n\nfprintf('Staircase simulated data\\n');\nfor k = 1:nInterleavedStaircases\n fprintf('\\tTAFC staircase %d threshold estimate: %g\\n',k,threshStair(k));\nend\nfprintf('Palamedes''s threshold estimate from staircase data: %g\\n',threshPalStair);\n\nend\n\n\n%% Subfunctions for simulating observer\n\nfunction nYes = SimulateProbYes(responseTest,responseComparison,testSd,comparisonSd,nSimulate,adaptingBias)\n% probYes = SimulateProbYes(responseTest,responseComparison,testSd,comparisonSd,nSimulate,adaptingBias)\n%\n% Simulate out the number of times that the comparison is judged as larger on the response variable\n% than the test. I'm sure there is an analytic solution, but it's a little tricky because we\n% allow different standard deviations for the test and comparison noise.\n%\n% Assume experiment is based on comparison of noisy draws from underlying comparison and test \n% distributions. You can also think of responseTest as a criterion. Passing testSd = 0 makes\n% the criterion noise free, and other testSd may be thought of as criterial noise.\n%\n% The parameter adaptingBias is expressed in the same units as the internal response, and is subtracted\n% from the comparison response before the decision. It simulates an adaptive effect. Typically passed\n% as zero. It could also be regarded as a criterion that is shifted from the standard, if you are\n% thinking in TSD terms.\n%\n% 4/25/09 dhb Wrote it.\n\ndiffNoise = normrnd(0,comparisonSd,nSimulate,1)-normrnd(0,testSd,nSimulate,1);\nnYes = length(find(responseComparison-adaptingBias-responseTest+diffNoise > 0));\nend\n\nfunction nCorrect = SimulateTAFC(responseTest,responseComparison,testSd,comparisonSd,nSimulate,adaptingBias)\n% probYes = SimulateProbYes(responseTest,responseComparison,testSd, comparisonSd,nSimulate,adaptingBias)\n%\n% Simulate out the number of times that a TAFC task is done correctly, with judgment greater\n% corresponding to greater noisy response. \n%\n% The parameter adaptingBias is expressed in the same units as the internal response, and is subtracted\n% from the comparison response before the decision. It simulates an adaptive effect. Typically passed\n% as zero. 
This can be a bit weird because the decision rule is coded on the assumption that the \n% comparison is always bigger than the test.\n%\n% 4/25/09 dhb Wrote it.\n\nnCorrect = 0;\nfor i = 1:nSimulate\n responseTestNoise = responseTest+normrnd(0,testSd,1,1);\n responseComparisonNoise = responseComparison+normrnd(0,comparisonSd,1,1)-adaptingBias;\n \n if (responseComparison > responseTest & responseComparisonNoise > responseTestNoise)\n nCorrect = nCorrect+1;\n elseif (responseComparison <= responseTest & responseComparisonNoise <= responseTestNoise)\n nCorrect = nCorrect+1;\n end\nend\nend\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "psychofitTutorialTAFC.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/psychofitTutorial/psychofitTutorialTAFC.m", "size": 5173, "source_encoding": "utf_8", "md5": "fa2b815076ca613d136e8e4c261f32ef", "text": "function psychofitTutorialTAFC\n% psychofitTutorialTAFC\n%\n% Show basic use of Palamedes toolboxes to simulate and\n% fit psychophysical data, TAFC, for method of constant stimuli.\n%\n% You need the Palamedes toolboxe (1.8.2) for this to work.\n\n% 04/30/09 dhb Broke out from 2014 version and updated.\n\n%% Clear\nclear; close all;\n\n%% Specify precision as noise of a Gaussian variable\n%\n% Simulation parameters\nnoiseSd = 0.06;\ntestStimulus = 100;\nnComparisonFit = 100;\nnComparison = 10;\nnSimulate = 40;\nnComparisonSds = 4;\nthresholdCriterionCorrect = 0.75;\n\n%% Simulate TAFC psychometric function and fit. Here the Weibull is a more natural functional\n% form, and we show its use for both toolboxes.\n%\n% The most natural x axis for TAFC is the increment of the comparison relative to\n% the test, so that a 0 comparison corresponds to chance performance.\ncomparisonStimuli = linspace(testStimulus,testStimulus+nComparisonSds*noiseSd,nComparison);\ncomparisonStimuliFit = linspace(testStimulus,testStimulus+nComparisonSds*noiseSd,nComparisonFit);\nfor i = 1:nComparison\n nCorrect(i) = SimulateTAFC(testStimulus,comparisonStimuli(i),noiseSd,noiseSd,nSimulate); %#ok\nend\n\n% Palamedes fit\n%\n% Fit with Palemedes Toolbox. The parameter constraints match the psignifit parameters above. Again, some\n% thought is required to initialize reasonably. The threshold parameter is reasonably taken to be in the\n% range of the comparison stimuli, where here 0 means that the comparison is the same as the test. The \n% second parameter should be on the order of 1/2, so we just hard code that. As with Y/N, really want to \n% plot the fit against the data to make sure it is reasonable in practice.\n\n% Define what psychometric functional form to fit.\n%\n% Alternatives: PAL_Gumbel, PAL_Weibull, PAL_CumulativeNormal, PAL_HyperbolicSecant\nPF = @PAL_Weibull; \n\n% The first two parameters of the Weibull define its shape.\n%\n% The third is the guess rate, which determines the value the function\n% takes on at x = 0. For TAFC, this should be locked at 0.5.\n%\n% The fourth parameter is the lapse rate - the asymptotic performance at \n% high values of x. For a perfect subject, this would be 0, but sometimes\n% subjects have a \"lapse\" and get the answer wrong even when the stimulus\n% is easy to see. We can search over this, but shouldn't allow it to take\n% on unreasonable values. 0.05 as an upper limit isn't crazy.\n%\n% paramsFree is a boolean vector that determins what parameters get\n% searched over. 1: free parameter, 0: fixed parameter\nparamsFree = [1 1 0 1]; \n\n% Initial guess. 
Setting the first parameter to the middle of the stimulus\n% range and the second to 1 puts things into a reasonable ballpark here.\nparamsValues0 = [mean(comparisonStimuli'-testStimulus) 1 0.5 0.01];\n\n% This puts limits on the range of the lapse rate\nlapseLimits = [0 0.05];\n\n% Set up standard options for Palamedes search\noptions = PAL_minimize('options');\n\n% Do the search to get the parameters\n[paramsValues] = PAL_PFML_Fit(...\n comparisonStimuli'-testStimulus,nCorrect',nSimulate*ones(size(nCorrect')), ...\n paramsValues0,paramsFree,PF,'searchOptions',options,'lapseLimits',lapseLimits);\n\n%% Make a smooth curve with the parameters\nprobCorrFitPal = PF(paramsValues,comparisonStimuliFit'-testStimulus);\n\n%% Invert psychometric function to find threshold\nthreshPal = PF(paramsValues,thresholdCriterionCorrect,'inverse');\n\n%% Plot of TAFC simulation\n%\n% The plot shows the simulated data, the fit, and the threshold from the\n% fit.\nfigure; clf; hold on\nplot(comparisonStimuli'-testStimulus,nCorrect/nSimulate,'ko','MarkerSize',6,'MarkerFaceColor','k');\nplot(comparisonStimuliFit-testStimulus,probCorrFitPal,'g','LineWidth',1);\nplot([threshPal threshPal],[0 thresholdCriterionCorrect],'g','LineWidth',1);\nxlabel('Delta Stimulus','FontSize',16);\nylabel('Prob Correct','FontSize',16);\ntitle(sprintf('TAFC psychometric function'),'FontSize',16);\nxlim([comparisonStimuli(1)-testStimulus comparisonStimuli(end)-testStimulus])\nylim([0 1.01]);\nif (exist('FigureSave','file'))\n\tFigureSave('PsychoTAFC',gcf,'pdf');\nelse\n saveas(gcf,'PsychTAFC','pdf');\nend\n\n% Printout\nfprintf('TAFC simulated data\\n');\nfprintf('Palamedes thresh: %g\\n',threshPal);\nfprintf('Parameters: %0.2g %0.2g %0.2g %0.2g\\n',paramsValues(1),paramsValues(2),paramsValues(3),paramsValues(4));\nfprintf('\\n');\n\nend\n\nfunction nCorrect = SimulateTAFC(responseTest,responseComparison,testSd,comparisonSd,nSimulate)\n% nCorrect = SimulateTAFC(responseTest,responseComparison,testSd,comparisonSd,nSimulate)\n%\n% Simulate out the number of times that a TAFC task is done correctly, with judgment greater\n% corresponding to greater noisy response. \n%\n% 4/25/09 dhb Wrote it.\n\nnCorrect = 0;\nfor i = 1:nSimulate\n responseTestNoise = responseTest+normrnd(0,testSd,1,1);\n responseComparisonNoise = responseComparison+normrnd(0,comparisonSd,1,1);\n \n if (responseComparison > responseTest & responseComparisonNoise > responseTestNoise)\n nCorrect = nCorrect+1;\n elseif (responseComparison <= responseTest & responseComparisonNoise <= responseTestNoise)\n nCorrect = nCorrect+1;\n end\nend\nend\n\n\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "poissonSetup.m", "ext": ".m", "path": "TeachingCode-master/MatlabTutorials/filteringAndNoise (Phil Nelson)/poissonSetup.m", "size": 716, "source_encoding": "utf_8", "md5": "26ec6970be1e3b049bf000cac6dbd324", "text": "%% pcn 9/07 poissonSetup.m\n% this function sets up the vector distrBins, which can then be used\n% to generate random integers in a Poisson distribution:\n% a. this function poissonSetup(Q) prepares the vector distrBins\n% b. to use it in your main routine, initialize with:\n% dist=poissonSetup(2)\n% (The argument selects the desired mean.) 
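% An expanded example of the usage described in (b) and continued just
% below (added sketch, not part of the original help): to draw 10000
% samples with mean 2 and check the sample mean,
%   dist = poissonSetup(2);
%   samples = zeros(1,10000);
%   for i = 1:10000
%     [n,k] = histc(rand,dist);
%     samples(i) = k - 1;   % k is the sample plus 1
%   end
%   mean(samples)           % should come out close to 2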
Then say:\n% [n,k]=histc(rand,dist)\n% which returns \n% k = a sample from the distribution, plus 1 \n% (and n = array with zeros except for bin #k)\n%\n%%\nfunction distrBins=poissonSetup(r);\ntopBin=max([10 round(10*r)]);\ntmpBins(1)=0;\nrunning=exp(-r);\ntmpBins(2)=running;\nfor m=1:(topBin-2);\n running=running*r/m;\n tmpBins(m+2)=running; end;\ndistrBins=cumsum(tmpBins);\nreturn;"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "MGL_MOGL_VertexArray.m", "ext": ".m", "path": "TeachingCode-master/MGLExamples/MGL_MOGL_VertexArray.m", "size": 8758, "source_encoding": "utf_8", "md5": "626bad60ec16d7eb3318b734aeb9e5e2", "text": "function MGL_MOGL_VertexArray\n% MGL_MOGL_VertexArray\n%\n% Description:\n% Shows how to create a simple shape with vertex arrays.\n\n% This setups up some OpenGL constants in the Matlab environment.\n% Essentially, anything in C OpenGL that starts with GL_ becomes GL.., e.g.\n% GL_RECT becomes GL.RECT. All GL_ are stored globally in the GL struct.\nglobal GL;\nInitializeMatlabOpenGL;\n\n% Setup some parameters we'll use.\nscreenDims = [50 30];\t\t% Width, height in centimeters of the display.\nbackgroundRGB = [0 0 0];\t% RGB of the background. All values are in the [0,1] range.\nscreenDist = 50;\t\t\t% The distance from the observer to the display.\ncubeSize = 10;\t\t\t\t% Size of one side of the cube.\n\n% This the half the distance between the observers 2 pupils. This value is\n% key in setting up the stereo perspective for the left and right eyes.\n% For a single screen setup, we'll use a value of 0 since we're not\n% actually in stereo.\nioOffset = 0;\n\n% Define the vertices of our cube.\nv = zeros(8, 3);\nv(1,:) = [-1 -1 1];\nv(2,:) = [1 -1 1];\nv(3,:) = [1 1 1];\nv(4,:) = [-1 1 1];\nv(5,:) = [-1 -1 -1];\nv(6,:) = [1 -1 -1];\nv(7,:) = [1 1 -1];\nv(8,:) = [-1 1 -1];\n\t\t\t \n% Now we define the vertex information for the vertex arrays we'll be\n% using. Essentially, we're defining the vertices for the OpenGL\n% primitives that will be used.\ncubeVerts = [v(1,:), v(2,:), v(3,:), v(4,:), ...\t% Front\n\t\t\t v(2,:), v(6,:), v(7,:), v(3,:), ...\t% Right\n\t\t\t v(6,:), v(5,:), v(8,:), v(7,:), ...\t% Back\n\t\t\t v(5,:), v(1,:), v(4,:), v(8,:), ...\t% Left\n\t\t\t v(4,:), v(3,:), v(7,:), v(8,:), ...\t% Top\n\t\t\t v(2,:), v(1,:), v(5,:), v(6,:)];\t\t% Bottom\n\t\t\t \n% Define the surface normals for the each vertex for every primitive. It\n% is possible to use shared vertices to reduce the number of specified\n% values, but it makes it trickier to define vertex normals. These need to\n% match the vertices defined above.\nn.front = [0 0 1];\nn.back = [0 0 -1];\nn.right = [1 0 0];\nn.left = [-1 0 0];\nn.up = [0 1 0];\nn.down = [0 -1 0];\ncubeNormals = [repmat(n.front, 1, 4), repmat(n.right, 1, 4), ...\n\t\t\t repmat(n.back, 1, 4), repmat(n.left, 1, 4), ...\n\t\t\t repmat(n.up, 1, 4), repmat(n.down, 1, 4)];\n\t\t \n% Define the vertex colors.\ncubeColors = repmat([1 0 0], 1, 24);\n\t\t\t \n\t\t\t \n% Now we define the indices of the vertices that we'll use to define the\n% cube. 
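% An aside (added note, not used below): because cubeVerts above repeats each
% corner once per face, the cube needs 24 vertex entries. With glVertexPointer
% pointed at an 8-vertex array built from v instead, the same six quads could
% be drawn from shared indices, e.g.
%   sharedIndices = uint8([0 1 2 3, 1 5 6 2, 5 4 7 6, ...
%                          4 0 3 7, 3 2 6 7, 1 0 4 5]);
% The price is that a shared corner can carry only one normal, so the flat
% per-face normals defined above would no longer be possible -- which is why
% this tutorial duplicates the vertices.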
These are indices into the 'cubeVerts' array specified above.\n% Note that OpenGL uses 0 based indices unlike Matlab.\ncubeIndices = [0 1 2 3, ...\t\t\t% Front\n\t\t\t 4 5 6 7, ...\t\t\t% Right\n\t\t\t 8 9 10 11, ...\t\t% Back\n\t\t\t 12 13 14 15, ...\t\t% Left\n\t\t\t 16 17 18 19, ...\t\t% Top\n\t\t\t 20 21 22 23];\t\t% Bottom\n\n% Convert the indices to unsigned bytes for storage optimization.\ncubeIndices = uint8(cubeIndices);\n\ntry\n\tmglOpen;\n\t\n\t% We need to calculate a frustum to define our perspective matrix.\n\t% Using this data in combination with the glFrustum command, we can now\n\t% have a 3D rendering space instead of orthographic (2D).\n\tfrustum = calculateFrustum(screenDist, screenDims, ioOffset);\n\t\n\t% Setup what our background color will be. We only need to do this\n\t% once unless we want to change our background color in the middle of\n\t% the program.\n\tglClearColor(backgroundRGB(1), backgroundRGB(2), backgroundRGB(3), ...\n\t\t0); % This 4th value is the alpha value. We rarely care about it\n\t\t\t % for the background color.\n\t\t\t \n\t% Make sure we're testing for depth. Important if more than 1 thing is\n\t% on the screen and you don't want to deal with render order effects.\n\tglEnable(GL.DEPTH_TEST);\n\t\n\t% These help things rendered look nicer.\n\tglEnable(GL.BLEND);\n\tglEnable(GL.POLYGON_SMOOTH);\n\tglEnable(GL.LINE_SMOOTH);\n\tglEnable(GL.POINT_SMOOTH);\n\t\n\t% Turn on lighting.\n\tglLightfv(GL.LIGHT0, GL.AMBIENT, [0.5 0.5 0.5 1]);\n\tglLightfv(GL.LIGHT0, GL.DIFFUSE, [0.6 0.6 0.6 1]);\n\tglLightfv(GL.LIGHT0, GL.SPECULAR, [0.5 0.5 0.5 1]);\n\tglLightfv(GL.LIGHT0, GL.POSITION, [0 0 0 1]);\n\tglEnable(GL.LIGHTING);\n\tglEnable(GL.COLOR_MATERIAL);\n\tglEnable(GL.LIGHT0);\n\t\n\t% Turn on character listening. This function causes keyboard\n\t% characters to be gobbled up so they don't appear in any Matlab\n\t% window.\n\tmglEatKeys(1:50);\n\t\n\t% Clear the keyboard buffer.\n\tmglGetKeyEvent;\n\t\n\tkeepDrawing = true;\n\twhile keepDrawing\n\t\t% Look for a keyboard press.\n\t\tkey = mglGetKeyEvent;\n\t\t\n\t\t% If the nothing was pressed keeping drawing.\n\t\tif ~isempty(key)\n\t\t\t% We can react differently to each key press.\n\t\t\tswitch key.charCode\n\t\t\t\t\t\n\t\t\t\t% All other keys go here.\n\t\t\t\totherwise\n\t\t\t\t\tfprintf('Exiting...\\n');\n\t\t\t\t\t\n\t\t\t\t\t% Quit our drawing loop.\n\t\t\t\t\tkeepDrawing = false;\n\t\t\tend\n\t\tend\n\t\t\n\t\t% Setup the projection matrix. The projection matrix defines how\n\t\t% the OpenGL coordinate system maps onto the physical screen.\n\t\tglMatrixMode(GL.PROJECTION);\n\t\t\n\t\t% This gives us a clean slate to work with.\n\t\tglLoadIdentity;\t\t\n\t\t\n\t\t% Map our 3D rendering space to the display given a specific\n\t\t% distance from the screen to the subject and an interocular\n\t\t% offset. This is calculated at the beginning of the program.\n\t\tglFrustum(frustum.left, frustum.right, frustum.bottom, frustum.top, frustum.near, frustum.far);\n\t\t\n\t\t% Now we switch to the modelview mode, which is where we draw\n\t\t% stuff.\n\t\tglMatrixMode(GL.MODELVIEW);\n\t\tglLoadIdentity;\n\t\t\n\t\t% In 3D mode, we need to specify where the camera (the subject) is\n\t\t% in relation to the display. Essentially, for proper stereo, the\n\t\t% camera will be placed at the screen distance facing straight\n\t\t% ahead not at (0,0).\n\t\tgluLookAt(ioOffset, 0, screenDist, ... % Eye position\n\t\t\t\t ioOffset, 0, 0, ... 
% Fixation center\n\t\t\t\t 0, 1, 0);\t\t\t\t\t % Vector defining which way is up.\n\t\t\t \n\t\t% Clear our rendering space. If you don't do this rendered in the\n\t\t% buffer before will still be there. The scene is filled with the\n\t\t% background color specified above.\n\t\tglClear(mor(GL.COLOR_BUFFER_BIT, GL.DEPTH_BUFFER_BIT, GL.STENCIL_BUFFER_BIT, GL.ACCUM_BUFFER_BIT));\n\t\t\n\t\tglRotated(20, 1, 1, 1);\n\t\t\n\t\t% Set the size of the cube.\n\t\tglScaled(cubeSize/2, cubeSize/2, cubeSize/2);\n\t\t\t\t\n\t\t% Render the cube.\n\t\tglEnableClientState(GL.VERTEX_ARRAY);\n\t\tglEnableClientState(GL.NORMAL_ARRAY);\n\t\tglEnableClientState(GL.COLOR_ARRAY);\n\t\tglNormalPointer(GL.DOUBLE, 0, cubeNormals);\n\t\tglColorPointer(3, GL.DOUBLE, 0, cubeColors);\n\t\tglVertexPointer(3, GL.DOUBLE, 0, cubeVerts);\n\t\t\n\t\tglColorMaterial(GL.FRONT_AND_BACK, GL.AMBIENT_AND_DIFFUSE);\n\t\t\n\t\tglDrawElements(GL.QUADS, length(cubeIndices), GL.UNSIGNED_BYTE, cubeIndices);\n\t\tglDisableClientState(GL.VERTEX_ARRAY);\n\t\tglDisableClientState(GL.NORMAL_ARRAY);\n\t\tglDisableClientState(GL.COLOR_ARRAY);\n\t\t\n\t\t% This command sticks everything we just did onto the screen. It\n\t\t% syncs to the refresh rate of the display.\n\t\tmglFlush;\n\tend\n\t\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\ncatch e\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\n\t\n\t% Send the error to the Matlab command window.\n\trethrow(e);\nend\n\n\nfunction frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n% frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n%\n% Description:\n% Takes some basic screen information and calculates the frustum parameters\n% required to setup a 3D projection matrix.\n%\n% Input:\n% screenDistance (scalar) - Distance from the screen to the observer.\n% screenDims (1x2) - Dimensions of the screen. (width, height)\n% horizontal offset (scalar) - Horizontal shift of the observer from the\n% center of the display. Should be 0 for regular displays and half the\n% interocular distance for stereo setups.\n%\n% Output:\n% frust (struct) - Struct containing all calculated frustum parameters.\n% Contains the following fields.\n% 1. left - Left edge of the near clipping plane.\n%\t2. right - Right edge of the near clipping plane.\n%\t3. top - Top edge of the near clipping plane.\n%\t4. bottom - Bottom edge of the near clipping plane.\n%\t5. near - Distance from the observer to the near clipping plane.\n%\t6. 
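% The vertex-array record above stresses that cubeIndices is 0-based for
% OpenGL while MATLAB arrays are 1-based. A small sketch for inspecting the
% same data on the MATLAB side, reusing cubeVerts and cubeIndices exactly as
% defined in that file:
verts = reshape(cubeVerts, 3, []);                   % one xyz column per vertex (24 columns)
frontFace = verts(:, double(cubeIndices(1:4)) + 1)   % +1 converts the 0-based face indices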
far - Distance from the observer to the far clipping plane.\n\nif nargin ~= 3\n\terror('Usage: frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)');\nend\n\n% I chose these constants as reasonable values for the distances from the\n% camera for the type of experiments the Brainard lab does.\nfrustum.near = 1;\nfrustum.far = 100;\n\n% Use similar triangles to figure out the boundaries of the near clipping\n% plane based on the information about the screen size and its distance\n% from the camera.\nfrustum.right = (screenDims(1)/2 - horizontalOffset) * frustum.near / screenDistance;\nfrustum.left = -(screenDims(1)/2 + horizontalOffset) * frustum.near / screenDistance;\nfrustum.top = screenDims(2)/2 * frustum.near / screenDistance;\nfrustum.bottom = -frustum.top;\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "MGL_MOGL_NURBS.m", "ext": ".m", "path": "TeachingCode-master/MGLExamples/MGL_MOGL_NURBS.m", "size": 7889, "source_encoding": "utf_8", "md5": "6070c511717e4aeb6321fc2724c40d49", "text": "function MGL_MOGL_NURBS\n% MGL_MOGL_NURBS\n%\n% Description:\n% Opens a full screen MGL window with a black background, and renders a\n% NURBS surface.\n%\n% Keyboard Control:\n% 'q' - Exits the program.\n% 't', 'r' - Rotate the surface about the x-axis.\n\n% This setups up some OpenGL constants in the Matlab environment.\n% Essentially, anything in C OpenGL that starts with GL_ becomes GL.., e.g.\n% GL_RECT becomes GL.RECT. All GL. are stored globally in the GL struct.\nglobal GL;\nInitializeMatlabOpenGL;\n\n% Setup some parameters we'll use.\nscreenDims = [50 30];\t\t% Width, height in centimeters of the display.\nscreenDist = 50;\t\t\t% The distance from the observer to the display.\nbackgroundRGB = [0 0 0];\t% RGB of the background. All values are in the [0,1] range.\nrotationAmount = -80;\t\t% Degrees of rotation about the x-axis.\n\n% This the half the distance between the observers 2 pupils. This value is\n% key in setting up the stereo perspective for the left and right eyes.\n% For a single screen setup, we'll use a value of 0 since we're not\n% actually in stereo.\nioOffset = 0;\n\n% Define the NURBS surface.\nctlPoints = zeros(4,4,3);\ncPoints = zeros(1, 4*4*3);\ncIndex = 1;\nfor u = 1:4\n\tfor v = 1:4\n\t\tctlPoints(u,v,1) = 2.0*(u - 1.5);\n\t\tctlPoints(u,v,2) = 2.0*(v - 1.5);\n\t\t\n\t\tif ( (u == 2 || u == 3) && (v == 2 || v == 3))\n\t\t\tctlPoints(u,v,3) = 3.0;\n\t\telse\n\t\t\tctlPoints(u,v,3) = -3.0;\n\t\tend\n\t\t\n\t\t% Re-pack the control points data into an array that the glNurbs\n\t\t% functions below understand.\n\t\tcPoints(cIndex:(cIndex+2)) = ctlPoints(u,v,:);\n\t\tcIndex = cIndex + 3;\n\tend\nend\n\nknots = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0];\n\ntry\n\tmglOpen;\n\t\n\t% We need to calculate a frustum to define our perspective matrix.\n\t% Using this data in combination with the glFrustum command, we can now\n\t% have a 3D rendering space instead of orthographic (2D).\n\tfrustum = calculateFrustum(screenDist, screenDims, ioOffset);\n\t\n\t% Setup what our background color will be. We only need to do this\n\t% once unless we want to change our background color in the middle of\n\t% the program.\n\tglClearColor(backgroundRGB(1), backgroundRGB(2), backgroundRGB(3), ...\n\t\t0); % This 4th value is the alpha value. We rarely care about it\n\t\t\t % for the background color.\n\t\n\t% Make sure we're testing for depth. 
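% The calculateFrustum helper above maps the physical screen edges onto the
% near clipping plane with similar triangles, so with near = 1 each frustum
% edge is just the corresponding screen half-size scaled by 1/screenDist. A
% quick numeric check using the example values from these files (50 x 30 cm
% screen, 50 cm viewing distance, no interocular offset):
screenDims = [50 30]; screenDist = 50; ioOffset = 0; nearPlane = 1;
right  =  (screenDims(1)/2 - ioOffset) * nearPlane / screenDist;   % 0.5
left   = -(screenDims(1)/2 + ioOffset) * nearPlane / screenDist;   % -0.5
top    =   screenDims(2)/2             * nearPlane / screenDist;   % 0.3
bottom = -top;
fprintf('frustum edges: l=%.2f r=%.2f b=%.2f t=%.2f\n', left, right, bottom, top);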
Important if more than 1 thing is\n\t% on the screen and you don't want to deal with render order effects.\n\tglEnable(GL.DEPTH_TEST);\n\t\n\t% These help things rendered look nicer.\n\tglEnable(GL.BLEND);\n\tglEnable(GL.POLYGON_SMOOTH);\n\tglEnable(GL.LINE_SMOOTH);\n\tglEnable(GL.POINT_SMOOTH);\n\t\n\t% Setup some lighting and material properties for the surface.\n\tglEnable(GL.LIGHTING);\n\tglEnable(GL.LIGHT0);\n\tmatDiffuse = [0.7, 0.7, 0.7, 1.0];\n\tmatSpecular = [1.0, 1.0, 1.0, 1.0];\n\tmatShininess = 100;\n\tglMaterialfv(GL.FRONT, GL.DIFFUSE, matDiffuse);\n\tglMaterialfv(GL.FRONT, GL.SPECULAR, matSpecular);\n\tglMaterialfv(GL.FRONT, GL.SHININESS, matShininess);\n\t\n\t% Have OpenGL do polygon normalization for us to make the surface look\n\t% smoother.\n\tglEnable(GL.AUTO_NORMAL);\n\tglEnable(GL.NORMALIZE);\n\t\n\t% Create the NURBS renderer.\n\ttheNurb = gluNewNurbsRenderer;\n\t\n\t% Turn on character listening. This function causes keyboard\n\t% characters to be gobbled up so they don't appear in any Matlab\n\t% window.\n\tmglEatKeys(1:50);\n\t\n\tkeepDrawing = true;\n\twhile keepDrawing\n\t\t% Look for a keyboard press.\n\t\tkey = mglGetKeyEvent;\n\t\t\n\t\t% If the nothing was pressed keeping drawing.\n\t\tif ~isempty(key)\n\t\t\t% We can react differently to each key press.\n\t\t\tswitch key.charCode\n\t\t\t\tcase 'r'\n\t\t\t\t\trotationAmount = rotationAmount + 10;\n\t\t\t\t\t\n\t\t\t\tcase 't'\n\t\t\t\t\trotationAmount = rotationAmount - 10;\n\t\t\t\t\t\n\t\t\t\t% All other keys go here.\n\t\t\t\tcase 'q'\n\t\t\t\t\tfprintf('Exiting...\\n');\n\t\t\t\t\t\n\t\t\t\t\t% Quit our drawing loop.\n\t\t\t\t\tkeepDrawing = false;\n\t\t\tend\n\t\tend\n\t\t\n\t\t% Setup the projection matrix. The projection matrix defines how\n\t\t% the OpenGL coordinate system maps onto the physical screen.\n\t\tglMatrixMode(GL.PROJECTION);\n\t\t\n\t\t% This gives us a clean slate to work with.\n\t\tglLoadIdentity;\t\t\n\t\t\n\t\t% Map our 3D rendering space to the display given a specific\n\t\t% distance from the screen to the subject and an interocular\n\t\t% offset. This is calculated at the beginning of the program.\n\t\tglFrustum(frustum.left, frustum.right, frustum.bottom, frustum.top, frustum.near, frustum.far);\n\t\t\n\t\t% Now we switch to the modelview mode, which is where we draw\n\t\t% stuff.\n\t\tglMatrixMode(GL.MODELVIEW);\n\t\tglLoadIdentity;\n\t\t\n\t\t% In 3D mode, we need to specify where the camera (the subject) is\n\t\t% in relation to the display. Essentially, for proper stereo, the\n\t\t% camera will be placed at the screen distance facing straight\n\t\t% ahead not at (0,0).\n\t\tgluLookAt(ioOffset, 0, screenDist, ... % Eye position\n\t\t\t\t ioOffset, 0, 0, ... % Fixation center\n\t\t\t\t 0, 1, 0);\t\t\t\t\t % Vector defining which way is up.\n\t\t\n\t\t% Clear our rendering space. If you don't do this rendered in the\n\t\t% buffer before will still be there. 
The scene is filled with the\n\t\t% background color specified above.\n\t\tglClear(mor(GL.COLOR_BUFFER_BIT, GL.DEPTH_BUFFER_BIT, GL.STENCIL_BUFFER_BIT, GL.ACCUM_BUFFER_BIT));\n\t\t\n\t\t% Rotate the surface.\n\t\tglRotated(rotationAmount, 1, 0, 0);\n\t\t\n\t\t% Move the surface to the center of the screen.\n\t\tglTranslated(-2, -2, 0);\n\t\t\n\t\t% Render the NURBS surface.\n\t\tgluBeginSurface(theNurb);\n\t\tgluNurbsSurface(theNurb, ...\n\t\t\t8, knots, 8, knots, ...\n\t\t\t4 * 3, 3, cPoints, ...\n\t\t\t4, 4, GL.MAP2_VERTEX_3);\n\t\tgluEndSurface(theNurb);\n\t\t\n\t\t% Show the control points.\n\t\tglPointSize(5.0);\n\t\tglDisable(GL.LIGHTING);\n\t\tglColor3f(1.0, 1.0, 0.0);\n\t\tglBegin(GL.POINTS);\n\t\tfor i = 1:4\n\t\t\tfor j = 1:4\n\t\t\t\tglVertex3fv(ctlPoints(i,j,:));\n\t\t\tend\n\t\tend\n\t\tglEnd;\n\t\tglEnable(GL.LIGHTING);\n\t\t\n\t\t% This command sticks everything we just did onto the screen. It\n\t\t% syncs to the refresh rate of the display.\n\t\tmglFlush;\n\tend\n\t\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\ncatch e\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\n\t\n\t% Send the error to the Matlab command window.\n\trethrow(e);\nend\n\n\nfunction frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n% frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n%\n% Description:\n% Takes some basic screen information and calculates the frustum parameters\n% required to setup a 3D projection matrix.\n%\n% Input:\n% screenDistance (scalar) - Distance from the screen to the observer.\n% screenDims (1x2) - Dimensions of the screen. (width, height)\n% horizontal offset (scalar) - Horizontal shift of the observer from the\n% center of the display. Should be 0 for regular displays and half the\n% interocular distance for stereo setups.\n%\n% Output:\n% frust (struct) - Struct containing all calculated frustum parameters.\n% Contains the following fields.\n% 1. left - Left edge of the near clipping plane.\n%\t2. right - Right edge of the near clipping plane.\n%\t3. top - Top edge of the near clipping plane.\n%\t4. bottom - Bottom edge of the near clipping plane.\n%\t5. near - Distance from the observer to the near clipping plane.\n%\t6. 
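% With the clamped knot vector [0 0 0 0 1 1 1 1] and a 4x4 control net, the
% gluNurbsSurface call above describes a single bicubic Bezier patch, so the
% same surface can be evaluated directly in MATLAB with the cubic Bernstein
% basis. A small sketch that rebuilds the control points from this file and
% evaluates the patch at its centre u = v = 0.5:
ctlPoints = zeros(4,4,3);
for u = 1:4
    for v = 1:4
        ctlPoints(u,v,1) = 2.0*(u - 1.5);
        ctlPoints(u,v,2) = 2.0*(v - 1.5);
        if ((u == 2 || u == 3) && (v == 2 || v == 3))
            ctlPoints(u,v,3) = 3.0;
        else
            ctlPoints(u,v,3) = -3.0;
        end
    end
end
bern3 = @(t) [(1-t)^3, 3*t*(1-t)^2, 3*t^2*(1-t), t^3];   % cubic Bernstein basis
Bu = bern3(0.5); Bv = bern3(0.5);
p = zeros(1,3);
for k = 1:3
    p(k) = Bu * ctlPoints(:,:,k) * Bv.';                 % tensor-product evaluation
end
fprintf('patch centre: [%.3f %.3f %.3f]\n', p(1), p(2), p(3));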
far - Distance from the observer to the far clipping plane.\n\nif nargin ~= 3\n\terror('Usage: frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)');\nend\n\n% I chose these constants as reasonable values for the distances from the\n% camera for the type of experiments the Brainard lab does.\nfrustum.near = 1;\nfrustum.far = 100;\n\n% Use similar triangles to figure out the boundaries of the near clipping\n% plane based on the information about the screen size and its distance\n% from the camera.\nfrustum.right = (screenDims(1)/2 - horizontalOffset) * frustum.near / screenDistance;\nfrustum.left = -(screenDims(1)/2 + horizontalOffset) * frustum.near / screenDistance;\nfrustum.top = screenDims(2)/2 * frustum.near / screenDistance;\nfrustum.bottom = -frustum.top;\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "MGL_MOGL_StereoWarping.m", "ext": ".m", "path": "TeachingCode-master/MGLExamples/MGL_MOGL_StereoWarping.m", "size": 9614, "source_encoding": "utf_8", "md5": "294340811667d6f6047cc2f76b701f14", "text": "function MGL_MOGL_StereoWarping\n\n% This setups up some OpenGL constants in the Matlab environment.\n% Essentially, anything in C OpenGL that starts with GL_ becomes GL.., e.g.\n% GL_RECT becomes GL.RECT. All GL_ are stored globally in the GL struct.\nglobal GL;\nInitializeMatlabOpenGL;\n\n% Setup some parameters we'll use.\nscreenDist = 76.4;\t\t\t% The distance from the observer to the display.\nbackgroundRGB = [0 0 0];\t% RGB of the background. All values are in the [0,1] range.\nrectDims = [2.54 2.54]*2;\t% Rectangle dimensions in centimeters.\nrectRGB = [1 0 0];\t\t\t% Color of the rectangle in RGB.\nrectPos = [0 0 0];\t\t\t% (x,y,z) position of the rectangle.\nrectInc = 1;\t\t\t\t% How much we'll move the rectangle for a given step.\n\n% The 2 screens we'll use have different IDs associated with them. We'll\n% set up some variables that make it easy to reference them. These IDs are\n% determined by the operating system, these are not arbitrary. Under \nscreenID.left = 3;\nscreenID.right = 2;\n\n% This the half the distance (cm) between the observers 2 pupils. This value is\n% key in setting up the stereo perspective for the left and right eyes.\nioOffset.left = -3;\nioOffset.right = 3;\n\n% Turn on character listening. This function causes keyboard\n% characters to be gobbled up so they don't appear in any Matlab\n% window.\nmglEatKeys(1:50);\n\n% Clear the keyboard buffer.\nmglGetKeyEvent;\n\ntry\n\t% Open both displays. We setup some of the OpenGL parameters for both\n\t% displays because OpenGL commands are only effective for the\n\t% currently active display as specified by mglSwitchDisplay.\n\tfor whichScreen = {'left', 'right'}\n\t\t% Pull out the string that's contained in the cell.\n\t\ti = whichScreen{1};\n\t\t\n\t\tmglSwitchDisplay(screenID.(i));\n\t\tmglOpen(screenID.(i));\n\t\t\n\t\t% Setup what our background color will be. We only need to do this\n\t\t% once unless we want to change our background color in the middle of\n\t\t% the program.\n\t\tglClearColor(backgroundRGB(1), backgroundRGB(2), backgroundRGB(3), ...\n\t\t\t0); % This 4th value is the alpha value. We rarely care about it\n\t\t\t\t % for the background color.\n\t\t\n\t\t% Make sure we're testing for depth. 
Important if more than 1 thing is\n\t\t% on the screen and you don't want to deal with render order effects.\n\t\tglEnable(GL.DEPTH_TEST);\n\t\t\n\t\t% These help things rendered look nicer.\n\t\tglEnable(GL.BLEND);\n\t\tglEnable(GL.POLYGON_SMOOTH);\n\t\tglEnable(GL.LINE_SMOOTH);\n\t\tglEnable(GL.POINT_SMOOTH);\n\t\t\n\t\t% Load in the calibration file for the display. We use this to\n\t\t% extract information needed for the framebuffer objet which\n\t\t% handles the warping, and some of the screen info.\n\t\tcalFileName = sprintf('StereoWarp-NoRadiance-%s', i);\n\t\tcal = LoadCalFile(calFileName);\n\t\t\n\t\t% Extract the screen dimensions.\n\t\tscreenDims.(i) = cal.warpParams.screenDims;\n\t\t\n\t\t% Create the framebuffer object.\n\t\tfbWidth = cal.warpParams.fbObject.width;\n\t\tfbHeight = cal.warpParams.fbObject.height;\n\t\tfbo.(i) = mglCreateFrameBufferObject(fbWidth, fbHeight);\n\t\t\n\t\t% Create the OpenGL display list we'll use to warp the vertices of\n\t\t% the framebuffer object onto the screen.\n\t\twarpList.(i) = mglCreateWarpList(cal.warpParams.actualGrid, [fbWidth fbHeight]);\n\t\t\n\t\t% Get the scene dimensions of the framebuffer object.\n\t\tfbSceneDims.(i) = cal.warpParams.fbSceneDims;\n\t\t\n\t\t% We need to calculate a frustum to define our perspective matrix.\n\t\t% Using this data in combination with the glFrustum command, we can now\n\t\t% have a 3D rendering space instead of orthographic (2D). The left and\n\t\t% right screen will have different frustums because the eye offset is\n\t\t% in different horizontal directions.\n\t\tfrustum.(i) = calculateFrustum(screenDist, fbSceneDims.(i), ioOffset.(i));\n\tend\n\t\n\tkeepDrawing = true;\n\twhile keepDrawing\n\t\t% Look for a keyboard press.\n\t\tkey = mglGetKeyEvent;\n\t\t\n\t\t% If the nothing was pressed keeping drawing.\n\t\tif ~isempty(key)\n\t\t\t% We can react differently to each key press.\n\t\t\tswitch key.charCode\n\t\t\t\tcase 'r'\n\t\t\t\t\trectRGB = rand(1,3);\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle closer to the subject.\n\t\t\t\tcase 'j'\n\t\t\t\t\trectPos(3) = rectPos(3) + rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle further from the subject.\n\t\t\t\tcase 'k'\n\t\t\t\t\trectPos(3) = rectPos(3) - rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle left.\n\t\t\t\tcase 'a'\n\t\t\t\t\trectPos(1) = rectPos(1) - rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle right.\n\t\t\t\tcase 'd'\n\t\t\t\t\trectPos(1) = rectPos(1) + rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle up.\n\t\t\t\tcase 'w'\n\t\t\t\t\trectPos(2) = rectPos(2) + rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle down.\n\t\t\t\tcase 's'\n\t\t\t\t\trectPos(2) = rectPos(2) - rectInc;\n\t\t\t\t\t\n\t\t\t\t% All other keys go here.\n\t\t\t\totherwise\n\t\t\t\t\tfprintf('Exiting...\\n');\n\t\t\t\t\t\n\t\t\t\t\t% Quit our drawing loop.\n\t\t\t\t\tkeepDrawing = false;\n\t\t\tend\n\t\tend\n\t\t\n\t\t% We need to do the rendering for both displays.\n\t\tfor whichScreen = {'left', 'right'}\n\t\t\ti = whichScreen{1};\n\t\t\t\n\t\t\t% Direct all OpenGL commands to the appropriate screen.\n\t\t\tmglSwitchDisplay(screenID.(i));\n\t\t\t\n\t\t\t% Setup the framebuffer object so that we can render into it.\n\t\t\tmglBindFrameBufferObject(fbo.(i));\n\t\t\t\n\t\t\t% Setup the projection matrix. 
The projection matrix defines how\n\t\t\t% the OpenGL coordinate system maps onto the physical screen.\n\t\t\tglMatrixMode(GL.PROJECTION);\n\t\t\t\n\t\t\t% This gives us a clean slate to work with.\n\t\t\tglLoadIdentity;\n\t\t\t\n\t\t\t% Map our 3D rendering space to the display given a specific\n\t\t\t% distance from the screen to the subject and an interocular\n\t\t\t% offset. This is calculated at the beginning of the program.\n\t\t\tglFrustum(frustum.(i).left, frustum.(i).right, frustum.(i).bottom, ...\n\t\t\t\tfrustum.(i).top, frustum.(i).near, frustum.(i).far);\n\t\t\t\n\t\t\t% Now we switch to the modelview mode, which is where we draw\n\t\t\t% stuff.\n\t\t\tglMatrixMode(GL.MODELVIEW);\n\t\t\tglLoadIdentity;\n\t\t\t\n\t\t\t% In 3D mode, we need to specify where the camera (the subject) is\n\t\t\t% in relation to the display. Essentially, for proper stereo, the\n\t\t\t% camera will be placed at the screen distance facing straight\n\t\t\t% ahead not at (0,0).\n\t\t\tgluLookAt(ioOffset.(i), 0, screenDist, ... % Eye position\n\t\t\t\tioOffset.(i), 0, 0, ...\t\t\t\t % Fixation center\n\t\t\t\t0, 1, 0);\t\t\t\t\t\t\t % Vector defining which way is up.\n\t\t\t\n\t\t\t% Clear our rendering space. If you don't do this rendered in the\n\t\t\t% buffer before will still be there. The scene is filled with the\n\t\t\t% background color specified above.\n\t\t\tglClear(mor(GL.COLOR_BUFFER_BIT, GL.DEPTH_BUFFER_BIT, GL.STENCIL_BUFFER_BIT, GL.ACCUM_BUFFER_BIT));\n\t\t\t\n\t\t\t% Set the rectangle's color.\n\t\t\tglColor3dv(rectRGB);\n\t\t\t\n\t\t\t% This will center the rectangle on the screen. We call this prior\n\t\t\t% to specifying rectangle because all vertices are multiplied\n\t\t\t% against the current transformation matrix. In other words, the\n\t\t\t% order of operations happens in the opposite order they're written\n\t\t\t% in the code.\n\t\t\tglTranslated(-rectDims(1)/2 + rectPos(1), -rectDims(2)/2 + rectPos(2), rectPos(3));\n\t\t\t\n\t\t\t% Draw the rectangle.\n\t\t\tglBegin(GL.QUADS);\n\t\t\tglVertex2d(0, 0);\t\t\t\t\t\t% Lower left corner\n\t\t\tglVertex2d(rectDims(1), 0);\t\t\t\t% Lower right corner\n\t\t\tglVertex2d(rectDims(1), rectDims(2));\t% Upper right corner\n\t\t\tglVertex2d(0, rectDims(2));\t\t\t\t% Upper left corner.\n\t\t\tglEnd;\n\t\t\t\n\t\t\t% Stop rendering to the framebuffer object.\n\t\t\tmglUnbindFrameBufferObject;\n\t\t\t\n\t\t\t% Draw the framebuffer object to the screen.\n\t\t\tmglRenderWarpedFrameBufferObject(fbo.(i).texture, warpList.(i), screenDims.(i));\n\t\t\t\n\t\t\t% This command sticks everything we just did onto the screen. It\n\t\t\t% syncs to the refresh rate of the display.\n\t\t\tmglFlush;\n\t\tend\n\tend\n\t\n\t% Passing -1 to mglSwitchDisplay is a special option which closes any\n\t% MGL windows open on the screen.\n\tmglSwitchDisplay(-1);\ncatch e\n\t% Close any open MGL windows.\n\tmglSwitchDisplay(-1);\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\n\t\n\t% Send the error to the Matlab command window.\n\trethrow(e);\nend\n\n\nfunction frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n% frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n%\n% Description:\n% Takes some basic screen information and calculates the frustum parameters\n% required to setup a 3D projection matrix.\n%\n% Input:\n% screenDistance (scalar) - Distance from the screen to the observer.\n% screenDims (1x2) - Dimensions of the screen. 
(width, height)\n% horizontal offset (scalar) - Horizontal shift of the observer from the\n% center of the display. Should be 0 for regular displays and half the\n% interocular distance for stereo setups.\n%\n% Output:\n% frust (struct) - Struct containing all calculated frustum parameters.\n% Contains the following fields.\n% 1. left - Left edge of the near clipping plane.\n%\t2. right - Right edge of the near clipping plane.\n%\t3. top - Top edge of the near clipping plane.\n%\t4. bottom - Bottom edge of the near clipping plane.\n%\t5. near - Distance from the observer to the near clipping plane.\n%\t6. far - Distance from the observer to the far clipping plane.\n\nif nargin ~= 3\n\terror('Usage: frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)');\nend\n\n% I chose these constants as reasonable values for the distances from the\n% camera for the type of experiments the Brainard lab does.\nfrustum.near = 1;\nfrustum.far = 100;\n\n% Use similar triangles to figure out the boundaries of the near clipping\n% plane based on the information about the screen size and its distance\n% from the camera.\nfrustum.right = (screenDims(1)/2 - horizontalOffset) * frustum.near / screenDistance;\nfrustum.left = -(screenDims(1)/2 + horizontalOffset) * frustum.near / screenDistance;\nfrustum.top = screenDims(2)/2 * frustum.near / screenDistance;\nfrustum.bottom = -frustum.top;\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "MGL_MOGL_Surface.m", "ext": ".m", "path": "TeachingCode-master/MGLExamples/MGL_MOGL_Surface.m", "size": 9259, "source_encoding": "utf_8", "md5": "6d2177251935845d4826380586c28c74", "text": "function MGL_MOGL_Surface\n% MGL_MOGL_Surface\n%\n% Description:\n% Shows how to display an arbitrary surface/mesh.\n\n% This setups up some OpenGL constants in the Matlab environment.\n% Essentially, anything in C OpenGL that starts with GL_ becomes GL.., e.g.\n% GL_RECT becomes GL.RECT. All GL_ are stored globally in the GL struct.\nglobal GL;\nInitializeMatlabOpenGL;\n\n% Setup some parameters we'll use.\nscreenDims = [50 30];\t\t% Width, height in centimeters of the display.\nbackgroundRGB = [0 0 0];\t% RGB of the background. All values are in the [0,1] range.\nscreenDist = 50;\t\t\t% The distance from the observer to the display.\nrotationAmount = -80;\t\t% Amount of rotation to apply to the surface.\n\n% This the half the distance between the observers 2 pupils. This value is\n% key in setting up the stereo perspective for the left and right eyes.\n% For a single screen setup, we'll use a value of 0 since we're not\n% actually in stereo.\nioOffset = 0;\n\n% Create the (x,y,z) coordinates of a mesh.\n[meshData.x, meshData.y] = meshgrid(-8:0.1:8);\nr = sqrt(meshData.x .^ 2 + meshData.y .^ 2) + eps;\nmeshData.z = 5*sin(r)./r;\n\n% Now create the surface normals.\n[meshData.nx meshData.ny meshData.nz] = surfnorm(meshData.x, meshData.y, meshData.z);\n\ntry\n\tmglOpen;\n\t\n\t% We need to calculate a frustum to define our perspective matrix.\n\t% Using this data in combination with the glFrustum command, we can now\n\t% have a 3D rendering space instead of orthographic (2D).\n\tfrustum = calculateFrustum(screenDist, screenDims, ioOffset);\n\t\n\t% Setup what our background color will be. We only need to do this\n\t% once unless we want to change our background color in the middle of\n\t% the program.\n\tglClearColor(backgroundRGB(1), backgroundRGB(2), backgroundRGB(3), ...\n\t\t0); % This 4th value is the alpha value. 
We rarely care about it\n\t\t\t % for the background color.\n\t\t\t \n\t% Make sure we're testing for depth. Important if more than 1 thing is\n\t% on the screen and you don't want to deal with render order effects.\n\tglEnable(GL.DEPTH_TEST);\n\t\n\t% These help things rendered look nicer.\n\tglEnable(GL.BLEND);\n\tglEnable(GL.POLYGON_SMOOTH);\n\tglEnable(GL.LINE_SMOOTH);\n\tglEnable(GL.POINT_SMOOTH);\n\tglShadeModel(GL.SMOOTH);\n\t\n\t% Turn on lighting.\n\tglLightfv(GL.LIGHT0, GL.AMBIENT, [0.5 0.5 0.5 1]);\n\tglLightfv(GL.LIGHT0, GL.DIFFUSE, [0.6 0.6 0.6 1]);\n\tglLightfv(GL.LIGHT0, GL.SPECULAR, [0.5 0.5 0.5 1]);\n\tglLightfv(GL.LIGHT0, GL.POSITION, [20 10 0 0]);\n\tglEnable(GL.LIGHTING);\n\tglEnable(GL.COLOR_MATERIAL);\n\tglEnable(GL.LIGHT0);\n\n\t% Create our OpenGL display list. A display list is basically a group\n\t% of OpenGL commands that can be pre-computed and displayed later.\n\t% There is a reduction in overhead so the display lists improve\n\t% performance for complex renderings.\n\tdisplayList = createDisplayList(meshData);\n\t\n\t% Turn on character listening. This function causes keyboard\n\t% characters to be gobbled up so they don't appear in any Matlab\n\t% window.\n\tmglEatKeys(1:50);\n\t\n\t% Clear the keyboard buffer.\n\tmglGetKeyEvent;\n\t\n\tkeepDrawing = true;\n\twhile keepDrawing\n\t\t% Look for a keyboard press.\n\t\tkey = mglGetKeyEvent;\n\t\t\n\t\t% If the nothing was pressed keeping drawing.\n\t\tif ~isempty(key)\n\t\t\t% We can react differently to each key press.\n\t\t\tswitch key.charCode\n\t\t\t\tcase 'r'\n\t\t\t\t\trotationAmount = rotationAmount + 20;\n\t\t\t\t\t\n\t\t\t\t% All other keys go here.\n\t\t\t\totherwise\n\t\t\t\t\tfprintf('Exiting...\\n');\n\t\t\t\t\t\n\t\t\t\t\t% Quit our drawing loop.\n\t\t\t\t\tkeepDrawing = false;\n\t\t\tend\n\t\tend\n\t\t\n\t\t% Setup the projection matrix. The projection matrix defines how\n\t\t% the OpenGL coordinate system maps onto the physical screen.\n\t\tglMatrixMode(GL.PROJECTION);\n\t\t\n\t\t% This gives us a clean slate to work with.\n\t\tglLoadIdentity;\t\t\n\t\t\n\t\t% Map our 3D rendering space to the display given a specific\n\t\t% distance from the screen to the subject and an interocular\n\t\t% offset. This is calculated at the beginning of the program.\n\t\tglFrustum(frustum.left, frustum.right, frustum.bottom, frustum.top, frustum.near, frustum.far);\n\t\t\n\t\t% Now we switch to the modelview mode, which is where we draw\n\t\t% stuff.\n\t\tglMatrixMode(GL.MODELVIEW);\n\t\tglLoadIdentity;\n\t\t\n\t\t% In 3D mode, we need to specify where the camera (the subject) is\n\t\t% in relation to the display. Essentially, for proper stereo, the\n\t\t% camera will be placed at the screen distance facing straight\n\t\t% ahead not at (0,0).\n\t\tgluLookAt(ioOffset, 0, screenDist, ... % Eye position\n\t\t\t\t ioOffset, 0, 0, ... % Fixation center\n\t\t\t\t 0, 1, 0);\t\t\t\t\t % Vector defining which way is up.\n\t\t\t \n\t\t% Clear our rendering space. If you don't do this rendered in the\n\t\t% buffer before will still be there. 
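% The surface record above builds its mesh from meshgrid and a radial sinc and
% takes per-vertex normals from surfnorm before packing them into a display
% list. A minimal MATLAB-only sketch of the same construction, handy for
% previewing the geometry without opening an MGL window:
[x, y] = meshgrid(-8:0.1:8);
r = sqrt(x.^2 + y.^2) + eps;        % eps avoids 0/0 at the origin
z = 5*sin(r)./r;
[nx, ny, nz] = surfnorm(x, y, z);   % unit-length surface normals per vertex
maxErr = max(max(abs(sqrt(nx.^2 + ny.^2 + nz.^2) - 1)));
fprintf('max deviation of |normal| from 1: %.2g\n', maxErr);
surf(x, y, z); shading interp;      % quick preview of the mesh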
The scene is filled with the\n\t\t% background color specified above.\n\t\tglClear(mor(GL.COLOR_BUFFER_BIT, GL.DEPTH_BUFFER_BIT, GL.STENCIL_BUFFER_BIT, GL.ACCUM_BUFFER_BIT));\n\t\t\n\t\tglColorMaterial(GL.FRONT_AND_BACK, GL.AMBIENT_AND_DIFFUSE);\n\t\t\n\t\t% This rotates the mesh so we can see it better.\n\t\tglRotated(rotationAmount, 1, 1, 1);\n\t\t\n\t\t% Set our specular lighting component manually.\n\t\tglMaterialfv(GL.FRONT, GL.SPECULAR, [0.1 0 0 1])\n\t\t\t\t\n\t\t% Use glColor to specify the ambient and diffuse material\n\t\t% properties of the surface.\n\t\tglColor3dv([1 0 0]);\n\t\t\n\t\t% Call the display list to render the mesh. We wrap the call to\n\t\t% the list with push and pop commands to save OpenGL state\n\t\t% information that might get modified by the display list.\n\t\tglPushMatrix;\n\t\tglPushAttrib(GL.ALL_ATTRIB_BITS);\n\t\tglCallList(displayList);\n\t\tglPopMatrix;\n\t\tglPopAttrib;\n\n\t\t% This command sticks everything we just did onto the screen. It\n\t\t% syncs to the refresh rate of the display.\n\t\tmglFlush;\n\tend\n\t\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\ncatch e\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\n\t\n\t% Send the error to the Matlab command window.\n\trethrow(e);\nend\n\n\nfunction displayList = createDisplayList(meshData)\n% displayList = createDisplayList(meshData)\n%\n% Description:\n% Generates a display list containing the specified mesh.\n%\n% Input:\n% meshData (struct) - Struct containing the vertex and surface normal data.\n%\n% Output:\n% displayList (scalar) - A pointer to the generated display list.\n\nglobal GL;\n\n% Create the empty display list.\ndisplayList = glGenLists(1);\n\n% We stick all the stuff we want in the display list between glNewList and\n% glEndList.\nglNewList(displayList, GL.COMPILE);\n\nglBegin(GL.QUADS);\n\tnumRows = size(meshData.x, 1);\n\tnumCols = size(meshData.x, 2);\n\t\n\t% Loop over all the vertices in the mesh to render all the rectangle\n\t% polygons.\n\tfor row = 1:numRows - 1\n\t\tfor col = 1:numCols - 1\n\t\t\t% Upper left corner.\n\t\t\tglNormal3d(meshData.nx(row, col), meshData.ny(row, col), meshData.nz(row, col));\n\t\t\tglVertex3d(meshData.x(row, col), meshData.y(row, col), meshData.z(row, col));\n\t\t\t\n\t\t\t% Lower left corner.\n\t\t\tglNormal3d(meshData.nx(row+1, col), meshData.ny(row+1, col), meshData.nz(row+1, col));\n\t\t\tglVertex3d(meshData.x(row+1, col), meshData.y(row+1, col), meshData.z(row+1, col));\n\t\t\t\n\t\t\t% Lower right corner.\n\t\t\tglNormal3d(meshData.nx(row+1, col+1), meshData.ny(row+1, col+1), meshData.nz(row+1, col+1));\n\t\t\tglVertex3d(meshData.x(row+1, col+1), meshData.y(row+1, col+1), meshData.z(row+1, col+1));\n\t\t\t\n\t\t\t% Upper right corner.\n\t\t\tglNormal3d(meshData.nx(row, col+1), meshData.ny(row, col+1), meshData.nz(row, col+1));\n\t\t\tglVertex3d(meshData.x(row, col+1), meshData.y(row, col+1), meshData.z(row, col+1));\n\t\tend\n\tend\nglEnd;\n\nglEndList;\n\n\nfunction frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n% frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n%\n% Description:\n% Takes some basic screen information and calculates the frustum parameters\n% required to setup a 3D projection matrix.\n%\n% Input:\n% screenDistance (scalar) - Distance from the screen to the observer.\n% screenDims (1x2) - Dimensions of the screen. 
(width, height)\n% horizontal offset (scalar) - Horizontal shift of the observer from the\n% center of the display. Should be 0 for regular displays and half the\n% interocular distance for stereo setups.\n%\n% Output:\n% frust (struct) - Struct containing all calculated frustum parameters.\n% Contains the following fields.\n% 1. left - Left edge of the near clipping plane.\n%\t2. right - Right edge of the near clipping plane.\n%\t3. top - Top edge of the near clipping plane.\n%\t4. bottom - Bottom edge of the near clipping plane.\n%\t5. near - Distance from the observer to the near clipping plane.\n%\t6. far - Distance from the observer to the far clipping plane.\n\nif nargin ~= 3\n\terror('Usage: frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)');\nend\n\n% I chose these constants as reasonable values for the distances from the\n% camera for the type of experiments the Brainard lab does.\nfrustum.near = 1;\nfrustum.far = 100;\n\n% Use similar triangles to figure out the boundaries of the near clipping\n% plane based on the information about the screen size and its distance\n% from the camera.\nfrustum.right = (screenDims(1)/2 - horizontalOffset) * frustum.near / screenDistance;\nfrustum.left = -(screenDims(1)/2 + horizontalOffset) * frustum.near / screenDistance;\nfrustum.top = screenDims(2)/2 * frustum.near / screenDistance;\nfrustum.bottom = -frustum.top;\n"} +{"plateform": "github", "repo_name": "BrainardLab/TeachingCode-master", "name": "MGL_MOGL_Rect3D.m", "ext": ".m", "path": "TeachingCode-master/MGLExamples/MGL_MOGL_Rect3D.m", "size": 7826, "source_encoding": "utf_8", "md5": "e7461951771b128e13418a47d363f41b", "text": "function MGL_MOGL_Rect3D\n% MGL_MOGL_Rect3D\n%\n% Description:\n% Opens a full screen MGL window with a black background, and renders a\n% rectangle in 3D space.\n%\n% Keyboard Control:\n% 'r' - Randomly change the rectangle color.\n% 'k' - Moves the rectangle further away.\n% 'j' - Moves the rectangle closer.\n% 'a' - Moves the rectangle left.\n% 'd' - Moves the rectangle right.\n% 'w' - Moves the rectangle up.\n% 's' - Moves the rectangle down.\n\n% This setups up some OpenGL constants in the Matlab environment.\n% Essentially, anything in C OpenGL that starts with GL_ becomes GL.., e.g.\n% GL_RECT becomes GL.RECT. All GL_ are stored globally in the GL struct.\nglobal GL;\nInitializeMatlabOpenGL;\n\n% Setup some parameters we'll use.\nscreenDims = [50 30];\t\t% Width, height in centimeters of the display.\nscreenDist = 50;\t\t\t% The distance from the observer to the display.\nbackgroundRGB = [0 0 0];\t% RGB of the background. All values are in the [0,1] range.\nrectDims = [10 6];\t\t\t% Rectangle dimensions in centimeters.\nrectRGB = [1 0 0];\t\t\t% Color of the rectangle in RGB.\nrectPos = [0 0 0];\t\t\t% (x,y,z) position of the rectangle.\nrectInc = 1;\t\t\t\t% How much we'll move the rectangle for a given step.\n\n% This the half the distance between the observers 2 pupils. This value is\n% key in setting up the stereo perspective for the left and right eyes.\n% For a single screen setup, we'll use a value of 0 since we're not\n% actually in stereo.\nioOffset = 0;\t\t\t\t\n\ntry\n\tmglOpen;\n\t\n\t% We need to calculate a frustum to define our perspective matrix.\n\t% Using this data in combination with the glFrustum command, we can now\n\t% have a 3D rendering space instead of orthographic (2D).\n\tfrustum = calculateFrustum(screenDist, screenDims, ioOffset);\n\t\n\t% Setup what our background color will be. 
We only need to do this\n\t% once unless we want to change our background color in the middle of\n\t% the program.\n\tglClearColor(backgroundRGB(1), backgroundRGB(2), backgroundRGB(3), ...\n\t\t0); % This 4th value is the alpha value. We rarely care about it\n\t\t\t % for the background color.\n\t\n\t% Make sure we're testing for depth. Important if more than 1 thing is\n\t% on the screen and you don't want to deal with render order effects.\n\tglEnable(GL.DEPTH_TEST);\n\t\n\t% These help things rendered look nicer.\n\tglEnable(GL.BLEND);\n\tglEnable(GL.POLYGON_SMOOTH);\n\tglEnable(GL.LINE_SMOOTH);\n\tglEnable(GL.POINT_SMOOTH);\n\t\n\t% Turn on character listening. This function causes keyboard\n\t% characters to be gobbled up so they don't appear in any Matlab\n\t% window.\n\tmglEatKeys(1:50);\n\t\n\t% Clear the keyboard buffer.\n\tmglGetKeyEvent;\n\t\n\tkeepDrawing = true;\n\twhile keepDrawing\n\t\t% Look for a keyboard press.\n\t\tkey = mglGetKeyEvent;\n\t\t\n\t\t% If the nothing was pressed keeping drawing.\n\t\tif ~isempty(key)\n\t\t\t% We can react differently to each key press.\n\t\t\tswitch key.charCode\n\t\t\t\tcase 'r'\n\t\t\t\t\trectRGB = rand(1,3);\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle closer to the subject.\n\t\t\t\tcase 'j'\n\t\t\t\t\trectPos(3) = rectPos(3) + rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle further from the subject.\n\t\t\t\tcase 'k'\n\t\t\t\t\trectPos(3) = rectPos(3) - rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle left.\n\t\t\t\tcase 'a'\n\t\t\t\t\trectPos(1) = rectPos(1) - rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle right.\n\t\t\t\tcase 'd'\n\t\t\t\t\trectPos(1) = rectPos(1) + rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle up.\n\t\t\t\tcase 'w'\n\t\t\t\t\trectPos(2) = rectPos(2) + rectInc;\n\t\t\t\t\t\n\t\t\t\t% Move the rectangle down.\n\t\t\t\tcase 's'\n\t\t\t\t\trectPos(2) = rectPos(2) - rectInc;\n\t\t\t\t\t\n\t\t\t\t% All other keys go here.\n\t\t\t\totherwise\n\t\t\t\t\tfprintf('Exiting...\\n');\n\t\t\t\t\t\n\t\t\t\t\t% Quit our drawing loop.\n\t\t\t\t\tkeepDrawing = false;\n\t\t\tend\n\t\tend\n\t\t\n\t\t% Setup the projection matrix. The projection matrix defines how\n\t\t% the OpenGL coordinate system maps onto the physical screen.\n\t\tglMatrixMode(GL.PROJECTION);\n\t\t\n\t\t% This gives us a clean slate to work with.\n\t\tglLoadIdentity;\t\t\n\t\t\n\t\t% Map our 3D rendering space to the display given a specific\n\t\t% distance from the screen to the subject and an interocular\n\t\t% offset. This is calculated at the beginning of the program.\n\t\tglFrustum(frustum.left, frustum.right, frustum.bottom, frustum.top, frustum.near, frustum.far);\n\t\t\n\t\t% Now we switch to the modelview mode, which is where we draw\n\t\t% stuff.\n\t\tglMatrixMode(GL.MODELVIEW);\n\t\tglLoadIdentity;\n\t\t\n\t\t% In 3D mode, we need to specify where the camera (the subject) is\n\t\t% in relation to the display. Essentially, for proper stereo, the\n\t\t% camera will be placed at the screen distance facing straight\n\t\t% ahead not at (0,0).\n\t\tgluLookAt(ioOffset, 0, screenDist, ... % Eye position\n\t\t\t\t ioOffset, 0, 0, ... % Fixation center\n\t\t\t\t 0, 1, 0);\t\t\t\t\t % Vector defining which way is up.\n\t\t\n\t\t% Clear our rendering space. If you don't do this rendered in the\n\t\t% buffer before will still be there. 
The scene is filled with the\n\t\t% background color specified above.\n\t\tglClear(mor(GL.COLOR_BUFFER_BIT, GL.DEPTH_BUFFER_BIT, GL.STENCIL_BUFFER_BIT, GL.ACCUM_BUFFER_BIT));\n\t\t\n\t\t% Set the rectangle's color.\n\t\tglColor3dv(rectRGB);\n\t\t\n\t\t% This will center the rectangle on the screen. We call this prior\n\t\t% to specifying rectangle because all vertices are multiplied\n\t\t% against the current transformation matrix. In other words, the\n\t\t% order of operations happens in the opposite order they're written\n\t\t% in the code.\n\t\tglTranslated(-rectDims(1)/2 + rectPos(1), -rectDims(2)/2 + rectPos(2), rectPos(3));\n\t\t\n\t\t% Draw the rectangle.\n\t\tglBegin(GL.QUADS);\n\t\tglVertex2d(0, 0);\t\t\t\t\t\t% Lower left corner\n\t\tglVertex2d(rectDims(1), 0);\t\t\t\t% Lower right corner\n\t\tglVertex2d(rectDims(1), rectDims(2));\t% Upper right corner\n\t\tglVertex2d(0, rectDims(2));\t\t\t\t% Upper left corner.\n\t\tglEnd;\n\t\t\n\t\t% This command sticks everything we just did onto the screen. It\n\t\t% syncs to the refresh rate of the display.\n\t\tmglFlush;\n\tend\n\t\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\ncatch e\n\t% Close the MGL window.\n\tmglClose;\n\t\n\t% Disable character listening.\n\tmglEatKeys([]);\n\t\n\t% Send the error to the Matlab command window.\n\trethrow(e);\nend\n\n\nfunction frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n% frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)\n%\n% Description:\n% Takes some basic screen information and calculates the frustum parameters\n% required to setup a 3D projection matrix.\n%\n% Input:\n% screenDistance (scalar) - Distance from the screen to the observer.\n% screenDims (1x2) - Dimensions of the screen. (width, height)\n% horizontal offset (scalar) - Horizontal shift of the observer from the\n% center of the display. Should be 0 for regular displays and half the\n% interocular distance for stereo setups.\n%\n% Output:\n% frust (struct) - Struct containing all calculated frustum parameters.\n% Contains the following fields.\n% 1. left - Left edge of the near clipping plane.\n%\t2. right - Right edge of the near clipping plane.\n%\t3. top - Top edge of the near clipping plane.\n%\t4. bottom - Bottom edge of the near clipping plane.\n%\t5. near - Distance from the observer to the near clipping plane.\n%\t6. 
far - Distance from the observer to the far clipping plane.\n\nif nargin ~= 3\n\terror('Usage: frustum = calculateFrustum(screenDistance, screenDims, horizontalOffset)');\nend\n\n% I chose these constants as reasonable values for the distances from the\n% camera for the type of experiments the Brainard lab does.\nfrustum.near = 1;\nfrustum.far = 100;\n\n% Use similar triangles to figure out the boundaries of the near clipping\n% plane based on the information about the screen size and its distance\n% from the camera.\nfrustum.right = (screenDims(1)/2 - horizontalOffset) * frustum.near / screenDistance;\nfrustum.left = -(screenDims(1)/2 + horizontalOffset) * frustum.near / screenDistance;\nfrustum.top = screenDims(2)/2 * frustum.near / screenDistance;\nfrustum.bottom = -frustum.top;\n"} +{"plateform": "github", "repo_name": "quantizedmassivemimo/1bit_precoding_VLSI-master", "name": "precoder_sim.m", "ext": ".m", "path": "1bit_precoding_VLSI-master/precoder_sim.m", "size": 17150, "source_encoding": "iso_8859_13", "md5": "38b9849df5b7637eb31e03b1c657cfa0", "text": "% =========================================================================\n% -- Simulator for 1-bit Massive MU-MIMO Precoding in VLSI with CxPO\n% -------------------------------------------------------------------------\n% -- (c) 2016 Christoph Studer, Oscar Castañeda, and Sven Jacobsson\n% -- e-mail: studer@cornell.edu, oc66@cornell.edu, and\n% -- sven.jacobsson@ericsson.com (version 0.1; August 14, 2017)\n% -------------------------------------------------------------------------\n% -- If you use this simulator or parts of it, then you must cite our\n% -- journal paper:\n% -- Oscar Castañeda, Sven Jacobsson, Giuseppe Durisi, Mikael Coldrey,\n% -- Tom Goldstein, and Christoph Studer, \"1-bit Massive MU-MIMO\n% -- Precoding in VLSI,\" IEEE Journal on Emerging and Selected Topics in\n% -- Circuits and Systems (JETCAS), to appear in 2017\n% -- and clearly mention this in your paper\n% -------------------------------------------------------------------------\n% -- REMEMBER: C1PO + C2PO = C(1+2)PO = C3PO :)\n% =========================================================================\n\nfunction precoder_sim(varargin)\n\n% -- set up default/custom parameters\n\nif isempty(varargin)\n \n disp('using default simulation settings and parameters...')\n \n % set default simulation parameters\n par.runId = 0; % simulation ID (used to reproduce results)\n par.L = 2; % number of DAC levels per I or Q dimension (must be 2!!!)\n par.U = 16; % number of single-antenna users\n par.B = 256; % number of base-station antennas (B>>U)\n par.mod = '16QAM'; % modulation type: 'BPSK','QPSK','16QAM','64QAM','8PSK'\n par.trials = 1e3; % number of Monte-Carlo trials (transmissions)\n par.NTPdB_list = ... % list of normalized transmit power [dB] values\n -10:2:20; % to be simulated\n par.precoder = ... % precoding scheme(s) to be evaluated\n {'ZF','MRT','ZFQ','MRTQ','SQUID','C1PO','C2PO'};\n par.save = true; % save results (true,false)\n par.plot = true; % plot results (true,false)\n \n % *** SQUID specific\n %\n % note that the SQUID code includes two more algorithm parameters that\n % must be tuned for best performance (if you know what you are doing).\n par.SQUID.iterations = 200;\n \n % *** C1PO specific\n %\n % reasonable parameters for C1PO with different system configurations\n % please optimize manually for best performance (depends on # of iters)\n %\n % BxU | mod. 
| gamma | delta | rho\n % -------+-------+-------+-------+------\n % 32x16 | BPSK | 2^5 | 6.4 | 1.25\n % 64x16 | BPSK | 2^4 | 3.2 | 1.25\n % 128x16 | BPSK | 2^2 | 0.8 | 1.25\n % 256x16 | BPSK | 2^3 | 1.6 | 1.25\n % -------+-------+-------+-------+------\n % 32x16 | QPSK | 2^5 | 6.4 | 1.25\n % 64x16 | QPSK | 2^4 | 3.2 | 1.25\n % 128x16 | QPSK | 2^2 | 0.8 | 1.25\n % 256x16 | QPSK | 2^3 | 1.6 | 1.25\n % -------+-------+-------+-------+-------\n % 256x16 | 16QAM | 2^1 | 0.4 | 1.25\n % -------+-------+-------+-------+-------\n % 256x16 | 64QAM | 14 | 2.8 | 1.25\n \n par.C1PO.gamma = 2^1; % good for 256x16 with 16-QAM\n par.C1PO.rho = 1.25; % rho = gamma/(gamma-delta) [aka. pushfactor]\n par.C1PO.iterations = 25; % max number of iterations\n \n % *** C2PO specific\n %\n % reasonable parameters for C2PO with different system configurations\n % please optimize manually for best performance (depends on # of iters)\n %\n % BxU | mod. | tau | delta | rho\n % -------+-------+-------+-------+------\n % 32x16 | BPSK | 2^-6 | 12.8 | 1.25\n % 64x16 | BPSK | 2^-7 | 25.6 | 1.25\n % 128x16 | BPSK | 2^-7 | 25.6 | 1.25\n % 256x16 | BPSK | 2^-8 | 51.2 | 1.25\n % -------+-------+-------+-------+------\n % 32x16 | QPSK | 2^-6 | 12.8 | 1.25\n % 64x16 | QPSK | 2^-7 | 25.6 | 1.25\n % 128x16 | QPSK | 2^-7 | 25.6 | 1.25\n % 256x16 | QPSK | 2^-8 | 51.2 | 1.25\n % -------+-------+-------+-------+-------\n % 256x16 | 16QAM | 2^-8 | 51.2 | 1.25\n % -------+-------+-------+-------+-------\n % 256x16 | 64QAM | 2^-8 | 51.2 | 1.25\n \n par.C2PO.tau = 2^(-8); % good for 256x16 with 16-QAM\n par.C2PO.rho = 1.25; % rho = 1/(1-tau*delta) [aka. pushfactor]\n par.C2PO.iterations = 25; % max number of iterations\n \nelse\n \n disp('use custom simulation settings and parameters...')\n par = varargin{1}; % only argument is par structure\n \nend\n\n% -- initialization\n\n% the methods have only been checked for 1-bit transmission\n% an extension to multi-bit needs more work :)\nif par.L~=2\n error('This simulator is specifically designed for 1-bit scenarios')\nend\n\n% use runId random seed (enables reproducibility)\nrng(par.runId);\n\n% simulation name (used for saving results)\npar.simName = ['ERR_',num2str(par.U),'x',num2str(par.B), '_', ...\n par.mod, '_', num2str(par.trials),'Trials'];\n\n% set up Gray-mapped constellation alphabet (according to IEEE 802.11)\nswitch (par.mod)\n case 'BPSK',\n par.symbols = [ -1 1 ];\n case 'QPSK',\n par.symbols = [ -1-1i,-1+1i,+1-1i,+1+1i ];\n case '16QAM',\n par.symbols = [ -3-3i,-3-1i,-3+3i,-3+1i, ...\n -1-3i,-1-1i,-1+3i,-1+1i, ...\n +3-3i,+3-1i,+3+3i,+3+1i, ...\n +1-3i,+1-1i,+1+3i,+1+1i ];\n case '64QAM',\n par.symbols = [ -7-7i,-7-5i,-7-1i,-7-3i,-7+7i,-7+5i,-7+1i,-7+3i, ...\n -5-7i,-5-5i,-5-1i,-5-3i,-5+7i,-5+5i,-5+1i,-5+3i, ...\n -1-7i,-1-5i,-1-1i,-1-3i,-1+7i,-1+5i,-1+1i,-1+3i, ...\n -3-7i,-3-5i,-3-1i,-3-3i,-3+7i,-3+5i,-3+1i,-3+3i, ...\n +7-7i,+7-5i,+7-1i,+7-3i,+7+7i,+7+5i,+7+1i,+7+3i, ...\n +5-7i,+5-5i,+5-1i,+5-3i,+5+7i,+5+5i,+5+1i,+5+3i, ...\n +1-7i,+1-5i,+1-1i,+1-3i,+1+7i,+1+5i,+1+1i,+1+3i, ...\n +3-7i,+3-5i,+3-1i,+3-3i,+3+7i,+3+5i,+3+1i,+3+3i ];\n case '8PSK',\n par.symbols = [ exp(1i*2*pi/8*0), exp(1i*2*pi/8*1), ...\n exp(1i*2*pi/8*7), exp(1i*2*pi/8*6), ...\n exp(1i*2*pi/8*3), exp(1i*2*pi/8*2), ...\n exp(1i*2*pi/8*4), exp(1i*2*pi/8*5) ];\nend\n\n% compute symbol energy\npar.Es = mean(abs(par.symbols).^2);\n\n% - quantizer paremeters\n% optimal LSB for 2 < L < 16 quantization levels\nlsb_list = [ 1.59628628628629, ...\n 1.22515515515516, ...\n 0.994694694694695, ...\n 0.842052052052052, 
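% precoder_sim also accepts a single argument, a par struct that replaces the
% defaults above. A minimal usage sketch (field values are illustrative; the
% C1PO/C2PO step-size tables in the header comments should be consulted when
% the antenna/user configuration changes):
clear par;
par.runId = 0;
par.L = 2;                        % this simulator only supports 1-bit DACs
par.U = 16;   par.B = 256;
par.mod = 'QPSK';
par.trials = 100;
par.NTPdB_list = -10:2:10;
par.precoder = {'ZFQ', 'C2PO'};
par.save = false;  par.plot = true;
par.C2PO.tau = 2^(-8);  par.C2PO.rho = 1.25;  par.C2PO.iterations = 25;
precoder_sim(par);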
...\n 0.734304304304304, ...\n 0.650500500500501, ...\n 0.584654654654655, ...\n 0.533773773773774, ...\n 0.491871871871872, ...\n 0.455955955955956, ...\n 0.423033033033033, ...\n 0.396096096096096, ...\n 0.375145145145145, ...\n 0.354194194194194, ...\n 0.336236236236236 ];\n% resolution (number of bits) of the DACs\npar.Q = log2(par.L);\n% least significant bit\npar.lsb = lsb_list(par.L-1)/sqrt(2*par.B);\n% clip level\npar.clip = par.lsb*par.L/2;\n% quantizer labels and thresholds\n[~, ~, par.labels, par.thresholds, ~] = uniquantiz(1, par.lsb, par.L);\n% normalization constant\npar.alpha = sqrt( 1/(2*par.B) ...\n /sum(par.labels.^2.*( ...\n normcdf(par.thresholds(2:end)*sqrt(2*par.B)) ...\n -normcdf(par.thresholds(1:end-1)*sqrt(2*par.B)))));\n% scale quantization labels\npar.labels = par.alpha*par.labels;\n% quantizer alphabet\npar.alphabet = combvec(par.labels, par.labels);\npar.alphabet = par.alphabet(1,:) + 1i*par.alphabet(2,:);\n% quantizer-mapping function\npar.quantizer = @(x) par.alpha * uniquantiz(x, par.lsb, par.L);\n% equivalent (average) quantizer gain\npar.F = par.alpha*par.lsb*...\n sum(normpdf(par.thresholds(2:end-1),0,1/sqrt(2*par.B)));\n\n% precompute bit labels\npar.bps = log2(length(par.symbols)); % number of bits per symbol\npar.bits = de2bi(0:length(par.symbols)-1,par.bps,'left-msb');\n\n% track simulation time\ntime_elapsed = 0;\n\n% -- start simulation\n\n% - initialize result arrays (detector x normalized transmit power)\n% vector error rate\nres.VER = zeros(length(par.precoder),length(par.NTPdB_list));\n% symbol error rate\nres.SER = zeros(length(par.precoder),length(par.NTPdB_list));\n% bit error rate\nres.BER = zeros(length(par.precoder),length(par.NTPdB_list));\n% error-vector magnitude\nres.EVM = zeros(length(par.precoder),length(par.NTPdB_list));\n% SINDR\nres.SINDR = zeros(length(par.precoder),length(par.NTPdB_list));\n% transmit power\nres.TxPower = zeros(length(par.precoder),length(par.NTPdB_list));\n% receive power\nres.RxPower = zeros(length(par.precoder),length(par.NTPdB_list));\n% simulation beamforming time\nres.TIME = zeros(length(par.precoder),length(par.NTPdB_list));\n\n% compute noise variances to be considered\nN0_list = 10.^(-par.NTPdB_list/10);\n\n% generate random bit stream (antenna x bit x trial)\nbits = randi([0 1],par.U,par.bps,par.trials);\n\n% trials loop\ntic\nfor t=1:par.trials\n \n % generate transmit symbol\n idx = bi2de(bits(:,:,t),'left-msb')+1;\n s = par.symbols(idx).';\n \n % generate iid Gaussian channel matrix and noise vector\n n = sqrt(0.5)*(randn(par.U,1)+1i*randn(par.U,1));\n H = sqrt(0.5)*(randn(par.U,par.B)+1i*randn(par.U,par.B));\n \n % algorithm loop\n for d=1:length(par.precoder)\n \n % normalized transmit power loop\n for k=1:length(par.NTPdB_list)\n \n % set noise variance\n N0 = N0_list(k);\n \n % record time used by the beamformer\n starttime = toc;\n \n % beamformers\n switch (par.precoder{d})\n % noise-independent\n case 'ZF', % ZF beamforming (infinite precision)\n [x, beta] = ZF(par, s, H);\n case 'ZFQ', % ZF beamforming (quantized)\n [x, beta] = ZF(par, s, H);\n x = par.quantizer(x);\n beta = beta/par.F;\n case 'MRT', % MRT beamforming (infinite precision)\n [x, beta] = MRT(par, s, H);\n case 'MRTQ', % MRT beamforming (quantized)\n [x, beta] = MRT(par, s, H);\n x = par.quantizer(x);\n beta = beta/par.F;\n case 'C1PO', % C1PO: biConvex 1-bit PrecOding\n [x, beta] = C1PO(par, s, H);\n case 'C2PO', % C2PO: C1PO with simpler preprocessing\n [x, beta] = C2PO(par, s, H);\n % noise-dependent\n case 'SQUID', % SQUID: 
Squared inifinity-norm relaxation with\n % Douglas-Rachford splitting\n [x, beta] = SQUID(par,s,H,N0);\n otherwise,\n error('par.precoder not specified')\n end\n \n % record beamforming simulation time\n res.TIME(d,k) = res.TIME(d,k) + (toc-starttime);\n \n % transmit data over noisy channel\n Hx = H*x;\n y = Hx + sqrt(N0)*n;\n \n % extract transmit and receive power\n res.TxPower(d,k) = res.TxPower(d,k) + mean(sum(abs(x).^2));\n res.RxPower(d,k) = res.RxPower(d,k) + mean(sum(abs(Hx).^2))/par.U;\n \n % user terminals can estimate the beamforming factor beta\n shat = beta*y;\n \n % perform user-side detection\n [~,idxhat] = min(abs(shat*ones(1,length(par.symbols)) ...\n -ones(par.U,1)*par.symbols).^2,[],2);\n bithat = par.bits(idxhat,:);\n \n % -- compute error and complexity metrics\n err = (idx~=idxhat);\n res.VER(d,k) = res.VER(d,k) + any(err);\n res.SER(d,k) = res.SER(d,k) + sum(err)/par.U;\n res.BER(d,k) = res.BER(d,k) + ...\n sum(sum(bits(:,:,t)~=bithat))/(par.U*par.bps);\n res.EVM(d,k) = res.EVM(d,k) + 100*norm(shat - s)^2/norm(s)^2;\n res.SINDR(d,k) = res.SINDR(d,k) + norm(s)^2/norm(shat - s)^2;\n \n end % NTP loop\n \n end % algorithm loop\n \n % keep track of simulation time\n if toc>10\n time=toc;\n time_elapsed = time_elapsed + time;\n fprintf('estimated remaining simulation time: %3.0f min.\\n',...\n time_elapsed*(par.trials/t-1)/60);\n tic\n end\n \nend % trials loop\n\n% normalize results\nres.VER = res.VER/par.trials;\nres.SER = res.SER/par.trials;\nres.BER = res.BER/par.trials;\nres.EVM = res.EVM/par.trials;\nres.SINDR = res.SINDR/par.trials;\nres.TxPower = res.TxPower/par.trials;\nres.RxPower = res.RxPower/par.trials;\nres.TIME = res.TIME/par.trials;\nres.time_elapsed = time_elapsed;\n\n% -- save final results (par and res structures)\n\nif par.save\n save([ par.simName '_' num2str(par.runId) ],'par','res');\nend\n\n% -- show results (generates fairly nice Matlab plots)\n\nif par.plot\n \n % - BER results\n marker_style = {'k-','b:','r--','y-.','g-.','bs--','mv--'};\n figure(1)\n for d=1:length(par.precoder)\n semilogy(par.NTPdB_list,res.BER(d,:),marker_style{d},'LineWidth',2);\n if (d==1)\n hold on\n end\n end\n hold off\n grid on\n box on\n xlabel('normalized transmit power [dB]','FontSize',12)\n ylabel('uncoded bit error rate (BER)','FontSize',12);\n if length(par.NTPdB_list) > 1\n axis([min(par.NTPdB_list) max(par.NTPdB_list) 1e-3 1]);\n end\n legend(par.precoder,'FontSize',12,'location','southwest')\n set(gca,'FontSize',12);\n \nend\n\nend\n\n%% Uniform quantizer\nfunction [v, q, vl, vt, c] = uniquantiz(y, lsb, L)\n\n% set clip level\nc = lsb*L/2;\n\n% clip signal\nif isreal(y)\n yc = max(min(y,c-lsb/1e5),-(c-lsb/1e5));\nelse\n yc = max(min(real(y),c-lsb/1e5),-(c-lsb/1e5)) ...\n + 1i*max(min(imag(y),c-lsb/1e5),-(c-lsb/1e5));\nend\n\n% quantizer\nif mod(L,2) == 0\n % midrise quantizer (without clipping)\n Q = @(x) lsb*floor(x/lsb) + lsb/2;\nelse\n % midtread quantizer (without clipping)\n Q = @(x) lsb*floor(x/lsb + 1/2);\nend\n\n% quantize signal\nif isreal(y)\n v = Q(yc);\nelse\n v = Q(real(yc)) + 1i*Q(imag(yc));\nend\n\n% quantization error\nq = v - y;\n\n% uniform quantization labels\nvl = lsb *((0:L-1) - (L-1)/2);\n\n% uniform quantization thresholds\nvt = [-realmax*ones(length(lsb),1), ...\n bsxfun(@minus, vl(:,2:end), lsb/2), ...\n realmax*ones(length(lsb),1)];\n\nend\n\n%% Zero-forcing beamforming (with infinite precision)\nfunction [x, beta] = ZF(par, s, H)\n\n% normalization constant (average gain)\nrho = sqrt((par.B-par.U)/(par.Es*par.U));\n\n% transmitted 
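% For the 1-bit case this simulator targets (par.L = 2), the uniform quantizer
% above has only two labels per dimension, and with the default thresholds the
% alpha normalization works out so that each quantized I/Q component equals
% +/-1/sqrt(2*B), the same normalization the C1PO/C2PO precoders apply to
% their sign outputs. A small check sketch (B = 256 as in the default
% configuration; x is an arbitrary unquantized vector standing in for a
% precoder output):
B = 256;
x = sqrt(0.5)*(randn(B,1) + 1i*randn(B,1));
xq = (sign(real(x)) + 1i*sign(imag(x))) / sqrt(2*B);   % 1-bit mapping
fprintf('per-antenna power %.4g (1/B = %.4g), total power %.4g\n', ...
    abs(xq(1))^2, 1/B, norm(xq)^2);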
signal\nx = rho*H'/(H*H')*s;\n\n% beamforming factor\nbeta = 1/rho;\n\nend\n\n%% Maximum ratio transmission (MRT) beamforming (with infinite precision)\nfunction [x, beta, P] = MRT(par, s, H)\n\n% normalization constant\ngmrt = 1/sqrt(par.Es*par.U*par.B); % average gain\n% gmrt = 1/sqrt(par.Es*trace(H*H')); % instant gain\n\n% precoding matrix\nP = gmrt*H';\n\n% transmitted signal\nx = P*s;\n\n% scaling factor\nbeta = sqrt(par.U*par.Es/par.B);\n\nend\n\n%% C1PO: biConvex 1-bit PrecOding (Algorithm 1)\nfunction [x, beta] = C1PO(par,s,H)\n\n% initial guess\nx = H'*s;\n\n% preprocessing with exact inverse\ngammainv = 1/par.C1PO.gamma;\nAinv = inv(eye(par.B) + gammainv*H'*(eye(par.U)-s*s'/norm(s,2)^2)*H);\n\n% main C1PO algorithm loop\nfor i=2:par.C1PO.iterations\n x = par.C1PO.rho*(Ainv*x);\n x = min(max(real(x),-1),1) + 1i*min(max(imag(x),-1),1);\nend\nx = (sign(real(x))+1i*sign(imag(x)))/sqrt(2*par.B);\n\n% scaling factor\nbeta = norm(s,2)^2/(s'*H*x);\n\nend\n\n%% C2PO: biConvex 1-bit PrecOding with simplified processing (Algorithm 2)\nfunction [x, beta] = C2PO(par,s,H)\n\n% initial guess\nx = H'*s;\n\n% preprocessing with approximate inverse\ntau = par.C2PO.tau; % step size\nAinvapprox = eye(par.B) - tau*H'*(eye(par.U)-s*s'/norm(s,2)^2)*H ;\n\n% main C1PO algorithm loop\nfor i=2:par.C2PO.iterations\n x = par.C2PO.rho*(Ainvapprox*x);\n x = min(max(real(x),-1),1) + 1i*min(max(imag(x),-1),1);\nend\nx = (sign(real(x))+1i*sign(imag(x)))/sqrt(2*par.B);\n\n% scaling factor\nbeta = norm(s,2)^2/(s'*H*x);\n\nend\n\n\n%% Squared inifinity-norm relaxation with Douglas-Rachford splitting\n% (SQUID) (1-bit beamforming algorithm)\n\nfunction [x,beta] = SQUID(par,s,H,N0)\n\n% -- real-valued decomposition\nHR = [ real(H) -imag(H) ; imag(H) real(H) ];\nsR = [ real(s) ; imag(s) ];\n\n% -- initialization\nx = zeros(par.B*2,1);\ny = zeros(par.B*2,1);\ngain = 1; % ADMM algorithm parameter\nepsilon = 1e-5; % ADMM algorithm parameter\nAinv = inv(HR'*HR + 0.5/gain*eye(par.B*2));\nsREG = Ainv*(HR'*sR);\n\n% -- SQUID loop\nfor t=1:par.SQUID.iterations\n u = sREG + 0.5/gain*(Ainv*(2*x-y));\n xold = x;\n x = prox_infinityNorm2(y+u-x,2*2*par.U*par.B*N0);\n if norm(x-xold)/norm(x)0\n % -- truncation step\n xk = min(wabs,alphaopt).*sign(w);\nelse\n xk = zeros(size(w));\nend\n\nend\n"} +{"plateform": "github", "repo_name": "AnriKaede/IM-master", "name": "FMSearchTokenField.m", "ext": ".m", "path": "IM-master/mac/TeamTalk/interface/mainWindow/FMSearchTokenField.m", "size": 4519, "source_encoding": "utf_8", "md5": "2a89df28133e0c91280b5daf58944c94", "text": "//\n// FMSearchTokenField.m\n// Duoduo\n//\n// Created by zuoye on 13-12-23.\n// Copyright (c) 2013年 zuoye. 
All rights reserved.\n//\n\n#import \"FMSearchTokenField.h\"\n#import \"FMSearchTokenFieldCell.h\"\n\n@implementation FMSearchTokenField\n@synthesize sendActionWhenEditing=_sendActionWhenEditing;\n@synthesize alwaysSendActionWhenPressEnter=_alwaysSendActionWhenPressEnter;\n\n\n+(void)initialize {\n [FMSearchTokenField setCellClass:[FMSearchTokenFieldCell class]];\n}\n\n- (id)initWithFrame:(NSRect)frame\n{\n self = [super initWithFrame:frame];\n if (self) {\n self.sendActionWhenEditing = NO;\n self.alwaysSendActionWhenPressEnter = NO;\n self.m_tokenizingChar = 0x2c;\n /*\n var_32 = rdi;\n var_40 = *0x1002c3b58;\n rax = [[&var_32 super] initWithFrame:edx];\n if (rax != 0x0) {\n rbx.sendActionWhenEditing = 0x0;\n rbx.alwaysSendActionWhenPressEnter = 0x0;\n rax = [NSMutableString alloc];\n rax = [rax init];\n rbx.m_untokenizedStringValue = rax;\n rbx.m_tokenizingChar = 0x2c; //44\n }\n rax = rbx;\n return rax;\n\n */\n }\n return self;\n}\n\n-(void)setTokenizingChars:(NSString *) chars{\n self.m_tokenizingChar = [chars characterAtIndex:0];\n [self setTokenizingCharacterSet:[NSCharacterSet characterSetWithCharactersInString:chars]];\n}\n\n/*\nfunction methImpl_FMSearchTokenField_tokenCount {\n rbx = rdi;\n rax = [rdi tokenStyle];\n rcx = rax;\n rax = 0x1;\n if (rcx == 0x1) goto loc_0x100111ce7;\n goto loc_100111c3f;\n \nloc_100111ce7:\n return rax;\n \nloc_100111c3f:\n rax = [rbx currentEditor];\n if (rax == 0x0) goto loc_0x100111cf6;\n goto loc_100111c58;\n \nloc_100111cf6:\n rax = [rbx attributedStringValue];\n rax = [rax length];\n \nloc_100111c58:\n rax = [rax attributedString];\n rax = [rax length];\n r13 = 0x0;\n if (rax != 0x0) {\n rbx = 0x0;\n r13 = 0x0;\n do {\n rax = [r14 attribute:**NSAttachmentAttributeName atIndex:rbx effectiveRange:0x0];\n r13 = r13 - 0xff + CARRY(CF);\n rax = [r14 length];\n } while (rbx + 0x1 < rax);\n }\n rax = [r14 length];\n rax = ((rax != r13 ? 
0xff : 0x0) & 0xff) + r13;\n goto loc_100111ce7;\n}\n*/\n\n/*\n $rdi == arg0 (ObjC: self)\n $rsi == arg1 (ObjC: op, or _cmd)\n $rdx == arg2 (ObjC: first arg of method)\n $rcx == arg3 (ObjC: second arg of method) cell\n $r8 == arg4 第三个参数\n $r9 == arg5 第四个\n */\n- (void)drawRect:(NSRect)dirtyRect{\n \n if([[self subviews] count]>0){\n NSView *view = [[self subviews] objectAtIndex:0];\n if ([view isKindOfClass:[FMSearchTokenFieldCell class]]) {\n NSRect cellRect ;\n if ([self cell]) {\n cellRect = [[self cell] bounds];\n }\n NSRect imageRect ;\n NSRect textRect;\n \n [(FMSearchTokenFieldCell *)view divideFrame:cellRect ToImageRect:&imageRect textRect:&textRect buttonRect:nil callFromView:YES];\n [view setFrame:cellRect];\n }\n }\n \n /*\n r12 = rdi;\n rax = [rdi subviews];\n rax = [rax count];\n if (rax != 0x0) {\n rax = [rbx objectAtIndex:0x0];\n r14 = rax;\n rax = [*0x1002c29e8 class];\n rax = [r14 isKindOfClass:rax];\n if (rax != 0x0) {\n rax = [r12 cell];\n r15 = rax;\n if (r12 != 0x0) {\n var_80 = [r12 bounds];\n }\n else {\n }\n rbx = *objc_msgSend;\n rdx = 0x0;\n (*objc_msgSend)(r15, @selector(divideFrame:ToImageRect:textRect:buttonRect:callFromView:), rdx, &var_112, 0x0, 0x1);\n var_72 = var_136;\n var_64 = var_128;\n var_56 = var_120;\n var_48 = var_112;\n [r14 setFrame:rdx];\n }\n }\n rax = &arg_0;\n var_32 = r12;\n var_40 = *0x1002c3b58;\n rax = [[&var_32 super] drawRect:edx];\n return rax;\n */\n \n\t[super drawRect:dirtyRect];\n\t\n}\n\n\n\n/*\n function methImpl_FMSearchTokenField_dealloc {\n rbx = rdi;\n r14 = objc_msg_release;\n [rbx.m_buttonTrackingArea release];\n [rbx.m_untokenizedStringValue release];\n var_0 = rbx;\n var_8 = *0x1002c3b58;\n rax = [[&var_0 super] dealloc];\n return rax;\n }\n */\n\n-(void)mouseDown:(NSEvent *)theEvent{\n \n}\n\n-(void)mouseUp:(NSEvent *)theEvent{\n \n}\n\n-(void)mouseEntered:(NSEvent *)theEvent{\n \n}\n\n-(void)mouseExited:(NSEvent *)theEvent{\n \n}\n\n-(BOOL)isFlipped{\n return NO;\n}\n\n@end\n\n"} +{"plateform": "github", "repo_name": "AnriKaede/IM-master", "name": "DDNinePartImage.m", "ext": ".m", "path": "IM-master/mac/TeamTalk/interface/mainWindow/searchField/DDNinePartImage.m", "size": 6722, "source_encoding": "utf_8", "md5": "6dac0c29b80d07b31ccfd0b48ec932de", "text": "//\n// DDNinePartImage.m\n// Duoduo\n//\n// Created by zuoye on 14-1-20.\n// Copyright (c) 2014年 zuoye. 
All rights reserved.\n//\n\n#import \"DDNinePartImage.h\"\n\n@implementation DDNinePartImage\n\n-(id)initWithNSImage:(NSImage *)image leftPartWidth:(CGFloat)leftWidth rightPartWidth:(CGFloat)rightWidth topPartHeight:(CGFloat)topHeight bottomPartHeight:(CGFloat)bottomHeight{\n return [self initWithNSImage:image leftPartWidth:leftWidth rightPartWidth:rightWidth topPartHeight:topHeight bottomPartHeight:bottomHeight flipped:NO];\n}\n\n-(id)initWithNSImage:(NSImage *)image leftPartWidth:(CGFloat)leftWidth rightPartWidth:(CGFloat)rightWidth topPartHeight:(CGFloat)topHeight bottomPartHeight:(CGFloat)bottomHeight flipped:(BOOL)flipped{\n self = [super init];\n if (self) {\n \n //size 是指块图片的大小.\n // topLeftCornerImage = [DDNinePartImage getPartImage:image withSize:<#(NSSize)#> fromRect:<#(NSRect)#>]\n }\n return self;\n}\n\n\n\n\n/*\n \n function meth_TXNinePartImage_dealloc {\n edi = arg_0;\n [*(edi + 0x1c) release]; //topLeftCornerImage\n [*(edi + 0x20) release]; //topEdgeImage\n [*(edi + 0x24) release]; //topRightCornerImage\n [*(edi + 0x28) release]; //leftEdgeImage\n [*(edi + 0x2c) release]; //centerImage\n [*(edi + 0x30) release]; //rightEdgeImage\n [*(edi + 0x34) release]; //bottomLeftCornerImage\n [*(edi + 0x38) release]; //bottomEdgeImage\n [*(edi + 0x3c) release]; //bottomRightCornerImage\n var_8 = edi;\n var_12 = *0xca2ce4;\n eax = [[&var_8 super] dealloc];\n return eax;\n }\n */\n\n\n\n-(void)drawInRect:(NSRect)rect fromRect:(NSRect)fromRect operation:(NSCompositingOperation)op fraction:(CGFloat)delta{\n [self drawInRect:rect compositingOperation:op alphaFraction:delta flipped:NO];\n}\n\n-(void)drawInRect:(NSRect)rect compositingOperation:(NSCompositingOperation)op alphaFraction:(CGFloat)alphaFraction flipped:(BOOL)isFlipped{\n NSGraphicsContext *context = [NSGraphicsContext currentContext];\n [context saveGraphicsState];\n [context setShouldAntialias:YES];\n NSDrawNinePartImage(rect, topLeftCornerImage, topEdgeImage, topRightCornerImage, leftEdgeImage, centerImage, rightEdgeImage, bottomLeftCornerImage, bottomEdgeImage, bottomRightCornerImage, op, alphaFraction, isFlipped);\n [context restoreGraphicsState];\n}\n\n+(NSImage *)getPartImage:(NSImage *)image withSize:(NSSize)size fromRect:(NSRect)rect{\n NSImage *im = [[NSImage alloc] initWithSize:size];\n [im lockFocus];\n [image drawInRect:NSMakeRect(0, 0, size.width, size.height) fromRect:rect operation:NSCompositeCopy fraction:1];\n [im unlockFocus];\n return im;\n}\n\n\n/*\n $rdi == arg0 (ObjC: self)\n $rsi == arg1 (ObjC: op, or _cmd)\n $rdx == arg2 (ObjC: first arg of method)\n $rcx == arg3 (ObjC: second arg of method) cell\n $r8 == arg4 第三个参数\n $r9 == arg5 第四个\n */\n/*\nfunction meth_TXNinePartImage_initWithNSImage_leftPartWidth_rightPartWidth_topPartHeight_bottomPartHeight_flipped_ {\n var_240 = arg_0;\n var_244 = *0xca2ce4;\n eax = [[&var_240 super] init];\n if (eax != 0x0) {\n ecx = arg_8;\n edi = ecx;\n eax = [ecx size];\n var_48 = eax;\n [edi size];\n floorf(edx);\n asm{ fstp tword [ss:ebp-0x108+var_60] };\n floorf(arg_14);\n asm{ fstp dword [ss:ebp-0x108+var_80] };\n asm{ fld tword [ss:ebp-0x108+var_60] };\n asm{ fstp dword [ss:ebp-0x108+var_92] };\n floorf(arg_C);\n asm{ fstp dword [ss:ebp-0x108+var_72] };\n xmm3 = var_80;\n var_52 = xmm3;\n xmm0 = var_92 - xmm3;\n var_60 = xmm0;\n var_224 = 0x0;\n var_228 = xmm0;\n xmm0 = var_72;\n var_56 = xmm0;\n var_232 = xmm0;\n var_236 = xmm3;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm0 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x1c) = eax;\n floorf(var_48);\n var_36 = 
esi;\n var_208 = var_56;\n asm{ fstp dword [ss:ebp-0x108+var_88] };\n floorf(arg_10);\n asm{ fstp dword [ss:ebp-0x108+var_76] };\n var_212 = var_60;\n xmm0 = var_88;\n var_48 = xmm0;\n xmm1 = var_76;\n var_44 = xmm1;\n xmm0 = xmm0 - var_56 - xmm1;\n var_40 = xmm0;\n var_216 = xmm0;\n xmm3 = var_52;\n var_220 = xmm3;\n esi = var_36;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm0 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x20) = eax;\n xmm0 = var_44;\n xmm1 = var_48 - xmm0;\n var_48 = xmm1;\n var_192 = xmm1;\n var_196 = var_60;\n var_200 = xmm0;\n xmm3 = var_52;\n var_204 = xmm3;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm0 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x24) = eax;\n floorf(arg_18);\n asm{ fstp dword [ss:ebp-0x108+var_84] };\n var_176 = 0x0;\n xmm1 = var_84;\n var_52 = xmm1;\n var_180 = xmm1;\n xmm2 = var_56;\n var_184 = xmm2;\n xmm0 = var_60 - xmm1;\n var_60 = xmm0;\n var_188 = xmm0;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm2 fromRect:xmm0];\n eax = [eax retain];\n *(esi + 0x28) = eax;\n var_160 = var_56;\n var_164 = var_52;\n xmm2 = var_40;\n var_168 = xmm2;\n xmm3 = var_60;\n var_172 = xmm3;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm2 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x2c) = eax;\n var_144 = var_48;\n var_148 = var_52;\n xmm2 = var_44;\n var_152 = xmm2;\n xmm3 = var_60;\n var_156 = xmm3;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm2 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x30) = eax;\n var_128 = 0x0;\n var_132 = 0x0;\n xmm2 = var_56;\n var_136 = xmm2;\n xmm3 = var_52;\n var_140 = xmm3;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm2 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x34) = eax;\n var_112 = var_56;\n var_116 = 0x0;\n xmm3 = var_40;\n var_120 = xmm3;\n xmm2 = var_52;\n var_124 = xmm2;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm3 fromRect:xmm2];\n eax = [eax retain];\n *(esi + 0x38) = eax;\n var_96 = var_48;\n var_100 = 0x0;\n xmm2 = var_44;\n var_104 = xmm2;\n xmm3 = var_52;\n var_108 = xmm3;\n eax = [TXNinePartImage getPartImage:edi withSize:xmm2 fromRect:xmm3];\n eax = [eax retain];\n *(esi + 0x3c) = eax;\n *(int8_t *)(esi + 0x40) = arg_1C;\n }\n eax = esi;\n return eax;\n}\n*/\n@end\n"} +{"plateform": "github", "repo_name": "AnriKaede/IM-master", "name": "echo_diagnostic.m", "ext": ".m", "path": "IM-master/win-client/3rdParty/src/libspeex/libspeex/echo_diagnostic.m", "size": 2076, "source_encoding": "utf_8", "md5": "8d5e7563976fbd9bd2eda26711f7d8dc", "text": "% Attempts to diagnose AEC problems from recorded samples\n%\n% out = echo_diagnostic(rec_file, play_file, out_file, tail_length)\n%\n% Computes the full matrix inversion to cancel echo from the \n% recording 'rec_file' using the far end signal 'play_file' using \n% a filter length of 'tail_length'. 
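% For example, a hypothetical call (assuming 'aec_rec.sw' and 'aec_play.sw'
% are raw 16-bit near-end capture and far-end playback files, as implied by
% the fread(...,'short') reads below, and 1024 is an assumed tail length in
% samples):
%
%   out = echo_diagnostic('aec_rec.sw', 'aec_play.sw', 'aec_out.sw', 1024);
%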
The output is saved to 'out_file'.\nfunction out = echo_diagnostic(rec_file, play_file, out_file, tail_length)\n\nF=fopen(rec_file,'rb');\nrec=fread(F,Inf,'short');\nfclose (F);\nF=fopen(play_file,'rb');\nplay=fread(F,Inf,'short');\nfclose (F);\n\nrec = [rec; zeros(1024,1)];\nplay = [play; zeros(1024,1)];\n\nN = length(rec);\ncorr = real(ifft(fft(rec).*conj(fft(play))));\nacorr = real(ifft(fft(play).*conj(fft(play))));\n\n[a,b] = max(corr);\n\nif b > N/2\n b = b-N;\nend\nprintf (\"Far end to near end delay is %d samples\\n\", b);\nif (b > .3*tail_length)\n printf ('This is too much delay, try delaying the far-end signal a bit\\n');\nelse if (b < 0)\n printf ('You have a negative delay, the echo canceller has no chance to cancel anything!\\n');\n else\n printf ('Delay looks OK.\\n');\n end\n end\nend\nN2 = round(N/2);\ncorr1 = real(ifft(fft(rec(1:N2)).*conj(fft(play(1:N2)))));\ncorr2 = real(ifft(fft(rec(N2+1:end)).*conj(fft(play(N2+1:end)))));\n\n[a,b1] = max(corr1);\nif b1 > N2/2\n b1 = b1-N2;\nend\n[a,b2] = max(corr2);\nif b2 > N2/2\n b2 = b2-N2;\nend\ndrift = (b1-b2)/N2;\nprintf ('Drift estimate is %f%% (%d samples)\\n', 100*drift, b1-b2);\nif abs(b1-b2) < 10\n printf ('A drift of a few (+-10) samples is normal.\\n');\nelse\n if abs(b1-b2) < 30\n printf ('There may be (not sure) excessive clock drift. Is the capture and playback done on the same soundcard?\\n');\n else\n printf ('Your clock is drifting! No way the AEC will be able to do anything with that. Most likely, you''re doing capture and playback from two different cards.\\n');\n end\n end\nend\nacorr(1) = .001+1.00001*acorr(1);\nAtA = toeplitz(acorr(1:tail_length));\nbb = corr(1:tail_length);\nh = AtA\\bb;\n\nout = (rec - filter(h, 1, play));\n\nF=fopen(out_file,'w');\nfwrite(F,out,'short');\nfclose (F);\n"} +{"plateform": "github", "repo_name": "AnriKaede/IM-master", "name": "echo_diagnostic.m", "ext": ".m", "path": "IM-master/android/app/src/main/jni/libspeex/echo_diagnostic.m", "size": 2076, "source_encoding": "utf_8", "md5": "8d5e7563976fbd9bd2eda26711f7d8dc", "text": "% Attempts to diagnose AEC problems from recorded samples\n%\n% out = echo_diagnostic(rec_file, play_file, out_file, tail_length)\n%\n% Computes the full matrix inversion to cancel echo from the \n% recording 'rec_file' using the far end signal 'play_file' using \n% a filter length of 'tail_length'. 
The output is saved to 'out_file'.\nfunction out = echo_diagnostic(rec_file, play_file, out_file, tail_length)\n\nF=fopen(rec_file,'rb');\nrec=fread(F,Inf,'short');\nfclose (F);\nF=fopen(play_file,'rb');\nplay=fread(F,Inf,'short');\nfclose (F);\n\nrec = [rec; zeros(1024,1)];\nplay = [play; zeros(1024,1)];\n\nN = length(rec);\ncorr = real(ifft(fft(rec).*conj(fft(play))));\nacorr = real(ifft(fft(play).*conj(fft(play))));\n\n[a,b] = max(corr);\n\nif b > N/2\n b = b-N;\nend\nprintf (\"Far end to near end delay is %d samples\\n\", b);\nif (b > .3*tail_length)\n printf ('This is too much delay, try delaying the far-end signal a bit\\n');\nelse if (b < 0)\n printf ('You have a negative delay, the echo canceller has no chance to cancel anything!\\n');\n else\n printf ('Delay looks OK.\\n');\n end\n end\nend\nN2 = round(N/2);\ncorr1 = real(ifft(fft(rec(1:N2)).*conj(fft(play(1:N2)))));\ncorr2 = real(ifft(fft(rec(N2+1:end)).*conj(fft(play(N2+1:end)))));\n\n[a,b1] = max(corr1);\nif b1 > N2/2\n b1 = b1-N2;\nend\n[a,b2] = max(corr2);\nif b2 > N2/2\n b2 = b2-N2;\nend\ndrift = (b1-b2)/N2;\nprintf ('Drift estimate is %f%% (%d samples)\\n', 100*drift, b1-b2);\nif abs(b1-b2) < 10\n printf ('A drift of a few (+-10) samples is normal.\\n');\nelse\n if abs(b1-b2) < 30\n printf ('There may be (not sure) excessive clock drift. Is the capture and playback done on the same soundcard?\\n');\n else\n printf ('Your clock is drifting! No way the AEC will be able to do anything with that. Most likely, you''re doing capture and playback from two different cards.\\n');\n end\n end\nend\nacorr(1) = .001+1.00001*acorr(1);\nAtA = toeplitz(acorr(1:tail_length));\nbb = corr(1:tail_length);\nh = AtA\\bb;\n\nout = (rec - filter(h, 1, play));\n\nF=fopen(out_file,'w');\nfwrite(F,out,'short');\nfclose (F);\n"} +{"plateform": "github", "repo_name": "truongd8593/1D-Shallow-Water-equations-master", "name": "Fr.m", "ext": ".m", "path": "1D-Shallow-Water-equations-master/Fr.m", "size": 207, "source_encoding": "utf_8", "md5": "b7adb7763a8a9f44c45ad2ce3924eec0", "text": "%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n% Froude\r\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\nfunction [froude] = Fr(U)\r\n global g;\r\n if ( U(1) == 0 )\r\n froude = 0.;\r\n else\r\n froude = U(2)/(U(1)*sqrt(g*U(1)));\r\n end\r\nend "} +{"plateform": "github", "repo_name": "posgraph/coupe.bilateral-texture-filtering-master", "name": "btf_2d_color_gpu.m", "ext": ".m", "path": "coupe.bilateral-texture-filtering-master/bilateralTextureFiltering/btf_2d_color_gpu.m", "size": 3053, "source_encoding": "utf_8", "md5": "d6329d1d843ceb6c8424dd6bc33deca4", "text": "function r_img = btf_2d_color_gpu(I, fr, n_iter, fr_blf)\n% btf_2d_color_gpu - Bilateral Texture Filtering\n%\n% S = btf_2d_color_gpu(I, fr, n_iter, fr_blf) extracts structure S from\n% input I, with scale parameter fr, joint filtering scale fr_blf and\n% iteration number n_iter. \n% \n% Paras: \n% @I : Input image, both grayscale and color images are acceptable.\n% @fr : Parameter specifying the maximum size of texture elements. \n% @n_iter : Number of itearations, 5 by default.\n% @fr_blf : Parameter specifying kernel size of joint bilateral filtering. \n% \n% Example\n% ==========\n% I = imread('input.png');\n% radius = 3;\n% iterations = 3;\n% radius_bf = radius * 2;\n% S = btf_2d_color_gpu(I, radius, iterations, radius_bf);\n%\n% ==========\n% The Code is created based on the following paper \n% [1] \"Bilateral Texture Filtering\", Hojin Cho, Hyunjoon Lee, Seungyong Lee, ACM Transactions on Graphics, \n% (SIGGRAPH 2014), 2014. 
\n% The code and the algorithm are for non-comercial use only.\n%\n\nglobal o_img\n\nif ~exist('fr_blf', 'var') || isempty(fr_blf),\n fr_blf = 2*fr;\nend\n\nif ~exist('n_iter', 'var') || isempty(n_iter),\n n_iter = 5;\nend\n\nsigma_avg = 0.05*sqrt(size(I, 3));\nsigma_alpha = 5;\n\ntic;\n\nI = gpuArray(im2single(I));\no_img = I;\n\nfor iter = 1:n_iter\n fprintf('iter = %d\\n', iter);\n L = I;\n Gc = cell(fr, 1);\n %Lcpu = gather(L);\n for i = fr:fr\n \n B = imfilter(L, fspecial('average', 2*i+1), 'symmetric');\n \n % MRTV\n Delta = comp_Delta_gpu(L, i);\n M = comp_MRTV_gpu(L, i);\n M = mean(M.*Delta, 3); \n\n % comp_S\n [S, M_min, ~] = comp_S(B, M, i);\n\n % alpha blending\n M_diff = M - M_min;\n\n alpha = sigmoid(M_diff, sigma_alpha);\n alpha = (alpha - 0.5) * 2; \n alpha = repmat(alpha, [1 1 size(S, 3)]);\n\n G = (alpha).*S + (1-alpha).*B;\n\n Gc{i} = G;\n end\n \n G = Gc{end};\n r_img = blf_2d_gpu(I, G, fr_blf, sigma_avg);\n I = r_img;\nend\n\nr_img = gather(I);\n\net = toc;\ndisp(['elapsed time = ' num2str(et)]);\n\nend\n\n\nfunction b = sigmoid(a, p)\n\nb = 1 ./ (1 + exp(-p.*a));\n\nend\n\n% comp_S\nfunction [S, M_min, min_idx] = comp_S(B, M, fr)\n\n[h, w, d] = size(B);\n\np_M = padarray(M, [fr fr], 'replicate');\np_B = padarray(B, [fr fr], 'symmetric');\npu = fr+1;\npb = pu+h-1;\npl = fr+1;\npr = pl+w-1;\n\n% minimum value\nS = B; %gpuArray(zeros(size(B), 'single'));\nM_min = M; %gpuArray(ones(h, w, 'single')*1000); % arbitrary large value\noX = gpuArray(zeros(h, w, 'single'));\noY = gpuArray(zeros(h, w, 'single'));\n\nmin_idx = gpuArray(reshape(1:h*w, [h w]));\np_min_idx = padarray(min_idx, [fr fr], 'symmetric');\n\nfor x = -fr:fr\n for y = -fr:fr\n n_M = p_M(pu+y:pb+y, pl+x:pr+x);\n n_B = p_B(pu+y:pb+y, pl+x:pr+x, :);\n n_min_idx = p_min_idx(pu+y:pb+y, pl+x:pr+x);\n \n idx = n_M < M_min;\n M_min = min(M_min, n_M);\n \n oX = oX.*(1-idx) + x.*idx;\n oY = oY.*(1-idx) + y.*idx;\n \n min_idx = n_min_idx.*idx + min_idx.*(1-idx);\n idx = repmat(idx, [1 1 d]);\n S = n_B.*idx + S.*(1-idx);\n end\nend\n\nend\n\n\n"} +{"plateform": "github", "repo_name": "latelee/caffe-master", "name": "classification_demo.m", "ext": ".m", "path": "caffe-master/matlab/demo/classification_demo.m", "size": 5466, "source_encoding": "utf_8", "md5": "45745fb7cfe37ef723c307dfa06f1b97", "text": "function [scores, maxlabel] = classification_demo(im, use_gpu)\n% [scores, maxlabel] = classification_demo(im, use_gpu)\n%\n% Image classification demo using BVLC CaffeNet.\n%\n% IMPORTANT: before you run this demo, you should download BVLC CaffeNet\n% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)\n%\n% ****************************************************************************\n% For detailed documentation and usage on Caffe's Matlab interface, please\n% refer to the Caffe Interface Tutorial at\n% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab\n% ****************************************************************************\n%\n% input\n% im color image as uint8 HxWx3\n% use_gpu 1 to use the GPU, 0 to use the CPU\n%\n% output\n% scores 1000-dimensional ILSVRC score vector\n% maxlabel the label of the highest score\n%\n% You may need to do the following before you start matlab:\n% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64\n% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6\n% Or the equivalent based on where things are installed on your system\n% and what versions are installed.\n%\n% Usage:\n% im = imread('../../examples/images/cat.jpg');\n% 
scores = classification_demo(im, 1);\n% [score, class] = max(scores);\n% Five things to be aware of:\n% caffe uses row-major order\n% matlab uses column-major order\n% caffe uses BGR color channel order\n% matlab uses RGB color channel order\n% images need to have the data mean subtracted\n\n% Data coming in from matlab needs to be in the order\n% [width, height, channels, images]\n% where width is the fastest dimension.\n% Here is the rough matlab code for putting image data into the correct\n% format in W x H x C with BGR channels:\n% % permute channels from RGB to BGR\n% im_data = im(:, :, [3, 2, 1]);\n% % flip width and height to make width the fastest dimension\n% im_data = permute(im_data, [2, 1, 3]);\n% % convert from uint8 to single\n% im_data = single(im_data);\n% % reshape to a fixed size (e.g., 227x227).\n% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');\n% % subtract mean_data (already in W x H x C with BGR channels)\n% im_data = im_data - mean_data;\n\n% If you have multiple images, cat them with cat(4, ...)\n\n% Add caffe/matlab to your Matlab search PATH in order to use matcaffe\nif exist('../+caffe', 'dir')\n addpath('..');\nelse\n error('Please run this demo from caffe/matlab/demo');\nend\n\n% Set caffe mode\nif exist('use_gpu', 'var') && use_gpu\n caffe.set_mode_gpu();\n gpu_id = 0; % we will use the first gpu in this demo\n caffe.set_device(gpu_id);\nelse\n caffe.set_mode_cpu();\nend\n\n% Initialize the network using BVLC CaffeNet for image classification\n% Weights (parameter) file needs to be downloaded from Model Zoo.\nmodel_dir = '../../models/bvlc_reference_caffenet/';\nnet_model = [model_dir 'deploy.prototxt'];\nnet_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];\nphase = 'test'; % run with phase test (so that dropout isn't applied)\nif ~exist(net_weights, 'file')\n error('Please download CaffeNet from Model Zoo before you run this demo');\nend\n\n% Initialize a network\nnet = caffe.Net(net_model, net_weights, phase);\n\nif nargin < 1\n % For demo purposes we will use the cat image\n fprintf('using caffe/examples/images/cat.jpg as input image\\n');\n im = imread('../../examples/images/cat.jpg');\nend\n\n% prepare oversampled input\n% input_data is Height x Width x Channel x Num\ntic;\ninput_data = {prepare_image(im)};\ntoc;\n\n% do forward pass to get scores\n% scores are now Channels x Num, where Channels == 1000\ntic;\n% The net forward function. 
It takes in a cell array of N-D arrays\n% (where N == 4 here) containing data of input blob(s) and outputs a cell\n% array containing data from output blob(s)\nscores = net.forward(input_data);\ntoc;\n\nscores = scores{1};\nscores = mean(scores, 2); % take average scores over 10 crops\n\n[~, maxlabel] = max(scores);\n\n% call caffe.reset_all() to reset caffe\ncaffe.reset_all();\n\n% ------------------------------------------------------------------------\nfunction crops_data = prepare_image(im)\n% ------------------------------------------------------------------------\n% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that\n% is already in W x H x C with BGR channels\nd = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');\nmean_data = d.mean_data;\nIMAGE_DIM = 256;\nCROPPED_DIM = 227;\n\n% Convert an image returned by Matlab's imread to im_data in caffe's data\n% format: W x H x C with BGR channels\nim_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR\nim_data = permute(im_data, [2, 1, 3]); % flip width and height\nim_data = single(im_data); % convert from uint8 to single\nim_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data\nim_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)\n\n% oversample (4 corners, center, and their x-axis flips)\ncrops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');\nindices = [0 IMAGE_DIM-CROPPED_DIM] + 1;\nn = 1;\nfor i = indices\n for j = indices\n crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);\n crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);\n n = n + 1;\n end\nend\ncenter = floor(indices(2) / 2) + 1;\ncrops_data(:,:,:,5) = ...\n im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);\ncrops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "make_models_parallel.m", "ext": ".m", "path": "TreeQSM-master/src/make_models_parallel.m", "size": 8030, "source_encoding": "utf_8", "md5": "11981cd204b15a2aced81d8c7a0a25ad", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction QSMs = make_models_parallel(dataname,savename,Nmodels,inputs)\n\n% ---------------------------------------------------------------------\n% MAKE_MODELS.M Makes QSMs of given point clouds.\n%\n% Version 1.1.2\n% Latest update 9 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Makes QSMs of given point clouds specified by the \"dataname\" and by the\n% other inputs. The results are saved into file named \"savename\".\n% Notice, the code does not save indivual QSM runs into their own .mat or\n% .txt files but saves all models into one big .mat file. 
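% A minimal sketch of a call (hypothetical names: 'clouds' refers to a
% clouds.mat file holding the point clouds, 'qsms_run1' is the save name,
% and 5 models per input combination matches the default; results are
% written under results/):
%
%   QSMs = make_models_parallel('clouds', 'qsms_run1', 5);
%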
Same as\n% MAKE_MODELS but uses parfor command (requires Parallel Computing Toolbox)\n% which allows the utilization of multiple processors/cores to compute in\n% parallel number of QSMs with the same inputs.\n%\n% Inputs:\n% dataname String specifying the .mat-file containing the point\n% clouds that are used for the QSM reconstruction.\n% savename String, the name of the file where the QSMs are saved\n% Nmodels (Optional) Number of models generated for each input\n% (cloud and input parameters). Default value is 5.\n% inputs (Optional) The input parameters structure. Can be defined\n% below as part of this code. Can also be given as a\n% structure array where each tree gets its own, possibly\n% uniquely, defined parameters (e.g. optimal parameters)\n% but each tree has to have same number of parameter values.\n%\n% Output:\n% QSMs Structure array containing all the QSMs generated\n% ---------------------------------------------------------------------\n\n% Changes from version 1.1.1 to 1.1.2, 18 Aug 2020:\n% 1) Removed the inputs \"lcyl\" and \"FilRad\" from the inputs and the\n% calculations of number of input parameters\n\n% Changes from version 1.1.0 to 1.1.1, 13 Jan 2020:\n% 1) Changed \"m = m+n;\" to \"m = m+n(j);\" at the end of the function.\n\n% Changes from version 1.0.0 to 1.1.0, 03 Oct 2019:\n% 1) Added try-catch structure where \"treeqsm\" is called, so that if there\n% is an error during the reconstruction process of one tree, then the\n% larger process of making multiple QSMs from multiple tree is not\n% stopped.\n% 2) Changed the way the data is loaded. Previously all the data was\n% loaded into workspace, now only one point cloud is in the workspace.\n% 3) Corrected a bug where incomplete QSM was saved as complete QSM\n% 4) Changed where the input-structure for each tree reconstructed\n% 5) Changed the coding to separate more the results of the different\n% parallel processes (less warnings and errors)\n\nif nargin < 2\n disp('Not enough inputs, no models generated!')\n QSMs = struct([]);\n return\nend\n\nif nargin == 2\n Nmodels = 5; % Number of models per inputs, usually about 5 models is enough\nend\n\n%% Define the parameter values\nif nargin == 3 || nargin == 2\n % The following parameters can be varied and should be optimised\n % (each can have multiple values):\n % Patch size of the first uniform-size cover:\n inputs.PatchDiam1 = [0.08 0.15];\n % Minimum patch size of the cover sets in the second cover:\n inputs.PatchDiam2Min = [0.015 0.025];\n % Maximum cover set size in the stem's base in the second cover:\n inputs.PatchDiam2Max = [0.06 0.08];\n\n % The following parameters can be varied and but usually can be kept as\n % shown (i.e. 
as little bigger than PatchDiam parameters):\n % Ball radius used for the first uniform-size cover generation:\n inputs.BallRad1 = inputs.PatchDiam1+0.02;\n % Maximum ball radius used for the second cover generation:\n inputs.BallRad2 = inputs.PatchDiam2Max+0.01;\n\n % The following parameters can be usually kept fixed as shown:\n inputs.nmin1 = 3; % Minimum number of points in BallRad1-balls, good value is 3\n inputs.nmin2 = 1; % Minimum number of points in BallRad2-balls, good value is 1\n inputs.OnlyTree = 1; % If \"1\", then point cloud contains points only from the tree\n inputs.Tria = 0; % If \"1\", then triangulation produces\n inputs.Dist = 1; % If \"1\", then computes the point-model distances\n\n % Different cylinder radius correction options for modifying too large and\n % too small cylinders:\n % Traditional TreeQSM choices:\n % Minimum cylinder radius, used particularly in the taper corrections:\n inputs.MinCylRad = 0.0025;\n % Child branch cylinders radii are always smaller than the parent\n % branche's cylinder radii:\n inputs.ParentCor = 1;\n % Use partially linear (stem) and parabola (branches) taper corrections:\n inputs.TaperCor = 1;\n % Growth volume correction approach introduced by Jan Hackenberg,\n % allometry: GrowthVol = a*Radius^b+c\n % Use growth volume correction:\n inputs.GrowthVolCor = 0;\n % fac-parameter of the growth vol. approach, defines upper and lower\n % boundary:\n inputs.GrowthVolFac = 2.5;\n\n inputs.name = 'test';\n inputs.tree = 0;\n inputs.plot = 0;\n inputs.savetxt = 0;\n inputs.savemat = 0;\n inputs.disp = 0;\nend\n\n% Compute the number of input parameter combinations\nin = inputs(1);\nninputs = prod([length(in.PatchDiam1) length(in.PatchDiam2Min)...\n length(in.PatchDiam2Max)]);\n\n\n%% Load data\nmatobj = matfile([dataname,'.mat']);\nnames = fieldnames(matobj);\ni = 1;\nn = max(size(names));\nwhile i <= n && ~strcmp(names{i,:},'Properties')\n i = i+1;\nend\nI = (1:1:n);\nI = setdiff(I,i);\nnames = names(I,1);\nnames = sort(names);\nnt = max(size(names)); % number of trees/point clouds\n\n%% make the models\nQSMs = struct('cylinder',{},'branch',{},'treedata',{},'rundata',{},...\n 'pmdistance',{},'triangulation',{});\n\n% Generate Inputs struct that contains the input parameters for each tree\nif max(size(inputs)) == 1\n for i = 1:nt\n Inputs(i) = inputs;\n Inputs(i).name = names{i};\n Inputs(i).tree = i;\n Inputs(i).plot = 0;\n Inputs(i).savetxt = 0;\n Inputs(i).savemat = 0;\n Inputs(i).disp = 0;\n end\nelse\n Inputs = inputs;\nend\n\nm = 1;\nfor t = 1:nt % trees\n disp(['Modelling tree ',num2str(t),'/',num2str(nt),' (',Inputs(t).name,'):'])\n P = matobj.(Inputs(t).name);\n qsms = cell(Nmodels,1); % save here the accepted models\n qsm = cell(Nmodels,1); % cell-structure to keep different models separate\n n = ones(Nmodels,1);\n n0 = zeros(Nmodels,1);\n k = ones(Nmodels,1);\n parfor j = 1:Nmodels % generate N models per input\n inputs = Inputs(t);\n inputs.model = j;\n while k(j) <= 5 % try up to five times to generate non-empty models\n try\n qsm{j} = treeqsm(P,inputs);\n catch\n qsm{j} = struct('cylinder',{},'branch',{},'treedata',{},...\n 'rundata',{},'pmdistance',{},'triangulation',{});\n qsm{j}(ninputs).treedata = 0;\n end\n n(j) = max(size(qsm{j}));\n Empty = false(n(j),1);\n for b = 1:n(j)\n if isempty(qsm{j}(b).branch)\n Empty(b) = true;\n end\n end\n if n(j) < ninputs || any(Empty)\n n(j) = nnz(~Empty);\n k(j) = k(j)+1;\n if n(j) > n0(j)\n qsms{j} = qsm{j}(~Empty);\n n0(j) = n(j);\n end\n else\n % Successful models generated\n qsms{j} 
= qsm{j};\n k(j) = 10;\n end\n end\n if k(j) == 6\n disp('Incomplete run!!')\n end\n end\n % Save the models\n for j = 1:Nmodels\n QSM = qsms{j};\n a = max(size(QSM));\n QSMs(m:m+a-1) = QSM;\n m = m+n(j);\n end\n str = ['results/',savename];\n save(str,'QSMs')\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "make_models.m", "ext": ".m", "path": "TreeQSM-master/src/make_models.m", "size": 7381, "source_encoding": "utf_8", "md5": "4c4a04194131735e4fc02825bc11a987", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction QSMs = make_models(dataname,savename,Nmodels,inputs)\n\n% ---------------------------------------------------------------------\n% MAKE_MODELS.M Makes QSMs of given point clouds.\n%\n% Version 1.1.0\n% Latest update 9 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Makes QSMs of given point clouds specified by the \"dataname\" and by the\n% other inputs. The results are saved into file named \"savename\".\n% Notice, the code does not save indivual QSM runs into their own .mat or\n% .txt files but saves all models into one big .mat file.\n%\n% Inputs:\n% dataname String specifying the .mat-file containing the point\n% clouds that are used for the QSM reconstruction.\n% savename String, the name of the file where the QSMs are saved\n% Nmodels (Optional) Number of models generated for each input\n% (cloud and input parameters). Default value is 5.\n% inputs (Optional) The input parameters structure. Can be defined\n% below as part of this code. Can also be given as a\n% structure array where each tree gets its own, possibly\n% uniquely, defined parameters (e.g. optimal parameters)\n% but each tree has to have same number of parameter values.\n%\n% Output:\n% QSMs Structure array containing all the QSMs generated\n% ---------------------------------------------------------------------\n\n% Changes from version 1.1.0 to 1.1.1, 18 Aug 2020:\n% 1) Removed the inputs \"lcyl\" and \"FilRad\" from the inputs and the\n% calculations of number of input parameters\n\n% Changes from version 1.0.0 to 1.1.0, 03 Oct 2019:\n% 1) Added try-catch structure where \"treeqsm\" is called, so that if there\n% is an error during the reconstruction process of one tree, then the\n% larger process of making multiple QSMs from multiple tree is not\n% stopped.\n% 2) Changed the way the data is loaded. 
Previously all the data was\n% loaded into workspace, now only one point cloud is in the workspace.\n% 3) Corrected a bug where incomplete QSM was saved as complete QSM\n% 4) Changed where the input-structure for each tree is reconstructed\n\nif nargin < 2\n disp('Not enough inputs, no models generated!')\n QSMs = struct([]);\n return\nend\n\nif nargin == 2\n Nmodels = 5; % Number of models per inputs, usually about 5 models is enough\nend\n\n%% Define the parameter values\nif nargin == 3 || nargin == 2\n % The following parameters can be varied and should be optimised\n % (each can have multiple values):\n % Patch size of the first uniform-size cover:\n inputs.PatchDiam1 = [0.08 0.1];\n % Minimum patch size of the cover sets in the second cover:\n inputs.PatchDiam2Min = [0.015 0.025];\n % Maximum cover set size in the stem's base in the second cover:\n inputs.PatchDiam2Max = [0.06 0.08];\n\n % The following parameters can be varied and but usually can be kept as\n % shown (i.e. as little bigger than PatchDiam parameters):\n % Ball radius used for the first uniform-size cover generation:\n inputs.BallRad1 = inputs.PatchDiam1+0.02;\n % Maximum ball radius used for the second cover generation:\n inputs.BallRad2 = inputs.PatchDiam2Max+0.01;\n\n % The following parameters can be usually kept fixed as shown:\n inputs.nmin1 = 3; % Minimum number of points in BallRad1-balls, good value is 3\n inputs.nmin2 = 1; % Minimum number of points in BallRad2-balls, good value is 1\n inputs.OnlyTree = 1; % If \"1\", then point cloud contains points only from the tree\n inputs.Tria = 0; % If \"1\", then triangulation produces\n inputs.Dist = 1; % If \"1\", then computes the point-model distances\n\n % Different cylinder radius correction options for modifying too large and\n % too small cylinders:\n % Traditional TreeQSM choices:\n % Minimum cylinder radius, used particularly in the taper corrections:\n inputs.MinCylRad = 0.0025;\n % Child branch cylinders radii are always smaller than the parent\n % branche's cylinder radii:\n inputs.ParentCor = 1;\n % Use partially linear (stem) and parabola (branches) taper corrections:\n inputs.TaperCor = 1;\n % Growth volume correction approach introduced by Jan Hackenberg,\n % allometry: GrowthVol = a*Radius^b+c\n % Use growth volume correction:\n inputs.GrowthVolCor = 0;\n % fac-parameter of the growth vol. 
approach, defines upper and lower\n % boundary:\n inputs.GrowthVolFac = 2.5;\n\n inputs.name = 'test';\n inputs.tree = 0;\n inputs.plot = 0;\n inputs.savetxt = 0;\n inputs.savemat = 0;\n inputs.disp = 0;\nend\n\n% Compute the number of input parameter combinations\nin = inputs(1);\nninputs = prod([length(in.PatchDiam1) length(in.PatchDiam2Min)...\n length(in.PatchDiam2Max)]);\n\n%% Load data\nmatobj = matfile([dataname,'.mat']);\nnames = fieldnames(matobj);\ni = 1;\nn = max(size(names));\nwhile i <= n && ~strcmp(names{i,:},'Properties')\n i = i+1;\nend\nI = (1:1:n);\nI = setdiff(I,i);\nnames = names(I,1);\nnames = sort(names);\nnt = max(size(names)); % number of trees/point clouds\n\n%% make the models\nQSMs = struct('cylinder',{},'branch',{},'treedata',{},'rundata',{},...\n 'pmdistance',{},'triangulation',{});\n\n% Generate Inputs struct that contains the input parameters for each tree\nif max(size(inputs)) == 1\n for i = 1:nt\n Inputs(i) = inputs;\n Inputs(i).name = names{i};\n Inputs(i).tree = i;\n Inputs(i).plot = 0;\n Inputs(i).savetxt = 0;\n Inputs(i).savemat = 0;\n Inputs(i).disp = 0;\n end\nelse\n Inputs = inputs;\nend\n\nm = 1;\nfor t = 1:nt % trees\n disp(['Modelling tree ',num2str(t),'/',num2str(nt),' (',Inputs(t).name,'):'])\n P = matobj.(Inputs(t).name);\n j = 1; % model number under generation, make \"Nmodels\" models per tree\n inputs = Inputs(t);\n while j <= Nmodels % generate N models per input\n k = 1;\n n0 = 0;\n inputs.model = j;\n while k <= 5 % try up to five times to generate non-empty models\n try\n QSM = treeqsm(P,inputs);\n catch\n QSM = struct('cylinder',{},'branch',{},'treedata',{},...\n 'rundata',{},'pmdistance',{},'triangulation',{});\n QSM(ninputs).treedata = 0;\n end\n\n n = max(size(QSM));\n Empty = false(n,1);\n for b = 1:n\n if isempty(QSM(b).branch)\n Empty(b) = true;\n end\n end\n if n < ninputs || any(Empty)\n n = nnz(~Empty);\n k = k+1;\n if n >= n0\n qsm = QSM(~Empty);\n n0 = n;\n end\n else\n % Succesfull models generated\n QSMs(m:m+n-1) = QSM;\n m = m+n;\n k = 10;\n end\n end\n if k == 6\n disp('Incomplete run!!')\n QSMs(m:m+n0-1) = qsm;\n m = m+n0;\n end\n j = j+1;\n end\n stri = ['results/',savename];\n save(stri,'QSMs')\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "select_optimum.m", "ext": ".m", "path": "TreeQSM-master/src/select_optimum.m", "size": 41288, "source_encoding": "utf_8", "md5": "4810c22b2697e27fafb380cec479755f", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
If not, see .\n\nfunction [TreeData,OptModels,OptInputs,OptQSM] = ...\n select_optimum(QSMs,Metric,savename)\n\n% ---------------------------------------------------------------------\n% SELECT_OPTIMUM.M Selects optimum models based on point-cylinder model\n% distances or standard deviations of attributes\n%\n% Version 1.4.0 \n% Latest update 2 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Works for single or multiple tree cases where the input QSMs contains\n% multiple models for the same tree with different inputs and multiple runs\n% with the same inputs. Allows the user to select from 34 different metrics\n% for the optimization. These include average point-model distances from\n% all, trunk, branch, 1st-order branch and 2nd-order branch cylinders plus\n% some combinations where e.g. \"mean trunk and mean branch\" or \"mean trunk\n% and mean 1st-order branch\" point-model distances are added together.\n% Similarly for the maximum point-model distances and the sums of mean and\n% the maximum distances.\n% The difference between \"all\" and \"trunk and branch\" is that \"all\"\n% is the average of all cylinder distances which usually emphasizes\n% branch cylinder as there usually much more those, whereas \"trunk and branch\"\n% gives equal weight for trunk and branch cylinders.\n% The other options for metric are based on minimizing the standard deviations\n% of volumes (total, trunk, branch, trunk+branch which have equal emphasis\n% between trunk and branches), lengths (trunk, branches) or total number of\n% branches. Here the idea is that if the variance (standard deviation) of\n% some attribute between models with the same inputs is small, then it\n% indicates some kind of robustness which might indicate that the inputs\n% are close to optimal.\n% The optimal single model out of the models with the optimal inputs is\n% selected based on the minimum mean point-model-distance.\n%\n% Inputs:\n% QSMs Contain all the QSMs, possibly from multiple trees\n% Metric Optional input, Metric to be minimized:\n% CYLINDER-DISTANCE METRICS:\n% 'all_mean_dis' = mean distances from (mdf) all cylinders, DEFAULT option\n% 'trunk_mean_dis' = mdf trunk cylinders,\n% 'branch_mean_dis' = mdf all branch cylinders,\n% '1branch_mean_dis' = mdf 1st-order branch cylinders,\n% '2branch_mean_dis' = mdf 2nd-order branch cylinders,\n% 'trunk+branch_mean_dis' = mdf trunk + mdf branch cylinders,\n% 'trunk+1branch_mean_dis' = mdf trunk + mdf 1st-ord branch cyls,\n% 'trunk+1branch+2branch_mean_dis' = above + mdf 2nd-ord branch cyls\n% '1branch+2branch_mean_dis' = mdf 1branch cyls + mdf 2branch cyls\n% 'all_max_dis' = maximum distances from (mdf) all cylinders\n% 'trunk_max_dis' = mdf trunk cylinders,\n% 'branch_max_dis' = mdf all branch cylinders,\n% '1branch_max_dis' = mdf 1st-order branch cylinders,\n% '2branch_max_dis' = mdf 2nd-order branch cylinders,\n% 'trunk+branch_max_dis' = mdf trunk + mdf branch cylinders,\n% 'trunk+1branch_max_dis' = mdf trunk + mdf 1st-ord branch cyls,\n% 'trunk+1branch+2branch_max_dis' = above + mdf 2nd-ord branch cyls.\n% '1branch+2branch_max_dis' = mdf 1branch cyls + mdf 2branch cyls\n% 'all_mean+max_dis' = mean + maximum distances from (m+mdf) all cylinders\n% 'trunk_mean+max_dis' = (m+mdf) trunk cylinders,\n% 'branch_mean+max_dis' = (m+mdf) all branch cylinders,\n% '1branch_mean+max_dis' = (m+mdf) 1st-order branch cylinders,\n% '2branch_mean+max_dis' = (m+mdf) 2nd-order branch cylinders,\n% 
'trunk+branch_mean+max_dis' = (m+mdf) trunk + (m+mdf) branch cylinders,\n% 'trunk+1branch_mean+max_dis' = (m+mdf) trunk + (m+mdf) 1branch cyls,\n% 'trunk+1branch+2branch_mean+max_dis' = above + (m+mdf) 2branch cyls.\n% '1branch+2branch_mean+max_dis' = (m+mdf) 1branch cyls + (m+mdf) 2branch cyls\n% STANDARD DEVIATION METRICS:\n% 'tot_vol_std' = standard deviation of total volume\n% 'trunk_vol_std' = standard deviation of trunk volume\n% 'branch_vol_std' = standard deviation of branch volume\n% 'trunk+branch_vol_std' = standard deviation of trunk plus branch volume\n% 'tot_are_std' = standard deviation of total area\n% 'trunk_are_std' = standard deviation of trunk area\n% 'branch_are_std' = standard deviation of branch area\n% 'trunk+branch_are_std' = standard deviation of trunk plus branch area\n% 'trunk_len_std' = standard deviation of trunk length\n% 'branch_len_std' = standard deviation of branch length\n% 'branch_num_std' = standard deviation of number of branches\n% BRANCH-ORDER DISTRIBUTION METRICS:\n% 'branch_vol_ord3_mean' = mean difference in volume of 1-3 branch orders\n% 'branch_are_ord3_mean' = mean difference in area of 1-3 branch orders\n% 'branch_len_ord3_mean' = mean difference in length of 1-3 branch orders\n% 'branch_num_ord3_mean' = mean difference in number of 1-3 branch orders\n% 'branch_vol_ord3_max' = max difference in volume of 1-3 branch orders\n% 'branch_are_ord3_max' = max difference in area of 1-3 branch orders\n% 'branch_len_ord3_max' = max difference in length of 1-3 branch orders\n% 'branch_num_ord3_max' = max difference in number of 1-3 branch orders\n% 'branch_vol_ord6_mean' = mean difference in volume of 1-6 branch orders\n% 'branch_are_ord6_mean' = mean difference in area of 1-6 branch orders\n% 'branch_len_ord6_mean' = mean difference in length of 1-6 branch orders\n% 'branch_num_ord6_mean' = mean difference in number of 1-6 branch orders\n% 'branch_vol_ord6_max' = max difference in volume of 1-6 branch orders\n% 'branch_are_ord6_max' = max difference in area of 1-6 branch orders\n% 'branch_len_ord6_max' = max difference in length of 1-6 branch orders\n% 'branch_num_ord6_max' = max difference in number of 1-6 branch orders\n% CYLINDER DISTRIBUTION METRICS:\n% 'cyl_vol_dia10_mean') = mean diff. in volume of 1-10cm diam cyl classes\n% 'cyl_are_dia10_mean') = mean diff. in area of 1-10cm diam cyl classes\n% 'cyl_len_dia10_mean') = mean diff. in length of 1-10cm diam cyl classes\n% 'cyl_vol_dia10_max') = max diff. in volume of 1-10cm diam cyl classes\n% 'cyl_are_dia10_max') = max diff. in area of 1-10cm diam cyl classes\n% 'cyl_len_dia10_max') = max diff. in length of 1-10cm diam cyl classes\n% 'cyl_vol_dia20_mean') = mean diff. in volume of 1-20cm diam cyl classes\n% 'cyl_are_dia20_mean') = mean diff. in area of 1-20cm diam cyl classes\n% 'cyl_len_dia20_mean') = mean diff. in length of 1-20cm diam cyl classes\n% 'cyl_vol_dia20_max') = max diff. in volume of 1-20cm diam cyl classes\n% 'cyl_are_dia20_max') = max diff. in area of 1-20cm diam cyl classes\n% 'cyl_len_dia20_max') = max diff. in length of 1-20cm diam cyl classes\n% 'cyl_vol_zen_mean') = mean diff. in volume of cyl zenith distribution\n% 'cyl_are_zen_mean') = mean diff. in area of cyl zenith distribution\n% 'cyl_len_zen_mean') = mean diff. in length of cyl zenith distribution\n% 'cyl_vol_zen_max') = max diff. in volume of cyl zenith distribution\n% 'cyl_are_zen_max') = max diff. in area of cyl zenith distribution\n% 'cyl_len_zen_max') = max diff. 
in length of cyl zenith distribution\n% SURFACE COVERAGE METRICS:\n% metric to be minimized is 1-mean(surface_coverage) or 1-min(SC)\n% 'all_mean_surf' = mean surface coverage from (msc) all cylinders\n% 'trunk_mean_surf' = msc trunk cylinders,\n% 'branch_mean_surf' = msc all branch cylinders,\n% '1branch_mean_surf' = msc 1st-order branch cylinders,\n% '2branch_mean_surf' = msc 2nd-order branch cylinders,\n% 'trunk+branch_mean_surf' = msc trunk + msc branch cylinders,\n% 'trunk+1branch_mean_surf' = msc trunk + msc 1st-ord branch cyls,\n% 'trunk+1branch+2branch_mean_surf' = above + msc 2nd-ord branch cyls\n% '1branch+2branch_mean_surf' = msc 1branch cyls + msc 2branch cyls\n% 'all_min_surf' = minimum surface coverage from (msc) all cylinders\n% 'trunk_min_surf' = msc trunk cylinders,\n% 'branch_min_surf' = msc all branch cylinders,\n% '1branch_min_surf' = msc 1st-order branch cylinders,\n% '2branch_min_surf' = msc 2nd-order branch cylinders,\n% 'trunk+branch_min_surf' = msc trunk + msc branch cylinders,\n% 'trunk+1branch_min_surf' = msc trunk + msc 1st-ord branch cyls,\n% 'trunk+1branch+2branch_min_surf' = above + msc 2nd-ord branch cyls.\n% '1branch+2branch_min_surf' = msc 1branch cyls + msc 2branch cyls\n% savename Optional input, name string specifying the name of the saved file\n% containing the outputs\n%\n% Outputs:\n% TreeData Similar structure array as the \"treedata\" in QSMs but now each\n% attribute contains the mean and std computed from the models\n% with the optimal inputs. Also contains the sensitivities\n% for the inputs PatchDiam1, PatchDiam2Min, PatchDiam2Max.\n% Thus for single number attributes (e.g. TotalVolume) there\n% are five numbers [mean std sensi_PD1 sensi_PD2Min sensi_PD2Max]\n% OptModels Indexes of the models with the optimal inputs (column 1) and\n% the index of the optimal single model (column 2) in \"QSMs\" \n% for each tree\n% OptInputs The optimal input parameters for each tree\n% OptQSMs The single best QSM for each tree, OptQSMs = QSMs(OptModel);\n% ---------------------------------------------------------------------\n\n\n% Changes from version 1.3.1 to 1.4.0, 2 May 2022:\n% 1) Added estimation of (relative) sensitivity of the single number\n% attributes in TreeData for the inputs PatchDiam1, PatchDiam2Min,\n% PatchDiam2Max. Now TreeData contains also these values as the columns\n% 3 to 5.\n% 2) Corrected a small bug in the subfunction \"collect_data\" (assignment\n% of values for \"CylSurfCov(i,:)\"). The bug caused error for QSMs whose\n% maximum branch order is less than 2.\n% 3) Bug fix for 3 lines (caused error for some cases and for other cases\n% the optimal single model was wrongly selected):\n% [~,T] = min(dist(ind,best)); --> [~,T] = min(Data.CylDist(ind,best));\n\n% Changes from version 1.2.0 to 1.3.0, 4 Aug 2020:\n% 1) Removed two inputs (\"lcyl\" and \"FilRad\") from the inputs to be\n% optimised. This corresponds to changes in the cylinder fitting.\n% 2) Added more choices for the optimisation criteria or cost\n% functions (\"metric\") that are minimised. There is now 91 metrics and\n% the new ones include surface coverage based metrics.\n\n% Changes from version 1.1.1 to 1.2.0, 4 Feb 2020:\n% 1) Major change in the structure: subfunctions\n% 2) Added more choices for the optimisation criteria or cost\n% functions (\"metric\") that are minimised. 
There is now 73 metrics and in\n% particular the new ones include some area related metrics and branch\n% and cylinder distribution based metrics.\n\n% Changes from version 1.1.0 to 1.1.1, 26 Nov 2019:\n% 1) Added the \"name\" of the point cloud from the inputs.name to the output\n% TreeData as a field. Also now displays the name together with the tree\n% number.\n% 2) TreeData contains now correctly fields (\"location\", \"StemTaper\",\n% \"VolumeBranchOrder\", etc) from the Optimal QSMs.\n\n% Changes from version 1.0.0 to 1.1.0, 08 Oct 2019:\n% 1) Added the posibility to select the optimisation criteria or cost\n% function (\"metric\") that is minimised from 34 different options.\n% Previously only one option was used. The used metric is also included\n% in \"OptInputs\" output as one of the fields.\n% 2) Added OptQSM as one of the outputs\n\n%% Select the metric based on the input\nif nargin > 1\n [met,Metric] = select_metric(Metric);\nelse\n met = 1;\n Metric = 'all_mean_dis';\nend\n\n\n% The metric for selecting the optimal single model from the models with\n% the optimal inputs is the mean point-model-distance.\nbest = 1;\n\n%% Collect data\n% Find the first non-empty model\ni = 1;\nwhile isempty(QSMs(i).cylinder)\n i = i+1;\nend\n% Determine how many single-number attributes there are in treedata\nnames = fieldnames(QSMs(i).treedata);\nn = 1;\nwhile numel(QSMs(i).treedata.(names{n})) == 1\n n = n+1;\nend\nn = n-1;\nNames = names(1:n);\nL = max(cellfun('length',Names))+1;\nfor i = 1:n\n name = Names{i};\n name(L) = ' ';\n Names{i} = name;\nend\n\n% Collect data:\n[treedata,inputs,TreeId,Data] = collect_data(QSMs,names,n);\n\n% Trees and their unique IDs\nTreeIds = unique(TreeId(:,1));\nnt = length(TreeIds); % number of trees\n\nDataM = zeros(n,nt);\nDataS = zeros(n,nt); % Standard deviation of tree data for each tree\nDataM2 = DataM; DataM3 = DataM;\nDataS2 = DataS; DataS3 = DataS;\n\nOptIn = zeros(nt,9); % Optimal input values\nOptDist = zeros(nt,9); % Smallest metric values\n\n\n% average treedata and inputs for each tree-input-combination:\nTreeDataAll = zeros(nt,5*5*5,n);\nInputs = zeros(nt,5*5*5,3);\n\nIndAll = (1:1:size(TreeId,1))';\n\n% Indexes of the optimal single models in QSMs:\nOptModel = zeros(nt,3);\n% The indexes of models in QSMs with the optimal inputs (col 1)\n% and the indexes of the optimal single models (col 2):\nOptModels = cell(nt,2);\n\nNInputs = zeros(nt,1);\n\n%% Process each tree separately\nfor tree = 1:nt\n % Select the models for the tree\n Models = TreeId(:,1) == TreeIds(tree);\n\n %% Determine the input parameter values\n InputParComb = unique(inputs(Models,:),'rows'); % Input parameter combinations\n IV = cell(3,1);\n N = zeros(3,1);\n for i = 1:3\n I = unique(InputParComb(:,i));\n IV{i} = I;\n N(i) = length(I);\n end\n\n %% Determine metric-value for each input\n % (average over number of models with the same inputs)\n input = cell(1,N(1)*N(2)*N(3));\n distM = zeros(1,N(1)*N(2)*N(3)); % average distances or volume stds\n b = 0;\n for d = 1:N(1) % PatchDiam1\n J = abs(inputs(:,1)-IV{1}(d)) < 0.0001;\n for a = 1:N(2) % PatchDiam2Min\n K = abs(inputs(:,2)-IV{2}(a)) < 0.0001;\n for i = 1:N(3) % PatchDiam2Max\n L = abs(inputs(:,3)-IV{3}(i)) < 0.0001;\n\n % Select models for the tree with the same inputs:\n T = Models & J & K & L;\n b = b+1;\n input{b} = [d a i];\n\n % Compute the metric value;\n D = compute_metric_value(met,T,treedata,Data);\n distM(b) = D;\n\n % Collect the data and inputs\n TreeDataAll(tree,b,:) = mean(treedata(:,T),2);\n 
Inputs(tree,b,:) = [IV{1}(d) IV{2}(a) IV{3}(i)];\n end\n end\n end\n\n %% Determine the optimal inputs and models\n ninputs = prod(N);\n NInputs(tree) = ninputs;\n [d,J] = sort(distM);\n O = input{J(1)};\n OptIn(tree,1:3) = [IV{1}(O(1)) IV{2}(O(2)) IV{3}(O(3))];\n OptDist(tree,1) = d(1);\n if ninputs > 1\n O = input{J(2)};\n OptIn(tree,4:6) = [IV{1}(O(1)) IV{2}(O(2)) IV{3}(O(3))];\n OptDist(tree,2) = d(2);\n if ninputs > 2\n O = input{J(3)};\n OptIn(tree,7:9) = [IV{1}(O(1)) IV{2}(O(2)) IV{3}(O(3))];\n OptDist(tree,3) = d(3);\n end\n end\n\n %% Mean of tree data for each tree computed from the optimal models:\n % Select the optimal models for each tree: In the case of multiple models\n % with same inputs, select the one model with the optimal inputs that\n % has the minimum metric value.\n J = abs(inputs(:,1)-OptIn(tree,1)) < 0.0001;\n K = abs(inputs(:,2)-OptIn(tree,2)) < 0.0001;\n L = abs(inputs(:,3)-OptIn(tree,3)) < 0.0001;\n T = Models & J & K & L;\n ind = IndAll(T);\n [~,T] = min(Data.CylDist(ind,best));\n OptModel(tree,1) = ind(T);\n OptModels{tree,1} = ind;\n OptModels{tree,2} = ind(T);\n DataM(:,tree) = mean(treedata(:,ind),2);\n DataS(:,tree) = std(treedata(:,ind),[],2);\n if ninputs > 1\n J = abs(inputs(:,1)-OptIn(tree,4)) < 0.0001;\n K = abs(inputs(:,2)-OptIn(tree,5)) < 0.0001;\n L = abs(inputs(:,3)-OptIn(tree,6)) < 0.0001;\n T = Models & J & K & L;\n ind = IndAll(T);\n [~,T] = min(Data.CylDist(ind,best));\n OptModel(tree,2) = ind(T);\n DataM2(:,tree) = mean(treedata(:,ind),2);\n DataS2(:,tree) = std(treedata(:,ind),[],2);\n if ninputs > 2\n J = abs(inputs(:,1)-OptIn(tree,7)) < 0.0001;\n K = abs(inputs(:,2)-OptIn(tree,8)) < 0.0001;\n L = abs(inputs(:,3)-OptIn(tree,9)) < 0.0001;\n T = Models & J & K & L;\n ind = IndAll(T);\n [~,T] = min(Data.CylDist(ind,best));\n OptModel(tree,3) = ind(T);\n DataM3(:,tree) = mean(treedata(:,ind),2);\n DataS3(:,tree) = std(treedata(:,ind),[],2);\n end\n end\n\n % Decrease the number on non-zero decimals\n DataM(:,tree) = change_precision(DataM(:,tree));\n DataS(:,tree) = change_precision(DataS(:,tree));\n if ninputs > 1\n DataM2(:,tree) = change_precision(DataM2(:,tree));\n DataS2(:,tree) = change_precision(DataS2(:,tree));\n if ninputs > 2\n DataM3(:,tree) = change_precision(DataM3(:,tree));\n DataS3(:,tree) = change_precision(DataS3(:,tree));\n end\n end\n\n % Define the output \"OptInputs\"\n OptM = IndAll(OptModel(tree,1));\n OptInputs(tree) = QSMs(OptM).rundata.inputs;\n if ninputs > 1\n OptM2 = IndAll(OptModel(tree,2));\n OI2(tree) = QSMs(OptM2).rundata.inputs;\n if ninputs > 2\n OptM3 = IndAll(OptModel(tree,3));\n OI3(tree) = QSMs(OptM3).rundata.inputs;\n end\n end\n\nend\nN = max(NInputs);\nTreeDataAll = TreeDataAll(:,1:N,:);\nInputs = Inputs(:,1:N,:);\n\n% Compute Coefficient of variation for the data\nOptModel = IndAll(OptModel(:,1));\nOptQSM = QSMs(OptModel);\nDataCV = DataS./DataM*100; % Coefficient of variation\nif ninputs > 1\n DataCV2 = DataS2./DataM2*100; % Coefficient of variation\n if ninputs > 2\n DataCV3 = DataS3./DataM3*100; % Coefficient of variation\n end\nend\n% Decrease the number on non-zero decimals\nfor j = 1:nt\n DataCV(:,j) = change_precision(DataCV(:,j));\n if ninputs > 1\n DataCV2(:,j) = change_precision(DataCV2(:,j));\n if ninputs > 2\n DataCV3(:,j) = change_precision(DataCV3(:,j));\n end\n end\nend\n\n%% Display some data about optimal models\n% Display optimal inputs, model and attributes for each tree\nfor t = 1:nt\n disp('-------------------------------')\n disp([' Tree: ',num2str(OptInputs(t).tree),', 
',OptInputs(t).name])\n if NInputs(t) == 1\n disp([' Metric: ',Metric])\n disp([' Metric value: ',num2str(1000*OptDist(t,1))])\n disp([' Optimal inputs: PatchDiam1 = ',...\n num2str(OptInputs(t).PatchDiam1)])\n disp([' PatchDiam2Min = ',...\n num2str(OptInputs(t).PatchDiam2Min)])\n disp([' PatchDiam2Max = ',...\n num2str(OptInputs(t).PatchDiam2Max)])\n disp([' Optimal model: ',num2str(OptModel(t))])\n sec = num2str(round(QSMs(OptModel(t)).rundata.time(end)));\n disp([' Reconstruction time for the optimal model: ',sec,' seconds'])\n disp(' Attributes (mean, std, CV(%)):')\n for i = 1:n\n str = ([' ',Names{i},': ',num2str([...\n DataM(i,t) DataS(i,t) DataCV(i,t)])]);\n disp(str)\n end\n elseif NInputs(t) == 2\n disp(' The best two cases:')\n disp([' Metric: ',Metric])\n disp([' Metric values: ',num2str(OptDist(t,1:2))])\n disp([' inputs: PatchDiam1 = ',...\n num2str([OptInputs(t).PatchDiam1 OI2(t).PatchDiam1])])\n disp([' PatchDiam2Min = ',...\n num2str([OptInputs(t).PatchDiam2Min OI2(t).PatchDiam2Min])])\n disp([' PatchDiam2Max = ',...\n num2str([OptInputs(t).PatchDiam2Max OI2(t).PatchDiam2Max])])\n disp([' Optimal model: ',num2str(OptModel(t))])\n sec = num2str(round(QSMs(OptModel(t)).rundata.time(end)));\n disp([' Reconstruction time for the optimal model: ',sec,' seconds'])\n disp(' Attributes (mean, std, CV(%), second best mean):')\n for i = 1:n\n str = ([' ',Names{i},': ',num2str([DataM(i,t) ...\n DataS(i,t) DataCV(i,t) DataM2(i,t)])]);\n disp(str)\n end\n elseif NInputs(t) > 2\n disp(' The best three cases:')\n disp([' Metric: ',Metric])\n disp([' Metric values: ',num2str(OptDist(t,1:3))])\n disp([' inputs: PatchDiam1 = ',num2str([...\n OptInputs(t).PatchDiam1 OI2(t).PatchDiam1 OI3(t).PatchDiam1])])\n disp([' PatchDiam2Min = ',num2str([...\n OptInputs(t).PatchDiam2Min OI2(t).PatchDiam2Min OI3(t).PatchDiam2Min])])\n disp([' PatchDiam2Max = ',num2str([...\n OptInputs(t).PatchDiam2Max OI2(t).PatchDiam2Max OI3(t).PatchDiam2Max])])\n disp([' Optimal model: ',num2str(OptModel(t))])\n sec = num2str(round(QSMs(OptModel(t)).rundata.time(end)));\n disp([' Reconstruction time for the optimal model: ',sec,' seconds'])\n str = [' Attributes (mean, std, CV(%),',...\n ' second best mean, third best mean, sensitivity):'];\n disp(str)\n for i = 1:n\n sensi = max(abs([DataM(i,t)-DataM2(i,t)...\n DataM(i,t)-DataM3(i,t)])/DataM(i,t));\n sensi2 = 100*sensi;\n sensi = 100*sensi/DataCV(i,t);\n sensi2 = change_precision(sensi2);\n sensi = change_precision(sensi);\n str = ([' ',Names{i},': ',num2str([DataM(i,t) DataS(i,t) ...\n DataCV(i,t) DataM2(i,t) DataM3(i,t) sensi sensi2])]);\n disp(str)\n end\n end\n disp('------')\nend\n\n%% Compute the sensitivity of the tree attributes relative to PatchDiam-parameters\nSensi = sensitivity_analysis(TreeDataAll,TreeId,Inputs,OptIn,NInputs);\n\n%% Generate TreeData sructure for optimal models\nclear TreeData\nTreeData = vertcat(OptQSM(:).treedata);\nfor t = 1:nt\n for i = 1:n\n TreeData(t).(names{i}) = [DataM(i,t) DataS(i,t) squeeze(Sensi(t,i,:))'];\n end\n TreeData(t).name = OptInputs(t).name;\nend\n\n%% Add the metric for the \"OptInputs\"\nfor i = 1:nt\n OptInputs(i).metric = Metric;\nend\n\n%% Save results\nif nargin == 3\n str = ['results/OptimalQSMs_',savename];\n save(str,'TreeData','OptModels','OptInputs','OptQSM')\n\n str = ['results/tree_data_',savename,'.txt'];\n fid = fopen(str, 'wt');\n fprintf(fid, [repmat('%g\\t', 1, size(DataM,2)-1) '%g\\n'], DataM.');\n fclose(fid);\nend\n\n% End of main function\nend\n\n\nfunction [treedata,inputs,TreeId,Data] = 
collect_data(...\n QSMs,names,Nattri)\n\nNmod = max(size(QSMs)); % number of models\ntreedata = zeros(Nattri,Nmod); % Collect all tree attributes from all models\ninputs = zeros(Nmod,3); % collect the inputs from all models\n% ([PatchDiam1 PatchDiam2Min PatchDiam2Max])\nCylDist = zeros(Nmod,10); % collect the distances from all models\nCylSurfCov = zeros(Nmod,10); % collect the surface coverages from all models\ns = 6; % maximum branch order\nOrdDis = zeros(Nmod,4*s); % collect the distributions from all the models\nr = 20; % maximum cylinder diameter\nCylDiaDis = zeros(Nmod,3*r);\nCylZenDis = zeros(Nmod,54);\nTreeId = zeros(Nmod,2); % collectd the tree and model indexes from all models\nKeep = true(Nmod,1); % Non-empty models\n\nfor i = 1:Nmod\n if ~isempty(QSMs(i).cylinder)\n % Collect input-parameter values and tree IDs:\n p = QSMs(i).rundata.inputs;\n inputs(i,:) = [p.PatchDiam1 p.PatchDiam2Min p.PatchDiam2Max];\n TreeId(i,:) = [p.tree p.model];\n\n % Collect cylinder-point distances: mean of all cylinders,\n % mean of trunk, branch, 1st- and 2nd-order branch cylinders.\n % And the maximum of the previous:\n D = QSMs(i).pmdistance;\n CylDist(i,:) = [D.mean D.TrunkMean D.BranchMean D.Branch1Mean ...\n D.Branch2Mean D.max D.TrunkMax D.BranchMax D.Branch1Max ...\n D.Branch2Max];\n\n % Collect surface coverages: mean of all cylinders,\n % mean of trunk, branch, 1st- and 2nd-order branch cylinders.\n % And the minimum of the previous:\n D = QSMs(i).cylinder.SurfCov;\n T = QSMs(i).cylinder.branch == 1;\n B1 = QSMs(i).cylinder.BranchOrder == 1;\n B2 = QSMs(i).cylinder.BranchOrder == 2;\n if ~any(B1)\n CylSurfCov(i,:) = [mean(D) mean(D(T)) 0 0 0 ...\n min(D) min(D(T)) 0 0 0];\n elseif ~any(B2)\n CylSurfCov(i,:) = [mean(D) mean(D(T)) mean(D(~T)) mean(D(B1)) ...\n 0 min(D) min(D(T)) min(D(~T)) min(D(B1)) 0];\n else\n CylSurfCov(i,:) = [mean(D) mean(D(T)) mean(D(~T)) mean(D(B1)) ...\n mean(D(B2)) min(D) min(D(T)) min(D(~T)) min(D(B1)) min(D(B2))];\n end\n\n % Collect branch-order distributions:\n d = QSMs(i).treedata.VolBranchOrd;\n nd = length(d);\n if nd > 0\n a = min(nd,s);\n OrdDis(i,1:a) = d(1:a);\n OrdDis(i,s+1:s+a) = QSMs(i).treedata.AreBranchOrd(1:a);\n OrdDis(i,2*s+1:2*s+a) = QSMs(i).treedata.LenBranchOrd(1:a);\n OrdDis(i,3*s+1:3*s+a) = QSMs(i).treedata.NumBranchOrd(1:a);\n end\n\n % Collect cylinder diameter distributions:\n d = QSMs(i).treedata.VolCylDia;\n nd = length(d);\n if nd > 0\n a = min(nd,r);\n CylDiaDis(i,1:a) = d(1:a);\n CylDiaDis(i,r+1:r+a) = QSMs(i).treedata.AreCylDia(1:a);\n CylDiaDis(i,2*r+1:2*r+a) = QSMs(i).treedata.LenCylDia(1:a);\n end\n\n % Collect cylinder zenith direction distributions:\n d = QSMs(i).treedata.VolCylZen;\n if ~isempty(d)\n CylZenDis(i,1:18) = d;\n CylZenDis(i,19:36) = QSMs(i).treedata.AreCylZen;\n CylZenDis(i,37:54) = QSMs(i).treedata.LenCylZen;\n end\n\n % Collect the treedata values from each model\n for j = 1:Nattri\n treedata(j,i) = QSMs(i).treedata.(names{j});\n end\n\n else\n Keep(i) = false;\n end\nend\ntreedata = treedata(:,Keep);\ninputs = inputs(Keep,:);\nTreeId = TreeId(Keep,:);\nclear Data\nData.CylDist = CylDist(Keep,:);\nData.CylSurfCov = CylSurfCov(Keep,:);\nData.BranchOrdDis = OrdDis(Keep,:);\nData.CylDiaDis = CylDiaDis(Keep,:);\nData.CylZenDis = CylZenDis(Keep,:);\n\n% End of function\nend\n\n\nfunction [met,Metric] = select_metric(Metric)\n\n% Mean distance metrics:\nif strcmp(Metric,'all_mean_dis')\n met = 1;\nelseif strcmp(Metric,'trunk_mean_dis')\n met = 2;\nelseif strcmp(Metric,'branch_mean_dis')\n met = 3;\nelseif 
strcmp(Metric,'1branch_mean_dis')\n met = 4;\nelseif strcmp(Metric,'2branch_mean_dis')\n met = 5;\nelseif strcmp(Metric,'trunk+branch_mean_dis')\n met = 6;\nelseif strcmp(Metric,'trunk+1branch_mean_dis')\n met = 7;\nelseif strcmp(Metric,'trunk+1branch+2branch_mean_dis')\n met = 8;\nelseif strcmp(Metric,'1branch+2branch_mean_dis')\n met = 9;\n\n % Maximum distance metrics:\nelseif strcmp(Metric,'all_max_dis')\n met = 10;\nelseif strcmp(Metric,'trunk_max_dis')\n met = 11;\nelseif strcmp(Metric,'branch_max_dis')\n met = 12;\nelseif strcmp(Metric,'1branch_max_dis')\n met = 13;\nelseif strcmp(Metric,'2branch_max_dis')\n met = 14;\nelseif strcmp(Metric,'trunk+branch_max_dis')\n met = 15;\nelseif strcmp(Metric,'trunk+1branch_max_dis')\n met = 16;\nelseif strcmp(Metric,'trunk+1branch+2branch_max_dis')\n met = 17;\nelseif strcmp(Metric,'1branch+2branch_max_dis')\n met = 18;\n\n % Mean plus Maximum distance metrics:\nelseif strcmp(Metric,'all_mean+max_dis')\n met = 19;\nelseif strcmp(Metric,'trunk_mean+max_dis')\n met = 20;\nelseif strcmp(Metric,'branch_mean+max_dis')\n met = 21;\nelseif strcmp(Metric,'1branch_mean+max_dis')\n met = 22;\nelseif strcmp(Metric,'2branch_mean+max_dis')\n met = 23;\nelseif strcmp(Metric,'trunk+branch_mean+max_dis')\n met = 24;\nelseif strcmp(Metric,'trunk+1branch_mean+max_dis')\n met = 25;\nelseif strcmp(Metric,'trunk+1branch+2branch_mean+max_dis')\n met = 26;\nelseif strcmp(Metric,'1branch+2branch_mean+max_dis')\n met = 27;\n\n % Standard deviation metrics:\nelseif strcmp(Metric,'tot_vol_std')\n met = 28;\nelseif strcmp(Metric,'trunk_vol_std')\n met = 29;\nelseif strcmp(Metric,'branch_vol_std')\n met = 30;\nelseif strcmp(Metric,'trunk+branch_vol_std')\n met = 31;\nelseif strcmp(Metric,'tot_are_std')\n met = 32;\nelseif strcmp(Metric,'trunk_are_std')\n met = 33;\nelseif strcmp(Metric,'branch_are_std')\n met = 34;\nelseif strcmp(Metric,'trunk+branch_are_std')\n met = 35;\nelseif strcmp(Metric,'trunk_len_std')\n met = 36;\nelseif strcmp(Metric,'trunk+branch_len_std')\n met = 37;\nelseif strcmp(Metric,'branch_len_std')\n met = 38;\nelseif strcmp(Metric,'branch_num_std')\n met = 39;\n\n % Branch order distribution metrics:\nelseif strcmp(Metric,'branch_vol_ord3_mean')\n met = 40;\nelseif strcmp(Metric,'branch_are_ord3_mean')\n met = 41;\nelseif strcmp(Metric,'branch_len_ord3_mean')\n met = 42;\nelseif strcmp(Metric,'branch_num_ord3_mean')\n met = 43;\nelseif strcmp(Metric,'branch_vol_ord3_max')\n met = 44;\nelseif strcmp(Metric,'branch_are_ord3_max')\n met = 45;\nelseif strcmp(Metric,'branch_len_ord3_max')\n met = 46;\nelseif strcmp(Metric,'branch_num_ord3_max')\n met = 47;\nelseif strcmp(Metric,'branch_vol_ord6_mean')\n met = 48;\nelseif strcmp(Metric,'branch_are_ord6_mean')\n met = 49;\nelseif strcmp(Metric,'branch_len_ord6_mean')\n met = 50;\nelseif strcmp(Metric,'branch_num_ord6_mean')\n met = 51;\nelseif strcmp(Metric,'branch_vol_ord6_max')\n met = 52;\nelseif strcmp(Metric,'branch_are_ord6_max')\n met = 53;\nelseif strcmp(Metric,'branch_len_ord6_max')\n met = 54;\nelseif strcmp(Metric,'branch_num_ord6_max')\n met = 55;\n\n % Cylinder distribution metrics:\nelseif strcmp(Metric,'cyl_vol_dia10_mean')\n met = 56;\nelseif strcmp(Metric,'cyl_are_dia10_mean')\n met = 57;\nelseif strcmp(Metric,'cyl_len_dia10_mean')\n met = 58;\nelseif strcmp(Metric,'cyl_vol_dia10_max')\n met = 59;\nelseif strcmp(Metric,'cyl_are_dia10_max')\n met = 60;\nelseif strcmp(Metric,'cyl_len_dia10_max')\n met = 61;\nelseif strcmp(Metric,'cyl_vol_dia20_mean')\n met = 62;\nelseif 
strcmp(Metric,'cyl_are_dia20_mean')\n met = 63;\nelseif strcmp(Metric,'cyl_len_dia20_mean')\n met = 64;\nelseif strcmp(Metric,'cyl_vol_dia20_max')\n met = 65;\nelseif strcmp(Metric,'cyl_are_dia20_max')\n met = 66;\nelseif strcmp(Metric,'cyl_len_dia20_max')\n met = 67;\nelseif strcmp(Metric,'cyl_vol_zen_mean')\n met = 68;\nelseif strcmp(Metric,'cyl_are_zen_mean')\n met = 69;\nelseif strcmp(Metric,'cyl_len_zen_mean')\n met = 70;\nelseif strcmp(Metric,'cyl_vol_zen_max')\n met = 71;\nelseif strcmp(Metric,'cyl_are_zen_max')\n met = 72;\nelseif strcmp(Metric,'cyl_len_zen_max')\n met = 73;\n\n % Mean surface coverage metrics:\nelseif strcmp(Metric,'all_mean_surf')\n met = 74;\nelseif strcmp(Metric,'trunk_mean_surf')\n met = 75;\nelseif strcmp(Metric,'branch_mean_surf')\n met = 76;\nelseif strcmp(Metric,'1branch_mean_surf')\n met = 77;\nelseif strcmp(Metric,'2branch_mean_surf')\n met = 78;\nelseif strcmp(Metric,'trunk+branch_mean_surf')\n met = 79;\nelseif strcmp(Metric,'trunk+1branch_mean_surf')\n met = 80;\nelseif strcmp(Metric,'trunk+1branch+2branch_mean_surf')\n met = 81;\nelseif strcmp(Metric,'1branch+2branch_mean_surf')\n met = 82;\n\n % Minimum surface coverage metrics:\nelseif strcmp(Metric,'all_min_surf')\n met = 83;\nelseif strcmp(Metric,'trunk_min_surf')\n met = 84;\nelseif strcmp(Metric,'branch_min_surf')\n met = 85;\nelseif strcmp(Metric,'1branch_min_surf')\n met = 86;\nelseif strcmp(Metric,'2branch_min_surf')\n met = 87;\nelseif strcmp(Metric,'trunk+branch_min_surf')\n met = 88;\nelseif strcmp(Metric,'trunk+1branch_min_surf')\n met = 89;\nelseif strcmp(Metric,'trunk+1branch+2branch_min_surf')\n met = 90;\nelseif strcmp(Metric,'1branch+2branch_min_surf')\n met = 91;\n\n % Not given in right form, take the default option\nelse\n met = 1;\n Metric = 'all_mean_dis';\nend\n% End of function\nend\n\n\nfunction D = compute_metric_value(met,T,treedata,Data)\n\n\nif met <= 27 % cylinder distance metrics:\n D = mean(Data.CylDist(T,:),1);\n D(6:10) = 0.5*D(6:10); % Half the maximum values\nend\n\nif met < 10 % mean cylinder distance metrics:\n if met == 1 % all_mean_dis\n D = D(1);\n elseif met == 2 % trunk_mean_dis\n D = D(2);\n elseif met == 3 % branch_mean_dis\n D = D(3);\n elseif met == 4 % 1branch_mean_dis\n D = D(4);\n elseif met == 5 % 2branch_mean_dis\n D = D(5);\n elseif met == 6 % trunk+branch_mean_dis\n D = D(2)+D(3);\n elseif met == 7 % trunk+1branch_mean_dis\n D = D(2)+D(4);\n elseif met == 8 % trunk+1branch+2branch_mean_dis\n D = D(2)+D(4)+D(5);\n elseif met == 9 % 1branch+2branch_mean_dis\n D = D(4)+D(5);\n end\n\nelseif met < 19 % maximum cylinder distance metrics:\n if met == 10 % all_max_dis\n D = D(6);\n elseif met == 11 % trunk_max_dis\n D = D(7);\n elseif met == 12 % branch_max_dis\n D = D(8);\n elseif met == 13 % 1branch_max_dis\n D = D(9);\n elseif met == 14 % 2branch_max_dis\n D = D(10);\n elseif met == 15 % trunk+branch_max_dis\n D = D(7)+D(8);\n elseif met == 16 % trunk+1branch_max_dis\n D = D(7)+D(9);\n elseif met == 17 % trunk+1branch+2branch_max_dis\n D = D(7)+D(9)+D(10);\n elseif met == 18 % 1branch+2branch_max_dis\n D = D(9)+D(10);\n end\n\nelseif met < 28 % Mean plus maximum cylinder distance metrics:\n if met == 19 % all_mean+max_dis\n D = D(1)+D(6);\n elseif met == 20 % trunk_mean+max_dis\n D = D(2)+D(7);\n elseif met == 21 % branch_mean+max_dis\n D = D(3)+D(8);\n elseif met == 22 % 1branch_mean+max_dis\n D = D(4)+D(9);\n elseif met == 23 % 2branch_mean+max_dis\n D = D(5)+D(10);\n elseif met == 24 % trunk+branch_mean+max_dis\n D = D(2)+D(3)+D(7)+D(8);\n elseif 
met == 25 % trunk+1branch_mean+max_dis\n D = D(2)+D(4)+D(7)+D(9);\n elseif met == 26 % trunk+1branch+2branch_mean+max_dis\n D = D(2)+D(4)+D(5)+D(7)+D(9)+D(10);\n elseif met == 27 % 1branch+2branch_mean+max_dis\n D = D(4)+D(5)+D(9)+D(10);\n end\n\nelseif met < 39 % Standard deviation metrics:\n if met == 28 % tot_vol_std\n D = std(treedata(1,T));\n elseif met == 29 % trunk_vol_std\n D = std(treedata(2,T));\n elseif met == 30 % branch_vol_std\n D = std(treedata(3,T));\n elseif met == 31 % trunk+branch_vol_std\n D = std(treedata(2,T))+std(treedata(3,T));\n elseif met == 32 % tot_are_std\n D = std(treedata(12,T));\n elseif met == 33 % trunk_are_std\n D = std(treedata(10,T));\n elseif met == 34 % branch_are_std\n D = std(treedata(11,T));\n elseif met == 35 % trunk+branch_are_std\n D = std(treedata(10,T))+std(treedata(11,T));\n elseif met == 36 % trunk_len_std\n D = std(treedata(5,T));\n elseif met == 37 % branch_len_std\n D = std(treedata(6,T));\n elseif met == 38 % trunk+branch_len_std\n D = std(treedata(5,T))+std(treedata(6,T));\n elseif met == 39 % branch_num_std\n D = std(treedata(8,T));\n end\n\nelseif met < 56 % Branch order metrics:\n dis = max(Data.BranchOrdDis(T,:),[],1)-min(Data.BranchOrdDis(T,:),[],1);\n M = mean(Data.BranchOrdDis(T,:),1);\n I = M > 0;\n dis(I) = dis(I)./M(I);\n if met == 40 % branch_vol_ord3_mean\n D = mean(dis(1:3));\n elseif met == 41 % branch_are_ord3_mean\n D = mean(dis(7:9));\n elseif met == 42 % branch_len_ord3_mean\n D = mean(dis(13:15));\n elseif met == 43 % branch_num_ord3_mean\n D = mean(dis(19:21));\n elseif met == 44 % branch_vol_ord3_max\n D = max(dis(1:3));\n elseif met == 45 % branch_are_ord3_max\n D = max(dis(7:9));\n elseif met == 46 % branch_len_ord3_max\n D = max(dis(13:15));\n elseif met == 47 % branch_vol_ord3_max\n D = max(dis(19:21));\n elseif met == 48 % branch_vol_ord6_mean\n D = mean(dis(1:6));\n elseif met == 49 % branch_are_ord6_mean\n D = mean(dis(7:12));\n elseif met == 50 % branch_len_ord6_mean\n D = mean(dis(13:18));\n elseif met == 51 % branch_num_ord6_mean\n D = mean(dis(19:24));\n elseif met == 52 % branch_vol_ord6_max\n D = max(dis(1:6));\n elseif met == 53 % branch_are_ord6_max\n D = max(dis(7:12));\n elseif met == 54 % branch_len_ord6_max\n D = max(dis(13:18));\n elseif met == 55 % branch_vol_ord6_max\n D = max(dis(19:24));\n end\n\nelseif met < 68 % Cylinder diameter distribution metrics:\n dis = max(Data.CylDiaDis(T,:),[],1)-min(Data.CylDiaDis(T,:),[],1);\n M = mean(Data.CylDiaDis(T,:),1);\n I = M > 0;\n dis(I) = dis(I)./M(I);\n if met == 56 % cyl_vol_dia10_mean\n D = mean(dis(1:10));\n elseif met == 57 % cyl_are_dia10_mean\n D = mean(dis(21:30));\n elseif met == 58 % cyl_len_dia10_mean\n D = mean(dis(41:50));\n elseif met == 59 % cyl_vol_dia10_max\n D = max(dis(1:10));\n elseif met == 60 % cyl_are_dia10_max\n D = max(dis(21:30));\n elseif met == 61 % cyl_len_dia10_max\n D = max(dis(41:50));\n elseif met == 62 % cyl_vol_dia20_mean\n D = mean(dis(1:20));\n elseif met == 63 % cyl_are_dia20_mean\n D = mean(dis(21:40));\n elseif met == 64 % cyl_len_dia20_mean\n D = mean(dis(41:60));\n elseif met == 65 % cyl_vol_dia20_max\n D = max(dis(1:20));\n elseif met == 66 % cyl_are_dia20_max\n D = max(dis(21:40));\n elseif met == 67 % cyl_len_dia20_max\n D = max(dis(41:60));\n end\n\nelseif met < 74 % Cylinder zenith distribution metrics:\n dis = max(Data.CylZenDis(T,:),[],1)-min(Data.CylZenDis(T,:),[],1);\n M = mean(Data.CylZenDis(T,:),1);\n I = M > 0;\n dis(I) = dis(I)./M(I);\n if met == 68 % cyl_vol_zen_mean\n D = mean(dis(1:18));\n 
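% Rough illustration: for the distribution-based metrics above (branch order, cylinder diameter, zenith), dis is a per-bin relative spread over the models that share the same inputs, essentially\n%   spread = (max(v) - min(v)) ./ mean(v); % v = one distribution bin across those models\n% so minimising these metrics favours inputs whose repeated models agree with each other.\n 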
elseif met == 69 % cyl_are_zen_mean\n D = mean(dis(19:36));\n elseif met == 70 % cyl_len_zen_mean\n D = mean(dis(37:54));\n elseif met == 71 % cyl_vol_zen_max\n D = max(dis(1:18));\n elseif met == 72 % cyl_are_zen_max\n D = max(dis(19:36));\n elseif met == 73 % cyl_len_zen_max\n D = max(dis(37:54));\n end\n\nelseif met < 92 % Surface coverage metrics:\n D = 1-mean(Data.CylSurfCov(T,:),1);\n if met == 74 % all_mean_surf\n D = D(1);\n elseif met == 75 % trunk_mean_surf\n D = D(2);\n elseif met == 76 % branch_mean_surf\n D = D(3);\n elseif met == 77 % 1branch_mean_surf\n D = D(4);\n elseif met == 78 % 2branch_mean_surf\n D = D(5);\n elseif met == 79 % trunk+branch_mean_surf\n D = D(2)+D(3);\n elseif met == 80 % trunk+1branch_mean_surf\n D = D(2)+D(4);\n elseif met == 81 % trunk+1branch+2branch_mean_surf\n D = D(2)+D(4)+D(5);\n elseif met == 82 % 1branch+2branch_mean_surf\n D = D(4)+D(5);\n elseif met == 83 % all_min_surf\n D = D(6);\n elseif met == 84 % trunk_min_surf\n D = D(7);\n elseif met == 85 % branch_min_surf\n D = D(8);\n elseif met == 86 % 1branch_min_surf\n D = D(9);\n elseif met == 87 % 2branch_min_surf\n D = D(10);\n elseif met == 88 % trunk+branch_min_surf\n D = D(6)+D(7);\n elseif met == 89 % trunk+1branch_min_surf\n D = D(6)+D(8);\n elseif met == 90 % trunk+1branch+2branch_min_surf\n D = D(6)+D(9)+D(10);\n elseif met == 91 % 1branch+2branch_min_surf\n D = D(9)+D(10);\n end\nend\n% End of function\nend\n\n\nfunction Sensi = sensitivity_analysis(TreeDataAll,TreeId,Inputs,OptIn,NInputs)\n\n% Computes the sensitivity of tree attributes (e.g. total volume) to the\n% changes of input parameter, the PatchDiam parameters, values. The\n% sensitivity is normalized, i.e. the relative change of attribute value\n% (= max change in attribute value divided by the value with the optimal\n% inputs) is divided by the relative change of input parameter value. The\n% sensitivity is also expressed as percentage, i.e. multiplied by 100. The\n% sensitivity is computed relative PatchDiam1, PatchDiam2Min, and\n% PatchDiam2Max. The sensitivity is computed only from the attributes with\n% the input parameter values the closest to the optimal value. 
This way we\n% get the local sensitivity in the neighborhood of the optimal input.\n%\n% Output:\n% Sensi 3D-array (#trees,#attributes,#inputs)\n\nTreeIds = unique(TreeId(:,1)); % Unique tree IDs\nnt = length(TreeIds); % number of trees\nA = [2 3; 1 3; 1 2]; % Keep other two inputs constant and let one varie\nSensi = zeros(nt,size(TreeDataAll,3),3); % initialization of the output\nfor t = 1:nt % trees\n if NInputs(t) > 1\n D = squeeze(TreeDataAll(t,1:NInputs(t),:))'; % Select the attributes for the tree\n In = squeeze(Inputs(t,1:NInputs(t),:)); % Select the inputs for the tree\n n = size(In,1); % number of different input-combinations\n I = all(In == OptIn(t,1:3),2); % Which data are with the optimal inputs\n ind = (1:1:n)';\n I = ind(I);\n for i = 1:3 % inputs\n if length(unique(In(:,i))) > 1\n dI = abs(max(In(:,i),[],2)-OptIn(t,i));\n dImin = min(dI(dI > 0)); % the minimum nonzero absolute change in inputs\n dI = dImin/OptIn(t,i); % relative change in the attributes\n K1 = abs(max(In(:,i),[],2)-min(OptIn(t,i),[],2)) < dImin+0.0001;\n K = K1 & abs(max(In(:,i),[],2)-min(OptIn(t,i),[],2)) > 0.0001;\n K = ind(K); % the inputs the closest to the optimal input\n J = all(In(K,A(i,:)) == OptIn(t,A(i,:)),2);\n J = K(J); % input i the closest to the optimal and the other two equal the optimal\n dD = max(abs(D(:,J)-D(:,I)),[],2);\n dD = dD./D(:,I); % relative change in the input\n d = dD/dI*100; % relative sensitivity as a percentage\n Sensi(t,:,i) = round(100*d)/100;\n end\n end\n end\nend\n% End of function\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "estimate_precision.m", "ext": ".m", "path": "TreeQSM-master/src/estimate_precision.m", "size": 5602, "source_encoding": "utf_8", "md5": "7781426d9cbcdfb71f74a079141e9b6b", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction [TreeData,OptQSMs,OptQSM] = ...\n estimate_precision(QSMs,NewQSMs,TreeData,OptModels,savename)\n\n% ---------------------------------------------------------------------\n% ESTIMATE_PRECISION.M Combines additional QSMs with optimal inputs\n% with previously generated QSMs to estimate the\n% precision (standard deviation) better.\n%\n% Version 1.1.0\n% Latest update 10 May 2022\n%\n% Copyright (C) 2016-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Uses models with the same inputs to estimate the precision (standard\n% deviation) of the results. 
Has two sets of models as its inputs:\n% 1) QSMs can contain models with many different input parameters for each tree\n% and OptModels contain the indexes of the models that are used here (\"optimal\n% models\"); 2) NewQSMs contains only models with the optimal inputs.\n%\n% Inputs:\n% QSMs Contain all the models, possibly from multiple trees\n% NewQSMs Contains the additional models with optimal inputs, for all trees\n% TreeData Similar structure array as the \"treedata\" in QSMs but now each\n% single-number attribute contains the mean and std computed\n% from the models with the optimal inputs, and the\n% sensitivities for PatchDiam-parameters\n% OptModels Indexes of the optimal models for each tree in \"QSMs\"\n% savename Optional input, name string specifying the name of the saved\n% file containing the outputs\n% Outputs:\n% TreeData Updated with new mean and std computed from all the QSMs\n% with the optimal inputs\n% OptQSMs Contains all the models with the optimal inputs, for all trees\n% OptQSM The best model (minimum point-model distance) among the models\n% with the optimal inputs, for all trees\n% ---------------------------------------------------------------------\n\n% Changes from version 1.0.2 to 1.1.0, 10 May 2022:\n% 1) Added \"TreeData\", the output of \"select_optimum\", as an input, and now\n% it is updated\n\n% Changes from version 1.0.1 to 1.0.2, 26 Nov 2019:\n% 1) Added the \"name\" of the point cloud from the inputs.name to the output\n% TreeData as a field. Also now displays the name together with the tree\n% number.\n\n% Changes from version 1.0.0 to 1.0.1, 08 Oct 2019:\n% 1) Small change for how the output \"TreeData\" is initialised\n\n\n%% Reconstruct the outputs\nOptQSMs = QSMs(vertcat(OptModels{:,1})); % Optimal models from the optimization process\nOptQSMs = [OptQSMs NewQSMs]; % Combine all the optimal QSMs\n\nm = max(size(OptQSMs)); % number of models\nIndAll = (1:1:m)';\n% Find the first non-empty model\ni = 1;\nwhile isempty(OptQSMs(i).cylinder)\n i = i+1;\nend\n% Determine how many single-number attributes there are in treedata\nnames = fieldnames(OptQSMs(i).treedata);\nn = 1;\nwhile numel(OptQSMs(i).treedata.(names{n})) == 1\n n = n+1;\nend\nn = n-1;\n\ntreedata = zeros(n,m); % Collect all single-number tree attributes from all models\nTreeId = zeros(m,1); % Collect tree and model indexes from all models\nDist = zeros(m,1); % Collect the distances\nKeep = true(m,1); % Non-empty models\nfor i = 1:m\n if ~isempty(OptQSMs(i).cylinder)\n for j = 1:n\n treedata(j,i) = OptQSMs(i).treedata.(names{j});\n end\n TreeId(i) = OptQSMs(i).rundata.inputs.tree;\n Dist(i) = OptQSMs(i).pmdistance.mean;\n else\n Keep(i) = false;\n end\nend\ntreedata = treedata(:,Keep);\nTreeId = TreeId(Keep,:);\nDist = Dist(Keep);\nIndAll = IndAll(Keep);\nTreeIds = unique(TreeId);\nnt = length(TreeIds); % number of trees\n\n% Compute the means and standard deviations\nOptModel = zeros(nt,1);\nDataM = zeros(n,nt);\nDataS = zeros(n,nt);\nfor t = 1:nt\n I = TreeId == TreeIds(t);\n ind = IndAll(I);\n dist = vertcat(Dist(ind));\n [~,J] = min(dist);\n OptModel(t) = ind(J);\n DataM(:,t) = mean(treedata(:,ind),2);\n DataS(:,t) = std(treedata(:,ind),[],2);\nend\nOptQSM = OptQSMs(OptModel);\nDataCV = DataS./DataM*100;\n\n%% Display some data about optimal models\n% Decrease the number of non-zero decimals\nfor j = 1:nt\n DataM(:,j) = change_precision(DataM(:,j));\n DataS(:,j) = change_precision(DataS(:,j));\n DataCV(:,j) = change_precision(DataCV(:,j));\nend\n\n% Display optimal inputs, model 
and attributes for each tree\nfor t = 1:nt\n disp([' Tree: ',num2str(t),', ',OptQSM(t).rundata.inputs.name])\n disp(' Attributes (mean, std, CV(%)):')\n for i = 1:n\n str = ([' ',names{i},': ',num2str([DataM(i,t) DataS(i,t) DataCV(i,t)])]);\n disp(str)\n end\n disp('------')\nend\n\n%% Generate TreeData structure for optimal models\n%TreeData = vertcat(OptQSM(:).treedata);\nfor t = 1:nt\n for i = 1:n\n TreeData(t).(names{i})(1:2) = [DataM(i,t) DataS(i,t)];\n end\n TreeData(t).name = OptQSM(t).rundata.inputs.name;\nend\n\n%% Save results\nif nargin == 5\n str = ['results/OptimalQSMs_',savename];\n save(str,'TreeData','OptQSMs','OptQSM')\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "treeqsm.m", "ext": ".m", "path": "TreeQSM-master/src/treeqsm.m", "size": 19257, "source_encoding": "utf_8", "md5": "2fdd2b10f8257521a3ebf4f33ec31125", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\n\nfunction QSM = treeqsm(P,inputs)\n\n% ---------------------------------------------------------------------\n% TREEQSM.M Reconstructs quantitative structure tree models from point \n% clouds containing a tree.\n%\n% Version 2.4.1\n% Latest update 2 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% INPUTS:\n%\n% P (Filtered) point cloud, (m_points x 3)-matrix, the rows\n% give the coordinates of the points.\n%\n% inputs Structure field defining reconstruction parameters.\n% Created with the \"create_input.m\" script. Contains \n% the following main fields:\n% PatchDiam1 Patch size of the first uniform-size cover\n%\n% PatchDiam2Min Minimum patch size of the cover sets in the second cover\n%\n% PatchDiam2Max Maximum cover set size in the stem's base in the \n% second cover\n%\n% BallRad1 Ball size used for the first cover generation\n%\n% BallRad2 Maximum ball radius used for the second cover generation\n%\n% nmin1 Minimum number of points in BallRad1-balls, \n% default value is 3.\n%\n% nmin2 Minimum number of points in BallRad2-balls, \n% default value is 1.\n%\n% OnlyTree If \"1\", the point cloud contains only points from the \n% tree and the trunk's base is defined as the lowest \n% part of the point cloud. Default value is \"1\". \n%\n% Tria If \"1\", tries to make triangulation for the stem up \n% to first main branch. Default value is \"0\". \n%\n% Dist If \"1\", compute the point-model distances. 
\n% Default value is \"1\".\n%\n% MinCylRad Minimum cylinder radius, used particularly in the \n% taper corrections\n%\n% ParentCor If \"1\", child branch cylinders radii are always \n% smaller than the parent branche's cylinder radii\n%\n% TaperCor If \"1\", use partially linear (stem) and parabola \n% (branches) taper corrections\n%\n% GrowthVolCor If \"1\", use growth volume correction introduced \n% by Jan Hackenberg\n%\n% GrowthVolFac fac-parameter of the growth volume approach, \n% defines upper and lower bound\n%\n% name Name string for saving output files and name for the\n% model in the output object\n% \n% tree Numerical id/index given to the tree\n% \n% model Model number of the tree, e.g. with the same inputs\n%\n% savemat If \"1\", saves the output struct QSM as a matlab-file\n% into \\result folder \n%\n% savetxt If \"1\", saves the models in .txt-files into \n% \\result folder \n%\n% plot Defines what is plotted during the reconstruction:\n% 2 = same as below plus distributions\n% 1 = plots the segmented point cloud and QSMs\n% 0 = plots nothing\n%\n% disp Defines what is displayed during the reconstruction:\n% 2 = same as below plus times and tree attributes; \n% 1 = display name, parameters and fit metrics;\n% 0 = display only the name\n% ---------------------------------------------------------------------\n% OUTPUT:\n%\n% QSM Structure array with the following fields:\n% cylinder Cylinder data \n% branch Branch data\n% treedata Tree attributes \n% rundata Information about the modelling run\n% pmdistances Point-to-model distance statistics\n% triangulation Triangulation of the stem (if inputs.Tria = 1)\n% ---------------------------------------------------------------------\n\n% cylinder (structure-array) contains the following fields:\n% radius\n% length\n% start xyz-coordinates of the starting point\n% axis xyz-component of the cylinder axis\n% parent index (in this file) of the parent cylinder\n% extension index (in this file) of the extension cylinder\n% added is cylinder added after normal cylinder fitting (= 1 if added)\n% UnmodRadius unmodified radius of the cylinder\n% branch branch (index in the branch structure array) of the cylinder\n% BranchOrder branch order of the branch the cylinder belongs\n% PositionInBranch\trunning number of the cylinder in the branch it belongs\n%\n% branch (structure-array) contains the following fields:\n% order branch order (0 for trunk, 1 for branches originating from \n% the trunk, etc.)\n% parent\tindex (in this file) of the parent branch\n% volume\tvolume (L) of the branch (sum of the volumes of the cylinders \n% forming the branch)\n% length\tlength (m) of the branch (sum of the lengths of the cylinders)\n% angle branching angle (deg) (angle between the branch and its parent \n% at the branching point)\n% height height (m) of the base of the branch\n% azimuth azimuth (deg) of the branch at the base \n% diameter diameter (m) of the branch at the base\n%\n% treedata (structure-array) contains the following fields:\n% TotalVolume\n% TrunkVolume\n% BranchVolume\n% TreeHeight\n% TrunkLength\n% BranchLength\n% NumberBranches Total number of branches\n% MaxBranchOrder \n% TotalArea \n% DBHqsm From the cylinder of the QSM at the right heigth\n% DBHcyl From the cylinder fitted to the section 1.1-1.5m\n% location (x,y,z)-coordinates of the base of the tree\n% StemTaper Stem taper function/curve from the QSM\n% VolumeCylDiam Distribution of the total volume in diameter classes\n% LengthCylDiam Distribution of the total length in diameter 
classes\n% VolumeBranchOrder Branch volume per branching order\n% LengthBranchOrder Branch length per branching order\n% NumberBranchOrder Number of branches per branching order\n\n% treedata from mixed model (cylinders and triangulation) contains also \n% the following fields:\n% DBHtri Computed from triangulation model\n% TriaTrunkVolume Triangulated trunk volume (up to first branch)\n% MixTrunkVolume Mixed trunk volume, bottom (triang.) + top (cylinders)\n% MixTotalVolume Mixed total volume, mixed trunk volume + branch volume\n% TriaTrunkLength Triangulated trunk length\n%\n% pmdistances (structure-array) contains the following fields (and others):\n% CylDists Average point-model distance for each cylinder\n% median median of CylDist for all, stem, 1branch, 2branch cylinder\n% mean mean of CylDist for all, stem, 1branch, 2branch cylinder\n% max max of CylDist for all, stem, 1branch, 2branch cylinder\n% std standard dev. of CylDist for all, stem, 1branch, 2branch cylinder\n% \n% rundata (structure-array) contains the following fields:\n% inputs The input parameters in a structure-array\n% time Computation times for each step\n% date Starting and stopping dates (year,month,day,hour,minute,second) \n% of the computation\n% \n% triangulation (structure-array) contains the following fields:\n% vert Vertices (xyz-coordinates) of the triangulation\n% facet Facet information\n% fvd Color information for plotting the model\n% volume Volume enclosed by the triangulation\n% bottom Z-coordinate of the bottom plane of the triangulation\n% top Z-coordinate of the top plane of the triangulation\n% triah Height of the triangles\n% triah Width of the triangles\n% cylind Cylinder index in the stem where the triangulation stops\n% ---------------------------------------------------------------------\n\n% Changes from version 2.4.0 to 2.4.1, 2 May 2022: \n% Minor update. New filtering options, new code (\"define_input\") for \n% selecting automatically PatchDiam and BallRad parameter values for \n% the optimization process, added sensitivity estimates of the results, \n% new smoother plotting of QSMs, corrected some bugs, rewrote some \n% functions (e.g. \"branches\").\n% Particular changes in treeqsm.m file:\n% 1) Deleted the remove of the field \"ChildCyls\" and \"CylsInSegment\".\n\n% Changes from version 2.3.2 to 2.4.0, 17 Aug 2020: \n% First major update. Cylinder fitting process and the taper correction \n% has changed. The fitting is adaptive and no more “lcyl” and “FilRad” \n% parameters. Treedata has many new outputs: Branch and cylinder \n% distributions; surface areas; crown dimensions. More robust triangulation \n% of stem. Branch, cylinder and triangulation structures have new fields. 
\n% More optimisation metrics, more plots of the results and more plotting \n% functions.\n% Particular changes in treeqsm.m file:\n% 1) Removed the for-loops for lcyl and FilRad.\n% 2) Changes what is displayed about the quality of QSMs \n% (point-model-distances and surface coverage) during reconstruction\n% 3) Added version number to rundata\n% 4) Added remove of the field \"ChildCyls\" and \"CylsInSegment\" of \"cylinder\"\n% from \"branches\" to \"treeqsm\".\n\n% Changes from version 2.3.1 to 2.3.2, 2 Dec 2019: \n% Small changes in the subfunction to allow trees without branches\n\n% Changes from version 2.3.0 to 2.3.1, 8 Oct 2019: \n% 1) Some changes in the subfunctions, particularly in \"cylinders\" and \n% \"tree_sets\"\n% 2) Changed how \"treeqsm\" displays things during the running of the\n% function\n\n\n%% Code starts -->\nTime = zeros(11,1); % Save computation times for modelling steps\nDate = zeros(2,6); % Starting and stopping dates of the computation\nDate(1,:) = clock;\n% Names of the steps to display\nname = ['Cover sets ';\n 'Tree sets ';\n 'Initial segments';\n 'Final segments ';\n 'Cylinders ';\n 'Branch & data ';\n 'Distances '];\n \nif inputs.disp > 0\n disp('---------------')\n disp([' ',inputs.name,', Tree = ',num2str(inputs.tree),...\n ', Model = ',num2str(inputs.model)])\nend\n\n% Input parameters\nPatchDiam1 = inputs.PatchDiam1;\nPatchDiam2Min = inputs.PatchDiam2Min;\nPatchDiam2Max = inputs.PatchDiam2Max;\nBallRad1 = inputs.BallRad1; \nBallRad2 = inputs.BallRad2; \nnd = length(PatchDiam1);\nni = length(PatchDiam2Min);\nna = length(PatchDiam2Max);\n\nif inputs.disp == 2\n % Display parameter values\n disp([' PatchDiam1 = ',num2str(PatchDiam1)])\n disp([' BallRad1 = ',num2str(BallRad1)])\n disp([' PatchDiam2Min = ',num2str(PatchDiam2Min)])\n disp([' PatchDiam2Max = ',num2str(PatchDiam2Max)])\n disp([' BallRad2 = ',num2str(BallRad2)])\n disp([' Tria = ',num2str(inputs.Tria),...\n ', OnlyTree = ',num2str(inputs.OnlyTree)])\n disp('Progress:')\nend\n\n%% Make the point cloud into proper form\n% only 3-dimensional data\nif size(P,2) > 3\n P = P(:,1:3);\nend\n% Only double precision data\nif ~isa(P,'double')\n P = double(P);\nend\n\n%% Initialize the output file\nQSM = struct('cylinder',{},'branch',{},'treedata',{},'rundata',{},...\n 'pmdistance',{},'triangulation',{});\n\n%% Reconstruct QSMs\nnmodel = 0;\nfor h = 1:nd\n tic\n Inputs = inputs;\n Inputs.PatchDiam1 = PatchDiam1(h);\n Inputs.BallRad1 = BallRad1(h);\n if nd > 1 && inputs.disp >= 1\n disp(' -----------------')\n disp([' PatchDiam1 = ',num2str(PatchDiam1(h))]);\n disp(' -----------------')\n end\n \n %% Generate cover sets\n cover1 = cover_sets(P,Inputs);\n Time(1) = toc;\n if inputs.disp == 2\n display_time(Time(1),Time(1),name(1,:),1)\n end\n \n %% Determine tree sets and update neighbors\n [cover1,Base,Forb] = tree_sets(P,cover1,Inputs);\n Time(2) = toc-Time(1);\n if inputs.disp == 2\n display_time(Time(2),sum(Time(1:2)),name(2,:),1)\n end\n \n %% Determine initial segments\n segment1 = segments(cover1,Base,Forb);\n Time(3) = toc-sum(Time(1:2));\n if inputs.disp == 2\n display_time(Time(3),sum(Time(1:3)),name(3,:),1)\n end\n \n %% Correct segments\n % Don't remove small segments and add the modified base to the segment\n segment1 = correct_segments(P,cover1,segment1,Inputs,0,1,1);\n Time(4) = toc-sum(Time(1:3));\n if inputs.disp == 2\n display_time(Time(4),sum(Time(1:4)),name(4,:),1)\n end\n \n for i = 1:na\n % Modify inputs\n Inputs.PatchDiam2Max = PatchDiam2Max(i);\n Inputs.BallRad2 = BallRad2(i);\n if 
na > 1 && inputs.disp >= 1\n disp(' -----------------')\n disp([' PatchDiam2Max = ',num2str(PatchDiam2Max(i))]);\n disp(' -----------------')\n end\n for j = 1:ni\n tic\n % Modify inputs\n Inputs.PatchDiam2Min = PatchDiam2Min(j);\n if ni > 1 && inputs.disp >= 1\n disp(' -----------------')\n disp([' PatchDiam2Min = ',num2str(PatchDiam2Min(j))]);\n disp(' -----------------')\n end\n \n %% Generate new cover sets\n % Determine relative size of new cover sets and use only tree points\n RS = relative_size(P,cover1,segment1);\n \n % Generate new cover\n cover2 = cover_sets(P,Inputs,RS);\n Time(5) = toc;\n if inputs.disp == 2\n display_time(Time(5),sum(Time(1:5)),name(1,:),1)\n end\n \n %% Determine tree sets and update neighbors\n [cover2,Base,Forb] = tree_sets(P,cover2,Inputs,segment1);\n Time(6) = toc-Time(5);\n if inputs.disp == 2\n display_time(Time(6),sum(Time(1:6)),name(2,:),1)\n end\n \n %% Determine segments\n segment2 = segments(cover2,Base,Forb);\n Time(7) = toc-sum(Time(5:6));\n if inputs.disp == 2\n display_time(Time(7),sum(Time(1:7)),name(3,:),1)\n end\n \n %% Correct segments\n % Remove small segments and the extended bases.\n segment2 = correct_segments(P,cover2,segment2,Inputs,1,1,0);\n Time(8) = toc-sum(Time(5:7));\n if inputs.disp == 2\n display_time(Time(8),sum(Time(1:8)),name(4,:),1)\n end\n \n %% Define cylinders\n cylinder = cylinders(P,cover2,segment2,Inputs);\n Time(9) = toc;\n if inputs.disp == 2\n display_time(Time(9),sum(Time(1:9)),name(5,:),1)\n end\n \n if ~isempty(cylinder.radius)\n %% Determine the branches\n branch = branches(cylinder);\n \n %% Compute (and display) model attributes\n T = segment2.segments{1};\n T = vertcat(T{:});\n T = vertcat(cover2.ball{T});\n trunk = P(T,:); % point cloud of the trunk\n % Compute attributes and distibutions from the cylinder model\n % and possibly some from a triangulation\n [treedata,triangulation] = tree_data(cylinder,branch,trunk,inputs);\n Time(10) = toc-Time(9);\n if inputs.disp == 2\n display_time(Time(10),sum(Time(1:10)),name(6,:),1)\n end\n \n %% Compute point model distances\n if inputs.Dist\n pmdis = point_model_distance(P,cylinder);\n \n % Display the mean point-model distances and surface coverages\n % for stem, branch, 1branc and 2branch cylinders\n if inputs.disp >= 1\n D = [pmdis.TrunkMean pmdis.BranchMean ...\n pmdis.Branch1Mean pmdis.Branch2Mean];\n D = round(10000*D)/10;\n \n T = cylinder.branch == 1;\n B1 = cylinder.BranchOrder == 1;\n B2 = cylinder.BranchOrder == 2;\n SC = 100*cylinder.SurfCov;\n S = [mean(SC(T)) mean(SC(~T)) mean(SC(B1)) mean(SC(B2))];\n S = round(10*S)/10;\n \n disp(' ----------')\n str = [' PatchDiam1 = ',num2str(PatchDiam1(h)), ...\n ', PatchDiam2Max = ',num2str(PatchDiam2Max(i)), ...\n ', PatchDiam2Min = ',num2str(PatchDiam2Min(j))];\n disp(str)\n str = [' Distances and surface coverages for ',...\n 'trunk, branch, 1branch, 2branch:'];\n disp(str)\n str = [' Average cylinder-point distance: '...\n num2str(D(1)),' ',num2str(D(2)),' ',...\n num2str(D(3)),' ',num2str(D(4)),' mm'];\n disp(str)\n str = [' Average surface coverage: '...\n num2str(S(1)),' ',num2str(S(2)),' ',...\n num2str(S(3)),' ',num2str(S(4)),' %'];\n disp(str)\n disp(' ----------')\n end\n Time(11) = toc-sum(Time(9:10));\n if inputs.disp == 2\n display_time(Time(11),sum(Time(1:11)),name(7,:),1)\n end\n end\n \n %% Reconstruct the output \"QSM\"\n Date(2,:) = clock;\n Time(12) = sum(Time(1:11));\n clear qsm\n qsm = struct('cylinder',{},'branch',{},'treedata',{},'rundata',{},...\n 'pmdistance',{},'triangulation',{});\n 
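% Illustrative sketch (hypothetical usage, not part of the reconstruction itself): each completed model is packed into this one-element struct and appended to the output array QSM below, one entry per tested PatchDiam combination, so a caller could compare the candidates with e.g.\n%   volumes = arrayfun(@(q) q.treedata.TotalVolume, QSM);\n% using the treedata fields documented at the top of this file.\n 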
qsm(1).cylinder = cylinder;\n qsm(1).branch = branch;\n qsm(1).treedata = treedata;\n qsm(1).rundata.inputs = Inputs;\n qsm(1).rundata.time = single(Time);\n qsm(1).rundata.date = single(Date);\n qsm(1).rundata.version = '2.4.1';\n if inputs.Dist\n qsm(1).pmdistance = pmdis;\n end\n if inputs.Tria\n qsm(1).triangulation = triangulation;\n end\n nmodel = nmodel+1;\n QSM(nmodel) = qsm;\n \n %% Save the output into results-folder\n % matlab-format (.mat)\n if inputs.savemat\n str = [inputs.name,'_t',num2str(inputs.tree),'_m',...\n num2str(inputs.model)];\n save(['results/QSM_',str],'QSM')\n end\n % text-format (.txt)\n if inputs.savetxt\n if nd > 1 || na > 1 || ni > 1\n str = [inputs.name,'_t',num2str(inputs.tree),'_m',...\n num2str(inputs.model)];\n if nd > 1\n str = [str,'_D',num2str(PatchDiam1(h))];\n end\n if na > 1\n str = [str,'_DA',num2str(PatchDiam2Max(i))];\n end\n if ni > 1\n str = [str,'_DI',num2str(PatchDiam2Min(j))];\n end\n else\n str = [inputs.name,'_t',num2str(inputs.tree),'_m',...\n num2str(inputs.model)];\n end\n save_model_text(qsm,str)\n end\n\n %% Plot models and segmentations\n if inputs.plot >= 1\n if inputs.Tria\n plot_models_segmentations(P,cover2,segment2,cylinder,trunk,...\n triangulation)\n else\n plot_models_segmentations(P,cover2,segment2,cylinder)\n end\n if nd > 1 || na > 1 || ni > 1\n pause\n end\n end\n end\n end\n end\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "plot_models_segmentations.m", "ext": ".m", "path": "TreeQSM-master/src/plotting/plot_models_segmentations.m", "size": 3161, "source_encoding": "utf_8", "md5": "5a2123902cb06456971999fd9aee3156", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
If not, see .\n\nfunction plot_models_segmentations(P,cover,segment,cylinder,trunk,triangulation)\n\n% ---------------------------------------------------------------------\n% PLOT_MODELS_SEGMENTATION.M Plots the segmented point clouds and\n% cylinder/triangulation models\n%\n% Version 1.1.0\n% Latest update 13 July 2020\n%\n% Copyright (C) 2013-2020 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Inputs:\n% P Point cloud\n% cover cover-structure array\n% segment segment-structure array\n% cylinder cylinder-structure array\n% trunk point cloud of the trunk\n% triangulation triangulation-structure array\n\n% Changes from version 1.0.0 to 1.1.0, 13 July 2020:\n% 1) plots now figure 1 and 2 with two subplots; in the first the colors \n% are based on branching order and in the second they are based on\n% branch\n\n%% figure 1: branch-segmented point cloud \n% colors denote the branching order and branches\nfigure(1)\nsubplot(1,2,1)\nplot_branch_segmentation(P,cover,segment,'order')\nsubplot(1,2,2)\nplot_branch_segmentation(P,cover,segment,'branch')\n\n%% figure 2: cylinder model \n% colors denote the branching order and branches\nSta = cylinder.start;\nP = P-Sta(1,:);\nif nargin > 5\n trunk = trunk-Sta(1,:);\n Vert = double(triangulation.vert);\n Vert = Vert-Sta(1,:);\nend\nSta = Sta-Sta(1,:);\ncylinder.start = Sta;\nfigure(2)\nsubplot(1,2,1)\nplot_cylinder_model(cylinder,'order',2,10)\nsubplot(1,2,2)\nplot_cylinder_model(cylinder,'branch',2,10)\n\n%% figure 3, segmented point cloud and cylinder model\nplot_branch_segmentation(P,cover,segment,'order',3,1)\nhold on\nplot_cylinder_model(cylinder,'order',3,10,0.7)\nhold off\n\nif nargin > 4 \n %% figure 4, triangulation model (bottom) and cylinder model (top) \n % of the stem\n Facets = double(triangulation.facet);\n CylInd = triangulation.cylind;\n fvd = triangulation.fvd;\n if max(size(Vert)) > 5\n Bran = cylinder.branch;\n nc = size(Bran,1);\n ind = (1:1:nc)';\n C = ind(Bran == 1);\n n = size(trunk,1);\n I = logical(round(0.55*rand(n,1)));\n figure(4)\n point_cloud_plotting(trunk(I,:),4,3)\n patch('Vertices',Vert,'Faces',Facets,'FaceVertexCData',fvd,...\n 'FaceColor','flat')\n alpha(1)\n hold on\n plot_cylinder_model(cylinder,'order',4,20,1,(CylInd:C(end)))\n axis equal\n hold off\n else\n disp('No triangulation model generated!')\n end\nend"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "cubical_partition.m", "ext": ".m", "path": "TreeQSM-master/src/tools/cubical_partition.m", "size": 4085, "source_encoding": "utf_8", "md5": "5c56478a02bcbdc77d66b72d7288c317", "text": "% This file is part of TREEQSM.\r\n%\r\n% TREEQSM is free software: you can redistribute it and/or modify\r\n% it under the terms of the GNU General Public License as published by\r\n% the Free Software Foundation, either version 3 of the License, or\r\n% (at your option) any later version.\r\n%\r\n% TREEQSM is distributed in the hope that it will be useful,\r\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n% GNU General Public License for more details.\r\n%\r\n% You should have received a copy of the GNU General Public License\r\n% along with TREEQSM. 
If not, see .\r\n\r\nfunction [Partition,CubeCoord,Info,Cubes] = cubical_partition(P,EL,NE)\r\n\r\n% ---------------------------------------------------------------------\r\n% CUBICAL_PARTITION.M Partitions the point cloud into cubes.\r\n%\r\n% Version 1.1.0\r\n% Latest update 6 Oct 2021\r\n%\r\n% Copyright (C) 2015-2021 Pasi Raumonen\r\n% ---------------------------------------------------------------------\r\n\r\n% Inputs:\r\n% P Point cloud, (n_points x 3)-matrix\r\n% EL Length of the cube edges\r\n% NE Number of empty edge layers\r\n%\r\n% Outputs:\r\n% Partition Point cloud partitioned into cubical cells,\r\n% (nx x ny x nz)-cell, where nx,ny,nz are the number\r\n% of cubes in x,y,z-directions, respectively. If \"Cubes\"\r\n% is outputed, then \"Partition\" is (n x 1)-cell, where each\r\n% cell corresponds to a nonempty cube.\r\n%\r\n% CC (n_points x 3)-matrix whose rows are the cube coordinates\r\n% of each point: x,y,z-coordinates\r\n% Info The minimum coordinate values and number of cubes in each\r\n% coordinate direction\r\n% Cubes (Optional) (nx x ny x nz)-matrix (array), each nonzero\r\n% element indicates that its cube is nonempty and the\r\n% number indicates which cell in \"Partition\" contains the\r\n% points of the cube.\r\n% ---------------------------------------------------------------------\r\n\r\n% Changes from version 1.0.0 to 1.1.0, 6 Oct 2021:\r\n% 1) Changed the determinationa EL and NE so that the while loop don't\r\n% continue endlessly in some cases\r\n\r\nif nargin == 2\r\n NE = 3;\r\nend\r\n\r\n% The vertices of the big cube containing P\r\nMin = double(min(P));\r\nMax = double(max(P));\r\n\r\n% Number of cubes with edge length \"EdgeLength\" in the sides\r\n% of the big cube\r\nN = double(ceil((Max-Min)/EL)+2*NE+1);\r\nt = 0;\r\nwhile t < 10 && 8*N(1)*N(2)*N(3) > 4e9\r\n t = t+1;\r\n EL = 1.1*EL;\r\n N = double(ceil((Max-Min)/EL)+2*NE+1);\r\nend\r\nif 8*N(1)*N(2)*N(3) > 4e9\r\n NE = 3;\r\n N = double(ceil((Max-Min)/EL)+2*NE+1);\r\nend\r\nInfo = [Min N EL NE];\r\n\r\n% Calculates the cube-coordinates of the points\r\nCubeCoord = floor([P(:,1)-Min(1) P(:,2)-Min(2) P(:,3)-Min(3)]/EL)+NE+1;\r\n\r\n% Sorts the points according a lexicographical order\r\nLexOrd = [CubeCoord(:,1) CubeCoord(:,2)-1 CubeCoord(:,3)-1]*[1 N(1) N(1)*N(2)]';\r\nCubeCoord = uint16(CubeCoord);\r\n[LexOrd,SortOrd] = sort(LexOrd);\r\nSortOrd = uint32(SortOrd);\r\nLexOrd = uint32(LexOrd);\r\n\r\nif nargout <= 3\r\n % Define \"Partition\"\r\n Partition = cell(N(1),N(2),N(3));\r\n np = size(P,1); % number of points\r\n p = 1; % The index of the point under comparison\r\n while p <= np\r\n t = 1;\r\n while (p+t <= np) && (LexOrd(p) == LexOrd(p+t))\r\n t = t+1;\r\n end\r\n q = SortOrd(p);\r\n Partition{CubeCoord(q,1),CubeCoord(q,2),CubeCoord(q,3)} = SortOrd(p:p+t-1);\r\n p = p+t;\r\n end\r\n\r\nelse\r\n nc = size(unique(LexOrd),1);\r\n\r\n % Define \"Partition\"\r\n Cubes = zeros(N(1),N(2),N(3),'uint32');\r\n Partition = cell(nc,1);\r\n np = size(P,1); % number of points\r\n p = 1; % The index of the point under comparison\r\n c = 0;\r\n while p <= np\r\n t = 1;\r\n while (p+t <= np) && (LexOrd(p) == LexOrd(p+t))\r\n t = t+1;\r\n end\r\n q = SortOrd(p);\r\n c = c+1;\r\n Partition{c,1} = SortOrd(p:p+t-1);\r\n Cubes(CubeCoord(q,1),CubeCoord(q,2),CubeCoord(q,3)) = c;\r\n p = p+t;\r\n end\r\nend\r\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "connected_components.m", "ext": ".m", "path": "TreeQSM-master/src/tools/connected_components.m", "size": 5720, 
"source_encoding": "utf_8", "md5": "533338e9122eb5441ad9d27f943075fc", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction [Components,CompSize] = connected_components(Nei,Sub,MinSize,Fal)\n\n% ---------------------------------------------------------------------\n% CONNECTED_COMPONENTS.M Determines the connected components of cover\n% sets using their neighbour-relation\n%\n% Version 1.1\n% Latest update 16 Aug 2017\n%\n% Copyright (C) 2013-2017 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Determines connected components of the subset of cover sets defined\n% by \"Sub\" such that each component has at least \"MinSize\"\n% number of cover sets.\n%\n% Inputs:\n% Nei Neighboring cover sets of each cover set, (n_sets x 1)-cell\n% Sub Subset whose components are determined,\n% length(Sub) < 2 means no subset and thus the whole point cloud\n% \"Sub\" may be also a vector of cover set indexes in the subset\n% or a logical (n_sets)-vector, where n_sets is the number of\n% all cover sets\n% MinSize Minimum number of cover sets in an acceptable component\n% Fal Logical false vector for the cover sets\n%\n% Outputs:\n% Components Connected components, (n_comp x 1)-cell\n% CompSize Number of sets in the components, (n_comp x 1)-vector\n\nif length(Sub) <= 3 && ~islogical(Sub) && Sub(1) > 0\n % Very small subset, i.e. 
at most 3 cover sets\n n = length(Sub);\n if n == 1\n Components = cell(1,1);\n Components{1} = uint32(Sub);\n CompSize = 1;\n elseif n == 2\n I = Nei{Sub(1)} == Sub(2);\n if any(I)\n Components = cell(1,1);\n Components{1} = uint32((Sub));\n CompSize = 1;\n else\n Components = cell(2,1);\n Components{1} = uint32(Sub(1));\n Components{2} = uint32(Sub(2));\n CompSize = [1 1];\n end\n elseif n == 3\n I = Nei{Sub(1)} == Sub(2);\n J = Nei{Sub(1)} == Sub(3);\n K = Nei{Sub(2)} == Sub(3);\n if any(I)+any(J)+any(K) >= 2\n Components = cell(1,1);\n Components{1} = uint32(Sub);\n CompSize = 1;\n elseif any(I)\n Components = cell(2,1);\n Components{1} = uint32(Sub(1:2));\n Components{2} = uint32(Sub(3));\n CompSize = [2 1];\n elseif any(J)\n Components = cell(2,1);\n Components{1} = uint32(Sub([1 3]));\n Components{2} = uint32(Sub(2));\n CompSize = [2 1];\n elseif any(K)\n Components = cell(2,1);\n Components{1} = uint32(Sub(2:3));\n Components{2} = uint32(Sub(1));\n CompSize = [2 1];\n else\n Components = cell(3,1);\n Components{1} = uint32(Sub(1));\n Components{2} = uint32(Sub(2));\n Components{3} = uint32(Sub(3));\n CompSize = [1 1 1];\n end\n end\n \nelseif any(Sub) || (length(Sub) == 1 && Sub(1) == 0)\n nb = size(Nei,1);\n if nargin == 3\n Fal = false(nb,1);\n end\n if length(Sub) == 1 && Sub == 0\n % All the cover sets\n ns = nb;\n if nargin == 3\n Sub = true(nb,1);\n else\n Sub = ~Fal;\n end\n elseif ~islogical(Sub)\n % Subset of cover sets\n ns = length(Sub);\n if nargin == 3\n sub = false(nb,1);\n else\n sub = Fal;\n end\n sub(Sub) = true;\n Sub = sub;\n else\n % Subset of cover sets\n ns = nnz(Sub);\n end\n \n Components = cell(ns,1);\n CompSize = zeros(ns,1,'uint32');\n nc = 0; % number of components found\n m = 1;\n while ~Sub(m)\n m = m+1;\n end\n i = 0;\n Comp = zeros(ns,1,'uint32');\n while i < ns\n Add = Nei{m};\n I = Sub(Add);\n Add = Add(I);\n a = length(Add);\n Comp(1) = m;\n Sub(m) = false;\n t = 1;\n while a > 0\n Comp(t+1:t+a) = Add;\n Sub(Add) = false;\n t = t+a;\n Add = vertcat(Nei{Add});\n I = Sub(Add);\n Add = Add(I);\n % select the unique elements of Add:\n n = length(Add);\n if n > 2\n I = true(n,1);\n for j = 1:n\n if ~Fal(Add(j))\n Fal(Add(j)) = true;\n else\n I(j) = false;\n end\n end\n Fal(Add) = false;\n Add = Add(I);\n elseif n == 2\n if Add(1) == Add(2)\n Add = Add(1);\n end\n end\n a = length(Add);\n end\n i = i+t;\n if t >= MinSize\n nc = nc+1;\n Components{nc} = uint32(Comp(1:t));\n CompSize(nc) = t;\n end\n if i < ns\n while m <= nb && Sub(m) == false\n m = m+1;\n end\n end\n end\n Components = Components(1:nc);\n CompSize = CompSize(1:nc);\nelse\n Components = cell(0,1);\n CompSize = 0;\nend"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "growth_volume_correction.m", "ext": ".m", "path": "TreeQSM-master/src/tools/growth_volume_correction.m", "size": 5308, "source_encoding": "utf_8", "md5": "43642ad1b72156592dd2da09e8efa614", "text": "function cylinder = growth_volume_correction(cylinder,inputs)\n\n% ---------------------------------------------------------------------\n% GROWTH_VOLUME_CORRECTION.M Use growth volume allometry approach to \n% modify the radius of cylinders.\n%\n% Version 2.0.0\n% Latest update 16 Sep 2021\n%\n% Copyright (C) 2013-2021 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Use growth volume (= the total volume \"supported by the cylinder\") \n% allometry approach to modify the radius of too large and too small \n% cylinders. 
Uses the allometry: \n%\n% Radius = a * GrowthVolume^b + c\n%\n% If cylinder's radius is over fac-times or under 1/fac-times the radius\n% predicted from the growth volume allometry, then correct the radius to \n% match the allometry. However, the radius of the cylinders in the branch\n% tips are never incresed, only decreased by the correction. More details \n% can be from Jan Hackenberg's \"SimpleTree\" papers and documents.\n% ---------------------------------------------------------------------\n% Inputs:\n% cylinder Structure array that needs to contains the following fields: \n% radius (Rad) Radii of the cylinders, vector\n% length (Len) Lengths of the cylinders, vector\n% parent (CPar) Parents of the cylinders, vector\n% inputs.GrowthVolFac The factor \"fac\", defines the upper and lower\n% allowed radius from the predicted one:\n% 1/fac*predicted_rad <= rad <= fac*predicted_rad\n% ---------------------------------------------------------------------\n\n% Changes from version 1.0.0 to 2.0.0, 16 Sep 2021:\n% 1) Changed the roles of RADIUS and GROWTH_VOLUME in the allometry, i.e.\n% the radius is now predicted from the growth volume\n% 2) Do not increase the radius of the branch tip cylinders \n\ndisp('----------')\ndisp('Growth volume based correction of cylinder radii:')\n\nRad = double(cylinder.radius);\nRad0 = Rad;\nLen = double(cylinder.length);\nCPar = cylinder.parent;\nCExt = cylinder.extension;\n\ninitial_volume = round(1000*pi*sum(Rad.^2.*Len));\ndisp([' Initial_volume (L): ',num2str(initial_volume)])\n\n%% Define the child cylinders for each cylinder\nn = length(Rad);\nCChi = cell(n,1);\nind = (1:1:n)';\nfor i = 1:n\n CChi{i} = ind(CPar == i);\nend\n\n%% Compute the growth volume\nGrowthVol = zeros(n,1); % growth volume\nS = cellfun('length',CChi);\nmodify = S == 0;\nGrowthVol(modify) = pi*Rad(modify).^2.*Len(modify);\nparents = unique(CPar(modify));\nif parents(1) == 0\n parents = parents(2:end);\nend\nwhile ~isempty(parents)\n V = pi*Rad(parents).^2.*Len(parents);\n m = length(parents);\n for i = 1:m\n GrowthVol(parents(i)) = V(i)+sum(GrowthVol(CChi{parents(i)}));\n end\n parents = unique(CPar(parents));\n if parents(1) == 0\n parents = parents(2:end);\n end\nend\n\n%% Fit the allometry: Rad = a*GV^b;\noptions = optimset('Display','off');\nX = lsqcurvefit(@allometry,[0.5 0.5 0],GrowthVol,Rad,[],[],options);\ndisp(' Allometry model parameters R = a*GV^b+c:')\ndisp([' Multiplier a: ', num2str(X(1))])\ndisp([' Exponent b: ', num2str(X(2))])\nif length(X) > 2\ndisp([' Intersect c: ', num2str(X(3))])\nend\n\n%% Compute the predicted radius from the allometry\nPredRad = allometry(X,GrowthVol);\n\n%% Correct the radii based on the predictions\n% If cylinder's radius is over fac-times or under 1/fac-times the\n% predicted radius, then correct the radius to match the allometry\nfac = inputs.GrowthVolFac;\nmodify = Rad < PredRad/fac | Rad > fac*PredRad;\nmodify(Rad < PredRad/fac & CExt == 0) = 0; % Do not increase the radius at tips\nCorRad = PredRad(modify);\n\n% Plot allometry and radii modification\ngvm = max(GrowthVol);\ngv = (0:0.001:gvm);\nPRad = allometry(X,gv);\nfigure(1)\nplot(GrowthVol,Rad,'.b','Markersize',2)\nhold on\nplot(gv,PRad,'-r','Linewidth',2)\nplot(gv,PRad/fac,'-g','Linewidth',2)\nplot(gv,fac*PRad,'-g','Linewidth',2)\nhold off\ngrid on\nxlabel('Growth volume (m^3)')\nylabel('Radius (m)')\nlegend('radius','predicted radius','minimum radius','maximum radius','Location','NorthWest')\n\nfigure(2)\nhistogram(CorRad-Rad(modify))\nxlabel('Change in 
radius')\ntitle('Number of cylinders per change in radius class')\n\n% Determine the maximum radius change\nR = Rad(modify);\nD = max(abs(R-CorRad)); % Maximum radius change\nJ = abs(R-CorRad) == D;\nD = CorRad(J)-R(J);\n\n% modify the radius according to allometry\nRad(modify) = CorRad; \ncylinder.radius = Rad;\n\ndisp([' Modified ',num2str(nnz(modify)),' of the ',num2str(n),' cylinders'])\ndisp([' Largest radius change (cm): ',num2str(round(1000*D)/10)])\ncorrected_volume = round(1000*pi*sum(Rad.^2.*Len));\ndisp([' Corrected volume (L): ', num2str(corrected_volume)])\ndisp([' Change in volume (L): ', num2str(corrected_volume-initial_volume)])\ndisp('----------')\n\n% % Plot cylinder models where the color indicates change (green = no change, \n% % red = decreased radius, cyan = increased radius)\n% cylinder.branch = ones(n,1);\n% cylinder.BranchOrder = ones(n,1);\n% I = Rad < Rad0;\n% cylinder.BranchOrder(I) = 2;\n% I = Rad > Rad0;\n% cylinder.BranchOrder(I) = 3;\n% plot_cylinder_model(cylinder,'order',3,20,1)\n% \n% cyl = cylinder;\n% cyl.radius = Rad0;\n% plot_cylinder_model(cyl,'order',4,20,1)\n\nend % End of main function\n\n\nfunction F = allometry(x,xdata)\nF = x(1)*xdata.^x(2)+x(3);\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "simplify_qsm.m", "ext": ".m", "path": "TreeQSM-master/src/tools/simplify_qsm.m", "size": 10948, "source_encoding": "utf_8", "md5": "56b1bb310e2e1ffb3db492223c4c62d8", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction QSM = simplify_qsm(QSM,MaxOrder,SmallRadii,ReplaceIterations,Plot,Disp)\n\n% ---------------------------------------------------------------------\n% SIMPLIFY_QSM.M Simplifies cylinder QSMs by restricting the maximum\n% branching order, by removing thin branches, and by \n% replacing two concecutive cylinders with a longer cylinder\n%\n% Version 2.0.0\n% Latest update 4 May 2022\n%\n% Copyright (C) 2015-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Inputs:\n% QSM QSM-structure, output of treeqsm.m, must contain only one model\n% MaxOrder Maximum branching order, higher order branches removed\n% SmallRadii Minimum acceptable radius for a branch at its base\n% ReplaceIterations Number of iterations for replacing two concecutive\n% cylinders inside one branch with one longer cylinder \n% Plot If true/1, then plots the cylinder models before and\n% after the simplification\n% Disp If Disp == 1, then display the simplication results \n% (the number of cylinders after each step). If \n% Disp == 2, then display also the treedata results for\n% the original and simplified QSMs. 
If Disp == 0, then\n% nothing is displayed.\n%\n% Output:\n% Modified QSM NOTICE: cylinder, branch and treedata are modified.\n\n% Changes from version 1.1.0 to 2.0.0, 4 May 2022:\n% 1) Added modification of branch and treedata structures based on the\n% modified cylinders\n% 2) Added input for plotting and displaying the results\n% 3) Corrected some bugs that could cause errors in some special cases \n\nif nargin <= 4\n Plot = 0;\n Disp = 1;\nelseif nargin <= 5\n Disp = 1;\nend\n\nif Disp == 2\n inputs = QSM.rundata.inputs;\n display_treedata(QSM.treedata,inputs)\nend\n% Plot the cylinder model before the simplification\nif Plot\n plot_cylinder_model(QSM.cylinder,'branch',1,20,1)\nend\n\n%% Maximum branching order\nc = QSM.cylinder;\nnc = size(c.radius,1);\nif Disp >= 1\n disp([' ',num2str(nc),' cylinders originally'])\nend\n\n% Cylinders with branching order up to MaxBranchOrder\nSmallOrder = c.BranchOrder <= MaxOrder; \nN = fieldnames(c);\nn = max(size(N));\nfor i = 1:n\n c.(N{i}) = c.(N{i})(SmallOrder,:);\nend\n\n% Modify topology information\nInd = (1:1:nc)';\nm = nnz(SmallOrder);\nInd(SmallOrder) = (1:1:m)';\nI = c.parent > 0;\nc.parent(I) = Ind(c.parent(I));\nI = c.extension > 0;\nc.extension(I) = Ind(c.extension(I));\n\nif Disp == 1\n nc = nnz(SmallOrder);\n disp([' ',num2str(nc),' cylinders after branching order simplification'])\nend\n\n\n%% Small branches\nif nargin >= 3 && SmallRadii > 0\n \n nc = size(c.radius,1);\n % Determine child branches\n BPar = QSM.branch.parent;\n nb = size(BPar,1);\n BChi = cell(nb,1);\n for i = 1:nb\n P = BPar(i);\n if P > 0\n BChi{P} = [BChi{P}; i];\n end\n end\n \n % Remove branches whose radii is too small compared to its parent\n Large = true(nc,1);\n Pass = true(nb,1);\n for i = 1:nb\n if Pass(i)\n if QSM.branch.diameter(i) < SmallRadii\n B = i;\n BC = BChi{B};\n while ~isempty(BC)\n B = [B; BC];\n BC = vertcat(BChi{BC});\n end\n Pass(B) = false;\n m = length(B);\n for k = 1:m\n Large(c.branch == B(k)) = false;\n end\n end\n end\n end\n \n % Modify topology information\n Ind = (1:1:nc)';\n m = nnz(Large);\n Ind(Large) = (1:1:m)';\n I = c.parent > 0;\n c.parent(I) = Ind(c.parent(I));\n I = c.extension > 0;\n c.extension(I) = Ind(c.extension(I));\n \n % Update/reduce cylinders\n for i = 1:n\n c.(N{i}) = c.(N{i})(Large,:);\n end\n \n if Disp >= 1\n nc = nnz(Large);\n disp([' ',num2str(nc),' cylinders after small branch simplification'])\n end\nend\n\n\n%% Cylinder replacing\nif nargin >= 4 && ReplaceIterations > 0\n \n % Determine child cylinders\n nc = size(c.radius,1);\n CChi = cell(nc,1);\n for i = 1:nc\n P = c.parent(i);\n if P > 0\n PE = c.extension(P);\n if PE ~= i\n CChi{P} = [CChi{P}; i];\n end\n end\n end\n \n % Replace cylinders\n for j = 1:ReplaceIterations\n \n nc = size(c.radius,1);\n Ind = (1:1:nc)';\n Keep = false(nc,1);\n i = 1;\n while i <= nc\n t = 1;\n while i+t <= nc && c.branch(i+t) == c.branch(i)\n t = t+1;\n end\n Cyls = (i:1:i+t-1)';\n S = c.start(Cyls,:);\n A = c.axis(Cyls,:);\n L = c.length(Cyls);\n if t == 1 % one cylinder in the branch\n Keep(i) = true;\n elseif ceil(t/2) == floor(t/2) % even number of cylinders in the branch\n I = (1:2:t)'; % select 1., 3., 5., ...\n % Correct radii, axes and lengths\n E = S(end,:)+L(end)*A(end,:);\n S = S(I,:);\n m = length(I);\n if m > 1\n A = [S(2:end,:); E]-S(1:end,:);\n else\n A = E-S(1,:);\n end\n L = sqrt(sum(A.*A,2));\n A = [A(:,1)./L A(:,2)./L A(:,3)./L];\n cyls = Cyls(I);\n Keep(cyls) = true;\n V = pi*c.radius(Cyls).^2.*c.length(Cyls);\n J = (2:2:t)';\n V = V(I)+V(J);\n R = 
sqrt(V./L/pi);\n c.radius(cyls) = R;\n \n else % odd number of cylinders\n I = [1 2:2:t]'; % select 1., 2., 4., 6., ...\n % Correct radii, axes and lengths\n E = S(end,:)+L(end)*A(end,:);\n S = S(I,:);\n l = L(1);\n a = A(I,:);\n m = length(I);\n if m > 2\n a(2:end,:) = [S(3:end,:); E]-S(2:end,:);\n else\n a(2,:) = E-S(2,:);\n end\n A = a;\n L = sqrt(sum(A.*A,2));\n L(1) = l;\n A(2:end,:) = [A(2:end,1)./L(2:end) A(2:end,2)./L(2:end) A(2:end,3)./L(2:end)];\n cyls = Cyls(I);\n Keep(cyls) = true;\n V = pi*c.radius(Cyls).^2.*c.length(Cyls);\n J = (3:2:t)';\n V = V(I(2:end))+V(J);\n R = sqrt(V./L(2:end)/pi);\n c.radius(cyls(2:end)) = R;\n end\n \n if t > 1\n % Modify cylinders\n c.length(cyls) = L;\n c.axis(cyls,:) = A;\n % Correct branching/topology information\n c.PositionInBranch(cyls) = (1:1:m)';\n c.extension(cyls) = [cyls(2:end); 0];\n c.parent(cyls(2:end)) = cyls(1:end-1);\n par = c.parent(cyls(1));\n if par > 0 && ~Keep(par)\n par0 = c.parent(par);\n if Keep(par0) && c.extension(par0) == par\n c.parent(cyls(1)) = par0;\n end\n end\n \n % Correct child branches\n chi = vertcat(CChi{Cyls});\n if ~isempty(chi)\n par = c.parent(chi);\n J = Keep(par);\n par = par(~J)-1;\n c.parent(chi(~J)) = par;\n \n par = c.parent(chi);\n rp = c.radius(par);\n sp = c.start(par,:);\n ap = c.axis(par,:);\n lc = c.length(chi);\n sc = c.start(chi,:);\n ac = c.axis(chi,:);\n ec = sc+[lc.*ac(:,1) lc.*ac(:,2) lc.*ac(:,3)];\n m = length(chi);\n for k = 1:m\n [d,V,h,B] = distances_to_line(sc(k,:),ap(k,:),sp(k,:));\n V = V/d;\n sc(k,:) = sp(k,:)+rp(k)*V+B;\n end\n ac = ec-sc;\n [ac,lc] = normalize(ac);\n c.length(chi) = lc;\n c.start(chi,:) = sc;\n c.axis(chi,:) = ac;\n end\n end\n \n i = i+t;\n end\n % Change topology (parent, extension) indexes\n m = nnz(Keep);\n Ind(Keep) = (1:1:m)';\n I = c.parent > 0;\n c.parent(I) = Ind(c.parent(I));\n I = c.extension > 0;\n c.extension(I) = Ind(c.extension(I));\n \n % Update/reduce cylinders\n for i = 1:n\n c.(N{i}) = c.(N{i})(Keep,:);\n end\n \n if j < ReplaceIterations\n % Determine child cylinders\n nc = size(c.radius,1);\n CChi = cell(nc,1);\n for i = 1:nc\n P = c.parent(i);\n if P > 0\n PE = c.extension(P);\n if PE ~= i\n CChi{P} = [CChi{P}; i];\n end\n end\n end\n end\n end\n \n if Disp >= 1\n nc = size(c.radius,1);\n disp([' ',num2str(nc),' cylinders after cylinder replacements'])\n end\nend\nif Disp >= 1\n nc = size(c.radius,1);\n disp([' ',num2str(nc),' cylinders after all simplifications'])\nend\n\n\n%% Updata the QSM\n% Update the branch\nbranch = branches(c);\n\n% Update the treedata\ninputs = QSM.rundata.inputs;\ninputs.plot = 0;\n% Display\nif Disp == 2\n inputs.disp = 2;\nelse\n inputs.disp = 0; \nend\ntreedata = update_tree_data(QSM,c,branch,inputs);\n\n% Update the cylinder, branch, and treedata of the QSM\nQSM.cylinder = c;\nQSM.branch = branch;\nQSM.treedata = treedata;\n\n% Plot the cylinder model after the simplification\nif Plot\n plot_cylinder_model(QSM.cylinder,'branch',2,20,1)\nend\n\nend % End of main function\n\n\nfunction display_treedata(treedata,inputs)\n%% Generate units for displaying the treedata\nNames = fieldnames(treedata);\nn = size(Names,1);\nUnits = zeros(n,3);\nm = 23;\nfor i = 1:n\n if ~inputs.Tria && strcmp(Names{i},'CrownVolumeAlpha')\n m = i;\n elseif inputs.Tria && strcmp(Names{i},'TriaTrunkLength')\n m = i;\n end\n if strcmp(Names{i}(1:3),'DBH')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'ume')\n Units(i,:) = 'L ';\n elseif strcmp(Names{i}(end-2:end),'ght')\n Units(i,:) = 'm ';\n elseif 
strcmp(Names{i}(end-2:end),'gth')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(1:3),'vol')\n Units(i,:) = 'L ';\n elseif strcmp(Names{i}(1:3),'len')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'rea')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(1:3),'loc')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-4:end),'aConv')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(end-5:end),'aAlpha')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(end-4:end),'eConv')\n Units(i,:) = 'm^3';\n elseif strcmp(Names{i}(end-5:end),'eAlpha')\n Units(i,:) = 'm^3';\n elseif strcmp(Names{i}(end-2:end),'Ave')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'Max')\n Units(i,:) = 'm ';\n end\nend\n%% Display treedata\ndisp('------------')\ndisp(' Tree attributes before simplification:')\nfor i = 1:m\n v = change_precision(treedata.(Names{i}));\n if strcmp(Names{i},'DBHtri')\n disp(' -----')\n disp(' Tree attributes from triangulation:')\n end\n disp([' ',Names{i},' = ',num2str(v),' ',Units(i,:)])\nend\ndisp(' -----')\nend"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "save_model_text.m", "ext": ".m", "path": "TreeQSM-master/src/tools/save_model_text.m", "size": 4758, "source_encoding": "utf_8", "md5": "b477a27d4b1f21363cdf3c28f6c7f863", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction save_model_text(QSM,savename)\n\n% ---------------------------------------------------------------------\n% SAVE_MODEL_TEXT.M Saves QSM (cylinder, branch, treedata) into text\n% files\n%\n% Version 1.1.0\n% Latest update 17 Aug 2020\n%\n% Copyright (C) 2013-2020 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Save the cylinder, branch, and treedata structures in text-formats (.txt) \n% into /result-folder with the input \"savename\" defining the file names:\n% 'cylinder_',savename,'.txt'\n% 'branch_',savename,'.txt'\n% 'treedata_',savename,'.txt'\n% !!! 
Notice that only part of the treedata, the single number tree \n% attributes are saved in the text-file.\n% Every user can change this code easily to define what is saved into \n% their text-files.\n\n% Changes from version 1.0.0 to 1.1.0, 17 Aug 2020:\n% 1) Added the new fields of cylinder, branch and treedata structures\n% 2) Added header names to the files\n% 3) Changed the names of the files to be saved\n% 4) Changed the name of second input from \"string\" to \"savename\"\n% 5) Changed the rounding of some parameters and attributes\n\ncylinder = QSM.cylinder;\nbranch = QSM.branch;\ntreedata = QSM.treedata;\n\n%% Form cylinder data, branch data and tree data\n% Use less decimals\nRad = round(10000*cylinder.radius)/10000; % radius (m)\nLen = round(10000*cylinder.length)/10000; % length (m)\nSta = round(10000*cylinder.start)/10000; % starting point (m)\nAxe = round(10000*cylinder.axis)/10000; % axis (m)\nCPar = single(cylinder.parent); % parent cylinder\nCExt = single(cylinder.extension); % extension cylinder\nAdded = single(cylinder.added); % is cylinder added to fil a gap\nRad0 = round(10000*cylinder.UnmodRadius)/10000; % unmodified radius (m)\nB = single(cylinder.branch); % branch index of the cylinder\nBO = single(cylinder.BranchOrder); % branch order of the branch\nPIB = single(cylinder.PositionInBranch); % position of the cyl. in the branch\nMad = single(round(10000*cylinder.mad)/10000); % mean abso. distance (m)\nSC = single(round(10000*cylinder.SurfCov)/10000); % surface coverage\nCylData = [Rad Len Sta Axe CPar CExt B BO PIB Mad SC Added Rad0];\nNamesC = ['radius (m)',\"length (m)\",\"start_point\",\"axis_direction\",...\n \"parent\",\"extension\",\"branch\",\"branch_order\",\"position_in_branch\",...\n \"mad\",\"SurfCov\",\"added\",\"UnmodRadius (m)\"];\n\nBOrd = single(branch.order); % branch order\nBPar = single(branch.parent); % parent branch\nBDia = round(10000*branch.diameter)/10000; % diameter (m)\nBVol = round(10000*branch.volume)/10000; % volume (L)\nBAre = round(10000*branch.area)/10000; % area (m^2)\nBLen = round(1000*branch.length)/1000; % length (m)\nBAng = round(10*branch.angle)/10; % angle (deg)\nBHei = round(1000*branch.height)/1000; % height (m)\nBAzi = round(10*branch.azimuth)/10; % azimuth (deg)\nBZen = round(10*branch.zenith)/10; % zenith (deg)\nBranchData = [BOrd BPar BDia BVol BAre BLen BHei BAng BAzi BZen];\nNamesB = [\"order\",\"parent\",\"diameter (m)\",\"volume (L)\",\"area (m^2)\",...\n \"length (m)\",\"height (m)\",\"angle (deg)\",\"azimuth (deg)\",\"zenith (deg)\"];\n\n% Extract the field names of treedata\nNames = fieldnames(treedata);\nn = 1;\nwhile ~strcmp(Names{n},'location')\n n = n+1;\nend\nn = n-1;\nNames = Names(1:n);\n\nTreeData = zeros(n,1); \n% TreeData contains TotalVolume, TrunkVolume, BranchVolume, etc\nfor i = 1:n\n TreeData(i) = treedata.(Names{i,:});\nend\nTreeData = change_precision(TreeData); % use less decimals\nNamesD = string(Names);\n\n%% Save the data as text-files\nstr = ['results/cylinder_',savename,'.txt'];\nfid = fopen(str, 'wt');\nfprintf(fid, [repmat('%s\\t', 1, size(NamesC,2)-1) '%s\\n'], NamesC.');\nfprintf(fid, [repmat('%g\\t', 1, size(CylData,2)-1) '%g\\n'], CylData.');\nfclose(fid);\n\nstr = ['results/branch_',savename,'.txt'];\nfid = fopen(str, 'wt');\nfprintf(fid, [repmat('%s\\t', 1, size(NamesB,2)-1) '%s\\n'], NamesB.');\nfprintf(fid, [repmat('%g\\t', 1, size(BranchData,2)-1) '%g\\n'], BranchData.');\nfclose(fid);\n\nstr = ['results/treedata_',savename,'.txt'];\nfid = fopen(str, 'wt');\nNamesD(:,2) = 
TreeData;\nfprintf(fid,'%s\\t %g\\n',NamesD.');\nfclose(fid);\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "define_input.m", "ext": ".m", "path": "TreeQSM-master/src/tools/define_input.m", "size": 4812, "source_encoding": "utf_8", "md5": "fc9070dc19351ba25dce1ec2036e2d6d", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction inputs = define_input(Clouds,nPD1,nPD2Min,nPD2Max)\n\n% ---------------------------------------------------------------------\n% DEFINE_INPUT.M Defines the required inputs (PatchDiam and BallRad \n% parameters) for TreeQSM based in estimated tree\n% radius.\n%\n% Version 1.0.0\n% Latest update 4 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Takes in a single tree point clouds, that preferably contains only points \n% from the tree and not e.g. from groung. User defines the number of\n% PatchDiam1, PatchDiam2Min, PatchDiam2Max parameter values needed. Then\n% the code estimates automatically these parameter values based on the \n% tree stem radius and tree height. Thus this code can be used to generate\n% the inputs needed for QSM reconstruction with TreeQSM.\n%\n% Inputs:\n% P Point cloud of a tree OR string specifying the name of the .mat\n% file where multiple point clouds are saved \n% nPD1 Number of parameter values estimated for PatchDiam1\n% nPD2Min Number of parameter values estimated for PatchDiam2Min\n% nPD2Max Number of parameter values estimated for PatchDiam2Max\n%\n% Output:\n% inputs Input structure with the estimated parameter values\n% ---------------------------------------------------------------------\n\n\n% Create inputs-structure\ncreate_input\nInputs = inputs;\n\n% If given multiple clouds, extract the names\nif ischar(Clouds) || isstring(Clouds)\n matobj = matfile([Clouds,'.mat']);\n names = fieldnames(matobj);\n i = 1;\n n = max(size(names));\n while i <= n && ~strcmp(names{i,:},'Properties')\n i = i+1;\n end\n I = (1:1:n);\n I = setdiff(I,i);\n names = names(I,1);\n names = sort(names);\n nt = max(size(names)); % number of trees/point clouds\nelse\n P = Clouds;\n nt = 1;\nend\ninputs(nt).PatchDiam1 = 0;\n\n\n%% Estimate the PatchDiam and BallRad parameters\nfor i = 1:nt\n if nt > 1\n % Select point cloud\n P = matobj.(names{i});\n inputs(i) = Inputs;\n inputs(i).name = names{i};\n inputs(i).tree = i;\n inputs(i).plot = 0;\n inputs(i).savetxt = 0;\n inputs(i).savemat = 0;\n inputs(i).disp = 0;\n end\n\n %% Estimate the stem diameter close to bottom\n % Define height\n Hb = min(P(:,3));\n Ht = max(P(:,3));\n TreeHeight = double(Ht-Hb);\n Hei = P(:,3)-Hb;\n\n % Select a section (0.02-0.1*tree_height) from the bottom of the tree\n hSecTop = min(4,0.1*TreeHeight);\n hSecBot = 0.02*TreeHeight;\n hSec = hSecTop-hSecBot;\n Sec = Hei > hSecBot & Hei < hSecTop;\n StemBot = P(Sec,1:3);\n\n % Estimate stem axis (point and direction)\n AxisPoint 
= mean(StemBot);\n V = StemBot-AxisPoint;\n V = normalize(V);\n AxisDir = optimal_parallel_vector(V);\n\n % Estimate stem diameter\n d = distances_to_line(StemBot,AxisDir,AxisPoint);\n Rstem = double(median(d));\n\n % Point resolution (distance between points)\n Res = sqrt((2*pi*Rstem*hSec)/size(StemBot,1));\n\n %% Define the PatchDiam parameters\n % PatchDiam1 is around stem radius divided by 3.\n pd1 = Rstem/3;%*max(1,TreeHeight/20);\n if nPD1 == 1\n inputs(i).PatchDiam1 = pd1;\n else\n n = nPD1;\n inputs(i).PatchDiam1 = linspace((0.90-(n-2)*0.1)*pd1,(1.10+(n-2)*0.1)*pd1,n);\n end\n\n % PatchDiam2Min is around stem radius divided by 6 and increased for\n % over 20 m heigh trees.\n pd2 = Rstem/6*min(1,20/TreeHeight);\n if nPD2Min == 1\n inputs(i).PatchDiam2Min = pd2;\n else\n n = nPD2Min;\n inputs(i).PatchDiam2Min = linspace((0.90-(n-2)*0.1)*pd2,(1.10+(n-2)*0.1)*pd2,n);\n end\n\n % PatchDiam2Max is around stem radius divided by 2.5.\n pd3 = Rstem/2.5;%*max(1,TreeHeight/20);\n if nPD2Max == 1\n inputs(i).PatchDiam2Max = pd3;\n else\n n = nPD2Max;\n inputs(i).PatchDiam2Max = linspace((0.90-(n-2)*0.1)*pd3,(1.10+(n-2)*0.1)*pd3,n);\n end\n\n % Define the BallRad parameters:\n inputs(i).BallRad1 = max([inputs(i).PatchDiam1+1.5*Res;\n min(1.25*inputs(i).PatchDiam1,inputs(i).PatchDiam1+0.025)]);\n inputs(i).BallRad2 = max([inputs(i).PatchDiam2Max+1.25*Res;\n min(1.2*inputs(i).PatchDiam2Max,inputs(i).PatchDiam2Max+0.025)]);\n\n %plot_point_cloud(P,1,1)\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "update_tree_data.m", "ext": ".m", "path": "TreeQSM-master/src/tools/update_tree_data.m", "size": 22826, "source_encoding": "utf_8", "md5": "e026d8880095f35cc2322e8a540a7ed4", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction treedata = update_tree_data(QSM,cylinder,branch,inputs)\n\n% ---------------------------------------------------------------------\n% UPDATE_TREE_DATA.M Updates the treedata structure, e.g. 
after\n% simplification of QSM\n%\n% Version 1.0.0\n% Latest update 4 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Inputs:\n% treedata Treedata structure from \"tree_data\"\n% cylinder Cylinder structure from \"cylinders\"\n% branch Branch structure from \"branches\"\n%\n% Output:\n% treedata Tree data/attributes in a struct\n% ---------------------------------------------------------------------\n\n\n% Define some variables from cylinder:\ntreedata = QSM.treedata;\nRad = cylinder.radius;\nLen = cylinder.length;\nSta = cylinder.start;\nAxe = cylinder.axis;\nnc = length(Rad);\nind = (1:1:nc)';\nTrunk = cylinder.branch == 1; % Trunk cylinders\n\n%% Tree attributes from cylinders\n% Volumes, areas, lengths, branches\ntreedata.TotalVolume = 1000*pi*Rad.^2'*Len;\ntreedata.TrunkVolume = 1000*pi*Rad(Trunk).^2'*Len(Trunk);\ntreedata.BranchVolume = 1000*pi*Rad(~Trunk).^2'*Len(~Trunk);\nbottom = min(Sta(:,3));\n[top,i] = max(Sta(:,3));\nif Axe(i,3) > 0\n top = top+Len(i)*Axe(i,3);\nend\ntreedata.TreeHeight = top-bottom;\ntreedata.TrunkLength = sum(Len(Trunk));\ntreedata.BranchLength = sum(Len(~Trunk));\ntreedata.TotalLength = treedata.TrunkLength+treedata.BranchLength;\nNB = length(branch.order)-1; % number of branches\ntreedata.NumberBranches = NB;\nBO = max(branch.order); % maximum branch order\ntreedata.MaxBranchOrder = BO;\ntreedata.TrunkArea = 2*pi*sum(Rad(Trunk).*Len(Trunk));\ntreedata.BranchArea = 2*pi*sum(Rad(~Trunk).*Len(~Trunk));\ntreedata.TotalArea = 2*pi*sum(Rad.*Len);\n\n%% Crown measures,Vertical profile and spreads\n[treedata,spreads] = crown_measures(treedata,cylinder,branch);\n\n%% Update triangulation information\nif inputs.Tria\n treedata = update_triangulation(QSM,treedata,cylinder);\nend\n\n%% Tree Location\ntreedata.location = Sta(1,:);\n\n%% Stem taper\nR = Rad(Trunk);\nn = length(R);\nTaper = zeros(n+1,2);\nTaper(1,2) = 2*R(1);\nTaper(2:end,1) = cumsum(Len(Trunk));\nTaper(2:end,2) = [2*R(2:end); 2*R(n)];\ntreedata.StemTaper = Taper';\n\n%% Vertical profile and spreads\ntreedata.VerticalProfile = mean(spreads,2);\ntreedata.spreads = spreads;\n\n%% CYLINDER DISTRIBUTIONS:\n%% Wood part diameter distributions\n% Volume, area and length of wood parts as functions of cylinder diameter\n% (in 1cm diameter classes)\ntreedata = cylinder_distribution(treedata,Rad,Len,Axe,'Dia');\n\n%% Wood part height distributions\n% Volume, area and length of cylinders as a function of height\n% (in 1 m height classes)\ntreedata = cylinder_height_distribution(treedata,Rad,Len,Sta,Axe,ind);\n\n%% Wood part zenith direction distributions\n% Volume, area and length of wood parts as functions of cylinder zenith\n% direction (in 10 degree angle classes)\ntreedata = cylinder_distribution(treedata,Rad,Len,Axe,'Zen');\n\n%% Wood part azimuth direction distributions\n% Volume, area and length of wood parts as functions of cylinder zenith\n% direction (in 10 degree angle classes)\ntreedata = cylinder_distribution(treedata,Rad,Len,Axe,'Azi');\n\n%% BRANCH DISTRIBUTIONS:\n%% Branch order distributions\n% Volume, area, length and number of branches as a function of branch order\ntreedata = branch_order_distribution(treedata,branch);\n\n%% Branch diameter distributions\n% Volume, area, length and number of branches as a function of branch diameter\n% (in 1cm diameter classes)\ntreedata = branch_distribution(treedata,branch,'Dia');\n\n%% Branch height distribution\n% Volume, area, length and number of branches as a function of 
branch height\n% (in 1 meter classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Hei');\n\n%% Branch angle distribution\n% Volume, area, length and number of branches as a function of branch angle\n% (in 10 deg angle classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Ang');\n\n%% Branch azimuth distribution\n% Volume, area, length and number of branches as a function of branch azimuth\n% (in 22.5 deg angle classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Azi');\n\n%% Branch zenith distribution\n% Volume, area, length and number of branches as a function of branch zenith\n% (in 10 deg angle classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Zen');\n\n%% change into single-format\nNames = fieldnames(treedata);\nn = size(Names,1);\nfor i = 1:n\n treedata.(Names{i}) = single(treedata.(Names{i}));\nend\n\nif inputs.disp == 2\n %% Generate units for displaying the treedata\n Units = zeros(n,3);\n m = 23;\n for i = 1:n\n if ~inputs.Tria && strcmp(Names{i},'CrownVolumeAlpha')\n m = i;\n elseif inputs.Tria && strcmp(Names{i},'TriaTrunkLength')\n m = i;\n end\n if strcmp(Names{i}(1:3),'DBH')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'ume')\n Units(i,:) = 'L ';\n elseif strcmp(Names{i}(end-2:end),'ght')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'gth')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(1:3),'vol')\n Units(i,:) = 'L ';\n elseif strcmp(Names{i}(1:3),'len')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'rea')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(1:3),'loc')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-4:end),'aConv')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(end-5:end),'aAlpha')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(end-4:end),'eConv')\n Units(i,:) = 'm^3';\n elseif strcmp(Names{i}(end-5:end),'eAlpha')\n Units(i,:) = 'm^3';\n elseif strcmp(Names{i}(end-2:end),'Ave')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'Max')\n Units(i,:) = 'm ';\n end\n end\n %% Display treedata\n disp('------------')\n disp(' Tree attributes:')\n for i = 1:m\n v = change_precision(treedata.(Names{i}));\n if strcmp(Names{i},'DBHtri')\n disp(' -----')\n disp(' Tree attributes from triangulation:')\n end\n disp([' ',Names{i},' = ',num2str(v),' ',Units(i,:)])\n end\n disp(' -----')\nend\n\nif inputs.plot > 1\n %% Plot distributions\n figure(6)\n subplot(2,4,1)\n plot(Taper(:,1),Taper(:,2),'-b')\n title('Stem taper')\n xlabel('Distance from base (m)')\n ylabel('Diameter (m)')\n axis tight\n grid on\n \n Q.treedata = treedata;\n subplot(2,4,2)\n plot_distribution(Q,6,0,0,'VolCylDia')\n \n subplot(2,4,3)\n plot_distribution(Q,6,0,0,'AreCylDia')\n \n subplot(2,4,4)\n plot_distribution(Q,6,0,0,'LenCylDia')\n \n subplot(2,4,5)\n plot_distribution(Q,6,0,0,'VolBranchOrd')\n \n subplot(2,4,6)\n plot_distribution(Q,6,0,0,'LenBranchOrd')\n \n subplot(2,4,7)\n plot_distribution(Q,6,0,0,'AreBranchOrd')\n \n subplot(2,4,8)\n plot_distribution(Q,6,0,0,'NumBranchOrd')\n \n figure(7)\n subplot(3,3,1)\n plot_distribution(Q,7,0,0,'VolCylHei')\n \n subplot(3,3,2)\n plot_distribution(Q,7,0,0,'AreCylHei')\n \n subplot(3,3,3)\n plot_distribution(Q,7,0,0,'LenCylHei')\n \n subplot(3,3,4)\n plot_distribution(Q,7,0,0,'VolCylZen')\n \n subplot(3,3,5)\n plot_distribution(Q,7,0,0,'AreCylZen')\n \n subplot(3,3,6)\n plot_distribution(Q,7,0,0,'LenCylZen')\n \n subplot(3,3,7)\n plot_distribution(Q,7,0,0,'VolCylAzi')\n \n 
subplot(3,3,8)\n plot_distribution(Q,7,0,0,'AreCylAzi')\n \n subplot(3,3,9)\n plot_distribution(Q,7,0,0,'LenCylAzi')\n \n figure(8)\n subplot(3,4,1)\n plot_distribution(Q,8,1,0,'VolBranchDia','VolBranch1Dia')\n \n subplot(3,4,2)\n plot_distribution(Q,8,1,0,'AreBranchDia','AreBranch1Dia')\n \n subplot(3,4,3)\n plot_distribution(Q,8,1,0,'LenBranchDia','LenBranch1Dia')\n \n subplot(3,4,4)\n plot_distribution(Q,8,1,0,'NumBranchDia','NumBranch1Dia')\n \n subplot(3,4,5)\n plot_distribution(Q,8,1,0,'VolBranchHei','VolBranch1Hei')\n \n subplot(3,4,6)\n plot_distribution(Q,8,1,0,'AreBranchHei','AreBranch1Hei')\n \n subplot(3,4,7)\n plot_distribution(Q,8,1,0,'LenBranchHei','LenBranch1Hei')\n \n subplot(3,4,8)\n plot_distribution(Q,8,1,0,'NumBranchHei','NumBranch1Hei')\n \n subplot(3,4,9)\n plot_distribution(Q,8,1,0,'VolBranchAng','VolBranch1Ang')\n \n subplot(3,4,10)\n plot_distribution(Q,8,1,0,'AreBranchAng','AreBranch1Ang')\n \n subplot(3,4,11)\n plot_distribution(Q,8,1,0,'LenBranchAng','LenBranch1Ang')\n \n subplot(3,4,12)\n plot_distribution(Q,8,1,0,'NumBranchAng','NumBranch1Ang')\n \n figure(9)\n subplot(2,4,1)\n plot_distribution(Q,9,1,0,'VolBranchZen','VolBranch1Zen')\n \n subplot(2,4,2)\n plot_distribution(Q,9,1,0,'AreBranchZen','AreBranch1Zen')\n \n subplot(2,4,3)\n plot_distribution(Q,9,1,0,'LenBranchZen','LenBranch1Zen')\n \n subplot(2,4,4)\n plot_distribution(Q,9,1,0,'NumBranchZen','NumBranch1Zen')\n \n subplot(2,4,5)\n plot_distribution(Q,9,1,0,'VolBranchAzi','VolBranch1Azi')\n \n subplot(2,4,6)\n plot_distribution(Q,9,1,0,'AreBranchAzi','AreBranch1Azi')\n \n subplot(2,4,7)\n plot_distribution(Q,9,1,0,'LenBranchAzi','LenBranch1Azi')\n \n subplot(2,4,8)\n plot_distribution(Q,9,1,0,'NumBranchAzi','NumBranch1Azi')\nend\n\nend % End of main function\n\n\nfunction [treedata,spreads] = crown_measures(treedata,cylinder,branch)\n\n%% Generate point clouds from the cylinder model\nAxe = cylinder.axis;\nLen = cylinder.length;\nSta = cylinder.start;\nTip = Sta+[Len.*Axe(:,1) Len.*Axe(:,2) Len.*Axe(:,3)]; % tips of the cylinders\nnc = length(Len);\nP = zeros(5*nc,3); % four mid points on the cylinder surface\nt = 0;\nfor i = 1:nc\n [U,V] = orthonormal_vectors(Axe(i,:));\n U = cylinder.radius(i)*U;\n if cylinder.branch(i) == 1\n % For stem cylinders generate more points\n for k = 1:4\n M = Sta(i,:)+k*Len(i)/4*Axe(i,:);\n R = rotation_matrix(Axe(i,:),pi/12);\n for j = 1:12\n if j > 1\n U = R*U;\n end\n t = t+1;\n P(t,:) = M+U';\n end\n end\n else\n M = Sta(i,:)+Len(i)/2*Axe(i,:);\n R = rotation_matrix(Axe(i,:),pi/4);\n for j = 1:4\n if j > 1\n U = R*U;\n end\n t = t+1;\n P(t,:) = M+U';\n end\n end\nend\nP = P(1:t,:);\nP = double([P; Sta; Tip]);\nP = unique(P,'rows');\n\n%% Vertical profiles (layer diameters/spreads), mean:\nbot = min(P(:,3));\ntop = max(P(:,3));\nHei = top-bot;\nif Hei > 10\n m = 20;\nelseif Hei > 2\n m = 10;\nelse\n m = 5;\nend\nspreads = zeros(m,18);\nfor j = 1:m\n I = P(:,3) >= bot+(j-1)*Hei/m & P(:,3) < bot+j*Hei/m;\n X = unique(P(I,:),'rows');\n if size(X,1) > 5\n [K,A] = convhull(X(:,1),X(:,2));\n % compute center of gravity for the convex hull and use it as\n % center for computing average diameters\n n = length(K);\n x = X(K,1);\n y = X(K,2);\n CX = sum((x(1:n-1)+x(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\n CY = sum((y(1:n-1)+y(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\n \n V = mat_vec_subtraction(X(:,1:2),[CX CY]);\n ang = atan2(V(:,2),V(:,1))+pi;\n [ang,I] = sort(ang);\n L = sqrt(sum(V.*V,2));\n L = L(I);\n for i = 1:18\n I = ang >= (i-1)*pi/18 & ang < 
i*pi/18;\n if any(I)\n L1 = max(L(I));\n else\n L1 = 0;\n end\n J = ang >= (i-1)*pi/18+pi & ang < i*pi/18+pi;\n if any(J)\n L2 = max(L(J));\n else\n L2 = 0;\n end\n spreads(j,i) = L1+L2;\n end\n end\nend\n\n%% Crown diameters (spreads), mean and maximum:\nX = unique(P(:,1:2),'rows');\n[K,A] = convhull(X(:,1),X(:,2));\n% compute center of gravity for the convex hull and use it as center for\n% computing average diameters\nn = length(K);\nx = X(K,1);\ny = X(K,2);\nCX = sum((x(1:n-1)+x(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\nCY = sum((y(1:n-1)+y(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\nV = mat_vec_subtraction(Tip(:,1:2),[CX CY]);\nang = atan2(V(:,2),V(:,1))+pi;\n[ang,I] = sort(ang);\nL = sqrt(sum(V.*V,2));\nL = L(I);\nS = zeros(18,1);\nfor i = 1:18\n I = ang >= (i-1)*pi/18 & ang < i*pi/18;\n if any(I)\n L1 = max(L(I));\n else\n L1 = 0;\n end\n J = ang >= (i-1)*pi/18+pi & ang < i*pi/18+pi;\n if any(J)\n L2 = max(L(J));\n else\n L2 = 0;\n end\n S(i) = L1+L2;\nend\ntreedata.CrownDiamAve = mean(S);\nMaxDiam = 0;\nfor i = 1:n\n V = mat_vec_subtraction([x y],[x(i) y(i)]);\n L = max(sqrt(sum(V.*V,2)));\n if L > MaxDiam\n MaxDiam = L;\n end\nend\ntreedata.CrownDiamMax = L;\n\n%% Crown areas from convex hull and alpha shape:\ntreedata.CrownAreaConv = A;\nalp = max(0.5,treedata.CrownDiamAve/10);\nshp = alphaShape(X(:,1),X(:,2),alp);\ntreedata.CrownAreaAlpha = shp.area;\n\n%% Crown base\n% Define first major branch as the branch whose diameter > min(0.05*dbh,5cm)\n% and whose horizontal relative reach is more than the median reach of 1st-ord.\n% branches (or at maximum 10). The reach is defined as the horizontal\n% distance from the base to the tip divided by the dbh.\ndbh = treedata.DBHcyl;\nnb = length(branch.order);\nHL = zeros(nb,1); % horizontal reach\nbranches1 = (1:1:nb)';\nbranches1 = branches1(branch.order == 1); % 1st-order branches\nnb = length(branches1);\nnc = size(Sta,1);\nind = (1:1:nc)';\nfor i = 1:nb\n C = ind(cylinder.branch == branches1(i));\n if ~isempty(C)\n base = Sta(C(1),:);\n C = C(end);\n tip = Sta(C,:)+Len(C)*Axe(C);\n V = tip(1:2)-base(1:2);\n HL(branches1(i)) = sqrt(V*V')/dbh*2;\n end\nend\nM = min(10,median(HL));\n\n% Sort the branches according to the their heights\nHei = branch.height(branches1);\n[Hei,SortOrd] = sort(Hei);\nbranches1 = branches1(SortOrd);\n\n% Search the first/lowest branch: \nd = min(0.05,0.05*dbh);\nb = 0;\nif nb > 1\n i = 1;\n while i < nb\n i = i+1;\n if branch.diameter(branches1(i)) > d && HL(branches1(i)) > M\n b = branches1(i);\n i = nb+2;\n end\n end\n if i == nb+1 && nb > 1\n b = branches1(1);\n end\nend\n\nif b > 0\n % search all the children of the first major branch:\n nb = size(branch.parent,1);\n Ind = (1:1:nb)';\n chi = Ind(branch.parent == b);\n B = b;\n while ~isempty(chi)\n B = [B; chi];\n n = length(chi);\n C = cell(n,1);\n for i = 1:n\n C{i} = Ind(branch.parent == chi(i));\n end\n chi = vertcat(C{:});\n end\n \n % define crown base height from the ground:\n BaseHeight = max(Sta(:,3)); % Height of the crown base\n for i = 1:length(B)\n C = ind(cylinder.branch == B(i));\n ht = min(Tip(C,3));\n hb = min(Sta(C,3));\n h = min(hb,ht);\n if h < BaseHeight\n BaseHeight = h;\n end\n end\n treedata.CrownBaseHeight = BaseHeight-Sta(1,3);\n \n %% Crown length and ratio\n treedata.CrownLength = treedata.TreeHeight-treedata.CrownBaseHeight;\n treedata.CrownRatio = treedata.CrownLength/treedata.TreeHeight;\n \n %% Crown volume from convex hull and alpha shape:\n I = P(:,3) >= BaseHeight;\n X = P(I,:);\n [K,V] = 
convhull(X(:,1),X(:,2),X(:,3));\n treedata.CrownVolumeConv = V;\n alp = max(0.5,treedata.CrownDiamAve/5);\n shp = alphaShape(X(:,1),X(:,2),X(:,3),alp,'HoleThreshold',10000);\n treedata.CrownVolumeAlpha = shp.volume;\n\nelse \n % No branches\n treedata.CrownBaseHeight = treedata.TreeHeight;\n treedata.CrownLength = 0;\n treedata.CrownRatio = 0;\n treedata.CrownVolumeConv = 0;\n treedata.CrownVolumeAlpha = 0;\nend\n\nend % End of function\n\n\nfunction treedata = update_triangulation(QSM,treedata,cylinder)\n\n% Update the mixed results:\nif ~isempty(QSM.triangulation)\n CylInd = QSM.triangulation.cylind;\n Rad = cylinder.radius;\n Len = cylinder.length;\n % Determine the new stem cylinder that is about the location where the\n % triangulation stops:\n nc = length(Rad);\n ind = (1:1:nc)';\n ind = ind(cylinder.branch == 1); % cylinders in the stem\n S = QSM.cylinder.start(CylInd,:); % The place where the triangulation stops\n V = cylinder.start(ind,:)-S;\n d = sqrt(sum(V.*V,2));\n [d,I] = min(d);\n V = V(I,:);\n CylInd = ind(I); % The new cylinder closest to the correct place\n if d < 0.01\n TrunkVolMix = treedata.TrunkVolume-...\n 1000*pi*sum(Rad(1:CylInd-1).^2.*Len(1:CylInd-1))+QSM.triangulation.volume;\n TrunkAreaMix = treedata.TrunkArea-...\n 2*pi*sum(Rad(1:CylInd-1).*Len(1:CylInd-1))+QSM.triangulation.SideArea;\n else\n % Select the following cylinder\n h = V*cylinder.axis(CylInd,:)';\n if h < 0\n CylInd = CylInd+1;\n V = cylinder.start(CylInd,:)-S;\n h = V*cylinder.axis(CylInd,:)';\n end\n Len(CylInd-1) = Len(CylInd-1)-h;\n \n TrunkVolMix = treedata.TrunkVolume-...\n 1000*pi*sum(Rad(1:CylInd-1).^2.*Len(1:CylInd-1))+QSM.triangulation.volume;\n TrunkAreaMix = treedata.TrunkArea-...\n 2*pi*sum(Rad(1:CylInd-1).*Len(1:CylInd-1))+QSM.triangulation.SideArea;\n end\n treedata.MixTrunkVolume = TrunkVolMix;\n treedata.MixTotalVolume = TrunkVolMix+treedata.BranchVolume;\n treedata.MixTrunkArea = TrunkAreaMix;\n treedata.MixTotalArea = TrunkAreaMix+treedata.BranchArea;\nend\nend\n\n\nfunction treedata = cylinder_distribution(treedata,Rad,Len,Axe,dist)\n%% Wood part diameter, zenith and azimuth direction distributions\n% Volume, area and length of wood parts as functions of cylinder\n% diameter, zenith, and azimuth\nif strcmp(dist,'Dia')\n Par = Rad;\n n = ceil(max(200*Rad));\n a = 0.005; % diameter in 1 cm classes\nelseif strcmp(dist,'Zen')\n Par = 180/pi*acos(Axe(:,3));\n n = 18;\n a = 10; % zenith direction in 10 degree angle classes\nelseif strcmp(dist,'Azi')\n Par = 180/pi*atan2(Axe(:,2),Axe(:,1))+180;\n n = 36;\n a = 10; % azimuth direction in 10 degree angle classes\nend\n\nCylDist = zeros(3,n);\nfor i = 1:n\n K = Par >= (i-1)*a & Par < i*a;\n CylDist(1,i) = 1000*pi*sum(Rad(K).^2.*Len(K)); % volumes in litres\n CylDist(2,i) = 2*pi*sum(Rad(K).*Len(K)); % areas in litres\n CylDist(3,i) = sum(Len(K)); % lengths in meters\nend\ntreedata.(['VolCyl',dist]) = CylDist(1,:);\ntreedata.(['AreCyl',dist]) = CylDist(2,:);\ntreedata.(['LenCyl',dist]) = CylDist(3,:);\nend\n\n\nfunction treedata = cylinder_height_distribution(treedata,Rad,Len,Sta,Axe,ind)\n\n%% Wood part height distributions\n% Volume, area and length of cylinders as a function of height\n% (in 1 m height classes)\nMaxHei= ceil(treedata.TreeHeight);\ntreedata.VolCylHei = zeros(1,MaxHei);\ntreedata.AreCylHei = zeros(1,MaxHei);\ntreedata.LenCylHei = zeros(1,MaxHei);\nEnd = Sta+[Len.*Axe(:,1) Len.*Axe(:,2) Len.*Axe(:,3)];\nbot = min(Sta(:,3));\nB = Sta(:,3)-bot;\nT = End(:,3)-bot;\nfor j = 1:MaxHei\n I1 = B >= (j-2) & B < (j-1); % base below 
this bin\n J1 = B >= (j-1) & B < j; % base in this bin\n K1 = B >= j & B < (j+1); % base above this bin\n I2 = T >= (j-2) & T < (j-1); % top below this bin\n J2 = T >= (j-1) & T < j; % top in this bin\n K2 = T >= j & T < (j+1); % top above this bin\n C1 = ind(J1&J2); % base and top in this bin\n C2 = ind(J1&K2); % base in this bin, top above\n C3 = ind(J1&I2); % base in this bin, top below\n C4 = ind(I1&J2); % base in bin below, top in this\n C5 = ind(K1&J2); % base in bin above, top in this\n v1 = 1000*pi*sum(Rad(C1).^2.*Len(C1));\n a1 = 2*pi*sum(Rad(C1).*Len(C1));\n l1 = sum(Len(C1));\n r2 = (j-B(C2))./(T(C2)-B(C2)); % relative portion in this bin\n v2 = 1000*pi*sum(Rad(C2).^2.*Len(C2).*r2);\n a2 = 2*pi*sum(Rad(C2).*Len(C2).*r2);\n l2 = sum(Len(C2).*r2);\n r3 = (B(C3)-j+1)./(B(C3)-T(C3)); % relative portion in this bin\n v3 = 1000*pi*sum(Rad(C3).^2.*Len(C3).*r3);\n a3 = 2*pi*sum(Rad(C3).*Len(C3).*r3);\n l3 = sum(Len(C3).*r3);\n r4 = (T(C4)-j+1)./(T(C4)-B(C4)); % relative portion in this bin\n v4 = 1000*pi*sum(Rad(C4).^2.*Len(C4).*r4);\n a4 = 2*pi*sum(Rad(C4).*Len(C4).*r4);\n l4 = sum(Len(C4).*r4);\n r5 = (j-T(C5))./(B(C5)-T(C5)); % relative portion in this bin\n v5 = 1000*pi*sum(Rad(C5).^2.*Len(C5).*r5);\n a5 = 2*pi*sum(Rad(C5).*Len(C5).*r5);\n l5 = sum(Len(C5).*r5);\n treedata.VolCylHei(j) = v1+v2+v3+v4+v5;\n treedata.AreCylHei(j) = a1+a2+a3+a4+a5;\n treedata.LenCylHei(j) = l1+l2+l3+l4+l5;\nend\nend\n\n\nfunction treedata = branch_distribution(treedata,branch,dist)\n%% Branch diameter, height, angle, zenith and azimuth distributions\n% Volume, area, length and number of branches as a function of branch\n% diamater, height, angle, zenith and aximuth\nBOrd = branch.order(2:end);\nBVol = branch.volume(2:end);\nBAre = branch.area(2:end);\nBLen = branch.length(2:end);\nif strcmp(dist,'Dia')\n Par = branch.diameter(2:end);\n n = ceil(max(100*Par));\n a = 0.005; % diameter in 1 cm classes\nelseif strcmp(dist,'Hei')\n Par = branch.height(2:end);\n n = ceil(treedata.TreeHeight);\n a = 1; % height in 1 m classes\nelseif strcmp(dist,'Ang')\n Par = branch.angle(2:end);\n n = 18;\n a = 10; % angle in 10 degree classes\nelseif strcmp(dist,'Zen')\n Par = branch.zenith(2:end);\n n = 18;\n a = 10; % zenith direction in 10 degree angle classes\nelseif strcmp(dist,'Azi')\n Par = branch.azimuth(2:end)+180;\n n = 36;\n a = 10; % azimuth direction in 10 degree angle classes\nend\n\nBranchDist = zeros(8,n);\nfor i = 1:n\n I = Par >= (i-1)*a & Par < i*a;\n BranchDist(1,i) = sum(BVol(I)); % volume (all branches)\n BranchDist(2,i) = sum(BVol(I & BOrd == 1)); % volume (1st-branches)\n BranchDist(3,i) = sum(BAre(I)); % area (all branches)\n BranchDist(4,i) = sum(BAre(I & BOrd == 1)); % area (1st-branches)\n BranchDist(5,i) = sum(BLen(I)); % length (all branches)\n BranchDist(6,i) = sum(BLen(I & BOrd == 1)); % length (1st-branches)\n BranchDist(7,i) = nnz(I); % number (all branches)\n BranchDist(8,i) = nnz(I & BOrd == 1); % number (1st-branches)\nend\ntreedata.(['VolBranch',dist]) = BranchDist(1,:);\ntreedata.(['VolBranch1',dist]) = BranchDist(2,:);\ntreedata.(['AreBranch',dist]) = BranchDist(3,:);\ntreedata.(['AreBranch1',dist]) = BranchDist(4,:);\ntreedata.(['LenBranch',dist]) = BranchDist(5,:);\ntreedata.(['LenBranch1',dist]) = BranchDist(6,:);\ntreedata.(['NumBranch',dist]) = BranchDist(7,:);\ntreedata.(['NumBranch1',dist]) = BranchDist(8,:);\nend\n\n\nfunction treedata = branch_order_distribution(treedata,branch)\n%% Branch order distributions\n% Volume, area, length and number of branches as a function of 
branch order\nBO = max(branch.order);\nBranchOrdDist = zeros(BO,4);\nfor i = 1:max(1,BO)\n I = branch.order == i;\n BranchOrdDist(i,1) = sum(branch.volume(I)); % volumes\n BranchOrdDist(i,2) = sum(branch.area(I)); % areas\n BranchOrdDist(i,3) = sum(branch.length(I)); % lengths\n BranchOrdDist(i,4) = nnz(I); % number of ith-order branches\nend\ntreedata.VolBranchOrd = BranchOrdDist(:,1)';\ntreedata.AreBranchOrd = BranchOrdDist(:,2)';\ntreedata.LenBranchOrd = BranchOrdDist(:,3)';\ntreedata.NumBranchOrd = BranchOrdDist(:,4)';\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "initial_boundary_curve.m", "ext": ".m", "path": "TreeQSM-master/src/triangulation/initial_boundary_curve.m", "size": 6528, "source_encoding": "utf_8", "md5": "e1d5805313e080d63fe8c8cd0fe44b2e", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction Curve = initial_boundary_curve(P,TriaWidth)\n\n% ---------------------------------------------------------------------\n% INITIAL_BOUNDARY_CURVE.M Determines the boundary curve adaptively.\n%\n% Version 1.0.1\n% Latest update 26 Nov 2019\n%\n% Copyright (C) 2015-2017 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Changes from version 1.0.0 to 1.0.1, 26 Nov 2019:\n% 1) Bug fix: Added \"return\" if the \"Curve\" is empty after it is first defined.\n\n%% Define suitable center\n% Use xy-data and even the z-coordinate to the top\nTop = max(P(:,3));\nP = [P(:,1:2) Top*ones(size(P,1),1)];\n\n% Define the \"center\" of points as the mean\nCenter = mean(P);\nCenter0 = Center;\n\n% If the center is outside or close to the boundary, define new center\ni = 0;\nA0 = 61;\nShortestDist = 0;\nwhile ShortestDist < 0.075 && i < 100\n Center = Center0+[3*ShortestDist*randn(1,2) 0]; % Randomly move the center\n % Compute angles of points as seen from the center\n V = mat_vec_subtraction(P(:,1:2),Center(1:2));\n angle = 180/pi*atan2(V(:,2),V(:,1))+180;\n % % Check if the center is outside or near the boundary of the cross section\n A = false(70,1);\n a = ceil(angle/5);\n I = a > 0;\n A(a(I)) = true;\n if i == 0\n ShortestDist = 0.025;\n elseif nnz(A) < A0\n ShortestDist = 0.05;\n else\n PointDist = sqrt(sum(V.*V,2));\n [ShortestDist,FirstPoint] = min(PointDist);\n end\n i = i+1;\n if i == 100 && ShortestDist < 0.075\n i = 0;\n A0 = A0-2;\n end\nend\n\n%% Define first boundary curve based on the center\nCurve = zeros(18,1); % the boundary curve, contains indexed of the point cloud rows\nCurve(1) = FirstPoint; % start the curve from the point the closest the center\n% Modify the angles so that first point has the angle 0\na0 = angle(FirstPoint);\nI = angle < a0;\nangle(I) = angle(I)+(360-a0);\nangle(~I) = angle(~I)-a0;\n% Select the rest of the points as the closest point in 15 deg sectors\n% centered at 20 deg intervals\nnp = size(P,1);\nInd = (1:1:np)';\nt = 0;\nfor i = 2:18\n J = angle > 12.5+20*(i-2) & angle < 
27.5+20*(i-2);\n if ~any(J) % if no points, try 18 deg sector\n J = angle > 11+20*(i-2) & angle < 29+20*(i-2);\n end\n if any(J)\n % if sector has points, select the closest point as the curve point\n D = PointDist(J);\n ind = Ind(J);\n [~,J] = min(D);\n t = t+1;\n Curve(t) = ind(J);\n end\nend\nCurve = Curve(1:t);\nif isempty(Curve)\n return\nend\nI = true(np,1);\nI(Curve) = false;\nInd = Ind(I);\n\n\n%% Adapt the initial curve to the data\nV = P(Curve([(2:t)'; 1]),:)-P(Curve,:);\nD = sqrt(sum(V(:,1:2).*V(:,1:2),2));\nn = t;\nn0 = 1;\n% Continue adding new points as long as too long edges exists\nwhile any(D > 1.25*TriaWidth) && n > n0\n N = [V(:,2) -V(:,1) V(:,3)];\n M = P(Curve,:)+0.5*V;\n\n Curve1 = Curve;\n t = 0;\n for i = 1:n\n if D(i) > 1.25*TriaWidth\n [d,~,hc] = distances_to_line(P(Curve1,:),N(i,:),M(i,:));\n I = hc > 0.01 & d < D(i)/2;\n if any(I)\n H = min(hc(I));\n else\n H = 1;\n end\n [d,~,h] = distances_to_line(P(Ind,:),N(i,:),M(i,:));\n I = d < D(i)/3 & h > -TriaWidth/2 & h < H;\n\n if any(I)\n ind = Ind(I);\n h = h(I);\n [h,J] = min(h);\n I = ind(J);\n\n t = t+1;\n if i < n\n Curve1 = [Curve1(1:t); I; Curve1(t+1:end)];\n else\n Curve1 = [Curve1(1:t); I];\n end\n J = Ind ~= I;\n Ind = Ind(J);\n t = t+1;\n\n else\n t = t+1;\n end\n else\n t = t+1;\n end\n end\n Curve = Curve1(1:t);\n\n n0 = n;\n n = size(Curve,1);\n V = P(Curve([(2:n)'; 1]),:)-P(Curve,:);\n D = sqrt(sum(V.*V,2));\nend\n\n%% Refine the curve for longer edges if far away points\nn0 = n-1;\nwhile n > n0\n N = [V(:,2) -V(:,1) V(:,3)];\n M = P(Curve,:)+0.5*V;\n\n Curve1 = Curve;\n t = 0;\n for i = 1:n\n if D(i) > 0.5*TriaWidth\n [d,~,hc] = distances_to_line(P(Curve1,:),N(i,:),M(i,:));\n I = hc > 0.01 & d < D(i)/2;\n if any(I)\n H = min(hc(I));\n else\n H = 1;\n end\n [d,~,h] = distances_to_line(P(Ind,:),N(i,:),M(i,:));\n I = d < D(i)/3 & h > -TriaWidth/3 & h < H;\n ind = Ind(I);\n h = h(I);\n [h,J] = min(h);\n\n if h > TriaWidth/10\n I = ind(J);\n t = t+1;\n if i < n\n Curve1 = [Curve1(1:t); I; Curve1(t+1:end)];\n else\n Curve1 = [Curve1(1:t); I];\n end\n J = Ind ~= I;\n Ind = Ind(J);\n t = t+1;\n\n else\n t = t+1;\n end\n else\n t = t+1;\n end\n\n end\n Curve = Curve1(1:t);\n\n n0 = n;\n n = size(Curve,1);\n V = P(Curve([(2:n)'; 1]),:)-P(Curve,:);\n D = sqrt(sum(V.*V,2));\nend\n\n%% Smooth the curve by defining the points by means of neighbors\nCurve = P(Curve,:); % Change the curve from point indexes to coordinates\nCurve = boundary_curve2(P,Curve,0.04,TriaWidth);\nif isempty(Curve)\n return\nend\n\n%% Add points for too long edges\nn = size(Curve,1);\nV = Curve([(2:n)'; 1],:)-Curve;\nD = sqrt(sum(V.*V,2));\nCurve1 = Curve;\nt = 0;\nfor i = 1:n\n if D(i) > TriaWidth\n m = floor(D(i)/TriaWidth);\n t = t+1;\n W = zeros(m,3);\n for j = 1:m\n W(j,:) = Curve(i,:)+j/(m+1)*V(i,:);\n end\n Curve1 = [Curve1(1:t,:); W; Curve1(t+1:end,:)];\n t = t+m ;\n else\n t = t+1;\n end\nend\nCurve = Curve1;\nn = size(Curve,1);\n\n%% Define the curve again by equalising the point distances along the curve\nV = Curve([(2:n)'; 1],:)-Curve;\nD = sqrt(sum(V.*V,2));\nL = cumsum(D);\nm = ceil(L(end)/TriaWidth);\nTriaWidth = L(end)/m;\nCurve1 = zeros(m,3);\nCurve1(1,:) = Curve(1,:);\nb = 1;\nfor i = 2:m\n while L(b) < (i-1)*TriaWidth\n b = b+1;\n end\n if b > 1\n a = ((i-1)*TriaWidth-L(b-1))/D(b);\n Curve1(i,:) = Curve(b,:)+a*V(b,:);\n else\n a = (L(b)-(i-1)*TriaWidth)/D(b);\n Curve1(i,:) = Curve(b,:)+a*V(b,:);\n end\nend\nCurve = Curve1;\n\nIntersect = check_self_intersection(Curve(:,1:2));\nif Intersect\n Curve = zeros(0,3);\nend\n\n"} 
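The two triangulation helpers in the surrounding records, initial_boundary_curve.m above and boundary_curve2.m below, work as a pair: the first seeds a boundary curve on a single cross-section of the stem point cloud, and the second refines a previously found curve against the points of the next cross-section. The MATLAB sketch below illustrates that call pattern on synthetic data; it is not taken from TreeQSM. The circular slices, the TriaWidth and rball values, and the assumption that the TreeQSM src/triangulation and src/tools folders are on the path (so helpers such as distances_to_line, mat_vec_subtraction and check_self_intersection resolve) are all assumptions of the sketch.

% Usage sketch (illustrative assumptions only): requires the TreeQSM
% src/triangulation and src/tools folders on the MATLAB path.
rng(1);                                   % reproducible synthetic data
theta = linspace(0, 2*pi, 500)';          % 500 points around a circle

% Synthetic cross-section at z = 1.00 m: noisy circle, radius ~0.25 m
R1 = 0.25 + 0.005*randn(500,1);
Slice1 = [R1.*cos(theta), R1.*sin(theta), ones(500,1)];

TriaWidth = 0.04;                         % assumed triangle width (m)

% Seed the boundary curve from the lowest slice
Curve = initial_boundary_curve(Slice1, TriaWidth);

% Next cross-section, 5 cm higher and slightly narrower
R2 = 0.24 + 0.005*randn(500,1);
Slice2 = [R2.*cos(theta), R2.*sin(theta), 1.05*ones(500,1)];

% Adapt the previous curve to the new slice; rball = 0.04 m is the ball
% radius used to segment the slice points around the old curve points
if ~isempty(Curve)
    Curve = boundary_curve2(Slice2, Curve, 0.04, TriaWidth);
end

Within TreeQSM these calls are made by the triangulation routines themselves; the sketch only demonstrates the expected input and output shapes (n-by-3 point slices in, an m-by-3 curve of ordered boundary points out, or an empty 0-by-3 matrix when the detected curve self-intersects).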
+{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "boundary_curve2.m", "ext": ".m", "path": "TreeQSM-master/src/triangulation/boundary_curve2.m", "size": 4546, "source_encoding": "utf_8", "md5": "66ccb1233259456e8b6ba495d5ff178a", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction Curve = boundary_curve2(P,Curve0,rball,dmax)\n\n% ---------------------------------------------------------------------\n% BOUNDARY_CURVE2.M Determines the boundary curve based on the\n% previously defined boundary curve.\n%\n% Version 1.0\n% Latest update 16 Aug 2017\n%\n% Copyright (C) 2015-2017 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Inputs:\n% P Point cloud of the cross section\n% Curve0 Seed points from previous cross section curve\n% rball Radius of the balls centered at seed points\n% dmax Maximum distance between concecutive curve points, if larger,\n% then create a new one between the points\n\n\n%% Partition the point cloud into cubes\nMin = double(min([P(:,1:2); Curve0(:,1:2)]));\nMax = double(max([P(:,1:2); Curve0(:,1:2)]));\nN = double(ceil((Max-Min)/rball)+5);\nCC = floor([P(:,1)-Min(1) P(:,2)-Min(2)]/rball)+3; % cube coordinates of the section points\n% Sorts the points according a lexicographical order\nS = [CC(:,1) CC(:,2)-1]*[1 N(1)]';\n[S,I] = sort(S);\n% Define \"partition\"\nnp = size(P,1);\npartition = cell(N(1),N(2));\np = 1; % The index of the point under comparison\nwhile p <= np\n t = 1;\n while (p+t <= np) && (S(p) == S(p+t))\n t = t+1;\n end\n q = I(p);\n partition{CC(q,1),CC(q,2)} = I(p:p+t-1);\n p = p+t;\nend\n\n\n%% Define segments using the previous points\nCC = floor([Curve0(:,1)-Min(1) Curve0(:,2)-Min(2)]/rball)+3; % cube coordinates of the seed points\nI = CC < 3;\nCC(I) = 3;\nnc = size(Curve0,1); % number of sets\nDist = 1e8*ones(np,1); % distance of point to the closest center\nSoP = zeros(np,1); % the segment the points belong to\nRadius = rball^2;\nfor i = 1:nc\n points = partition(CC(i,1)-1:CC(i,1)+1,CC(i,2)-1:CC(i,2)+1);\n points = vertcat(points{:});\n V = [P(points,1)-Curve0(i,1) P(points,2)-Curve0(i,2)];\n dist = sum(V.*V,2);\n PointsInBall = dist < Radius;\n points = points(PointsInBall);\n dist = dist(PointsInBall);\n D = Dist(points);\n L = dist < D;\n I = points(L);\n Dist(I) = dist(L);\n SoP(I) = i;\nend\n\n%% Finalise the segments\n% Number of points in each segment and index of each point in its segment\nNum = zeros(nc,1);\nIndPoints = zeros(np,1);\nfor i = 1:np\n if SoP(i) > 0\n Num(SoP(i)) = Num(SoP(i))+1;\n IndPoints(i) = Num(SoP(i));\n end\nend\n% Continue if enough non-emtpy segments\nif nnz(Num) > 0.05*nc\n % Initialization of the \"Seg\"\n Seg = cell(nc,1);\n for i = 1:nc\n Seg{i} = zeros(Num(i),1);\n end\n % Define the \"Seg\"\n for i = 1:np\n if SoP(i) > 0\n Seg{SoP(i),1}(IndPoints(i),1) = i;\n end\n end\n\n %% Define the new curve points as the average of the segments\n Curve = 
zeros(nc,3); % the new boundary curve\n for i = 1:nc\n S = Seg{i};\n if ~isempty(S)\n Curve(i,:) = mean(P(S,:),1);\n if norm(Curve(i,:)-Curve0(i,:)) > 1.25*dmax\n Curve(i,:) = Curve0(i,:);\n end\n else\n Curve(i,:) = Curve0(i,:);\n end\n end\n\n %% Add new points if too large distances\n V = Curve([2:end 1],:)-Curve(1:end,:);\n d = sum(V.*V,2);\n Large = d > dmax^2;\n m = nnz(Large);\n if m > 0\n Curve0 = zeros(nc+m,3);\n t = 0;\n for i = 1:nc\n if Large(i)\n t = t+1;\n Curve0(t,:) = Curve(i,:);\n t = t+1;\n Curve0(t,:) = Curve(i,:)+0.5*V(i,:);\n else\n t = t+1;\n Curve0(t,:) = Curve(i,:);\n end\n end\n Curve = Curve0;\n end\n\n %% Remove new points if too small distances\n nc = size(Curve,1);\n V = Curve([2:end 1],:)-Curve(1:end,:);\n d = sum(V.*V,2);\n Small = d < (0.333*dmax)^2;\n m = nnz(Small);\n if m > 0\n for i = 1:nc-1\n if Small(i) && Small(i+1)\n Small(i+1) = false;\n end\n end\n if ~Small(nc) && Small(1)\n Small(1) = false;\n Small(nc) = true;\n end\n Curve = Curve(~Small,:);\n end\n\nelse\n % If not enough new points, return the old curve\n Curve = Curve0;\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "boundary_curve.m", "ext": ".m", "path": "TreeQSM-master/src/triangulation/boundary_curve.m", "size": 8054, "source_encoding": "utf_8", "md5": "8dbebbed345eaa90bcef38e7c4e1da9f", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
If not, see .\n\nfunction [Curve,Ind] = boundary_curve(P,Curve0,rball,dmax)\n\n% ---------------------------------------------------------------------\n% BOUNDARY_CURVE.M Determines the boundary curve based on the\n% previously defined boundary curve.\n%\n% Version 1.1.0\n% Latest update 3 May 2022\n%\n% Copyright (C) 2015-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Inputs:\n% P Point cloud of the cross section\n% Curve0 Seed points from previous cross section curve\n% rball Radius of the balls centered at seed points\n% dmax Maximum distance between concecutive curve points, if larger,\n% then create a new one between the points\n% ---------------------------------------------------------------------\n\n% Changes from version 1.0.0 to 1.1.0, 3 May 2022:\n% 1) Increased the cubical neighborhood in the generation of the segments\n\n%% Partition the point cloud into cubes\nMin = double(min([P(:,1:2); Curve0(:,1:2)]));\nMax = double(max([P(:,1:2); Curve0(:,1:2)]));\nN = double(ceil((Max-Min)/rball)+5);\n% cube coordinates of the section points\nCC = floor([P(:,1)-Min(1) P(:,2)-Min(2)]/rball)+3;\n% Sorts the points according a lexicographical order\nS = [CC(:,1) CC(:,2)-1]*[1 N(1)]';\n[S,I] = sort(S);\n% Define \"partition\"\nnp = size(P,1);\npartition = cell(N(1),N(2));\np = 1; % The index of the point under comparison\nwhile p <= np\n t = 1;\n while (p+t <= np) && (S(p) == S(p+t))\n t = t+1;\n end\n q = I(p);\n partition{CC(q,1),CC(q,2)} = I(p:p+t-1);\n p = p+t;\nend\n\n\n%% Define segments using the previous points\n% cube coordinates of the seed points:\nCC = floor([Curve0(:,1)-Min(1) Curve0(:,2)-Min(2)]/rball)+3;\nI = CC < 3;\nCC(I) = 3;\nnc = size(Curve0,1); % number of sets\nDist = 1e8*ones(np,1); % distance of point to the closest center\nSoP = zeros(np,1); % the segment the points belong to\nRadius = rball^2;\nfor i = 1:nc\n points = partition(CC(i,1)-2:CC(i,1)+2,CC(i,2)-2:CC(i,2)+2);\n points = vertcat(points{:});\n V = [P(points,1)-Curve0(i,1) P(points,2)-Curve0(i,2)];\n dist = sum(V.*V,2);\n PointsInBall = dist < Radius;\n points = points(PointsInBall);\n dist = dist(PointsInBall);\n D = Dist(points);\n L = dist < D;\n I = points(L);\n Dist(I) = dist(L);\n SoP(I) = i;\nend\n\n%% Finalise the segments\n% Number of points in each segment and index of each point in its segment\nNum = zeros(nc,1);\nIndPoints = zeros(np,1);\nfor i = 1:np\n if SoP(i) > 0\n Num(SoP(i)) = Num(SoP(i))+1;\n IndPoints(i) = Num(SoP(i));\n end\nend\n% Continue if enough non-emtpy segments\nif nnz(Num) > 0.05*nc\n % Initialization of the \"Seg\"\n Seg = cell(nc,1);\n for i = 1:nc\n Seg{i} = zeros(Num(i),1);\n end\n % Define the \"Seg\"\n for i = 1:np\n if SoP(i) > 0\n Seg{SoP(i),1}(IndPoints(i),1) = i;\n end\n end\n\n %% Define the new curve points as the average of the segments\n Curve = zeros(nc,3); % the new boundary curve\n Empty = false(nc,1);\n for i = 1:nc\n S = Seg{i};\n if ~isempty(S)\n Curve(i,:) = mean(P(S,:),1);\n if norm(Curve(i,:)-Curve0(i,:)) > 1.25*dmax\n Curve(i,:) = Curve0(i,:);\n end\n else\n Empty(i) = true;\n end\n end\n\n %% Interpolate for empty segments\n % For empty segments create points by interpolation from neighboring \n % non-empty segments\n if any(Empty)\n for i = 1:nc\n if Empty(i)\n if i > 1 && i < nc\n k = 0;\n while i+k <= nc && Empty(i+k)\n k = k+1;\n end\n if i+k <= nc\n LineEle = Curve(i+k,:)-Curve(i-1,:);\n else\n LineEle = Curve(1,:)-Curve(i-1,:);\n end\n if k < 5\n for j = 1:k\n Curve(i+j-1,:) = 
Curve(i-1,:)+j/(k+1)*LineEle;\n end\n else\n Curve(i:i+k-1,:) = Curve0(i:i+k-1,:);\n end\n elseif i == 1\n a = 0;\n while Empty(end-a)\n a = a+1;\n end\n b = 1;\n while Empty(b)\n b = b+1;\n end\n LineEle = Curve(b,:)-Curve(nc-a,:);\n n = a+b-1;\n if n < 5\n for j = 1:a-1\n Curve(nc-a+1+j,:) = Curve(nc-a,:)+j/n*LineEle;\n end\n for j = 1:b-1\n Curve(j,:) = Curve(nc-a,:)+(j+a-1)/n*LineEle;\n end\n else\n Curve(nc-a+2:nc,1:2) = Curve0(nc-a+2:nc,1:2);\n Curve(nc-a+2:nc,3) = Curve0(nc-a+2:nc,3);\n Curve(1:b-1,1:2) = Curve0(1:b-1,1:2);\n Curve(1:b-1,3) = Curve0(1:b-1,3);\n end\n elseif i == nc\n LineEle = Curve(1,:)-Curve(nc-1,:);\n Curve(i,:) = Curve(nc-1,:)+0.5*LineEle;\n end\n end\n end\n end\n\n % Correct the height\n Curve(:,3) = min(Curve(:,3));\n\n % Check self-intersection\n [Intersect,IntersectLines] = check_self_intersection(Curve(:,1:2));\n\n % If self-intersection, try to modify the curve\n j = 1;\n while Intersect && j <= 5\n n = size(Curve,1);\n InterLines = (1:1:n)';\n NumberOfIntersections = cellfun('length',IntersectLines(:,1));\n I = NumberOfIntersections > 0;\n InterLines = InterLines(I);\n CrossLen = vertcat(IntersectLines{I,2});\n if length(CrossLen) == length(InterLines)\n LineEle = Curve([2:end 1],:)-Curve(1:end,:);\n d = sqrt(sum(LineEle.*LineEle,2));\n m = length(InterLines);\n for i = 1:2:m\n if InterLines(i) ~= n\n Curve(InterLines(i)+1,:) = Curve(InterLines(i),:)+...\n 0.9*CrossLen(i)/d(InterLines(i))*LineEle(InterLines(i),:);\n else\n Curve(1,:) = Curve(InterLines(i),:)+...\n 0.9*CrossLen(i)/d(InterLines(i))*LineEle(InterLines(i),:);\n end\n end\n [Intersect,IntersectLines] = check_self_intersection(Curve(:,1:2));\n j = j+1;\n else\n j = 6;\n end\n end\n\n %% Add new points if too large distances\n LineEle = Curve([2:end 1],:)-Curve(1:end,:);\n d = sum(LineEle.*LineEle,2);\n Large = d > dmax^2;\n m = nnz(Large);\n if m > 0\n Curve0 = zeros(nc+m,3);\n Ind = zeros(nc+m,2);\n t = 0;\n for i = 1:nc\n if Large(i)\n t = t+1;\n Curve0(t,:) = Curve(i,:);\n if i < nc\n Ind(t,:) = [i i+1];\n else\n Ind(t,:) = [i 1];\n end\n t = t+1;\n Curve0(t,:) = Curve(i,:)+0.5*LineEle(i,:);\n if i < nc\n Ind(t,:) = [i+1 0];\n else\n Ind(t,:) = [1 0];\n end\n else\n t = t+1;\n Curve0(t,:) = Curve(i,:);\n if i < nc\n Ind(t,:) = [i i+1];\n else\n Ind(t,:) = [i 1];\n end\n end\n end\n Curve = Curve0;\n\n else\n Ind = [(1:1:nc)' [(2:1:nc)'; 1]];\n end\n\n\n %% Remove new points if too small distances\n nc = size(Curve,1);\n LineEle = Curve([2:end 1],:)-Curve(1:end,:);\n d = sum(LineEle.*LineEle,2);\n Small = d < (0.333*dmax)^2;\n m = nnz(Small);\n if m > 0\n for i = 1:nc-1\n if ~Small(i) && Small(i+1)\n Ind(i,2) = -1;\n elseif Small(i) && Small(i+1)\n Small(i+1) = false;\n end\n end\n if ~Small(nc) && Small(1)\n Ind(nc,2) = -1;\n Ind(1,2) = -1;\n Small(1) = false;\n Small(nc) = true;\n I = Ind(:,2) > 0;\n Ind(2:end,1) = Ind(2:end,1)+1;\n Ind(I,2) = Ind(I,2)+1;\n\n end\n Ind = Ind(~Small,:);\n Curve = Curve(~Small,:);\n end\n\nelse\n % If not enough new points, return the old curve\n Ind = [(1:1:nc)' [(2:1:nc)'; 1]];\n Curve = Curve0;\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "curve_based_triangulation.m", "ext": ".m", "path": "TreeQSM-master/src/triangulation/curve_based_triangulation.m", "size": 16621, "source_encoding": "utf_8", "md5": "0a258bbf13767bf5a6c076d151b6307f", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published 
by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction triangulation = curve_based_triangulation(P,TriaHeight,TriaWidth)\n\n% ---------------------------------------------------------------------\n% CURVE_BASED_TRIANGULATION.M Reconstructs a triangulation for the\n% stem-buttress surface based on boundary curves\n%\n% Version 1.1.0\n% Latest update 3 May 2022\n%\n% Copyright (C) 2015-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Inputs:\n% P Point cloud of the stem to be triangulated\n% TriaHeight Height of the triangles\n% TriaWidth Width of the triangles\n%\n% Output:\n% triangulation Structure field defining the triangulation. Contains\n% the following main fields:\n% vert Vertices of the triangulation model (nv x 3)-matrix\n% facet Facets (triangles) of the triangulation \n% (the vertices forming the facets)\n% fvd Color information of the facets for plotting with \"patch\"\n% volume Volume enclosed by the facets in liters\n% bottom The z-coordinate of the bottom of the model\n% top The z-coordinate of the top of the model\n% triah TriaHeight\n% triaw TriaWidth\n% ---------------------------------------------------------------------\n\n% Changes from version 1.0.2 to 1.1.0, 3 May 2022:\n% 1) Increased the radius of the balls at seed points from TriaWidth to \n% 2*TriaWidth in the input of \"boundary_curve\"\n% 2) Added triangle orientation check after the side is covered with\n% triangles so that the surface normals are pointing outward \n% 3) Modified the check if the new boundary curve changes only a little and \n% then stop reconstruction \n% 4) Added halving the triangle height if the boundary curve length has\n% increased three times.\n% 5) Changed the bottom level from the smallest z-coordinate to the \n% average of the lowest 100 z-coordinates. 
\n% 6) Minor streamlining the code and added more comments\n\n% Changes from version 1.0.2 to 1.0.3, 11 Aug 2020:\n% 1) Small changes in the code when computing the delaunay triangulation\n% of the top layer\n\n% Changes from version 1.0.1 to 1.0.2, 15 Jan 2020:\n% 1) Added side surface areas (side, top, bottom) to output as fields\n\n% Changes from version 1.0.0 to 1.0.1, 26 Nov 2019:\n% 1) Removed the plotting of the triangulation model at the end of the code\n\n%% Determine the first boundary curve\nnp = size(P,1);\n[~,I] = sort(P(:,3),'descend');\nP = P(I,:);\nHbot = mean(P(end-100:end,3));\nHtop = P(1,3);\nN = ceil((Htop-Hbot)/TriaHeight);\nVert = zeros(1e5,3);\nTria = zeros(1e5,3);\nTriaLay = zeros(1e5,1);\nVertLay = zeros(1e5,1,'uint16');\nCurve = zeros(0,3);\ni = 0; % the layer whose cross section is under reconstruction\nps = 1;\nwhile P(ps,3) > Htop-i*TriaHeight\n ps = ps+1;\nend\npe = ps;\nwhile i < N/4 && isempty(Curve)\n % Define thin horizontal cross section of the stem\n i = i+1;\n ps = pe+1;\n k = 1;\n while P(ps+k,3) > Htop-i*TriaHeight\n k = k+1;\n end\n pe = ps+k-1;\n PSection = P(ps:pe,:);\n\n % Create initial boundary curve:\n iter = 0;\n while iter <= 15 && isempty(Curve)\n iter = iter+1;\n Curve = initial_boundary_curve(PSection,TriaWidth);\n end\nend\n\nif isempty(Curve)\n triangulation = zeros(0,1);\n disp(' No triangulation: Problem with the first curve')\n return\nend\n\n% make the height of the curve even:\nCurve(:,3) = max(Curve(:,3));\n% Save vertices:\nnv = size(Curve,1); % number of vertices in the curve\nVert(1:nv,:) = Curve;\nVertLay(1:nv) = i;\nt = 0;\nm00 = size(Curve,1);\n\n%% Determine the other boundary curves and the triangulation downwards\ni0 = i;\ni = i0+1;\nnv0 = 0;\nLayerBottom = Htop-i*TriaHeight;\nwhile i <= N && pe < np\n %% Define thin horizontal cross section of the stem\n ps = pe+1;\n k = 1;\n while ps+k <= np && P(ps+k,3) > LayerBottom\n k = k+1;\n end\n pe = ps+k-1;\n PSection = P(ps:pe,:);\n\n %% Create boundary curves using the previous curves as seeds\n if i > i0+1\n nv0 = nv1;\n end\n % Define seed points:\n Curve(:,3) = Curve(:,3)-TriaHeight;\n Curve0 = Curve;\n\n % Create new boundary curve\n [Curve,Ind] = boundary_curve(PSection,Curve,2*TriaWidth,1.5*TriaWidth);\n\n if isempty(Curve)\n disp(' No triangulation: Empty curve')\n triangulation = zeros(0,1);\n return\n end\n Curve(:,3) = max(Curve(:,3));\n\n %% Check if the curve intersects itself\n [Intersect,IntersectLines] = check_self_intersection(Curve(:,1:2));\n\n %% If self-intersection, try to modify the curve\n j = 1;\n while Intersect && j <= 10\n n = size(Curve,1);\n CrossLines = (1:1:n)';\n NumberOfIntersections = cellfun('length',IntersectLines(:,1));\n I = NumberOfIntersections > 0;\n CrossLines = CrossLines(I);\n CrossLen = vertcat(IntersectLines{I,2});\n if length(CrossLen) == length(CrossLines)\n LineEle = Curve([2:end 1],:)-Curve(1:end,:);\n d = sqrt(sum(LineEle.*LineEle,2));\n m = length(CrossLines);\n for k = 1:2:m\n if CrossLines(k) ~= n\n Curve(CrossLines(k)+1,:) = Curve(CrossLines(k),:)+...\n 0.9*CrossLen(k)/d(CrossLines(k))*LineEle(CrossLines(k),:);\n else\n Curve(1,:) = Curve(CrossLines(k),:)+...\n 0.9*CrossLen(k)/d(CrossLines(k))*LineEle(CrossLines(k),:);\n end\n end\n [Intersect,IntersectLines] = check_self_intersection(Curve(:,1:2));\n j = j+1;\n else\n j = 11;\n end\n end\n\n m = size(Curve,1);\n if Intersect\n %% Curve self-intersects, use previous curve to extrapolate to the bottom\n H = Curve0(1,3)-Hbot;\n if H > 0.75 && Intersect\n triangulation = 
zeros(0,1);\n disp([' No triangulation: Self-intersection at ',...\n num2str(H),' m from the bottom'])\n return\n end\n Curve = Curve0;\n Curve(:,3) = Curve(:,3)-TriaHeight;\n Nadd = floor(H/TriaHeight)+1;\n m = size(Curve,1);\n Ind = [(1:1:m)' [(2:1:m)'; 1]];\n T = H/Nadd;\n for k = 1:Nadd\n if k > 1\n Curve(:,3) = Curve(:,3)-T;\n end\n Vert(nv+1:nv+m,:) = Curve;\n VertLay(nv+1:nv+m) = i;\n %% Define the triangulation between two boundary curves\n nv1 = nv;\n nv = nv+m;\n t0 = t+1;\n pass = false;\n for j = 1:m\n if Ind(j,2) > 0 && j < m\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,:)];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,2) nv1+j+1];\n elseif Ind(j,2) > 0 && ~pass\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,:)];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,2) nv1+1];\n elseif Ind(j,2) == 0 && j < m\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv1+j+1];\n elseif Ind(j,2) == 0 && ~pass\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv1+1];\n elseif j == 1 && Ind(j,2) == -1\n t = t+1;\n Tria(t,:) = [nv nv1 nv0+1];\n t = t+1;\n Tria(t,:) = [nv nv0+1 nv1+1];\n t = t+1;\n Tria(t,:) = [nv0+1 nv0+2 nv1+1];\n t = t+1;\n Tria(t,:) = [nv1+1 nv0+2 nv0+3];\n t = t+1;\n Tria(t,:) = [nv1+1 nv0+3 nv1+2];\n pass = true;\n elseif Ind(j,2) == -1 && j < m\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv0+Ind(j,1)+1];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1)+1 nv1+j+1];\n t = t+1;\n Tria(t,:) = [nv0+Ind(j,1)+1 nv0+Ind(j,1)+2 nv1+j+1];\n elseif Ind(j,2) == -1 && ~pass\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv0+Ind(j,1)+1];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1)+1 nv1+1];\n t = t+1;\n Tria(t,:) = [nv0+Ind(j,1)+1 nv0+1 nv1+1];\n end\n end\n\n TriaLay(t0:t) = i;\n i = i+1;\n nv0 = nv1;\n end\n i = N+1;\n\n else\n %% No self-intersection, proceed with triangulation and new curves\n Vert(nv+1:nv+m,:) = Curve;\n VertLay(nv+1:nv+m) = i;\n\n %% If little change between Curve and Curve0, stop the reconstruction\n C = intersect(Curve0,Curve,\"rows\");\n if size(C,1) > 0.7*size(Curve,1)\n N = i;\n end\n\n %% If the boundary curve has grown much longer than originally, then\n % decrease the triangle height\n if m > 3*m00\n TriaHeight = TriaHeight/2; % use half the height\n N = N+ceil((N-i)/2); % update the number of layers \n m00 = m;\n end\n\n %% Define the triangulation between two boundary curves\n nv1 = nv;\n nv = nv+m;\n t0 = t+1;\n pass = false;\n for j = 1:m\n if Ind(j,2) > 0 && j < m\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,:)];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,2) nv1+j+1];\n elseif Ind(j,2) > 0 && ~pass\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,:)];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,2) nv1+1];\n elseif Ind(j,2) == 0 && j < m\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv1+j+1];\n elseif Ind(j,2) == 0 && ~pass\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv1+1];\n elseif j == 1 && Ind(j,2) == -1\n t = t+1;\n Tria(t,:) = [nv nv1 nv0+1];\n t = t+1;\n Tria(t,:) = [nv nv0+1 nv1+1];\n t = t+1;\n Tria(t,:) = [nv0+1 nv0+2 nv1+1];\n t = t+1;\n Tria(t,:) = [nv1+1 nv0+2 nv0+3];\n t = t+1;\n Tria(t,:) = [nv1+1 nv0+3 nv1+2];\n pass = true;\n elseif Ind(j,2) == -1 && j < m\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv0+Ind(j,1)+1];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1)+1 nv1+j+1];\n t = t+1;\n Tria(t,:) = [nv0+Ind(j,1)+1 nv0+Ind(j,1)+2 nv1+j+1];\n elseif Ind(j,2) == -1 && ~pass\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1) nv0+Ind(j,1)+1];\n t = t+1;\n Tria(t,:) = [nv1+j nv0+Ind(j,1)+1 nv1+1];\n t = t+1;\n Tria(t,:) = [nv0+Ind(j,1)+1 nv0+1 nv1+1];\n end\n end\n\n TriaLay(t0:t) = i;\n i = 
i+1;\n LayerBottom = LayerBottom-TriaHeight;\n end\n\nend\nVert = Vert(1:nv,:);\nVertLay = VertLay(1:nv);\nTria = Tria(1:t,:);\nTriaLay = TriaLay(1:t);\n\n%% Check the orientation of the triangles \n% so that surface normals are outward pointing\na = round(t/10); % select the top triangles\nU = Vert(Tria(1:a,2),:)-Vert(Tria(1:a,1),:);\nV = Vert(Tria(1:a,3),:)-Vert(Tria(1:a,1),:);\nCenter = mean(Vert(1:nv-1,:)); % the center of the stem\nC = Vert(Tria(1:a,1),:)+0.25*V+0.25*U;\nW = C(:,1:2)-Center(1:2); % vectors from the triagles to the stem's center\nNormals = cross(U,V);\nif nnz(sum(Normals(:,1:2).*W,2) < 0) > 0.5*length(C)\n Tria(1:t,1:2) = [Tria(1:t,2) Tria(1:t,1)];\nend\n\n% U = Vert(Tria(1:t,2),:)-Vert(Tria(1:t,1),:);\n% V = Vert(Tria(1:t,3),:)-Vert(Tria(1:t,1),:);\n% Normals = cross(U,V);\n% Normals = normalize(Normals);\n% C = Vert(Tria(1:t,1),:)+0.25*V+0.25*U;\n% fvd = ones(t,1);\n% figure(5)\n% point_cloud_plotting(P(1,:),5,6)\n% patch('Vertices',Vert,'Faces',Tria,'FaceVertexCData',fvd,'FaceColor','flat')\n% alpha(1)\n% hold on\n% arrow_plot(C,0.1*Normals,5)\n% hold off\n% axis equal\n% pause\n\n\n%% Remove possible double triangles\nnt = size(Tria,1);\nKeep = true(nt,1);\nScoord = Vert(Tria(:,1),:)+Vert(Tria(:,2),:)+Vert(Tria(:,3),:);\nS = sum(Scoord,2);\n[part,CC] = cubical_partition(Scoord,2*TriaWidth);\nfor j = 1:nt-1\n if Keep(j)\n points = part(CC(j,1)-1:CC(j,1)+1,CC(j,2)-1:CC(j,2)+1,CC(j,3)-1:CC(j,3)+1);\n points = vertcat(points{:});\n I = S(j) == S(points);\n J = points ~= j;\n I = I&J&Keep(points);\n if any(I)\n p = points(I);\n I = intersect(Tria(j,:),Tria(p,:));\n if length(I) == 3\n Keep(p) = false;\n end\n end\n end\nend\nTria = Tria(Keep,:);\nTriaLay = TriaLay(Keep);\n\n\n%% Generate triangles for the horizontal layers and compute the volumes\n% Triangles of the ground layer\n% Select the boundary curve:\nN = double(max(VertLay));\nI = VertLay == N;\nVert(I,3) = Hbot;\nind = (1:1:nv)';\nind = ind(I);\nCurve = Vert(I,:); % Boundary curve of the bottom\nn = size(Curve,1);\nif n < 10\n triangulation = zeros(0,1);\n disp(' No triangulation: Ground layer boundary curve too small')\n return\nend\n\n% Define Delaunay triangulation for the bottom\nC = zeros(n,2);\nC(:,1) = (1:1:n)';\nC(1:n-1,2) = (2:1:n)';\nC(n,2) = 1;\nwarning off\ndt = delaunayTriangulation(Curve(:,1),Curve(:,2),C);\nIn = dt.isInterior();\nGroundTria = dt(In,:);\nPoints = dt.Points;\nwarning on\nif size(Points,1) > size(Curve,1)\n disp(' No triangulation: Problem with delaunay in the bottom layer')\n triangulation = zeros(0,1);\n return\nend\nGroundTria0 = GroundTria;\nGroundTria(:,1) = ind(GroundTria(:,1));\nGroundTria(:,2) = ind(GroundTria(:,2));\nGroundTria(:,3) = ind(GroundTria(:,3));\n\n% Compute the normals and areas\nU = Curve(GroundTria0(:,2),:)-Curve(GroundTria0(:,1),:);\nV = Curve(GroundTria0(:,3),:)-Curve(GroundTria0(:,1),:);\nCg = Curve(GroundTria0(:,1),:)+0.25*V+0.25*U;\nNg = cross(U,V);\nI = Ng(:,3) > 0; % Check orientation\nNg(I,:) = -Ng(I,:);\nAg = 0.5*sqrt(sum(Ng.*Ng,2));\nNg = 0.5*[Ng(:,1)./Ag Ng(:,2)./Ag Ng(:,3)./Ag];\n\n% Remove possible negative area triangles:\nI = Ag > 0; Ag = Ag(I); Cg = Cg(I,:); Ng = Ng(I,:);\nGroundTria = GroundTria(I,:);\n\n% Update the triangles:\nTria = [Tria; GroundTria];\nTriaLay = [TriaLay; (N+1)*ones(size(GroundTria,1),1)];\n\nif abs(sum(Ag)-polyarea(Curve(:,1),Curve(:,2))) > 0.001*sum(Ag)\n disp(' No triangulation: Problem with delaunay in the bottom layer')\n triangulation = zeros(0,1);\n return\nend\n\n% Triangles of the top layer\n% Select the top 
curve:\nN = double(min(VertLay));\nI = VertLay == N;\nind = (1:1:nv)';\nind = ind(I);\nCurve = Vert(I,:);\nCenterTop = mean(Curve);\n% Delaunay triangulation of the top:\nn = size(Curve,1);\nC = zeros(n,2);\nC(:,1) = (1:1:n)';\nC(1:n-1,2) = (2:1:n)';\nC(n,2) = 1;\nwarning off\ndt = delaunayTriangulation(Curve(:,1),Curve(:,2),C);\nPoints = dt.Points;\nwarning on\nif min(size(dt)) == 0 || size(Points,1) > size(Curve,1)\n disp(' No triangulation: Problem with delaunay in the top layer')\n triangulation = zeros(0,1);\n return\nend\nIn = dt.isInterior();\nTopTria = dt(In,:);\nTopTria0 = TopTria;\nTopTria(:,1) = ind(TopTria(:,1));\nTopTria(:,2) = ind(TopTria(:,2));\nTopTria(:,3) = ind(TopTria(:,3));\n\n% Compute the normals and areas:\nU = Curve(TopTria0(:,2),:)-Curve(TopTria0(:,1),:);\nV = Curve(TopTria0(:,3),:)-Curve(TopTria0(:,1),:);\nCt = Curve(TopTria0(:,1),:)+0.25*V+0.25*U;\nNt = cross(U,V);\nI = Nt(:,3) < 0;\nNt(I,:) = -Nt(I,:);\nAt = 0.5*sqrt(sum(Nt.*Nt,2));\nNt = 0.5*[Nt(:,1)./At Nt(:,2)./At Nt(:,3)./At];\n\n% Remove possible negative area triangles:\nI = At > 0; At = At(I); Ct = Ct(I,:); Nt = Nt(I,:);\nTopTria = TopTria(I,:);\n\n% Update the triangles:\nTria = [Tria; TopTria];\nTriaLay = [TriaLay; N*ones(size(TopTria,1),1)];\n\nif abs(sum(At)-polyarea(Curve(:,1),Curve(:,2))) > 0.001*sum(At)\n disp(' No triangulation: Problem with delaunay in the top layer')\n triangulation = zeros(0,1);\n return\nend\n\n% Triangles of the side\nB = TriaLay <= max(VertLay) & TriaLay > 1;\nU = Vert(Tria(B,2),:)-Vert(Tria(B,1),:);\nV = Vert(Tria(B,3),:)-Vert(Tria(B,1),:);\nCs = Vert(Tria(B,1),:)+0.25*V+0.25*U;\nNs = cross(U,V);\nAs = 0.5*sqrt(sum(Ns.*Ns,2));\nNs = 0.5*[Ns(:,1)./As Ns(:,2)./As Ns(:,3)./As];\nI = As > 0; Ns = Ns(I,:); As = As(I); Cs = Cs(I,:);\n\n% Volumes in liters\nVTotal = sum(At.*sum(Ct.*Nt,2))+sum(As.*sum(Cs.*Ns,2))+sum(Ag.*sum(Cg.*Ng,2));\nVTotal = round(10000*VTotal/3)/10;\n\nif VTotal < 0\n disp(' No triangulation: Problem with volume')\n triangulation = zeros(0,1);\n return\nend\n\nV = Vert(Tria(:,1),1:2)-CenterTop(1:2);\nfvd = sqrt(sum(V.*V,2));\ntriangulation.vert = single(Vert);\ntriangulation.facet = uint16(Tria);\ntriangulation.fvd = single(fvd);\ntriangulation.volume = VTotal;\ntriangulation.SideArea = sum(As);\ntriangulation.BottomArea = sum(Ag);\ntriangulation.TopArea = sum(At);\ntriangulation.bottom = min(Vert(:,3));\ntriangulation.top = max(Vert(:,3));\ntriangulation.triah = TriaHeight;\ntriangulation.triaw = TriaWidth;\n\n% figure(5)\n% point_cloud_plotting(P,5,6)\n% patch('Vertices',Vert,'Faces',Tria,'FaceVertexCData',fvd,'FaceColor','flat')\n% % hold on\n% % arrow_plot(Cs,0.2*Ns,5)\n% % hold off\n% % axis equal\n% alpha(1)\n\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "check_self_intersection.m", "ext": ".m", "path": "TreeQSM-master/src/triangulation/check_self_intersection.m", "size": 6103, "source_encoding": "utf_8", "md5": "28cf4603e614bcb3a761c35f28e1964f", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction [Intersect,IntersectLines] = check_self_intersection(Curve)\n\n% The function takes in a curve (the coordinates of the vertices, in the\n% right order) and checks if the curve intersects itself\n%\n% Outputs:\n% Intersect Logical value indicating if the curve self-intersects\n% IntersectLines Cell array containing for each line element which are\n% the intersecting elements and how far away along\n% the line the intersection point is\n\n\nif ~isempty(Curve)\n dim = size(Curve,2); % two or three dimensional curve\n n = size(Curve,1); % number of points in the curve\n V = Curve([(2:n)'; 1],:)-Curve; % line elements forming the curve\n L = sqrt(sum(V.*V,2)); % the lengths of the line elements\n i = 1; % the line element under inspection\n Ind = (1:1:n)'; % indexes of the line elements\n if dim == 2 % 2d curves\n % directions (unit vectors) of the line elements:\n DirLines = [1./L.*V(:,1) 1./L.*V(:,2)]; \n Intersect = false;\n if nargout == 1 % check only if the curve intersects\n while i <= n-1 && ~Intersect\n % Select the line elements that can intersect element i\n if i > 1\n I = Ind > i+1 | Ind < i-1;\n else\n I = Ind > i+1 & Ind < n;\n end\n ind = Ind(I)';\n for j = ind\n % Solve for the crossing points of every line element\n A = [DirLines(j,:)' -DirLines(i,:)'];\n b = Curve(i,:)'-Curve(j,:)';\n Ainv = 1/(A(1,1)*A(2,2)-A(1,2)*A(2,1))*[A(2,2) -A(1,2); -A(2,1) A(1,1)];\n x = Ainv*b; % signed length along the line elements to the crossing\n if x(1) >= 0 && x(1) <= L(j) && x(2) >= 0 && x(2) <= L(i)\n Intersect = true;\n end\n end\n i = i+1; % study the next line element\n end\n else % determine also all intersection points (line elements)\n IntersectLines = cell(n,2);\n for i = 1:n-1\n % Select the line elements that can intersect element i\n if i > 1\n I = Ind > i+1 | Ind < i-1;\n else\n I = Ind > i+1 & Ind < n;\n end\n ind = Ind(I)';\n for j = ind\n % Solve for the crossing points of every line element\n A = [DirLines(j,:)' -DirLines(i,:)'];\n b = Curve(i,:)'-Curve(j,:)';\n Ainv = 1/(A(1,1)*A(2,2)-A(1,2)*A(2,1))*[A(2,2) -A(1,2); -A(2,1) A(1,1)];\n x = Ainv*b;\n if x(1) >= 0 && x(1) <= L(j) && x(2) >= 0 && x(2) <= L(i)\n Intersect = true;\n % which line elements cross element i:\n IntersectLines{i,1} = [IntersectLines{i,1}; j]; \n % which line elements cross element j:\n IntersectLines{j,1} = [IntersectLines{j,1}; i]; \n % distances along element i to intersection points:\n IntersectLines{i,2} = [IntersectLines{i,2}; x(1)]; \n % distances along element j to intersection points:\n IntersectLines{j,2} = [IntersectLines{j,2}; x(2)]; \n end\n end\n end\n % remove possible multiple values\n for i = 1:n\n IntersectLines{i,1} = unique(IntersectLines{i,1});\n IntersectLines{i,2} = min(IntersectLines{i,2});\n end\n end\n\n elseif dim == 3 % 3d curves\n % directions (unit vectors) of the line elements\n DirLines = [1./L.*V(:,1) 1./L.*V(:,2) 1./L.*V(:,3)];\n Intersect = false;\n if nargout == 1 % check only if the curve intersects\n while i <= n-1\n % Select the line elements that can intersect element i\n if i > 1\n I = Ind > i+1 | Ind < i-1;\n else\n I = Ind > i+1 & Ind < n;\n end\n % Solve for possible intersection points\n [~,DistOnRay,DistOnLines] = distances_between_lines(...\n Curve(i,:),DirLines(i,:),Curve(I,:),DirLines(I,:));\n if any(DistOnRay >= 0 & DistOnRay <= L(i) &...\n DistOnLines > 0 & DistOnLines <= L(I))\n 
Intersect = true;\n i = n;\n else\n i = i+1; % study the next line element\n end\n end\n else % determine also all intersection points (line elements)\n IntersectLines = cell(n,2);\n for i = 1:n-1\n % Select the line elements that can intersect element i\n if i > 1\n I = Ind > i+1 | Ind < i-1;\n else\n I = Ind > i+1 & Ind < n;\n end\n % Solve for possible intersection points\n [D,DistOnRay,DistOnLines] = distances_between_lines(...\n Curve(i,:),DirLines(i,:),Curve(I,:),DirLines(I,:));\n if any(DistOnRay >= 0 & DistOnRay <= L(i) & ...\n DistOnLines > 0 & DistOnLines <= L(I))\n Intersect = true;\n J = DistOnRay >= 0 & DistOnRay <= L(i) & ...\n DistOnLines > 0 & DistOnLines <= L(I);\n ind = Ind(I);\n ind = ind(J);\n DistOnLines = DistOnLines(J);\n IntersectLines{i,1} = ind;\n IntersectLines{i,2} = DistOnRay(J);\n % Record the elements intersecting\n for j = 1:length(ind)\n IntersectLines{ind(j),1} = [IntersectLines{ind(j),1}; i];\n IntersectLines{ind(j),2} = [IntersectLines{ind(j),2}; DistOnLines(j)];\n end\n end\n end\n % remove possible multiple values\n for i = 1:n\n IntersectLines{i} = unique(IntersectLines{i});\n IntersectLines{i,2} = min(IntersectLines{i,2});\n end\n end\n end\nelse % Empty curve\n Intersect = false;\n IntersectLines = cell(1,1);\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "branches.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/branches.m", "size": 4480, "source_encoding": "utf_8", "md5": "e5a63f1d1e99bdd56ea657a41c8df921", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction branch = branches(cylinder)\n\n% ---------------------------------------------------------------------\n% BRANCHES.M Determines the branching structure and computes branch\n% attributes\n%\n% Version 3.0.0\n% Latest update 2 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Determines the branches (cylinders in a segment define a branch), their order\n% and topological parent-child-relation. Branch number one is the trunk and\n% its order is zero. 
Notice that branch number does not tell its age in the\n% sense that branch number two would be the oldest branch and the number\n% three the second oldest.\n%\n% Inputs:\n% cylinder Cylinders, structure array\n%\n% Outputs:\n% branch Branch structure array, contains fields:\n% Branch order, parent, volume, length, angle, height, azimuth\n% and diameter\n% ---------------------------------------------------------------------\n\n% Changes from version 2.1.0 to 3.0.0, 2 May 2022:\n% 1) Changed the code such that the input \"segment\" and output \"cylinder\"\n% are not needed anymore, which simplified the code in many places.\n% Cylinder info is now computed in \"cylinders\" function.\n\n% Changes from version 2.0.0 to 2.1.0, 25 Jan 2020:\n% 1) Chanced the coding to simplify and shorten the code\n% 2) Added branch area and zenith direction as new fields in the\n% branch-structure array\n% 3) Removed the line were 'ChildCyls' and'CylsInSegment' fields are\n% removed from the cylinder-structure array\n\nRad = cylinder.radius;\nLen = cylinder.length;\nAxe = cylinder.axis;\n\n%% Branches\nnc = size(Rad,1); % number of cylinder\nns = max(cylinder.branch); % number of segments\nBData = zeros(ns,9); % branch ord, dia, vol, are, len, ang, hei, azi, zen\nind = (1:1:nc)';\nCiB = cell(ns,1);\nfor i = 1:ns\n C = ind(cylinder.branch == i);\n CiB{i} = C;\n if ~isempty(C)\n\n BData(i,1) = cylinder.BranchOrder(C(1)); % branch order\n BData(i,2) = 2*Rad(C(1)); % branch diameter\n BData(i,3) = 1000*pi*sum(Len(C).*Rad(C).^2); % branch volume\n BData(i,4) = 2*pi*sum(Len(C).*Rad(C)); % branch area\n BData(i,5) = sum(Len(C)); % branch length\n\n % if the first cylinder is added to fill a gap, then\n % use the second cylinder to compute the angle:\n if cylinder.added(C(1)) && length(C) > 1\n FC = C(2); % first cyl in the branch\n PC = cylinder.parent(C(1)); % parent cylinder of the branch\n else\n FC = C(1);\n PC = cylinder.parent(FC);\n end\n if PC > 0\n BData(i,6) = 180/pi*acos(Axe(FC,:)*Axe(PC,:)'); % branch angle\n end\n\n BData(i,7) = cylinder.start(C(1),3)-cylinder.start(1,3); % branch height\n BData(i,8) = 180/pi*atan2(Axe(C(1),2),Axe(C(1),1)); % branch azimuth\n BData(i,9) = 180/pi*acos(Axe(C(1),3)); % branch zenith\n end\nend\nBData = single(BData);\n\n%% Branching structure (topology, parent-child-relation)\nbranch.order = uint8(BData(:,1));\nBPar = zeros(ns,1);\nChi = cell(nc,1);\nfor i = 1:nc\n c = ind(cylinder.parent == i);\n c = c(c ~= cylinder.extension(i));\n Chi{i} = c;\nend\nfor i = 1:ns\n C = CiB{i};\n ChildCyls = unique(vertcat(Chi{C}));\n CB = unique(cylinder.branch(ChildCyls)); % Child branches\n BPar(CB) = i;\nend\nif ns <= 2^16\n branch.parent = uint16(BPar);\nelse\n branch.parent = uint32(BPar);\nend\n\n%% Finish the definition of branch\nbranch.diameter = BData(:,2); % diameters in meters\nbranch.volume = BData(:,3); % volumes in liters\nbranch.area = BData(:,4); % areas in square meters\nbranch.length = BData(:,5); % lengths in meters\nbranch.angle = BData(:,6); % angles in degrees\nbranch.height = BData(:,7); % heights in meters\nbranch.azimuth = BData(:,8); % azimuth directions in angles\nbranch.zenith = BData(:,9); % zenith directions in angles\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "tree_data.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/tree_data.m", "size": 31615, "source_encoding": "utf_8", "md5": "29dd42794f0a3a84a3855ab686bba020", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it 
and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction [treedata,triangulation] = tree_data(cylinder,branch,trunk,inputs)\n\n% ---------------------------------------------------------------------\n% TREE_DATA.M Calculates some tree attributes from cylinder QSM.\n%\n% Version 3.0.1\n% Latest update 2 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Inputs:\n% cylinder:\n% radius (Rad) Radii of the cylinders\n% length (Len) Lengths of the cylinders\n% start (Sta) Starting points of the cylinders\n% axis (Axe) Axes of the cylinders\n% branch:\n% order (BOrd) Branch order data\n% volume (BVol) Branch volume data\n% length (BLen) Branch length data\n% trunk Point cloud of the trunk\n% inputs Input structure, defines if results are displayed and\n% plotted and if triangulation results are computed\n%\n% Output:\n% treedata Tree data/attributes in a struct\n% ---------------------------------------------------------------------\n\n% Changes from version 3.0.0 to 3.0.1, 2 May 2022:\n% 1) Small changes in \"crown_measures\" when computing crown base to prevent\n% errors in special cases.\n% 2) Small change for how to compute the \"first major branch\" in \n% \"triangulate_stem\".\n% 3) Modified code so that \"n\" cannot be empty in \"branch_distribution\" and\n% cause warning\n% 4) Decreased the minimum triangle sizes in \"triangulate_stem\"\n% 5) The triangulation code has some changes.\n% 6) Minor streamlining of the code\n\n% Changes from version 2.0.2 to 3.0.0, 13 Feb 2020:\n% 1) Changed the setup for triangulation:\n% - The size of the triangles is more dependent on the dbh\n% - The height of the stem section is defined up to the first major branch\n% (branch diameter > 0.1*dbh or maximum branch diameter) but keeping\n% the stem diameter above 25% of dbh.\n% 2) Makes now more tries for triangulation, also changes triangle size\n% and the length of the stem section if necessary.\n% 3) Changed the names of some fields in the output:\n% - VolumeCylDiam --> VolCylDia\n% - LengthCylDiam --> LenCylDia\n% - VolumeBranchOrder --> VolBranchOrd\n% - LengthBranchOrder --> LenBranchOrd\n% - NumberBranchOrder --> NumBranchOrd\n% 3) Added many new fields into the output treedata, particularly distributions:\n% - Total length (trunk length + branch length) (\"TotalLength\")\n% - Trunk area and branch area (\"TrunkArea\" and \"BranchArea\")\n% - Crown dimensions: \"CrownDiamAve\", \"CrownDiamMax\",\"CrownAreaConv\",\n% \"CrownAreaAlpha\", \"CrownBaseHeight\", \"CrownLength\", \"CrownRatio\",\n% \"CrownVolumeConv\", \"CrownVolumeAlpha\".\n% - Vertical tree profile \"VerticalProfile\" and tree diameters in\n% 18 directions at 20 height layers \"spreads\".\n% - Branch area as functions of diameter class and branch order\n% (\"AreCylDia\" and \"AreBranchOrd\")\n% - Volume, area and length of CYLINDERS (tree segments) in 1 meter\n% HEIGHT classes (\"VolCylHei\", \"AreCylHei\", \"LenCylHei\")\n% - Volume, area and length of CYLINDERS (tree 
segments) in 10 deg\n% ZENITH DIRECTION classes (\"VolCylZen\", \"AreCylZen\", \"LenCylZen\")\n% - Volume, area and length of CYLINDERS (tree segments) in 10 deg\n% AZIMUTH DIRECTION classes (\"VolCylAzi\", \"AreCylAzi\", \"LenCylAzi\")\n% - Volume, area, length and number of all and 1st-order BRANCHES\n% in 1 cm DIAMETER classes (\"AreBranchDia\", \"AreBranch1Dia\", etc.)\n% - Volume, area, length and number of all and 1st-order BRANCHES\n% in 1 meter HEIGHT classes (\"AreBranchDia\", \"AreBranch1Dia\", etc.)\n% - Volume, area, length and number of all and 1st-order BRANCHES\n% in 10 degree BRANCHING ANGLE classes\n% (\"AreBranchAng\", \"AreBranch1Ang\", etc.)\n% - Volume, area, length and number of all and 1st-order BRANCHES\n% in 22.5 degree branch AZIMUTH ANGLE classes\n% (\"AreBranchAzi\", \"AreBranch1Azi\", etc.)\n% - Volume, area, length and number of all and 1st-order BRANCHES\n% in 10 degree branch ZENITH ANGLE classes\n% (\"AreBranchZen\", \"AreBranch1Zen\", etc.)\n% 4) Added new area-related fields into the output triangulation:\n% - side area, top area and bottom area\n% 5) Added new triangulation related fields to the output treedata:\n% - TriaTrunkArea side area of the triangulation\n% - MixTrunkArea trunk area from triangulation and cylinders\n% - MixTotalArea total area where the MixTrunkArea used instead\n% of TrunkArea\n% 6) Structure has more subfunctions.\n% 7) Changed the coding for cylinder fitting of DBH to conform new output\n% of the least_square_cylinder.\n\n% Changes from version 2.0.1 to 2.0.2, 26 Nov 2019:\n% 1) Bug fix: Added a statement \"C < nc\" for a while command that makes sure\n% that the index \"C\" does not exceed the number of stem cylinders, when\n% determining the index of cylinders up to first branch.\n% 2) Bug fix: Changed \"for i = 1:BO\" to \"for i = 1:max(1,BO)\" where\n% computing branch order data.\n% 3) Added the plotting of the triangulation model\n\n% Changes from version 2.0.0 to 2.0.1, 9 Oct 2019:\n% 1) Bug fix: Changed the units (from 100m to 1m) for computing the branch\n% length distribution: branch length per branch order.\n\n% Define some variables from cylinder:\nRad = cylinder.radius;\nLen = cylinder.length;\nnc = length(Rad);\nind = (1:1:nc)';\nTrunk = cylinder.branch == 1; % Trunk cylinders\n\n%% Tree attributes from cylinders\n% Volumes, areas, lengths, branches\ntreedata.TotalVolume = 1000*pi*Rad.^2'*Len;\ntreedata.TrunkVolume = 1000*pi*Rad(Trunk).^2'*Len(Trunk);\ntreedata.BranchVolume = 1000*pi*Rad(~Trunk).^2'*Len(~Trunk);\nbottom = min(cylinder.start(:,3));\n[top,i] = max(cylinder.start(:,3));\nif cylinder.axis(i,3) > 0\n top = top+Len(i)*cylinder.axis(i,3);\nend\ntreedata.TreeHeight = top-bottom;\ntreedata.TrunkLength = sum(Len(Trunk));\ntreedata.BranchLength = sum(Len(~Trunk));\ntreedata.TotalLength = treedata.TrunkLength+treedata.BranchLength;\nNB = length(branch.order)-1; % number of branches\ntreedata.NumberBranches = NB;\nBO = max(branch.order); % maximum branch order\ntreedata.MaxBranchOrder = BO;\ntreedata.TrunkArea = 2*pi*sum(Rad(Trunk).*Len(Trunk));\ntreedata.BranchArea = 2*pi*sum(Rad(~Trunk).*Len(~Trunk));\ntreedata.TotalArea = 2*pi*sum(Rad.*Len);\n\n%% Diameter at breast height (dbh)\n% Dbh from the QSM and from a cylinder fitted particularly to the correct place\ntreedata = dbh_cylinder(treedata,trunk,Trunk,cylinder,ind);\n\n%% Crown measures,Vertical profile and spreads\n[treedata,spreads] = crown_measures(treedata,cylinder,branch);\n\n%% Trunk volume and DBH from triangulation\nif inputs.Tria\n 
[treedata,triangulation] = triangulate_stem(...\n treedata,cylinder,branch,trunk);\nelse\n triangulation = 0;\nend\n\n%% Tree Location\ntreedata.location = cylinder.start(1,:);\n\n%% Stem taper\nR = Rad(Trunk);\nn = length(R);\nTaper = zeros(n+1,2);\nTaper(1,2) = 2*R(1);\nTaper(2:end,1) = cumsum(Len(Trunk));\nTaper(2:end,2) = [2*R(2:end); 2*R(n)];\ntreedata.StemTaper = Taper';\n\n%% Vertical profile and spreads\ntreedata.VerticalProfile = mean(spreads,2);\ntreedata.spreads = spreads;\n\n%% CYLINDER DISTRIBUTIONS:\n%% Wood part diameter distributions\n% Volume, area and length of wood parts as functions of cylinder diameter\n% (in 1cm diameter classes)\ntreedata = cylinder_distribution(treedata,cylinder,'Dia');\n\n%% Wood part height distributions\n% Volume, area and length of cylinders as a function of height\n% (in 1 m height classes)\ntreedata = cylinder_height_distribution(treedata,cylinder,ind);\n\n%% Wood part zenith direction distributions\n% Volume, area and length of wood parts as functions of cylinder zenith\n% direction (in 10 degree angle classes)\ntreedata = cylinder_distribution(treedata,cylinder,'Zen');\n\n%% Wood part azimuth direction distributions\n% Volume, area and length of wood parts as functions of cylinder zenith\n% direction (in 10 degree angle classes)\ntreedata = cylinder_distribution(treedata,cylinder,'Azi');\n\n%% BRANCH DISTRIBUTIONS:\n%% Branch order distributions\n% Volume, area, length and number of branches as a function of branch order\ntreedata = branch_order_distribution(treedata,branch);\n\n%% Branch diameter distributions\n% Volume, area, length and number of branches as a function of branch diameter\n% (in 1cm diameter classes)\ntreedata = branch_distribution(treedata,branch,'Dia');\n\n%% Branch height distribution\n% Volume, area, length and number of branches as a function of branch height\n% (in 1 meter classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Hei');\n\n%% Branch angle distribution\n% Volume, area, length and number of branches as a function of branch angle\n% (in 10 deg angle classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Ang');\n\n%% Branch azimuth distribution\n% Volume, area, length and number of branches as a function of branch azimuth\n% (in 22.5 deg angle classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Azi');\n\n%% Branch zenith distribution\n% Volume, area, length and number of branches as a function of branch zenith\n% (in 10 deg angle classes) for all and 1st-order branches\ntreedata = branch_distribution(treedata,branch,'Zen');\n\n%% change into single-format\nNames = fieldnames(treedata);\nn = size(Names,1);\nfor i = 1:n\n treedata.(Names{i}) = single(treedata.(Names{i}));\nend\n\nif inputs.disp == 2\n %% Generate units for displaying the treedata\n Units = zeros(n,3);\n for i = 1:n\n if ~inputs.Tria && strcmp(Names{i},'CrownVolumeAlpha')\n m = i;\n elseif inputs.Tria && strcmp(Names{i},'TriaTrunkLength')\n m = i;\n end\n if strcmp(Names{i}(1:3),'DBH')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'ume')\n Units(i,:) = 'L ';\n elseif strcmp(Names{i}(end-2:end),'ght')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'gth')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(1:3),'vol')\n Units(i,:) = 'L ';\n elseif strcmp(Names{i}(1:3),'len')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'rea')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(1:3),'loc')\n Units(i,:) = 'm ';\n elseif 
strcmp(Names{i}(end-4:end),'aConv')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(end-5:end),'aAlpha')\n Units(i,:) = 'm^2';\n elseif strcmp(Names{i}(end-4:end),'eConv')\n Units(i,:) = 'm^3';\n elseif strcmp(Names{i}(end-5:end),'eAlpha')\n Units(i,:) = 'm^3';\n elseif strcmp(Names{i}(end-2:end),'Ave')\n Units(i,:) = 'm ';\n elseif strcmp(Names{i}(end-2:end),'Max')\n Units(i,:) = 'm ';\n end\n end\n %% Display treedata\n disp('------------')\n disp(' Tree attributes:')\n for i = 1:m\n v = change_precision(treedata.(Names{i}));\n if strcmp(Names{i},'DBHtri')\n disp(' -----')\n disp(' Tree attributes from triangulation:')\n end\n disp([' ',Names{i},' = ',num2str(v),' ',Units(i,:)])\n end\n disp(' -----')\nend\n\nif inputs.plot > 1\n %% Plot distributions\n figure(6)\n subplot(2,4,1)\n plot(Taper(:,1),Taper(:,2),'-b')\n title('Stem taper')\n xlabel('Distance from base (m)')\n ylabel('Diameter (m)')\n axis tight\n grid on\n \n Q.treedata = treedata;\n subplot(2,4,2)\n plot_distribution(Q,6,0,0,'VolCylDia')\n \n subplot(2,4,3)\n plot_distribution(Q,6,0,0,'AreCylDia')\n \n subplot(2,4,4)\n plot_distribution(Q,6,0,0,'LenCylDia')\n \n subplot(2,4,5)\n plot_distribution(Q,6,0,0,'VolBranchOrd')\n \n subplot(2,4,6)\n plot_distribution(Q,6,0,0,'LenBranchOrd')\n \n subplot(2,4,7)\n plot_distribution(Q,6,0,0,'AreBranchOrd')\n \n subplot(2,4,8)\n plot_distribution(Q,6,0,0,'NumBranchOrd')\n \n figure(7)\n subplot(3,3,1)\n plot_distribution(Q,7,0,0,'VolCylHei')\n \n subplot(3,3,2)\n plot_distribution(Q,7,0,0,'AreCylHei')\n \n subplot(3,3,3)\n plot_distribution(Q,7,0,0,'LenCylHei')\n \n subplot(3,3,4)\n plot_distribution(Q,7,0,0,'VolCylZen')\n \n subplot(3,3,5)\n plot_distribution(Q,7,0,0,'AreCylZen')\n \n subplot(3,3,6)\n plot_distribution(Q,7,0,0,'LenCylZen')\n \n subplot(3,3,7)\n plot_distribution(Q,7,0,0,'VolCylAzi')\n \n subplot(3,3,8)\n plot_distribution(Q,7,0,0,'AreCylAzi')\n \n subplot(3,3,9)\n plot_distribution(Q,7,0,0,'LenCylAzi')\n \n figure(8)\n subplot(3,4,1)\n %if %%%%%% !!!!!!!!\n plot_distribution(Q,8,1,0,'VolBranchDia','VolBranch1Dia')\n \n subplot(3,4,2)\n plot_distribution(Q,8,1,0,'AreBranchDia','AreBranch1Dia')\n \n subplot(3,4,3)\n plot_distribution(Q,8,1,0,'LenBranchDia','LenBranch1Dia')\n \n subplot(3,4,4)\n plot_distribution(Q,8,1,0,'NumBranchDia','NumBranch1Dia')\n \n subplot(3,4,5)\n plot_distribution(Q,8,1,0,'VolBranchHei','VolBranch1Hei')\n \n subplot(3,4,6)\n plot_distribution(Q,8,1,0,'AreBranchHei','AreBranch1Hei')\n \n subplot(3,4,7)\n plot_distribution(Q,8,1,0,'LenBranchHei','LenBranch1Hei')\n \n subplot(3,4,8)\n plot_distribution(Q,8,1,0,'NumBranchHei','NumBranch1Hei')\n \n subplot(3,4,9)\n plot_distribution(Q,8,1,0,'VolBranchAng','VolBranch1Ang')\n \n subplot(3,4,10)\n plot_distribution(Q,8,1,0,'AreBranchAng','AreBranch1Ang')\n \n subplot(3,4,11)\n plot_distribution(Q,8,1,0,'LenBranchAng','LenBranch1Ang')\n \n subplot(3,4,12)\n plot_distribution(Q,8,1,0,'NumBranchAng','NumBranch1Ang')\n \n figure(9)\n subplot(2,4,1)\n plot_distribution(Q,9,1,0,'VolBranchZen','VolBranch1Zen')\n \n subplot(2,4,2)\n plot_distribution(Q,9,1,0,'AreBranchZen','AreBranch1Zen')\n \n subplot(2,4,3)\n plot_distribution(Q,9,1,0,'LenBranchZen','LenBranch1Zen')\n \n subplot(2,4,4)\n plot_distribution(Q,9,1,0,'NumBranchZen','NumBranch1Zen')\n \n subplot(2,4,5)\n plot_distribution(Q,9,1,0,'VolBranchAzi','VolBranch1Azi')\n \n subplot(2,4,6)\n plot_distribution(Q,9,1,0,'AreBranchAzi','AreBranch1Azi')\n \n subplot(2,4,7)\n plot_distribution(Q,9,1,0,'LenBranchAzi','LenBranch1Azi')\n \n subplot(2,4,8)\n 
plot_distribution(Q,9,1,0,'NumBranchAzi','NumBranch1Azi')\nend\n\nend % End of main function\n\n\nfunction treedata = dbh_cylinder(treedata,trunk,Trunk,cylinder,ind)\n\n% Dbh from the QSM\ni = 1;\nn = nnz(Trunk);\nT = ind(Trunk);\nwhile i < n && sum(cylinder.length(T(1:i))) < 1.3\n i = i+1;\nend\nDBHqsm = 2*cylinder.radius(T(i));\ntreedata.DBHqsm = DBHqsm;\n\n% Determine DBH from cylinder fitted particularly to the correct place\n% Select the trunk point set\nV = trunk-cylinder.start(1,:);\nh = V*cylinder.axis(1,:)';\nI = h < 1.5;\nJ = h > 1.1;\nI = I&J;\nif nnz(I) > 100\n T = trunk(I,:);\n % Fit cylinder\n cyl0 = select_cylinders(cylinder,i);\n cyl = least_squares_cylinder(T,cyl0);\n RadiusOK = 2*cyl.radius > 0.8*DBHqsm & 2*cyl.radius < 1.2*DBHqsm;\n \n if RadiusOK && abs(cylinder.axis(i,:)*cyl.axis') > 0.9 && cyl.conv && cyl.rel\n treedata.DBHcyl = 2*cyl.radius;\n else\n treedata.DBHcyl = DBHqsm;\n end\nelse\n treedata.DBHcyl = DBHqsm;\nend\n% End of function\nend\n\n\nfunction [treedata,spreads] = crown_measures(treedata,cylinder,branch)\n\n%% Generate point clouds from the cylinder model\nAxe = cylinder.axis;\nLen = cylinder.length;\nSta = cylinder.start;\nTip = Sta+[Len.*Axe(:,1) Len.*Axe(:,2) Len.*Axe(:,3)]; % tips of the cylinders\nnc = length(Len);\nP = zeros(5*nc,3); % four mid points on the cylinder surface\nt = 0;\nfor i = 1:nc\n [U,V] = orthonormal_vectors(Axe(i,:));\n U = cylinder.radius(i)*U;\n if cylinder.branch(i) == 1\n % For stem cylinders generate more points\n R = rotation_matrix(Axe(i,:),pi/12);\n for k = 1:4\n M = Sta(i,:)+k/4*Len(i)*Axe(i,:);\n for j = 1:12\n if j > 1\n U = R*U;\n end\n t = t+1;\n P(t,:) = M+U';\n end\n end\n else\n M = Sta(i,:)+0.5*Len(i)*Axe(i,:);\n R = rotation_matrix(Axe(i,:),pi/4);\n for j = 1:4\n if j > 1\n U = R*U;\n end\n t = t+1;\n P(t,:) = M+U';\n end\n end\nend\nP = P(1:t,:);\nI = ~isnan(P(:,1));\nP = P(I,:);\nP = double([P; Sta; Tip]);\nP = unique(P,'rows');\n\n%% Vertical profiles (layer diameters/spreads), mean:\nbot = min(P(:,3));\ntop = max(P(:,3));\nHei = top-bot;\nif Hei > 10\n m = 20;\nelseif Hei > 2\n m = 10;\nelse\n m = 5;\nend\nspreads = zeros(m,18);\nfor j = 1:m\n I = P(:,3) >= bot+(j-1)*Hei/m & P(:,3) < bot+j*Hei/m;\n X = unique(P(I,:),'rows');\n if size(X,1) > 5\n [K,A] = convhull(X(:,1),X(:,2));\n % compute center of gravity for the convex hull and use it as\n % center for computing average diameters\n n = length(K);\n x = X(K,1);\n y = X(K,2);\n CX = sum((x(1:n-1)+x(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\n CY = sum((y(1:n-1)+y(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\n \n V = mat_vec_subtraction(X(:,1:2),[CX CY]);\n ang = atan2(V(:,2),V(:,1))+pi;\n [ang,I] = sort(ang);\n L = sqrt(sum(V.*V,2));\n L = L(I);\n for i = 1:18\n I = ang >= (i-1)*pi/18 & ang < i*pi/18;\n if any(I)\n L1 = max(L(I));\n else\n L1 = 0;\n end\n J = ang >= (i-1)*pi/18+pi & ang < i*pi/18+pi;\n if any(J)\n L2 = max(L(J));\n else\n L2 = 0;\n end\n spreads(j,i) = L1+L2;\n end\n end\nend\n\n%% Crown diameters (spreads), mean and maximum:\nX = unique(P(:,1:2),'rows');\n[K,A] = convhull(X(:,1),X(:,2));\n% compute center of gravity for the convex hull and use it as center for\n% computing average diameters\nn = length(K);\nx = X(K,1);\ny = X(K,2);\nCX = sum((x(1:n-1)+x(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\nCY = sum((y(1:n-1)+y(2:n)).*(x(1:n-1).*y(2:n)-x(2:n).*y(1:n-1)))/6/A;\nV = Tip(:,1:2)-[CX CY];\nang = atan2(V(:,2),V(:,1))+pi;\n[ang,I] = sort(ang);\nL = sqrt(sum(V.*V,2));\nL = L(I);\nS = zeros(18,1);\nfor i = 1:18\n I = ang >= 
(i-1)*pi/18 & ang < i*pi/18;\n if any(I)\n L1 = max(L(I));\n else\n L1 = 0;\n end\n J = ang >= (i-1)*pi/18+pi & ang < i*pi/18+pi;\n if any(J)\n L2 = max(L(J));\n else\n L2 = 0;\n end\n S(i) = L1+L2;\nend\ntreedata.CrownDiamAve = mean(S);\nMaxDiam = 0;\nfor i = 1:n\n V = mat_vec_subtraction([x y],[x(i) y(i)]);\n L = max(sqrt(sum(V.*V,2)));\n if L > MaxDiam\n MaxDiam = L;\n end\nend\ntreedata.CrownDiamMax = L;\n\n%% Crown areas from convex hull and alpha shape:\ntreedata.CrownAreaConv = A;\nalp = max(0.5,treedata.CrownDiamAve/10);\nshp = alphaShape(X(:,1),X(:,2),alp);\ntreedata.CrownAreaAlpha = shp.area;\n\n%% Crown base\n% Define first major branch as the branch whose diameter > min(0.05*dbh,5cm)\n% and whose horizontal relative reach is more than the median reach of 1st-ord.\n% branches (or at maximum 10). The reach is defined as the horizontal\n% distance from the base to the tip divided by the dbh.\ndbh = treedata.DBHcyl;\nnb = length(branch.order);\nHL = zeros(nb,1); % horizontal reach\nbranches1 = (1:1:nb)';\nbranches1 = branches1(branch.order == 1); % 1st-order branches\nnb = length(branches1);\nnc = size(Sta,1);\nind = (1:1:nc)';\nfor i = 1:nb\n C = ind(cylinder.branch == branches1(i));\n if ~isempty(C)\n base = Sta(C(1),:);\n C = C(end);\n tip = Sta(C,:)+Len(C)*Axe(C);\n V = tip(1:2)-base(1:2);\n HL(branches1(i)) = sqrt(V*V')/dbh*2;\n end\nend\nM = min(10,median(HL));\n\n% Sort the branches according to the their heights\nHei = branch.height(branches1);\n[Hei,SortOrd] = sort(Hei);\nbranches1 = branches1(SortOrd);\n\n% Search the first/lowest branch: \nd = min(0.05,0.05*dbh);\nb = 0;\nif nb > 1\n i = 1;\n while i < nb\n i = i+1;\n if branch.diameter(branches1(i)) > d && HL(branches1(i)) > M\n b = branches1(i);\n i = nb+2;\n end\n end\n if i == nb+1 && nb > 1\n b = branches1(1);\n end\nend\n\nif b > 0\n % search all the children of the first major branch:\n nb = size(branch.parent,1);\n Ind = (1:1:nb)';\n chi = Ind(branch.parent == b);\n B = b;\n while ~isempty(chi)\n B = [B; chi];\n n = length(chi);\n C = cell(n,1);\n for i = 1:n\n C{i} = Ind(branch.parent == chi(i));\n end\n chi = vertcat(C{:});\n end\n \n % define crown base height from the ground:\n BaseHeight = max(Sta(:,3)); % Height of the crown base\n for i = 1:length(B)\n C = ind(cylinder.branch == B(i));\n ht = min(Tip(C,3));\n hb = min(Sta(C,3));\n h = min(hb,ht);\n if h < BaseHeight\n BaseHeight = h;\n end\n end\n treedata.CrownBaseHeight = BaseHeight-Sta(1,3);\n \n %% Crown length and ratio\n treedata.CrownLength = treedata.TreeHeight-treedata.CrownBaseHeight;\n treedata.CrownRatio = treedata.CrownLength/treedata.TreeHeight;\n \n %% Crown volume from convex hull and alpha shape:\n I = P(:,3) >= BaseHeight;\n X = P(I,:);\n [K,V] = convhull(X(:,1),X(:,2),X(:,3));\n treedata.CrownVolumeConv = V;\n alp = max(0.5,treedata.CrownDiamAve/5);\n shp = alphaShape(X(:,1),X(:,2),X(:,3),alp,'HoleThreshold',10000);\n treedata.CrownVolumeAlpha = shp.volume;\n\nelse \n % No branches\n treedata.CrownBaseHeight = treedata.TreeHeight;\n treedata.CrownLength = 0;\n treedata.CrownRatio = 0;\n treedata.CrownVolumeConv = 0;\n treedata.CrownVolumeAlpha = 0;\nend\n% End of function\nend\n\n\nfunction [treedata,triangulation] = ...\n triangulate_stem(treedata,cylinder,branch,trunk)\n\nSta = cylinder.start;\nRad = cylinder.radius;\nLen = cylinder.length;\nDBHqsm = treedata.DBHqsm;\n% Determine the first major branch (over 10% of dbh or the maximum\n% diameter branch):\nnb = size(branch.diameter,1);\nind = (1:1:nb)';\nind = ind(branch.order == 
1);\n[~,I] = sort(branch.height(ind));\nind = ind(I);\nn = length(ind);\nb = 1;\nwhile b <= n && branch.diameter(ind(b)) < 0.1*DBHqsm\n b = b+1;\nend\nb = ind(b);\nif b > n\n [~,b] = max(branch.diameter);\nend\n\n% Determine suitable cylinders up to the first major branch but keep the\n% stem diameter above one quarter (25%) of dbh:\nC = 1;\nnc = size(Sta,1);\nwhile C < nc && cylinder.branch(C) < b\n C = C+1;\nend\nn = nnz(cylinder.branch == 1);\ni = 2;\nwhile i < n && Sta(i,3) < Sta(C,3) && Rad(i) > 0.125*DBHqsm\n i = i+1;\nend\nCylInd = max(i,3);\nTrunkLenTri = Sta(CylInd,3)-Sta(1,3);\n\nEmptyTriangulation = false;\n% Calculate the volumes\nif size(trunk,1) > 1000 && TrunkLenTri >= 1\n \n % Set the parameters for triangulation:\n % Compute point density, which is used to increase the triangle\n % size if the point density is very small\n PointDensity = zeros(CylInd-1,1);\n for i = 1:CylInd-1\n I = trunk(:,3) >= Sta(i,3) & trunk(:,3) < Sta(i+1,3);\n PointDensity(i) = pi*Rad(i)*Len(i)/nnz(I);\n end\n PointDensity = PointDensity(PointDensity < inf);\n d = max(PointDensity);\n \n % Determine minimum triangle size based on dbh\n if DBHqsm > 1\n MinTriaHeight = 0.1;\n elseif DBHqsm > 0.50\n MinTriaHeight = 0.075;\n elseif DBHqsm > 0.10\n MinTriaHeight = 0.05;\n else\n MinTriaHeight = 0.02;\n end\n TriaHeight0 = max(MinTriaHeight,4*sqrt(d));\n \n % Select the trunk point set used for triangulation\n I = trunk(:,3) <= Sta(CylInd,3);\n Stem = trunk(I,:);\n \n % Do the triangulation:\n triangulation = zeros(1,0);\n l = 0;\n while isempty(triangulation) && l < 4 && CylInd > 2\n l = l+1;\n TriaHeight = TriaHeight0;\n TriaWidth = TriaHeight;\n k = 0;\n while isempty(triangulation) && k < 3\n k = k+1;\n j = 0;\n while isempty(triangulation) && j < 5\n triangulation = curve_based_triangulation(Stem,TriaHeight,TriaWidth);\n j = j+1;\n end\n % try different triangle sizes if necessary\n if isempty(triangulation) && k < 3\n TriaHeight = TriaHeight+0.03;\n TriaWidth = TriaHeight;\n end\n end\n % try different length of stem sections if necessary\n if isempty(triangulation) && l < 4 && CylInd > 2\n CylInd = CylInd-1;\n I = trunk(:,3) <= Sta(CylInd,3);\n Stem = trunk(I,:);\n end\n end\n \n if ~isempty(triangulation)\n triangulation.cylind = CylInd;\n % Dbh from triangulation\n Vert = triangulation.vert;\n h = Vert(:,3)-triangulation.bottom;\n [~,I] = min(abs(h-1.3));\n H = h(I);\n I = abs(h-H) < triangulation.triah/2;\n V = Vert(I,:);\n V = V([2:end 1],:)-V(1:end,:);\n d = sqrt(sum(V.*V,2));\n treedata.DBHtri = sum(d)/pi;\n % volumes from the triangulation\n treedata.TriaTrunkVolume = triangulation.volume;\n TrunkVolMix = treedata.TrunkVolume-...\n 1000*pi*sum(Rad(1:CylInd-1).^2.*Len(1:CylInd-1))+triangulation.volume;\n TrunkAreaMix = treedata.TrunkArea-...\n 2*pi*sum(Rad(1:CylInd-1).*Len(1:CylInd-1))+triangulation.SideArea;\n treedata.MixTrunkVolume = TrunkVolMix;\n treedata.MixTotalVolume = TrunkVolMix+treedata.BranchVolume;\n treedata.TriaTrunkArea = triangulation.SideArea;\n treedata.MixTrunkArea = TrunkAreaMix;\n treedata.MixTotalArea = TrunkAreaMix+treedata.BranchArea;\n treedata.TriaTrunkLength = TrunkLenTri;\n \n else\n EmptyTriangulation = true;\n end\nelse\n EmptyTriangulation = true;\nend\n\nif EmptyTriangulation\n disp(' No triangulation model produced')\n clear triangulation\n treedata.DBHtri = DBHqsm;\n treedata.TriaTrunkVolume = treedata.TrunkVolume;\n treedata.TriaTrunkArea = treedata.TrunkArea;\n treedata.MixTrunkVolume = treedata.TrunkVolume;\n treedata.MixTrunkArea = treedata.TrunkArea;\n 
treedata.MixTotalVolume = treedata.TotalVolume;\n treedata.MixTotalArea = treedata.TotalArea;\n treedata.TriaTrunkLength = 0;\n triangulation.vert = zeros(0,3);\n triangulation.facet = zeros(0,3);\n triangulation.fvd = zeros(0,1);\n triangulation.volume = 0;\n triangulation.SideArea = 0;\n triangulation.BottomArea = 0;\n triangulation.TopArea = 0;\n triangulation.bottom = 0;\n triangulation.top = 0;\n triangulation.triah = 0;\n triangulation.triaw = 0;\n triangulation.cylind = 0;\nend\nend\n\n\nfunction treedata = cylinder_distribution(treedata,cyl,dist)\n%% Wood part diameter, zenith and azimuth direction distributions\n% Volume, area and length of wood parts as functions of cylinder\n% diameter, zenith, and azimuth\nif strcmp(dist,'Dia')\n Par = cyl.radius;\n n = ceil(max(200*cyl.radius));\n a = 0.005; % diameter in 1 cm classes\nelseif strcmp(dist,'Zen')\n Par = 180/pi*acos(cyl.axis(:,3));\n n = 18;\n a = 10; % zenith direction in 10 degree angle classes\nelseif strcmp(dist,'Azi')\n Par = 180/pi*atan2(cyl.axis(:,2),cyl.axis(:,1))+180;\n n = 36;\n a = 10; % azimuth direction in 10 degree angle classes\nend\n\nCylDist = zeros(3,n);\nfor i = 1:n\n K = Par >= (i-1)*a & Par < i*a;\n CylDist(1,i) = 1000*pi*sum(cyl.radius(K).^2.*cyl.length(K)); % vol in L\n CylDist(2,i) = 2*pi*sum(cyl.radius(K).*cyl.length(K)); % area in m^2\n CylDist(3,i) = sum(cyl.length(K)); % length in m\nend\ntreedata.(['VolCyl',dist]) = CylDist(1,:);\ntreedata.(['AreCyl',dist]) = CylDist(2,:);\ntreedata.(['LenCyl',dist]) = CylDist(3,:);\nend\n\n\nfunction treedata = cylinder_height_distribution(treedata,cylinder,ind)\n\nRad = cylinder.radius;\nLen = cylinder.length;\nAxe = cylinder.axis;\n\n%% Wood part height distributions\n% Volume, area and length of cylinders as a function of height\n% (in 1 m height classes)\nMaxHei= ceil(treedata.TreeHeight);\ntreedata.VolCylHei = zeros(1,MaxHei);\ntreedata.AreCylHei = zeros(1,MaxHei);\ntreedata.LenCylHei = zeros(1,MaxHei);\nEnd = cylinder.start+[Len.*Axe(:,1) Len.*Axe(:,2) Len.*Axe(:,3)];\nbot = min(cylinder.start(:,3));\nB = cylinder.start(:,3)-bot;\nT = End(:,3)-bot;\nfor j = 1:MaxHei\n I1 = B >= (j-2) & B < (j-1); % base below this bin\n J1 = B >= (j-1) & B < j; % base in this bin\n K1 = B >= j & B < (j+1); % base above this bin\n I2 = T >= (j-2) & T < (j-1); % top below this bin\n J2 = T >= (j-1) & T < j; % top in this bin\n K2 = T >= j & T < (j+1); % top above this bin\n C1 = ind(J1&J2); % base and top in this bin\n C2 = ind(J1&K2); % base in this bin, top above\n C3 = ind(J1&I2); % base in this bin, top below\n C4 = ind(I1&J2); % base in bin below, top in this\n C5 = ind(K1&J2); % base in bin above, top in this\n v1 = 1000*pi*sum(Rad(C1).^2.*Len(C1));\n a1 = 2*pi*sum(Rad(C1).*Len(C1));\n l1 = sum(Len(C1));\n r2 = (j-B(C2))./(T(C2)-B(C2)); % relative portion in this bin\n v2 = 1000*pi*sum(Rad(C2).^2.*Len(C2).*r2);\n a2 = 2*pi*sum(Rad(C2).*Len(C2).*r2);\n l2 = sum(Len(C2).*r2);\n r3 = (B(C3)-j+1)./(B(C3)-T(C3)); % relative portion in this bin\n v3 = 1000*pi*sum(Rad(C3).^2.*Len(C3).*r3);\n a3 = 2*pi*sum(Rad(C3).*Len(C3).*r3);\n l3 = sum(Len(C3).*r3);\n r4 = (T(C4)-j+1)./(T(C4)-B(C4)); % relative portion in this bin\n v4 = 1000*pi*sum(Rad(C4).^2.*Len(C4).*r4);\n a4 = 2*pi*sum(Rad(C4).*Len(C4).*r4);\n l4 = sum(Len(C4).*r4);\n r5 = (j-T(C5))./(B(C5)-T(C5)); % relative portion in this bin\n v5 = 1000*pi*sum(Rad(C5).^2.*Len(C5).*r5);\n a5 = 2*pi*sum(Rad(C5).*Len(C5).*r5);\n l5 = sum(Len(C5).*r5);\n treedata.VolCylHei(j) = v1+v2+v3+v4+v5;\n treedata.AreCylHei(j) = a1+a2+a3+a4+a5;\n 
treedata.LenCylHei(j) = l1+l2+l3+l4+l5;\nend\nend\n\n\nfunction treedata = branch_distribution(treedata,branch,dist)\n%% Branch diameter, height, angle, zenith and azimuth distributions\n% Volume, area, length and number of branches as a function of branch\n% diamater, height, angle, zenith and aximuth\nBOrd = branch.order(2:end);\nBVol = branch.volume(2:end);\nBAre = branch.area(2:end);\nBLen = branch.length(2:end);\nif strcmp(dist,'Dia')\n Par = branch.diameter(2:end);\n n = ceil(max(100*Par));\n a = 0.005; % diameter in 1 cm classes\nelseif strcmp(dist,'Hei')\n Par = branch.height(2:end);\n n = ceil(treedata.TreeHeight);\n a = 1; % height in 1 m classes\nelseif strcmp(dist,'Ang')\n Par = branch.angle(2:end);\n n = 18;\n a = 10; % angle in 10 degree classes\nelseif strcmp(dist,'Zen')\n Par = branch.zenith(2:end);\n n = 18;\n a = 10; % zenith direction in 10 degree angle classes\nelseif strcmp(dist,'Azi')\n Par = branch.azimuth(2:end)+180;\n n = 36;\n a = 10; % azimuth direction in 10 degree angle classes\nend\nif isempty(n)\n n = 0;\nend\n\nBranchDist = zeros(8,n);\nfor i = 1:n\n I = Par >= (i-1)*a & Par < i*a;\n BranchDist(1,i) = sum(BVol(I)); % volume (all branches)\n BranchDist(2,i) = sum(BVol(I & BOrd == 1)); % volume (1st-branches)\n BranchDist(3,i) = sum(BAre(I)); % area (all branches)\n BranchDist(4,i) = sum(BAre(I & BOrd == 1)); % area (1st-branches)\n BranchDist(5,i) = sum(BLen(I)); % length (all branches)\n BranchDist(6,i) = sum(BLen(I & BOrd == 1)); % length (1st-branches)\n BranchDist(7,i) = nnz(I); % number (all branches)\n BranchDist(8,i) = nnz(I & BOrd == 1); % number (1st-branches)\nend\ntreedata.(['VolBranch',dist]) = BranchDist(1,:);\ntreedata.(['VolBranch1',dist]) = BranchDist(2,:);\ntreedata.(['AreBranch',dist]) = BranchDist(3,:);\ntreedata.(['AreBranch1',dist]) = BranchDist(4,:);\ntreedata.(['LenBranch',dist]) = BranchDist(5,:);\ntreedata.(['LenBranch1',dist]) = BranchDist(6,:);\ntreedata.(['NumBranch',dist]) = BranchDist(7,:);\ntreedata.(['NumBranch1',dist]) = BranchDist(8,:);\nend\n\n\nfunction treedata = branch_order_distribution(treedata,branch)\n%% Branch order distributions\n% Volume, area, length and number of branches as a function of branch order\nBO = max(branch.order);\nBranchOrdDist = zeros(BO,4);\nfor i = 1:max(1,BO)\n I = branch.order == i;\n BranchOrdDist(i,1) = sum(branch.volume(I)); % volumes\n BranchOrdDist(i,2) = sum(branch.area(I)); % areas\n BranchOrdDist(i,3) = sum(branch.length(I)); % lengths\n BranchOrdDist(i,4) = nnz(I); % number of ith-order branches\nend\ntreedata.VolBranchOrd = BranchOrdDist(:,1)';\ntreedata.AreBranchOrd = BranchOrdDist(:,2)';\ntreedata.LenBranchOrd = BranchOrdDist(:,3)';\ntreedata.NumBranchOrd = BranchOrdDist(:,4)';\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "cylinders.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/cylinders.m", "size": 34496, "source_encoding": "utf_8", "md5": "21b6b835cd40db99681596120408488e", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction cylinder = cylinders(P,cover,segment,inputs)\n\n% ---------------------------------------------------------------------\n% CYLINDERS.M Fits cylinders to the branch-segments of the point cloud\n%\n% Version 3.0.0\n% Latest update 1 Now 2018\n%\n% Copyright (C) 2013-2018 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Reconstructs the surface and volume of branches of input tree with\n% cylinders. Subdivides each segment to smaller regions to which cylinders\n% are fitted in least squares sense. Returns the cylinder information and\n% in addition the child-relation of the cylinders plus the cylinders in\n% each segment.\n% ---------------------------------------------------------------------\n% Inputs:\n% P Point cloud, matrix\n% cover Cover sets\n% segment Segments\n% input Input parameters of the reconstruction:\n% MinCylRad Minimum cylinder radius, used in the taper corrections\n% ParentCor Radius correction based on radius of the parent: radii in\n% a branch are usually smaller than the radius of the parent\n% cylinder in the parent branch\n% TaperCor Parabola taper correction of radii inside branches.\n% GrowthVolCor If 1, use growth volume correction\n% GrowthVolFac Growth volume correction factor\n%\n% Outputs:\n% cylinder Structure array containing the following cylinder info:\n% radius Radii of the cylinders, vector\n% length Lengths of the cylinders, vector\n% axis Axes of the cylinders, matrix\n% start Starting points of the cylinders, matrix\n% parent Parents of the cylinders, vector\n% extension Extensions of the cylinders, vector\n% branch Branch of the cylinder\n% BranchOrder Branching order of the cylinder\n% PositionInBranch Position of the cylinder inside the branch\n% mad Mean absolute distances of points from the cylinder\n% surface, vector\n% SurfCov Surface coverage measure, vector\n% added Added cylinders, logical vector\n% UnModRadius Unmodified radii\n% ---------------------------------------------------------------------\n\n% Changes from version 3.0.0 to 3.1.0, 6 Oct 2021:\n% 1) Added the growth volume correction option (\"growth_volume_correction\")\n% back, which was removed from the previous version by a mistake. The\n% \"growth_volume_correction\" function was also corrected.\n% 2) Added the fields \"branch\", \"BranchOrder\", \"PositionInBranch\" to the\n% output structure \"cylinder\"\n% 3) Removed the fields \"CylsInSegment\" and \"ChildCyls\" from the output\n% structure \"cylinder\"\n\n% Changes from version 2.0.0 to 3.0.0, 13 Aug 2020:\n% Many comprehensive and small changes:\n% 1) \"regions\" and \"cylinder_fitting\" are combined into \"cylinder_fitting\"\n% and the process is more adaptive as it now fits at least 3 (up to 10)\n% cylinders of different lengths for each region.\n% 2) \"lcyl\" and \"FilRad\" parameters are not used anymore\n% 3) Surface coverage (\"SurfCov\") and mean absolute distance (\"mad\") are\n% added to the cylinder structure as fields.\n% 4) Surface coverage filtering is used in the definition of the regions\n% and removing outliers\n% 5) \"adjustments\" has many changes, particularly in the taper corrections\n% where the parabola-taper curve is fitted to all the data with surface\n% coverage as a weight. Adjustment of radii based on the parabola is\n% closer the parabola the smaller the surface coverage. 
For the stem the\n% taper correction is the same as for the branches. The minimum and\n% maximum radii corrections are also modified.\n% 6) Syntax has changed, particularly for the \"cyl\"-structure\n\n% Changes from version 2.1.0 to 2.1.1, 26 Nov 2019:\n% 1) Increased the minimum number \"n\" of estimated cylinders for\n% initialization of vectors at the beginning of the code. This is done\n% to make sure that trees without branches will not cause errors.\n\n% Changes from version 2.0.0 to 2.1.0, 3 Oct 2019:\n% 1) Bug fix: UnmodRadius is now defined as it should, as the radius after\n% least squares fitting but without parent, taper or growth vol. corrections\n% 2) Bug fix: Correction in \"least_squares_cylinder.m\", calculates the\n% starting point of the cylinder now correctly.\n% 3) Bug fix: Correct errors related to combining data when a fitted\n% cylinder is replaced with two shorter ones, in \"cylinder_fitting\"\n% 4) Removed some unnecessary command lines for computing radius estimates\n% in \"regions\"\n\n%% Initialization of variables\nSegs = segment.segments;\nSPar = segment.ParentSegment;\nSChi = segment.ChildSegment;\nNumOfSeg = max(size(Segs)); % number of segments\nn = max(2000,min(40*NumOfSeg,2e5));\nc = 1; % number of cylinders determined\nCChi = cell(n,1); % Children of the cylinders\nCiS = cell(NumOfSeg,1); % Cylinders in the segment\ncylinder.radius = zeros(n,1,'single');\ncylinder.length = zeros(n,1,'single');\ncylinder.start = zeros(n,3,'single');\ncylinder.axis = zeros(n,3,'single');\ncylinder.parent = zeros(n,1,'uint32');\ncylinder.extension = zeros(n,1,'uint32');\ncylinder.added = false(n,1);\ncylinder.UnmodRadius = zeros(n,1,'single');\ncylinder.branch = zeros(n,1,'uint16');\ncylinder.SurfCov = zeros(n,1,'single');\ncylinder.mad = zeros(n,1,'single');\n\n%% Determine suitable order of segments (from trunk to the \"youngest\" child)\nbases = (1:1:NumOfSeg)';\nbases = bases(SPar(:,1) == 0);\nnb = length(bases);\nSegmentIndex = zeros(NumOfSeg,1);\nnc = 0;\nfor i = 1:nb\n nc = nc+1;\n SegmentIndex(nc) = bases(i);\n S = vertcat(SChi{bases(i)});\n while ~isempty(S)\n n = length(S);\n SegmentIndex(nc+1:nc+n) = S;\n nc = nc+n;\n S = vertcat(SChi{S});\n end\nend\n\n%% Fit cylinders individually for each segment\nfor k = 1:NumOfSeg\n si = SegmentIndex(k);\n if si > 0\n %% Some initialization about the segment\n Seg = Segs{si}; % the current segment under analysis\n nl = max(size(Seg)); % number of cover set layers in the segment\n [Sets,IndSets] = verticalcat(Seg); % the cover sets in the segment\n\n ns = length(Sets); % number of cover sets in the current segment\n Points = vertcat(cover.ball{Sets}); % the points in the segments\n np = length(Points); % number of points in the segment\n\n % Determine indexes of points for faster definition of regions\n BallSize = cellfun('length',cover.ball(Sets));\n IndPoints = ones(nl,2); % indexes for points in each layer of the segment\n for j = 1:nl\n IndPoints(j,2) = sum(BallSize(IndSets(j,1):IndSets(j,2)));\n end\n IndPoints(:,2) = cumsum(IndPoints(:,2));\n IndPoints(2:end,1) = IndPoints(2:end,1)+IndPoints(1:end-1,2);\n Base = Seg{1}; % the base of the segment\n nb = IndPoints(1,2); % number of points in the base\n\n % Reconstruct only large enough segments\n if nl > 1 && np > nb && ns > 2 && np > 20 && ~isempty(Base)\n\n %% Cylinder fitting\n [cyl,Reg] = cylinder_fitting(P,Points,IndPoints,nl,si);\n nc = numel(cyl.radius);\n\n %% Search possible parent cylinder\n if nc > 0 && si > 1\n [PC,cyl,added] = 
parent_cylinder(SPar,SChi,CiS,cylinder,cyl,si);\n nc = numel(cyl.radius);\n elseif si == 1\n PC = zeros(0,1);\n added = false;\n else\n added = false;\n end\n cyl.radius0 = cyl.radius;\n\n %% Modify cylinders\n if nc > 0\n % Define parent cylinder:\n parcyl.radius = cylinder.radius(PC);\n parcyl.length = cylinder.length(PC);\n parcyl.start = cylinder.start(PC,:);\n parcyl.axis = cylinder.axis(PC,:);\n % Modify the cylinders\n cyl = adjustments(cyl,parcyl,inputs,Reg);\n end\n\n %% Save the cylinders\n % if at least one acceptable cylinder, then save them\n Accept = nc > 0 & min(cyl.radius(1:nc)) > 0;\n if Accept\n % If the parent cylinder exists, set the parent-child relations\n if ~isempty(PC)\n cylinder.parent(c) = PC;\n if cylinder.extension(PC) == c\n I = cylinder.branch(PC);\n cylinder.branch(c:c+nc-1) = I;\n CiS{I} = [CiS{I}; linspace(c,c+nc-1,nc)'];\n else\n CChi{PC} = [CChi{PC}; c];\n cylinder.branch(c:c+nc-1) = si;\n CiS{si} = linspace(c,c+nc-1,nc)';\n end\n else\n cylinder.branch(c:c+nc-1) = si;\n CiS{si} = linspace(c,c+nc-1,nc)';\n end\n\n cylinder.radius(c:c+nc-1) = cyl.radius(1:nc);\n cylinder.length(c:c+nc-1) = cyl.length(1:nc);\n cylinder.axis(c:c+nc-1,:) = cyl.axis(1:nc,:);\n cylinder.start(c:c+nc-1,:) = cyl.start(1:nc,:);\n cylinder.parent(c+1:c+nc-1) = linspace(c,c+nc-2,nc-1);\n cylinder.extension(c:c+nc-2) = linspace(c+1,c+nc-1,nc-1);\n cylinder.UnmodRadius(c:c+nc-1) = cyl.radius0(1:nc);\n cylinder.SurfCov(c:c+nc-1) = cyl.SurfCov(1:nc);\n cylinder.mad(c:c+nc-1) = cyl.mad(1:nc);\n if added\n cylinder.added(c) = true;\n cylinder.added(c) = true;\n end\n c = c+nc; % number of cylinders so far (plus one)\n\n end\n end\n end\nend\nc = c-1; % number of cylinders\n\n\n%% Define outputs\nnames = fieldnames(cylinder);\nn = max(size(names));\nfor k = 1:n\n cylinder.(names{k}) = single(cylinder.(names{k})(1:c,:));\nend\nif c <= 2^16\n cylinder.parent = uint16(cylinder.parent);\n cylinder.extension = uint16(cylinder.extension);\nend\nnb = max(cylinder.branch);\nif nb <= 2^8\n cylinder.branch = uint8(cylinder.branch);\nelseif nb <= 2^16\n cylinder.branch = uint16(cylinder.branch);\nend\ncylinder.added = logical(cylinder.added);\n\n% Define the branching order:\nBOrd = zeros(c,1);\nfor i = 1:c\n if cylinder.parent(i) > 0\n p = cylinder.parent(i);\n if cylinder.extension(p) == i\n BOrd(i) = BOrd(p);\n else\n BOrd(i) = BOrd(p)+1;\n end\n end\nend\ncylinder.BranchOrder = uint8(BOrd);\n% Define the cylinder position inside the branch\nPiB = ones(c,1);\nfor i = 1:NumOfSeg\n C = CiS{i};\n if ~isempty(C)\n n = length(C);\n PiB(C) = (1:1:n)';\n end\nend\nif max(PiB) <= 2^8\n cylinder.PositionInBranch = uint8(PiB);\nelse\n cylinder.PositionInBranch = uint16(PiB);\nend\n\n% Growth volume correction\nif inputs.GrowthVolCor && c > 0\n cylinder = growth_volume_correction(cylinder,inputs);\nend\n\nend % End of main function\n\n\nfunction [cyl,Reg] = cylinder_fitting(P,Points,Ind,nl,si)\n\nif nl > 6\n i0 = 1; i = 4; % indexes of the first and last layers of the region\n t = 0;\n Reg = cell(nl,1);\n cyls = cell(11,1);\n regs = cell(11,1);\n data = zeros(11,4);\n while i0 < nl-2\n %% Fit at least three cylinders of different lengths\n bot = Points(Ind(i0,1):Ind(i0+1,2));\n Bot = average(P(bot,:)); % Bottom axis point of the region\n again = true;\n j = 0;\n while i+j <= nl && j <= 10 && (j <= 2 || again)\n %% Select points and estimate axis\n RegC = Points(Ind(i0,1):Ind(i+j,2)); % candidate region\n % Top axis point of the region:\n top = Points(Ind(i+j-1,1):Ind(i+j,2));\n Top = average(P(top,:));\n % 
Axis of the cylinder:\n Axis = Top-Bot;\n c0.axis = Axis/norm(Axis);\n % Compute the height along the axis:\n h = (P(RegC,:)-Bot)*c0.axis';\n minh = min(h);\n % Correct Bot to correspond to the real bottom\n if j == 0\n Bot = Bot+minh*c0.axis;\n c0.start = Bot;\n h = (P(RegC,:)-Bot)*c0.axis';\n minh = min(h);\n end\n if i+j >= nl\n ht = (Top-c0.start)*c0.axis';\n Top = Top+(max(h)-ht)*c0.axis;\n end\n % Compute the height of the Top:\n ht = (Top-c0.start)*c0.axis';\n Sec = h <= ht & h >= minh; % only points below the Top\n c0.length = ht-minh; % length of the region/cylinder\n % The region for the cylinder fitting:\n reg = RegC(Sec);\n Q0 = P(reg,:);\n\n %% Filter points and estimate radius\n if size(Q0,1) > 20\n [Keep,c0] = surface_coverage_filtering(Q0,c0,0.02,20);\n reg = reg(Keep);\n Q0 = Q0(Keep,:);\n else\n c0.radius = 0.01;\n c0.SurfCov = 0.05;\n c0.mad = 0.01;\n c0.conv = 1;\n c0.rel = 1;\n end\n\n %% Fit cylinder\n if size(Q0,1) > 9\n if i >= nl && t == 0\n c = least_squares_cylinder(Q0,c0);\n elseif i >= nl && t > 0\n h = (Q0-CylTop)*c0.axis';\n I = h >= 0;\n Q = Q0(I,:); % the section\n reg = reg(I);\n n2 = size(Q,1); n1 = nnz(~I);\n if n2 > 9 && n1 > 5\n Q0 = [Q0(~I,:); Q]; % the point cloud for cylinder fitting\n W = [1/3*ones(n2,1); 2/3*ones(n1,1)]; % the weights\n c = least_squares_cylinder(Q0,c0,W,Q);\n else\n c = least_squares_cylinder(Q0,c0);\n end\n elseif t == 0\n top = Points(Ind(i+j-3,1):Ind(i+j-2,2));\n Top = average(P(top,:)); % Top axis point of the region\n ht = (Top-Bot)*c0.axis';\n h = (Q0-Bot)*c0.axis';\n I = h <= ht;\n Q = Q0(I,:); % the section\n reg = reg(I);\n n2 = size(Q,1); n3 = nnz(~I);\n if n2 > 9 && n3 > 5\n Q0 = [Q; Q0(~I,:)]; % the point cloud for cylinder fitting\n W = [2/3*ones(n2,1); 1/3*ones(n3,1)]; % the weights\n c = least_squares_cylinder(Q0,c0,W,Q);\n else\n c = least_squares_cylinder(Q0,c0);\n end\n else\n top = Points(Ind(i+j-3,1):Ind(i+j-2,2));\n Top = average(P(top,:)); % Top axis point of the region\n ht = (Top-CylTop)*c0.axis';\n h = (Q0-CylTop)*c0.axis';\n I1 = h < 0; % the bottom\n I2 = h >= 0 & h <= ht; % the section\n I3 = h > ht; % the top\n Q = Q0(I2,:);\n reg = reg(I2);\n n1 = nnz(I1); n2 = size(Q,1); n3 = nnz(I3);\n if n2 > 9\n Q0 = [Q0(I1,:); Q; Q0(I3,:)];\n W = [1/4*ones(n1,1); 2/4*ones(n2,1); 1/4*ones(n3,1)];\n c = least_squares_cylinder(Q0,c0,W,Q);\n else\n c = c0;\n c.rel = 0;\n end\n end\n\n if c.conv == 0\n c = c0;\n c.rel = 0;\n end\n if c.SurfCov < 0.2\n c.rel = 0;\n end\n else\n c = c0;\n c.rel = 0;\n end\n\n % Collect fit data\n data(j+1,:) = [c.rel c.conv c.SurfCov c.length/c.radius];\n cyls{j+1} = c;\n regs{j+1} = reg;\n j = j+1;\n % If reasonable cylinder fitted, then stop fitting new ones\n % (but always fit at least three cylinders)\n RL = c.length/c.radius; % relative length of the cylinder\n if again && c.rel && c.conv && RL > 2\n if si == 1 && c.SurfCov > 0.7\n again = false;\n elseif si > 1 && c.SurfCov > 0.5\n again = false;\n end\n end\n end\n\n %% Select the best of the fitted cylinders\n % based on maximum surface coverage\n OKfit = data(1:j,1) & data(1:j,2) & data(1:j,4) > 1.5;\n\n J = (1:1:j)';\n t = t+1;\n if any(OKfit)\n J = J(OKfit);\n end\n [~,I] = max(data(J,3)-0.01*data(J,4));\n J = J(I);\n c = cyls{J};\n\n %% Update the indexes of the layers for the next region:\n CylTop = c.start+c.length*c.axis;\n i0 = i0+1;\n bot = Points(Ind(i0,1):Ind(i0+1,2));\n Bot = average(P(bot,:)); % Bottom axis point of the region\n h = (Bot-CylTop)*c.axis';\n i00 = i0;\n while i0+1 < nl && i0 < i00+5 && h < 
-c.radius/3\n i0 = i0+1;\n bot = Points(Ind(i0,1):Ind(i0+1,2));\n Bot = average(P(bot,:)); % Bottom axis point of the region\n h = (Bot-CylTop)*c.axis';\n end\n i = i0+5;\n i = min(i,nl);\n\n %% If the next section is very short part of the end of the branch\n % then simply increase the length of the current cylinder\n if nl-i0+2 < 4\n reg = Points(Ind(nl-5,1):Ind(nl,2));\n Q0 = P(reg,:);\n ht = (c.start+c.length*c.axis)*c.axis';\n h = Q0*c.axis';\n maxh = max(h);\n if maxh > ht\n c.length = c.length+(maxh-ht);\n end\n i0 = nl;\n end\n Reg{t} = regs{J};\n\n if t == 1\n cyl = c;\n names = fieldnames(cyl);\n n = max(size(names));\n else\n for k = 1:n\n cyl.(names{k}) = [cyl.(names{k}); c.(names{k})];\n end\n end\n\n %% compute cylinder top for the definition of the next section\n CylTop = c.start+c.length*c.axis;\n end\n Reg = Reg(1:t);\n\nelse\n %% Define a region for small segments\n Q0 = P(Points,:);\n if size(Q0,1) > 10\n %% Define the direction\n bot = Points(Ind(1,1):Ind(1,2));\n Bot = average(P(bot,:));\n top = Points(Ind(nl,1):Ind(nl,2));\n Top = average(P(top,:));\n Axis = Top-Bot;\n c0.axis = Axis/norm(Axis);\n h = Q0*c0.axis';\n c0.length = max(h)-min(h);\n hpoint = Bot*c0.axis';\n c0.start = Bot-(hpoint-min(h))*c0.axis;\n\n %% Define other outputs\n [Keep,c0] = surface_coverage_filtering(Q0,c0,0.02,20);\n Reg = cell(1,1);\n Reg{1} = Points(Keep);\n Q0 = Q0(Keep,:);\n cyl = least_squares_cylinder(Q0,c0);\n if ~cyl.conv || ~cyl.rel\n cyl = c0;\n end\n t = 1;\n else\n cyl = 0;\n t = 0;\n end\nend\n% Define Reg as coordinates\nfor i = 1:t\n Reg{i} = P(Reg{i},:);\nend\nReg = Reg(1:t);\n% End of function\nend\n\n\nfunction [PC,cyl,added] = parent_cylinder(SPar,SChi,CiS,cylinder,cyl,si)\n\n% Finds the parent cylinder from the possible parent segment.\n% Does this by checking if the axis of the cylinder, if continued, will\n% cross the nearby cylinders in the parent segment.\n% Adjust the cylinder so that it starts from the surface of its parent.\n\nrad = cyl.radius;\nlen = cyl.length;\nsta = cyl.start;\naxe = cyl.axis;\n\n% PC Parent cylinder\nnc = numel(rad);\nadded = false;\nif SPar(si) > 0 % parent segment exists, find the parent cylinder\n s = SPar(si);\n PC = CiS{s}; % the cylinders in the parent segment\n % select the closest cylinders for closer examination\n if length(PC) > 1\n D = mat_vec_subtraction(-cylinder.start(PC,:),-sta(1,:));\n d = sum(D.*D,2);\n [~,I] = sort(d);\n if length(PC) > 3\n I = I(1:4);\n end\n pc = PC(I);\n ParentFound = false;\n elseif length(PC) == 1\n ParentFound = true;\n else\n PC = zeros(0,1);\n ParentFound = true;\n end\n\n %% Check possible crossing points\n if ~ParentFound\n pc0 = pc;\n n = length(pc);\n % Calculate the possible crossing points of the cylinder axis, when\n % extended, on the surfaces of the parent candidate cylinders\n x = zeros(n,2); % how much the starting point has to move to cross\n h = zeros(n,2); % the crossing point height in the parent\n Axe = cylinder.axis(pc,:);\n Sta = cylinder.start(pc,:);\n for j = 1:n\n % Crossing points solved from a quadratic equation\n A = axe(1,:)-(axe(1,:)*Axe(j,:)')*Axe(j,:);\n B = sta(1,:)-Sta(j,:)-(sta(1,:)*Axe(j,:)')*Axe(j,:)...\n +(Sta(j,:)*Axe(j,:)')*Axe(j,:);\n e = A*A';\n f = 2*A*B';\n g = B*B'-cylinder.radius(pc(j))^2;\n di = sqrt(f^2 - 4*e*g); % the discriminant\n s1 = (-f + di)/(2*e);\n % how much the starting point must be moved to cross:\n s2 = (-f - di)/(2*e);\n if isreal(s1) %% cylinders can cross\n % the heights of the crossing points\n x(j,:) = [s1 s2];\n h(j,1) = 
sta(1,:)*Axe(j,:)'+x(j,1)*axe(1,:)*Axe(j,:)'-...\n Sta(j,:)*Axe(j,:)';\n h(j,2) = sta(1,:)*Axe(j,:)'+x(j,2)*axe(1,:)*Axe(j,:)'-...\n Sta(j,:)*Axe(j,:)';\n end\n end\n\n %% Extend to crossing point in the (extended) parent\n I = x(:,1) ~= 0; % Select only candidates with crossing points\n pc = pc0(I); x = x(I,:); h = h(I,:);\n j = 1; n = nnz(I);\n X = zeros(n,3); %\n Len = cylinder.length(pc);\n while j <= n && ~ParentFound\n if x(j,1) > 0 && x(j,2) < 0\n % sp inside the parent and crosses its surface\n if h(j,1) >= 0 && h(j,1) <= Len(j) && len(1)-x(j,1) > 0\n PC = pc(j);\n sta(1,:) = sta(1,:)+x(j,1)*axe(1,:);\n len(1) = len(1)-x(j,1);\n ParentFound = true;\n elseif len(1)-x(j,1) > 0\n if h(j,1) < 0\n X(j,:) = [x(j,1) abs(h(j,1)) 0];\n else\n X(j,:) = [x(j,1) h(j,1)-Len(j) 0];\n end\n else\n X(j,:) = [x(j,1) h(j,1) 1];\n end\n elseif x(j,1) < 0 && x(j,2) > 0 && len(1)-x(j,2) > 0\n % sp inside the parent and crosses its surface\n if h(j,2) >= 0 && h(j,2) <= Len(j) && len(1)-x(j,2) > 0\n PC = pc(j);\n sta(1,:) = sta(1,:)+x(j,2)*axe(1,:);\n len(1) = len(1)-x(j,2);\n ParentFound = true;\n elseif len(1)-x(j,2) > 0\n if h(j,2) < 0\n X(j,:) = [x(j,2) abs(h(j,2)) 0];\n else\n X(j,:) = [x(j,2) h(j,2)-Len(j) 0];\n end\n else\n X(j,:) = [x(j,2) h(j,2) 1];\n end\n elseif x(j,1) < 0 && x(j,2) < 0 && x(j,2) < x(j,1) && len(1)-x(j,1) > 0\n % sp outside the parent and crosses its surface when extended\n % backwards\n if h(j,1) >= 0 && h(j,1) <= Len(j) && len(1)-x(j,1) > 0\n PC = pc(j);\n sta(1,:) = sta(1,:)+x(j,1)*axe(1,:);\n len(1) = len(1)-x(j,1);\n ParentFound = true;\n elseif len(1)-x(j,1) > 0\n if h(j,1) < 0\n X(j,:) = [x(j,1) abs(h(j,1)) 0];\n else\n X(j,:) = [x(j,1) h(j,1)-Len(j) 0];\n end\n else\n X(j,:) = [x(j,1) h(j,1) 1];\n end\n elseif x(j,1) < 0 && x(j,2) < 0 && x(j,2) > x(j,1) && len(1)-x(j,2) > 0\n % sp outside the parent and crosses its surface when extended\n % backwards\n if h(j,2) >= 0 && h(j,2) <= Len(j) && len(1)-x(j,2) > 0\n PC = pc(j);\n sta(1,:) = sta(1,:)+x(j,2)*axe(1,:);\n len(1) = len(1)-x(j,2);\n ParentFound = true;\n elseif len(1)-x(j,2) > 0\n if h(j,2) < 0\n X(j,:) = [x(j,2) abs(h(j,2)) 0];\n else\n X(j,:) = [x(j,2) h(j,2)-Len(j) 0];\n end\n else\n X(j,:) = [x(j,2) h(j,2) 1];\n end\n elseif x(j,1) > 0 && x(j,2) > 0 && x(j,2) < x(j,1) && len(1)-x(j,1) > 0\n % sp outside the parent but crosses its surface when extended forward\n if h(j,1) >= 0 && h(j,1) <= Len(j) && len(1)-x(j,1) > 0\n PC = pc(j);\n sta(1,:) = sta(1,:)+x(j,1)*axe(1,:);\n len(1) = len(1)-x(j,1);\n ParentFound = true;\n elseif len(1)-x(j,1) > 0\n if h(j,1) < 0\n X(j,:) = [x(j,1) abs(h(j,1)) 0];\n else\n X(j,:) = [x(j,1) h(j,1)-Len(j) 0];\n end\n else\n X(j,:) = [x(j,1) h(j,1) 1];\n end\n elseif x(j,1) > 0 && x(j,2) > 0 && x(j,2) > x(j,1) && len(1)-x(j,2) > 0\n % sp outside the parent and crosses its surface when extended forward\n if h(j,2) >= 0 && h(j,2) <= Len(j) && len(1)-x(j,2) > 0\n PC = pc(j);\n sta(1,:) = sta(1,:)+x(j,2)*axe(1,:);\n len(1) = len(1)-x(j,2);\n ParentFound = true;\n elseif len(1)-x(j,2) > 0\n if h(j,1) < 0\n X(j,:) = [x(j,2) abs(h(j,2)) 0];\n else\n X(j,:) = [x(j,2) h(j,2)-Len(j) 0];\n end\n else\n X(j,:) = [x(j,2) h(j,2) 1];\n end\n end\n j = j+1;\n end\n\n if ~ParentFound && n > 0\n [H,I] = min(X(:,2));\n X = X(I,:);\n if X(3) == 0 && H < 0.1*Len(I)\n PC = pc(I);\n sta(1,:) = sta(1,:)+X(1)*axe(1,:);\n len(1) = len(1)-X(1);\n ParentFound = true;\n else\n PC = pc(I);\n\n if nc > 1 && X(1) <= rad(1) && abs(X(2)) <= 1.25*cylinder.length(PC)\n % Remove the first cylinder and adjust the second\n S 
= sta(1,:)+X(1)*axe(1,:);\n V = sta(2,:)+len(2)*axe(2,:)-S;\n len(2) = norm(V); len = len(2:nc);\n axe(2,:) = V/norm(V); axe = axe(2:nc,:);\n sta(2,:) = S; sta = sta(2:nc,:);\n rad = rad(2:nc);\n cyl.mad = cyl.mad(2:nc);\n cyl.SurfCov = cyl.SurfCov(2:nc);\n nc = nc-1;\n ParentFound = true;\n elseif nc > 1\n % Remove the first cylinder\n sta = sta(2:nc,:); len = len(2:nc);\n axe = axe(2:nc,:); rad = rad(2:nc);\n cyl.mad = cyl.mad(2:nc);\n cyl.SurfCov = cyl.SurfCov(2:nc);\n nc = nc-1;\n elseif isempty(SChi{si})\n % Remove the cylinder\n nc = 0;\n PC = zeros(0,1);\n ParentFound = true;\n rad = zeros(0,1);\n elseif X(1) <= rad(1) && abs(X(2)) <= 1.5*cylinder.length(PC)\n % Adjust the cylinder\n sta(1,:) = sta(1,:)+X(1)*axe(1,:);\n len(1) = abs(X(1));\n ParentFound = true;\n end\n end\n end\n\n if ~ParentFound\n % The parent is the cylinder in the parent segment whose axis\n % line is the closest to the axis line of the first cylinder\n % Or the parent cylinder is the one whose base, when connected\n % to the first cylinder is the most parallel.\n % Add new cylinder\n pc = pc0;\n\n [Dist,~,DistOnLines] = distances_between_lines(...\n sta(1,:),axe(1,:),cylinder.start(pc,:),cylinder.axis(pc,:));\n\n I = DistOnLines >= 0;\n J = DistOnLines <= cylinder.length(pc);\n I = I&J;\n if ~any(I)\n I = DistOnLines >= -0.2*cylinder.length(pc);\n J = DistOnLines <= 1.2*cylinder.length(pc);\n I = I&J;\n end\n if any(I)\n pc = pc(I); Dist = Dist(I); DistOnLines = DistOnLines(I);\n [~,I] = min(Dist);\n DistOnLines = DistOnLines(I); PC = pc(I);\n Q = cylinder.start(PC,:)+DistOnLines*cylinder.axis(PC,:);\n V = sta(1,:)-Q; L = norm(V); V = V/L;\n a = acos(V*cylinder.axis(PC,:)');\n h = sin(a)*L;\n S = Q+cylinder.radius(PC)/h*L*V;\n L = (h-cylinder.radius(PC))/h*L;\n if L > 0.01 && L/len(1) > 0.2\n nc = nc+1;\n sta = [S; sta]; rad = [rad(1); rad];\n axe = [V; axe]; len = [L; len];\n cyl.mad = [cyl.mad(1); cyl.mad];\n cyl.SurfCov = [cyl.SurfCov(1); cyl.SurfCov];\n cyl.rel = [cyl.rel(1); cyl.rel];\n cyl.conv = [cyl.conv(1); cyl.conv];\n added = true;\n end\n else\n V = -mat_vec_subtraction(cylinder.start(pc,:),sta(1,:));\n L0 = sqrt(sum(V.*V,2));\n V = [V(:,1)./L0 V(:,2)./L0 V(:,3)./L0];\n A = V*axe(1,:)';\n [A,I] = max(A);\n L1 = L0(I); PC = pc(I); V = V(I,:);\n a = acos(V*cylinder.axis(PC,:)');\n h = sin(a)*L1;\n S = cylinder.start(PC,:)+cylinder.radius(PC)/h*L1*V;\n L = (h-cylinder.radius(PC))/h*L1;\n if L > 0.01 && L/len(1) > 0.2\n nc = nc+1;\n sta = [S; sta]; rad = [rad(1); rad];\n axe = [V; axe]; len = [L; len];\n cyl.mad = [cyl.mad(1); cyl.mad];\n cyl.SurfCov = [cyl.SurfCov(1); cyl.SurfCov];\n cyl.rel = [cyl.rel(1); cyl.rel];\n cyl.conv = [cyl.conv(1); cyl.conv];\n added = true;\n end\n end\n end\n end\nelse\n % no parent segment exists\n PC = zeros(0,1);\nend\n\n% define the output\ncyl.radius = rad(1:nc); cyl.length = len(1:nc,:);\ncyl.start = sta(1:nc,:); cyl.axis = axe(1:nc,:);\ncyl.mad = cyl.mad(1:nc); cyl.SurfCov = cyl.SurfCov(1:nc);\ncyl.conv = cyl.conv(1:nc); cyl.rel = cyl.rel(1:nc);\n% End of function\nend\n\n\nfunction cyl = adjustments(cyl,parcyl,inputs,Regs)\n\nnc = size(cyl.radius,1);\nMod = false(nc,1); % cylinders modified\nSC = cyl.SurfCov;\n\n%% Determine the maximum and the minimum radius\n% The maximum based on parent branch\nif ~isempty(parcyl.radius)\n MaxR = 0.95*parcyl.radius;\n MaxR = max(MaxR,inputs.MinCylRad);\nelse\n % use the maximum from the bottom cylinders\n a = min(3,nc);\n MaxR = 1.25*max(cyl.radius(1:a));\nend\nMinR = min(cyl.radius(SC > 0.7));\nif ~isempty(MinR) && 
min(cyl.radius) < MinR/2\n MinR = min(cyl.radius(SC > 0.4));\nelseif isempty(MinR)\n MinR = min(cyl.radius(SC > 0.4));\n if isempty(MinR)\n MinR = inputs.MinCylRad;\n end\nend\n\n%% Check maximum and minimum radii\nI = cyl.radius < MinR;\ncyl.radius(I) = MinR;\nMod(I) = true;\nif inputs.ParentCor || nc <= 3\n I = (cyl.radius > MaxR & SC < 0.7) | (cyl.radius > 1.2*MaxR);\n cyl.radius(I) = MaxR;\n Mod(I) = true;\n % For short branches modify with more restrictions\n if nc <= 3\n I = (cyl.radius > 0.75*MaxR & SC < 0.7);\n if any(I)\n r = max(SC(I)/0.7.*cyl.radius(I),MinR);\n cyl.radius(I) = r;\n Mod(I) = true;\n end\n end\nend\n\n%% Use taper correction to modify radius of too small and large cylinders\n% Adjust radii if a small SurfCov and high SurfCov in the previous and\n% following cylinders\nfor i = 2:nc-1\n if SC(i) < 0.7 && SC(i-1) >= 0.7 && SC(i+1) >= 0.7\n cyl.radius(i) = 0.5*(cyl.radius(i-1)+cyl.radius(i+1));\n Mod(i) = true;\n end\nend\n\n%% Use taper correction to modify radius of too small and large cylinders\nif inputs.TaperCor\n if max(cyl.radius) < 0.001\n\n %% Adjust radii of thin branches to be linearly decreasing\n if nc > 2\n r = sort(cyl.radius);\n r = r(2:end-1);\n a = 2*mean(r);\n if a > max(r)\n a = min(0.01,max(r));\n end\n b = min(0.5*min(cyl.radius),0.001);\n cyl.radius = linspace(a,b,nc)';\n elseif nc > 1\n r = max(cyl.radius);\n cyl.radius = [r; 0.5*r];\n end\n Mod = true(nc,1);\n\n elseif nc > 4\n %% Parabola adjustment of maximum and minimum\n % Define parabola taper shape as maximum (and minimum) radii for\n % the cylinders with low surface coverage\n branchlen = sum(cyl.length(1:nc)); % branch length\n L = cyl.length/2+[0; cumsum(cyl.length(1:nc-1))];\n Taper = [L; branchlen];\n Taper(:,2) = [1.05*cyl.radius; MinR];\n sc = [SC; 1];\n\n % Least square fitting of parabola to \"Taper\":\n A = [sum(sc.*Taper(:,1).^4) sum(sc.*Taper(:,1).^2); ...\n sum(sc.*Taper(:,1).^2) sum(sc)];\n y = [sum(sc.*Taper(:,2).*Taper(:,1).^2); sum(sc.*Taper(:,2))];\n warning off\n x = A\\y;\n warning on\n x(1) = min(x(1),-0.0001); % tapering from the base to the tip\n Ru = x(1)*L.^2+x(2); % upper bound parabola\n Ru( Ru < MinR ) = MinR;\n if max(Ru) > MaxR\n a = max(Ru);\n Ru = MaxR/a*Ru;\n end\n Rl = 0.75*Ru; % lower bound parabola\n Rl( Rl < MinR ) = MinR;\n\n % Modify radii based on parabola:\n % change values larger than the parabola-values when SC < 70%:\n I = cyl.radius > Ru & SC < 0.7;\n cyl.radius(I) = Ru(I)+(cyl.radius(I)-Ru(I)).*SC(I)/0.7;\n Mod(I) = true;\n % change values larger than the parabola-values when SC > 70% and\n % radius is over 33% larger than the parabola-value:\n I = cyl.radius > 1.333*Ru & SC >= 0.7;\n cyl.radius(I) = Ru(I)+(cyl.radius(I)-Ru(I)).*SC(I);\n Mod(I) = true;\n % change values smaller than the downscaled parabola-values:\n I = (cyl.radius < Rl & SC < 0.7) | (cyl.radius < 0.5*Rl);\n cyl.radius(I) = Rl(I);\n Mod(I) = true;\n\n else\n %% Adjust radii of short branches to be linearly decreasing\n R = cyl.radius;\n if nnz(SC >= 0.7) > 1\n a = max(R(SC >= 0.7));\n b = min(R(SC >= 0.7));\n elseif nnz(SC >= 0.7) == 1\n a = max(R(SC >= 0.7));\n b = min(R);\n else\n a = sum(R.*SC/sum(SC));\n b = min(R);\n end\n Ru = linspace(a,b,nc)';\n I = SC < 0.7 & ~Mod;\n cyl.radius(I) = Ru(I)+(R(I)-Ru(I)).*SC(I)/0.7;\n Mod(I) = true;\n\n end\nend\n\n%% Modify starting points by optimising them for given radius and axis\nnr = size(Regs,1);\nfor i = 1:nc\n if Mod(i)\n if nr == nc\n Reg = Regs{i};\n elseif i > 1\n Reg = Regs{i-1};\n end\n if 
abs(cyl.radius(i)-cyl.radius0(i)) > 0.005 && ...\n (nr == nc || (nr < nc && i > 1))\n P = Reg-cyl.start(i,:);\n [U,V] = orthonormal_vectors(cyl.axis(i,:));\n P = P*[U V];\n cir = least_squares_circle_centre(P,[0 0],cyl.radius(i));\n if cir.conv && cir.rel\n cyl.start(i,:) = cyl.start(i,:)+cir.point(1)*U'+cir.point(2)*V';\n cyl.mad(i,1) = cir.mad;\n [~,V,h] = distances_to_line(Reg,cyl.axis(i,:),cyl.start(i,:));\n if min(h) < -0.001\n cyl.length(i) = max(h)-min(h);\n cyl.start(i,:) = cyl.start(i,:)+min(h)*cyl.axis(i,:);\n [~,V,h] = distances_to_line(Reg,cyl.axis(i,:),cyl.start(i,:));\n end\n a = max(0.02,0.2*cyl.radius(i));\n nl = ceil(cyl.length(i)/a);\n nl = max(nl,4);\n ns = ceil(2*pi*cyl.radius(i)/a);\n ns = max(ns,10);\n ns = min(ns,36);\n cyl.SurfCov(i,1) = surface_coverage2(...\n cyl.axis(i,:),cyl.length(i),V,h,nl,ns);\n end\n end\n end\nend\n\n%% Continuous branches\n% Make cylinders properly \"continuous\" by moving the starting points\n% Move the starting point to the plane defined by parent cylinder's top\nif nc > 1\n for j = 2:nc\n U = cyl.start(j,:)-cyl.start(j-1,:)-cyl.length(j-1)*cyl.axis(j-1,:);\n if (norm(U) > 0.0001)\n % First define vector V and W which are orthogonal to the\n % cylinder axis N\n N = cyl.axis(j,:)';\n if norm(N) > 0\n [V,W] = orthonormal_vectors(N);\n % Now define the new starting point\n x = [N V W]\\U';\n cyl.start(j,:) = cyl.start(j,:)-x(1)*N';\n if x(1) > 0\n cyl.length(j) = cyl.length(j)+x(1);\n elseif cyl.length(j)+x(1) > 0\n cyl.length(j) = cyl.length(j)+x(1);\n end\n end\n end\n end\nend\n\n%% Connect far away first cylinder to the parent\nif ~isempty(parcyl.radius)\n [d,V,h,B] = distances_to_line(cyl.start(1,:),parcyl.axis,parcyl.start);\n d = d-parcyl.radius;\n if d > 0.001\n taper = cyl.start(1,:);\n E = taper+cyl.length(1)*cyl.axis(1,:);\n V = parcyl.radius*V/norm(V);\n if h >= 0 && h <= parcyl.length\n cyl.start(1,:) = parcyl.start+B+V;\n elseif h < 0\n cyl.start(1,:) = parcyl.start+V;\n else\n cyl.start(1,:) = parcyl.start+parcyl.length*parcyl.axis+V;\n end\n cyl.axis(1,:) = E-cyl.start(1,:);\n cyl.length(1) = norm(cyl.axis(1,:));\n cyl.axis(1,:) = cyl.axis(1,:)/cyl.length(1);\n end\nend\n\n% End of function\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "correct_segments.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/correct_segments.m", "size": 30167, "source_encoding": "utf_8", "md5": "3e6a16d9d908979779eec4d463d9d735", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
If not, see .\n\nfunction segment = correct_segments(P,cover,segment,inputs,RemSmall,ModBases,AddChild)\n\n% ---------------------------------------------------------------------\n% CORRECT_SEGMENTS.M Corrects the given segmentation.\n%\n% Version 2.0.2\n% Latest update 2 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% First segments are modified by making them as long as possible. Here the\n% stem and 1-st order branches are handled differently as there is also\n% restriction to how \"curved\" they can be in the sense of ratio\n% total_length/base_tip_distance. Then, optionally, small segments that\n% are close to their parent and have no children are removed as unclear\n% (are they part of the parent or real segments?).\n% Then, optionally, the bases of branches are modified by\n% expanding them into parent segment in order to remove ledges from the\n% parent from locations of the branches.\n\n% Inputs:\n% P Point cloud\n% cover Cover sets\n% segment Segments\n% inputs The input structure\n% RemSmall If True, small unclear segments are removed\n% ModBase If True, bases of the segments are modified\n% AddChild If True, the expanded (modified) base is added to the child segment.\n% If AddChild = false and ModBase = true, then the expanded part is\n% removed from both the child and the parent.\n% Outputs:\n% segment Segments\n% ---------------------------------------------------------------------\n\n% Changes from version 2.0.1 to 2.0.2, 2 May 2022:\n% 1) Added \"if ~isempty(SegChildren)... \" statement to the\n% \"modify_topology\" subfunction where next branch is selected based on \n% the increasing branching order to prevent a rare bug \n\n% Changes from version 2.0.0 to 2.0.1, 2 Oct 2019:\n% 1) Main function: added \"if SPar(i,1) > 1\"-statement to ModBase -->\n% NotAddChild\n\nif nargin == 4\n RemSmall = true;\n ModBases = false;\nelseif nargin == 5\n ModBases = false;\nelseif nargin == 6\n AddChild = false;\nend\n\nBal = cover.ball;\nSegs = segment.segments;\nSPar = segment.ParentSegment;\nSChi = segment.ChildSegment;\nCe = P(cover.center,:);\n\n%% Make stem and branches as long as possible\nif RemSmall\n [Segs,SPar,SChi] = modify_topology(P,Ce,Bal,Segs,SPar,SChi,inputs.PatchDiam2Max);\nelse\n [Segs,SPar,SChi] = modify_topology(P,Ce,Bal,Segs,SPar,SChi,inputs.PatchDiam1);\nend\n\n%% Remove small child segments\nif RemSmall\n [Segs,SPar,SChi] = remove_small(Ce,Segs,SPar,SChi);\nend\n\n% Check the consistency of empty vector sizes\nns = size(Segs,1);\nfor i = 1:ns\n if isempty(SChi{i})\n SChi{i} = zeros(0,1,'uint32');\n end\nend\n\nif ModBases\n %% Modify the base of the segments\n ns = size(Segs,1);\n base = cell(200,1);\n if AddChild\n % Add the expanded base to the child and remove it from the parent\n for i = 2:ns\n SegC = Segs{i};\n SegP = Segs{SPar(i,1)};\n [SegP,Base] = modify_parent(P,Bal,Ce,SegP,SegC,SPar(i,2),inputs.PatchDiam1,base);\n Segs{SPar(i,1)} = SegP;\n SegC{1} = Base;\n Segs{i} = SegC;\n end\n else\n % Only remove the expanded base from the parent\n for i = 2:ns\n if SPar(i,1) > 1\n SegC = Segs{i};\n SegP = Segs{SPar(i,1)};\n SegP = modify_parent(P,Bal,Ce,SegP,SegC,SPar(i,2),inputs.PatchDiam2Max,base);\n Segs{SPar(i,1)} = SegP;\n end\n end\n end\nend\nSPar = SPar(:,1);\n\n% Modify the size and type of SChi and Segs, if necessary\nns = size(Segs,1);\nfor i = 1:ns\n C = SChi{i};\n if size(C,2) > size(C,1) && size(C,1) > 0\n SChi{i} = uint32(C');\n elseif size(C,1) == 0 || size(C,2) == 0\n 
SChi{i} = zeros(0,1,'uint32');\n else\n SChi{i} = uint32(C);\n end\n S = Segs{i};\n for j = 1:size(S,1)\n S{j} = uint32(S{j});\n end\n Segs{i} = S;\nend\nsegment.segments = Segs;\nsegment.ParentSegment = SPar;\nsegment.ChildSegment = SChi;\n\n%% Generate segment data for the points\nnp = size(P,1);\nns = size(Segs,1);\n% Define for each point its segment\nif ns <= 2^16\n SegmentOfPoint = zeros(np,1,'uint16');\nelse\n SegmentOfPoint = zeros(np,1,'uint32');\nend\nfor i = 1:ns\n S = Segs{i};\n S = vertcat(S{:});\n SegmentOfPoint(vertcat(Bal{S})) = i;\nend\nsegment.SegmentOfPoint = SegmentOfPoint;\n% Define the indexes of the segments up to 3rd-order\nC = SChi{1};\nsegment.branch1indexes = C;\nif ~isempty(C)\n C = vertcat(SChi{C});\n segment.branch2indexes = C;\n if ~isempty(C)\n C = vertcat(SChi{C});\n segment.branch3indexes = C;\n else\n segment.branch3indexes = zeros(0,1);\n end\nelse\n segment.branch2indexes = zeros(0,1);\n segment.branch3indexes = zeros(0,1);\nend\n\nend % End of main function\n\n\nfunction StemTop = search_stem_top(P,Ce,Bal,Segs,SPar,dmin)\n\n% Search the stem's top segment such that the resulting stem\n% 1) is one the highest segments (goes to the top of the tree)\n% 2) is horizontally close to the bottom of the stem (goes straigth up)\n% 3) has length close to the distance between its bottom and top (is not too curved)\nnseg = size(Segs,1);\nSegHeight = zeros(nseg,1); % heights of the tips of the segments\nHorDist = zeros(nseg,1); % horizontal distances of the tips from stem's center\ns = Segs{1}{1};\nStemCen = average(Ce(s,:)); % center (x,y) of stem base\nfor i = 1:nseg\n S = Segs{i}{end}(1);\n SegHeight(i) = Ce(S,3);\n HorDist(i) = norm(Ce(S,1:2)-StemCen(1:2));\nend\nTop = max(SegHeight); % the height of the highest tip\nHeiDist = Top-SegHeight; % the height difference to \"Top\"\nDist = sqrt((HorDist.^2+HeiDist.^2)); % Distance to the top\nLenDisRatio = 2;\nSearchDist = 0.5;\nMaxLenDisRatio = 1.05; % the maximum acceptable length/distance ratio of segments\nSubSegs = zeros(100,1); % Segments to be combined to form the stem\nwhile LenDisRatio > MaxLenDisRatio\n StemTops = (1:1:nseg)';\n I = Dist < SearchDist; % only segments with distance to the top < 0.5m\n while ~any(I)\n SearchDist = SearchDist+0.5;\n I = Dist < SearchDist;\n end\n StemTops = StemTops(I);\n\n % Define i-1 alternative stems from StemTops\n n = length(StemTops);\n Stems = cell(n,1);\n Segment = cell(3000,1);\n for j = 1:n\n Seg = Segs{1};\n spar = SPar;\n if StemTops(j) ~= 1\n % Tip point was not in the current segment, modify segments\n SubSegs(1) = StemTops(j);\n nsegs = 1;\n segment = StemTops(j);\n while segment ~= 1\n segment = SPar(segment,1);\n nsegs = nsegs+1;\n SubSegs(nsegs) = segment;\n end\n % Modify stem\n a = size(Seg,1);\n Segment(1:a) = Seg;\n a = a+1;\n for i = 1:nsegs-2\n I = SubSegs(nsegs-i); % segment to be combined to the first segment\n J = SubSegs(nsegs-i-1); % above segment's child to be combined next\n SP = spar(I,2); % layer index of the child in the parent\n SegC = Segs{I};\n sp = spar(J,2); % layer index of the child's child in the child\n if SP >= a-2 % Use the whole parent\n Segment(a:a+sp-1) = SegC(1:sp);\n spar(J,2) = a+sp-1;\n a = a+sp;\n else % Use only bottom part of the parent\n Segment(SP+1:SP+sp) = SegC(1:sp);\n a = SP+sp+1;\n spar(J,2) = SP+sp;\n end\n SubSegs(nsegs-i) = 1;\n end\n\n % Combine the last segment to the branch\n I = SubSegs(1);\n SP = spar(I,2);\n SegC = Segs{I};\n nc = size(SegC,1);\n if SP >= a-2 % Use the whole parent\n Segment(a:a+nc-1) = 
SegC;\n a = a+nc-1;\n else % divide the parent segment into two parts\n Segment(SP+1:SP+nc) = SegC;\n a = SP+nc;\n end\n Stems{j,1} = Segment(1:a);\n else\n Stems{j,1} = Seg;\n end\n\n end\n\n % Calculate the lengths of the candidate stems\n N = ceil(0.5/dmin/1.4); % number of layers used for linear length approximation\n Lengths = zeros(n,1);\n Heights = zeros(n,1);\n for i = 1:n\n Seg = Stems{i,1};\n ns = size(Seg,1);\n if ceil(ns/N) > floor(ns/N)\n m = ceil(ns/N);\n else\n m = ceil(ns/N)+1;\n end\n Nodes = zeros(m,3);\n for j = 1:m\n I = (j-1)*N+1;\n if I > ns\n I = ns;\n end\n S = Seg{I};\n if length(S) > 1\n Nodes(j,:) = average(Ce(S,:));\n else\n S = Bal{S};\n Nodes(j,:) = average(P(S,:));\n end\n end\n V = Nodes(2:end,:)-Nodes(1:end-1,:);\n Lengths(i) = sum(sqrt(sum(V.*V,2)));\n V = Nodes(end,:)-Nodes(1,:);\n Heights(i) = norm(V);\n end\n\n LenDisRatio = Lengths./Heights;\n [LenDisRatio,I] = min(LenDisRatio);\n StemTop = StemTops(I);\n SearchDist = SearchDist+1;\n if SearchDist > 3\n MaxLenDisRatio = 1.1;\n if SearchDist > 5\n MaxLenDisRatio = 1.15;\n if SearchDist > 7\n MaxLenDisRatio = 5;\n end\n end\n end\nend\n\nend % End subfunction\n\n\nfunction BranchTop = search_branch_top(P,Ce,Bal,Segs,SPar,SChi,dmin,BI)\n\n% Search the end segment for branch such that the resulting branch\n% 1) has length close to the distance between its bottom and top\n% 2) has distance close to the farthest segment end\n\n% Inputs\n% BI Branch (segment) index\n\n% Outputs\n% BranchTop The index of the segment forming the tip of the branch\n% originating from the base of the given segment BI\n\n% Define all the sub-segments of the given segments\nns = size(Segs,1);\nSegments = zeros(ns,1); % the given segment and its sub-segments\nSegments(1) = BI;\nt = 2;\nC = SChi{BI};\nwhile ~isempty(C)\n n = length(C);\n Segments(t:t+n-1) = C;\n C = vertcat(SChi{C});\n t = t+n;\nend\nif t > 2\n t = t-n;\nend\nSegments = Segments(1:t);\n\n% Determine linear distances from the segment tips to the base of the given\n% segment\nLinearDist = zeros(t,1); % linear distances from the\nSeg = Segs{Segments(1)};\nBranchBase = average(Ce(Seg{1},:)); % center of branch's base\nfor i = 1:t\n Seg = Segs{Segments(i)};\n C = average(Ce(Seg{end},:)); % tip\n LinearDist(i) = norm(C-BranchBase);\nend\nLinearDist = LinearDist(1:t);\n\n% Sort the segments according their linear distance, from longest to\n% shortest\n[LinearDist,I] = sort(LinearDist,'descend');\nSegments = Segments(I);\n\n% Define alternative branches from Segments\nBranches = cell(t,1); % the alternative segments as cell layers\nSubSegs = zeros(100,1); % Segments to be combined\nSegment = cell(3000,1);\nfor j = 1:t\n Seg = Segs{BI};\n spar = SPar;\n if Segments(j) ~= BI\n % Tip point was not in the current segment, modify segments\n SubSegs(1) = Segments(j);\n k = 1;\n S = Segments(j);\n while S ~= BI\n S = SPar(S,1);\n k = k+1;\n SubSegs(k) = S;\n end\n % Modify branch\n a = size(Seg,1);\n Segment(1:a) = Seg;\n a = a+1;\n for i = 1:k-2\n I = SubSegs(k-i); % segment to be combined to the first segment\n J = SubSegs(k-i-1); % above segment's child to be combined next\n SP = spar(I,2); % layer index of the child in the parent\n SegC = Segs{I};\n sp = spar(J,2); % layer index of the child's child in the child\n if SP >= a-2 % Use the whole parent\n Segment(a:a+sp-1) = SegC(1:sp);\n spar(J,2) = a+sp-1;\n a = a+sp;\n else % Use only bottom part of the parent\n Segment(SP+1:SP+sp) = SegC(1:sp);\n a = SP+sp+1;\n spar(J,2) = SP+sp;\n end\n SubSegs(k-i) = 1;\n end\n\n % Combine 
the last segment to the branch\n I = SubSegs(1);\n SP = spar(I,2);\n SegC = Segs{I};\n L = size(SegC,1);\n if SP >= a-2 % Use the whole parent\n Segment(a:a+L-1) = SegC;\n a = a+L-1;\n else % divide the parent segment into two parts\n Segment(SP+1:SP+L) = SegC;\n a = SP+L;\n end\n Branches{j,1} = Segment(1:a);\n else\n Branches{j,1} = Seg;\n end\n\nend\n\n% Calculate the lengths of the candidate branches. Stop, if possible, when\n% the ratio length/linear distance is less 1.2 (branch is quite straight)\nN = ceil(0.25/dmin/1.4); % number of layers used for linear length approximation\ni = 1; % running index for while loop\nContinue = true; % continue while loop as long as \"Continue\" is true\nLengths = zeros(t,1); % linear lengths of the branches\nwhile i <= t && Continue\n % Approximate the length with line segments connecting nodes along\n % the segment\n Seg = Branches{i,1};\n ns = size(Seg,1);\n if ceil(ns/N) > floor(ns/N)\n m = ceil(ns/N);\n else\n m = ceil(ns/N)+1;\n end\n Nodes = zeros(m,3);\n for j = 1:m\n I = (j-1)*N+1;\n if I > ns\n I = ns;\n end\n S = Seg{I};\n if length(S) > 1\n Nodes(j,:) = average(Ce(S,:));\n else\n S = Bal{S};\n Nodes(j,:) = average(P(S,:));\n end\n end\n V = Nodes(2:end,:)-Nodes(1:end-1,:); % line segments\n Lengths(i) = sum(sqrt(sum(V.*V,2)));\n\n % Continue as long as the length is less than 20% longer than the linear dist.\n % and the linear distance is over 75% of the maximum\n if Lengths(i)/LinearDist(i) < 1.20 && LinearDist(i) > 0.75*LinearDist(1)\n Continue = false;\n BranchTop = Segments(i);\n end\n i = i+1;\nend\n\n% If no suitable segment was found, try first with less strict conditions,\n% and if that does not work, then select the one with the largest linear distance\nif Continue\n L = Lengths./LinearDist;\n i = 1;\n while i <= t && L(i) > 1.4 && LinearDist(i) > 0.75*LinearDist(1)\n i = i+1;\n end\n if i <= t\n BranchTop = Segments(i);\n else\n BranchTop = Segments(1);\n end\nend\n\nend % End subfunction\n\n\nfunction [Segs,SPar,SChi] = modify_topology(P,Ce,Bal,Segs,SPar,SChi,dmin)\n\n% Make stem and branches as long as possible\nns = size(Segs,1);\nFal = false(2*ns,1);\nnc = ceil(ns/5);\nSubSegments = zeros(nc,1); % for searching sub-segments\nSegInd = 1; % the segment under modification\nUnMod = true(ns,1);\nUnMod(SegInd) = false;\nBranchOrder = 0;\nChildSegInd = 1; % index of the child segments under modification\nwhile any(UnMod)\n ChildSegs = SChi{SegInd}; % child segments of the segment under modification\n if size(ChildSegs,1) < size(ChildSegs,2)\n ChildSegs = ChildSegs';\n SChi{SegInd} = ChildSegs;\n end\n\n if ~isempty(Segs(SegInd)) && ~isempty(ChildSegs)\n\n if SegInd > 1 && BranchOrder > 1 % 2nd-order and higher branches\n % Search the tip of the sub-branches with biggest linear\n % distance from the current branch's base\n SubSegments(1) = SegInd;\n NSubSegs = 2;\n while ~isempty(ChildSegs)\n n = length(ChildSegs);\n SubSegments(NSubSegs:NSubSegs+n-1) = ChildSegs;\n ChildSegs = vertcat(SChi{ChildSegs});\n NSubSegs = NSubSegs+n;\n end\n if NSubSegs > 2\n NSubSegs = NSubSegs-n;\n end\n\n % Find tip-points\n Top = zeros(NSubSegs,3);\n for i = 1:NSubSegs\n Top(i,:) = Ce(Segs{SubSegments(i)}{end}(1),:);\n end\n\n % Define bottom of the branch\n BotLayer = Segs{SegInd}{1};\n Bottom = average(Ce(BotLayer,:));\n\n % End segment is the segment whose tip has greatest distance to\n % the bottom of the branch\n V = mat_vec_subtraction(Top,Bottom);\n d = sum(V.*V,2);\n [~,I] = max(d);\n TipSeg = SubSegments(I(1));\n\n elseif SegInd > 1 && 
BranchOrder <= 1 % first order branches\n\n TipSeg = search_branch_top(P,Ce,Bal,Segs,SPar,SChi,dmin,SegInd);\n\n else % Stem\n\n TipSeg = search_stem_top(P,Ce,Bal,Segs,SPar,dmin);\n\n end\n\n if TipSeg ~= SegInd\n % Tip point was not in the current segment, modify segments\n SubSegments(1) = TipSeg;\n NSubSegs = 1;\n while TipSeg ~= SegInd\n TipSeg = SPar(TipSeg,1);\n NSubSegs = NSubSegs+1;\n SubSegments(NSubSegs) = TipSeg;\n end\n\n % refine branch\n for i = 1:NSubSegs-2\n I = SubSegments(NSubSegs-i); % segment to be combined to the first segment\n J = SubSegments(NSubSegs-i-1); % above segment's child to be combined next\n SP = SPar(I,2); % layer index of the child in the parent\n SegP = Segs{SegInd};\n SegC = Segs{I};\n N = size(SegP,1);\n sp = SPar(J,2); % layer index of the child's child in the child\n if SP >= N-2 % Use the whole parent\n Segs{SegInd} = [SegP; SegC(1:sp)];\n if sp < size(SegC,1) % use only part of the child segment\n Segs{I} = SegC(sp+1:end);\n SPar(I,2) = N+sp;\n\n ChildSegs = SChi{I};\n K = SPar(ChildSegs,2) <= sp;\n c = ChildSegs(~K);\n SChi{I} = c;\n SPar(c,2) = SPar(c,2)-sp;\n ChildSegs = ChildSegs(K);\n SChi{SegInd} = [SChi{SegInd}; ChildSegs];\n SPar(ChildSegs,1) = SegInd;\n SPar(ChildSegs,2) = N+SPar(ChildSegs,2);\n\n else % use the whole child segment\n Segs{I} = cell(0,1);\n SPar(I,1) = 0;\n UnMod(I) = false;\n\n ChildSegs = SChi{I};\n SChi{I} = zeros(0,1);\n c = set_difference(SChi{SegInd},I,Fal);\n SChi{SegInd} = [c; ChildSegs];\n SPar(ChildSegs,1) = SegInd;\n SPar(ChildSegs,2) = N+SPar(ChildSegs,2);\n\n end\n\n SubSegments(NSubSegs-i) = SegInd;\n else % divide the parent segment into two parts\n ns = ns+1;\n Segs{ns} = SegP(SP+1:end); % the top part of the parent forms a new segment\n SPar(ns,1) = SegInd;\n SPar(ns,2) = SP;\n UnMod(ns) = true;\n\n Segs{SegInd} = [SegP(1:SP); SegC(1:sp)];\n\n ChildSegs = SChi{SegInd};\n if size(ChildSegs,1) < size(ChildSegs,2)\n ChildSegs = ChildSegs';\n end\n K = SPar(ChildSegs,2) > SP;\n SChi{SegInd} = ChildSegs(~K);\n ChildSegs = ChildSegs(K);\n SChi{ns} = ChildSegs;\n SPar(ChildSegs,1) = ns;\n SPar(ChildSegs,2) = SPar(ChildSegs,2)-SP;\n SChi{SegInd} = [SChi{SegInd}; ns];\n if sp < size(SegC,1) % use only part of the child segment\n Segs{I} = SegC(sp+1:end);\n SPar(I,2) = SP+sp;\n\n ChildSegs = SChi{I};\n K = SPar(ChildSegs,2) <= sp;\n SChi{I} = ChildSegs(~K);\n SPar(ChildSegs(~K),2) = SPar(ChildSegs(~K),2)-sp;\n ChildSegs = ChildSegs(K);\n SChi{SegInd} = [SChi{SegInd}; ChildSegs];\n SPar(ChildSegs,1) = SegInd;\n SPar(ChildSegs,2) = SP+SPar(ChildSegs,2);\n\n else % use the whole child segment\n Segs{I} = cell(0,1);\n SPar(I,1) = 0;\n UnMod(I) = false;\n\n ChildSegs = SChi{I};\n c = set_difference(SChi{SegInd},I,Fal);\n SChi{SegInd} = [c; ChildSegs];\n SPar(ChildSegs,1) = SegInd;\n SPar(ChildSegs,2) = SP+SPar(ChildSegs,2);\n\n end\n SubSegments(NSubSegs-i) = SegInd;\n end\n\n end\n\n % Combine the last segment to the branch\n I = SubSegments(1);\n SP = SPar(I,2);\n SegP = Segs{SegInd};\n SegC = Segs{I};\n N = size(SegP,1);\n if SP >= N-3 % Use the whole parent\n Segs{SegInd} = [SegP; SegC];\n Segs{I} = cell(0);\n SPar(I,1) = 0;\n UnMod(I) = false;\n\n ChildSegs = SChi{I};\n if size(ChildSegs,1) < size(ChildSegs,2)\n ChildSegs = ChildSegs';\n end\n c = set_difference(SChi{SegInd},I,Fal);\n SChi{SegInd} = [c; ChildSegs];\n SPar(ChildSegs,1) = SegInd;\n SPar(ChildSegs,2) = N+SPar(ChildSegs,2);\n\n else % divide the parent segment into two parts\n ns = ns+1;\n Segs{ns} = SegP(SP+1:end);\n SPar(ns,:) = [SegInd SP];\n 
Segs{SegInd} = [SegP(1:SP); SegC];\n Segs{I} = cell(0);\n SPar(I,1) = 0;\n UnMod(ns) = true;\n UnMod(I) = false;\n\n ChildSegs = SChi{SegInd};\n K = SPar(ChildSegs,2) > SP;\n SChi{SegInd} = [ChildSegs(~K); ns];\n ChildSegs = ChildSegs(K);\n SChi{ns} = ChildSegs;\n SPar(ChildSegs,1) = ns;\n SPar(ChildSegs,2) = SPar(ChildSegs,2)-SP;\n\n ChildSegs = SChi{I};\n c = set_difference(SChi{SegInd},I,Fal);\n SChi{SegInd} = [c; ChildSegs];\n SPar(ChildSegs,1) = SegInd;\n SPar(ChildSegs,2) = SP+SPar(ChildSegs,2);\n\n end\n\n end\n UnMod(SegInd) = false;\n else\n UnMod(SegInd) = false;\n end\n\n % Select the next branch, use increasing branching order\n if BranchOrder > 0 && any(UnMod(SegChildren))\n ChildSegInd = ChildSegInd+1;\n SegInd = SegChildren(ChildSegInd);\n elseif BranchOrder == 0\n BranchOrder = BranchOrder+1;\n SegChildren = SChi{1};\n if ~isempty(SegChildren)\n SegInd = SegChildren(1);\n else\n UnMod = false;\n end\n else\n BranchOrder = BranchOrder+1;\n i = 1;\n SegChildren = SChi{1};\n while i < BranchOrder && ~isempty(SegChildren)\n i = i+1;\n L = cellfun('length',SChi(SegChildren));\n Keep = L > 0;\n SegChildren = SegChildren(Keep);\n SegChildren = vertcat(SChi{SegChildren});\n end\n I = UnMod(SegChildren);\n if any(I)\n SegChildren = SegChildren(I);\n SegInd = SegChildren(1);\n ChildSegInd = 1;\n end\n end\nend\n\n% Modify indexes by removing empty segments\nEmpty = true(ns,1);\nfor i = 1:ns\n if isempty(Segs{i})\n Empty(i) = false;\n end\nend\nSegs = Segs(Empty);\nInd = (1:1:ns)';\nn = nnz(Empty);\nI = (1:1:n)';\nInd(Empty) = I;\nSPar = SPar(Empty,:);\nJ = SPar(:,1) > 0;\nSPar(J,1) = Ind(SPar(J,1));\nfor i = 1:ns\n if Empty(i)\n ChildSegs = SChi{i};\n if ~isempty(ChildSegs)\n ChildSegs = Ind(ChildSegs);\n SChi{i} = ChildSegs;\n end\n end\nend\nSChi = SChi(Empty);\nns = n;\n\n% Modify SChi\nfor i = 1:ns\n ChildSegs = SChi{i};\n if size(ChildSegs,2) > size(ChildSegs,1) && size(ChildSegs,1) > 0\n SChi{i} = ChildSegs';\n elseif size(ChildSegs,1) == 0 || size(ChildSegs,2) == 0\n SChi{i} = zeros(0,1);\n end\n Seg = Segs{i};\n n = max(size(Seg));\n for j = 1:n\n ChildSegs = Seg{j};\n if size(ChildSegs,2) > size(ChildSegs,1) && size(ChildSegs,1) > 0\n Seg{j} = ChildSegs';\n elseif size(ChildSegs,1) == 0 || size(ChildSegs,2) == 0\n Seg{j} = zeros(0,1);\n end\n end\n Segs{i} = Seg;\nend\nend % End of function\n\n\nfunction [Segs,SPar,SChi] = remove_small(Ce,Segs,SPar,SChi)\n\n% Removes small child segments\n\n% computes and estimate for stem radius at the base\nSegment = Segs{1}; % current or parent segment\nns = size(Segment,1); % layers in the parent\nif ns > 10\n EndL = 10; % ending layer index in parent\nelse\n EndL = ns;\nend\nEnd = average(Ce(Segment{EndL},:)); % Center of end layer\nStart = average(Ce(Segment{1},:)); % Center of starting layer\nV = End-Start; % Vector between starting and ending centers\nV = V/norm(V); % normalize\nSets = vertcat(Segment{1:EndL});\nMaxRad = max(distances_to_line(Ce(Sets,:),V,Start));\n\nNseg = size(Segs,1);\nFal = false(Nseg,1);\nKeep = true(Nseg,1);\nSets = zeros(2000,1);\nfor i = 1:Nseg\n if Keep(i)\n ChildSegs = SChi{i}; % child segments\n if ~isempty(ChildSegs) % child segments exists\n n = length(ChildSegs); % number of children\n Segment = Segs{i}; % current or parent segment\n ns = size(Segment,1); % layers in the parent\n for j = 1:n % check each child separately\n nl = SPar(ChildSegs(j),2); % the index of the layer in the parent the child begins\n if nl > 10\n StartL = nl-10; % starting layer index in parent\n else\n StartL = 1;\n end\n if 
ns-nl > 10\n EndL = nl+10; % end layer index in parent\n else\n EndL = ns;\n end\n End = average(Ce(Segment{EndL},:));\n Start = average(Ce(Segment{StartL},:));\n V = End-Start; % Vector between starting and ending centers\n V = V/norm(V); % normalize\n\n % cover sets of the child\n ChildSets = Segs{ChildSegs(j)};\n NL = size(ChildSets,1);\n a = 1;\n for k = 1:NL\n S = ChildSets{k};\n Sets(a:a+length(S)-1) = S;\n a = a+length(S);\n end\n ChildSets = Sets(1:a-1);\n\n % maximum distance in child\n distChild = max(distances_to_line(Ce(ChildSets,:),V,Start));\n\n if distChild < MaxRad+0.06\n\n % Select the cover sets of the parent between centers\n NL = EndL-StartL+1;\n a = 1;\n for k = 1:NL\n S = Segment{StartL+(k-1)};\n Sets(a:a+length(S)-1) = S;\n a = a+length(S);\n end\n ParentSets = Sets(1:a-1);\n\n % maximum distance in parent\n distPar = max(distances_to_line(Ce(ParentSets,:),V,Start));\n if (distChild-distPar < 0.02) || (distChild/distPar < 1.2 && distChild-distPar < 0.06)\n ChildChildSegs = SChi{ChildSegs(j)};\n nc = length(ChildChildSegs);\n if nc == 0\n % Remove, no child segments\n Keep(ChildSegs(j)) = false;\n Segs{ChildSegs(j)} = zeros(0,1);\n SPar(ChildSegs(j),:) = zeros(1,2);\n SChi{i} = set_difference(ChildSegs,ChildSegs(j),Fal);\n else\n L = SChi(ChildChildSegs);\n L = vertcat(L{:}); % child child segments\n if isempty(L)\n J = false(nc,1);\n for k = 1:nc\n segment = Segs{ChildChildSegs(k)};\n if isempty(segment)\n J(k) = true;\n else\n segment1 = [vertcat(segment{:}); ParentSets];\n distSeg = max(distances_to_line(Ce(segment1,:),V,Start));\n if (distSeg-distPar < 0.02) || (distSeg/distPar < 1.2 && distSeg-distPar < 0.06)\n J(k) = true;\n end\n end\n end\n if all(J)\n % Remove\n ChildChildSegs1 = [ChildChildSegs; ChildSegs(j)];\n nc = length(ChildChildSegs1);\n Segs(ChildChildSegs1) = cell(nc,1);\n Keep(ChildChildSegs1) = false;\n SPar(ChildChildSegs1,:) = zeros(nc,2);\n d = set_difference(ChildSegs,ChildSegs(j),Fal);\n SChi{i} = d;\n SChi(ChildChildSegs1) = cell(nc,1);\n end\n end\n end\n end\n end\n end\n end\n if i == 1\n MaxRad = MaxRad/2;\n end\n end\nend\n% Modify segments and their indexing\nSegs = Segs(Keep);\nn = nnz(Keep);\nInd = (1:1:Nseg)';\nJ = (1:1:n)';\nInd(Keep) = J;\nInd(~Keep) = 0;\nSPar = SPar(Keep,:);\nJ = SPar(:,1) > 0;\nSPar(J,1) = Ind(SPar(J,1));\n% Modify SChi\nfor i = 1:Nseg\n if Keep(i)\n ChildSegs = SChi{i};\n if ~isempty(ChildSegs)\n ChildSegs = nonzeros(Ind(ChildSegs));\n if size(ChildSegs,1) < size(ChildSegs,2)\n SChi{i} = ChildSegs';\n else\n SChi{i} = ChildSegs;\n end\n else\n SChi{i} = zeros(0,1);\n end\n end\nend\nSChi = SChi(Keep);\nend % End of function\n\n\nfunction [SegP,Base] = modify_parent(P,Bal,Ce,SegP,SegC,nl,PatchDiam,base)\n\n% Expands the base of the branch backwards into its parent segment and\n% then removes the expansion from the parent segment.\n\nBase = SegC{1};\nif ~isempty(Base)\n\n % Define the directions of the segments\n DirChi = segment_direction(Ce,SegC,1);\n DirPar = segment_direction(Ce,SegP,nl);\n\n if length(Base) > 1\n BaseCent = average(Ce(Base,:));\n db = distances_to_line(Ce(Base,:), DirChi', BaseCent); % distances of the sets in the base to the axis of the branch\n DiamBase = 2*max(db); % diameter of the base\n elseif length(Bal{Base}) > 1\n BaseCent = average(P(Bal{Base},:));\n db = distances_to_line(P(Bal{Base},:), DirChi', BaseCent);\n DiamBase = 2*max(db);\n else\n BaseCent = Ce(Base,:);\n DiamBase = 0;\n end\n\n % Determine the number of cover set layers \"n\" to be checked\n Angle = abs(DirChi'*DirPar); 
% abs of cosine of the angle between component and segment directions\n Nlayer = max([3,ceil(Angle*2*DiamBase/PatchDiam)]);\n if Nlayer > nl % can go only to the bottom of the segment\n Nlayer = nl;\n end\n\n % Check the layers\n layer = 0;\n base{1} = Base;\n while layer < Nlayer\n Sets = SegP{nl-layer};\n Seg = average(Ce(Sets,:)); % mean of the cover sets' centers\n\n VBase = mat_vec_subtraction(Ce(Sets,:),BaseCent); % vectors from base's center to sets in the segment\n h = VBase*DirChi;\n B = repmat(DirChi',length(Sets),1);\n B = [h.*B(:,1) h.*B(:,2) h.*B(:,3)];\n V = VBase-B;\n distSets = sqrt(sum(V.*V,2)); % distances of the sets in the segment to the axis of the branch\n\n VSeg = mat_vec_subtraction(Ce(Sets,:),Seg); % vectors from segments's center to sets in the segment\n lenBase = sqrt(sum(VBase.*VBase,2)); % lengths of VBase\n lenSeg = sqrt(sum(VSeg.*VSeg,2)); % lengths of VSeg\n if Angle < 0.9\n K = lenBase < 1.1/(1-0.5*Angle^2)*lenSeg; % sets closer to the base's center than segment's center\n J = distSets < 1.25*DiamBase; % sets close enough to the axis of the branch\n I = K&J;\n else % branch almost parallel to parent\n I = distSets < 1.25*DiamBase; % only the distance to the branch axis counts\n end\n\n if all(I) || ~any(I) % stop the process if all the segment's or no segment's sets\n layer = Nlayer;\n else\n SegP{nl-layer} = Sets(not(I));\n base{layer+2} = Sets(I);\n layer = layer+1;\n end\n end\n Base = vertcat(base{1:Nlayer+1});\nend\n\nend % End of function\n\n\nfunction D = segment_direction(Ce,Seg,nl)\n\n% Defines the direction of the segment\n\n% Define bottom and top layers\nif nl-3 > 0\n bot = nl-3;\nelse\n bot = 1;\nend\nj = 1;\nwhile j < 3 && isempty(Seg{bot})\n bot = bot+1;\n j = j+1;\nend\nif nl+2 <= size(Seg,1)\n top = nl+2;\nelse\n top = size(Seg,1);\nend\nj = 1;\nwhile j < 3 && isempty(Seg{top})\n top = top-1;\n j = j+1;\nend\n\n% Direction\nif top > bot\n Bot = average(Ce(Seg{bot},:));\n Top = average(Ce(Seg{top},:));\n V = Top-Bot;\n D = V'/norm(V);\nelse\n D = zeros(3,1);\nend\n\n\nend % End of function\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "cover_sets.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/cover_sets.m", "size": 10655, "source_encoding": "utf_8", "md5": "60e3bf5398cc4bf4ade45637819e26a7", "text": "% This file is part of TREEQSM.\n%\n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n%\n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n%\n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction cover = cover_sets(P,inputs,RelSize)\n\n% ---------------------------------------------------------------------\n% COVER_SETS.M Creates cover sets (surface patches) and their\n% neighbor-relation for a point cloud\n%\n% Version 2.0.1\n% Latest update 2 May 2022\n%\n% Copyright (C) 2013-2022 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Covers the point cloud with small sets, which are along the surface,\n% such that each point belongs at most one cover set; i.e. 
the cover is\n% a partition of the point cloud.\n%\n% The cover is generated such that at first the point cloud is covered\n% with balls with radius \"BallRad\". This first cover is such that\n% 1) the minimum distance between the centers is \"PatchDiam\", and\n% 2) the maximum distance from any point to nearest center is also \"PatchDiam\".\n% Then the first cover of BallRad-balls is used to define a second cover:\n% each BallRad-ball \"A\" defines corresponding cover set \"B\" in the second cover\n% such that \"B\" contains those points of \"A\" that are nearer to the center of\n% \"A\" than any other center of BallRad-balls. The BallRad-balls also define\n% the neighbors for the second cover: Let CA and CB denote cover sets in\n% the second cover, and BA and BB their BallRad-balls. Then CB is\n% a neighbor of CA, and vice versa, if BA and CB intersect or\n% BB and CA intersect.\n%\n% Inputs:\n% P Point cloud\n% inputs Input stucture, the following fields are needed:\n% PatchDiam1 Minimum distance between centers of cover sets; i.e. the\n% minimum diameter of cover set in uniform covers. Does\n% not need nor use the third optional input \"RelSize\".\n% PatchDiam2Min Minimum diameter of cover sets for variable-size\n% covers. Needed if \"RelSize\" is given as input.\n% PatchDiam2Max Maximum diameter of cover sets for variable-size\n% covers. Needed if \"RelSize\" is given as input.\n% \tBallRad1 Radius of the balls used to generate the uniform cover. \n% These balls are also used to determine the neighbors\n% BallRad2 Maximum radius of the balls used to generate the \n% varibale-size cover. \n% nmin1, nmin2 Minimum number of points in a BallRad1- and\n% BallRad2-balls\n% RelSize Relative cover set size for each point\n%\n% Outputs:\n% cover Structure array containing the followin fields:\n% ball Cover sets, (n_sets x 1)-cell\n% center Center points of the cover sets, (n_sets x 1)-vector\n% neighbor Neighboring cover sets of each cover set, (n_sets x 1)-cell\n\n% Changes from version 2.0.0 to 2.0.1, 2 May 2022:\n% 1) Added comments and changed some variable names\n% 2) Enforced that input parameters are type double\n\nif ~isa(P,'double')\n P = double(P);\nend\n\n%% Large balls and centers\nnp = size(P,1);\nBall = cell(np,1); % Large balls for generation of the cover sets and their neighbors\nCen = zeros(np,1,'uint32'); % the center points of the balls/cover sets\nNotExa = true(np,1); % the points not yet examined\nDist = 1e8*ones(np,1); % distance of point to the closest center\nBoP = zeros(np,1,'uint32'); % the balls/cover sets the points belong\nnb = 0; % number of sets generated\nif nargin == 2\n %% Same size cover sets everywhere\n BallRad = double(inputs.BallRad1);\n PatchDiamMax = double(inputs.PatchDiam1);\n nmin = double(inputs.nmin1);\n % Partition the point cloud into cubes for quick neighbor search\n [partition,CC] = cubical_partition(P,BallRad);\n\n % Generate the balls\n Radius = BallRad^2;\n MaxDist = PatchDiamMax^2;\n % random permutation of points, produces different covers for the same inputs:\n RandPerm = randperm(np); \n for i = 1:np\n if NotExa(RandPerm(i))\n Q = RandPerm(i); % the center/seed point of the current cover set\n % Select the points in the cubical neighborhood of the seed:\n points = partition(CC(Q,1)-1:CC(Q,1)+1,CC(Q,2)-1:CC(Q,2)+1,CC(Q,3)-1:CC(Q,3)+1);\n points = vertcat(points{:});\n % Compute distances of the points to the seed:\n V = [P(points,1)-P(Q,1) P(points,2)-P(Q,2) P(points,3)-P(Q,3)];\n dist = sum(V.*V,2);\n % Select the points inside the 
ball:\n Inside = dist < Radius;\n if nnz(Inside) >= nmin\n ball = points(Inside); % the points forming the ball\n d = dist(Inside); % the distances of the ball's points\n core = (d < MaxDist); % the core points of the cover set\n NotExa(ball(core)) = false; % mark points as examined\n % define new ball:\n nb = nb+1; \n Ball{nb} = ball;\n Cen(nb) = Q;\n % Select which points belong to this ball, i.e. are closer this\n % seed than previously tested seeds:\n D = Dist(ball); % the previous distances\n closer = d < D; % which points are closer to this seed\n ball = ball(closer); % define the ball\n % update the ball and distance information of the points\n Dist(ball) = d(closer); \n BoP(ball) = nb; \n end\n end\n end\nelse\n %% Use relative sizes (the size varies)\n % Partition the point cloud into cubes\n BallRad = double(inputs.BallRad2);\n PatchDiamMin = double(inputs.PatchDiam2Min);\n PatchDiamMax = double(inputs.PatchDiam2Max);\n nmin = double(inputs.nmin2);\n MRS = PatchDiamMin/PatchDiamMax;\n % minimum radius\n r = double(1.5*(double(min(RelSize))/256*(1-MRS)+MRS)*BallRad+1e-5); \n NE = 1+ceil(BallRad/r);\n if NE > 4\n r = PatchDiamMax/4;\n NE = 1+ceil(BallRad/r);\n end\n [Partition,CC,~,Cubes] = cubical_partition(P,r,NE);\n\n I = RelSize == 0; % Don't use points with no size determined\n NotExa(I) = false;\n\n % Define random permutation of points (results in different covers for \n % same input) so that first small sets are generated\n RandPerm = zeros(np,1,'uint32');\n I = RelSize <= 32;\n ind = uint32(1:1:np)';\n I = ind(I);\n t1 = length(I);\n RandPerm(1:1:t1) = I(randperm(t1));\n I = RelSize <= 128 & RelSize > 32;\n I = ind(I);\n t2 = length(I);\n RandPerm(t1+1:1:t1+t2) = I(randperm(t2));\n t2 = t2+t1;\n I = RelSize > 128;\n I = ind(I);\n t3 = length(I);\n RandPerm(t2+1:1:t2+t3) = I(randperm(t3));\n clearvars ind I\n\n Point = zeros(round(np/1000),1,'uint32');\n e = BallRad-PatchDiamMax;\n for i = 1:np\n if NotExa(RandPerm(i))\n Q = RandPerm(i); % the center/seed point of the current cover set\n % Compute the set size and the cubical neighborhood of the seed point:\n rs = double(RelSize(Q))/256*(1-MRS)+MRS; % relative radius\n MaxDist = PatchDiamMax*rs; % diameter of the cover set\n Radius = MaxDist+sqrt(rs)*e; % radius of the ball including the cover set\n N = ceil(Radius/r); % = number of cells needed to include the ball\n cubes = Cubes(CC(Q,1)-N:CC(Q,1)+N,CC(Q,2)-N:CC(Q,2)+N,CC(Q,3)-N:CC(Q,3)+N);\n I = cubes > 0;\n cubes = cubes(I); % Cubes forming the neighborhood\n Par = Partition(cubes); % cell-array of the points in the neighborhood\n % vertical catenation of the points from the cell-array\n S = cellfun('length',Par);\n stop = cumsum(S);\n start = [0; stop]+1;\n for k = 1:length(stop)\n Point(start(k):stop(k)) = Par{k};\n end\n points = Point(1:stop(k));\n % Compute the distance of the \"points\" to the seed:\n V = [P(points,1)-P(Q,1) P(points,2)-P(Q,2) P(points,3)-P(Q,3)];\n dist = sum(V.*V,2);\n % Select the points inside the ball:\n Inside = dist < Radius^2;\n if nnz(Inside) >= nmin\n ball = points(Inside); % the points forming the ball\n d = dist(Inside); % the distances of the ball's points\n core = (d < MaxDist^2); % the core points of the cover set\n NotExa(ball(core)) = false; % mark points as examined\n % define new ball:\n nb = nb+1; \n Ball{nb} = ball;\n Cen(nb) = Q;\n % Select which points belong to this ball, i.e. 
are closer this\n % seed than previously tested seeds:\n D = Dist(ball); % the previous distances\n closer = d < D; % which points are closer to this seed\n ball = ball(closer); % define the ball\n % update the ball and distance information of the points\n Dist(ball) = d(closer); \n BoP(ball) = nb; \n end\n end\n end\nend\nBall = Ball(1:nb,:);\nCen = Cen(1:nb);\nclearvars RandPerm NotExa Dist\n\n%% Cover sets\n% Number of points in each ball and index of each point in its ball\nNum = zeros(nb,1,'uint32');\nInd = zeros(np,1,'uint32');\nfor i = 1:np\n if BoP(i) > 0\n Num(BoP(i)) = Num(BoP(i))+1;\n Ind(i) = Num(BoP(i));\n end\nend\n\n% Initialization of the \"PointsInSets\"\nPointsInSets = cell(nb,1);\nfor i = 1:nb\n PointsInSets{i} = zeros(Num(i),1,'uint32');\nend\n\n% Define the \"PointsInSets\"\nfor i = 1:np\n if BoP(i) > 0\n PointsInSets{BoP(i),1}(Ind(i)) = i;\n end\nend\n\n%% Neighbors\n% Define neighbors. Sets A and B are neighbors if the large ball of A\n% contains points of B. Notice that this is not a symmetric relation.\nNei = cell(nb,1);\nFal = false(nb,1);\nfor i = 1:nb\n B = Ball{i}; % the points in the big ball of cover set \"i\"\n I = (BoP(B) ~= i);\n N = B(I); % the points of B not in the cover set \"i\"\n N = BoP(N);\n\n % select the unique elements of N:\n n = length(N);\n if n > 2\n Include = true(n,1);\n for j = 1:n\n if ~Fal(N(j))\n Fal(N(j)) = true;\n else\n Include(j) = false;\n end\n end\n Fal(N) = false;\n N = N(Include);\n elseif n == 2\n if N(1) == N(2)\n N = N(1);\n end\n end\n\n Nei{i} = uint32(N);\nend\n\n% Make the relation symmetric by adding, if needed, A as B's neighbor\n% in the case B is A's neighbor\nfor i = 1:nb\n N = Nei{i};\n for j = 1:length(N)\n K = (Nei{N(j)} == i);\n if ~any(K)\n Nei{N(j)} = uint32([Nei{N(j)}; i]);\n end\n end\nend\n\n% Define output\ncover.ball = PointsInSets;\ncover.center = Cen;\ncover.neighbor = Nei;\n\n%% Display statistics\n%disp([' ',num2str(nb),' cover sets, points not covered: ',num2str(np-nnz(BoP))])"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "point_model_distance.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/point_model_distance.m", "size": 5875, "source_encoding": "utf_8", "md5": "7b7c334df5d7577f4570e5e7df063761", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
If not, see .\n\nfunction pmdistance = point_model_distance(P,cylinder)\n\n% ---------------------------------------------------------------------\n% POINT_MODEL_DISTANCE.M Computes the distances of the points to the \n% cylinder model\n%\n% Version 2.1.1\n% Latest update 8 Oct 2021\n%\n% Copyright (C) 2015-2021 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Changes from version 2.1.0 to 2.1.1, 8 Oct 2021: \n% 1) Changed the determinationa NE, the number of empty edge layers, so \n% that is now limited in size, before it is given as input for \n% cubical_partition function.\n\n% Changes from version 2.0.0 to 2.1.0, 26 Nov 2019: \n% 1) Bug fix: Corrected the computation of the output at the end of the\n% code so that trees without branches are computed correctly.\n\n% Cylinder data\nRad = cylinder.radius;\nLen = cylinder.length;\nSta = cylinder.start;\nAxe = cylinder.axis;\nBOrd = cylinder.BranchOrder;\n\n% Select randomly 25 % or max one million points for the distance comput.\nnp0 = size(P,1);\na = min(0.25*np0,1000000);\nI = logical(round(0.5/(1-a/np0)*rand(np0,1)));\nP = P(I,:);\n\n% Partition the points into cubes \nL = 2*median(Len);\nNE = max(3,min(10,ceil(max(Len)/L)))+3;\n[Partition,~,Info] = cubical_partition(P,L,NE);\nMin = Info(1:3);\nEL = Info(7);\nNE = Info(8);\n\n% Calculates the cube-coordinates of the starting points\nCC = floor([Sta(:,1)-Min(1) Sta(:,2)-Min(2) Sta(:,3)-Min(3)]/EL)+NE+1;\n\n% Compute the number of cubes needed for each starting point\nN = ceil(Len/L);\n\n% Correct N so that cube indexes are not too small or large\nI = CC(:,1) < N+1;\nN(I) = CC(I,1)-1;\nI = CC(:,2) < N+1;\nN(I) = CC(I,2)-1;\nI = CC(:,3) < N+1;\nN(I) = CC(I,3)-1;\nI = CC(:,1)+N+1 > Info(4);\nN(I) = Info(4)-CC(I,1)-1;\nI = CC(:,2)+N+1 > Info(5);\nN(I) = Info(5)-CC(I,2)-1;\nI = CC(:,3)+N+1 > Info(6);\nN(I) = Info(6)-CC(I,3)-1;\n\n% Calculate the distances to the cylinders\nn = size(Rad,1);\nnp = size(P,1);\nDist = zeros(np,2); % Distance and the closest cylinder of each points\nDist(:,1) = 2; % Large distance initially\nPoints = zeros(ceil(np/10),1,'int32'); % Auxiliary variable\nData = cell(n,1);\nfor i = 1:n\n Par = Partition(CC(i,1)-N(i):CC(i,1)+N(i),CC(i,2)-N(i):CC(i,2)+N(i),...\n CC(i,3)-N(i):CC(i,3)+N(i));\n if N(i) > 1\n S = cellfun('length',Par);\n I = S > 0;\n S = S(I);\n Par = Par(I);\n stop = cumsum(S);\n start = [0; stop]+1;\n for k = 1:length(stop)\n Points(start(k):stop(k)) = Par{k}(:);\n end\n points = Points(1:stop(k));\n else\n points = vertcat(Par{:});\n end\n [d,~,h] = distances_to_line(P(points,:),Axe(i,:),Sta(i,:));\n d = abs(d-Rad(i));\n Data{i} = [d h double(points)];\n I = d < Dist(points,1);\n J = h >= 0;\n K = h <= Len(i);\n L = d < 0.5;\n M = I&J&K&L;\n points = points(M);\n Dist(points,1) = d(M);\n Dist(points,2) = i;\nend\n\n% Calculate the distances to the cylinders for points not yet calculated\n% because they are not \"on side of cylinder\nfor i = 1:n\n if ~isempty(Data{i})\n d = Data{i}(:,1);\n h = Data{i}(:,2);\n points = Data{i}(:,3);\n I = d < Dist(points,1);\n J = h >= -0.1 & h <= 0;\n K = h <= Len(i)+0.1 & h >= Len(i);\n L = d < 0.5;\n M = I&(J|K)&L;\n points = points(M);\n Dist(points,1) = d(M);\n Dist(points,2) = i;\n end\nend\n\n% Select only the shortest 95% of distances for each cylinder\nN = zeros(n,1);\nO = zeros(np,1);\nfor i = 1:np\n if Dist(i,2) > 0\n N(Dist(i,2)) = N(Dist(i,2))+1;\n O(i) = N(Dist(i,2));\n end\nend\nCyl = cell(n,1);\nfor i = 1:n\n Cyl{i} = zeros(N(i),1);\nend\nfor i = 1:np\n if 
Dist(i,2) > 0\n Cyl{Dist(i,2)}(O(i)) = i;\n end\nend\nDistCyl = zeros(n,1); % Average point distance to each cylinder\nfor i = 1:n\n I = Cyl{i};\n m = length(I);\n if m > 19 % select the smallest 95% of distances\n d = sort(Dist(I,1));\n DistCyl(i) = mean(d(1:floor(0.95*m)));\n elseif m > 0\n DistCyl(i) = mean(Dist(I,1));\n end\nend\n\n% Define the output\npmdistance.CylDist = single(DistCyl);\npmdistance.median = median(DistCyl(:,1));\npmdistance.mean = mean(DistCyl(:,1));\npmdistance.max = max(DistCyl(:,1));\npmdistance.std = std(DistCyl(:,1));\n\nT = BOrd == 0;\nB1 = BOrd == 1;\nB2 = BOrd == 2;\nB = DistCyl(~T,1);\nT = DistCyl(T,1);\nB1 = DistCyl(B1,1);\nB2 = DistCyl(B2,1);\n\npmdistance.TrunkMedian = median(T);\npmdistance.TrunkMean = mean(T);\npmdistance.TrunkMax = max(T);\npmdistance.TrunkStd = std(T);\n\nif ~isempty(B)\n pmdistance.BranchMedian = median(B);\n pmdistance.BranchMean = mean(B);\n pmdistance.BranchMax = max(B);\n pmdistance.BranchStd = std(B);\nelse\n pmdistance.BranchMedian = 0;\n pmdistance.BranchMean = 0;\n pmdistance.BranchMax = 0;\n pmdistance.BranchStd = 0;\nend\n\nif ~isempty(B1)\n pmdistance.Branch1Median = median(B1);\n pmdistance.Branch1Mean = mean(B1);\n pmdistance.Branch1Max = max(B1);\n pmdistance.Branch1Std = std(B1);\nelse\n pmdistance.Branch1Median = 0;\n pmdistance.Branch1Mean = 0;\n pmdistance.Branch1Max = 0;\n pmdistance.Branch1Std = 0;\nend\n\nif ~isempty(B2)\n pmdistance.Branch2Median = median(B2);\n pmdistance.Branch2Mean = mean(B2);\n pmdistance.Branch2Max = max(B2);\n pmdistance.Branch2Std = std(B2);\nelse\n pmdistance.Branch2Median = 0;\n pmdistance.Branch2Mean = 0;\n pmdistance.Branch2Max = 0;\n pmdistance.Branch2Std = 0;\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "tree_sets.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/tree_sets.m", "size": 28351, "source_encoding": "utf_8", "md5": "7b0856d9e0338fff9560f3d810b825c8", "text": "% This file is part of TREEQSM.\r\n%\r\n% TREEQSM is free software: you can redistribute it and/or modify\r\n% it under the terms of the GNU General Public License as published by\r\n% the Free Software Foundation, either version 3 of the License, or\r\n% (at your option) any later version.\r\n%\r\n% TREEQSM is distributed in the hope that it will be useful,\r\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n% GNU General Public License for more details.\r\n%\r\n% You should have received a copy of the GNU General Public License\r\n% along with TREEQSM. If not, see .\r\n\r\nfunction [cover,Base,Forb] = tree_sets(P,cover,inputs,segment)\r\n\r\n% ---------------------------------------------------------------------\r\n% TREE_SETS.M Determines the base of the trunk and the cover sets\r\n% belonging to the tree, updates the neighbor-relation\r\n%\r\n% Version 2.3.0\r\n% Latest update 2 May 2022\r\n%\r\n% Copyright (C) 2013-2022 Pasi Raumonen\r\n% ---------------------------------------------------------------------\r\n%\r\n% Determines the cover sets that belong to the tree. Determines also the\r\n% base of the tree and updates the neighbor-relation such that all of the\r\n% tree is connected, i.e., the cover sets belonging to the tree form a\r\n% single connected component. 
Optionally uses information from existing\r\n% segmentation to make sure that stem and 1st-, 2nd-, 3rd-order branches\r\n% are properly connnected.\r\n% ---------------------------------------------------------------------\r\n% Inputs:\r\n% P Point cloud\r\n% cover Cover sets, their centers and neighbors\r\n% PatchDiam Minimum diameter of the cover sets\r\n% OnlyTree Logical value indicating if the point cloud contains only\r\n% points from the tree to be modelled\r\n% segment Previous segments\r\n%\r\n% Outputs:\r\n% cover Cover sets with updated neigbors\r\n% Base Base of the trunk (the cover sets forming the base)\r\n% Forb Cover sets not part of the tree\r\n% ---------------------------------------------------------------------\r\n\r\n% Changes from version 2.2.0 to 2.3.0, 2 May 2022:\r\n% 1) Added new lines of code at the end of the \"define_main_branches\" to\r\n% make sure that the \"Trunk\" variable defines connected stem\r\n\r\n% Changes from version 2.1.0 to 2.2.0, 13 Aug 2020:\r\n% 1) \"define_base_forb\": Changed the base height specification from\r\n% 0.1*aux.Height to 0.02*aux.Height\r\n% 2) \"define_base_forb\": changed the cylinder fitting syntax corresponding\r\n% to the new input and outputs of \"least_squares_cylinder\"\r\n% 3) \"make_tree_connected”: Removed \"Trunk(Base) = false;\" at the beginning\r\n% of the function as unnecessary and to prevent errors in a special case\r\n% where the Trunk is equal to Base.\r\n%\t4) \"make_tree_connected”: Removed from the end the generation of \"Trunk\"\r\n% again and the new call for the function\r\n%\t5) \"make_tree_connected”: Increased the minimum distance of a component\r\n% to be removed from 8m to 12m.\r\n\r\n% Changes from version 2.0.0 to 2.1.0, 11 Oct 2019:\r\n% 1) \"define_main_branches\": modified the size of neighborhood \"balls0\",\r\n% added seven lines of code, prevents possible error of too low or big\r\n% indexes on \"Par\"\r\n% 2) Increased the maximum base height from 0.5m to 1.5m\r\n% 3) \"make_tree_connected\": added at the end a call for the function itself,\r\n% if the tree is not yet connected, thus running the function again if\r\n% necessary\r\n\r\n%% Define auxiliar object\r\nclear aux\r\naux.nb = max(size(cover.center)); % number of cover sets\r\naux.Fal = false(aux.nb,1);\r\naux.Ind = (1:1:aux.nb)';\r\naux.Ce = P(cover.center,1:3); % Coordinates of the center points\r\naux.Hmin = min(aux.Ce(:,3));\r\naux.Height = max(aux.Ce(:,3))-aux.Hmin;\r\n\r\n%% Define the base of the trunk and the forbidden sets\r\nif nargin == 3\r\n [Base,Forb,cover] = define_base_forb(P,cover,aux,inputs);\r\nelse\r\n inputs.OnlyTree = true;\r\n [Base,Forb,cover] = define_base_forb(P,cover,aux,inputs,segment);\r\nend\r\n\r\n%% Define the trunk (and the main branches)\r\nif nargin == 3\r\n [Trunk,cover] = define_trunk(cover,aux,Base,Forb,inputs);\r\nelse\r\n [Trunk,cover] = define_main_branches(cover,segment,aux,inputs);\r\nend\r\n\r\n%% Update neighbor-relation to make the whole tree connected\r\n[cover,Forb] = make_tree_connected(cover,aux,Forb,Base,Trunk,inputs);\r\n\r\nend % End of the main function\r\n\r\n\r\nfunction [Base,Forb,cover] = define_base_forb(P,cover,aux,inputs,segment)\r\n\r\n% Defines the base of the stem and the forbidden sets (the sets containing\r\n% points not from the tree, i.e, ground, understory, etc.)\r\nCe = aux.Ce;\r\nif inputs.OnlyTree && nargin == 4\r\n % No ground in the point cloud, the base is the lowest part\r\n BaseHeight = min(1.5,0.02*aux.Height);\r\n I = Ce(:,3) < aux.Hmin+BaseHeight;\r\n 
Base = aux.Ind(I);\r\n Forb = aux.Fal;\r\n % Make sure the base, as the bottom of point cloud, is not in multiple parts\r\n Wb = max(max(Ce(Base,1:2))-min(Ce(Base,1:2)));\r\n Wt = max(max(Ce(:,1:2))-min(Ce(:,1:2)));\r\n k = 1;\r\n while k <= 5 && Wb > 0.3*Wt\r\n BaseHeight = BaseHeight-0.05;\r\n BaseHeight = max(BaseHeight,0.05);\r\n if BaseHeight > 0\r\n I = Ce(:,3) < aux.Hmin+BaseHeight;\r\n else\r\n [~,I] = min(Ce(:,3));\r\n end\r\n Base = aux.Ind(I);\r\n Wb = max(max(Ce(Base,1:2))-min(Ce(Base,1:2)));\r\n k = k+1;\r\n end\r\nelseif inputs.OnlyTree\r\n % Select the stem sets from the previous segmentation and define the\r\n % base\r\n BaseHeight = min(1.5,0.02*aux.Height);\r\n SoP = segment.SegmentOfPoint(cover.center);\r\n stem = aux.Ind(SoP == 1);\r\n I = Ce(stem,3) < aux.Hmin+BaseHeight;\r\n Base = stem(I);\r\n Forb = aux.Fal;\r\nelse\r\n % Point cloud contains non-tree points.\r\n % Determine the base from the \"height\" and \"density\" of cover sets\r\n % by projecting the sets to the xy-plane\r\n Bal = cover.ball;\r\n Nei = cover.neighbor;\r\n\r\n % The vertices of the rectangle containing C\r\n Min = double(min(Ce));\r\n Max = double(max(Ce(:,1:2)));\r\n\r\n % Number of rectangles with edge length \"E\" in the plane\r\n E = min(0.1,0.015*aux.Height);\r\n n = double(ceil((Max(1:2)-Min(1:2))/E)+1);\r\n\r\n % Calculates the rectangular-coordinates of the points\r\n px = floor((Ce(:,1)-Min(1))/E)+1;\r\n py = floor((Ce(:,2)-Min(2))/E)+1;\r\n\r\n % Sorts the points according a lexicographical order\r\n LexOrd = [px py-1]*[1 n(1)]';\r\n [LexOrd,SortOrd] = sort(LexOrd);\r\n\r\n Partition = cell(n(1),n(2));\r\n hei = zeros(n(1),n(2)); % \"height\" of the cover sets in the squares\r\n den = hei; % density of the cover sets in the squares\r\n baseden = hei;\r\n p = 1; % The index of the point under comparison\r\n while p <= aux.nb\r\n t = 1;\r\n while (p+t <= aux.nb) && (LexOrd(p) == LexOrd(p+t))\r\n t = t+1;\r\n end\r\n q = SortOrd(p);\r\n J = SortOrd(p:p+t-1);\r\n Partition{px(q),py(q)} = J;\r\n p = p+t;\r\n K = ceil(10*(Ce(J,3)-Min(3)+0.01)/(aux.Height-0.01));\r\n B = K <= 2;\r\n K = unique(K);\r\n hei(px(q),py(q)) = length(K)/10;\r\n den(px(q),py(q)) = t;\r\n baseden(px(q),py(q)) = nnz(B);\r\n end\r\n den = den/max(max(den)); % normalize\r\n baseden = baseden/max(max(baseden));\r\n\r\n % function whose maximum determines location of the trunk\r\n f = den.*hei.*baseden;\r\n % smooth the function by averaging over 8-neighbors\r\n x = zeros(n(1),n(2));\r\n y = zeros(n(1),n(2));\r\n for i = 2:n(1)-1\r\n for j = 2:n(2)-1\r\n f(i,j) = mean(mean(f(i-1:i+1,j-1:j+1)));\r\n x(i,j) = Min(1)+i*E;\r\n y(i,j) = Min(2)+j*E;\r\n end\r\n end\r\n f = f/max(max(f));\r\n\r\n % Trunk location is around the maximum f-value\r\n I = f > 0.5;\r\n Trunk0 = Partition(I); % squares that contain the trunk\r\n Trunk0 = vertcat(Trunk0{:});\r\n HBottom = min(Ce(Trunk0,3));\r\n I = Ce(Trunk0,3) > HBottom+min(0.02*aux.Height,0.3);\r\n J = Ce(Trunk0,3) < HBottom+min(0.08*aux.Height,1.5);\r\n I = I&J; % slice close to bottom should contain the trunk\r\n Trunk = Trunk0(I);\r\n Trunk = union(Trunk,vertcat(Nei{Trunk})); % Expand with neighbors\r\n Trunk = union(Trunk,vertcat(Nei{Trunk})); % Expand with neighbors\r\n Trunk = union(Trunk,vertcat(Nei{Trunk})); % Expand with neighbors\r\n\r\n % Define connected components of Trunk and select the largest component\r\n [Comp,CS] = connected_components(Nei,Trunk,0,aux.Fal);\r\n [~,I] = max(CS);\r\n Trunk = Comp{I};\r\n\r\n % Fit cylinder to Trunk\r\n I = Ce(Trunk,3) < 
HBottom+min(0.1*aux.Height,2); % Select the bottom part\r\n Trunk = Trunk(I);\r\n Trunk = union(Trunk,vertcat(Nei{Trunk}));\r\n Points = Ce(Trunk,:);\r\n c.start = mean(Points);\r\n c.axis = [0 0 1];\r\n c.radius = mean(distances_to_line(Points,c.axis,c.start));\r\n c = least_squares_cylinder(Points,c);\r\n\r\n % Remove far away points and fit new cylinder\r\n dis = distances_to_line(Points,c.axis,c.start);\r\n [~,I] = sort(abs(dis));\r\n I = I(1:ceil(0.9*length(I)));\r\n Points = Points(I,:);\r\n Trunk = Trunk(I);\r\n c = least_squares_cylinder(Points,c);\r\n\r\n % Select the sets in the bottom part of the trunk and remove sets too\r\n % far away form the cylinder axis (also remove far away points from sets)\r\n I = Ce(Trunk0,3) < HBottom+min(0.04*aux.Height,0.6);\r\n TrunkBot = Trunk0(I);\r\n TrunkBot = union(TrunkBot,vertcat(Nei{TrunkBot}));\r\n TrunkBot = union(TrunkBot,vertcat(Nei{TrunkBot}));\r\n n = length(TrunkBot);\r\n Keep = true(n,1); % Keep sets that are close enough the axis\r\n a = max(0.06,0.2*c.radius);\r\n b = max(0.04,0.15*c.radius);\r\n for i = 1:n\r\n d = distances_to_line(Ce(TrunkBot(i),:),c.axis,c.start);\r\n if d < c.radius+a\r\n B = Bal{Trunk(i)};\r\n d = distances_to_line(P(B,:),c.axis,c.start);\r\n I = d < c.radius+b;\r\n Bal{Trunk(i)} = B(I);\r\n else\r\n Keep(i) = false;\r\n end\r\n end\r\n TrunkBot = TrunkBot(Keep);\r\n\r\n % Select the above part of the trunk and combine with the bottom\r\n I = Ce(Trunk0,3) > HBottom+min(0.03*aux.Height,0.45);\r\n Trunk = Trunk0(I);\r\n Trunk = union(Trunk,vertcat(Nei{Trunk}));\r\n Trunk = union(Trunk,TrunkBot);\r\n\r\n BaseHeight = min(1.5,0.02*aux.Height);\r\n % Determine the base\r\n Bot = min(Ce(Trunk,3));\r\n J = Ce(Trunk,3) < Bot+BaseHeight;\r\n Base = Trunk(J);\r\n\r\n % Determine \"Forb\", i.e, ground and non-tree sets by expanding Trunk\r\n % as much as possible\r\n Trunk = union(Trunk,vertcat(Nei{Trunk}));\r\n Forb = aux.Fal;\r\n Ground = setdiff(vertcat(Nei{Base}),Trunk);\r\n Ground = setdiff(union(Ground,vertcat(Nei{Ground})),Trunk);\r\n Forb(Ground) = true;\r\n Forb(Base) = false;\r\n Add = Forb;\r\n while any(Add)\r\n Add(vertcat(Nei{Add})) = true;\r\n Add(Forb) = false;\r\n Add(Trunk) = false;\r\n Forb(Add) = true;\r\n end\r\n\r\n % Try to expand the \"Forb\" more by adding all the bottom sets\r\n Bot = min(Ce(Trunk,3));\r\n Ground = Ce(:,3) < Bot+0.03*aux.Height;\r\n Forb(Ground) = true;\r\n Forb(Trunk) = false;\r\n cover.ball = Bal;\r\nend\r\n\r\nend % End of function\r\n\r\n\r\nfunction [Trunk,cover] = define_trunk(cover,aux,Base,Forb,inputs)\r\n\r\n% This function tries to make sure that likely \"route\" of the trunk from\r\n% the bottom to the top is connected. 
However, this does not mean that the\r\n% final trunk follows this \"route\".\r\n\r\nNei = cover.neighbor;\r\nCe = aux.Ce;\r\n% Determine the output \"Trunk\" which indicates which sets are part of\r\n% likely trunk\r\nTrunk = aux.Fal;\r\nTrunk(Base) = true;\r\n% Expand Trunk from the base above with neighbors as long as possible\r\nExp = Base; % the current \"top\" of Trunk\r\n% select the unique neighbors of Exp\r\nExp = unique_elements([Exp; vertcat(Nei{Exp})],aux.Fal);\r\nI = Trunk(Exp);\r\nJ = Forb(Exp);\r\nExp = Exp(~I|~J); % Only non forbidden sets that are not already in Trunk\r\nTrunk(Exp) = true; % Add the expansion Exp to Trunk\r\nL = 0.25; % maximum height difference in Exp from its top to bottom\r\nH = max(Ce(Trunk,3))-L; % the minimum bottom heigth for the current Exp\r\n% true as long as the expansion is possible with original neighbors:\r\nFirstMod = true;\r\nwhile ~isempty(Exp)\r\n % Expand Trunk similarly as above as long as possible\r\n H0 = H;\r\n Exp0 = Exp;\r\n Exp = union(Exp,vertcat(Nei{Exp}));\r\n I = Trunk(Exp);\r\n Exp = Exp(~I);\r\n I = Ce(Exp,3) >= H;\r\n Exp = Exp(I);\r\n Trunk(Exp) = true;\r\n if ~isempty(Exp)\r\n H = max(Ce(Exp,3))-L;\r\n end\r\n\r\n % If the expansion Exp is empty and the top of the tree is still over 5\r\n % meters higher, then search new neighbors from above\r\n if (isempty(Exp) || H < H0+inputs.PatchDiam1/2) && H < aux.Height-5\r\n\r\n % Generate rectangular partition of the sets\r\n if FirstMod\r\n FirstMod = false;\r\n % The vertices of the rectangle containing C\r\n Min = double(min(Ce(:,1:2)));\r\n Max = double(max(Ce(:,1:2)));\r\n nb = size(Ce,1);\r\n\r\n % Number of rectangles with edge length \"E\" in the plane\r\n EdgeLenth = 0.2;\r\n NRect = double(ceil((Max-Min)/EdgeLenth)+1);\r\n\r\n % Calculates the rectangular-coordinates of the points\r\n px = floor((Ce(:,1)-Min(1))/EdgeLenth)+1;\r\n py = floor((Ce(:,2)-Min(2))/EdgeLenth)+1;\r\n\r\n % Sorts the points according a lexicographical order\r\n LexOrd = [px py-1]*[1 NRect(1)]';\r\n [LexOrd,SortOrd] = sort(LexOrd);\r\n\r\n Partition = cell(NRect(1),NRect(2));\r\n p = 1; % The index of the point under comparison\r\n while p <= nb\r\n t = 1;\r\n while (p+t <= nb) && (LexOrd(p) == LexOrd(p+t))\r\n t = t+1;\r\n end\r\n q = SortOrd(p);\r\n J = SortOrd(p:p+t-1);\r\n Partition{px(q),py(q)} = J;\r\n p = p+t;\r\n end\r\n end\r\n\r\n % Select the region that is connected to a set above it\r\n if ~isempty(Exp)\r\n Region = Exp;\r\n else\r\n Region = Exp0;\r\n end\r\n\r\n % Select the minimum and maximum rectangular coordinate of the\r\n % region\r\n X1 = min(px(Region));\r\n if X1 <= 2\r\n X1 = 3;\r\n end\r\n X2 = max(px(Region));\r\n if X2 >= NRect(1)-1\r\n X2 = NRect(1)-2;\r\n end\r\n Y1 = min(py(Region));\r\n if Y1 <= 2\r\n Y1 = 3;\r\n end\r\n Y2 = max(py(Region));\r\n if Y2 >= NRect(2)-1\r\n Y2 = NRect(2)-2;\r\n end\r\n\r\n % Select the sets in the 2 meter layer above the region\r\n sets = Partition(X1-2:X2+2,Y1-2:Y2+2);\r\n sets = vertcat(sets{:});\r\n K = aux.Fal;\r\n K(sets) = true; % the potential sets\r\n I = Ce(:,3) > H;\r\n J = Ce(:,3) < H+2;\r\n I = I&J&K;\r\n I(Trunk) = false; % Must be non-Trunk sets\r\n SetsAbove = aux.Ind(I);\r\n\r\n % Search the closest connection between Region and SetsAbove that\r\n % is enough upward sloping (angle to the vertical has cosine larger\r\n % than 0.7)\r\n if ~isempty(SetsAbove)\r\n % Compute the distances and cosines of the connections\r\n n = length(Region);\r\n m = length(SetsAbove);\r\n Dist = zeros(n,m);\r\n Cos = zeros(n,m);\r\n for i = 
1:n\r\n V = mat_vec_subtraction(Ce(SetsAbove,:),Ce(Region(i),:));\r\n Len = sum(V.*V,2);\r\n v = normalize(V);\r\n Dist(i,:) = Len';\r\n Cos(i,:) = v(:,3)';\r\n end\r\n I = Cos > 0.7; % select those connection with large enough cosines\r\n % if not any, search with smaller cosines\r\n t = 0;\r\n while ~any(I)\r\n t = t+1;\r\n I = Cos > 0.7-t*0.05;\r\n end\r\n % Search the minimum distance\r\n Dist(~I) = 3;\r\n if n > 1 && m > 1\r\n [d,I] = min(Dist);\r\n [~,J] = min(d);\r\n I = I(J);\r\n elseif n == 1 && m > 1\r\n [~,J] = min(Dist);\r\n I = 1;\r\n elseif m == 1 && n < 1\r\n [~,I] = min(Dist);\r\n J = 1;\r\n else\r\n I = 1; % the set in component to be connected\r\n J = 1; % the set in \"trunk\" to be connected\r\n end\r\n\r\n % Join to \"SetsAbove\"\r\n I = Region(I);\r\n J = SetsAbove(J);\r\n % make the connection\r\n Nei{I} = [Nei{I}; J];\r\n Nei{J} = [Nei{J}; I];\r\n\r\n % Expand \"Trunk\" again\r\n Exp = union(Region,vertcat(Nei{Region}));\r\n I = Trunk(Exp);\r\n Exp = Exp(~I);\r\n I = Ce(Exp,3) >= H;\r\n Exp = Exp(I);\r\n Trunk(Exp) = true;\r\n H = max(Ce(Exp,3))-L;\r\n end\r\n end\r\nend\r\ncover.neighbor = Nei;\r\n\r\nend % End of function\r\n\r\n\r\nfunction [Trunk,cover] = define_main_branches(cover,segment,aux,inputs)\r\n\r\n% If previous segmentation exists, then use it to make the sets in its main\r\n% branches (stem and first (second or even up to third) order branches)\r\n% connected. This ensures that similar branching structure as in the\r\n% existing segmentation is possible.\r\n\r\nBal = cover.ball;\r\nNei = cover.neighbor;\r\nCe = aux.Ce;\r\n% Determine sets in the main branches of previous segmentation\r\nnb = size(Bal,1);\r\nMainBranches = zeros(nb,1);\r\nSegmentOfPoint = segment.SegmentOfPoint;\r\n% Determine which branch indexes define the main branches\r\nMainBranchIndexes = false(max(SegmentOfPoint),1);\r\nMainBranchIndexes(1) = true;\r\nMainBranchIndexes(segment.branch1indexes) = true;\r\nMainBranchIndexes(segment.branch2indexes) = true;\r\nMainBranchIndexes(segment.branch3indexes) = true;\r\nfor i = 1:nb\r\n BranchInd = nonzeros(SegmentOfPoint(Bal{i}));\r\n if ~isempty(BranchInd)\r\n ind = min(BranchInd);\r\n if MainBranchIndexes(ind)\r\n MainBranches(i) = min(BranchInd);\r\n end\r\n end\r\nend\r\n\r\n% Define the trunk sets\r\nTrunk = aux.Fal;\r\nTrunk(MainBranches > 0) = true;\r\n\r\n% Update the neighbors to make the main branches connected\r\n[Par,CC] = cubical_partition(Ce,3*inputs.PatchDiam2Max,10);\r\nSets = zeros(aux.nb,1,'uint32');\r\nBI = max(MainBranches);\r\nN = size(Par);\r\nfor i = 1:BI\r\n if MainBranchIndexes(i)\r\n Branch = MainBranches == i; % The sets forming branch \"i\"\r\n % the connected components of \"Branch\":\r\n Comps = connected_components(Nei,Branch,1,aux.Fal);\r\n n = size(Comps,1);\r\n % Connect the components to each other as long as there are more than\r\n % one component\r\n while n > 1\r\n for j = 1:n\r\n comp = Comps{j};\r\n NC = length(comp);\r\n\r\n % Determine branch sets closest to the component\r\n c = unique(CC(comp,:),'rows');\r\n m = size(c,1);\r\n t = 0;\r\n NearSets = zeros(0,1);\r\n while isempty(NearSets)\r\n NearSets = aux.Fal;\r\n t = t+1;\r\n for k = 1:m\r\n x1 = max(1,c(k,1)-t);\r\n x2 = min(c(k,1)+t,N(1));\r\n y1 = max(1,c(k,2)-t);\r\n y2 = min(c(k,2)+t,N(2));\r\n z1 = max(1,c(k,3)-t);\r\n z2 = min(c(k,3)+t,N(3));\r\n balls0 = Par(x1:x2,y1:y2,z1:z2);\r\n if t == 1\r\n balls = vertcat(balls0{:});\r\n else\r\n S = cellfun('length',balls0);\r\n I = S > 0;\r\n S = S(I);\r\n balls0 = balls0(I);\r\n stop = 
cumsum(S);\r\n start = [0; stop]+1;\r\n for l = 1:length(stop)\r\n Sets(start(l):stop(l)) = balls0{l};\r\n end\r\n balls = Sets(1:stop(l));\r\n end\r\n I = Branch(balls);\r\n balls = balls(I);\r\n NearSets(balls) = true;\r\n end\r\n NearSets(comp) = false; % Only the non-component cover sets\r\n NearSets = aux.Ind(NearSets);\r\n end\r\n\r\n % Determine the closest sets for \"comp\"\r\n if ~isempty(NearSets)\r\n d = pdist2(Ce(comp,:),Ce(NearSets,:));\r\n if NC == 1 && length(NearSets) == 1\r\n IU = 1; % the set in component to be connected\r\n JU = 1; % the set in \"trunk\" to be connected\r\n elseif NC == 1\r\n [du,JU] = min(d);\r\n IU = 1;\r\n elseif length(NearSets) == 1\r\n [du,IU] = min(d);\r\n JU = 1;\r\n else\r\n [d,IU] = min(d);\r\n [du,JU] = min(d);\r\n IU = IU(JU);\r\n end\r\n\r\n % Join to the closest component\r\n I = comp(IU);\r\n J = NearSets(JU);\r\n % make the connection\r\n Nei{I} = [Nei{I}; J];\r\n Nei{J} = [Nei{J}; I];\r\n end\r\n end\r\n\r\n Comps = connected_components(Nei,Branch,1,aux.Fal);\r\n n = size(Comps,1);\r\n end\r\n end\r\nend\r\n\r\n% Update the neigbors to connect 1st-order branches to the stem\r\nStem = MainBranches == 1;\r\nStem = aux.Ind(Stem);\r\nMainBranchIndexes = false(max(SegmentOfPoint),1);\r\nMainBranchIndexes(segment.branch1indexes) = true;\r\nBI = max(segment.branch1indexes);\r\nif isempty(BI)\r\n BI = 0;\r\nend\r\nfor i = 2:BI\r\n if MainBranchIndexes(i)\r\n Branch = MainBranches == i;\r\n Branch = aux.Ind(Branch);\r\n if ~isempty(Branch)\r\n Neigbors = MainBranches(vertcat(Nei{Branch})) == 1;\r\n if ~any(Neigbors)\r\n d = pdist2(Ce(Branch,:),Ce(Stem,:));\r\n if length(Branch) > 1 && length(Stem) > 1\r\n [d,I] = min(d);\r\n [d,J] = min(d);\r\n I = I(J);\r\n elseif length(Branch) == 1 && length(Stem) > 1\r\n [d,J] = min(d);\r\n I = 1;\r\n elseif length(Stem) == 1 && length(Branch) > 1\r\n [d,I] = min(d);\r\n J = 1;\r\n elseif length(Branch) == 1 && length(Stem) == 1\r\n I = 1; % the set in component to be connected\r\n J = 1; % the set in \"trunk\" to be connected\r\n end\r\n\r\n % Join the Branch to Stem\r\n I = Branch(I);\r\n J = Stem(J);\r\n Nei{I} = [Nei{I}; J];\r\n Nei{J} = [Nei{J}; I];\r\n end\r\n end\r\n end\r\nend\r\ncover.neighbor = Nei;\r\n\r\n% Check if the trunk is still in mutliple components and select the bottom\r\n% component to define \"Trunk\":\r\n[comps,cs] = connected_components(cover.neighbor,Trunk,aux.Fal);\r\nif length(cs) > 1\r\n [cs,I] = sort(cs,'descend');\r\n comps = comps(I);\r\n Stem = MainBranches == 1;\r\n Trunk = aux.Fal;\r\n i = 1;\r\n C = comps{i};\r\n while i <= length(cs) && ~any(Stem(C))\r\n i = i+1;\r\n C = comps{i};\r\n end\r\n Trunk(C) = true;\r\nend\r\n\r\n\r\nend % End of function\r\n\r\n\r\nfunction [cover,Forb] = make_tree_connected(cover,aux,Forb,Base,Trunk,inputs)\r\n\r\n% Update neighbor-relation for whole tree such that the whole tree is one\r\n% connected component\r\n\r\nNei = cover.neighbor;\r\nCe = aux.Ce;\r\n% Expand trunk as much as possible\r\nTrunk(Forb) = false;\r\nExp = Trunk;\r\nwhile any(Exp)\r\n Exp(vertcat(Nei{Exp})) = true;\r\n Exp(Trunk) = false;\r\n Exp(Forb) = false;\r\n Exp(Base) = false;\r\n Trunk(Exp) = true;\r\nend\r\n\r\n% Define \"Other\", sets not yet connected to trunk or Forb\r\nOther = ~aux.Fal;\r\nOther(Forb) = false;\r\nOther(Trunk) = false;\r\nOther(Base) = false;\r\n\r\n% Determine parameters on the extent of the \"Nearby Space\" and acceptable\r\n% component size\r\n% cell size for \"Nearby Space\" = k0 times PatchDiam:\r\nk0 = 
min(10,ceil(0.2/inputs.PatchDiam1));\r\n% current cell size, increases by k0 every time when new connections cannot\r\n% be made:\r\nk = k0;\r\nif inputs.OnlyTree\r\n Cmin = 0;\r\nelse\r\n Cmin = ceil(0.1/inputs.PatchDiam1); % minimum accepted component size,\r\n % smaller ones are added to Forb, the size triples every round\r\nend\r\n\r\n% Determine the components of \"Other\"\r\nif any(Other)\r\n Comps = connected_components(Nei,Other,1,aux.Fal);\r\n nc = size(Comps,1);\r\n NonClassified = true(nc,1);\r\n %plot_segs(P,Comps,6,1,cover.ball)\r\n %pause\r\nelse\r\n NonClassified = false;\r\nend\r\n\r\nbottom = min(Ce(Base,3));\r\n% repeat search and connecting as long as \"Other\" sets exists\r\nwhile any(NonClassified)\r\n npre = nnz(NonClassified); % number of \"Other\" sets before new connections\r\n again = true; % check connections again with same \"distance\" if true\r\n\r\n % Partition the centers of the cover sets into cubes with size k*dmin\r\n [Par,CC] = cubical_partition(Ce,k*inputs.PatchDiam1);\r\n Neighbors = cell(nc,1);\r\n Sizes = zeros(nc,2);\r\n Pass = true(nc,1);\r\n first_round = true;\r\n while again\r\n % Check each component: part of \"Tree\" or \"Forb\"\r\n for i = 1:nc\r\n if NonClassified(i) && Pass(i)\r\n comp = Comps{i}; % candidate component for joining to the tree\r\n\r\n % If the component is neighbor of forbidden sets, remove it\r\n J = Forb(vertcat(Nei{comp}));\r\n if any(J)\r\n NonClassified(i) = false;\r\n Forb(comp) = true;\r\n Other(comp) = false;\r\n else\r\n % Other wise check nearest sets for a connection\r\n NC = length(comp);\r\n if first_round\r\n\r\n % Select the cover sets the nearest to the component\r\n c = unique(CC(comp,:),'rows');\r\n m = size(c,1);\r\n B = cell(m,1);\r\n for j = 1:m\r\n balls = Par(c(j,1)-1:c(j,1)+1,...\r\n c(j,2)-1:c(j,2)+1,c(j,3)-1:c(j,3)+1);\r\n B{j} = vertcat(balls{:});\r\n end\r\n NearSets = vertcat(B{:});\r\n % Only the non-component cover sets\r\n aux.Fal(comp) = true;\r\n I = aux.Fal(NearSets);\r\n NearSets = NearSets(~I);\r\n aux.Fal(comp) = false;\r\n NearSets = unique(NearSets);\r\n Neighbors{i} = NearSets;\r\n if isempty(NearSets)\r\n Pass(i) = false;\r\n end\r\n % No \"Other\" sets\r\n I = Other(NearSets);\r\n NearSets = NearSets(~I);\r\n else\r\n NearSets = Neighbors{i};\r\n % No \"Other\" sets\r\n I = Other(NearSets);\r\n NearSets = NearSets(~I);\r\n end\r\n\r\n % Select different class from NearSets\r\n I = Trunk(NearSets);\r\n J = Forb(NearSets);\r\n trunk = NearSets(I); % \"Trunk\" sets\r\n forb = NearSets(J); % \"Forb\" sets\r\n if length(trunk) ~= Sizes(i,1) || length(forb) ~= Sizes(i,2)\r\n Sizes(i,:) = [length(trunk) length(forb)];\r\n\r\n % If large component is tall and close to ground, then\r\n % search the connection near the component's bottom\r\n if NC > 100\r\n hmin = min(Ce(comp,3));\r\n H = max(Ce(comp,3))-hmin;\r\n if H > 5 && hmin < bottom+5\r\n I = Ce(NearSets,3) < hmin+0.5;\r\n NearSets = NearSets(I);\r\n I = Trunk(NearSets);\r\n J = Forb(NearSets);\r\n trunk = NearSets(I); % \"Trunk\" sets\r\n forb = NearSets(J); % \"Forb\" sets\r\n end\r\n end\r\n\r\n % Determine the closest sets for \"trunk\"\r\n if ~isempty(trunk)\r\n d = pdist2(Ce(comp,:),Ce(trunk,:));\r\n if NC == 1 && length(trunk) == 1\r\n dt = d; % the minimum distance\r\n IC = 1; % the set in component to be connected\r\n IT = 1; % the set in \"trunk\" to be connected\r\n elseif NC == 1\r\n [dt,IT] = min(d);\r\n IC = 1;\r\n elseif length(trunk) == 1\r\n [dt,IC] = min(d);\r\n IT = 1;\r\n else\r\n [d,IC] = min(d);\r\n [dt,IT] = 
min(d);\r\n IC = IC(IT);\r\n end\r\n else\r\n dt = 700;\r\n end\r\n\r\n % Determine the closest sets for \"forb\"\r\n if ~isempty(forb)\r\n d = pdist2(Ce(comp,:),Ce(forb,:));\r\n df = min(d);\r\n if length(df) > 1\r\n df = min(df);\r\n end\r\n else\r\n df = 1000;\r\n end\r\n\r\n % Determine what to do with the component\r\n if (dt > 12 && dt < 100) || (NC < Cmin && dt > 0.5 && dt < 10)\r\n % Remove small isolated component\r\n Forb(comp) = true;\r\n Other(comp) = false;\r\n NonClassified(i) = false;\r\n elseif 3*df < dt || (df < dt && df > 0.25)\r\n % Join the component to \"Forb\"\r\n Forb(comp) = true;\r\n Other(comp) = false;\r\n NonClassified(i) = false;\r\n elseif (df == 1000 && dt == 700) || dt > k*inputs.PatchDiam1\r\n % Isolated component, do nothing\r\n else\r\n % Join to \"Trunk\"\r\n I = comp(IC);\r\n J = trunk(IT);\r\n Other(comp) = false;\r\n Trunk(comp) = true;\r\n NonClassified(i) = false;\r\n % make the connection\r\n Nei{I} = [Nei{I}; J];\r\n Nei{J} = [Nei{J}; I];\r\n end\r\n end\r\n end\r\n end\r\n end\r\n first_round = false;\r\n % If \"Other\" has decreased, do another check with same \"distance\"\r\n if nnz(NonClassified) < npre\r\n again = true;\r\n npre = nnz(NonClassified);\r\n else\r\n again = false;\r\n end\r\n end\r\n k = k+k0; % increase the cell size of the nearby search space\r\n Cmin = 3*Cmin; % increase the acceptable component size\r\nend\r\nForb(Base) = false;\r\ncover.neighbor = Nei;\r\n\r\nend % End of function\r\n\r\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "segments.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/segments.m", "size": 13369, "source_encoding": "utf_8", "md5": "775d0a7de8b20ebd931b2cf4c554cabf", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction segment = segments(cover,Base,Forb)\n\n% ---------------------------------------------------------------------\n% SEGMENTS.M Segments the covered point cloud into branches.\n%\n% Version 2.10\n% Latest update 16 Aug 2017\n%\n% Copyright (C) 2013-2017 Pasi Raumonen\n% ---------------------------------------------------------------------\n\n% Segments the tree into branches and records their parent-child-relations. \n% Bifurcations are recognized by studying connectivity of a \"study\"\n% region moving along the tree. 
In case of multiple connected components \n% in \"study\", the components are classified as the continuation and branches.\n%\n% Inputs:\n% cover Cover sets\n% Base Base of the tree\n% Forb Cover sets not part of the tree\n%\n% Outputs:\n% segment Structure array containing the followin fields:\n% segments Segments found, (n_seg x 1)-cell, each cell contains a cell array the cover sets\n% ParentSegment Parent segment of each segment, (n_seg x 1)-vector,\n% equals to zero if no parent segment\n% ChildSegment Children segments of each segment, (n_seg x 1)-cell\n\nNei = cover.neighbor;\nnb = size(Nei,1); % The number of cover sets\na = max([200000 nb/100]); % Estimate for maximum number of segments\nSBas = cell(a,1); % The segment bases found\nSegs = cell(a,1); % The segments found\nSPar = zeros(a,2,'uint32'); % The parent segment of each segment\nSChi = cell(a,1); % The children segments of each segment\n\n% Initialize SChi\nSChi{1} = zeros(5000,1,'uint32');\nC = zeros(200,1);\nfor i = 2:a\n SChi{i} = C;\nend\nNChi = zeros(a,1); % Number of child segments found for each segment\n\nFal = false(nb,1); % Logical false-vector for cover sets\ns = 1; % The index of the segment under expansion\nb = s; % The index of the latest found base\n\nSBas{s} = Base;\nSeg = cell(1000,1); % The cover set layers in the current segment\nSeg{1} = Base;\n\nForbAll = Fal; % The forbidden sets\nForbAll(Forb) = true;\nForbAll(Base) = true;\nForb = ForbAll; % The forbidden sets for the segment under expansion\n\nContinue = true; % True as long as the component can be segmented further \nNewSeg = true; % True if the first Cut for the current segment\nnl = 1; % The number of cover set layers currently in the segment\n\n% Segmenting stops when there are no more segments to be found\nwhile Continue && (b < nb)\n \n % Update the forbidden sets\n Forb(Seg{nl}) = true;\n \n % Define the study\n Cut = define_cut(Nei,Seg{nl},Forb,Fal);\n CutSize = length(Cut);\n \n if NewSeg\n NewSeg = false;\n ns = min(CutSize,6);\n end\n \n % Define the components of cut and study regions\n if CutSize > 0\n CutComps = cut_components(Nei,Cut,CutSize,Fal,Fal);\n nc = size(CutComps,1);\n if nc > 1\n [StudyComps,Bases,CompSize,Cont,BaseSize] = ...\n study_components(Nei,ns,Cut,CutComps,Forb,Fal,Fal);\n nc = length(Cont);\n end\n else\n nc = 0;\n end\n \n % Classify study region components\n if nc == 1\n % One component, continue expansion of the current segment\n nl = nl+1;\n if size(Cut,2) > 1\n Seg{nl} = Cut';\n else\n Seg{nl} = Cut;\n end\n elseif nc > 1\n % Classify the components of the Study region\n Class = component_classification(CompSize,Cont,BaseSize,CutSize);\n \n for i = 1:nc\n if Class(i) == 1\n Base = Bases{i};\n ForbAll(Base) = true;\n Forb(StudyComps{i}) = true;\n J = Forb(Cut);\n Cut = Cut(~J);\n b = b+1;\n SBas{b} = Base;\n SPar(b,:) = [s nl];\n NChi(s) = NChi(s)+1;\n SChi{s}(NChi(s)) = b;\n end\n end\n \n % Define the new cut.\n % If the cut is empty, determine the new base\n if isempty(Cut)\n Segs{s} = Seg(1:nl);\n S = vertcat(Seg{1:nl});\n ForbAll(S) = true;\n\n if s < b\n s = s+1;\n Seg{1} = SBas{s};\n Forb = ForbAll;\n NewSeg = true;\n nl = 1;\n else\n Continue = false;\n end\n else\n if size(Cut,2) > 1\n Cut = Cut';\n end\n nl = nl+1;\n Seg{nl} = Cut;\n end\n \n else\n % If the study region has zero size, then the current segment is\n % complete and determine the base of the next segment\n Segs{s} = Seg(1:nl);\n S = vertcat(Seg{1:nl});\n ForbAll(S) = true;\n \n if s < b\n s = s+1;\n Seg{1} = SBas{s};\n Forb = ForbAll;\n 
NewSeg = true;\n nl = 1;\n else\n Continue = false;\n end\n end\nend\nSegs = Segs(1:b);\nSPar = SPar(1:b,:);\nschi = SChi(1:b);\n\n% Define output\nSChi = cell(b,1);\nfor i = 1:b\n if NChi(i) > 0\n SChi{i} = uint32(schi{i}(1:NChi(i)));\n else\n SChi{i} = zeros(0,1,'uint32');\n end\n S = Segs{i};\n for j = 1:size(S,1)\n S{j} = uint32(S{j});\n end\n Segs{i} = S;\nend\nclear Segment\nsegment.segments = Segs;\nsegment.ParentSegment = SPar;\nsegment.ChildSegment = SChi;\n\nend % End of the main function\n\n\n% Define subfunctions\n\nfunction Cut = define_cut(Nei,CutPre,Forb,Fal)\n\n% Defines the \"Cut\" region\nCut = vertcat(Nei{CutPre});\nCut = unique_elements(Cut,Fal);\nI = Forb(Cut);\nCut = Cut(~I);\nend % End of function \n\n\nfunction [Components,CompSize] = cut_components(Nei,Cut,CutSize,Fal,False)\n\n% Define the connected components of the Cut\nif CutSize == 1\n % Cut is connected and therefore Study is also\n CompSize = 1;\n Components = cell(1,1);\n Components{1} = Cut;\nelseif CutSize == 2\n I = Nei{Cut(1)} == Cut(2);\n if any(I)\n Components = cell(1,1);\n Components{1} = Cut;\n CompSize = 1;\n else\n Components = cell(2,1);\n Components{1} = Cut(1);\n Components{2} = Cut(2);\n CompSize = [1 1];\n end\nelseif CutSize == 3\n I = Nei{Cut(1)} == Cut(2);\n J = Nei{Cut(1)} == Cut(3);\n K = Nei{Cut(2)} == Cut(3);\n if any(I)+any(J)+any(K) >= 2\n CompSize = 1;\n Components = cell(1,1);\n Components{1} = Cut;\n elseif any(I)\n Components = cell(2,1);\n Components{1} = Cut(1:2);\n Components{2} = Cut(3);\n CompSize = [2 1];\n elseif any(J)\n Components = cell(2,1);\n Components{1} = Cut([1 3]');\n Components{2} = Cut(2);\n CompSize = [2 1];\n elseif any(K)\n Components = cell(2,1);\n Components{1} = Cut(2:3);\n Components{2} = Cut(1);\n CompSize = [2 1];\n else\n CompSize = [1 1 1];\n Components = cell(3,1);\n Components{1} = Cut(1);\n Components{2} = Cut(2);\n Components{3} = Cut(3);\n end\nelse\n Components = cell(CutSize,1);\n CompSize = zeros(CutSize,1);\n Comp = zeros(CutSize,1);\n Fal(Cut) = true;\n nc = 0; % number of components found\n m = Cut(1);\n i = 0;\n while i < CutSize\n Added = Nei{m};\n I = Fal(Added);\n Added = Added(I);\n a = length(Added);\n Comp(1) = m;\n Fal(m) = false;\n t = 1;\n while a > 0\n Comp(t+1:t+a) = Added;\n Fal(Added) = false;\n t = t+a;\n Ext = vertcat(Nei{Added});\n Ext = unique_elements(Ext,False);\n I = Fal(Ext);\n Added = Ext(I);\n a = length(Added);\n end\n i = i+t;\n nc = nc+1;\n Components{nc} = Comp(1:t);\n CompSize(nc) = t;\n if i < CutSize\n J = Fal(Cut);\n m = Cut(J);\n m = m(1);\n end\n end\n Components = Components(1:nc);\n CompSize = CompSize(1:nc);\nend\n\nend % End of function\n\n\nfunction [Components,Bases,CompSize,Cont,BaseSize] = ...\n study_components(Nei,ns,Cut,CutComps,Forb,Fal,False)\n\n% Define Study as a cell-array\nStudy = cell(ns,1);\nStudySize = zeros(ns,1);\nStudy{1} = Cut;\nStudySize(1) = length(Cut);\nif ns >= 2\n N = Cut;\n i = 1;\n while i < ns\n Forb(N) = true;\n N = vertcat(Nei{N});\n N = unique_elements(N,Fal);\n I = Forb(N);\n N = N(~I);\n if ~isempty(N)\n i = i+1;\n Study{i} = N;\n StudySize(i) = length(N);\n else\n Study = Study(1:i);\n StudySize = StudySize(1:i);\n i = ns+1;\n end\n end\nend\n\n% Define study as a vector\nns = length(StudySize);\nstudysize = sum(StudySize);\nstudy = vertcat(Study{:});\n\n% Determine the components of study\nnc = size(CutComps,1);\ni = 1; % index of cut component\nj = 0; % number of elements attributed to components\nk = 0; % number of study components\nFal(study) = true;\nComponents 
= cell(nc,1);\nCompSize = zeros(nc,1);\nComp = zeros(studysize,1);\nwhile i <= nc\n C = CutComps{i};\n while j < studysize\n a = length(C);\n Comp(1:a) = C;\n Fal(C) = false;\n if a > 1\n Add = unique_elements(vertcat(Nei{C}),False);\n else\n Add = Nei{C};\n end\n t = a;\n I = Fal(Add);\n Add = Add(I);\n a = length(Add);\n while a > 0\n Comp(t+1:t+a) = Add;\n Fal(Add) = false;\n t = t+a;\n Add = vertcat(Nei{Add});\n Add = unique_elements(Add,False);\n I = Fal(Add);\n Add = Add(I);\n a = length(Add);\n end\n j = j+t;\n k = k+1;\n Components{k} = Comp(1:t);\n CompSize(k) = t;\n if j < studysize\n C = zeros(0,1);\n while i < nc && isempty(C)\n i = i+1;\n C = CutComps{i};\n J = Fal(C);\n C = C(J);\n end\n if i == nc && isempty(C)\n j = studysize;\n i = nc+1;\n end\n else\n i = nc+1;\n end\n end\n Components = Components(1:k);\n CompSize = CompSize(1:k);\nend\n\n% Determine BaseSize and Cont\nCont = true(k,1);\nBaseSize = zeros(k,1);\nBases = cell(k,1);\nif k > 1\n Forb(study) = true;\n Fal(study) = false;\n Fal(Study{1}) = true;\n for i = 1:k\n % Determine the size of the base of the components\n Set = unique_elements([Components{i}; Study{1}],False);\n False(Components{i}) = true;\n I = False(Set)&Fal(Set);\n False(Components{i}) = false;\n Set = Set(I);\n Bases{i} = Set;\n BaseSize(i) = length(Set);\n end\n Fal(Study{1}) = false;\n Fal(Study{ns}) = true;\n Forb(study) = true;\n for i = 1:k\n % Determine if the component can be extended\n Set = unique_elements([Components{i}; Study{ns}],False);\n False(Components{i}) = true;\n I = False(Set)&Fal(Set);\n False(Components{i}) = false;\n Set = Set(I);\n if ~isempty(Set)\n N = vertcat(Nei{Set});\n N = unique_elements(N,False);\n I = Forb(N);\n N = N(~I);\n if isempty(N)\n Cont(i) = false;\n end\n else\n Cont(i) = false;\n end\n end\nend\n\nend % End of function\n\n\nfunction Class = component_classification(CompSize,Cont,BaseSize,CutSize)\n\n% Classifies study region components:\n% Class(i) == 0 continuation\n% Class(i) == 1 branch\n\nnc = size(CompSize,1);\nStudySize = sum(CompSize);\nClass = ones(nc,1); % true if a component is a branch to be further segmented\nContiComp = 0;\n% Simple initial classification\nfor i = 1:nc\n if BaseSize(i) == CompSize(i) && ~Cont(i)\n % component has no expansion, not a branch\n Class(i) = 0;\n elseif BaseSize(i) == 1 && CompSize(i) <= 2 && ~Cont(i)\n % component has very small expansion, not a branch\n Class(i) = 0;\n elseif BaseSize(i)/CutSize < 0.05 && 2*BaseSize(i) >= CompSize(i) && ~Cont(i)\n % component has very small expansion or is very small, not a branch\n Class(i) = 0;\n elseif CompSize(i) <= 3 && ~Cont(i)\n % very small component, not a branch\n Class(i) = 0;\n elseif BaseSize(i)/CutSize >= 0.7 || CompSize(i) >= 0.7*StudySize\n % continuation of the segment\n Class(i) = 0;\n ContiComp = i;\n else\n % Component is probably a branch\n end\nend\n\nBranches = Class == 1;\nif ContiComp == 0 && any(Branches)\n Ind = (1:1:nc)';\n Branches = Ind(Branches);\n [~,I] = max(CompSize(Branches));\n Class(Branches(I)) = 0;\nend\n\nend % End of function\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "filtering.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/filtering.m", "size": 8700, "source_encoding": "utf_8", "md5": "67b53b588752ce6985691d1aff849e99", "text": "% This file is part of TREEQSM.\r\n% \r\n% TREEQSM is free software: you can redistribute it and/or modify\r\n% it under the terms of the GNU General Public License as published by\r\n% the Free Software 
Foundation, either version 3 of the License, or\r\n% (at your option) any later version.\r\n% \r\n% TREEQSM is distributed in the hope that it will be useful,\r\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n% GNU General Public License for more details.\r\n% \r\n% You should have received a copy of the GNU General Public License\r\n% along with TREEQSM. If not, see .\r\n\r\nfunction Pass = filtering(P,inputs)\r\n\r\n% ---------------------------------------------------------------------\r\n% FILTERING.M Filters noise from point clouds.\r\n%\r\n% Version 3.0.0\r\n% Latest update 3 May 2022\r\n%\r\n% Copyright (C) 2013-2022 Pasi Raumonen\r\n% ---------------------------------------------------------------------\r\n\r\n% Filters the point cloud as follows:\r\n% \r\n% 1) the possible NaNs are removed.\r\n% \r\n% 2) (optional, done if filter.k > 0) Statistical kth-nearest neighbor \r\n% distance outlier filtering based on user defined \"k\" (filter.k) and\r\n% multiplier for standard deviation (filter.nsigma): Determines the \r\n% kth-nearest neighbor distance for all points and then removes the points \r\n% whose distances are over average_distance + nsigma*std. Computes the \r\n% statistics for each meter layer in vertical direction so that the\r\n% average distances and SDs can change as the point density decreases.\r\n% \r\n% 3) (optional, done if filter.radius > 0) Statistical point density \r\n% filtering based on user defined ball radius (filter.radius) and multiplier \r\n% for standard deviation (filter.nsigma): Balls of radius \"filter.radius\"\r\n% centered at each point are defined for all points and the number of\r\n% points included (\"point density\") are computed and then removes the points \r\n% whose density is smaller than average_density - nsigma*std. Computes the \r\n% statistics for each meter layer in vertical direction so that the\r\n% average densities and SDs can change as the point density decreases.\r\n% \r\n% 4) (optional, done if filter.ncomp > 0) Small component filtering based\r\n% on user defined cover (filter.PatchDiam1, filter.BallRad1) and threshold\r\n% (filter.ncomp): Covers the point cloud and determines the connected\r\n% components of the cover and removes the points from the small components\r\n% that have less than filter.ncomp cover sets.\r\n%\r\n% 5) (optional, done if filter.EdgeLength > 0) cubical downsampling of the \r\n% point cloud based on user defined cube size (filter.EdgeLength): \r\n% selects randomly one point from each cube\r\n%\r\n% Does the filtering in the above order and thus always applies the next \r\n% fitering to the point cloud already filtered by the previous methods. 
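The five filtering steps above are all controlled by the inputs.filter sub-struct. As a minimal usage sketch (the field names match the inputs.filter fields documented in this file; the numeric values are illustrative assumptions, not recommended defaults, and only the kNN filter is enabled so that nothing beyond knnsearch from the Statistics and Machine Learning Toolbox is needed):

P = randn(1e4,3);                 % placeholder point cloud (x,y,z in meters)
inputs.filter.k          = 10;    % kth nearest neighbor for the outlier filter (0 disables)
inputs.filter.nsigma     = 1.5;   % threshold = mean + nsigma*std within each 1 m layer
inputs.filter.radius     = 0;     % ball radius for the density filter (0 disables)
inputs.filter.ncomp      = 0;     % small-component filter threshold (0 disables)
inputs.filter.PatchDiam1 = 0.05;  % cover-set diameter, used only if ncomp > 0
inputs.filter.BallRad1   = 0.075; % neighborhood ball radius, used only if ncomp > 0
inputs.filter.EdgeLength = 0;     % cube edge for cubical downsampling (0 disables)
inputs.filter.plot       = 0;     % set to 1 to plot the result
Pass = filtering(P,inputs);       % logical vector of points that pass the filters
Pfiltered = P(Pass,:);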
\r\n% Statistical kth-nearest neighbor distance outlier filtering and the \r\n% statistical point density filtering are meant to be exlusive to each\r\n% other.\r\n%\r\n% Inputs:\r\n% P Point cloud\r\n% inputs Inputs structure with the following subfields:\r\n% filter.EdgeLength Edge length of the cubes in the cubical downsampling\r\n% filter.k k of knn method\r\n% filter.radius Radius of the balls in the density filtering\r\n% filter.nsigma Multiplier for standard deviation, determines how\r\n% far from the mean the threshold is in terms of SD.\r\n% Used in both the knn and the density filtering\r\n% filter.ncomp Threshold number of components in the small\r\n% component filtering\r\n% filter.PatchDiam1 Defines the patch/cover set size for the component \r\n% filtering\r\n% filter.BallRad1 Defines the neighbors for the component filtering\r\n% filter.plot If true, plots the filtered point cloud\r\n% Outputs:\r\n% Pass Logical vector indicating points passing the filtering\r\n% ---------------------------------------------------------------------\r\n\r\n% Changes from version 2.0.0 to 3.0.0, 3 May 2022:\r\n% Major changes and additions.\r\n% 1) Added two new filtering options: statistical kth-nearest neighbor \r\n% distance outlier filtering and cubical downsampling.\r\n% 2) Changed the old point density filtering, which was based on given\r\n% threshold, into statistical point density filtering, where the\r\n% threshold is based on user defined statistical measure\r\n% 3) All the input parameters are given by \"inputs\"-structure that can be\r\n% defined by \"create_input\" script \r\n% 4) Streamlined the coding and what is displayed\r\n\r\n%% Initial data processing\r\n% Only double precision data\r\nif ~isa(P,'double')\r\n P = double(P);\r\nend\r\n% Only x,y,z-data\r\nif size(P,2) > 3\r\n P = P(:,1:3);\r\nend\r\nnp = size(P,1);\r\nnp0 = np;\r\nind = (1:1:np)';\r\nPass = false(np,1);\r\n\r\ndisp('----------------------')\r\ndisp(' Filtering...')\r\ndisp([' Points before filtering: ',num2str(np)])\r\n\r\n%% Remove possible NaNs\r\nF = ~any(isnan(P),2);\r\nif nnz(F) < np\r\n disp([' Points with NaN removed: ',num2str(np-nnz(Pass))])\r\n ind = ind(F);\r\nend \r\n\r\n%% Statistical kth-nearest neighbor distance outlier filtering\r\nif inputs.filter.k > 0\r\n % Compute the knn distances\r\n Q = P(ind,:);\r\n np = size(Q,1);\r\n [~, kNNdist] = knnsearch(Q,Q,'dist','euclidean','k',inputs.filter.k);\r\n kNNdist = kNNdist(:,end);\r\n\r\n % Change the threshold kNNdistance according the average and standard \r\n % deviation for every vertical layer of 1 meter in height\r\n hmin = min(Q(:,3));\r\n hmax = max(Q(:,3));\r\n H = ceil(hmax-hmin);\r\n F = false(np,1);\r\n ind = (1:1:np)';\r\n for i = 1:H\r\n I = Q(:,3) < hmin+i & Q(:,3) >= hmin+i-1;\r\n points = ind(I);\r\n d = kNNdist(points);\r\n J = d < mean(d)+inputs.filter.nsigma*std(d);\r\n points = points(J);\r\n F(points) = 1;\r\n end\r\n ind = ind(F);\r\n disp([' Points removed as statistical outliers: ',num2str(np-length(ind))])\r\nend\r\n\r\n%% Statistical point density filtering\r\nif inputs.filter.radius > 0\r\n Q = P(ind,:);\r\n np = size(Q,1);\r\n\r\n % Partition the point cloud into cubes\r\n [partition,CC] = cubical_partition(Q,inputs.filter.radius);\r\n\r\n % Determine the number of points inside a ball for each point\r\n NumOfPoints = zeros(np,1);\r\n r1 = inputs.filter.radius^2;\r\n for i = 1:np\r\n if NumOfPoints(i) == 0\r\n points = partition(CC(i,1)-1:CC(i,1)+1,CC(i,2)-1:CC(i,2)+1,CC(i,3)-1:CC(i,3)+1);\r\n points = 
vertcat(points{:,:});\r\n cube = Q(points,:);\r\n p = partition{CC(i,1),CC(i,2),CC(i,3)};\r\n for j = 1:length(p)\r\n dist = (Q(p(j),1)-cube(:,1)).^2+(Q(p(j),2)-cube(:,2)).^2+(Q(p(j),3)-cube(:,3)).^2;\r\n J = dist < r1;\r\n NumOfPoints(p(j)) = nnz(J);\r\n end\r\n end\r\n end\r\n\r\n % Change the threshold point density according the average and standard \r\n % deviation for every vertical layer of 1 meter in height\r\n hmin = min(Q(:,3));\r\n hmax = max(Q(:,3));\r\n H = ceil(hmax-hmin);\r\n F = false(np,1);\r\n ind = (1:1:np)';\r\n for i = 1:H\r\n I = Q(:,3) < hmin+i & Q(:,3) >= hmin+i-1;\r\n points = ind(I);\r\n N = NumOfPoints(points);\r\n J = N > mean(N)-inputs.filter.nsigma*std(N);\r\n points = points(J);\r\n F(points) = 1;\r\n end\r\n ind = ind(F);\r\n disp([' Points removed as statistical outliers: ',num2str(np-length(ind))])\r\nend\r\n\r\n%% Small component filtering\r\nif inputs.filter.ncomp > 0\r\n % Cover the point cloud with patches\r\n input.BallRad1 = inputs.filter.BallRad1;\r\n input.PatchDiam1 = inputs.filter.PatchDiam1;\r\n input.nmin1 = 0;\r\n Q = P(ind,:);\r\n np = size(Q,1);\r\n cover = cover_sets(Q,input);\r\n\r\n % Determine the separate components\r\n Components = connected_components(cover.neighbor,0,inputs.filter.ncomp);\r\n\r\n % The filtering\r\n B = vertcat(Components{:}); % patches in the components\r\n points = vertcat(cover.ball{B}); % points in the components\r\n F = false(np,1);\r\n F(points) = true;\r\n ind = ind(F);\r\n disp([' Points with small components removed: ',num2str(np-length(ind))])\r\nend\r\n\r\n%% Cubical downsampling\r\nif inputs.filter.EdgeLength > 0\r\n Q = P(ind,:);\r\n np = size(Q,1);\r\n F = cubical_downsampling(Q,inputs.filter.EdgeLength);\r\n ind = ind(F);\r\n disp([' Points removed with downsampling: ',num2str(np-length(ind))])\r\nend\r\n\r\n%% Define the output and display summary results\r\nPass(ind) = true;\r\nnp = nnz(Pass);\r\ndisp([' Points removed in total: ',num2str(np0-np)])\r\ndisp([' Points removed in total (%): ',num2str(round((1-np/np0)*1000)/10)])\r\ndisp([' Points left: ',num2str(np)])\r\n\r\n%% Plot the filtered and unfiltered point clouds\r\nif inputs.filter.plot\r\n plot_comparison(P(Pass,:),P(~Pass,:),1,1,1)\r\n plot_point_cloud(P(Pass,:),2,1)\r\nend\r\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "relative_size.m", "ext": ".m", "path": "TreeQSM-master/src/main_steps/relative_size.m", "size": 3969, "source_encoding": "utf_8", "md5": "ca5b31c61626f8eab338648a462c355b", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
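The two statistical filters in filtering.m above share one idea: compute a per-point statistic (kth-nearest-neighbor distance, or local point count) and threshold it separately in each 1 m height layer, so the cutoff adapts as point density drops with height. A self-contained sketch of that layer-wise rule on synthetic data (this is not the TreeQSM code itself; knnsearch needs the Statistics and Machine Learning Toolbox):

P = [randn(5000,2) 20*rand(5000,1)];               % toy cloud, z roughly 0..20 m
k = 10; nsigma = 1.5;
[~,D] = knnsearch(P,P,'dist','euclidean','k',k);
d = D(:,end);                                      % kth-nearest-neighbor distance per point
hmin = min(P(:,3));
H = ceil(max(P(:,3))-hmin);                        % number of 1 m layers
keep = false(size(P,1),1);
for i = 1:H
  I = P(:,3) >= hmin+i-1 & P(:,3) < hmin+i;        % points in layer i
  keep(I) = d(I) < mean(d(I)) + nsigma*std(d(I));  % per-layer threshold
end
Pfiltered = P(keep,:);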
If not, see .\n\nfunction RS = relative_size(P,cover,segment)\n\n% ---------------------------------------------------------------------\n% RELATIVE_SIZE.M Determines relative cover set size for points in new covers\n%\n% Version 2.00\n% Latest update 16 Aug 2017\n%\n% Copyright (C) 2014-2017 Pasi Raumonen\n% ---------------------------------------------------------------------\n% \n% Uses existing segmentation and its branching structure to determine\n% relative size of the cover sets distributed over new covers. The idea is \n% to decrease the relative size as the branch size decreases. This is \n% realised so that the relative size at the base of a branch is\n% proportional to the size of the stem's base, measured as number of\n% cover sets in the first few layers. Also when we approach the\n% tip of the branch, the relative size decreases to the minimum. \n% Maximum relative size is 256 at the bottom of the\n% stem and the minimum is 1 at the tip of every branch.\n%\n% Output:\n% RS Relative size (1-256), uint8-vector, (n_points x 1)\n\nBal = cover.ball;\nCen = cover.center;\nNei = cover.neighbor;\nSegs = segment.segments;\nSChi = segment.ChildSegment;\nnp = size(P,1); % number of points\nns = size(Segs,1); % number of segments\n\n%% Use branching order and height as apriori info\n% Determine the branch orders of the segments\nOrd = zeros(ns,1);\nC = SChi{1};\norder = 0;\nwhile ~isempty(C)\n order = order+order;\n Ord(C) = order;\n C = vertcat(SChi{C});\nend\nmaxO = order+1; % maximum branching order (plus one)\n\n% Determine tree height\nTop = max(P(Cen,3));\nBot = min(P(Cen,3));\nH = Top-Bot;\n\n%% Determine \"base size\" compared to the stem base\n% BaseSize is the relative size of the branch base compared to the stem\n% base, measured as number of cover sets in the first layers of the cover\n% sets. 
If it is larger than apriori upper limit based on branching order\n% and branch height, then correct to the apriori limit \nBaseSize = zeros(ns,1);\n% Determine first the base size at the stem\nS = Segs{1};\nn = size(S,1);\nif n >= 2\n m = min([6 n]);\n BaseSize(1) = mean(cellfun(@length,S(2:m)));\nelse\n BaseSize(1) = length(S{1});\nend\n% Then define base size for other segments\nfor i = 2:ns\n S = Segs{i};\n n = size(S,1);\n if n >= 2\n m = min([6 n]);\n BaseSize(i) = ceil(mean(cellfun(@length,S(2:m)))/BaseSize(1)*256);\n else\n BaseSize(i) = length(S{1})/BaseSize(1)*256;\n end\n bot = min(P(Cen(S{1}),3)); \n h = bot-Bot; % height of the segment's base\n BS = ceil(256*(maxO-Ord(i))/maxO*(H-h)/H); % maximum apriori base size\n if BaseSize(i) > BS\n BaseSize(i) = BS;\n end\nend\nBaseSize(1) = 256;\n\n%% Determine relative size for points\nTS = 1;\nRS = zeros(np,1,'uint8');\nfor i = 1:ns\n S = Segs{i};\n s = size(S,1);\n for j = 1:s\n Q = S{j};\n RS(vertcat(Bal{Q})) = BaseSize(i)-(BaseSize(i)-TS)*sqrt((j-1)/s);\n end\nend\n\n%% Adjust the relative size at the base of child segments\nRS0 = RS;\nfor i = 1:ns\n C = SChi{i};\n n = length(C);\n if n > 0\n for j = 1:n\n S = Segs{C(j)};\n B = S{1};\n N = vertcat(Nei{B});\n if size(S,1) > 1\n N = setdiff(N,S{2});\n end\n N = union(N,B);\n N = vertcat(Bal{N});\n RS(N) = RS0(N)/2;\n end\n end\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "func_grad_cylinder.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/func_grad_cylinder.m", "size": 3370, "source_encoding": "utf_8", "md5": "20d0b6e220003ae8a669e991c4c73090", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
If not, see .\n\nfunction [dist,J] = func_grad_cylinder(par,P,weight)\n\n% ---------------------------------------------------------------------\n% FUNC_GRAD_CYLINDER.M Function and gradient calculation for \n% least-squares cylinder fit.\n%\n% Version 2.2.0\n% Latest update 5 Oct 2021\n%\n% Copyright (C) 2013-2021 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Input \n% par Cylinder parameters [x0 y0 alpha beta r]'\n% P Point cloud\n% weight (Optional) Weights for the points\n% \n% Output\n% dist Signed distances of points to the cylinder surface:\n% dist(i) = sqrt(xh(i)^2 + yh(i)^2) - r, where \n% [xh yh zh]' = Ry(beta) * Rx(alpha) * ([x y z]' - [x0 y0 0]')\n% J Jacobian matrix d dist(i)/d par(j).\n\n% Five cylinder parameters: \n% Location = axis point intersects xy-plane: x0 and y0 (z0 == 0)\n% Rotation angles around x and y axis = alpha and beta\n% Radius = r\n%\n% Transformed points:\n% Pt = [xh yx zh] = Ry(beta) * Rx(alpha) * (P - [x0 y0 0])\n%\n% \"Plane points\":\n% Qt = Pt * I2 = [xh yh];\n%\n% Distance:\n% D(x0,y0,alpha,beta,r) = sqrt( dot(Qt,Qt) )-r = sqrt( Qt*Qt' )-r\n%\n% Least squares = minimize Sum( D^2 ) over x0, y0, alpha, beta and r\n%\n% rt = sqrt( dot(Qt,Qt) )\n% N = Qt/rt\n%\n% Jacobian for D with respect to x0, y0, alpha, beta:\n% dD/di = dot( N,dQt/di ) = dot( Qt/rt,dQt/di )\n%\n% R = Ry*Rx\n% dQt/dx0 = R*[-1 0 0]'\n% dQt/dy0 = R*[0 -1 0]'\n% dQt/dalpha = (P-[x0 y0 0])*DRx';\n% dQt/dalpha = (P-[x0 y0 0])*DRy';\n\n% Changes from version 2.1.0 to 2.2.0, 5 Oct 20201:\n% 1) Minor changes in syntax\n\n% Changes from version 2.0.0 to 2.1.0, 14 July 2020:\n% 1) Added optional input for weights of the points\n\nx0 = par(1);\ny0 = par(2);\nalpha = par(3);\nbeta = par(4);\nr = par(5);\n\n% Determine the rotation matrices and their derivatives\n[R,DR1,DR2] = form_rotation_matrices([alpha beta]);\n\n% Calculate the distances\nPt = (P-[x0 y0 0])*R';\nxt = Pt(:,1);\nyt = Pt(:,2);\nrt = sqrt(xt.*xt + yt.*yt);\ndist = rt-r; % Distances to the cylinder surface\nif nargin == 3\n dist = weight.*dist; % Weighted distances\nend\n\n% form the Jacobian matrix\nif nargout > 1 \n N = [xt./rt yt./rt];\n m = size(P,1);\n J = zeros(m,5);\n \n A1 = (R*[-1 0 0]')';\n A = eye(2);\n A(1,1) = A1(1); A(2,2) = A1(2);\n J(:,1) = sum(N(:,1:2)*A,2);\n \n A2 = (R*[0 -1 0]')';\n A(1,1) = A2(1); A(2,2) = A2(2);\n J(:,2) = sum(N(:,1:2)*A,2);\n \n A3 = (P-[x0 y0 0])*DR1';\n J(:,3) = sum(N(:,1:2).*A3(:,1:2),2);\n \n A4 = (P-[x0 y0 0])*DR2';\n J(:,4) = sum(N(:,1:2).*A4(:,1:2),2);\n \n J(:,5) = -1*ones(m,1);\n if nargin == 3\n % Weighted Jacobian\n J = [weight.*J(:,1) weight.*J(:,2) weight.*J(:,3) ...\n weight.*J(:,4) weight.*J(:,5)];\n end\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "func_grad_axis.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/func_grad_axis.m", "size": 3013, "source_encoding": "utf_8", "md5": "7a75b492055072602912f3a4b149510d", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
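One way to sanity-check the analytic Jacobian of func_grad_cylinder above is a central finite-difference comparison. A hedged sketch, assuming func_grad_cylinder and its dependency form_rotation_matrices from this source tree are on the MATLAB path (the parameter and point values are arbitrary):

par = [0.1; -0.2; 0.04; 0.02; 0.5];              % [x0 y0 alpha beta r]
t = linspace(0,2*pi,50)';
P = [0.5*cos(t)+0.1, 0.5*sin(t)-0.2, t/3];       % points near the cylinder surface
[~,J] = func_grad_cylinder(par,P);               % analytic Jacobian
Jfd = zeros(size(J));
h = 1e-6;
for i = 1:5
  e = zeros(5,1); e(i) = h;
  Jfd(:,i) = (func_grad_cylinder(par+e,P) - func_grad_cylinder(par-e,P))/(2*h);
end
max(abs(J(:)-Jfd(:)))                            % should be near zero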
See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction [dist,J] = func_grad_axis(P,par,weight)\n\n% ---------------------------------------------------------------------\n% FUNC_GRAD_CYLINDER.M Function and gradient calculation for \n% least-squares cylinder fit.\n%\n% Version 2.1.0\n% Latest update 14 July 2020\n%\n% Copyright (C) 2013-2020 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Input \n% par Cylinder parameters [x0 y0 alpha beta r]'\n% P Point cloud\n% weight (Optional) Weights for the points\n% \n% Output\n% dist Signed distances of points to the cylinder surface:\n% dist(i) = sqrt(xh(i)^2 + yh(i)^2) - r, where \n% [xh yh zh]' = Ry(beta) * Rx(alpha) * ([x y z]' - [x0 y0 0]')\n% J Jacobian matrix d dist(i)/d par(j).\n\n% Changes from version 2.0.0 to 2.1.0, 14 July 2020:\n% 1) Added optional input for weights of the points\n\n\n% Five cylinder parameters: \n% Location = axis point intersects xy-plane: x0 and y0 (z0 == 0)\n% Rotation angles around x and y axis = alpha and beta\n% Radius = r\n%\n% Transformed points:\n% Pt = [xh yx zh] = Ry(beta) * Rx(alpha) * (P - [x0 y0 0])\n%\n% \"Plane points\":\n% Qt = Pt * I2 = [xh yh];\n%\n% Distance:\n% D(x0,y0,alpha,beta,r) = sqrt( dot(Qt,Qt) )-r = sqrt( Qt*Qt' )-r\n%\n% Least squares = minimize Sum( D^2 ) over x0, y0, alpha, beta and r\n%\n% rt = sqrt( dot(Qt,Qt) )\n% N = Qt/rt\n%\n% Jacobian for D with respect to x0, y0, alpha, beta:\n% dD/di = dot( N,dQt/di ) = dot( Qt/rt,dQt/di )\n%\n% R = Ry*Rx\n% dQt/dx0 = R*[-1 0 0]'\n% dQt/dy0 = R*[0 -1 0]'\n% dQt/dalpha = (P-[x0 y0 0])*DRx';\n% dQt/dalpha = (P-[x0 y0 0])*DRy';\n \nx0 = par(1);\ny0 = par(2);\nalpha = par(3);\nbeta = par(4);\nr = par(5);\n\n% Determine the rotation matrices and their derivatives\n[R,DR1,DR2] = form_rotation_matrices([alpha beta]);\n\n% Calculate the distances\nPt = (P-[x0 y0 0])*R';\nxt = Pt(:,1);\nyt = Pt(:,2);\nrt = sqrt(xt.*xt + yt.*yt);\ndist = rt-r; % Distances to the cylinder surface\nif nargin == 3\n dist = weight.*dist; % Weighted distances\nend\n\n% form the Jacobian matrix\nif nargout > 1\n \n N = [xt./rt yt./rt];\n m = size(P,1);\n J = zeros(m,2);\n \n A3 = (P-[x0 y0 0])*DR1';\n J(:,1) = sum(N(:,1:2).*A3(:,1:2),2);\n \n A4 = (P-[x0 y0 0])*DR2';\n J(:,2) = sum(N(:,1:2).*A4(:,1:2),2);\n \n if nargin == 3\n % Weighted Jacobian\n J = [weight.*J(:,1) weight.*J(:,2)];\n end\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "nlssolver.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/nlssolver.m", "size": 2534, "source_encoding": "utf_8", "md5": "d76f851140186c08867124872cbcc554", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. 
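The nonlinear solver that follows, like the fitters above, repeats one Gauss-Newton step: solve (J'*J)*p = -J'*d for the update p, add it to the parameters, and stop when the squared sum of distances no longer changes. A self-contained toy example of that update rule, fitting a circle (center and radius) to exact 2-D data, purely for illustration:

t = linspace(0,2*pi,100)';
X = [2+1.5*cos(t), -1+1.5*sin(t)];            % points on a circle, center (2,-1), radius 1.5
par = [0; 0; 1];                              % initial guess [cx; cy; r]
for iter = 1:20
  dx = X(:,1)-par(1);  dy = X(:,2)-par(2);
  rho = sqrt(dx.^2+dy.^2);
  d = rho - par(3);                           % signed distances to the current circle
  J = [-dx./rho, -dy./rho, -ones(size(rho))]; % Jacobian d d_i / d par_j
  p = -(J'*J)\(J'*d);                         % Gauss-Newton step, as in nlssolver
  par = par + p;
  if norm(p) < 1e-10, break, end
end
disp(par')                                    % recovers [2 -1 1.5]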
If not, see .\n\nfunction [par,d,conv,rel] = nlssolver(par,P,weight)\n\n% ---------------------------------------------------------------------\n% NLSSOLVER.M Nonlinear least squares solver for cylinders.\n%\n% Version 2.1.0\n% Latest update 14 July 2020\n%\n% Copyright (C) 2013-2020 Pasi Raumonen\n% ---------------------------------------------------------------------\n%\n% Input \n% par Intial estimates of the parameters\n% P Point cloud\n% \n% Output \n% par Optimised parameter values\n% d Distances of points to cylinder\n% conv True if fitting converged\n% rel True if condition number was not very bad, fit was reliable\n\n% Changes from version 2.0.0 to 2.1.0, 14 July 2020:\n% 1) Added optional input for weights of the points\n\nmaxiter = 50;\niter = 0;\nconv = false;\nrel = true;\n\nif nargin == 2\n NoWeights = true;\nelse\n NoWeights = false;\nend\n\n%% Gauss-Newton iterations\nwhile iter < maxiter && ~conv && rel\n \n %% Calculate the distances and Jacobian\n if NoWeights\n [d0, J] = func_grad_cylinder(par,P);\n else\n [d0, J] = func_grad_cylinder(par,P,weight);\n end\n \n %% Calculate update step\n SS0 = norm(d0); % Squared sum of the distances\n % solve for the system of equations: \n % par(i+1) = par(i) - (J'J)^(-1)*J'd0(par(i))\n A = J'*J;\n b = J'*d0;\n warning off\n p = -A\\b; % solve for the system of equations\n warning on\n par = par+p; % update the parameters\n \n %% Check reliability\n if rcond(-R) < 10000*eps\n rel = false;\n end\n \n %% Check convergence:\n % The distances with the new parameter values:\n if NoWeights\n d = func_grad_cylinder(par,P); \n else\n d = func_grad_cylinder(par,P,weight); \n end\n SS1 = norm(d); % Squared sum of the distances\n if abs(SS0-SS1) < 1e-4\n conv = true;\n end\n \n iter = iter + 1;\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "least_squares_axis.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/least_squares_axis.m", "size": 3918, "source_encoding": "utf_8", "md5": "f47fffa17b2cc0925b40301fcc7076dd", "text": "\nfunction cyl = least_squares_axis(P,Axis,Point0,Rad0,weight)\n\n% ---------------------------------------------------------------------\n% LEAST_SQUARES_AXIS.M Least-squares cylinder axis fitting using \n% Gauss-Newton when radius and point are given\n%\n% Version 1.0\n% Latest update 1 Oct 2021\n%\n% Copyright (C) 2017-2021 Pasi Raumonen\n% ---------------------------------------------------------------------\n% Input \n% P 3d point cloud\n% Axis0 Initial axis estimate (1 x 3)\n% Point0 Initial estimate of axis point (1 x 3)\n% Rad0 Initial estimate of the cylinder radius\n% weight (Optional) Weights for each point\n% \n% Output\n% cyl Structure array with the following fields\n% axis Cylinder axis (optimized here)\n% radius Radius of the cylinder (from the input)\n% start Axis point (from the input)\n% mad Mean absolute distance of the points to the cylinder surface\n% SurfCov Surface coverage, how much of the cylinder surface is covered \n% with points\n% conv If conv = 1, the algorithm has converged \n% rel If rel = 1, the algorithm has reliable answer in terms of\n% matrix inversion with a good enough condition number\n% ---------------------------------------------------------------------\n\n\n%% Initial estimates and other settings\nres = 0.03; % \"Resolution level\" for computing surface coverage\npar = [0 0]';\nmaxiter = 50; % maximum number of Gauss-Newton iteration\niter = 0; % number of iterations so far\nconv = false; % converge of Gauss-Newton 
algorithm\nrel = true; % are the results reliable, system matrix not badly conditioned\nif nargin == 4\n weight = ones(size(P,1),1);\nend\nRot0 = rotate_to_z_axis(Axis);\nPt = (P-Point0)*Rot0';\n\nPar = [0 0 0 0 Rad0]';\n\n%% Gauss-Newton iterations\nwhile iter < maxiter && ~conv && rel\n \n % Calculate the distances and Jacobian\n [dist,J] = func_grad_axis(Pt,Par);\n \n % Calculate update step and gradient.\n SS0 = norm(dist); % Squared sum of the distances\n % solve for the system of equations: \n % par(i+1) = par(i) - (J'J)^(-1)*J'd(par(i))\n A = J'*J;\n b = J'*dist;\n warning off\n p = -A\\b; % solve for the system of equations\n warning on\n \n % Update\n par = par+p;\n \n % Check if the updated parameters lower the squared sum value\n Par = [0; 0; par; Rad0];\n dist = func_grad_axis(Pt,Par);\n SS1 = norm(dist);\n if SS1 > SS0\n % Update did not decreased the squared sum, use update with much\n % shorter update step\n par = par-0.95*p;\n Par = [0; 0; par; Rad0];\n dist = func_grad_axis(Pt,Par);\n SS1 = norm(dist);\n end\n \n % Check reliability\n rel = true;\n if rcond(A) < 10000*eps\n rel = false;\n end\n \n % Check convergence\n if abs(SS0-SS1) < 1e-5\n conv = true;\n end\n \n iter = iter+1;\nend\n\n%% Output\n% Inverse transformation to find axis and point on axis \n% corresponding to original data\nRot = form_rotation_matrices(par);\nAxis = Rot0'*Rot'*[0 0 1]'; % axis direction\n\n% Compute the point distances to the axis\n[dist,~,h] = distances_to_line(P,Axis,Point0); \ndist = dist-Rad0; % distances without weights\nLen = max(h)-min(h);\n\n% Compute mad (for points with maximum weights)\nif nargin <= 4\n mad = mean(abs(dist)); % mean absolute distance to the circle\nelse\n I = weight == max(weight);\n mad = mean(abs(dist(I))); % mean absolute distance to the circle\nend\n\n% Compute SurfCov, minimum 3*8 grid\nif ~any(isnan(par)) && rel && conv\n nl = ceil(Len/res);\n nl = max(nl,3);\n ns = ceil(2*pi*Rad0/res);\n ns = max(ns,8);\n ns = min(36,ns);\n SurfCov = single(surface_coverage(P,Axis,Point0,nl,ns,0.8*Rad0));\nelse\n SurfCov = single(0);\nend\n\n\n%% Define the output \nclear cir\ncyl.radius = Rad0;\ncyl.start = Point0;\ncyl.axis = Axis';\ncyl.mad = mad;\ncyl.SurfCov = SurfCov;\ncyl.conv = conv;\ncyl.rel = rel;\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "rotate_to_z_axis.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/rotate_to_z_axis.m", "size": 1206, "source_encoding": "utf_8", "md5": "249265c0c3047e01a4cc30792d37be20", "text": "% This file is part of TREEQSM.\r\n% \r\n% TREEQSM is free software: you can redistribute it and/or modify\r\n% it under the terms of the GNU General Public License as published by\r\n% the Free Software Foundation, either version 3 of the License, or\r\n% (at your option) any later version.\r\n% \r\n% TREEQSM is distributed in the hope that it will be useful,\r\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n% GNU General Public License for more details.\r\n% \r\n% You should have received a copy of the GNU General Public License\r\n% along with TREEQSM. If not, see .\r\n\r\nfunction [R,D,a] = rotate_to_z_axis(Vec)\r\n\r\n% --------------------------------------------------------------------------\r\n% ROTATE_TO_Z_AXIS.M Forms the rotation matrix to rotate the vector to \r\n% a point along the positive z-axis. 
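A quick self-contained check of the construction used below (rotation axis cross(Vec,[0 0 1]) and angle acos(Vec(3)) for a unit vector): building that rotation with Rodrigues' formula maps the vector onto the positive z-axis, which is what the TreeQSM helper rotation_matrix produces as well. The test vector is an arbitrary assumption.

v = [0.3; -0.4; 0.866];  v = v/norm(v);         % sample unit vector
D = cross(v,[0; 0; 1]);  D = D/norm(D);         % rotation axis
a = acos(v(3));                                 % rotation angle
K = [0 -D(3) D(2); D(3) 0 -D(1); -D(2) D(1) 0]; % cross-product matrix of D
R = eye(3) + sin(a)*K + (1-cos(a))*(K*K);       % Rodrigues' rotation formula
R*v                                             % approximately [0; 0; 1]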
\r\n%\r\n% Input \r\n% Vec Vector (3 x 1)\r\n%\r\n% Output \r\n% R Rotation matrix, with R * Vec = [0 0 z]', z > 0 \r\n\r\n\r\nD = cross(Vec,[0 0 1]);\r\nif norm(D) > 0\r\n a = acos(Vec(3));\r\n R = rotation_matrix(D,a);\r\nelse\r\n R = eye(3);\r\n a = 0;\r\n D = [1 0 0];\r\nend\r\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "least_squares_cylinder.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/least_squares_cylinder.m", "size": 6889, "source_encoding": "utf_8", "md5": "60305126c9e7fe2681c9f15ee98d5e21", "text": "% This file is part of TREEQSM.\n% \n% TREEQSM is free software: you can redistribute it and/or modify\n% it under the terms of the GNU General Public License as published by\n% the Free Software Foundation, either version 3 of the License, or\n% (at your option) any later version.\n% \n% TREEQSM is distributed in the hope that it will be useful,\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n% GNU General Public License for more details.\n% \n% You should have received a copy of the GNU General Public License\n% along with TREEQSM. If not, see .\n\nfunction cyl = least_squares_cylinder(P,cyl0,weight,Q)\n% ---------------------------------------------------------------------\n% LEAST_SQUARES_CYLINDER.M Least-squares cylinder using Gauss-Newton.\n%\n% Version 2.0.0\n% Latest update 5 Oct 2021\n%\n% Copyright (C) 2013-2021 Pasi Raumonen\n% ---------------------------------------------------------------------\n% Input \n% P Point cloud\n% cyl0 Initial estimates of the cylinder parameters\n% weight (Optional) Weights of the points for fitting\n% Q (Optional) Subset of \"P\" where the cylinder is intended\n% \n% Output \n% cyl Structure array containing the following fields:\n% radius Radius of the cylinder\n% length Length of the cylinder\n% start Point on the axis at the bottom of the cylinder (1 x 3)\n% axis Axis direction of the cylinder (1 x 3) \n% mad Mean absolute distance between points and cylinder surface\n% SurfCov Relative cover of the cylinder's surface by the points \n% dist Radial distances from the points to the cylinder (m x 1) \n% conv If conv = 1, the algorithm has converged \n% rel If rel = 1, the algorithm has reliable answer in terms of\n% matrix inversion with a good enough condition number\n% ---------------------------------------------------------------------\n\n% Changes from version 1.3.0 to 2.0.0, 5 Oct 2021: \n% 1) Included the Gauss-Newton iterations into this function (removed the \n% call to nlssolver function)\n% 2) Changed how the updata step is solved from the Jacobian\n% 3) Simplified some expressions and added comments\n% 4) mad is computed only from the points along the cylinder length in the\n% case of the optional input \"Q\" is given. 
\n% 5) Changed the surface coverage estimation by filtering out points whose \n% distance to the axis is less than 80% of the radius \n\n% Changes from version 1.2.0 to 1.3.0, 14 July 2020: \n% 1) Changed the input parameters of the cylinder to the struct format.\n% 2) Added optional input for weights\n% 3) Added optional input \"Q\", a subset of \"P\", the cylinder is intended\n% to be fitted in this subset but it is fitted to \"P\" to get better\n% estimate of the axis direction and radius\n\n% Changes from version 1.1.0 to 1.2.0, 14 Jan 2020: \n% 1) Changed the outputs and optionally the inputs to the struct format.\n% 2) Added new output, \"mad\", which is the mean absolute distance of the\n% points from the surface of the cylinder.\n% 3) Added new output, \"SurfCov\", that measures how well the surface of the\n% cylinder is covered by the points.\n% 4) Added new output, \"SurfCovDis\", which is a matrix of mean point distances \n% from layer/sector-intersections to the axis.\n% 5) Added new output, \"SurfCovVol\", which is an estimate of the cylinder's \n% volume based on the radii in \"SurfCovDis\" and \"cylindrical sectors\".\n% 6) Added new optional input \"res\" which gives the point resolution level\n% for computing SurfCov: the width and length of sectors/layers.\n\n% Changes from version 1.0.0 to 1.1.0, 3 Oct 2019: \n% 1) Bug fix: --> \"Point = Rot0'*([par(1) par(2) 0]')...\"\n\n\n%% Initialize data and values\nres = 0.03; % \"Resolution level\" for computing surface coverage\nmaxiter = 50; % maximum number of Gauss-Newton iterations\niter = 0; \nconv = false; % Did the iterations converge\nrel = true; % Are the results reliable (condition number was not very bad)\nif nargin == 2\n NoWeights = true; % No point weight given for the fitting\nelse\n NoWeights = false;\nend\n\n% Transform the data to close to standard position via a translation \n% followed by a rotation\nRot0 = rotate_to_z_axis(cyl0.axis);\nPt = (P-cyl0.start)*Rot0';\n\n% Initial estimates\npar = [0 0 0 0 cyl0.radius]'; \n\n\n%% Gauss-Newton algorithm \n% find estimate of rotation-translation-radius parameters that transform\n% the data so that the best-fit cylinder is one in standard position\nwhile iter < maxiter && ~conv && rel\n \n %% Calculate the distances and Jacobian\n if NoWeights\n [d0,J] = func_grad_cylinder(par,Pt);\n else\n [d0,J] = func_grad_cylinder(par,Pt,weight);\n end\n \n %% Calculate update step\n SS0 = norm(d0); % Squared sum of the distances\n % solve for the system of equations:\n % par(i+1) = par(i) - (J'J)^(-1)*J'd0(par(i))\n A = J'*J;\n b = J'*d0;\n warning off\n p = -A\\b; % solve for the system of equations\n warning on\n par = par+p; % update the parameters\n\n %% Check reliability\n if rcond(-A) < 10000*eps\n rel = false;\n end\n \n %% Check convergence:\n % The distances with the new parameter values:\n if NoWeights\n dist = func_grad_cylinder(par,Pt);\n else\n dist = func_grad_cylinder(par,Pt,weight);\n end\n SS1 = norm(dist); % Squared sum of the distances\n if abs(SS0-SS1) < 1e-4\n conv = true;\n end\n \n iter = iter + 1;\nend\n\n%% Compute the cylinder parameters and other outputs\ncyl.radius = single(par(5)); % radius\n\n% Inverse transformation to find axis and point on axis \n% corresponding to original data\nRot = form_rotation_matrices(par(3:4));\nAxis = Rot0'*Rot'*[0 0 1]'; % axis direction\nPoint = Rot0'*([par(1) par(2) 0]')+cyl0.start'; % axis point\n\n% Compute the start, length and mad, translate the axis point to the \n% cylinder's bottom:\n% If the fourth input (point 
cloud Q) is given, use it for the start, \n% length, mad, and SurfCov\nif nargin == 4\n if size(Q,1) > 5\n P = Q;\n end\nend\nH = P*Axis; % heights along the axis\nhmin = min(H);\ncyl.length = single(abs(max(H)-hmin));\nhpoint = Axis'*Point;\nPoint = Point-(hpoint-hmin)*Axis; % axis point at the cylinder's bottom\ncyl.start = single(Point');\ncyl.axis = single(Axis');\n% Compute mad for the points along the cylinder length:\nif nargin >= 6\n I = weight == max(weight);\n cyl.mad = single(average(abs(dist(I)))); % mean absolute distance\nelse\n cyl.mad = single(average(abs(dist))); % mean absolute distance\nend\ncyl.conv = conv;\ncyl.rel = rel;\n\n% Compute SurfCov, minimum 3*8 grid\nif ~any(isnan(Axis)) && ~any(isnan(Point)) && rel && conv\n nl = max(3,ceil(cyl.length/res));\n ns = ceil(2*pi*cyl.radius/res);\n ns = min(36,max(ns,8));\n SurfCov = surface_coverage(P,Axis',Point',nl,ns,0.8*cyl.radius);\n \n cyl.SurfCov = single(SurfCov);\nelse\n cyl.SurfCov = single(0);\nend\n"} +{"plateform": "github", "repo_name": "InverseTampere/TreeQSM-master", "name": "form_rotation_matrices.m", "ext": ".m", "path": "TreeQSM-master/src/least_squares_fitting/form_rotation_matrices.m", "size": 1449, "source_encoding": "utf_8", "md5": "b8928d5554f70ccfc10d9b4a2e5af802", "text": "% This file is part of TREEQSM.\r\n% \r\n% TREEQSM is free software: you can redistribute it and/or modify\r\n% it under the terms of the GNU General Public License as published by\r\n% the Free Software Foundation, either version 3 of the License, or\r\n% (at your option) any later version.\r\n% \r\n% TREEQSM is distributed in the hope that it will be useful,\r\n% but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n% GNU General Public License for more details.\r\n% \r\n% You should have received a copy of the GNU General Public License\r\n% along with TREEQSM. If not, see .\r\n\r\nfunction [R,dR1,dR2] = form_rotation_matrices(theta)\r\n \r\n% --------------------------------------------------------------------------\r\n% FORM_ROTATION_MATRICES.M Forms rotation matrices R = R2*R1 and its\r\n% derivatives\r\n%\r\n% Input \r\n% theta Plane rotation angles (t1, t2) \r\n%\r\n% Output \r\n% R Rotation matrix\r\n% R1 Plane rotation [1 0 0; 0 c1 -s1; 0 s1 c1]\r\n% R2 Plane rotation [c2 0 s2; 0 1 0; -s2 0 c2]\r\n\r\nc = cos(theta);\r\ns = sin(theta);\r\n\r\nR1 = [1 0 0; 0 c(1) -s(1); 0 s(1) c(1)];\r\nR = R1;\r\n\r\nR2 = [c(2) 0 s(2); 0 1 0; -s(2) 0 c(2)];\r\nR = R2*R;\r\n\r\nif nargout > 1\r\n dR1 = [0 0 0; 0 -R1(3,2) -R1(2,2); 0 R1(2,2) -R1(3,2)];\r\nend\r\n\r\nif nargout > 2\r\n dR2 = [-R2(1,3) 0 R2(1,1); 0 0 0; -R2(1,1) 0 -R2(1,3)];\r\nend"} +{"plateform": "github", "repo_name": "soumendu041/clustering-network-valued-data-master", "name": "moments.m", "ext": ".m", "path": "clustering-network-valued-data-master/moments.m", "size": 897, "source_encoding": "utf_8", "md5": "1e2789b8f0f75799935c1311361a06e2", "text": "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Comparison of graphs via normalizec count statistics/moments of the adjacency\n% matrix\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction y = moments(A, k, method)\n % A = adjacency matrix of the desired graph\n % k = no. 
of moments requied\n % method = 'exact' or 'approx': 'exact' returns exact counts and\n % 'approx' returns normalized traces of powers of adjacency matrix\n y = zeros(k,1);\n \n if (strcmp(method,'approx'))\n n = size(A, 1);\n temp = A;\n for t = 1:k\n y(t) = trace(temp)/exp((1+t/2)*log(n)); % this normalization is for dense graphs\n temp = temp*temp;\n end\n \n elseif (strcmp(method, 'exact'))\n n = size(A, 1);\n % need to call those count functions Purna di downloaded\n end \nend"} +{"plateform": "github", "repo_name": "wvu-navLab/RobustGNSS-master", "name": "ccolamd_test.m", "ext": ".m", "path": "RobustGNSS-master/gtsam/gtsam/3rdparty/CCOLAMD/MATLAB/ccolamd_test.m", "size": 11944, "source_encoding": "utf_8", "md5": "ab91fed9a7d6b40fa30544983b26cc7f", "text": "function ccolamd_test\n%CCOLAMD_TEST extensive test of ccolamd and csymamd\n%\n% Example:\n% ccolamd_test\n%\n% See also csymamd, ccolamd, ccolamd_make.\n\n% Copyright 1998-2007, Timothy A. Davis, Stefan Larimore, and Siva Rajamanickam\n% Developed in collaboration with J. Gilbert and E. Ng.\n\nhelp ccolamd_test\n\nglobal ccolamd_default_knobs csymamd_default_knobs\nccolamd_default_knobs = [0 10 10 1 0] ;\ncsymamd_default_knobs = [10 1 0] ;\n\n fprintf ('Compiling ccolamd, csymamd, and test mexFunctions.\\n') ;\n ccolamd_make ;\n\n d = '' ;\n if (~isempty (strfind (computer, '64')))\n\td = '-largeArrayDims' ;\n end\n cmd = sprintf ( ...\n 'mex -DDLONG -O %s -I../../SuiteSparse_config -I../Include ', d) ;\n src = '../Source/ccolamd.c ../../SuiteSparse_config/SuiteSparse_config.c' ;\n if (~(ispc || ismac))\n % for POSIX timing routine\n src = [src ' -lrt'] ;\n end\n eval ([cmd 'ccolamdtestmex.c ' src]) ;\n eval ([cmd 'csymamdtestmex.c ' src]) ;\n fprintf ('Done compiling.\\n') ; \n\n\nfprintf ('\\nThe following codes will be tested:\\n') ;\nwhich ccolamd \nwhich csymamd\nwhich ccolamdtestmex\nwhich csymamdtestmex\n\nfprintf ('\\nStarting the tests. 
Please be patient.\\n') ;\n\nh = waitbar (0, 'COLAMD test') ;\n\nrand ('state', 0) ;\nrandn ('state', 0) ;\n\nA = sprandn (500,500,0.4) ;\n\np = ccolamd (A, [0 10 10 1 1]) ; check_perm (p, A) ;\np = ccolamd (A, [1 2 7 1 1]) ; check_perm (p, A) ;\np = ccolamd (A, [1 2 10 0 1]) ; check_perm (p, A) ;\np = ccolamd (A, [9 2 3 1 1]) ; check_perm (p, A) ;\n\np = csymamd (A, [10 1 1]) ; check_perm (p, A) ;\np = csymamd (A, [4 1 1]) ; check_perm (p, A) ;\np = csymamd (A, [9 0 1]) ; check_perm (p, A) ;\n\nfprintf ('Null matrices') ;\nA = zeros (0,0) ;\nA = sparse (A) ;\n\np = ccolamd (A) ;\ncheck_perm (p, A) ;\n\np = csymamd (A) ;\ncheck_perm (p, A) ;\n\nA = zeros (0, 100) ;\nA = sparse (A) ;\np = ccolamd (A) ;\ncheck_perm (p, A) ;\n\nA = zeros (100, 0) ;\nA = sparse (A) ;\np = ccolamd (A) ;\ncheck_perm (p, A) ;\nfprintf (' OK\\n') ;\n\n\nfprintf ('Matrices with a few dense row/cols\\n') ;\nfor trial = 1:20\n\n waitbar (trial/20, h, 'CCOLAMD: dense rows/cols') ;\n\n % random square unsymmetric matrix\n A = rand_matrix (1000, 1000, 1, 10, 20) ;\n [m n] = size (A) ;\n\n cmember = irand (min (trial,n), n) ;\n\n for tol = [0:.1:2 3:20 1e6]\n\tB = A + A' ;\n\n\tp = ccolamd (A, [ ]) ;\t\t check_perm (p, A) ;\n\tp = ccolamd (A, [1 tol tol 1]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol tol 1]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [1 tol tol 0]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol tol 1]) ; check_perm (p, A) ;\n\tp = csymamd (A, [tol 1]) ;\t check_perm (p, A) ;\n\tp = csymamd (A, tol) ;\t\t check_perm (p, A) ;\n\tp = csymamd (A, [ ]) ;\t\t check_perm (p, A) ;\n\tp = csymamd (B, [tol 0]) ;\t check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol -1 1]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 -1 tol 1]) ; check_perm (p, A) ;\n\n\t% check with non-null cmember\n\n\tp = ccolamd (A, [ ], cmember) ;\t\t check_perm (p, A) ;\n\tp = ccolamd (A, [1 tol tol 1], cmember) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol tol 1], cmember) ; check_perm (p, A) ;\n\tp = ccolamd (A, [1 tol tol 0], cmember) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol tol 1], cmember) ; check_perm (p, A) ;\n\tp = csymamd (A, [tol 1], cmember) ;\t check_perm (p, A) ;\n\tp = csymamd (A, tol, cmember) ;\t\t check_perm (p, A) ;\n\tp = csymamd (A, [ ], cmember) ;\t\t check_perm (p, A) ;\n\tp = csymamd (B, [tol 0], cmember) ;\t check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol -1 1], cmember) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 -1 tol 1], cmember) ; check_perm (p, A) ;\n\n\tp = ccolamd (A, [ ], [ ]) ;\t check_perm (p, A) ;\n\tp = ccolamd (A, [1 tol tol 1], [ ]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol tol 1], [ ]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [1 tol tol 0], [ ]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol tol 1], [ ]) ; check_perm (p, A) ;\n\tp = csymamd (A, [tol 1], [ ]) ;\t check_perm (p, A) ;\n\tp = csymamd (A, tol, [ ]) ;\t check_perm (p, A) ;\n\tp = csymamd (A, [ ], [ ]) ;\t check_perm (p, A) ;\n\tp = csymamd (B, [tol 0], [ ]) ;\t check_perm (p, A) ;\n\tp = ccolamd (A, [0 tol -1 1], [ ]) ; check_perm (p, A) ;\n\tp = ccolamd (A, [0 -1 tol 1], [ ]) ; check_perm (p, A) ;\n\n end\nend\nfprintf (' OK\\n') ;\n\nfprintf ('General matrices\\n') ;\nfor trial = 1:400\n\n waitbar (trial/400, h, 'CCOLAMD: with dense rows/cols') ;\n\n % matrix of random mtype\n mtype = irand (3) ;\n A = rand_matrix (2000, 2000, mtype, 0, 0) ;\n p = ccolamd (A) ;\n check_perm (p, A) ;\n\n if (mtype == 3)\n\tp = csymamd (A) ;\n\tcheck_perm (p, A) ;\n end\n\nend\nfprintf (' OK\\n') ;\n\n\n\nfprintf ('Test error handling with invalid 
inputs\\n') ;\n\n% Check different erroneous input.\nfor trial = 1:30\n\n waitbar (trial/30, h, 'CCOLAMD: error handling') ;\n\n A = rand_matrix (1000, 1000, 2, 0, 0) ;\n\n for err = 1:13\n\n p = Tcolamd (A, [ccolamd_default_knobs 1 err], [ ]) ;\n if (p(1) ~= -1)\t\t\t\t\t\t\t %#ok\n\t check_perm (p, A) ;\n\tend\n\n\tif (err == 1)\n\t % check different (valid) input args to ccolamd\n\t p = Acolamd (A) ;\n\t p2 = Acolamd (A, [ccolamd_default_knobs 0 0]) ;\n\t if (any (p ~= p2))\n\t\terror ('ccolamd: mismatch 1!') ;\n\t end\n\tend\n\n\tB = A'*A ;\n p = Tsymamd (B, [-1 1 0 err], [ ]) ;\n if (p(1) ~= -1)\t\t\t\t\t\t\t %#ok\n\t check_perm (p, A) ;\n\tend\n\n\tif (err == 1)\n\n\t % check different (valid) input args to csymamd\n\t p = Asymamd (B) ;\n\t check_perm (p, A) ;\n\t p2 = Asymamd (B, [csymamd_default_knobs 0]) ;\n\t if (any (p ~= p2))\n\t\terror ('symamd: mismatch 1!') ;\n\t end\n\tend\n\n end\n\nend\nfprintf (' OK\\n') ;\n\nfprintf ('Matrices with a few empty columns\\n') ;\n\nfor trial = 1:400\n\n waitbar (trial/400, h, 'CCOLAMD: with empty rows/cols') ;\n\n % some are square, some are rectangular\n n = 0 ;\n while (n < 5)\n\tA = rand_matrix (1000, 1000, irand (2), 0, 0) ;\n\t[m n] = size (A) ;\n end\n\n % Add 5 null columns at random locations.\n null_col = randperm (n) ;\n A (:, null_col) = 0 ;\n\n % Order the matrix and make sure that the null columns are ordered last.\n p = ccolamd (A, [1 1e6 1e6 0]) ;\n check_perm (p, A) ;\n\n % find all null columns in A\n null_col = find (sum (spones (A), 1) == 0) ;\n nnull = length (null_col) ;\n if (any (null_col ~= p ((n-nnull+1):n)))\n\terror ('ccolamd: Null cols are not ordered last in natural order') ;\n end\n\nend\nfprintf (' OK\\n') ;\n\nfprintf ('Matrices with a few empty rows and columns\\n') ;\n\nfor trial = 1:400\n\n waitbar (trial/400, h, 'CCOLAMD: with empty rows/cols') ;\n\n % symmetric matrices\n n = 0 ;\n while (n < 5)\n\tA = rand_matrix (1000, 1000, 3, 0, 0) ;\n\t[m n] = size (A) ;\n end\n\n % Add 5 null columns and rows at random locations.\n null_col = randperm (n) ;\n A (:, null_col) = 0 ;\n A (null_col, :) = 0 ;\n\n % Order the matrix and make sure that the null rows/cols are ordered last.\n p = csymamd (A, -1) ;\n check_perm (p, A) ;\n\n % find all null rows/columns in A\n Alo = tril (A, -1) ;\n null_col = ...\n\tfind ((sum (spones (Alo), 1) == 0) & (sum (spones (Alo), 2) == 0)') ;\n nnull = length (null_col) ;\n if (any (null_col ~= p ((n-nnull+1):n)))\n\terror ('csymamd: Null cols are not ordered last in natural order') ;\n end\n\nend\nfprintf (' OK\\n') ;\n\nfprintf ('Matrices with a few empty rows\\n') ;\n\n% Test matrices with null rows inserted.\n\nfor trial = 1:400\n\n waitbar (trial/400, h, 'CCOLAMD: with null rows') ;\n m = 0 ;\n while (m < 5)\n\tA = rand_matrix (1000, 1000, 2, 0, 0) ;\n\tm = size (A,1) ;\n end\n\n % Add 5 null rows at random locations.\n null_row = randperm (m) ;\n null_row = sort (null_row (1:5)) ;\n A (null_row, :) = 0 ;\n\n p = ccolamd (A) ;\n check_perm (p, A) ;\n\nend\nfprintf (' OK\\n') ;\n\nfprintf ('\\nccolamd and csymamd: all tests passed\\n\\n') ;\nclose (h) ;\n\n%-------------------------------------------------------------------------------\n\nfunction p = Acolamd (S, knobs)\n% Acolamd: compare ccolamd and Tcolamd results\n\nglobal ccolamd_default_knobs\n\nif (nargin < 2)\n p = ccolamd (S) ;\n p1 = Tcolamd (S, [ccolamd_default_knobs 0 0], [ ]) ;\nelse\n p = ccolamd (S, knobs) ;\n p1 = Tcolamd (S, knobs, [ ]) ;\nend\n\ncheck_perm (p, S) ;\ncheck_perm (p1, S) ;\n\nif (any (p1 ~= p))\n 
narg = nargin ;\n if (nargin == 2)\n\tsave bad S narg knobs\n else\n\tsave bad S narg\n end\n error ('Acolamd mismatch!') ;\nend\n\n%-------------------------------------------------------------------------------\n\nfunction p = Asymamd (S, knobs)\n% Asymamd: compare csymamd and Tsymamd results\n\nglobal csymamd_default_knobs\n\nif (nargin < 2)\n p = csymamd (S) ;\n p1 = Tsymamd (S, [csymamd_default_knobs 0], [ ]) ;\nelse\n p = csymamd (S, knobs) ;\n p1 = Tsymamd (S, knobs, [ ]) ;\nend\n\nif (any (p1 ~= p))\n error ('Asymamd mismatch!') ;\nend\n\n\n%-------------------------------------------------------------------------------\n\nfunction check_perm (p, A, cmember)\n% check_perm: check for a valid permutation vector\n\nif (isempty (A) & isempty (p))\t\t\t\t\t\t %#ok\n % empty permutation vectors of empty matrices are OK\n return\nend\n\nif (isempty (p))\n error ('Bad permutation: cannot be empty') ;\nend\n\n[m n] = size (A) ;\n[p_m p_n] = size (p) ;\nif (p_n == 1)\n % force p to be a row vector\n p = p' ;\n [p_m p_n] = size (p) ;\nend\n\nif (n ~= p_n)\n error ('Bad permutation: wrong size') ;\nend\n\nif (p_m ~= 1) ;\n % p must be a vector\n error ('Bad permutation: not a vector') ;\nelse\n if (any (sort (p) - (1:p_n)))\n\terror ('Bad permutation') ;\n end\nend\n\nif (nargin > 2)\n % check cmember\n c = cmember (p) ;\n % c must be monotonically non-decreasing\n c = diff (c) ;\n if (any (c < 0))\n\terror ('permutation breaks the cmember constraints') ;\n end\nend\n\n%-------------------------------------------------------------------------------\n\nfunction i = irand (n,s)\n% irand: return a random vector of size s, with values between 1 and n\nif (nargin == 1)\n s = 1 ;\nend\ni = min (n, 1 + floor (rand (1,s) * n)) ;\n\n%-------------------------------------------------------------------------------\n\nfunction A = rand_matrix (n_max, m_max, mtype, d_rows, d_cols)\n% rand_matrix: return a random sparse matrix\n%\n% A = rand_matrix (n_max, m_max, mtype, d_rows, d_cols)\n%\n% A binary matrix of random size, at most n_max-by-m_max, with d_rows dense rows\n% and d_cols dense columns.\n%\n% mtype 1: square unsymmetric (m_max is ignored)\n% mtype 2: rectangular\n% mtype 3: symmetric (m_max is ignored)\n\nn = irand (n_max) ;\nif (mtype ~= 2)\n % square\n m = n ;\nelse\n m = irand (m_max) ;\nend\n\nA = sprand (m, n, 10 / max (m,n)) ;\n\nif (d_rows > 0)\n % add dense rows\n for k = 1:d_rows\n\ti = irand (m) ;\n\tnz = irand (n) ;\n\tp = randperm (n) ;\n\tp = p (1:nz) ;\n\tA (i,p) = 1 ;\n end\nend\n\nif (d_cols > 0)\n % add dense cols\n for k = 1:d_cols\n\tj = irand (n) ;\n\tnz = irand (m) ;\n\tp = randperm (m) ;\n\tp = p (1:nz) ;\n\tA (p,j) = 1 ;\n end\nend\n\nA = spones (A) ;\n\n% ensure that there are no empty columns\nd = find (full (sum (A,1)) == 0) ;\t\t\t %#ok\nA (m,d) = 1 ;\t\t\t\t\t\t %#ok\n\n% ensure that there are no empty rows\nd = find (full (sum (A,2)) == 0) ;\t\t\t %#ok\nA (d,n) = 1 ;\t\t\t\t\t\t %#ok\n\nif (mtype == 3)\n % symmetric\n A = A + A' + speye (n) ;\nend\n\nA = spones (A) ;\n\n%-------------------------------------------------------------------------------\n% Tcolamd: run ccolamd in a testing mode\n%-------------------------------------------------------------------------------\n\nfunction p = Tcolamd (S, knobs, cmember)\n\n% knobs (5) = 1 ;\np = ccolamdtestmex (S, knobs, cmember) ;\n\nif (p (1) ~= -1)\n check_perm (p, S) ;\nend\n\n\n%-------------------------------------------------------------------------------\n% Tsymamd: run csymamd in a testing 
mode\n%-------------------------------------------------------------------------------\n\nfunction p = Tsymamd (S, knobs, cmember)\n\n% knobs (2) = 1 ;\np = csymamdtestmex (S, knobs, cmember) ;\n\nif (p (1) ~= -1)\n check_perm (p, S) ;\nend\n\n"} +{"plateform": "github", "repo_name": "wvu-navLab/RobustGNSS-master", "name": "geodarea.m", "ext": ".m", "path": "RobustGNSS-master/gtsam/gtsam/3rdparty/GeographicLib/matlab/geodarea.m", "size": 4241, "source_encoding": "utf_8", "md5": "a20b9abbe24d8781e0c053b3ddfd9f3a", "text": "function [A, P, N] = geodarea(lats, lons, ellipsoid)\n%GEODAREA Surface area of polygon on an ellipsoid\n%\n% A = GEODAREA(lats, lons)\n% [A, P, N] = GEODAREA(lats, lons, ellipsoid)\n%\n% calculates the surface area A of the geodesic polygon specified by the\n% input vectors lats, lons (in degrees). The ellipsoid vector is of the\n% form [a, e], where a is the equatorial radius in meters, e is the\n% eccentricity. If ellipsoid is omitted, the WGS84 ellipsoid (more\n% precisely, the value returned by DEFAULTELLIPSOID) is used. There is\n% no need to \"close\" the polygon by repeating the first point. Multiple\n% polygons can be specified by separating the vertices by NaNs in the\n% vectors. Thus a series of quadrilaterals can be specified as two 5 x K\n% arrays where the 5th row is NaN. The output, A, is in meters^2.\n% Counter-clockwise traversal counts as a positive area. Only simple\n% polygons (which do not intersect themselves) are supported. Also\n% returned are the perimeters of the polygons in P (meters) and the\n% numbers of vertices in N. GEODDOC gives the restrictions on the\n% allowed ranges of the arguments.\n%\n% GEODAREA loosely duplicates the functionality of the AREAINT function\n% in the MATLAB mapping toolbox. The major difference is that the\n% polygon edges are taken to be geodesics and the area contributed by\n% each edge is computed using a series expansion with is accurate\n% regardless of the length of the edge. The formulas are derived in\n%\n% C. F. F. Karney, Algorithms for geodesics,\n% J. 
Geodesy 87, 43-55 (2013);\n% http://dx.doi.org/10.1007/s00190-012-0578-z\n% Addenda: http://geographiclib.sf.net/geod-addenda.html\n%\n% See also GEODDOC, GEODDISTANCE, GEODRECKON, POLYGONAREA,\n% DEFAULTELLIPSOID.\n\n% Copyright (c) Charles Karney (2012-2013) .\n%\n% This file was distributed with GeographicLib 1.31.\n\n if nargin < 2, error('Too few input arguments'), end\n if nargin < 3, ellipsoid = defaultellipsoid; end\n if ~isequal(size(lats), size(lons))\n error('lats, lons have incompatible sizes')\n end\n if length(ellipsoid(:)) ~= 2\n error('ellipsoid must be a vector of size 2')\n end\n\n lat1 = lats(:);\n lon1 = lons(:);\n M = length(lat1);\n ind = [0; find(isnan(lat1 + lon1))];\n if length(ind) == 1 || ind(end) ~= M\n ind = [ind; M + 1];\n end\n K = length(ind) - 1;\n A = zeros(K, 1); P = A; N = A;\n if M == 0, return, end\n\n lat2 = [lat1(2:end, 1); 0];\n lon2 = [lon1(2:end, 1); 0];\n m0 = min(M, ind(1:end-1) + 1);\n m1 = max(1, ind(2:end) - 1);\n lat2(m1) = lat1(m0); lon2(m1) = lon1(m0);\n\n a = ellipsoid(1);\n e2 = ellipsoid(2)^2;\n f = e2 / (1 + sqrt(1 - e2));\n\n b = (1 - f) * a;\n c2 = (a^2 + b^2 * atanhee(1, e2)) / 2;\n area0 = 4 * pi * c2;\n\n [s12, ~, ~, S12] = geoddistance(lat1, lon1, lat2, lon2, ellipsoid);\n cross = transit(lon1, lon2);\n\n for k = 1 : K\n N(k) = m1(k) - m0(k) + 1;\n P(k) = accumulator(s12(m0(k):m1(k)));\n [As, At] = accumulator(S12(m0(k):m1(k)));\n crossings = sum(cross(m0(k):m1(k)));\n if mod(crossings, 2) ~= 0,\n [As, At] = accumulator( ((As < 0) * 2 - 1) * area0 / 2, As, At);\n end\n As = -As; At = -At;\n if As > area0/2\n As = accumulator( -area0 / 2, As, At);\n elseif As <= -area0/2\n As = accumulator( area0 / 2, As, At);\n end\n A(k) = As;\n end\nend\n\nfunction cross = transit(lon1, lon2)\n%TRANSIT Count crossings of prime meridian\n%\n% CROSS = TRANSIT(LON1, LON2) return 1 or -1 if crossing prime meridian\n% in east or west direction. Otherwise return zero.\n\n lon1 = AngNormalize(lon1);\n lon2 = AngNormalize(lon2);\n lon12 = AngDiff(lon1, lon2);\n cross = zeros(length(lon1), 1);\n cross(lon1 < 0 & lon2 >= 0 & lon12 > 0) = 1;\n cross(lon2 < 0 & lon1 >= 0 & lon12 < 0) = -1;\n\nend\n\nfunction [s, t] = accumulator(x, s, t)\n%ACCUMULATOR Accurately sum x\n%\n% [S, T] = ACCUMULATOR(X, S, T) accumulate the sum of the elements of X\n% into [S, T] using extended precision. S and T are scalars.\n\n if nargin < 3, t = 0; end\n if nargin < 2, s = 0; end\n\n for y = x(:)',\n % Here's Shewchuk's solution...\n [z, u] = sumx(y, t);\n [s, t] = sumx(z, s);\n if s == 0\n s = u;\n else\n t = t + u;\n end\n end\nend\n"} +{"plateform": "github", "repo_name": "wvu-navLab/RobustGNSS-master", "name": "geoddistance.m", "ext": ".m", "path": "RobustGNSS-master/gtsam/gtsam/3rdparty/GeographicLib/matlab/geoddistance.m", "size": 17333, "source_encoding": "utf_8", "md5": "3b8e33df114efbd010cafcfdd2b79868", "text": "function [s12, azi1, azi2, S12, m12, M12, M21, a12] = geoddistance ...\n (lat1, lon1, lat2, lon2, ellipsoid)\n%GEODDISTANCE Distance between points on an ellipsoid\n%\n% [s12, azi1, azi2] = GEODDISTANCE(lat1, lon1, lat2, lon2)\n% [s12, azi1, azi2, S12, m12, M12, M21, a12] =\n% GEODDISTANCE(lat1, lon1, lat2, lon2, ellipsoid)\n%\n% solves the inverse geodesic problem of finding of length and azimuths\n% of the shortest geodesic between points specified by lat1, lon1, lat2,\n% lon2. 
The input latitudes and longitudes, lat1, lon1, lat2, lon2, can\n% be scalars or arrays of equal size and must be expressed in degrees.\n% The ellipsoid vector is of the form [a, e], where a is the equatorial\n% radius in meters, e is the eccentricity. If ellipsoid is omitted, the\n% WGS84 ellipsoid (more precisely, the value returned by\n% DEFAULTELLIPSOID) is used. The output s12 is the distance in meters\n% and azi1 and azi2 are the forward azimuths at the end points in\n% degrees. The other optional outputs, S12, m12, M12, M21, a12 are\n% documented in GEODDOC. GEODDOC also gives the restrictions on the\n% allowed ranges of the arguments.\n%\n% When given a combination of scalar and array inputs, the scalar inputs\n% are automatically expanded to match the size of the arrays.\n%\n% This is an implementation of the algorithm given in\n%\n% C. F. F. Karney, Algorithms for geodesics,\n% J. Geodesy 87, 43-55 (2013);\n% http://dx.doi.org/10.1007/s00190-012-0578-z\n% Addenda: http://geographiclib.sf.net/geod-addenda.html\n%\n% This function duplicates some of the functionality of the DISTANCE\n% function in the MATLAB mapping toolbox. Differences are\n%\n% * When the ellipsoid argument is omitted, use the WGS84 ellipsoid.\n% * The routines work for prolate (as well as oblate) ellipsoids.\n% * The azimuth at the second end point azi2 is returned.\n% * The solution is accurate to round off for abs(e) < 0.2.\n% * The algorithm converges for all pairs of input points.\n% * Additional properties of the geodesic are calcuated.\n%\n% See also GEODDOC, GEODRECKON, GEODAREA, GEODESICINVERSE,\n% DEFAULTELLIPSOID.\n\n% Copyright (c) Charles Karney (2012, 2013) .\n%\n% This file was distributed with GeographicLib 1.31.\n%\n% This is a straightforward transcription of the C++ implementation in\n% GeographicLib and the C++ source should be consulted for additional\n% documentation. This is a vector implementation and the results returned\n% with array arguments are identical to those obtained with multiple calls\n% with scalar arguments. 
The biggest change was to eliminate the branching\n% to allow a vectorized solution.\n\n if nargin < 4, error('Too few input arguments'), end\n if nargin < 5, ellipsoid = defaultellipsoid; end\n try\n Z = lat1 + lon1 + lat2 + lon2;\n S = size(Z);\n Z = zeros(S);\n lat1 = lat1 + Z; lon1 = lon1 + Z;\n lat2 = lat2 + Z; lon2 = lon2 + Z;\n Z = Z(:);\n catch err\n error('lat1, lon1, s12, azi1 have incompatible sizes')\n end\n if length(ellipsoid(:)) ~= 2\n error('ellipsoid must be a vector of size 2')\n end\n\n degree = pi/180;\n tiny = sqrt(realmin);\n tol0 = eps;\n tolb = eps * sqrt(eps);\n maxit1 = 20;\n maxit2 = maxit1 + (-log2(eps) + 1) + 10;\n\n a = ellipsoid(1);\n e2 = ellipsoid(2)^2;\n f = e2 / (1 + sqrt(1 - e2));\n\n f1 = 1 - f;\n ep2 = e2 / (1 - e2);\n n = f / (2 - f);\n b = a * f1;\n\n areap = nargout >= 4;\n scalp = nargout >= 6;\n\n A3x = A3coeff(n);\n C3x = C3coeff(n);\n\n lon12 = AngDiff(AngNormalize(lon1(:)), AngNormalize(lon2(:)));\n lon12 = AngRound(lon12);\n lonsign = 2 * (lon12 >= 0) - 1;\n lon12 = lonsign .* lon12;\n lat1 = AngRound(lat1(:));\n lat2 = AngRound(lat2(:));\n swapp = 2 * (abs(lat1) >= abs(lat2)) - 1;\n lonsign(swapp < 0) = - lonsign(swapp < 0);\n [lat1(swapp < 0), lat2(swapp < 0)] = swap(lat1(swapp < 0), lat2(swapp < 0));\n\n latsign = 2 * (lat1 < 0) - 1;\n lat1 = latsign .* lat1;\n lat2 = latsign .* lat2;\n\n phi = lat1 * degree;\n sbet1 = f1 * sin(phi); cbet1 = cos(phi); cbet1(lat1 == -90) = tiny;\n [sbet1, cbet1] = SinCosNorm(sbet1, cbet1);\n\n phi = lat2 * degree;\n sbet2 = f1 * sin(phi); cbet2 = cos(phi); cbet2(abs(lat2) == 90) = tiny;\n [sbet2, cbet2] = SinCosNorm(sbet2, cbet2);\n\n c = cbet1 < -sbet1 & cbet2 == cbet1;\n sbet2(c) = (2 * (sbet2(c) < 0) - 1) .* sbet1(c);\n c = ~(cbet1 < -sbet1) & abs(sbet2) == - sbet1;\n cbet2(c) = cbet1(c);\n\n dn1 = sqrt(1 + ep2 * sbet1.^2);\n dn2 = sqrt(1 + ep2 * sbet2.^2);\n lam12 = lon12 * degree;\n slam12 = sin(lam12); slam12(lon12 == 180) = 0; clam12 = cos(lam12);\n\n sig12 = Z; ssig1 = Z; csig1 = Z; ssig2 = Z; csig2 = Z;\n calp1 = Z; salp1 = Z; calp2 = Z; salp2 = Z;\n s12 = Z; m12 = Z; M12 = Z; M21 = Z; omg12 = Z;\n\n m = lat1 == -90 | slam12 == 0;\n\n if any(m)\n calp1(m) = clam12(m); salp1(m) = slam12(m);\n calp2(m) = 1; salp2(m) = 0;\n\n ssig1(m) = sbet1(m); csig1(m) = calp1(m) .* cbet1(m);\n ssig2(m) = sbet2(m); csig2(m) = calp2(m) .* cbet2(m);\n\n sig12(m) = atan2(max(csig1(m) .* ssig2(m) - ssig1(m) .* csig2(m), 0), ...\n csig1(m) .* csig2(m) + ssig1(m) .* ssig2(m));\n\n [s12(m), m12(m), ~, M12(m), M21(m)] = ...\n Lengths(n, sig12(m), ...\n ssig1(m), csig1(m), dn1(m), ssig2(m), csig2(m), dn2(m), ...\n cbet1(m), cbet2(m), scalp, ep2);\n m = m & (sig12 < 1 | m12 >= 0);\n m12(m) = m12(m) * b;\n s12(m) = s12(m) * b;\n end\n\n eq = ~m & sbet1 == 0;\n if f > 0\n eq = eq & lam12 < pi - f * pi;\n end\n calp1(eq) = 0; calp2(eq) = 0; salp1(eq) = 1; salp2(eq) = 1;\n s12(eq) = a * lam12(eq); sig12(eq) = lam12(eq) / f1; omg12(eq) = sig12(eq);\n m12(eq) = b * sin(omg12(eq)); M12(eq) = cos(omg12(eq)); M21(eq) = M12(eq);\n\n g = ~eq & ~m;\n\n dnm = Z;\n [sig12(g), salp1(g), calp1(g), salp2(g), calp2(g), dnm(g)] = ...\n InverseStart(sbet1(g), cbet1(g), dn1(g), sbet2(g), cbet2(g), dn2(g), ...\n lam12(g), f, A3x);\n\n s = g & sig12 >= 0;\n s12(s) = b * sig12(s) .* dnm(s);\n m12(s) = b * dnm(s).^2 .* sin(sig12(s) ./ dnm(s));\n if scalp\n M12(s) = cos(sig12(s) ./ dnm(s)); M21(s) = M12(s);\n end\n omg12(s) = lam12(s) ./ (f1 * dnm(s));\n\n g = g & sig12 < 0;\n\n salp1a = Z + tiny; calp1a = Z + 1;\n salp1b = Z + tiny; calp1b = Z - 1;\n 
ssig1 = Z; csig1 = Z; ssig2 = Z; csig2 = Z;\n epsi = Z; v = Z; dv = Z;\n numit = Z;\n tripn = Z > 0;\n tripb = tripn;\n gsave = g;\n for k = 0 : maxit2 - 1\n if k == 0 && ~any(g), break, end\n numit(g) = k;\n [v(g), dv(g), ...\n salp2(g), calp2(g), sig12(g), ...\n ssig1(g), csig1(g), ssig2(g), csig2(g), epsi(g), omg12(g)] = ...\n Lambda12(sbet1(g), cbet1(g), dn1(g), ...\n sbet2(g), cbet2(g), dn2(g), ...\n salp1(g), calp1(g), f, A3x, C3x);\n v = v - lam12;\n g = g & ~(tripb | ~(abs(v) >= ((tripn * 6) + 2) * tol0));\n if ~any(g), break, end\n\n c = g & v > 0;\n if k <= maxit1\n c = c & calp1 ./ salp1 > calp1b ./ salp1b;\n end\n salp1b(c) = salp1(c); calp1b(c) = calp1(c);\n\n c = g & v < 0;\n if k <= maxit1\n c = c & calp1 ./ salp1 < calp1a ./ salp1a;\n end\n salp1a(c) = salp1(c); calp1a(c) = calp1(c);\n\n if k == maxit1, tripn(g) = false; end\n if k < maxit1\n dalp1 = -v ./ dv;\n sdalp1 = sin(dalp1); cdalp1 = cos(dalp1);\n nsalp1 = salp1 .* cdalp1 + calp1 .* sdalp1;\n calp1(g) = calp1(g) .* cdalp1(g) - salp1(g) .* sdalp1(g);\n salp1(g) = nsalp1(g);\n tripn = g & abs(v) <= 16 * tol0;\n c = g & ~(dv > 0 & nsalp1 > 0 & abs(dalp1) < pi);\n tripn(c) = false;\n else\n c = g;\n end\n\n salp1(c) = (salp1a(c) + salp1b(c))/2;\n calp1(c) = (calp1a(c) + calp1b(c))/2;\n [salp1(g), calp1(g)] = SinCosNorm(salp1(g), calp1(g));\n tripb(c) = (abs(salp1a(c) - salp1(c)) + (calp1a(c) - calp1(c)) < tolb | ...\n abs(salp1(c) - salp1b(c)) + (calp1(c) - calp1b(c)) < tolb);\n end\n\n g = gsave;\n [s12(g), m12(g), ~, M12(g), M21(g)] = ...\n Lengths(epsi(g), sig12(g), ...\n ssig1(g), csig1(g), dn1(g), ssig2(g), csig2(g), dn2(g), ...\n cbet1(g), cbet2(g), scalp, ep2);\n\n m12(g) = m12(g) * b;\n s12(g) = s12(g) * b;\n omg12(g) = lam12(g) - omg12(g);\n\n s12 = 0 + s12;\n\n if areap\n salp0 = salp1 .* cbet1; calp0 = hypot(calp1, salp1 .* sbet1);\n ssig1 = sbet1; csig1 = calp1 .* cbet1;\n ssig2 = sbet2; csig2 = calp2 .* cbet2;\n k2 = calp0.^2 * ep2;\n epsi = k2 ./ (2 * (1 + sqrt(1 + k2)) + k2);\n A4 = (a^2 * e2) * calp0 .* salp0;\n [ssig1, csig1] = SinCosNorm(ssig1, csig1);\n [ssig2, csig2] = SinCosNorm(ssig2, csig2);\n\n C4x = C4coeff(n);\n C4a = C4f(epsi, C4x);\n B41 = SinCosSeries(false, ssig1, csig1, C4a);\n B42 = SinCosSeries(false, ssig2, csig2, C4a);\n S12 = A4 .* (B42 - B41);\n S12(calp0 == 0 | salp0 == 0) = 0;\n\n l = ~m & omg12 < 0.75 * pi & sbet2 - sbet1 < 1.75;\n alp12 = Z;\n somg12 = sin(omg12(l)); domg12 = 1 + cos(omg12(l));\n dbet1 = 1 + cbet1(l); dbet2 = 1 + cbet2(l);\n alp12(l) = 2 * atan2(somg12 .* (sbet1(l) .* dbet2 + sbet2(l) .* dbet1), ...\n domg12 .* (sbet1(l) .* sbet2(l) + dbet1 .* dbet2));\n l = ~l;\n salp12 = salp2(l) .* calp1(l) - calp2(l) .* salp1(l);\n calp12 = calp2(l) .* calp1(l) + salp2(l) .* salp1(l);\n s = salp12 == 0 & calp12 < 0;\n salp12(s) = tiny * calp1(s); calp12(s) = -1;\n alp12(l) = atan2(salp12, calp12);\n c2 = (a^2 + b^2 * atanhee(1, e2)) / 2;\n S12 = 0 + swapp .* lonsign .* latsign .* (S12 + c2 * alp12);\n end\n\n [salp1(swapp<0), salp2(swapp<0)] = swap(salp1(swapp<0), salp2(swapp<0));\n [calp1(swapp<0), calp2(swapp<0)] = swap(calp1(swapp<0), calp2(swapp<0));\n if scalp\n [M12(swapp<0), M21(swapp<0)] = swap(M12(swapp<0), M21(swapp<0));\n end\n salp1 = salp1 .* swapp .* lonsign; calp1 = calp1 .* swapp .* latsign;\n salp2 = salp2 .* swapp .* lonsign; calp2 = calp2 .* swapp .* latsign;\n\n azi1 = 0 - atan2(-salp1, calp1) / degree;\n azi2 = 0 - atan2(-salp2, calp2) / degree;\n a12 = sig12 / degree;\n\n s12 = reshape(s12, S); azi1 = reshape(azi1, S); azi2 = reshape(azi2, S);\n m12 = 
reshape(m12, S); M12 = reshape(M12, S); M21 = reshape(M21, S);\n a12 = reshape(a12, S);\n if (areap)\n S12 = reshape(S12, S);\n end\nend\n\nfunction [sig12, salp1, calp1, salp2, calp2, dnm] = ...\n InverseStart(sbet1, cbet1, dn1, sbet2, cbet2, dn2, lam12, f, A3x)\n%INVERSESTART Compute a starting point for Newton's method\n\n N = length(sbet1);\n f1 = 1 - f;\n e2 = f * (2 - f);\n ep2 = e2 / (1 - e2);\n n = f / (2 - f);\n tol0 = eps;\n tol1 = 200 * tol0;\n tol2 = sqrt(eps);\n etol2 = 0.1 * tol2 / sqrt( max(0.001, abs(f)) * min(1, 1 - f/2) / 2 );\n xthresh = 1000 * tol2;\n\n sig12 = -ones(N, 1); salp2 = NaN(N, 1); calp2 = NaN(N, 1);\n sbet12 = sbet2 .* cbet1 - cbet2 .* sbet1;\n cbet12 = cbet2 .* cbet1 + sbet2 .* sbet1;\n sbet12a = sbet2 .* cbet1 + cbet2 .* sbet1;\n s = cbet12 >= 0 & sbet12 < 0.5 & cbet2 .* lam12 < 0.5;\n omg12 = lam12;\n dnm = NaN(N, 1);\n sbetm2 = (sbet1(s) + sbet2(s)).^2;\n sbetm2 = sbetm2 ./ (sbetm2 + (cbet1(s) + cbet2(s)).^2);\n dnm(s) = sqrt(1 + ep2 * sbetm2);\n omg12(s) = omg12(s) ./ (f1 * dnm(s));\n somg12 = sin(omg12); comg12 = cos(omg12);\n\n salp1 = cbet2 .* somg12;\n t = cbet2 .* sbet1 .* somg12.^2;\n calp1 = cvmgt(sbet12 + t ./ (1 + comg12), ...\n sbet12a - t ./ (1 - comg12), ...\n comg12 >= 0);\n\n ssig12 = hypot(salp1, calp1);\n csig12 = sbet1 .* sbet2 + cbet1 .* cbet2 .* comg12;\n\n s = s & ssig12 < etol2;\n salp2(s) = cbet1(s) .* somg12(s);\n calp2(s) = somg12(s).^2 ./ (1 + comg12(s));\n calp2(s & comg12 < 0) = 1 - comg12(s & comg12 < 0);\n calp2(s) = sbet12(s) - cbet1(s) .* sbet2(s) .* calp2(s);\n [salp2, calp2] = SinCosNorm(salp2, calp2);\n sig12(s) = atan2(ssig12(s), csig12(s));\n\n s = ~(s | abs(n) > 0.1 | csig12 >= 0 | ssig12 >= 6 * abs(n) * pi * cbet1.^2);\n\n if any(s)\n if f >= 0\n k2 = sbet1(s).^2 * ep2;\n epsi = k2 ./ (2 * (1 + sqrt(1 + k2)) + k2);\n lamscale = f * cbet1(s) .* A3f(epsi, A3x) * pi;\n betscale = lamscale .* cbet1(s);\n x = (lam12(s) - pi) ./ lamscale;\n y = sbet12a(s) ./ betscale;\n else\n cbet12a = cbet2(s) .* cbet1(s) - sbet2(s) .* sbet1(s);\n bet12a = atan2(sbet12a(s), cbet12a);\n [~, m12b, m0] = ...\n Lengths(n, pi + bet12a, ...\n sbet1(s), -cbet1(s), dn1(s), sbet2(s), cbet2(s), dn2(s), ...\n cbet1(s), cbet2(s), false);\n x = -1 + m12b ./ (cbet1(s) .* cbet2(s) .* m0 * pi);\n betscale = cvmgt(sbet12a(s) ./ x, - f * cbet1(s).^2 * pi, x < -0.01);\n lamscale = betscale ./ cbet1(s);\n y = (lam12(s) - pi) ./ lamscale;\n end\n k = Astroid(x, y);\n if f >= 0\n omg12a = -x .* k ./ (1 + k);\n else\n omg12a = -y .* (1 + k) ./ k;\n end\n omg12a = lamscale .* omg12a;\n somg12 = sin(omg12a); comg12 = -cos(omg12a);\n salp1(s) = cbet2(s) .* somg12;\n calp1(s) = sbet12a(s) - cbet2(s) .* sbet1(s) .* somg12.^2 ./ (1 - comg12);\n\n str = y > -tol1 & x > -1 - xthresh;\n if any(str)\n salp1s = salp1(s); calp1s = calp1(s);\n if f >= 0\n salp1s(str) = min(1, -x(str));\n calp1s(str) = -sqrt(1 - salp1s(str).^2);\n else\n calp1s(str) = max(cvmgt(0, -1, x(str) > -tol1), x(str));\n salp1s(str) = sqrt(1 - calp1s(str).^2);\n end\n salp1(s) = salp1s; calp1(s) = calp1s;\n end\n end\n\n calp1(salp1 <= 0) = 0; salp1(salp1 <= 0) = 1;\n [salp1, calp1] = SinCosNorm(salp1, calp1);\nend\n\nfunction k = Astroid(x, y)\n% ASTROID Solve the astroid equation\n%\n% K = ASTROID(X, Y) solves the quartic polynomial Eq. (55)\n%\n% K^4 + 2 * K^3 - (X^2 + Y^2 - 1) * K^2 - 2*Y^2 * K - Y^2 = 0\n%\n% for the positive root K. 
X and Y are column vectors of the same size\n% and the returned value K has the same size.\n\n k = zeros(length(x), 1);\n p = x.^2;\n q = y.^2;\n r = (p + q - 1) / 6;\n fl1 = ~(q == 0 & r <= 0);\n p = p(fl1);\n q = q(fl1);\n r = r(fl1);\n S = p .* q / 4;\n r2 = r.^2;\n r3 = r .* r2;\n disc = S .* (S + 2 * r3);\n u = r;\n fl2 = disc >= 0;\n T3 = S(fl2) + r3(fl2);\n T3 = T3 + (1 - 2 * (T3 < 0)) .* sqrt(disc(fl2));\n T = cbrt(T3);\n u(fl2) = u(fl2) + T + cvmgt(r2(fl2) ./ T, 0, T ~= 0);\n ang = atan2(sqrt(-disc(~fl2)), -(S(~fl2) + r3(~fl2)));\n u(~fl2) = u(~fl2) + 2 * r(~fl2) .* cos(ang / 3);\n v = sqrt(u.^2 + q);\n uv = u + v;\n fl2 = u < 0;\n uv(fl2) = q(fl2) ./ (v(fl2) - u(fl2));\n w = (uv - q) ./ (2 * v);\n k(fl1) = uv ./ (sqrt(uv + w.^2) + w);\nend\n\nfunction [lam12, dlam12, ...\n salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, epsi, domg12] = ...\n Lambda12(sbet1, cbet1, dn1, sbet2, cbet2, dn2, salp1, calp1, f, A3x, C3x)\n%LAMBDA12 Solve the hybrid problem\n\n tiny = sqrt(realmin);\n f1 = 1 - f;\n e2 = f * (2 - f);\n ep2 = e2 / (1 - e2);\n\n calp1(sbet1 == 0 & calp1 == 0) = -tiny;\n\n salp0 = salp1 .* cbet1;\n calp0 = hypot(calp1, salp1 .* sbet1);\n\n ssig1 = sbet1; somg1 = salp0 .* sbet1;\n csig1 = calp1 .* cbet1; comg1 = csig1;\n [ssig1, csig1] = SinCosNorm(ssig1, csig1);\n\n salp2 = cvmgt(salp0 ./ cbet2, salp1, cbet2 ~= cbet1);\n calp2 = cvmgt(sqrt((calp1 .* cbet1).^2 + ...\n cvmgt((cbet2 - cbet1) .* (cbet1 + cbet2), ...\n (sbet1 - sbet2) .* (sbet1 + sbet2), ...\n cbet1 < -sbet1)) ./ cbet2, ...\n abs(calp1), cbet2 ~= cbet1 | abs(sbet2) ~= -sbet1);\n ssig2 = sbet2; somg2 = salp0 .* sbet2;\n csig2 = calp2 .* cbet2; comg2 = csig2;\n [ssig2, csig2] = SinCosNorm(ssig2, csig2);\n\n sig12 = atan2(max(csig1 .* ssig2 - ssig1 .* csig2, 0), ...\n csig1 .* csig2 + ssig1 .* ssig2);\n\n omg12 = atan2(max(comg1 .* somg2 - somg1 .* comg2, 0), ...\n comg1 .* comg2 + somg1 .* somg2);\n k2 = calp0.^2 * ep2;\n epsi = k2 ./ (2 * (1 + sqrt(1 + k2)) + k2);\n C3a = C3f(epsi, C3x);\n B312 = SinCosSeries(true, ssig2, csig2, C3a) - ...\n SinCosSeries(true, ssig1, csig1, C3a);\n h0 = -f * A3f(epsi, A3x);\n domg12 = salp0 .* h0 .* (sig12 + B312);\n lam12 = omg12 + domg12;\n\n [~, dlam12] = ...\n Lengths(epsi, sig12, ...\n ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2, false);\n dlam12 = dlam12 .* f1 ./ (calp2 .* cbet2);\n z = calp2 == 0;\n dlam12(z) = - 2 * f1 .* dn1(z) ./ sbet1(z);\nend\n\nfunction [s12b, m12b, m0, M12, M21] = ...\n Lengths(epsi, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, ...\n cbet1, cbet2, scalp, ep2)\n%LENGTHS Compute various lengths associate with a geodesic\n\n if isempty(sig12)\n s12b = [];\n m12b = [];\n m0 = [];\n M12 = [];\n M21 = [];\n return\n end\n\n C1a = C1f(epsi);\n C2a = C2f(epsi);\n A1m1 = A1m1f(epsi);\n AB1 = (1 + A1m1) .* (SinCosSeries(true, ssig2, csig2, C1a) - ...\n SinCosSeries(true, ssig1, csig1, C1a));\n A2m1 = A2m1f(epsi);\n AB2 = (1 + A2m1) .* (SinCosSeries(true, ssig2, csig2, C2a) - ...\n SinCosSeries(true, ssig1, csig1, C2a));\n m0 = A1m1 - A2m1;\n J12 = m0 .* sig12 + (AB1 - AB2);\n m12b = dn2 .* (csig1 .* ssig2) - dn1 .* (ssig1 .* csig2) - ...\n csig1 .* csig2 .* J12;\n s12b = (1 + A1m1) .* sig12 + AB1;\n if scalp\n csig12 = csig1 .* csig2 + ssig1 .* ssig2;\n t = ep2 * (cbet1 - cbet2) .* (cbet1 + cbet2) ./ (dn1 + dn2);\n M12 = csig12 + (t .* ssig2 - csig2 .* J12) .* ssig1 ./ dn1;\n M21 = csig12 - (t .* ssig1 - csig1 .* J12) .* ssig2 ./ dn2;\n else\n M12 = sig12 + NaN; M21 = M12;\n end\nend\n"} +{"plateform": "github", "repo_name": 
"wvu-navLab/RobustGNSS-master", "name": "tranmerc_fwd.m", "ext": ".m", "path": "RobustGNSS-master/gtsam/gtsam/3rdparty/GeographicLib/matlab/tranmerc_fwd.m", "size": 5674, "source_encoding": "utf_8", "md5": "acff0226812f95bc17989337218cdde5", "text": "function [x, y, gam, k] = tranmerc_fwd(lat0, lon0, lat, lon, ellipsoid)\n%TRANMERC_FWD Forward transverse Mercator projection\n%\n% [X, Y] = TRANMERC_FWD(LAT0, LON0, LAT, LON)\n% [X, Y, GAM, K] = TRANMERC_FWD(LAT0, LON0, LAT, LON, ELLIPSOID)\n%\n% performs the forward transverse Mercator projection of points (LAT,LON)\n% to (X,Y) using (LAT0,LON0) as the center of projection. These input\n% arguments can be scalars or arrays of equal size. The ELLIPSOID vector\n% is of the form [a, e], where a is the equatorial radius in meters, e is\n% the eccentricity. If ellipsoid is omitted, the WGS84 ellipsoid (more\n% precisely, the value returned by DEFAULTELLIPSOID) is used. GEODPROJ\n% defines the projection and gives the restrictions on the allowed ranges\n% of the arguments. The inverse projection is given by TRANMERC_INV.\n%\n% GAM and K give metric properties of the projection at (LAT,LON); GAM is\n% the meridian convergence at the point and K is the scale.\n%\n% LAT0, LON0, LAT, LON, GAM are in degrees. The projected coordinates X,\n% Y are in meters (more precisely the units used for the equatorial\n% radius). K is dimensionless.\n%\n% This implementation of the projection is based on the series method\n% described in\n%\n% C. F. F. Karney, Transverse Mercator with an accuracy of a few\n% nanometers, J. Geodesy 85(8), 475-485 (Aug. 2011);\n% Addenda: http://geographiclib.sf.net/tm-addenda.html\n%\n% This extends the series given by Krueger (1912) to sixth order in the\n% flattening. This is a substantially better series than that used by\n% the MATLAB mapping toolbox. In particular the errors in the projection\n% are less than 5 nanometers withing 3900 km of the central meridian (and\n% less than 1 mm within 7600 km of the central meridian). 
The mapping\n% can be continued accurately over the poles to the opposite meridian.\n%\n% This routine depends on the MATLAB File Exchange package \"Geodesics on\n% an ellipsoid of revolution\":\n%\n% http://www.mathworks.com/matlabcentral/fileexchange/39108\n%\n% See also GEODPROJ, TRANMERC_INV, GEODDISTANCE, DEFAULTELLIPSOID.\n\n% Copyright (c) Charles Karney (2012) .\n%\n% This file was distributed with GeographicLib 1.29.\n\n if nargin < 4, error('Too few input arguments'), end\n if nargin < 5, ellipsoid = defaultellipsoid; end\n try\n Z = lat0 + lon0 + lat + lon;\n Z = zeros(size(Z));\n catch err\n error('lat0, lon0, lat, lon have incompatible sizes')\n end\n if length(ellipsoid(:)) ~= 2\n error('ellipsoid must be a vector of size 2')\n end\n\n degree = pi/180;\n maxpow = 6;\n\n a = ellipsoid(1);\n f = ecc2flat(ellipsoid(2));\n e2 = f * (2 - f);\n e2m = 1 - e2;\n cc = sqrt(e2m) * exp(e2 * atanhee(1, e2));\n n = f / (2 -f);\n alp = alpf(n);\n b1 = (1 - f) * (A1m1f(n) + 1);\n a1 = b1 * a;\n\n lon = AngDiff(AngNormalize(lon0), AngNormalize(lon));\n\n latsign = 1 - 2 * (lat < 0);\n lonsign = 1 - 2 * (lon < 0);\n lon = lon .* lonsign;\n lat = lat .* latsign;\n backside = lon > 90;\n latsign(backside & lat == 0) = -1;\n lon(backside) = 180 - lon(backside);\n phi = lat * degree;\n lam = lon * degree;\n c = max(0, cos(lam));\n tau = tan(phi);\n taup = taupf(tau, e2);\n xip = atan2(taup, c);\n etap = asinh(sin(lam) ./ hypot(taup, c));\n gam = atan(tan(lam) .* taup ./ hypot(1, taup));\n k = sqrt(e2m + e2 * cos(phi).^2) .* hypot(1, tau) ./ hypot(taup, c);\n c = ~(lat ~= 90);\n if any(c)\n xip(c) = pi/2;\n etap(c) = 0;\n gam(c) = lam;\n k = cc;\n end\n c0 = cos(2 * xip); ch0 = cosh(2 * etap);\n s0 = sin(2 * xip); sh0 = sinh(2 * etap);\n ar = 2 * c0 .* ch0; ai = -2 * s0 .* sh0;\n j = maxpow;\n xi0 = Z; yr0 = Z;\n if mod(j, 2)\n xi0 = xi0 + alp(j);\n yr0 = yr0 + 2 * maxpow * alp(j);\n j = j - 1;\n end\n xi1 = Z; eta0 = Z; eta1 = Z;\n yi0 = Z; yr1 = Z; yi1 = Z;\n for j = j : -2 : 1\n xi1 = ar .* xi0 - ai .* eta0 - xi1 + alp(j);\n eta1 = ai .* xi0 + ar .* eta0 - eta1;\n yr1 = ar .* yr0 - ai .* yi0 - yr1 + 2 * j * alp(j);\n yi1 = ai .* yr0 + ar .* yi0 - yi1;\n xi0 = ar .* xi1 - ai .* eta1 - xi0 + alp(j-1);\n eta0 = ai .* xi1 + ar .* eta1 - eta0;\n yr0 = ar .* yr1 - ai .* yi1 - yr0 + 2 * (j-1) * alp(j-1);\n yi0 = ai .* yr1 + ar .* yi1 - yi0;\n end\n ar = ar/2; ai = ai/2;\n yr1 = 1 - yr1 + ar .* yr0 - ai .* yi0;\n yi1 = - yi1 + ai .* yr0 + ar .* yi0;\n ar = s0 .* ch0; ai = c0 .* sh0;\n xi = xip + ar .* xi0 - ai .* eta0;\n eta = etap + ai .* xi0 + ar .* eta0;\n gam = gam - atan2(yi1, yr1);\n k = k .* (b1 * hypot(yr1, yi1));\n gam = gam / degree;\n xi(backside) = pi - xi(backside);\n y = a1 * xi .* latsign;\n x = a1 * eta .* lonsign;\n gam(backside) = 180 - gam(backside);\n gam = gam .* latsign .* lonsign;\n\n if isscalar(lat0) && lat0 == 0\n y0 = 0;\n else\n [sbet0, cbet0] = SinCosNorm((1-f) * sind(lat0), cosd(lat0));\n y0 = a1 * (atan2(sbet0, cbet0) + ...\n SinCosSeries(true, sbet0, cbet0, C1f(n)));\n end\n y = y - y0;\nend\n\nfunction alp = alpf(n)\n alp = zeros(1,6);\n nx = n^2;\n\n alp(1) = n*(n*(n*(n*(n*(31564*n-66675)+34440)+47250)-100800)+ ...\n 75600)/151200;\n alp(2) = nx*(n*(n*((863232-1983433*n)*n+748608)-1161216)+524160)/ ...\n 1935360;\n nx = nx * n;\n alp(3) = nx*(n*(n*(670412*n+406647)-533952)+184464)/725760;\n nx = nx * n;\n alp(4) = nx*(n*(6601661*n-7732800)+2230245)/7257600;\n nx = nx * n;\n alp(5) = (3438171-13675556*n)*nx/7983360;\n nx = nx * n;\n alp(6) = 
212378941*nx/319334400;\nend\n\nfunction taup = taupf(tau, e2)\n tau1 = hypot(1, tau);\n sig = sinh( e2 * atanhee(tau ./ tau1, e2) );\n taup = hypot(1, sig) .* tau - sig .* tau1;\n overflow = 1/eps^2;\n c = ~(abs(tau) < overflow);\n taup(c) = tau(c);\nend\n"} +{"plateform": "github", "repo_name": "wvu-navLab/RobustGNSS-master", "name": "tranmerc_inv.m", "ext": ".m", "path": "RobustGNSS-master/gtsam/gtsam/3rdparty/GeographicLib/matlab/tranmerc_inv.m", "size": 5994, "source_encoding": "utf_8", "md5": "3ccf6b37ca13daed68a0ae8f166151ce", "text": "function [lat, lon, gam, k] = tranmerc_inv(lat0, lon0, x, y, ellipsoid)\n%TRANMERC_INV Inverse transverse Mercator projection\n%\n% [LAT, LON] = TRANMERC_INV(LAT0, LON0, X, Y)\n% [LAT, LON, GAM, K] = TRANMERC_INV(LAT0, LON0, X, Y, ELLIPSOID)\n%\n% performs the inverse transverse Mercator projection of points (X,Y) to\n% (LAT,LON) using (LAT0,LON0) as the center of projection. These input\n% arguments can be scalars or arrays of equal size. The ELLIPSOID vector\n% is of the form [a, e], where a is the equatorial radius in meters, e is\n% the eccentricity. If ellipsoid is omitted, the WGS84 ellipsoid (more\n% precisely, the value returned by DEFAULTELLIPSOID) is used. GEODPROJ\n% defines the projection and gives the restrictions on the allowed ranges\n% of the arguments. The forward projection is given by TRANMERC_FWD.\n%\n% GAM and K give metric properties of the projection at (LAT,LON); GAM is\n% the meridian convergence at the point and K is the scale.\n%\n% LAT0, LON0, LAT, LON, GAM are in degrees. The projected coordinates X,\n% Y are in meters (more precisely the units used for the equatorial\n% radius). K is dimensionless.\n%\n% This implementation of the projection is based on the series method\n% described in\n%\n% C. F. F. Karney, Transverse Mercator with an accuracy of a few\n% nanometers, J. Geodesy 85(8), 475-485 (Aug. 2011);\n% Addenda: http://geographiclib.sf.net/tm-addenda.html\n%\n% This extends the series given by Krueger (1912) to sixth order in the\n% flattening. This is a substantially better series than that used by\n% the MATLAB mapping toolbox. In particular the errors in the projection\n% are less than 5 nanometers withing 3900 km of the central meridian (and\n% less than 1 mm within 7600 km of the central meridian). 
The mapping\n% can be continued accurately over the poles to the opposite meridian.\n%\n% This routine depends on the MATLAB File Exchange package \"Geodesics on\n% an ellipsoid of revolution\":\n%\n% http://www.mathworks.com/matlabcentral/fileexchange/39108\n%\n% See also GEODPROJ, TRANMERC_FWD, GEODRECKON, DEFAULTELLIPSOID.\n\n% Copyright (c) Charles Karney (2012) .\n%\n% This file was distributed with GeographicLib 1.29.\n\n if nargin < 4, error('Too few input arguments'), end\n if nargin < 5, ellipsoid = defaultellipsoid; end\n try\n Z = lat0 + lon0 + x + y;\n Z = zeros(size(Z));\n catch err\n error('lat0, lon0, x, y have incompatible sizes')\n end\n if length(ellipsoid(:)) ~= 2\n error('ellipsoid must be a vector of size 2')\n end\n\n degree = pi/180;\n maxpow = 6;\n\n a = ellipsoid(1);\n f = ecc2flat(ellipsoid(2));\n e2 = f * (2 - f);\n e2m = 1 - e2;\n cc = sqrt(e2m) * exp(e2 * atanhee(1, e2));\n n = f / (2 -f);\n bet = betf(n);\n b1 = (1 - f) * (A1m1f(n) + 1);\n a1 = b1 * a;\n\n if isscalar(lat0) && lat0 == 0\n y0 = 0;\n else\n [sbet0, cbet0] = SinCosNorm((1-f) * sind(lat0), cosd(lat0));\n y0 = a1 * (atan2(sbet0, cbet0) + ...\n SinCosSeries(true, sbet0, cbet0, C1f(n)));\n end\n y = y + y0;\n\n xi = y / a1;\n eta = x / a1;\n xisign = 1 - 2 * (xi < 0 );\n etasign = 1 - 2 * (eta < 0 );\n xi = xi .* xisign;\n eta = eta .* etasign;\n backside = xi > pi/2;\n xi(backside) = pi - xi(backside);\n\n c0 = cos(2 * xi); ch0 = cosh(2 * eta);\n s0 = sin(2 * xi); sh0 = sinh(2 * eta);\n ar = 2 * c0 .* ch0; ai = -2 * s0 .* sh0;\n j = maxpow;\n xip0 = Z; yr0 = Z;\n if mod(j, 2)\n xip0 = xip0 + bet(j);\n yr0 = yr0 - 2 * maxpow * bet(j);\n j = j - 1;\n end\n xip1 = Z; etap0 = Z; etap1 = Z;\n yi0 = Z; yr1 = Z; yi1 = Z;\n for j = j : -2 : 1\n xip1 = ar .* xip0 - ai .* etap0 - xip1 - bet(j);\n etap1 = ai .* xip0 + ar .* etap0 - etap1;\n yr1 = ar .* yr0 - ai .* yi0 - yr1 - 2 * j * bet(j);\n yi1 = ai .* yr0 + ar .* yi0 - yi1;\n xip0 = ar .* xip1 - ai .* etap1 - xip0 - bet(j-1);\n etap0 = ai .* xip1 + ar .* etap1 - etap0;\n yr0 = ar .* yr1 - ai .* yi1 - yr0 - 2 * (j-1) * bet(j-1);\n yi0 = ai .* yr1 + ar .* yi1 - yi0;\n end\n ar = ar/2; ai = ai/2;\n yr1 = 1 - yr1 + ar .* yr0 - ai .* yi0;\n yi1 = - yi1 + ai .* yr0 + ar .* yi0;\n ar = s0 .* ch0; ai = c0 .* sh0;\n xip = xi + ar .* xip0 - ai .* etap0;\n etap = eta + ai .* xip0 + ar .* etap0;\n gam = atan2(yi1, yr1);\n k = b1 ./ hypot(yr1, yi1);\n s = sinh(etap);\n c = max(0, cos(xip));\n r = hypot(s, c);\n lam = atan2(s, c);\n taup = sin(xip)./r;\n tau = tauf(taup, e2);\n phi = atan(tau);\n gam = gam + atan(tan(xip) .* tanh(etap));\n c = r ~= 0;\n k(c) = k(c) .* sqrt(e2m + e2 * cos(phi(c)).^2) .* ...\n hypot(1, tau(c)) .* r(c);\n c = ~c;\n if any(c)\n phi(c) = pi/2;\n lam(c) = 0;\n k(c) = k(c) * cc;\n end\n lat = phi / degree .* xisign;\n lon = lam / degree;\n lon(backside) = 180 - lon(backside);\n lon = lon .* etasign;\n lon = AngNormalize(lon + AngNormalize(lon0));\n gam = gam/degree;\n gam(backside) = 180 - gam(backside);\n gam = gam .* xisign .* etasign;\nend\n\nfunction bet = betf(n)\n bet = zeros(1,6);\n nx = n^2;\n bet(1) = n*(n*(n*(n*(n*(384796*n-382725)-6720)+932400)-1612800)+ ...\n 1209600)/2419200;\n bet(2) = nx*(n*(n*((1695744-1118711*n)*n-1174656)+258048)+80640)/ ...\n 3870720;\n nx = nx * n;\n bet(3) = nx*(n*(n*(22276*n-16929)-15984)+12852)/362880;\n nx = nx * n;\n bet(4) = nx*((-830251*n-158400)*n+197865)/7257600;\n nx = nx * n;\n bet(5) = (453717-435388*n)*nx/15966720;\n nx = nx * n;\n bet(6) = 20648693*nx/638668800;\nend\n\nfunction tau = 
tauf(taup, e2)\n overflow = 1/eps^2;\n tol = 0.1 * sqrt(eps);\n numit = 5;\n e2m = 1 - e2;\n tau = taup / e2m;\n stol = tol * max(1, abs(taup));\n g = ~(abs(taup) < overflow);\n tau(g) = taup(g);\n g = ~g;\n for i = 1 : numit\n if ~any(g), break, end\n tau1 = hypot(1, tau);\n sig = sinh(e2 * atanhee( tau ./ tau1, e2 ) );\n taupa = hypot(1, sig) .* tau - sig .* tau1;\n dtau = (taup - taupa) .* (1 + e2m .* tau.^2) ./ ...\n (e2m * tau1 .* hypot(1, taupa));\n tau(g) = tau(g) + dtau(g);\n g = g & abs(dtau) >= stol;\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submit.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex2/ex2/submit.m", "size": 1605, "source_encoding": "utf_8", "md5": "9b63d386e9bd7bcca66b1a3d2fa37579", "text": "function submit()\n addpath('./lib');\n\n conf.assignmentSlug = 'logistic-regression';\n conf.itemName = 'Logistic Regression';\n conf.partArrays = { ...\n { ...\n '1', ...\n { 'sigmoid.m' }, ...\n 'Sigmoid Function', ...\n }, ...\n { ...\n '2', ...\n { 'costFunction.m' }, ...\n 'Logistic Regression Cost', ...\n }, ...\n { ...\n '3', ...\n { 'costFunction.m' }, ...\n 'Logistic Regression Gradient', ...\n }, ...\n { ...\n '4', ...\n { 'predict.m' }, ...\n 'Predict', ...\n }, ...\n { ...\n '5', ...\n { 'costFunctionReg.m' }, ...\n 'Regularized Logistic Regression Cost', ...\n }, ...\n { ...\n '6', ...\n { 'costFunctionReg.m' }, ...\n 'Regularized Logistic Regression Gradient', ...\n }, ...\n };\n conf.output = @output;\n\n submitWithConfiguration(conf);\nend\n\nfunction out = output(partId, auxstring)\n % Random Test Cases\n X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))'];\n y = sin(X(:,1) + X(:,2)) > 0;\n if partId == '1'\n out = sprintf('%0.5f ', sigmoid(X));\n elseif partId == '2'\n out = sprintf('%0.5f ', costFunction([0.25 0.5 -0.5]', X, y));\n elseif partId == '3'\n [cost, grad] = costFunction([0.25 0.5 -0.5]', X, y);\n out = sprintf('%0.5f ', grad);\n elseif partId == '4'\n out = sprintf('%0.5f ', predict([0.25 0.5 -0.5]', X));\n elseif partId == '5'\n out = sprintf('%0.5f ', costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1));\n elseif partId == '6'\n [cost, grad] = costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1);\n out = sprintf('%0.5f ', grad);\n end \nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submitWithConfiguration.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex2/ex2/lib/submitWithConfiguration.m", "size": 3734, "source_encoding": "utf_8", "md5": "84d9a81848f6d00a7aff4f79bdbb6049", "text": "function submitWithConfiguration(conf)\n addpath('./lib/jsonlab');\n\n parts = parts(conf);\n\n fprintf('== Submitting solutions | %s...\\n', conf.itemName);\n\n tokenFile = 'token.mat';\n if exist(tokenFile, 'file')\n load(tokenFile);\n [email token] = promptToken(email, token, tokenFile);\n else\n [email token] = promptToken('', '', tokenFile);\n end\n\n if isempty(token)\n fprintf('!! Submission Cancelled\\n');\n return\n end\n\n try\n response = submitParts(conf, email, token, parts);\n catch\n e = lasterror();\n fprintf( ...\n '!! Submission failed: unexpected error: %s\\n', ...\n e.message);\n fprintf('!! Please try again later.\\n');\n return\n end\n\n if isfield(response, 'errorMessage')\n fprintf('!! 
Submission failed: %s\\n', response.errorMessage);\n else\n showFeedback(parts, response);\n save(tokenFile, 'email', 'token');\n end\nend\n\nfunction [email token] = promptToken(email, existingToken, tokenFile)\n if (~isempty(email) && ~isempty(existingToken))\n prompt = sprintf( ...\n 'Use token from last successful submission (%s)? (Y/n): ', ...\n email);\n reenter = input(prompt, 's');\n\n if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')\n token = existingToken;\n return;\n else\n delete(tokenFile);\n end\n end\n email = input('Login (email address): ', 's');\n token = input('Token: ', 's');\nend\n\nfunction isValid = isValidPartOptionIndex(partOptions, i)\n isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));\nend\n\nfunction response = submitParts(conf, email, token, parts)\n body = makePostBody(conf, email, token, parts);\n submissionUrl = submissionUrl();\n params = {'jsonBody', body};\n responseBody = urlread(submissionUrl, 'post', params);\n response = loadjson(responseBody);\nend\n\nfunction body = makePostBody(conf, email, token, parts)\n bodyStruct.assignmentSlug = conf.assignmentSlug;\n bodyStruct.submitterEmail = email;\n bodyStruct.secret = token;\n bodyStruct.parts = makePartsStruct(conf, parts);\n\n opt.Compact = 1;\n body = savejson('', bodyStruct, opt);\nend\n\nfunction partsStruct = makePartsStruct(conf, parts)\n for part = parts\n partId = part{:}.id;\n fieldName = makeValidFieldName(partId);\n outputStruct.output = conf.output(partId);\n partsStruct.(fieldName) = outputStruct;\n end\nend\n\nfunction [parts] = parts(conf)\n parts = {};\n for partArray = conf.partArrays\n part.id = partArray{:}{1};\n part.sourceFiles = partArray{:}{2};\n part.name = partArray{:}{3};\n parts{end + 1} = part;\n end\nend\n\nfunction showFeedback(parts, response)\n fprintf('== \\n');\n fprintf('== %43s | %9s | %-s\\n', 'Part Name', 'Score', 'Feedback');\n fprintf('== %43s | %9s | %-s\\n', '---------', '-----', '--------');\n for part = parts\n score = '';\n partFeedback = '';\n partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));\n partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));\n score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);\n fprintf('== %43s | %9s | %-s\\n', part{:}.name, score, partFeedback);\n end\n evaluation = response.evaluation;\n totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);\n fprintf('== --------------------------------\\n');\n fprintf('== %43s | %9s | %-s\\n', '', totalScore, '');\n fprintf('== \\n');\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Service configuration\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction submissionUrl = submissionUrl()\n submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "savejson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex2/ex2/lib/jsonlab/savejson.m", "size": 17462, "source_encoding": "utf_8", "md5": "861b534fc35ffe982b53ca3ca83143bf", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% 
author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(opt.FileName, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(opt.FileName, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nif(len>1)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": [%s',padding0, checkname(name,varargin{:}),nl); name=''; \n else\n txt=sprintf('%s[%s',padding0,nl); \n end\nelseif(len==0)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": []',padding0, checkname(name,varargin{:})); name=''; \n else\n txt=sprintf('%s[]',padding0); \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n 
txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));\n if(i1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));\nnl=ws.newline;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding0,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding0,nl); end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=sprintf('%s%s\"%s\": {%s',txt,padding1, checkname(name,varargin{:}),nl); \n else\n txt=sprintf('%s%s{%s',txt,padding1,nl); \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));\n if(e1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2json(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding1,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding1,nl); end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n if(isoct)\n val=regexprep(item(e,:),'\\\\','\\\\');\n val=regexprep(val,'\"','\\\"');\n val=regexprep(val,'^\"','\\\"');\n else\n val=regexprep(item(e,:),'\\\\','\\\\\\\\');\n val=regexprep(val,'\"','\\\\\"');\n val=regexprep(val,'^\"','\\\\\"');\n end\n val=escapejsonstring(val);\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n\tif(isempty(name)) obj=['\"',val,'\"']; end\n txt=sprintf('%s%s%s%s',txt,padding1,obj);\n else\n txt=sprintf('%s%s%s%s',txt,padding0,['\"',val,'\"']);\n end\n if(e==len) sep=''; end\n txt=sprintf('%s%s',txt,sep);\nend\nif(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": 
%s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='null';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) 
'[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8) isoct=0; end\nend\nif(isoct)\n escapechars={'\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\nelse\n escapechars={'\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex2/ex2/lib/jsonlab/loadjson.m", "size": 18732, "source_encoding": "ibm852", "md5": "ab98cf173af2d50bbe8da4d6db252a20", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. 
The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n 
if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=data(j).x0x5F_ArraySize_;\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=jsonopt('progressbar_',-1,varargin{:});\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n 
end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str); switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos), keyboard\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct\n currstr=inStr(pos:end);\n numstr=0;\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num, one] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n \n pbar=jsonopt('progressbar_',-1,varargin{:});\n if(pbar>0)\n 
waitbar(pos/len,pbar,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadubjson.m", "ext": ".m", "path": 
"AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex2/ex2/lib/jsonlab/loadubjson.m", "size": 15574, "source_encoding": "utf_8", "md5": "5974e78e71b81b1e0f76123784b951a4", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\n\n\n%%\nfunction newdata=parse_collection(id,data,obj)\n\nif(jsoncount>0 && exist('data','var')) \n if(~iscell(data))\n newdata=cell(1);\n newdata{1}=data;\n data=newdata;\n end\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n 
newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=double(data(j).x0x5F_ArraySize_);\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n 
newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n 
global pos inStr len\n true = 1; false = 0;\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos e1l e1r maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "saveubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex2/ex2/lib/jsonlab/saveubjson.m", "size": 16123, "source_encoding": "utf_8", "md5": "61d4f51010aedbf97753396f5d2d9ec0", "text": 
"function json=saveubjson(rootname,obj,varargin)\n%\n% json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n fid = fopen(opt.FileName, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item); % let's handle 1D cell first\nif(len>1) \n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=[txt S_(checkname(name,varargin{:})) '{']; \n else\n 
txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];\n\tif(isempty(name)) obj=['',S_(val),'']; end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[S_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[S_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,S_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\nif(size(mat,1)==1)\n level=level-1;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n 
if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(find(id)))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(find(id));\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || 
strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submit.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex4/ex4/submit.m", "size": 1635, "source_encoding": "utf_8", "md5": "ae9c236c78f9b5b09db8fbc2052990fc", "text": "function submit()\n addpath('./lib');\n\n conf.assignmentSlug = 'neural-network-learning';\n conf.itemName = 'Neural Networks Learning';\n conf.partArrays = { ...\n { ...\n '1', ...\n { 'nnCostFunction.m' }, ...\n 'Feedforward and Cost Function', ...\n }, ...\n { ...\n '2', ...\n { 'nnCostFunction.m' }, ...\n 'Regularized Cost Function', ...\n }, ...\n { ...\n '3', ...\n { 'sigmoidGradient.m' }, ...\n 'Sigmoid Gradient', ...\n }, ...\n { ...\n '4', ...\n { 'nnCostFunction.m' }, ...\n 'Neural Network Gradient (Backpropagation)', ...\n }, ...\n { ...\n '5', ...\n { 'nnCostFunction.m' }, ...\n 'Regularized Gradient', ...\n }, ...\n };\n conf.output = @output;\n\n submitWithConfiguration(conf);\nend\n\nfunction out = output(partId, auxstring)\n % Random Test Cases\n X = reshape(3 * sin(1:1:30), 3, 10);\n Xm = reshape(sin(1:32), 16, 2) / 5;\n ym = 1 + mod(1:16,4)';\n t1 = sin(reshape(1:2:24, 4, 3));\n t2 = cos(reshape(1:2:40, 4, 5));\n t = [t1(:) ; t2(:)];\n if partId == '1'\n [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);\n out = sprintf('%0.5f ', J);\n elseif partId == '2'\n [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);\n out = sprintf('%0.5f ', J);\n elseif partId == '3'\n out = sprintf('%0.5f ', sigmoidGradient(X));\n elseif partId == '4'\n [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);\n out = sprintf('%0.5f ', J);\n out = [out sprintf('%0.5f ', grad)];\n elseif partId == '5'\n [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);\n out = sprintf('%0.5f ', J);\n out = [out sprintf('%0.5f ', grad)];\n end \nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submitWithConfiguration.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex4/ex4/lib/submitWithConfiguration.m", "size": 3734, "source_encoding": "utf_8", "md5": 
"84d9a81848f6d00a7aff4f79bdbb6049", "text": "function submitWithConfiguration(conf)\n addpath('./lib/jsonlab');\n\n parts = parts(conf);\n\n fprintf('== Submitting solutions | %s...\\n', conf.itemName);\n\n tokenFile = 'token.mat';\n if exist(tokenFile, 'file')\n load(tokenFile);\n [email token] = promptToken(email, token, tokenFile);\n else\n [email token] = promptToken('', '', tokenFile);\n end\n\n if isempty(token)\n fprintf('!! Submission Cancelled\\n');\n return\n end\n\n try\n response = submitParts(conf, email, token, parts);\n catch\n e = lasterror();\n fprintf( ...\n '!! Submission failed: unexpected error: %s\\n', ...\n e.message);\n fprintf('!! Please try again later.\\n');\n return\n end\n\n if isfield(response, 'errorMessage')\n fprintf('!! Submission failed: %s\\n', response.errorMessage);\n else\n showFeedback(parts, response);\n save(tokenFile, 'email', 'token');\n end\nend\n\nfunction [email token] = promptToken(email, existingToken, tokenFile)\n if (~isempty(email) && ~isempty(existingToken))\n prompt = sprintf( ...\n 'Use token from last successful submission (%s)? (Y/n): ', ...\n email);\n reenter = input(prompt, 's');\n\n if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')\n token = existingToken;\n return;\n else\n delete(tokenFile);\n end\n end\n email = input('Login (email address): ', 's');\n token = input('Token: ', 's');\nend\n\nfunction isValid = isValidPartOptionIndex(partOptions, i)\n isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));\nend\n\nfunction response = submitParts(conf, email, token, parts)\n body = makePostBody(conf, email, token, parts);\n submissionUrl = submissionUrl();\n params = {'jsonBody', body};\n responseBody = urlread(submissionUrl, 'post', params);\n response = loadjson(responseBody);\nend\n\nfunction body = makePostBody(conf, email, token, parts)\n bodyStruct.assignmentSlug = conf.assignmentSlug;\n bodyStruct.submitterEmail = email;\n bodyStruct.secret = token;\n bodyStruct.parts = makePartsStruct(conf, parts);\n\n opt.Compact = 1;\n body = savejson('', bodyStruct, opt);\nend\n\nfunction partsStruct = makePartsStruct(conf, parts)\n for part = parts\n partId = part{:}.id;\n fieldName = makeValidFieldName(partId);\n outputStruct.output = conf.output(partId);\n partsStruct.(fieldName) = outputStruct;\n end\nend\n\nfunction [parts] = parts(conf)\n parts = {};\n for partArray = conf.partArrays\n part.id = partArray{:}{1};\n part.sourceFiles = partArray{:}{2};\n part.name = partArray{:}{3};\n parts{end + 1} = part;\n end\nend\n\nfunction showFeedback(parts, response)\n fprintf('== \\n');\n fprintf('== %43s | %9s | %-s\\n', 'Part Name', 'Score', 'Feedback');\n fprintf('== %43s | %9s | %-s\\n', '---------', '-----', '--------');\n for part = parts\n score = '';\n partFeedback = '';\n partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));\n partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));\n score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);\n fprintf('== %43s | %9s | %-s\\n', part{:}.name, score, partFeedback);\n end\n evaluation = response.evaluation;\n totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);\n fprintf('== --------------------------------\\n');\n fprintf('== %43s | %9s | %-s\\n', '', totalScore, '');\n fprintf('== \\n');\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Service 
configuration\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction submissionUrl = submissionUrl()\n submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "savejson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex4/ex4/lib/jsonlab/savejson.m", "size": 17462, "source_encoding": "utf_8", "md5": "861b534fc35ffe982b53ca3ca83143bf", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. 
For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... \n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(opt.FileName, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(opt.FileName, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n 
error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nif(len>1)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": [%s',padding0, checkname(name,varargin{:}),nl); name=''; \n else\n txt=sprintf('%s[%s',padding0,nl); \n end\nelseif(len==0)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": []',padding0, checkname(name,varargin{:})); name=''; \n else\n txt=sprintf('%s[]',padding0); \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));\n if(i1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));\nnl=ws.newline;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding0,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding0,nl); end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=sprintf('%s%s\"%s\": {%s',txt,padding1, checkname(name,varargin{:}),nl); \n else\n txt=sprintf('%s%s{%s',txt,padding1,nl); \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));\n if(e1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2json(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding1,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding1,nl); end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n if(isoct)\n val=regexprep(item(e,:),'\\\\','\\\\');\n val=regexprep(val,'\"','\\\"');\n val=regexprep(val,'^\"','\\\"');\n else\n val=regexprep(item(e,:),'\\\\','\\\\\\\\');\n val=regexprep(val,'\"','\\\\\"');\n val=regexprep(val,'^\"','\\\\\"');\n end\n val=escapejsonstring(val);\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n\tif(isempty(name)) obj=['\"',val,'\"']; end\n 
txt=sprintf('%s%s%s%s',txt,padding1,obj);\n else\n txt=sprintf('%s%s%s%s',txt,padding0,['\"',val,'\"']);\n end\n if(e==len) sep=''; end\n txt=sprintf('%s%s',txt,sep);\nend\nif(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n 
post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='null';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8) isoct=0; end\nend\nif(isoct)\n escapechars={'\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\nelse\n escapechars={'\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex4/ex4/lib/jsonlab/loadjson.m", "size": 18732, "source_encoding": "ibm852", "md5": "ab98cf173af2d50bbe8da4d6db252a20", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% 
input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction 
newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=data(j).x0x5F_ArraySize_;\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=jsonopt('progressbar_',-1,varargin{:});\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, 
nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str); switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos), keyboard\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of 
inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct\n currstr=inStr(pos:end);\n numstr=0;\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num, one] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n \n pbar=jsonopt('progressbar_',-1,varargin{:});\n if(pbar>0)\n waitbar(pos/len,pbar,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation 
mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex4/ex4/lib/jsonlab/loadubjson.m", "size": 15574, "source_encoding": "utf_8", "md5": "5974e78e71b81b1e0f76123784b951a4", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] 
are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\n\n\n%%\nfunction newdata=parse_collection(id,data,obj)\n\nif(jsoncount>0 && exist('data','var')) \n if(~iscell(data))\n newdata=cell(1);\n newdata{1}=data;\n data=newdata;\n end\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=double(data(j).x0x5F_ArraySize_);\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n 
ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction 
parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n 
if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos e1l e1r maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "saveubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex4/ex4/lib/jsonlab/saveubjson.m", "size": 16123, "source_encoding": "utf_8", "md5": "61d4f51010aedbf97753396f5d2d9ec0", "text": "function json=saveubjson(rootname,obj,varargin)\n%\n% json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] 
is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n fid = fopen(opt.FileName, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item); % let's handle 1D cell first\nif(len>1) \n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=[txt S_(checkname(name,varargin{:})) '{']; \n else\n 
txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];\n\tif(isempty(name)) obj=['',S_(val),'']; end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[S_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[S_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,S_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\nif(size(mat,1)==1)\n level=level-1;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n 
if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(find(id)))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(find(id));\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || 
strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submit.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/submit.m", "size": 1318, "source_encoding": "utf_8", "md5": "bfa0b4ffb8a7854d8e84276e91818107", "text": "function submit()\n addpath('./lib');\n\n conf.assignmentSlug = 'support-vector-machines';\n conf.itemName = 'Support Vector Machines';\n conf.partArrays = { ...\n { ...\n '1', ...\n { 'gaussianKernel.m' }, ...\n 'Gaussian Kernel', ...\n }, ...\n { ...\n '2', ...\n { 'dataset3Params.m' }, ...\n 'Parameters (C, sigma) for Dataset 3', ...\n }, ...\n { ...\n '3', ...\n { 'processEmail.m' }, ...\n 'Email Preprocessing', ...\n }, ...\n { ...\n '4', ...\n { 'emailFeatures.m' }, ...\n 'Email Feature Extraction', ...\n }, ...\n };\n conf.output = @output;\n\n submitWithConfiguration(conf);\nend\n\nfunction out = output(partId, auxstring)\n % Random Test Cases\n x1 = sin(1:10)';\n x2 = cos(1:10)';\n ec = 'the quick brown fox jumped over the lazy dog';\n wi = 1 + abs(round(x1 * 1863));\n wi = [wi ; wi];\n if partId == '1'\n sim = gaussianKernel(x1, x2, 2);\n out = sprintf('%0.5f ', sim);\n elseif partId == '2'\n load('ex6data3.mat');\n [C, sigma] = dataset3Params(X, y, Xval, yval);\n out = sprintf('%0.5f ', C);\n out = [out sprintf('%0.5f ', sigma)];\n elseif partId == '3'\n word_indices = processEmail(ec);\n out = sprintf('%d ', word_indices);\n elseif partId == '4'\n x = emailFeatures(wi);\n out = sprintf('%d ', x);\n end \nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "porterStemmer.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/porterStemmer.m", "size": 9902, "source_encoding": "utf_8", "md5": "7ed5acd925808fde342fc72bd62ebc4d", "text": "function stem = porterStemmer(inString)\n% Applies the Porter Stemming algorithm as presented in the following\n% paper:\n% Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,\n% no. 
3, pp 130-137\n\n% Original code modeled after the C version provided at:\n% http://www.tartarus.org/~martin/PorterStemmer/c.txt\n\n% The main part of the stemming algorithm starts here. b is an array of\n% characters, holding the word to be stemmed. The letters are in b[k0],\n% b[k0+1] ending at b[k]. In fact k0 = 1 in this demo program (since\n% matlab begins indexing by 1 instead of 0). k is readjusted downwards as\n% the stemming progresses. Zero termination is not in fact used in the\n% algorithm.\n\n% To call this function, use the string to be stemmed as the input\n% argument. This function returns the stemmed word as a string.\n\n% Lower-case string\ninString = lower(inString);\n\nglobal j;\nb = inString;\nk = length(b);\nk0 = 1;\nj = k;\n\n\n\n% With this if statement, strings of length 1 or 2 don't go through the\n% stemming process. Remove this conditional to match the published\n% algorithm.\nstem = b;\nif k > 2\n % Output displays per step are commented out.\n %disp(sprintf('Word to stem: %s', b));\n x = step1ab(b, k, k0);\n %disp(sprintf('Steps 1A and B yield: %s', x{1}));\n x = step1c(x{1}, x{2}, k0);\n %disp(sprintf('Step 1C yields: %s', x{1}));\n x = step2(x{1}, x{2}, k0);\n %disp(sprintf('Step 2 yields: %s', x{1}));\n x = step3(x{1}, x{2}, k0);\n %disp(sprintf('Step 3 yields: %s', x{1}));\n x = step4(x{1}, x{2}, k0);\n %disp(sprintf('Step 4 yields: %s', x{1}));\n x = step5(x{1}, x{2}, k0);\n %disp(sprintf('Step 5 yields: %s', x{1}));\n stem = x{1};\nend\n\n% cons(j) is TRUE <=> b[j] is a consonant.\nfunction c = cons(i, b, k0)\nc = true;\nswitch(b(i))\n case {'a', 'e', 'i', 'o', 'u'}\n c = false;\n case 'y'\n if i == k0\n c = true;\n else\n c = ~cons(i - 1, b, k0);\n end\nend\n\n% mseq() measures the number of consonant sequences between k0 and j. If\n% c is a consonant sequence and v a vowel sequence, and <..> indicates\n% arbitrary presence,\n\n% gives 0\n% vc gives 1\n% vcvc gives 2\n% vcvcvc gives 3\n% ....\nfunction n = measure(b, k0)\nglobal j;\nn = 0;\ni = k0;\nwhile true\n if i > j\n return\n end\n if ~cons(i, b, k0)\n break;\n end\n i = i + 1;\nend\ni = i + 1;\nwhile true\n while true\n if i > j\n return\n end\n if cons(i, b, k0)\n break;\n end\n i = i + 1;\n end\n i = i + 1;\n n = n + 1;\n while true\n if i > j\n return\n end\n if ~cons(i, b, k0)\n break;\n end\n i = i + 1;\n end\n i = i + 1;\nend\n\n\n% vowelinstem() is TRUE <=> k0,...j contains a vowel\nfunction vis = vowelinstem(b, k0)\nglobal j;\nfor i = k0:j,\n if ~cons(i, b, k0)\n vis = true;\n return\n end\nend\nvis = false;\n\n%doublec(i) is TRUE <=> i,(i-1) contain a double consonant.\nfunction dc = doublec(i, b, k0)\nif i < k0+1\n dc = false;\n return\nend\nif b(i) ~= b(i-1)\n dc = false;\n return\nend\ndc = cons(i, b, k0);\n\n\n% cvc(j) is TRUE <=> j-2,j-1,j has the form consonant - vowel - consonant\n% and also if the second c is not w,x or y. this is used when trying to\n% restore an e at the end of a short word. 
e.g.\n%\n% cav(e), lov(e), hop(e), crim(e), but\n% snow, box, tray.\n\nfunction c1 = cvc(i, b, k0)\nif ((i < (k0+2)) || ~cons(i, b, k0) || cons(i-1, b, k0) || ~cons(i-2, b, k0))\n c1 = false;\nelse\n if (b(i) == 'w' || b(i) == 'x' || b(i) == 'y')\n c1 = false;\n return\n end\n c1 = true;\nend\n\n% ends(s) is TRUE <=> k0,...k ends with the string s.\nfunction s = ends(str, b, k)\nglobal j;\nif (str(length(str)) ~= b(k))\n s = false;\n return\nend % tiny speed-up\nif (length(str) > k)\n s = false;\n return\nend\nif strcmp(b(k-length(str)+1:k), str)\n s = true;\n j = k - length(str);\n return\nelse\n s = false;\nend\n\n% setto(s) sets (j+1),...k to the characters in the string s, readjusting\n% k accordingly.\n\nfunction so = setto(s, b, k)\nglobal j;\nfor i = j+1:(j+length(s))\n b(i) = s(i-j);\nend\nif k > j+length(s)\n b((j+length(s)+1):k) = '';\nend\nk = length(b);\nso = {b, k};\n\n% rs(s) is used further down.\n% [Note: possible null/value for r if rs is called]\nfunction r = rs(str, b, k, k0)\nr = {b, k};\nif measure(b, k0) > 0\n r = setto(str, b, k);\nend\n\n% step1ab() gets rid of plurals and -ed or -ing. e.g.\n\n% caresses -> caress\n% ponies -> poni\n% ties -> ti\n% caress -> caress\n% cats -> cat\n\n% feed -> feed\n% agreed -> agree\n% disabled -> disable\n\n% matting -> mat\n% mating -> mate\n% meeting -> meet\n% milling -> mill\n% messing -> mess\n\n% meetings -> meet\n\nfunction s1ab = step1ab(b, k, k0)\nglobal j;\nif b(k) == 's'\n if ends('sses', b, k)\n k = k-2;\n elseif ends('ies', b, k)\n retVal = setto('i', b, k);\n b = retVal{1};\n k = retVal{2};\n elseif (b(k-1) ~= 's')\n k = k-1;\n end\nend\nif ends('eed', b, k)\n if measure(b, k0) > 0;\n k = k-1;\n end\nelseif (ends('ed', b, k) || ends('ing', b, k)) && vowelinstem(b, k0)\n k = j;\n retVal = {b, k};\n if ends('at', b, k)\n retVal = setto('ate', b(k0:k), k);\n elseif ends('bl', b, k)\n retVal = setto('ble', b(k0:k), k);\n elseif ends('iz', b, k)\n retVal = setto('ize', b(k0:k), k);\n elseif doublec(k, b, k0)\n retVal = {b, k-1};\n if b(retVal{2}) == 'l' || b(retVal{2}) == 's' || ...\n b(retVal{2}) == 'z'\n retVal = {retVal{1}, retVal{2}+1};\n end\n elseif measure(b, k0) == 1 && cvc(k, b, k0)\n retVal = setto('e', b(k0:k), k);\n end\n k = retVal{2};\n b = retVal{1}(k0:k);\nend\nj = k;\ns1ab = {b(k0:k), k};\n\n% step1c() turns terminal y to i when there is another vowel in the stem.\nfunction s1c = step1c(b, k, k0)\nglobal j;\nif ends('y', b, k) && vowelinstem(b, k0)\n b(k) = 'i';\nend\nj = k;\ns1c = {b, k};\n\n% step2() maps double suffices to single ones. so -ization ( = -ize plus\n% -ation) maps to -ize etc. 
note that the string before the suffix must give\n% m() > 0.\nfunction s2 = step2(b, k, k0)\nglobal j;\ns2 = {b, k};\nswitch b(k-1)\n case {'a'}\n if ends('ational', b, k) s2 = rs('ate', b, k, k0);\n elseif ends('tional', b, k) s2 = rs('tion', b, k, k0); end;\n case {'c'}\n if ends('enci', b, k) s2 = rs('ence', b, k, k0);\n elseif ends('anci', b, k) s2 = rs('ance', b, k, k0); end;\n case {'e'}\n if ends('izer', b, k) s2 = rs('ize', b, k, k0); end;\n case {'l'}\n if ends('bli', b, k) s2 = rs('ble', b, k, k0);\n elseif ends('alli', b, k) s2 = rs('al', b, k, k0);\n elseif ends('entli', b, k) s2 = rs('ent', b, k, k0);\n elseif ends('eli', b, k) s2 = rs('e', b, k, k0);\n elseif ends('ousli', b, k) s2 = rs('ous', b, k, k0); end;\n case {'o'}\n if ends('ization', b, k) s2 = rs('ize', b, k, k0);\n elseif ends('ation', b, k) s2 = rs('ate', b, k, k0);\n elseif ends('ator', b, k) s2 = rs('ate', b, k, k0); end;\n case {'s'}\n if ends('alism', b, k) s2 = rs('al', b, k, k0);\n elseif ends('iveness', b, k) s2 = rs('ive', b, k, k0);\n elseif ends('fulness', b, k) s2 = rs('ful', b, k, k0);\n elseif ends('ousness', b, k) s2 = rs('ous', b, k, k0); end;\n case {'t'}\n if ends('aliti', b, k) s2 = rs('al', b, k, k0);\n elseif ends('iviti', b, k) s2 = rs('ive', b, k, k0);\n elseif ends('biliti', b, k) s2 = rs('ble', b, k, k0); end;\n case {'g'}\n if ends('logi', b, k) s2 = rs('log', b, k, k0); end;\nend\nj = s2{2};\n\n% step3() deals with -ic-, -full, -ness etc. similar strategy to step2.\nfunction s3 = step3(b, k, k0)\nglobal j;\ns3 = {b, k};\nswitch b(k)\n case {'e'}\n if ends('icate', b, k) s3 = rs('ic', b, k, k0);\n elseif ends('ative', b, k) s3 = rs('', b, k, k0);\n elseif ends('alize', b, k) s3 = rs('al', b, k, k0); end;\n case {'i'}\n if ends('iciti', b, k) s3 = rs('ic', b, k, k0); end;\n case {'l'}\n if ends('ical', b, k) s3 = rs('ic', b, k, k0);\n elseif ends('ful', b, k) s3 = rs('', b, k, k0); end;\n case {'s'}\n if ends('ness', b, k) s3 = rs('', b, k, k0); end;\nend\nj = s3{2};\n\n% step4() takes off -ant, -ence etc., in context vcvc.\nfunction s4 = step4(b, k, k0)\nglobal j;\nswitch b(k-1)\n case {'a'}\n if ends('al', b, k) end;\n case {'c'}\n if ends('ance', b, k)\n elseif ends('ence', b, k) end;\n case {'e'}\n if ends('er', b, k) end;\n case {'i'}\n if ends('ic', b, k) end;\n case {'l'}\n if ends('able', b, k)\n elseif ends('ible', b, k) end;\n case {'n'}\n if ends('ant', b, k)\n elseif ends('ement', b, k)\n elseif ends('ment', b, k)\n elseif ends('ent', b, k) end;\n case {'o'}\n if ends('ion', b, k)\n if j == 0\n elseif ~(strcmp(b(j),'s') || strcmp(b(j),'t'))\n j = k;\n end\n elseif ends('ou', b, k) end;\n case {'s'}\n if ends('ism', b, k) end;\n case {'t'}\n if ends('ate', b, k)\n elseif ends('iti', b, k) end;\n case {'u'}\n if ends('ous', b, k) end;\n case {'v'}\n if ends('ive', b, k) end;\n case {'z'}\n if ends('ize', b, k) end;\nend\nif measure(b, k0) > 1\n s4 = {b(k0:j), j};\nelse\n s4 = {b(k0:k), k};\nend\n\n% step5() removes a final -e if m() > 1, and changes -ll to -l if m() > 1.\nfunction s5 = step5(b, k, k0)\nglobal j;\nj = k;\nif b(k) == 'e'\n a = measure(b, k0);\n if (a > 1) || ((a == 1) && ~cvc(k-1, b, k0))\n k = k-1;\n end\nend\nif (b(k) == 'l') && doublec(k, b, k0) && (measure(b, k0) > 1)\n k = k-1;\nend\ns5 = {b(k0:k), k};\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submitWithConfiguration.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/lib/submitWithConfiguration.m", "size": 5562, "source_encoding": 
"utf_8", "md5": "4ac719ea6570ac228ea6c7a9c919e3f5", "text": "function submitWithConfiguration(conf)\n addpath('./lib/jsonlab');\n\n parts = parts(conf);\n\n fprintf('== Submitting solutions | %s...\\n', conf.itemName);\n\n tokenFile = 'token.mat';\n if exist(tokenFile, 'file')\n load(tokenFile);\n [email token] = promptToken(email, token, tokenFile);\n else\n [email token] = promptToken('', '', tokenFile);\n end\n\n if isempty(token)\n fprintf('!! Submission Cancelled\\n');\n return\n end\n\n try\n response = submitParts(conf, email, token, parts);\n catch\n e = lasterror();\n fprintf('\\n!! Submission failed: %s\\n', e.message);\n fprintf('\\n\\nFunction: %s\\nFileName: %s\\nLineNumber: %d\\n', ...\n e.stack(1,1).name, e.stack(1,1).file, e.stack(1,1).line);\n fprintf('\\nPlease correct your code and resubmit.\\n');\n return\n end\n\n if isfield(response, 'errorMessage')\n fprintf('!! Submission failed: %s\\n', response.errorMessage);\n elseif isfield(response, 'errorCode')\n fprintf('!! Submission failed: %s\\n', response.message);\n else\n showFeedback(parts, response);\n save(tokenFile, 'email', 'token');\n end\nend\n\nfunction [email token] = promptToken(email, existingToken, tokenFile)\n if (~isempty(email) && ~isempty(existingToken))\n prompt = sprintf( ...\n 'Use token from last successful submission (%s)? (Y/n): ', ...\n email);\n reenter = input(prompt, 's');\n\n if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')\n token = existingToken;\n return;\n else\n delete(tokenFile);\n end\n end\n email = input('Login (email address): ', 's');\n token = input('Token: ', 's');\nend\n\nfunction isValid = isValidPartOptionIndex(partOptions, i)\n isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));\nend\n\nfunction response = submitParts(conf, email, token, parts)\n body = makePostBody(conf, email, token, parts);\n submissionUrl = submissionUrl();\n\n responseBody = getResponse(submissionUrl, body);\n jsonResponse = validateResponse(responseBody);\n response = loadjson(jsonResponse);\nend\n\nfunction body = makePostBody(conf, email, token, parts)\n bodyStruct.assignmentSlug = conf.assignmentSlug;\n bodyStruct.submitterEmail = email;\n bodyStruct.secret = token;\n bodyStruct.parts = makePartsStruct(conf, parts);\n\n opt.Compact = 1;\n body = savejson('', bodyStruct, opt);\nend\n\nfunction partsStruct = makePartsStruct(conf, parts)\n for part = parts\n partId = part{:}.id;\n fieldName = makeValidFieldName(partId);\n outputStruct.output = conf.output(partId);\n partsStruct.(fieldName) = outputStruct;\n end\nend\n\nfunction [parts] = parts(conf)\n parts = {};\n for partArray = conf.partArrays\n part.id = partArray{:}{1};\n part.sourceFiles = partArray{:}{2};\n part.name = partArray{:}{3};\n parts{end + 1} = part;\n end\nend\n\nfunction showFeedback(parts, response)\n fprintf('== \\n');\n fprintf('== %43s | %9s | %-s\\n', 'Part Name', 'Score', 'Feedback');\n fprintf('== %43s | %9s | %-s\\n', '---------', '-----', '--------');\n for part = parts\n score = '';\n partFeedback = '';\n partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));\n partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));\n score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);\n fprintf('== %43s | %9s | %-s\\n', part{:}.name, score, partFeedback);\n end\n evaluation = response.evaluation;\n totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);\n fprintf('== --------------------------------\\n');\n fprintf('== %43s | %9s | 
%-s\\n', '', totalScore, '');\n fprintf('== \\n');\nend\n\n% use urlread or curl to send submit results to the grader and get a response\nfunction response = getResponse(url, body)\n% try using urlread() and a secure connection\n params = {'jsonBody', body};\n [response, success] = urlread(url, 'post', params);\n\n if (success == 0)\n % urlread didn't work, try curl & the peer certificate patch\n if ispc\n % testing note: use 'jsonBody =' for a test case\n json_command = sprintf('echo jsonBody=%s | curl -k -X POST -d @- %s', body, url);\n else\n % it's linux/OS X, so use the other form\n json_command = sprintf('echo ''jsonBody=%s'' | curl -k -X POST -d @- %s', body, url);\n end\n % get the response body for the peer certificate patch method\n [code, response] = system(json_command);\n % test the success code\n if (code ~= 0)\n fprintf('[error] submission with curl() was not successful\\n');\n end\n end\nend\n\n% validate the grader's response\nfunction response = validateResponse(resp)\n % test if the response is json or an HTML page\n isJson = length(resp) > 0 && resp(1) == '{';\n isHtml = findstr(lower(resp), ']+>', ' ');\n strippedResponse = regexprep(strippedResponse, '[\\t ]+', ' ');\n fprintf(strippedResponse);\nend\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Service configuration\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction submissionUrl = submissionUrl()\n submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "savejson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/lib/jsonlab/savejson.m", "size": 17462, "source_encoding": "utf_8", "md5": "861b534fc35ffe982b53ca3ca83143bf", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] 
is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(opt.FileName, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(opt.FileName, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nif(len>1)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": [%s',padding0, checkname(name,varargin{:}),nl); name=''; \n else\n txt=sprintf('%s[%s',padding0,nl); \n end\nelseif(len==0)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": []',padding0, checkname(name,varargin{:})); name=''; \n else\n txt=sprintf('%s[]',padding0); \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n 
txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));\n if(i1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));\nnl=ws.newline;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding0,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding0,nl); end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=sprintf('%s%s\"%s\": {%s',txt,padding1, checkname(name,varargin{:}),nl); \n else\n txt=sprintf('%s%s{%s',txt,padding1,nl); \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));\n if(e1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2json(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding1,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding1,nl); end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n if(isoct)\n val=regexprep(item(e,:),'\\\\','\\\\');\n val=regexprep(val,'\"','\\\"');\n val=regexprep(val,'^\"','\\\"');\n else\n val=regexprep(item(e,:),'\\\\','\\\\\\\\');\n val=regexprep(val,'\"','\\\\\"');\n val=regexprep(val,'^\"','\\\\\"');\n end\n val=escapejsonstring(val);\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n\tif(isempty(name)) obj=['\"',val,'\"']; end\n txt=sprintf('%s%s%s%s',txt,padding1,obj);\n else\n txt=sprintf('%s%s%s%s',txt,padding0,['\"',val,'\"']);\n end\n if(e==len) sep=''; end\n txt=sprintf('%s%s',txt,sep);\nend\nif(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": 
%s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='null';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) 
'[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8) isoct=0; end\nend\nif(isoct)\n escapechars={'\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\nelse\n escapechars={'\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/lib/jsonlab/loadjson.m", "size": 18732, "source_encoding": "ibm852", "md5": "ab98cf173af2d50bbe8da4d6db252a20", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. 
The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n 
if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=data(j).x0x5F_ArraySize_;\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=jsonopt('progressbar_',-1,varargin{:});\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n 
end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str); switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos), keyboard\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct\n currstr=inStr(pos:end);\n numstr=0;\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num, one] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n \n pbar=jsonopt('progressbar_',-1,varargin{:});\n if(pbar>0)\n 
waitbar(pos/len,pbar,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadubjson.m", "ext": ".m", "path": 
"AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/lib/jsonlab/loadubjson.m", "size": 15574, "source_encoding": "utf_8", "md5": "5974e78e71b81b1e0f76123784b951a4", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\n\n\n%%\nfunction newdata=parse_collection(id,data,obj)\n\nif(jsoncount>0 && exist('data','var')) \n if(~iscell(data))\n newdata=cell(1);\n newdata{1}=data;\n data=newdata;\n end\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n 
newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=double(data(j).x0x5F_ArraySize_);\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n 
newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n 
global pos inStr len\n true = 1; false = 0;\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos e1l e1r maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "saveubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex6/ex6/lib/jsonlab/saveubjson.m", "size": 16123, "source_encoding": "utf_8", "md5": "61d4f51010aedbf97753396f5d2d9ec0", "text": 
"function json=saveubjson(rootname,obj,varargin)\n%\n% json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n fid = fopen(opt.FileName, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item); % let's handle 1D cell first\nif(len>1) \n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=[txt S_(checkname(name,varargin{:})) '{']; \n else\n 
txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];\n\tif(isempty(name)) obj=['',S_(val),'']; end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[S_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[S_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,S_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\nif(size(mat,1)==1)\n level=level-1;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n 
if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(find(id)))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(find(id));\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || 
strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submit.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex5/ex5/submit.m", "size": 1765, "source_encoding": "utf_8", "md5": "b1804fe5854d9744dca981d250eda251", "text": "function submit()\n addpath('./lib');\n\n conf.assignmentSlug = 'regularized-linear-regression-and-bias-variance';\n conf.itemName = 'Regularized Linear Regression and Bias/Variance';\n conf.partArrays = { ...\n { ...\n '1', ...\n { 'linearRegCostFunction.m' }, ...\n 'Regularized Linear Regression Cost Function', ...\n }, ...\n { ...\n '2', ...\n { 'linearRegCostFunction.m' }, ...\n 'Regularized Linear Regression Gradient', ...\n }, ...\n { ...\n '3', ...\n { 'learningCurve.m' }, ...\n 'Learning Curve', ...\n }, ...\n { ...\n '4', ...\n { 'polyFeatures.m' }, ...\n 'Polynomial Feature Mapping', ...\n }, ...\n { ...\n '5', ...\n { 'validationCurve.m' }, ...\n 'Validation Curve', ...\n }, ...\n };\n conf.output = @output;\n\n submitWithConfiguration(conf);\nend\n\nfunction out = output(partId, auxstring)\n % Random Test Cases\n X = [ones(10,1) sin(1:1.5:15)' cos(1:1.5:15)'];\n y = sin(1:3:30)';\n Xval = [ones(10,1) sin(0:1.5:14)' cos(0:1.5:14)'];\n yval = sin(1:10)';\n if partId == '1'\n [J] = linearRegCostFunction(X, y, [0.1 0.2 0.3]', 0.5);\n out = sprintf('%0.5f ', J);\n elseif partId == '2'\n [J, grad] = linearRegCostFunction(X, y, [0.1 0.2 0.3]', 0.5);\n out = sprintf('%0.5f ', grad);\n elseif partId == '3'\n [error_train, error_val] = ...\n learningCurve(X, y, Xval, yval, 1);\n out = sprintf('%0.5f ', [error_train(:); error_val(:)]);\n elseif partId == '4'\n [X_poly] = polyFeatures(X(2,:)', 8);\n out = sprintf('%0.5f ', X_poly);\n elseif partId == '5'\n [lambda_vec, error_train, error_val] = ...\n validationCurve(X, y, Xval, yval);\n out = sprintf('%0.5f ', ...\n [lambda_vec(:); error_train(:); error_val(:)]);\n end \nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submitWithConfiguration.m", "ext": ".m", "path": 
"AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex5/ex5/lib/submitWithConfiguration.m", "size": 5562, "source_encoding": "utf_8", "md5": "4ac719ea6570ac228ea6c7a9c919e3f5", "text": "function submitWithConfiguration(conf)\n addpath('./lib/jsonlab');\n\n parts = parts(conf);\n\n fprintf('== Submitting solutions | %s...\\n', conf.itemName);\n\n tokenFile = 'token.mat';\n if exist(tokenFile, 'file')\n load(tokenFile);\n [email token] = promptToken(email, token, tokenFile);\n else\n [email token] = promptToken('', '', tokenFile);\n end\n\n if isempty(token)\n fprintf('!! Submission Cancelled\\n');\n return\n end\n\n try\n response = submitParts(conf, email, token, parts);\n catch\n e = lasterror();\n fprintf('\\n!! Submission failed: %s\\n', e.message);\n fprintf('\\n\\nFunction: %s\\nFileName: %s\\nLineNumber: %d\\n', ...\n e.stack(1,1).name, e.stack(1,1).file, e.stack(1,1).line);\n fprintf('\\nPlease correct your code and resubmit.\\n');\n return\n end\n\n if isfield(response, 'errorMessage')\n fprintf('!! Submission failed: %s\\n', response.errorMessage);\n elseif isfield(response, 'errorCode')\n fprintf('!! Submission failed: %s\\n', response.message);\n else\n showFeedback(parts, response);\n save(tokenFile, 'email', 'token');\n end\nend\n\nfunction [email token] = promptToken(email, existingToken, tokenFile)\n if (~isempty(email) && ~isempty(existingToken))\n prompt = sprintf( ...\n 'Use token from last successful submission (%s)? (Y/n): ', ...\n email);\n reenter = input(prompt, 's');\n\n if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')\n token = existingToken;\n return;\n else\n delete(tokenFile);\n end\n end\n email = input('Login (email address): ', 's');\n token = input('Token: ', 's');\nend\n\nfunction isValid = isValidPartOptionIndex(partOptions, i)\n isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));\nend\n\nfunction response = submitParts(conf, email, token, parts)\n body = makePostBody(conf, email, token, parts);\n submissionUrl = submissionUrl();\n\n responseBody = getResponse(submissionUrl, body);\n jsonResponse = validateResponse(responseBody);\n response = loadjson(jsonResponse);\nend\n\nfunction body = makePostBody(conf, email, token, parts)\n bodyStruct.assignmentSlug = conf.assignmentSlug;\n bodyStruct.submitterEmail = email;\n bodyStruct.secret = token;\n bodyStruct.parts = makePartsStruct(conf, parts);\n\n opt.Compact = 1;\n body = savejson('', bodyStruct, opt);\nend\n\nfunction partsStruct = makePartsStruct(conf, parts)\n for part = parts\n partId = part{:}.id;\n fieldName = makeValidFieldName(partId);\n outputStruct.output = conf.output(partId);\n partsStruct.(fieldName) = outputStruct;\n end\nend\n\nfunction [parts] = parts(conf)\n parts = {};\n for partArray = conf.partArrays\n part.id = partArray{:}{1};\n part.sourceFiles = partArray{:}{2};\n part.name = partArray{:}{3};\n parts{end + 1} = part;\n end\nend\n\nfunction showFeedback(parts, response)\n fprintf('== \\n');\n fprintf('== %43s | %9s | %-s\\n', 'Part Name', 'Score', 'Feedback');\n fprintf('== %43s | %9s | %-s\\n', '---------', '-----', '--------');\n for part = parts\n score = '';\n partFeedback = '';\n partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));\n partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));\n score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);\n fprintf('== %43s | %9s | %-s\\n', part{:}.name, score, partFeedback);\n end\n evaluation = response.evaluation;\n totalScore = sprintf('%d 
/ %d', evaluation.score, evaluation.maxScore);\n fprintf('== --------------------------------\\n');\n fprintf('== %43s | %9s | %-s\\n', '', totalScore, '');\n fprintf('== \\n');\nend\n\n% use urlread or curl to send submit results to the grader and get a response\nfunction response = getResponse(url, body)\n% try using urlread() and a secure connection\n params = {'jsonBody', body};\n [response, success] = urlread(url, 'post', params);\n\n if (success == 0)\n % urlread didn't work, try curl & the peer certificate patch\n if ispc\n % testing note: use 'jsonBody =' for a test case\n json_command = sprintf('echo jsonBody=%s | curl -k -X POST -d @- %s', body, url);\n else\n % it's linux/OS X, so use the other form\n json_command = sprintf('echo ''jsonBody=%s'' | curl -k -X POST -d @- %s', body, url);\n end\n % get the response body for the peer certificate patch method\n [code, response] = system(json_command);\n % test the success code\n if (code ~= 0)\n fprintf('[error] submission with curl() was not successful\\n');\n end\n end\nend\n\n% validate the grader's response\nfunction response = validateResponse(resp)\n % test if the response is json or an HTML page\n isJson = length(resp) > 0 && resp(1) == '{';\n isHtml = findstr(lower(resp), ']+>', ' ');\n strippedResponse = regexprep(strippedResponse, '[\\t ]+', ' ');\n fprintf(strippedResponse);\nend\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Service configuration\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction submissionUrl = submissionUrl()\n submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "savejson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex5/ex5/lib/jsonlab/savejson.m", "size": 17462, "source_encoding": "utf_8", "md5": "861b534fc35ffe982b53ca3ca83143bf", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] 
is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(opt.FileName, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(opt.FileName, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nif(len>1)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": [%s',padding0, checkname(name,varargin{:}),nl); name=''; \n else\n txt=sprintf('%s[%s',padding0,nl); \n end\nelseif(len==0)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": []',padding0, checkname(name,varargin{:})); name=''; \n else\n txt=sprintf('%s[]',padding0); \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n 
txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));\n if(i1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));\nnl=ws.newline;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding0,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding0,nl); end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=sprintf('%s%s\"%s\": {%s',txt,padding1, checkname(name,varargin{:}),nl); \n else\n txt=sprintf('%s%s{%s',txt,padding1,nl); \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));\n if(e1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2json(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding1,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding1,nl); end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n if(isoct)\n val=regexprep(item(e,:),'\\\\','\\\\');\n val=regexprep(val,'\"','\\\"');\n val=regexprep(val,'^\"','\\\"');\n else\n val=regexprep(item(e,:),'\\\\','\\\\\\\\');\n val=regexprep(val,'\"','\\\\\"');\n val=regexprep(val,'^\"','\\\\\"');\n end\n val=escapejsonstring(val);\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n\tif(isempty(name)) obj=['\"',val,'\"']; end\n txt=sprintf('%s%s%s%s',txt,padding1,obj);\n else\n txt=sprintf('%s%s%s%s',txt,padding0,['\"',val,'\"']);\n end\n if(e==len) sep=''; end\n txt=sprintf('%s%s',txt,sep);\nend\nif(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": 
%s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='null';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) 
'[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8) isoct=0; end\nend\nif(isoct)\n escapechars={'\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\nelse\n escapechars={'\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex5/ex5/lib/jsonlab/loadjson.m", "size": 18732, "source_encoding": "ibm852", "md5": "ab98cf173af2d50bbe8da4d6db252a20", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. 
The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n 
if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=data(j).x0x5F_ArraySize_;\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=jsonopt('progressbar_',-1,varargin{:});\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n 
end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str); switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos), keyboard\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct\n currstr=inStr(pos:end);\n numstr=0;\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num, one] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n \n pbar=jsonopt('progressbar_',-1,varargin{:});\n if(pbar>0)\n 
waitbar(pos/len,pbar,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadubjson.m", "ext": ".m", "path": 
"AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex5/ex5/lib/jsonlab/loadubjson.m", "size": 15574, "source_encoding": "utf_8", "md5": "5974e78e71b81b1e0f76123784b951a4", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\n\n\n%%\nfunction newdata=parse_collection(id,data,obj)\n\nif(jsoncount>0 && exist('data','var')) \n if(~iscell(data))\n newdata=cell(1);\n newdata{1}=data;\n data=newdata;\n end\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n 
newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=double(data(j).x0x5F_ArraySize_);\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n 
newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n 
global pos inStr len\n true = 1; false = 0;\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos e1l e1r maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "saveubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex5/ex5/lib/jsonlab/saveubjson.m", "size": 16123, "source_encoding": "utf_8", "md5": "61d4f51010aedbf97753396f5d2d9ec0", "text": 
"function json=saveubjson(rootname,obj,varargin)\n%\n% json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n fid = fopen(opt.FileName, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item); % let's handle 1D cell first\nif(len>1) \n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=[txt S_(checkname(name,varargin{:})) '{']; \n else\n 
txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];\n\tif(isempty(name)) obj=['',S_(val),'']; end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[S_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[S_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,S_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\nif(size(mat,1)==1)\n level=level-1;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n 
if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(find(id)))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(find(id));\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || 
strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submit.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex3/ex3/submit.m", "size": 1567, "source_encoding": "utf_8", "md5": "1dba733a05282b2db9f2284548483b81", "text": "function submit()\n addpath('./lib');\n\n conf.assignmentSlug = 'multi-class-classification-and-neural-networks';\n conf.itemName = 'Multi-class Classification and Neural Networks';\n conf.partArrays = { ...\n { ...\n '1', ...\n { 'lrCostFunction.m' }, ...\n 'Regularized Logistic Regression', ...\n }, ...\n { ...\n '2', ...\n { 'oneVsAll.m' }, ...\n 'One-vs-All Classifier Training', ...\n }, ...\n { ...\n '3', ...\n { 'predictOneVsAll.m' }, ...\n 'One-vs-All Classifier Prediction', ...\n }, ...\n { ...\n '4', ...\n { 'predict.m' }, ...\n 'Neural Network Prediction Function' ...\n }, ...\n };\n conf.output = @output;\n\n submitWithConfiguration(conf);\nend\n\nfunction out = output(partId, auxdata)\n % Random Test Cases\n X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))'];\n y = sin(X(:,1) + X(:,2)) > 0;\n Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ...\n 1 1 ; 1 2 ; 2 1 ; 2 2 ; ...\n -1 1 ; -1 2 ; -2 1 ; -2 2 ; ...\n 1 -1 ; 1 -2 ; -2 -1 ; -2 -2 ];\n ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]';\n t1 = sin(reshape(1:2:24, 4, 3));\n t2 = cos(reshape(1:2:40, 4, 5));\n\n if partId == '1'\n [J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1);\n out = sprintf('%0.5f ', J);\n out = [out sprintf('%0.5f ', grad)];\n elseif partId == '2'\n out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1));\n elseif partId == '3'\n out = sprintf('%0.5f ', predictOneVsAll(t1, Xm));\n elseif partId == '4'\n out = sprintf('%0.5f ', predict(t1, t2, Xm));\n end \nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submitWithConfiguration.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex3/ex3/lib/submitWithConfiguration.m", "size": 3734, "source_encoding": "utf_8", "md5": "84d9a81848f6d00a7aff4f79bdbb6049", "text": "function submitWithConfiguration(conf)\n 
addpath('./lib/jsonlab');\n\n parts = parts(conf);\n\n fprintf('== Submitting solutions | %s...\\n', conf.itemName);\n\n tokenFile = 'token.mat';\n if exist(tokenFile, 'file')\n load(tokenFile);\n [email token] = promptToken(email, token, tokenFile);\n else\n [email token] = promptToken('', '', tokenFile);\n end\n\n if isempty(token)\n fprintf('!! Submission Cancelled\\n');\n return\n end\n\n try\n response = submitParts(conf, email, token, parts);\n catch\n e = lasterror();\n fprintf( ...\n '!! Submission failed: unexpected error: %s\\n', ...\n e.message);\n fprintf('!! Please try again later.\\n');\n return\n end\n\n if isfield(response, 'errorMessage')\n fprintf('!! Submission failed: %s\\n', response.errorMessage);\n else\n showFeedback(parts, response);\n save(tokenFile, 'email', 'token');\n end\nend\n\nfunction [email token] = promptToken(email, existingToken, tokenFile)\n if (~isempty(email) && ~isempty(existingToken))\n prompt = sprintf( ...\n 'Use token from last successful submission (%s)? (Y/n): ', ...\n email);\n reenter = input(prompt, 's');\n\n if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')\n token = existingToken;\n return;\n else\n delete(tokenFile);\n end\n end\n email = input('Login (email address): ', 's');\n token = input('Token: ', 's');\nend\n\nfunction isValid = isValidPartOptionIndex(partOptions, i)\n isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));\nend\n\nfunction response = submitParts(conf, email, token, parts)\n body = makePostBody(conf, email, token, parts);\n submissionUrl = submissionUrl();\n params = {'jsonBody', body};\n responseBody = urlread(submissionUrl, 'post', params);\n response = loadjson(responseBody);\nend\n\nfunction body = makePostBody(conf, email, token, parts)\n bodyStruct.assignmentSlug = conf.assignmentSlug;\n bodyStruct.submitterEmail = email;\n bodyStruct.secret = token;\n bodyStruct.parts = makePartsStruct(conf, parts);\n\n opt.Compact = 1;\n body = savejson('', bodyStruct, opt);\nend\n\nfunction partsStruct = makePartsStruct(conf, parts)\n for part = parts\n partId = part{:}.id;\n fieldName = makeValidFieldName(partId);\n outputStruct.output = conf.output(partId);\n partsStruct.(fieldName) = outputStruct;\n end\nend\n\nfunction [parts] = parts(conf)\n parts = {};\n for partArray = conf.partArrays\n part.id = partArray{:}{1};\n part.sourceFiles = partArray{:}{2};\n part.name = partArray{:}{3};\n parts{end + 1} = part;\n end\nend\n\nfunction showFeedback(parts, response)\n fprintf('== \\n');\n fprintf('== %43s | %9s | %-s\\n', 'Part Name', 'Score', 'Feedback');\n fprintf('== %43s | %9s | %-s\\n', '---------', '-----', '--------');\n for part = parts\n score = '';\n partFeedback = '';\n partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));\n partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));\n score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);\n fprintf('== %43s | %9s | %-s\\n', part{:}.name, score, partFeedback);\n end\n evaluation = response.evaluation;\n totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);\n fprintf('== --------------------------------\\n');\n fprintf('== %43s | %9s | %-s\\n', '', totalScore, '');\n fprintf('== \\n');\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Service configuration\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction submissionUrl = submissionUrl()\n submissionUrl = 
'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "savejson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex3/ex3/lib/jsonlab/savejson.m", "size": 17462, "source_encoding": "utf_8", "md5": "861b534fc35ffe982b53ca3ca83143bf", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. 
For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... \n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(opt.FileName, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(opt.FileName, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n 
error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nif(len>1)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": [%s',padding0, checkname(name,varargin{:}),nl); name=''; \n else\n txt=sprintf('%s[%s',padding0,nl); \n end\nelseif(len==0)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": []',padding0, checkname(name,varargin{:})); name=''; \n else\n txt=sprintf('%s[]',padding0); \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));\n if(i1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));\nnl=ws.newline;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding0,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding0,nl); end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=sprintf('%s%s\"%s\": {%s',txt,padding1, checkname(name,varargin{:}),nl); \n else\n txt=sprintf('%s%s{%s',txt,padding1,nl); \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));\n if(e1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2json(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding1,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding1,nl); end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n if(isoct)\n val=regexprep(item(e,:),'\\\\','\\\\');\n val=regexprep(val,'\"','\\\"');\n val=regexprep(val,'^\"','\\\"');\n else\n val=regexprep(item(e,:),'\\\\','\\\\\\\\');\n val=regexprep(val,'\"','\\\\\"');\n val=regexprep(val,'^\"','\\\\\"');\n end\n val=escapejsonstring(val);\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n\tif(isempty(name)) obj=['\"',val,'\"']; end\n 
txt=sprintf('%s%s%s%s',txt,padding1,obj);\n else\n txt=sprintf('%s%s%s%s',txt,padding0,['\"',val,'\"']);\n end\n if(e==len) sep=''; end\n txt=sprintf('%s%s',txt,sep);\nend\nif(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n 
post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='null';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8) isoct=0; end\nend\nif(isoct)\n escapechars={'\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\nelse\n escapechars={'\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex3/ex3/lib/jsonlab/loadjson.m", "size": 18732, "source_encoding": "ibm852", "md5": "ab98cf173af2d50bbe8da4d6db252a20", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% 
input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction 
newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=data(j).x0x5F_ArraySize_;\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=jsonopt('progressbar_',-1,varargin{:});\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, 
nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str); switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos), keyboard\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of 
inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct\n currstr=inStr(pos:end);\n numstr=0;\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num, one] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n \n pbar=jsonopt('progressbar_',-1,varargin{:});\n if(pbar>0)\n waitbar(pos/len,pbar,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation 
mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex3/ex3/lib/jsonlab/loadubjson.m", "size": 15574, "source_encoding": "utf_8", "md5": "5974e78e71b81b1e0f76123784b951a4", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] 
are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\n\n\n%%\nfunction newdata=parse_collection(id,data,obj)\n\nif(jsoncount>0 && exist('data','var')) \n if(~iscell(data))\n newdata=cell(1);\n newdata{1}=data;\n data=newdata;\n end\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=double(data(j).x0x5F_ArraySize_);\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n 
ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction 
parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n 
if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos e1l e1r maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "saveubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex3/ex3/lib/jsonlab/saveubjson.m", "size": 16123, "source_encoding": "utf_8", "md5": "61d4f51010aedbf97753396f5d2d9ec0", "text": "function json=saveubjson(rootname,obj,varargin)\n%\n% json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] 
is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n fid = fopen(opt.FileName, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item); % let's handle 1D cell first\nif(len>1) \n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=[txt S_(checkname(name,varargin{:})) '{']; \n else\n 
txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];\n\tif(isempty(name)) obj=['',S_(val),'']; end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[S_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[S_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,S_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\nif(size(mat,1)==1)\n level=level-1;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n 
if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(find(id)))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(find(id));\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || 
strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "submit.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex1/ex1/submit.m", "size": 1876, "source_encoding": "utf_8", "md5": "8d1c467b830a89c187c05b121cb8fbfd", "text": "function submit()\n addpath('./lib');\n\n conf.assignmentSlug = 'linear-regression';\n conf.itemName = 'Linear Regression with Multiple Variables';\n conf.partArrays = { ...\n { ...\n '1', ...\n { 'warmUpExercise.m' }, ...\n 'Warm-up Exercise', ...\n }, ...\n { ...\n '2', ...\n { 'computeCost.m' }, ...\n 'Computing Cost (for One Variable)', ...\n }, ...\n { ...\n '3', ...\n { 'gradientDescent.m' }, ...\n 'Gradient Descent (for One Variable)', ...\n }, ...\n { ...\n '4', ...\n { 'featureNormalize.m' }, ...\n 'Feature Normalization', ...\n }, ...\n { ...\n '5', ...\n { 'computeCostMulti.m' }, ...\n 'Computing Cost (for Multiple Variables)', ...\n }, ...\n { ...\n '6', ...\n { 'gradientDescentMulti.m' }, ...\n 'Gradient Descent (for Multiple Variables)', ...\n }, ...\n { ...\n '7', ...\n { 'normalEqn.m' }, ...\n 'Normal Equations', ...\n }, ...\n };\n conf.output = @output;\n\n submitWithConfiguration(conf);\nend\n\nfunction out = output(partId)\n % Random Test Cases\n X1 = [ones(20,1) (exp(1) + exp(2) * (0.1:0.1:2))'];\n Y1 = X1(:,2) + sin(X1(:,1)) + cos(X1(:,2));\n X2 = [X1 X1(:,2).^0.5 X1(:,2).^0.25];\n Y2 = Y1.^0.5 + Y1;\n if partId == '1'\n out = sprintf('%0.5f ', warmUpExercise());\n elseif partId == '2'\n out = sprintf('%0.5f ', computeCost(X1, Y1, [0.5 -0.5]'));\n elseif partId == '3'\n out = sprintf('%0.5f ', gradientDescent(X1, Y1, [0.5 -0.5]', 0.01, 10));\n elseif partId == '4'\n out = sprintf('%0.5f ', featureNormalize(X2(:,2:4)));\n elseif partId == '5'\n out = sprintf('%0.5f ', computeCostMulti(X2, Y2, [0.1 0.2 0.3 0.4]'));\n elseif partId == '6'\n out = sprintf('%0.5f ', gradientDescentMulti(X2, Y2, [-0.1 -0.2 -0.3 -0.4]', 0.01, 10));\n elseif partId == '7'\n out = sprintf('%0.5f ', normalEqn(X2, Y2));\n end \nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", 
"name": "submitWithConfiguration.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex1/ex1/lib/submitWithConfiguration.m", "size": 3734, "source_encoding": "utf_8", "md5": "84d9a81848f6d00a7aff4f79bdbb6049", "text": "function submitWithConfiguration(conf)\n addpath('./lib/jsonlab');\n\n parts = parts(conf);\n\n fprintf('== Submitting solutions | %s...\\n', conf.itemName);\n\n tokenFile = 'token.mat';\n if exist(tokenFile, 'file')\n load(tokenFile);\n [email token] = promptToken(email, token, tokenFile);\n else\n [email token] = promptToken('', '', tokenFile);\n end\n\n if isempty(token)\n fprintf('!! Submission Cancelled\\n');\n return\n end\n\n try\n response = submitParts(conf, email, token, parts);\n catch\n e = lasterror();\n fprintf( ...\n '!! Submission failed: unexpected error: %s\\n', ...\n e.message);\n fprintf('!! Please try again later.\\n');\n return\n end\n\n if isfield(response, 'errorMessage')\n fprintf('!! Submission failed: %s\\n', response.errorMessage);\n else\n showFeedback(parts, response);\n save(tokenFile, 'email', 'token');\n end\nend\n\nfunction [email token] = promptToken(email, existingToken, tokenFile)\n if (~isempty(email) && ~isempty(existingToken))\n prompt = sprintf( ...\n 'Use token from last successful submission (%s)? (Y/n): ', ...\n email);\n reenter = input(prompt, 's');\n\n if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')\n token = existingToken;\n return;\n else\n delete(tokenFile);\n end\n end\n email = input('Login (email address): ', 's');\n token = input('Token: ', 's');\nend\n\nfunction isValid = isValidPartOptionIndex(partOptions, i)\n isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));\nend\n\nfunction response = submitParts(conf, email, token, parts)\n body = makePostBody(conf, email, token, parts);\n submissionUrl = submissionUrl();\n params = {'jsonBody', body};\n responseBody = urlread(submissionUrl, 'post', params);\n response = loadjson(responseBody);\nend\n\nfunction body = makePostBody(conf, email, token, parts)\n bodyStruct.assignmentSlug = conf.assignmentSlug;\n bodyStruct.submitterEmail = email;\n bodyStruct.secret = token;\n bodyStruct.parts = makePartsStruct(conf, parts);\n\n opt.Compact = 1;\n body = savejson('', bodyStruct, opt);\nend\n\nfunction partsStruct = makePartsStruct(conf, parts)\n for part = parts\n partId = part{:}.id;\n fieldName = makeValidFieldName(partId);\n outputStruct.output = conf.output(partId);\n partsStruct.(fieldName) = outputStruct;\n end\nend\n\nfunction [parts] = parts(conf)\n parts = {};\n for partArray = conf.partArrays\n part.id = partArray{:}{1};\n part.sourceFiles = partArray{:}{2};\n part.name = partArray{:}{3};\n parts{end + 1} = part;\n end\nend\n\nfunction showFeedback(parts, response)\n fprintf('== \\n');\n fprintf('== %43s | %9s | %-s\\n', 'Part Name', 'Score', 'Feedback');\n fprintf('== %43s | %9s | %-s\\n', '---------', '-----', '--------');\n for part = parts\n score = '';\n partFeedback = '';\n partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));\n partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));\n score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);\n fprintf('== %43s | %9s | %-s\\n', part{:}.name, score, partFeedback);\n end\n evaluation = response.evaluation;\n totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);\n fprintf('== --------------------------------\\n');\n fprintf('== %43s | %9s | %-s\\n', '', totalScore, '');\n fprintf('== 
\\n');\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Service configuration\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction submissionUrl = submissionUrl()\n submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "savejson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex1/ex1/lib/jsonlab/savejson.m", "size": 17462, "source_encoding": "utf_8", "md5": "861b534fc35ffe982b53ca3ca83143bf", "text": "function json=savejson(rootname,obj,varargin)\n%\n% json=savejson(rootname,obj,filename)\n% or\n% json=savejson(rootname,obj,opt)\n% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript\n% Object Notation) string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09\n%\n% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array).\n% filename: a string for the file name to save the output JSON data.\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.FloatFormat ['%.10g'|string]: format to show each numeric element\n% of a 1D/2D array;\n% opt.ArrayIndent [1|0]: if 1, output explicit data array with\n% precedent indentation; if 0, no indentation\n% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [0|1]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.Inf ['\"$1_Inf_\"'|string]: a customized regular expression pattern\n% to represent +/-Inf. The matched pattern is '([-+]*)Inf'\n% and $1 represents the sign. 
For those who want to use\n% 1e999 to represent Inf, they can set opt.Inf to '$11e999'\n% opt.NaN ['\"_NaN_\"'|string]: a customized regular expression pattern\n% to represent NaN\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSONP='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.\n% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a string in the JSON format (see http://json.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... \n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% savejson('jmesh',jsonmesh)\n% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\\t%.5g')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\n\nwhitespaces=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nif(jsonopt('Compact',0,opt)==1)\n whitespaces=struct('tab','','newline','','sep',',');\nend\nif(~isfield(opt,'whitespaces_'))\n opt.whitespaces_=whitespaces;\nend\n\nnl=whitespaces.newline;\n\njson=obj2json(rootname,obj,rootlevel,opt);\nif(rootisarray)\n json=sprintf('%s%s',json,nl);\nelse\n json=sprintf('{%s%s%s}\\n',nl,json,nl);\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=sprintf('%s(%s);%s',jsonp,json,nl);\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n if(jsonopt('SaveBinary',0,opt)==1)\n\t fid = fopen(opt.FileName, 'wb');\n\t fwrite(fid,json);\n else\n\t fid = fopen(opt.FileName, 'wt');\n\t fwrite(fid,json,'char');\n end\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2json(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2json(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2json(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2json(name,item,level,varargin{:});\nelse\n txt=mat2json(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2json(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n 
error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=jsonopt('whitespaces_',struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n')),varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nif(len>1)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": [%s',padding0, checkname(name,varargin{:}),nl); name=''; \n else\n txt=sprintf('%s[%s',padding0,nl); \n end\nelseif(len==0)\n if(~isempty(name))\n txt=sprintf('%s\"%s\": []',padding0, checkname(name,varargin{:})); name=''; \n else\n txt=sprintf('%s[]',padding0); \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));\n if(i1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2json(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding0=repmat(ws.tab,1,level);\npadding2=repmat(ws.tab,1,level+1);\npadding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));\nnl=ws.newline;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding0,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding0,nl); end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=sprintf('%s%s\"%s\": {%s',txt,padding1, checkname(name,varargin{:}),nl); \n else\n txt=sprintf('%s%s{%s',txt,padding1,nl); \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));\n if(e1) txt=sprintf('%s%s%s]',txt,nl,padding2); end\n if(j1) txt=sprintf('%s%s%s]',txt,nl,padding0); end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2json(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(~isempty(name)) \n if(len>1) txt=sprintf('%s\"%s\": [%s',padding1,checkname(name,varargin{:}),nl); end\nelse\n if(len>1) txt=sprintf('%s[%s',padding1,nl); end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n if(isoct)\n val=regexprep(item(e,:),'\\\\','\\\\');\n val=regexprep(val,'\"','\\\"');\n val=regexprep(val,'^\"','\\\"');\n else\n val=regexprep(item(e,:),'\\\\','\\\\\\\\');\n val=regexprep(val,'\"','\\\\\"');\n val=regexprep(val,'^\"','\\\\\"');\n end\n val=escapejsonstring(val);\n if(len==1)\n obj=['\"' checkname(name,varargin{:}) '\": ' '\"',val,'\"'];\n\tif(isempty(name)) obj=['\"',val,'\"']; end\n 
txt=sprintf('%s%s%s%s',txt,padding1,obj);\n else\n txt=sprintf('%s%s%s%s',txt,padding0,['\"',val,'\"']);\n end\n if(e==len) sep=''; end\n txt=sprintf('%s%s',txt,sep);\nend\nif(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2json(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\npadding1=repmat(ws.tab,1,level);\npadding0=repmat(ws.tab,1,level+1);\nnl=ws.newline;\nsep=ws.sep;\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))\n if(isempty(name))\n \ttxt=sprintf('%s{%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n else\n \ttxt=sprintf('%s\"%s\": {%s%s\"_ArrayType_\": \"%s\",%s%s\"_ArraySize_\": %s,%s',...\n padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\\s+',','),nl);\n end\nelse\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)\n numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\\[',''),']','');\n else\n numtxt=matdata2json(item,level+1,varargin{:});\n end\n if(isempty(name))\n \ttxt=sprintf('%s%s',padding1,numtxt);\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n \ttxt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n else\n \t txt=sprintf('%s\"%s\": %s',padding1,checkname(name,varargin{:}),numtxt);\n end\n end\n return;\nend\ndataformat='%s%s%s%s%s';\n\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n end\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsSparse_\": ','1', sep);\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([iy(:),data'],level+2,varargin{:}), nl);\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,data],level+2,varargin{:}), nl);\n else\n % General case, store row and column indices.\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([ix,iy,data],level+2,varargin{:}), nl);\n end\nelse\n if(isreal(item))\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json(item(:)',level+2,varargin{:}), nl);\n else\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayIsComplex_\": ','1', sep);\n txt=sprintf(dataformat,txt,padding0,'\"_ArrayData_\": ',...\n matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);\n end\nend\ntxt=sprintf('%s%s%s',txt,padding1,'}');\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2json(mat,level,varargin)\n\nws=struct('tab',sprintf('\\t'),'newline',sprintf('\\n'),'sep',sprintf(',\\n'));\nws=jsonopt('whitespaces_',ws,varargin{:});\ntab=ws.tab;\nnl=ws.newline;\n\nif(size(mat,1)==1)\n pre='';\n post='';\n level=level-1;\nelse\n pre=sprintf('[%s',nl);\n 
post=sprintf('%s%s]',nl,repmat(tab,1,level-1));\nend\n\nif(isempty(mat))\n txt='null';\n return;\nend\nfloatformat=jsonopt('FloatFormat','%.10g',varargin{:});\n%if(numel(mat)>1)\n formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];\n%else\n% formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\\n')]];\n%end\n\nif(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)\n formatstr=[repmat(tab,1,level) formatstr];\nend\n\ntxt=sprintf(formatstr,mat');\ntxt(end-length(nl):end)=[];\nif(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)\n txt=regexprep(txt,'1','true');\n txt=regexprep(txt,'0','false');\nend\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],\\n['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\ntxt=[pre txt post];\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newstr=escapejsonstring(str)\nnewstr=str;\nisoct=exist('OCTAVE_VERSION','builtin');\nif(isoct)\n vv=sscanf(OCTAVE_VERSION,'%f');\n if(vv(1)>=3.8) isoct=0; end\nend\nif(isoct)\n escapechars={'\\a','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},escapechars{i});\n end\nelse\n escapechars={'\\a','\\b','\\f','\\n','\\r','\\t','\\v'};\n for i=1:length(escapechars);\n newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\\\','\\\\\\\\'));\n end\nend\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex1/ex1/lib/jsonlab/loadjson.m", "size": 18732, "source_encoding": "ibm852", "md5": "ab98cf173af2d50bbe8da4d6db252a20", "text": "function data = loadjson(fname,varargin)\n%\n% data=loadjson(fname,opt)\n% or\n% data=loadjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2011/09/09, including previous works from \n%\n% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713\n% created on 2009/11/02\n% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393\n% created on 2009/03/22\n% Joel Feenstra:\n% http://www.mathworks.com/matlabcentral/fileexchange/20565\n% created on 2008/07/03\n%\n% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% 
input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a JSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.FastArrayParser [1|0 or integer]: if set to 1, use a\n% speed-optimized array parser when loading an \n% array object. The fast array parser may \n% collapse block arrays into a single large\n% array similar to rules defined in cell2mat; 0 to \n% use a legacy parser; if set to a larger-than-1\n% value, this option will specify the minimum\n% dimension to enable the fast array parser. For\n% example, if the input is a 3D array, setting\n% FastArrayParser to 1 will return a 3D array;\n% setting to 2 will return a cell array of 2D\n% arrays; setting to 3 will return to a 2D cell\n% array of 1D vectors; setting to 4 will return a\n% 3D cell array.\n% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] are converted to arrays\n%\n% examples:\n% dat=loadjson('{\"obj\":{\"string\":\"value\",\"array\":[1,2,3]}}')\n% dat=loadjson(['examples' filesep 'example1.json'])\n% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\n\nif(jsonopt('ShowProgress',0,opt)==1)\n opt.progressbar_=waitbar(0,'loading ...');\nend\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\nif(isfield(opt,'progressbar_'))\n close(opt.progressbar_);\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction 
newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=data(j).x0x5F_ArraySize_;\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n if next_char ~= '}'\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n parse_char(':');\n val = parse_value(varargin{:});\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}'\n break;\n end\n parse_char(',');\n end\n end\n parse_char('}');\n\n%%-------------------------------------------------------------------------\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim2=[];\n arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});\n pbar=jsonopt('progressbar_',-1,varargin{:});\n\n if next_char ~= ']'\n\tif(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))\n [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);\n arraystr=['[' inStr(pos:endpos)];\n arraystr=regexprep(arraystr,'\"_NaN_\"','NaN');\n arraystr=regexprep(arraystr,'\"([-+]*)_Inf_\"','$1Inf');\n arraystr(arraystr==sprintf('\\n'))=[];\n arraystr(arraystr==sprintf('\\r'))=[];\n %arraystr=regexprep(arraystr,'\\s*,',','); % this is slow,sometimes needed\n if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D\n \tastr=inStr((e1l+1):(e1r-1));\n \tastr=regexprep(astr,'\"_NaN_\"','NaN');\n \tastr=regexprep(astr,'\"([-+]*)_Inf_\"','$1Inf');\n \tastr(astr==sprintf('\\n'))=[];\n \tastr(astr==sprintf('\\r'))=[];\n \tastr(astr==' ')='';\n \tif(isempty(find(astr=='[', 1))) % array is 2D\n dim2=length(sscanf(astr,'%f,',[1 inf]));\n \tend\n else % array is 1D\n \tastr=arraystr(2:end-1);\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, 
nextidx]=sscanf(astr,'%f,',[1,inf]);\n \tif(nextidx>=length(astr)-1)\n object=obj;\n pos=endpos;\n parse_char(']');\n return;\n \tend\n end\n if(~isempty(dim2))\n \tastr=arraystr;\n \tastr(astr=='[')='';\n \tastr(astr==']')='';\n \tastr(astr==' ')='';\n \t[obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);\n \tif(nextidx>=length(astr)-1)\n object=reshape(obj,dim2,numel(obj)/dim2)';\n pos=endpos;\n parse_char(']');\n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n return;\n \tend\n end\n arraystr=regexprep(arraystr,'\\]\\s*,','];');\n\telse\n arraystr='[';\n\tend\n try\n if(isoct && regexp(arraystr,'\"','once'))\n error('Octave eval can produce empty cells for JSON-like input');\n end\n object=eval(arraystr);\n pos=endpos;\n catch\n while 1\n newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);\n val = parse_value(newopt);\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n parse_char(',');\n end\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n parse_char(']');\n \n if(pbar>0)\n waitbar(pos/length(inStr),pbar,'loading ...');\n end\n%%-------------------------------------------------------------------------\n\nfunction parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr len esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n if inStr(pos) ~= '\"'\n error_pos('String starting with \" expected at position %d');\n else\n pos = pos + 1;\n end\n str = '';\n while pos <= len\n while index_esc <= len_esc && esc(index_esc) < pos\n index_esc = index_esc + 1;\n end\n if index_esc > len_esc\n str = [str inStr(pos:len)];\n pos = len + 1;\n break;\n else\n str = [str inStr(pos:esc(index_esc)-1)];\n pos = esc(index_esc);\n end\n nstr = length(str); switch inStr(pos)\n case '\"'\n pos = pos + 1;\n if(~isempty(str))\n if(strcmp(str,'_Inf_'))\n str=Inf;\n elseif(strcmp(str,'-_Inf_'))\n str=-Inf;\n elseif(strcmp(str,'_NaN_'))\n str=NaN;\n end\n end\n return;\n case '\\'\n if pos+1 > len\n error_pos('End of file reached right after escape character');\n end\n pos = pos + 1;\n switch inStr(pos)\n case {'\"' '\\' '/'}\n str(nstr+1) = inStr(pos);\n pos = pos + 1;\n case {'b' 'f' 'n' 'r' 't'}\n str(nstr+1) = sprintf(['\\' inStr(pos)]);\n pos = pos + 1;\n case 'u'\n if pos+4 > len\n error_pos('End of file reached in escaped unicode character');\n end\n str(nstr+(1:6)) = inStr(pos-1:pos+4);\n pos = pos + 5;\n end\n otherwise % should never happen\n str(nstr+1) = inStr(pos), keyboard\n pos = pos + 1;\n end\n end\n error_pos('End of file while expecting end of 
inStr');\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct\n currstr=inStr(pos:end);\n numstr=0;\n if(isoct~=0)\n numstr=regexp(currstr,'^\\s*-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+\\-]?\\d+)?','end');\n [num, one] = sscanf(currstr, '%f', 1);\n delta=numstr+1;\n else\n [num, one, err, delta] = sscanf(currstr, '%f', 1);\n if ~isempty(err)\n error_pos('Error reading number at position %d');\n end\n end\n pos = pos + delta-1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n \n pbar=jsonopt('progressbar_',-1,varargin{:});\n if(pbar>0)\n waitbar(pos/len,pbar,'loading ...');\n end\n \n switch(inStr(pos))\n case '\"'\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'-','0','1','2','3','4','5','6','7','8','9'}\n val = parse_number(varargin{:});\n return;\n case 't'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')\n val = true;\n pos = pos + 4;\n return;\n end\n case 'f'\n if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')\n val = false;\n pos = pos + 5;\n return;\n end\n case 'n'\n if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')\n val = [];\n pos = pos + 4;\n return;\n end\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation 
mark');\n%%-------------------------------------------------------------------------\nfunction [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "loadubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex1/ex1/lib/jsonlab/loadubjson.m", "size": 15574, "source_encoding": "utf_8", "md5": "5974e78e71b81b1e0f76123784b951a4", "text": "function data = loadubjson(fname,varargin)\n%\n% data=loadubjson(fname,opt)\n% or\n% data=loadubjson(fname,'param1',value1,'param2',value2,...)\n%\n% parse a JSON (JavaScript Object Notation) file or string\n%\n% authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/01\n%\n% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% fname: input file name, if fname contains \"{}\" or \"[]\", fname\n% will be interpreted as a UBJSON string\n% opt: a struct to store parsing options, opt can be replaced by \n% a list of ('param',value) pairs - the param string is equivallent\n% to a field in opt. opt can have the following \n% fields (first in [.|.] is the default)\n%\n% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat\n% for each element of the JSON data, and group \n% arrays based on the cell2mat rules.\n% opt.IntEndian [B|L]: specify the endianness of the integer fields\n% in the UBJSON input data. B - Big-Endian format for \n% integers (as required in the UBJSON specification); \n% L - input integer fields are in Little-Endian order.\n%\n% output:\n% dat: a cell array, where {...} blocks are converted into cell arrays,\n% and [...] 
are converted to arrays\n%\n% examples:\n% obj=struct('string','value','array',[1 2 3]);\n% ubjdata=saveubjson('obj',obj);\n% dat=loadubjson(ubjdata)\n% dat=loadubjson(['examples' filesep 'example1.ubj'])\n% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details \n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nglobal pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian\n\nif(regexp(fname,'[\\{\\}\\]\\[]','once'))\n string=fname;\nelseif(exist(fname,'file'))\n fid = fopen(fname,'rb');\n string = fread(fid,inf,'uint8=>char')';\n fclose(fid);\nelse\n error('input file does not exist');\nend\n\npos = 1; len = length(string); inStr = string;\nisoct=exist('OCTAVE_VERSION','builtin');\narraytoken=find(inStr=='[' | inStr==']' | inStr=='\"');\njstr=regexprep(inStr,'\\\\\\\\',' ');\nescquote=regexp(jstr,'\\\\\"');\narraytoken=sort([arraytoken escquote]);\n\n% String delimiters and escape chars identified to improve speed:\nesc = find(inStr=='\"' | inStr=='\\' ); % comparable to: regexp(inStr, '[\"\\\\]');\nindex_esc = 1; len_esc = length(esc);\n\nopt=varargin2struct(varargin{:});\nfileendian=upper(jsonopt('IntEndian','B',opt));\n[os,maxelem,systemendian]=computer;\n\njsoncount=1;\nwhile pos <= len\n switch(next_char)\n case '{'\n data{jsoncount} = parse_object(opt);\n case '['\n data{jsoncount} = parse_array(opt);\n otherwise\n error_pos('Outer level structure must be an object or an array');\n end\n jsoncount=jsoncount+1;\nend % while\n\njsoncount=length(data);\nif(jsoncount==1 && iscell(data))\n data=data{1};\nend\n\nif(~isempty(data))\n if(isstruct(data)) % data can be a struct array\n data=jstruct2array(data);\n elseif(iscell(data))\n data=jcell2array(data);\n end\nend\n\n\n%%\nfunction newdata=parse_collection(id,data,obj)\n\nif(jsoncount>0 && exist('data','var')) \n if(~iscell(data))\n newdata=cell(1);\n newdata{1}=data;\n data=newdata;\n end\nend\n\n%%\nfunction newdata=jcell2array(data)\nlen=length(data);\nnewdata=data;\nfor i=1:len\n if(isstruct(data{i}))\n newdata{i}=jstruct2array(data{i});\n elseif(iscell(data{i}))\n newdata{i}=jcell2array(data{i});\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction newdata=jstruct2array(data)\nfn=fieldnames(data);\nnewdata=data;\nlen=length(data);\nfor i=1:length(fn) % depth-first\n for j=1:len\n if(isstruct(getfield(data(j),fn{i})))\n newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));\n end\n end\nend\nif(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))\n newdata=cell(len,1);\n for j=1:len\n ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);\n iscpx=0;\n if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))\n if(data(j).x0x5F_ArrayIsComplex_)\n iscpx=1;\n end\n end\n if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))\n if(data(j).x0x5F_ArrayIsSparse_)\n if(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n dim=double(data(j).x0x5F_ArraySize_);\n if(iscpx && size(ndata,2)==4-any(dim==1))\n ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));\n end\n if isempty(ndata)\n % All-zeros sparse\n ndata=sparse(dim(1),prod(dim(2:end)));\n elseif dim(1)==1\n % Sparse row vector\n ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));\n elseif dim(2)==1\n % Sparse column vector\n ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));\n else\n % Generic sparse array.\n 
ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));\n end\n else\n if(iscpx && size(ndata,2)==4)\n ndata(:,3)=complex(ndata(:,3),ndata(:,4));\n end\n ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));\n end\n end\n elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))\n if(iscpx && size(ndata,2)==2)\n ndata=complex(ndata(:,1),ndata(:,2));\n end\n ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);\n end\n newdata{j}=ndata;\n end\n if(len==1)\n newdata=newdata{1};\n end\nend\n\n%%-------------------------------------------------------------------------\nfunction object = parse_object(varargin)\n parse_char('{');\n object = [];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1); % TODO\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n count=double(parse_number());\n end\n if next_char ~= '}'\n num=0;\n while 1\n str = parseStr(varargin{:});\n if isempty(str)\n error_pos('Name of value at position %d cannot be empty');\n end\n %parse_char(':');\n val = parse_value(varargin{:});\n num=num+1;\n eval( sprintf( 'object.%s = val;', valid_field(str) ) );\n if next_char == '}' || (count>=0 && num>=count)\n break;\n end\n %parse_char(',');\n end\n end\n if(count==-1)\n parse_char('}');\n end\n\n%%-------------------------------------------------------------------------\nfunction [cid,len]=elem_info(type)\nid=strfind('iUIlLdD',type);\ndataclass={'int8','uint8','int16','int32','int64','single','double'};\nbytelen=[1,1,2,4,8,4,8];\nif(id>0)\n cid=dataclass{id};\n len=bytelen(id);\nelse\n error_pos('unsupported type at position %d');\nend\n%%-------------------------------------------------------------------------\n\n\nfunction [data adv]=parse_block(type,count,varargin)\nglobal pos inStr isoct fileendian systemendian\n[cid,len]=elem_info(type);\ndatastr=inStr(pos:pos+len*count-1);\nif(isoct)\n newdata=int8(datastr);\nelse\n newdata=uint8(datastr);\nend\nid=strfind('iUIlLdD',type);\nif(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,cid));\nend\ndata=typecast(newdata,cid);\nadv=double(len*count);\n\n%%-------------------------------------------------------------------------\n\n\nfunction object = parse_array(varargin) % JSON array is written in row-major order\nglobal pos inStr isoct\n parse_char('[');\n object = cell(0, 1);\n dim=[];\n type='';\n count=-1;\n if(next_char == '$')\n type=inStr(pos+1);\n pos=pos+2;\n end\n if(next_char == '#')\n pos=pos+1;\n if(next_char=='[')\n dim=parse_array(varargin{:});\n count=prod(double(dim));\n else\n count=double(parse_number());\n end\n end\n if(~isempty(type))\n if(count>=0)\n [object adv]=parse_block(type,count,varargin{:});\n if(~isempty(dim))\n object=reshape(object,dim);\n end\n pos=pos+adv;\n return;\n else\n endpos=matching_bracket(inStr,pos);\n [cid,len]=elem_info(type);\n count=(endpos-pos)/len;\n [object adv]=parse_block(type,count,varargin{:});\n pos=pos+adv;\n parse_char(']');\n return;\n end\n end\n if next_char ~= ']'\n while 1\n val = parse_value(varargin{:});\n object{end+1} = val;\n if next_char == ']'\n break;\n end\n %parse_char(',');\n end\n end\n if(jsonopt('SimplifyCell',0,varargin{:})==1)\n try\n oldobj=object;\n object=cell2mat(object')';\n if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)\n object=oldobj;\n elseif(size(object,1)>1 && ndims(object)==2)\n object=object';\n end\n catch\n end\n end\n if(count==-1)\n parse_char(']');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction 
parse_char(c)\n global pos inStr len\n skip_whitespace;\n if pos > len || inStr(pos) ~= c\n error_pos(sprintf('Expected %c at position %%d', c));\n else\n pos = pos + 1;\n skip_whitespace;\n end\n\n%%-------------------------------------------------------------------------\n\nfunction c = next_char\n global pos inStr len\n skip_whitespace;\n if pos > len\n c = [];\n else\n c = inStr(pos);\n end\n\n%%-------------------------------------------------------------------------\n\nfunction skip_whitespace\n global pos inStr len\n while pos <= len && isspace(inStr(pos))\n pos = pos + 1;\n end\n\n%%-------------------------------------------------------------------------\nfunction str = parseStr(varargin)\n global pos inStr esc index_esc len_esc\n % len, ns = length(inStr), keyboard\n type=inStr(pos);\n if type ~= 'S' && type ~= 'C' && type ~= 'H'\n error_pos('String starting with S expected at position %d');\n else\n pos = pos + 1;\n end\n if(type == 'C')\n str=inStr(pos);\n pos=pos+1;\n return;\n end\n bytelen=double(parse_number());\n if(length(inStr)>=pos+bytelen-1)\n str=inStr(pos:pos+bytelen-1);\n pos=pos+bytelen;\n else\n error_pos('End of file while expecting end of inStr');\n end\n\n%%-------------------------------------------------------------------------\n\nfunction num = parse_number(varargin)\n global pos inStr len isoct fileendian systemendian\n id=strfind('iUIlLdD',inStr(pos));\n if(isempty(id))\n error_pos('expecting a number at position %d');\n end\n type={'int8','uint8','int16','int32','int64','single','double'};\n bytelen=[1,1,2,4,8,4,8];\n datastr=inStr(pos+1:pos+bytelen(id));\n if(isoct)\n newdata=int8(datastr);\n else\n newdata=uint8(datastr);\n end\n if(id<=5 && fileendian~=systemendian)\n newdata=swapbytes(typecast(newdata,type{id}));\n end\n num=typecast(newdata,type{id});\n pos = pos + bytelen(id)+1;\n\n%%-------------------------------------------------------------------------\n\nfunction val = parse_value(varargin)\n global pos inStr len\n true = 1; false = 0;\n\n switch(inStr(pos))\n case {'S','C','H'}\n val = parseStr(varargin{:});\n return;\n case '['\n val = parse_array(varargin{:});\n return;\n case '{'\n val = parse_object(varargin{:});\n if isstruct(val)\n if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))\n val=jstruct2array(val);\n end\n elseif isempty(val)\n val = struct;\n end\n return;\n case {'i','U','I','l','L','d','D'}\n val = parse_number(varargin{:});\n return;\n case 'T'\n val = true;\n pos = pos + 1;\n return;\n case 'F'\n val = false;\n pos = pos + 1;\n return;\n case {'Z','N'}\n val = [];\n pos = pos + 1;\n return;\n end\n error_pos('Value expected at position %d');\n%%-------------------------------------------------------------------------\n\nfunction error_pos(msg)\n global pos inStr len\n poShow = max(min([pos-15 pos-1 pos pos+20],len),1);\n if poShow(3) == poShow(2)\n poShow(3:4) = poShow(2)+[0 -1]; % display nothing after\n end\n msg = [sprintf(msg, pos) ': ' ...\n inStr(poShow(1):poShow(2)) '' inStr(poShow(3):poShow(4)) ];\n error( ['JSONparser:invalidFormat: ' msg] );\n\n%%-------------------------------------------------------------------------\n\nfunction str = valid_field(str)\nglobal isoct\n% From MATLAB doc: field names must begin with a letter, which may be\n% followed by any combination of letters, digits, and underscores.\n% Invalid characters will be converted to underscores, and the prefix\n% \"x0x[Hex code]_\" will be added if the first character is not a letter.\n pos=regexp(str,'^[^A-Za-z]','once');\n 
if(~isempty(pos))\n if(~isoct)\n str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');\n else\n str=sprintf('x0x%X_%s',char(str(1)),str(2:end));\n end\n end\n if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end\n if(~isoct)\n str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');\n else\n pos=regexp(str,'[^0-9A-Za-z_]');\n if(isempty(pos)) return; end\n str0=str;\n pos0=[0 pos(:)' length(str)];\n str='';\n for i=1:length(pos)\n str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];\n end\n if(pos(end)~=length(str))\n str=[str str0(pos0(end-1)+1:pos0(end))];\n end\n end\n %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';\n\n%%-------------------------------------------------------------------------\nfunction endpos = matching_quote(str,pos)\nlen=length(str);\nwhile(pos1 && str(pos-1)=='\\'))\n endpos=pos;\n return;\n end \n end\n pos=pos+1;\nend\nerror('unmatched quotation mark');\n%%-------------------------------------------------------------------------\nfunction [endpos e1l e1r maxlevel] = matching_bracket(str,pos)\nglobal arraytoken\nlevel=1;\nmaxlevel=level;\nendpos=0;\nbpos=arraytoken(arraytoken>=pos);\ntokens=str(bpos);\nlen=length(tokens);\npos=1;\ne1l=[];\ne1r=[];\nwhile(pos<=len)\n c=tokens(pos);\n if(c==']')\n level=level-1;\n if(isempty(e1r)) e1r=bpos(pos); end\n if(level==0)\n endpos=bpos(pos);\n return\n end\n end\n if(c=='[')\n if(isempty(e1l)) e1l=bpos(pos); end\n level=level+1;\n maxlevel=max(maxlevel,level);\n end\n if(c=='\"')\n pos=matching_quote(tokens,pos+1);\n end\n pos=pos+1;\nend\nif(endpos==0) \n error('unmatched \"]\"');\nend\n\n"} +{"plateform": "github", "repo_name": "jhalakpatel/AI-ML-DL-master", "name": "saveubjson.m", "ext": ".m", "path": "AI-ML-DL-master/AndrewNg_MachineLearning/machine-learning-ex1/ex1/lib/jsonlab/saveubjson.m", "size": 16123, "source_encoding": "utf_8", "md5": "61d4f51010aedbf97753396f5d2d9ec0", "text": "function json=saveubjson(rootname,obj,varargin)\n%\n% json=saveubjson(rootname,obj,filename)\n% or\n% json=saveubjson(rootname,obj,opt)\n% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)\n%\n% convert a MATLAB object (cell, struct or array) into a Universal \n% Binary JSON (UBJSON) binary string\n%\n% author: Qianqian Fang (fangq nmr.mgh.harvard.edu)\n% created on 2013/08/17\n%\n% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $\n%\n% input:\n% rootname: the name of the root-object, when set to '', the root name\n% is ignored, however, when opt.ForceRootName is set to 1 (see below),\n% the MATLAB variable name will be used as the root name.\n% obj: a MATLAB object (array, cell, cell array, struct, struct array)\n% filename: a string for the file name to save the output UBJSON data\n% opt: a struct for additional options, ignore to use default values.\n% opt can have the following fields (first in [.|.] 
is the default)\n%\n% opt.FileName [''|string]: a file name to save the output JSON data\n% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D\n% array in JSON array format; if sets to 1, an\n% array will be shown as a struct with fields\n% \"_ArrayType_\", \"_ArraySize_\" and \"_ArrayData_\"; for\n% sparse arrays, the non-zero elements will be\n% saved to _ArrayData_ field in triplet-format i.e.\n% (ix,iy,val) and \"_ArrayIsSparse_\" will be added\n% with a value of 1; for a complex array, the \n% _ArrayData_ array will include two columns \n% (4 for sparse) to record the real and imaginary \n% parts, and also \"_ArrayIsComplex_\":1 is added. \n% opt.ParseLogical [1|0]: if this is set to 1, logical array elem\n% will use true/false rather than 1/0.\n% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single\n% numerical element will be shown without a square\n% bracket, unless it is the root object; if 0, square\n% brackets are forced for any numerical arrays.\n% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson\n% will use the name of the passed obj variable as the \n% root object name; if obj is an expression and \n% does not have a name, 'root' will be used; if this \n% is set to 0 and rootname is empty, the root level \n% will be merged down to the lower level.\n% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),\n% for example, if opt.JSON='foo', the JSON data is\n% wrapped inside a function call as 'foo(...);'\n% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson \n% back to the string form\n%\n% opt can be replaced by a list of ('param',value) pairs. The param \n% string is equivallent to a field in opt and is case sensitive.\n% output:\n% json: a binary string in the UBJSON format (see http://ubjson.org)\n%\n% examples:\n% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],... 
\n% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...\n% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...\n% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...\n% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...\n% 'SpecialData',[nan, inf, -inf]);\n% saveubjson('jsonmesh',jsonmesh)\n% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')\n%\n% license:\n% BSD, see LICENSE_BSD.txt files for details\n%\n% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)\n%\n\nif(nargin==1)\n varname=inputname(1);\n obj=rootname;\n if(isempty(varname)) \n varname='root';\n end\n rootname=varname;\nelse\n varname=inputname(2);\nend\nif(length(varargin)==1 && ischar(varargin{1}))\n opt=struct('FileName',varargin{1});\nelse\n opt=varargin2struct(varargin{:});\nend\nopt.IsOctave=exist('OCTAVE_VERSION','builtin');\nrootisarray=0;\nrootlevel=1;\nforceroot=jsonopt('ForceRootName',0,opt);\nif((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)\n rootisarray=1;\n rootlevel=0;\nelse\n if(isempty(rootname))\n rootname=varname;\n end\nend\nif((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)\n rootname='root';\nend\njson=obj2ubjson(rootname,obj,rootlevel,opt);\nif(~rootisarray)\n json=['{' json '}'];\nend\n\njsonp=jsonopt('JSONP','',opt);\nif(~isempty(jsonp))\n json=[jsonp '(' json ')'];\nend\n\n% save to a file if FileName is set, suggested by Patrick Rapin\nif(~isempty(jsonopt('FileName','',opt)))\n fid = fopen(opt.FileName, 'wb');\n fwrite(fid,json);\n fclose(fid);\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=obj2ubjson(name,item,level,varargin)\n\nif(iscell(item))\n txt=cell2ubjson(name,item,level,varargin{:});\nelseif(isstruct(item))\n txt=struct2ubjson(name,item,level,varargin{:});\nelseif(ischar(item))\n txt=str2ubjson(name,item,level,varargin{:});\nelse\n txt=mat2ubjson(name,item,level,varargin{:});\nend\n\n%%-------------------------------------------------------------------------\nfunction txt=cell2ubjson(name,item,level,varargin)\ntxt='';\nif(~iscell(item))\n error('input is not a cell');\nend\n\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item); % let's handle 1D cell first\nif(len>1) \n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) '[']; name=''; \n else\n txt='['; \n end\nelseif(len==0)\n if(~isempty(name))\n txt=[S_(checkname(name,varargin{:})) 'Z']; name=''; \n else\n txt='Z'; \n end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=struct2ubjson(name,item,level,varargin)\ntxt='';\nif(~isstruct(item))\n\terror('input is not a struct');\nend\ndim=size(item);\nif(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now\n item=reshape(item,dim(1),numel(item)/dim(1));\n dim=size(item);\nend\nlen=numel(item);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nfor j=1:dim(2)\n if(dim(1)>1) txt=[txt '[']; end\n for i=1:dim(1)\n names = fieldnames(item(i,j));\n if(~isempty(name) && len==1)\n txt=[txt S_(checkname(name,varargin{:})) '{']; \n else\n 
txt=[txt '{']; \n end\n if(~isempty(names))\n for e=1:length(names)\n\t txt=[txt obj2ubjson(names{e},getfield(item(i,j),...\n names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];\n end\n end\n txt=[txt '}'];\n end\n if(dim(1)>1) txt=[txt ']']; end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=str2ubjson(name,item,level,varargin)\ntxt='';\nif(~ischar(item))\n error('input is not a string');\nend\nitem=reshape(item, max(size(item),[1 0]));\nlen=size(item,1);\n\nif(~isempty(name)) \n if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end\nelse\n if(len>1) txt='['; end\nend\nisoct=jsonopt('IsOctave',0,varargin{:});\nfor e=1:len\n val=item(e,:);\n if(len==1)\n obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];\n\tif(isempty(name)) obj=['',S_(val),'']; end\n txt=[txt,'',obj];\n else\n txt=[txt,'',['',S_(val),'']];\n end\nend\nif(len>1) txt=[txt ']']; end\n\n%%-------------------------------------------------------------------------\nfunction txt=mat2ubjson(name,item,level,varargin)\nif(~isnumeric(item) && ~islogical(item))\n error('input is not an array');\nend\n\nif(length(size(item))>2 || issparse(item) || ~isreal(item) || ...\n isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))\n cid=I_(uint32(max(size(item))));\n if(isempty(name))\n \ttxt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];\n else\n if(isempty(item))\n txt=[S_(checkname(name,varargin{:})),'Z'];\n return;\n else\n \t txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];\n end\n end\nelse\n if(isempty(name))\n \ttxt=matdata2ubjson(item,level+1,varargin{:});\n else\n if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)\n numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\\[',''),']','');\n \ttxt=[S_(checkname(name,varargin{:})) numtxt];\n else\n \t txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];\n end\n end\n return;\nend\nif(issparse(item))\n [ix,iy]=find(item);\n data=full(item(find(item)));\n if(~isreal(item))\n data=[real(data(:)),imag(data(:))];\n if(size(item,1)==1)\n % Kludge to have data's 'transposedness' match item's.\n % (Necessary for complex row vector handling below.)\n data=data';\n end\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n end\n txt=[txt,S_('_ArrayIsSparse_'),'T'];\n if(size(item,1)==1)\n % Row vector, store only column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([iy(:),data'],level+2,varargin{:})];\n elseif(size(item,2)==1)\n % Column vector, store only row indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,data],level+2,varargin{:})];\n else\n % General case, store row and column indices.\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([ix,iy,data],level+2,varargin{:})];\n end\nelse\n if(isreal(item))\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson(item(:)',level+2,varargin{:})];\n else\n txt=[txt,S_('_ArrayIsComplex_'),'T'];\n txt=[txt,S_('_ArrayData_'),...\n matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];\n end\nend\ntxt=[txt,'}'];\n\n%%-------------------------------------------------------------------------\nfunction txt=matdata2ubjson(mat,level,varargin)\nif(isempty(mat))\n txt='Z';\n return;\nend\nif(size(mat,1)==1)\n level=level-1;\nend\ntype='';\nhasnegtive=(mat<0);\nif(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))\n if(isempty(hasnegtive))\n 
if(max(mat(:))<=2^8)\n type='U';\n end\n end\n if(isempty(type))\n % todo - need to consider negative ones separately\n id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);\n if(isempty(find(id)))\n error('high-precision data is not yet supported');\n end\n key='iIlL';\n\ttype=key(find(id));\n end\n txt=[I_a(mat(:),type,size(mat))];\nelseif(islogical(mat))\n logicalval='FT';\n if(numel(mat)==1)\n txt=logicalval(mat+1);\n else\n txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];\n end\nelse\n if(numel(mat)==1)\n txt=['[' D_(mat) ']'];\n else\n txt=D_a(mat(:),'D',size(mat));\n end\nend\n\n%txt=regexprep(mat2str(mat),'\\s+',',');\n%txt=regexprep(txt,';',sprintf('],['));\n% if(nargin>=2 && size(mat,1)>1)\n% txt=regexprep(txt,'\\[',[repmat(sprintf('\\t'),1,level) '[']);\n% end\nif(any(isinf(mat(:))))\n txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','\"$1_Inf_\"',varargin{:}));\nend\nif(any(isnan(mat(:))))\n txt=regexprep(txt,'NaN',jsonopt('NaN','\"_NaN_\"',varargin{:}));\nend\n\n%%-------------------------------------------------------------------------\nfunction newname=checkname(name,varargin)\nisunpack=jsonopt('UnpackHex',1,varargin{:});\nnewname=name;\nif(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))\n return\nend\nif(isunpack)\n isoct=jsonopt('IsOctave',0,varargin{:});\n if(~isoct)\n newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');\n else\n pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');\n pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');\n if(isempty(pos)) return; end\n str0=name;\n pos0=[0 pend(:)' length(name)];\n newname='';\n for i=1:length(pos)\n newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];\n end\n if(pos(end)~=length(name))\n newname=[newname str0(pos0(end-1)+1:pos0(end))];\n end\n end\nend\n%%-------------------------------------------------------------------------\nfunction val=S_(str)\nif(length(str)==1)\n val=['C' str];\nelse\n val=['S' I_(int32(length(str))) str];\nend\n%%-------------------------------------------------------------------------\nfunction val=I_(num)\nif(~isinteger(num))\n error('input is not an integer');\nend\nif(num>=0 && num<255)\n val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];\n return;\nend\nkey='iIlL';\ncid={'int8','int16','int32','int64'};\nfor i=1:4\n if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))\n val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];\n return;\n end\nend\nerror('unsupported integer');\n\n%%-------------------------------------------------------------------------\nfunction val=D_(num)\nif(~isfloat(num))\n error('input is not a float');\nend\n\nif(isa(num,'single'))\n val=['d' data2byte(num,'uint8')];\nelse\n val=['D' data2byte(num,'uint8')];\nend\n%%-------------------------------------------------------------------------\nfunction data=I_a(num,type,dim,format)\nid=find(ismember('iUIlL',type));\n\nif(id==0)\n error('unsupported integer array');\nend\n\n% based on UBJSON specs, all integer types are stored in big endian format\n\nif(id==1)\n data=data2byte(swapbytes(int8(num)),'uint8');\n blen=1;\nelseif(id==2)\n data=data2byte(swapbytes(uint8(num)),'uint8');\n blen=1;\nelseif(id==3)\n data=data2byte(swapbytes(int16(num)),'uint8');\n blen=2;\nelseif(id==4)\n data=data2byte(swapbytes(int32(num)),'uint8');\n blen=4;\nelseif(id==5)\n data=data2byte(swapbytes(int64(num)),'uint8');\n blen=8;\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || 
strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];\n end\n data=['[' data(:)'];\nelse\n data=reshape(data,blen,numel(data)/blen);\n data(2:blen+1,:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction data=D_a(num,type,dim,format)\nid=find(ismember('dD',type));\n\nif(id==0)\n error('unsupported float array');\nend\n\nif(id==1)\n data=data2byte(single(num),'uint8');\nelseif(id==2)\n data=data2byte(double(num),'uint8');\nend\n\nif(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))\n format='opt';\nend\nif((nargin<4 || strcmp(format,'opt')) && numel(num)>1)\n if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))\n cid=I_(uint32(max(dim)));\n data=['$' type '#' I_a(dim,cid(1)) data(:)'];\n else\n data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];\n end\n data=['[' data];\nelse\n data=reshape(data,(id*4),length(data)/(id*4));\n data(2:(id*4+1),:)=data;\n data(1,:)=type;\n data=data(:)';\n data=['[' data(:)' ']'];\nend\n%%-------------------------------------------------------------------------\nfunction bytes=data2byte(varargin)\nbytes=typecast(varargin{:});\nbytes=bytes(:)';\n"} +{"plateform": "github", "repo_name": "SeRViCE-Lab/FormationControl-master", "name": "detector.m", "ext": ".m", "path": "FormationControl-master/sphero_ros/detector.m", "size": 6914, "source_encoding": "utf_8", "md5": "d28e558faed25d1343ebb11ae9439023", "text": "% Version 1.4:\n% - Replaces centroids by median of upper edge of the bbox.\n% this provides a more stable representation for the\n% location of the spheros\n%\n% Version 1.3:\n% - Sends back the run time as a parameter\n%\n% Version 1.2:\n% - Refined search method:\n% blob detection, then circle detection around the\n% blobs, but only when the correct number of robots\n% is not detected\n%\n% Version 1.1:\n% - It doesn't work, its just for trial purposes\n%\n% Version 1.0: \n% - Initial version\n% - Detects blobs of resonable size using binarization\n% according to a threshold\n% - Can distinguish between robots that have collided (using erosion) \n%\n%\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nfunction [locs, bboxes,t] = detect_SpheroV1_4 (frame, numRob)\ntic\n% initialize variables\nlocs = zeros(2, numRob);\ncentersTemp(:,1) = [0;0]; \nbboxes = zeros(4, numRob);\nbboxesTemp(:,1) = [0;0;0;0];\nthresh = 0.9;\nnewDetectedTotal = 0;\nnumBlobsChecked = 0;\n\n% resizing the image\nframe = imresize(frame, [480,640]);\n\n% changing frame to grayscale\nframeGray = rgb2gray(frame); \n% figure;\n% imshow(frameGray);\n\n% binarize blurred image usign a threshold\nframeBin = imbinarize (frameGray,thresh); % generate binary image using thresh\n% figure;\n% imshow(frameBin);\n\n% erode the image to remove noise\nerodeElt = strel('disk',5);\nframeEr = imerode(frameBin,erodeElt);\n% figure;\n% imshow(frameEr);\n\n% dilate eroded image\ndilateElt = strel('disk',5);\nframeDil = imdilate(frameEr, dilateElt);\n% figure;\n% imshow(frameDil);\n\n% detect large enough blobs\nwhiteBlobs = bwpropfilt(frameDil, 'Area', [20, 100000]); % find white blobs\n% figure;\n% imshow(whiteBlobs);\n\n% get statistics from whiteBolobs image\nstats1 = regionprops ( logical(whiteBlobs), ...\n 
'BoundingBox', 'Centroid', 'Area',...\n 'MajorAxisLength', 'MinorAxisLength');\n\n% organize data\ncenter1 = reshape([stats1.Centroid]', 2, numel(stats1));\nbboxes1 = reshape([stats1.BoundingBox]', 4, numel(stats1)); % format: ULcorner(x,y), x-width, y-width\narea1 = reshape([stats1.Area]', 1, numel(stats1));\nmajAxLeng1 = reshape([stats1.MajorAxisLength]', 1, numel(stats1));\nminAxLeng1 = reshape([stats1.MinorAxisLength]', 1, numel(stats1));\n\nnumRobDetect = numel(stats1); % number of robots detected\n\n% check to see if all robots were detected\nif (numRobDetect == numRob) % if robots detected\n bboxes = bboxes1;\n locs(:,:) = bboxes(1:2, :) + [bboxes(3,:)/2;zeros(1,numel(bboxes)/4)];\nelseif (numRobDetect > numRob)\n disp('Error: More objects detected than spheros');\n disp('Centers will be set to zero');\nelse % objects detected < num Spheros\n % calculate ratios of maj/min axis length\n for i = 1 : numel(stats1)\n maj_minAxRatio(i) = majAxLeng1(i)/ minAxLeng1(i);\n end\n \n % sort the detected blobs based on maj/min axis ratio\n [sortedAxRatio,sortAxRatioIndex] = sort(maj_minAxRatio); %finding sorting index using ratio\n stats2 = stats1(sortAxRatioIndex); % sort stats based on index obtained\n \n % organize data\n centers2 = reshape([stats2.Centroid]', 2, numel(stats2));\n bboxes2 = reshape([stats2.BoundingBox]', 4, numel(stats2)); % format: ULcorner(x,y), x-width, y-width\n area2 = reshape([stats2.Area]', 1, numel(stats2));\n majAxLeng2 = reshape([stats2.MajorAxisLength]', 1, numel(stats2));\n minAxLeng2 = reshape([stats2.MinorAxisLength]', 1, numel(stats2));\n \n % go through list to detect circles in blobs\n for i = numel(stats2) : -1 : 1\n if (numRobDetect ~= numRob)\n box = bboxes2(:,i); % get bbox\n center = centers2(:,i); % store center\n cornerUL = [box(1); box(2)];% store UL corner\n xCorner = box(1);\n yCorner = box(2);\n xWidth = box(3);\n yWidth = box(4);\n \n % zoom in on object\n xWidthN = xWidth * 1.5; % new x-width\n yWidthN = yWidth * 1.5; % new y-width\n dxWidth = xWidthN -xWidth; % variation in xWidth\n dyWidth = yWidthN -yWidth; % variation in yWidth\n xCornerN = xCorner - dxWidth/2; % new x for UL corner\n yCornerN = yCorner - dyWidth/2; % new y for UL corner\n \n boxN = [xCornerN, yCornerN, xWidthN, yWidthN]; % new bbox\n % take only image in new bbox\n frameCrop = frameGray( ...\n max(0,round(yCornerN)) : min(round(yCornerN + yWidthN), 480) ,...\n max(0,round(xCornerN)) : min(round(xCornerN + xWidthN), 680));\n% frame;\n% imshow(frameCrop);\n% hold on;\n% scatter(xWidthN/2 ,yWidthN/2, 'filled', 'LineWidth', 2); % display center on image\n% hold off;\n \n % use circle detection on zoomed image\n d = min(xWidthN, yWidthN); % minimum of new bbox sides\n tic\n [c, r] = imfindcircles(frameCrop,[round(d*0.1667), 3*round(0.1667*d)-1],'Sensitivity',0.9); % find circles\n toc\n% frame;\n% imshow(frameCrop);\n% hold on;\n% viscircles(c, r,'Color','b');\n% hold off\n% \n % moving back centers to the initial frame\n cFrame = c + [xCornerN, yCornerN]; % cFrame = [x1, y1 ; x2, y2; x3,y3 ; ...]\n% frame;\n% imshow(frameGray);\n% hold on;\n% viscircles(cFrame, r,'Color','b');\n% hold off;\n \n % saving the new centers and bboxes\n newDetected = numel(cFrame)/2; \n for j = 1 : newDetected\n centersTemp(:,numel(centersTemp)/2+1) = cFrame(j,:); % sotre new center\n cornerUL = cFrame(j,:) - r(j); % calculate new corener for bbox\n bboxesTemp(:,numel(bboxesTemp)/4+1) = [cornerUL' ; 2*r(j); 2*r(j)]; % store new bboc\n end\n \n % deleting old centers2 and bboxes2\n centers2(:,i) = 
[];\n bboxes2(:,i) = [];\n \n % update numBlobsChecked, newDetectedTotal, and numRobDetect\n numBlobsChecked = numBlobsChecked + 1; % keep track of number of blobs checked for robots\n newDetectedTotal = newDetectedTotal + newDetected; % keep track of number of newly discovered robots\n numRobDetect = numRobDetect + newDetected - 1; % update number of robots detected\n \n end\n end\n\n bboxes(:, 1 : (numel(bboxesTemp)/4-1)) = bboxesTemp(:, 2 : end);\n bboxes(:, (numel(bboxesTemp)/4) : numRob) = bboxes2(:,:);\n \n locs(:,:) = bboxes(1:2, :) + [bboxes(3,:)/2;zeros(1,numel(bboxes)/4)];\n \nend\n\nt = toc;\n \n \n \n \n "} +{"plateform": "github", "repo_name": "panji530/EDSC-master", "name": "hungarian.m", "ext": ".m", "path": "EDSC-master/hungarian.m", "size": 11781, "source_encoding": "utf_8", "md5": "294996aeeca4dadfc427da4f81f8b99d", "text": "function [C,T]=hungarian(A)\r\n%HUNGARIAN Solve the Assignment problem using the Hungarian method.\r\n%\r\n%[C,T]=hungarian(A)\r\n%A - a square cost matrix.\r\n%C - the optimal assignment.\r\n%T - the cost of the optimal assignment.\r\n%s.t. T = trace(A(C,:)) is minimized over all possible assignments.\r\n\r\n% Adapted from the FORTRAN IV code in Carpaneto and Toth, \"Algorithm 548:\r\n% Solution of the assignment problem [H]\", ACM Transactions on\r\n% Mathematical Software, 6(1):104-111, 1980.\r\n\r\n% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se.\r\n% Department of Computing Science, Ume? University,\r\n% Sweden. \r\n% All standard disclaimers apply.\r\n\r\n% A substantial effort was put into this code. If you use it for a\r\n% publication or otherwise, please include an acknowledgement or at least\r\n% notify me by email. /Niclas\r\n\r\n[m,n]=size(A);\r\n\r\nif (m~=n)\r\n error('HUNGARIAN: Cost matrix must be square!');\r\nend\r\n\r\n% Save original cost matrix.\r\norig=A;\r\n\r\n% Reduce matrix.\r\nA=hminired(A);\r\n\r\n% Do an initial assignment.\r\n[A,C,U]=hminiass(A);\r\n\r\n% Repeat while we have unassigned rows.\r\nwhile (U(n+1))\r\n % Start with no path, no unchecked zeros, and no unexplored rows.\r\n LR=zeros(1,n);\r\n LC=zeros(1,n);\r\n CH=zeros(1,n);\r\n RH=[zeros(1,n) -1];\r\n \r\n % No labelled columns.\r\n SLC=[];\r\n \r\n % Start path in first unassigned row.\r\n r=U(n+1);\r\n % Mark row with end-of-path label.\r\n LR(r)=-1;\r\n % Insert row first in labelled row set.\r\n SLR=r;\r\n \r\n % Repeat until we manage to find an assignable zero.\r\n while (1)\r\n % If there are free zeros in row r\r\n if (A(r,n+1)~=0)\r\n % ...get column of first free zero.\r\n l=-A(r,n+1);\r\n \r\n % If there are more free zeros in row r and row r in not\r\n % yet marked as unexplored..\r\n if (A(r,l)~=0 & RH(r)==0)\r\n % Insert row r first in unexplored list.\r\n RH(r)=RH(n+1);\r\n RH(n+1)=r;\r\n \r\n % Mark in which column the next unexplored zero in this row\r\n % is.\r\n CH(r)=-A(r,l);\r\n end\r\n else\r\n % If all rows are explored..\r\n if (RH(n+1)<=0)\r\n % Reduce matrix.\r\n [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR);\r\n end\r\n \r\n % Re-start with first unexplored row.\r\n r=RH(n+1);\r\n % Get column of next free zero in row r.\r\n l=CH(r);\r\n % Advance \"column of next free zero\".\r\n CH(r)=-A(r,l);\r\n % If this zero is last in the list..\r\n if (A(r,l)==0)\r\n % ...remove row r from unexplored list.\r\n RH(n+1)=RH(r);\r\n RH(r)=0;\r\n end\r\n end\r\n \r\n % While the column l is labelled, i.e. 
in path.\r\n while (LC(l)~=0)\r\n % If row r is explored..\r\n if (RH(r)==0)\r\n % If all rows are explored..\r\n if (RH(n+1)<=0)\r\n % Reduce cost matrix.\r\n [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR);\r\n end\r\n \r\n % Re-start with first unexplored row.\r\n r=RH(n+1);\r\n end\r\n \r\n % Get column of next free zero in row r.\r\n l=CH(r);\r\n \r\n % Advance \"column of next free zero\".\r\n CH(r)=-A(r,l);\r\n \r\n % If this zero is last in list..\r\n if(A(r,l)==0)\r\n % ...remove row r from unexplored list.\r\n RH(n+1)=RH(r);\r\n RH(r)=0;\r\n end\r\n end\r\n \r\n % If the column found is unassigned..\r\n if (C(l)==0)\r\n % Flip all zeros along the path in LR,LC.\r\n [A,C,U]=hmflip(A,C,LC,LR,U,l,r);\r\n % ...and exit to continue with next unassigned row.\r\n break;\r\n else\r\n % ...else add zero to path.\r\n \r\n % Label column l with row r.\r\n LC(l)=r;\r\n \r\n % Add l to the set of labelled columns.\r\n SLC=[SLC l];\r\n \r\n % Continue with the row assigned to column l.\r\n r=C(l);\r\n \r\n % Label row r with column l.\r\n LR(r)=l;\r\n \r\n % Add r to the set of labelled rows.\r\n SLR=[SLR r];\r\n end\r\n end\r\nend\r\n\r\n% Calculate the total cost.\r\nT=sum(orig(logical(sparse(C,1:size(orig,2),1))));\r\n\r\n\r\nfunction A=hminired(A)\r\n%HMINIRED Initial reduction of cost matrix for the Hungarian method.\r\n%\r\n%B=assredin(A)\r\n%A - the unreduced cost matris.\r\n%B - the reduced cost matrix with linked zeros in each row.\r\n\r\n% v1.0 96-06-13. Niclas Borlin, niclas@cs.umu.se.\r\n\r\n[m,n]=size(A);\r\n\r\n% Subtract column-minimum values from each column.\r\ncolMin=min(A);\r\nA=A-colMin(ones(n,1),:);\r\n\r\n% Subtract row-minimum values from each row.\r\nrowMin=min(A')';\r\nA=A-rowMin(:,ones(1,n));\r\n\r\n% Get positions of all zeros.\r\n[i,j]=find(A==0);\r\n\r\n% Extend A to give room for row zero list header column.\r\nA(1,n+1)=0;\r\nfor k=1:n\r\n % Get all column in this row. \r\n cols=j(k==i)';\r\n % Insert pointers in matrix.\r\n A(k,[n+1 cols])=[-cols 0];\r\nend\r\n\r\n\r\nfunction [A,C,U]=hminiass(A)\r\n%HMINIASS Initial assignment of the Hungarian method.\r\n%\r\n%[B,C,U]=hminiass(A)\r\n%A - the reduced cost matrix.\r\n%B - the reduced cost matrix, with assigned zeros removed from lists.\r\n%C - a vector. C(J)=I means row I is assigned to column J,\r\n% i.e. there is an assigned zero in position I,J.\r\n%U - a vector with a linked list of unassigned rows.\r\n\r\n% v1.0 96-06-14. 
Niclas Borlin, niclas@cs.umu.se.\r\n\r\n[n,np1]=size(A);\r\n\r\n% Initalize return vectors.\r\nC=zeros(1,n);\r\nU=zeros(1,n+1);\r\n\r\n% Initialize last/next zero \"pointers\".\r\nLZ=zeros(1,n);\r\nNZ=zeros(1,n);\r\n\r\nfor i=1:n\r\n % Set j to first unassigned zero in row i.\r\n\tlj=n+1;\r\n\tj=-A(i,lj);\r\n\r\n % Repeat until we have no more zeros (j==0) or we find a zero\r\n\t% in an unassigned column (c(j)==0).\r\n \r\n\twhile (C(j)~=0)\r\n\t\t% Advance lj and j in zero list.\r\n\t\tlj=j;\r\n\t\tj=-A(i,lj);\r\n\t\r\n\t\t% Stop if we hit end of list.\r\n\t\tif (j==0)\r\n\t\t\tbreak;\r\n\t\tend\r\n\tend\r\n\r\n\tif (j~=0)\r\n\t\t% We found a zero in an unassigned column.\r\n\t\t\r\n\t\t% Assign row i to column j.\r\n\t\tC(j)=i;\r\n\t\t\r\n\t\t% Remove A(i,j) from unassigned zero list.\r\n\t\tA(i,lj)=A(i,j);\r\n\r\n\t\t% Update next/last unassigned zero pointers.\r\n\t\tNZ(i)=-A(i,j);\r\n\t\tLZ(i)=lj;\r\n\r\n\t\t% Indicate A(i,j) is an assigned zero.\r\n\t\tA(i,j)=0;\r\n\telse\r\n\t\t% We found no zero in an unassigned column.\r\n\r\n\t\t% Check all zeros in this row.\r\n\r\n\t\tlj=n+1;\r\n\t\tj=-A(i,lj);\r\n\t\t\r\n\t\t% Check all zeros in this row for a suitable zero in another row.\r\n\t\twhile (j~=0)\r\n\t\t\t% Check the in the row assigned to this column.\r\n\t\t\tr=C(j);\r\n\t\t\t\r\n\t\t\t% Pick up last/next pointers.\r\n\t\t\tlm=LZ(r);\r\n\t\t\tm=NZ(r);\r\n\t\t\t\r\n\t\t\t% Check all unchecked zeros in free list of this row.\r\n\t\t\twhile (m~=0)\r\n\t\t\t\t% Stop if we find an unassigned column.\r\n\t\t\t\tif (C(m)==0)\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tend\r\n\t\t\t\t\r\n\t\t\t\t% Advance one step in list.\r\n\t\t\t\tlm=m;\r\n\t\t\t\tm=-A(r,lm);\r\n\t\t\tend\r\n\t\t\t\r\n\t\t\tif (m==0)\r\n\t\t\t\t% We failed on row r. Continue with next zero on row i.\r\n\t\t\t\tlj=j;\r\n\t\t\t\tj=-A(i,lj);\r\n\t\t\telse\r\n\t\t\t\t% We found a zero in an unassigned column.\r\n\t\t\t\r\n\t\t\t\t% Replace zero at (r,m) in unassigned list with zero at (r,j)\r\n\t\t\t\tA(r,lm)=-j;\r\n\t\t\t\tA(r,j)=A(r,m);\r\n\t\t\t\r\n\t\t\t\t% Update last/next pointers in row r.\r\n\t\t\t\tNZ(r)=-A(r,m);\r\n\t\t\t\tLZ(r)=j;\r\n\t\t\t\r\n\t\t\t\t% Mark A(r,m) as an assigned zero in the matrix . . .\r\n\t\t\t\tA(r,m)=0;\r\n\t\t\t\r\n\t\t\t\t% ...and in the assignment vector.\r\n\t\t\t\tC(m)=r;\r\n\t\t\t\r\n\t\t\t\t% Remove A(i,j) from unassigned list.\r\n\t\t\t\tA(i,lj)=A(i,j);\r\n\t\t\t\r\n\t\t\t\t% Update last/next pointers in row r.\r\n\t\t\t\tNZ(i)=-A(i,j);\r\n\t\t\t\tLZ(i)=lj;\r\n\t\t\t\r\n\t\t\t\t% Mark A(r,m) as an assigned zero in the matrix . . .\r\n\t\t\t\tA(i,j)=0;\r\n\t\t\t\r\n\t\t\t\t% ...and in the assignment vector.\r\n\t\t\t\tC(j)=i;\r\n\t\t\t\t\r\n\t\t\t\t% Stop search.\r\n\t\t\t\tbreak;\r\n\t\t\tend\r\n\t\tend\r\n\tend\r\nend\r\n\r\n% Create vector with list of unassigned rows.\r\n\r\n% Mark all rows have assignment.\r\nr=zeros(1,n);\r\nrows=C(C~=0);\r\nr(rows)=rows;\r\nempty=find(r==0);\r\n\r\n% Create vector with linked list of unassigned rows.\r\nU=zeros(1,n+1);\r\nU([n+1 empty])=[empty 0];\r\n\r\n\r\nfunction [A,C,U]=hmflip(A,C,LC,LR,U,l,r)\r\n%HMFLIP Flip assignment state of all zeros along a path.\r\n%\r\n%[A,C,U]=hmflip(A,C,LC,LR,U,l,r)\r\n%Input:\r\n%A - the cost matrix.\r\n%C - the assignment vector.\r\n%LC - the column label vector.\r\n%LR - the row label vector.\r\n%U - the \r\n%r,l - position of last zero in path.\r\n%Output:\r\n%A - updated cost matrix.\r\n%C - updated assignment vector.\r\n%U - updated unassigned row list vector.\r\n\r\n% v1.0 96-06-14. 
Niclas Borlin, niclas@cs.umu.se.\r\n\r\nn=size(A,1);\r\n\r\nwhile (1)\r\n % Move assignment in column l to row r.\r\n C(l)=r;\r\n \r\n % Find zero to be removed from zero list..\r\n \r\n % Find zero before this.\r\n m=find(A(r,:)==-l);\r\n \r\n % Link past this zero.\r\n A(r,m)=A(r,l);\r\n \r\n A(r,l)=0;\r\n \r\n % If this was the first zero of the path..\r\n if (LR(r)<0)\r\n ...remove row from unassigned row list and return.\r\n U(n+1)=U(r);\r\n U(r)=0;\r\n return;\r\n else\r\n \r\n % Move back in this row along the path and get column of next zero.\r\n l=LR(r);\r\n \r\n % Insert zero at (r,l) first in zero list.\r\n A(r,l)=A(r,n+1);\r\n A(r,n+1)=-l;\r\n \r\n % Continue back along the column to get row of next zero in path.\r\n r=LC(l);\r\n end\r\nend\r\n\r\n\r\nfunction [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR)\r\n%HMREDUCE Reduce parts of cost matrix in the Hungerian method.\r\n%\r\n%[A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR)\r\n%Input:\r\n%A - Cost matrix.\r\n%CH - vector of column of 'next zeros' in each row.\r\n%RH - vector with list of unexplored rows.\r\n%LC - column labels.\r\n%RC - row labels.\r\n%SLC - set of column labels.\r\n%SLR - set of row labels.\r\n%\r\n%Output:\r\n%A - Reduced cost matrix.\r\n%CH - Updated vector of 'next zeros' in each row.\r\n%RH - Updated vector of unexplored rows.\r\n\r\n% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se.\r\n\r\nn=size(A,1);\r\n\r\n% Find which rows are covered, i.e. unlabelled.\r\ncoveredRows=LR==0;\r\n\r\n% Find which columns are covered, i.e. labelled.\r\ncoveredCols=LC~=0;\r\n\r\nr=find(~coveredRows);\r\nc=find(~coveredCols);\r\n\r\n% Get minimum of uncovered elements.\r\nm=min(min(A(r,c)));\r\n\r\n% Subtract minimum from all uncovered elements.\r\nA(r,c)=A(r,c)-m;\r\n\r\n% Check all uncovered columns..\r\nfor j=c\r\n % ...and uncovered rows in path order..\r\n for i=SLR\r\n % If this is a (new) zero..\r\n if (A(i,j)==0)\r\n % If the row is not in unexplored list..\r\n if (RH(i)==0)\r\n % ...insert it first in unexplored list.\r\n RH(i)=RH(n+1);\r\n RH(n+1)=i;\r\n % Mark this zero as \"next free\" in this row.\r\n CH(i)=j;\r\n end\r\n % Find last unassigned zero on row I.\r\n row=A(i,:);\r\n colsInList=-row(row<0);\r\n if (length(colsInList)==0)\r\n % No zeros in the list.\r\n l=n+1;\r\n else\r\n l=colsInList(row(colsInList)==0);\r\n end\r\n % Append this zero to end of list.\r\n A(i,l)=-j;\r\n end\r\n end\r\nend\r\n\r\n% Add minimum to all doubly covered elements.\r\nr=find(coveredRows);\r\nc=find(coveredCols);\r\n\r\n% Take care of the zeros we will remove.\r\n[i,j]=find(A(r,c)<=0);\r\n\r\ni=r(i);\r\nj=c(j);\r\n\r\nfor k=1:length(i)\r\n % Find zero before this in this row.\r\n lj=find(A(i(k),:)==-j(k));\r\n % Link past it.\r\n A(i(k),lj)=A(i(k),j(k));\r\n % Mark it as assigned.\r\n A(i(k),j(k))=0;\r\nend\r\n\r\nA(r,c)=A(r,c)+m;"} +{"plateform": "github", "repo_name": "panji530/EDSC-master", "name": "dataProjection.m", "ext": ".m", "path": "EDSC-master/dataProjection.m", "size": 733, "source_encoding": "utf_8", "md5": "608c1dd2735280c008ffa8c973aff3d2", "text": "%--------------------------------------------------------------------------\n% This function takes the D x N data matrix with columns indicating\n% different data points and project the D dimensional data into a r\n% dimensional subspace using PCA.\n% X: D x N matrix of N data points\n% r: dimension of the PCA projection, if r = 0, then no projection\n% Xp: r x N matrix of N projectred data 
points\n%--------------------------------------------------------------------------\n% Copyright @ Ehsan Elhamifar, 2012\n%--------------------------------------------------------------------------\n\n\nfunction Xp = DataProjection(X,r)\n\nif (nargin < 2)\n r = 0;\nend\n\nif (r == 0)\n Xp = X;\nelse\n [U,~,~] = svd(X,0);\n Xp = U(:,1:r)' * X;\nend\n"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "chuongtrinh.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/chuongtrinh.m", "size": 4326, "source_encoding": "utf_8", "md5": "9aa6e0fba6419280402c840c53ddc448", "text": "function varargout = chuongtrinh(varargin)\n% CHUONGTRINH MATLAB code for chuongtrinh.fig\n% CHUONGTRINH, by itself, creates a new CHUONGTRINH or raises the existing\n% singleton*.\n%\n% H = CHUONGTRINH returns the handle to a new CHUONGTRINH or the handle to\n% the existing singleton*.\n%\n% CHUONGTRINH('CALLBACK',hObject,eventData,handles,...) calls the local\n% function named CALLBACK in CHUONGTRINH.M with the given input arguments.\n%\n% CHUONGTRINH('Property','Value',...) creates a new CHUONGTRINH or raises the\n% existing singleton*. Starting from the left, property value pairs are\n% applied to the GUI before chuongtrinh_OpeningFcn gets called. An\n% unrecognized property name or invalid value makes property application\n% stop. All inputs are passed to chuongtrinh_OpeningFcn via varargin.\n%\n% *See GUI Options on GUIDE's Tools menu. Choose \"GUI allows only one\n% instance to run (singleton)\".\n%\n% See also: GUIDE, GUIDATA, GUIHANDLES\n\n% Edit the above text to modify the response to help chuongtrinh\n\n% Last Modified by GUIDE v2.5 29-Aug-2017 10:44:09\n\n% Begin initialization code - DO NOT EDIT\ngui_Singleton = 1;\ngui_State = struct('gui_Name', mfilename, ...\n 'gui_Singleton', gui_Singleton, ...\n 'gui_OpeningFcn', @chuongtrinh_OpeningFcn, ...\n 'gui_OutputFcn', @chuongtrinh_OutputFcn, ...\n 'gui_LayoutFcn', [] , ...\n 'gui_Callback', []);\nif nargin && ischar(varargin{1})\n gui_State.gui_Callback = str2func(varargin{1});\nend\n\nif nargout\n [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});\nelse\n gui_mainfcn(gui_State, varargin{:});\nend\n% End initialization code - DO NOT EDIT\n\n\n% --- Executes just before chuongtrinh is made visible.\nfunction chuongtrinh_OpeningFcn(hObject, eventdata, handles, varargin)\n% This function has no output args, see OutputFcn.\n% hObject handle to figure\n% eventdata reserved - to be defined in a future version of MATLAB\n% handles structure with handles and user data (see GUIDATA)\n% varargin command line arguments to chuongtrinh (see VARARGIN)\n\n% Choose default command line output for chuongtrinh\nhandles.output = hObject;\n\n% Update handles structure\nguidata(hObject, handles);\naddpath('./params/');\naddpath('./libs/');\naddpath('./libsvm-master/matlab');\nstart_i=imread('xinchonanh.png');\naxes(handles.axes1);\nimshow(start_i);\n\n% UIWAIT makes chuongtrinh wait for user response (see UIRESUME)\n% uiwait(handles.figure1);\n\n\n% --- Outputs from this function are returned to the command line.\nfunction varargout = chuongtrinh_OutputFcn(hObject, eventdata, handles) \n% varargout cell array for returning output args (see VARARGOUT);\n% hObject handle to figure\n% eventdata reserved - to be defined in a future version of MATLAB\n% handles structure with handles and user data (see GUIDATA)\n\n% Get default command line output from handles structure\nvarargout{1} = 
handles.output;\n\n\n% --- Executes on button press in btnChonanh.\nfunction btnChonanh_Callback(hObject, eventdata, handles)\n% hObject handle to btnChonanh (see GCBO)\n% eventdata reserved - to be defined in a future version of MATLAB\n% handles structure with handles and user data (see GUIDATA)\n[filename, pathname] = uigetfile('.\\image','Xin vui long chon anh...');\nI=imread([pathname,filename]);\nimshow(I);\nassignin('base','I',I)\n\n% --- Executes on button press in btnNhandang.\nfunction btnNhandang_Callback(hObject, eventdata, handles)\n% hObject handle to btnNhandang (see GCBO)\n% eventdata reserved - to be defined in a future version of MATLAB\n% handles structure with handles and user data (see GUIDATA)\nevalin('base','nhandien');\n\n% --- Executes on button press in btnthoat.\nfunction btnthoat_Callback(hObject, eventdata, handles)\n% hObject handle to btnthoat (see GCBO)\n% eventdata reserved - to be defined in a future version of MATLAB\n% handles structure with handles and user data (see GUIDATA)\nclose all\n\n\n% --- Executes on button press in btntrain.\nfunction btntrain_Callback(hObject, eventdata, handles)\n% hObject handle to btntrain (see GCBO)\n% eventdata reserved - to be defined in a future version of MATLAB\n% handles structure with handles and user data (see GUIDATA)\neval('train');\n"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "plot_DETcurve.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/plot_DETcurve.m", "size": 5510, "source_encoding": "utf_8", "md5": "6c913ccc7db9a1ed012aa94ead1116cd", "text": "function plot_DETcurve(models, model_names,pos_path, neg_path)\n% PLOT_DETCURVE function to compute de DET plot given a set of models\n%\n% INPUT:\n% models: SVM models to test (as a row vector)\n% model_names: names of the models to use it in the DET_plot legends \n% (as cell array)\n% pos/neg path: path to pos/neg images\n% \n%\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: 09-Nov-2013 22:45:23 $ \n%$ Revision : 1.04 $ \n%% FILENAME : performance.m \n\n % if paths not specified by parameters\n if nargin < 3\n pos_path = uigetdir('.\\images','Select positive test image path');\n neg_path = uigetdir('.\\images','Select negative test image path');\n\n if isa(neg_path,'double') || isa(pos_path,'double')\n cprintf('Errors','Invalid paths...\\nexiting...\\n\\n')\n return \n end\n end\n\n det_figure_handler = figure('name','DET curves');\n set(det_figure_handler,'Visible','off');\n \n det_plot_handlers = zeros(1,max(size(models)));\n \n color = ['b','r','g','y'];\n \n for m_index=1:max(size(models))\n hold on;\n model = models(m_index);\n \n % getting classification scores\n [p_scores, n_scores] = get_scores(model,pos_path,neg_path);\n\n % Plot scores distribution as a Histogram\n positives = max(size(p_scores));\n negatives = max(size(n_scores)); \n scores = zeros(min(positives, negatives),2);\n for i=1:size(scores)\n scores(i,1) = p_scores(i);\n scores(i,2) = n_scores(i);\n end\n figure('name', sprintf('model %s scores distribution',model_names{m_index})); hist(scores);\n\n % Compute Pmiss and Pfa from experimental detection output scores\n [P_miss,P_fppw] = Compute_DET(p_scores,n_scores);\n\n % Plot the detection error trade-off\n figure(det_figure_handler);\n thick = 2;\n det_plot_handler = Plot_DET(P_miss,P_fppw,color(m_index)', thick);\n det_plot_handlers(m_index) = det_plot_handler;\n\n % Plot the optimum point for the detector\n C_miss = 1;\n C_fppw = 1;\n P_target = 
0.5;\n\n Set_DCF(C_miss,C_fppw,P_target);\n [DCF_opt, Popt_miss, Popt_fa] = Min_DCF(P_miss,P_fppw);\n fprintf('Optimal Decision Cost Function for %s = %d\\n',model_names{m_index},DCF_opt)\n\n Plot_DET (Popt_miss,Popt_fa,'ko');\n end\n legend(det_plot_handlers, model_names);\nend\n\n\n\n\nfunction [p_scores, n_scores] = get_scores(model,pos_path, neg_path)\n % Tests a (lib)SVM classifier from the specified images paths\n %\n % ok: number of correct classifications\n % ko: number of wrong classifications\n % positive / negative images_path: paths of the images to test\n % model: SVMmodel to use.\n %\n %$ Author: Jose Marcos Rodriguez $ \n %$ Date: 2013/11/09 $ \n %$ Revision: 1.2 $\n\n [positive_images, negative_images] = get_files(-1,-1,{pos_path,neg_path});\n total_pos_windows = numel(positive_images);\n total_neg_windows = numel(negative_images);\n \n \n %% Init the svm test variables\n params = get_params('det_plot_params');\n chunk_size = params.chunk_size;\n desc_size = params.desc_size;\n params = get_params('window_params');\n im_h_size = params.height;\n im_w_size = params.width;\n im_c_depth = params.color_depth;\n \n % ====================================================================\n %% Reading all POSITIVE images \n % (64x128 images)\n % ==================================================================== \n \n % SVM scores\n p_scores = zeros(total_pos_windows,1);\n \n i = 0;\n while i < numel(positive_images)\n \n %% window obtainment\n this_chunk = min(chunk_size,numel(positive_images)-i);\n windows = uint8(zeros(im_h_size,im_w_size,im_c_depth,this_chunk));\n hogs = zeros(this_chunk, desc_size);\n labels = ones(size(hogs,1),1);\n for l=1:this_chunk\n I = imread(positive_images(i+1).name);\n windows(:,:,:,l) = get_window(I,im_w_size,im_h_size,'center');\n hogs(l,:) = compute_HOG(windows(:,:,:,l),8,2,9);\n i = i+1;\n end\n \n % just for fixing GUI freezing due to unic thread MatLab issue\n drawnow; \n\n %% prediction\n [~, ~, scores] = ...\n svmpredict(labels, hogs, model, '-b 0');\n \n p_scores(i-this_chunk+1:i,:) = scores(:,:); \n \n end\n \n % ====================================================================\n %% Reading all NEGATIVE images \n % (64x128 windows)\n % ====================================================================\n\n n_scores = zeros(total_neg_windows,1);\n \n i = 0;\n while i < numel(negative_images)\n \n %% window obtainment\n this_chunk = min(chunk_size,numel(negative_images)-i);\n windows = uint8(zeros(im_h_size,im_w_size,im_c_depth,this_chunk));\n hogs = zeros(this_chunk, desc_size);\n labels = ones(size(hogs,1),1)*(-1);\n for l=1:this_chunk\n I = imread(negative_images(i+1).name);\n windows(:,:,:,l) = get_window(I,im_w_size,im_h_size,[1,1]);\n hogs(l,:) = compute_HOG(windows(:,:,:,l),8,2,9);\n i = i+1;\n end\n \n % just for fixing GUI freezing due to unic thread MatLab issue\n drawnow; \n\n %% prediction\n [~, ~, scores] = ...\n svmpredict(labels, hogs, model, '-b 0');\n \n n_scores(i-this_chunk+1:i,:) = scores(:,:); \n \n \n end\nend\n\n"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "draw_sliding_window.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/draw_sliding_window.m", "size": 3630, "source_encoding": "utf_8", "md5": "2577c102d36999695fc68a9d8324fe2e", "text": "function draw_sliding_window(I, model)\n% DRAW_SLIDING_WINDOW function that given an image and a model scans \n% exhaustively over a scale-space pyramid the image for pedestrians\n% drawing 
the sliding detection window and the confidence probability.\n%\n% INPUT:\n% model: model to test\n% I: image to scan\n% \n%\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: - $ \n%$ Revision : 1.00 $ \n%% FILENAME : draw_sliding_window.m \n\n% Testing if param file exists in the params directory\nif exist(['params',filesep,'detect_and_draw.params'],'file')\n test_params = load(['params',filesep,'detect_and_draw.params'],'-ascii');\n\n% Testing if param file exists in the current directory\nelseif exist('detect_and_draw.params','file')\n test_params = load('detect_and_draw.params','-ascii');\n\n% Dialog to select param file\nelse\n [param_file,PathName,~] = uigetfile('*.params','Select parameter file');\n if ~isa(param_file,'double')\n test_params = load([PathName,filesep,param_file],'-ascii');\n else\n cprintf('Errors','Missing param file...\\nexiting...\\n\\n');\n return\n end\nend\n \n%% wiring up the param vars\nth = test_params(1);\nscale = test_params(2);\nhog_size = test_params(3); \nstride = test_params(4);\n\n% fprintf('Threshold=%f\\n',th)\n% fprintf('Scale=%f\\n',scale)\n% fprintf('Descriptor size=%f\\n',hog_size)\n% fprintf('Window stride=%f\\n',stride)\n\n%% color definitions\nred = uint8([255,0,0]);\ngreen = uint8([0,255,0]);\n\n%% shape inserters\nok_shapeInserter = ...\n vision.ShapeInserter('BorderColor','Custom','CustomBorderColor',green);\nko_shapeInserter = ...\n vision.ShapeInserter('BorderColor','Custom','CustomBorderColor',red);\n\nti = tic;\nfprintf('\\nbegining the pyramid hog extraction...\\n')\n[hogs, ~, wxl, coordinates] = get_pyramid_hogs(I, hog_size, scale, stride);\ntf = toc(ti);\nfprintf('time to extract %d hogs: %d\\n', size(hogs,1), tf);\n\n%% refer coordinates to the original image... (Level0)\n% for each window in every level...\nind = 1;\nfor l=1:size(wxl,2)\n ws= wxl(l);\n for w=1:ws\n % compute original coordinates in Level0 image \n factor = (scale^(l-1));\n coordinates(1,ind) = floor(coordinates(1,ind) * factor);\n coordinates(2,ind) = floor(coordinates(2,ind) * factor);\n ind = ind + 1;\n end\nend\n\n%% SVM prediction for all windows... \n[predict_labels, ~, probs] = ...\n svmpredict(zeros(size(hogs,1),1), hogs, model, '-b 1');\n \n% draw in the original image the detecction window\n% red: not detected\n% green: detected\nfor i=1:numel(predict_labels)\n [level, ~] = get_window_indices(wxl, i);\n% figure('name', sprintf('level %d detection', level));\n \n x = coordinates(1,i);\n y = coordinates(2,i);\n factor = (scale^(level-1));\n rectangle = int32([x,y,64*factor,128*factor]);\n \n if predict_labels(i) == 1 && probs(i) > th\n % J = step(ok_shapeInserter, I, rectangle);\n % J = insertText(J, [x,y], probs(i), 'FontSize',9,'BoxColor', 'green');\n % imshow(J);\n % figure(gcf); \n %pause(0.5);\n disp('ok');\n else\n disp('mok');\n J = step(ko_shapeInserter, I, rectangle);\n imshow(J);\n figure(gcf); \n end\nend\n% closing all figures...\n% close all\nend\n\n\n\n\n%% Aux func. 
to get the level and window number given a linear index\nfunction [level, num_window] = get_window_indices(wxl, w_linear_index)\n accum_windows = 0;\n for i=1:size(wxl,2)\n accum_windows = accum_windows + wxl(i);\n if w_linear_index <= accum_windows\n level = i;\n num_window = accum_windows - w_linear_index;\n break \n end\n end\n\nend"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "compute_level0_coordinates.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/compute_level0_coordinates.m", "size": 1067, "source_encoding": "utf_8", "md5": "d65b971929cc232aad3dc34827e93fe2", "text": "\r\n%% Aux function to compute the windows coordiantes at level 0 pyramid image\r\nfunction [bb_size, new_cords] = compute_level0_coordinates(wxl, coordinates, inds, scale)\r\n\r\n % Consts\r\n bb_width = 64;\r\n bb_height = 128;\r\n \r\n % Vars\r\n new_cords = zeros(size(inds,2),2);\r\n bb_size = zeros(size(inds,2),2);\r\n \r\n % for each positive window index...\r\n for i=1:size(inds,2)\r\n \r\n % linear index of the window\r\n ind = inds(i);\r\n \r\n % find the positive window original level \r\n level = 0;\r\n\r\n while ind > sum(wxl(1:level))\r\n \tlevel = level + 1;\r\n end\r\n\r\n% fprintf('Match found at level %d\\n', level);\r\n \r\n % compute original coordinates in Level0 image \r\n factor = (scale^(level-1));\r\n new_cords(i,1) = floor(coordinates(i,1) * factor);\r\n new_cords(i,2) = floor(coordinates(i,2) * factor);\r\n \r\n % Bounding Box resizing?\r\n bb_size(i,1) = ceil(bb_height*factor);\r\n bb_size(i,2) = ceil(bb_width*factor);\r\n end\r\nend"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "test_svm.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/test_svm.m", "size": 11053, "source_encoding": "utf_8", "md5": "9bfbc961a2df8136aa2b0eb74f485b1d", "text": "function statistics = test_svm(model,paths)\n% TEST_SVM Tests a (lib)SVM classifier from the specified images paths\n%\n% INPUT:\n% model: SVMmodel to use\n% threshold: positive confidence threshold \n% paths: positive / negative images_path to test\n% //\n% windows, descriptor and test parameter configuration is read from their\n% corresponding paramteter files. 
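% --- Illustrative sketch (not part of the original test_svm.m) ---
% How the descriptor length used by this repository follows from the
% window and HOG parameters. The cell/block/bin values are taken from the
% compute_HOG(window, 8, 2, 9) calls and the 64x128 detection window used
% throughout; the result matches the hard-coded hog_size = 3780 in
% static_detector.m.
cell_size = 8; block_size = 2; n_bins = 9;      % descriptor parameters
width = 64; height = 128;                       % detection window size
desp = 1;                                       % block stride of one cell
n_v_cells = floor(height/cell_size);            % 16 cells vertically
n_h_cells = floor(width/cell_size);             % 8 cells horizontally
hist_size = block_size*block_size*n_bins;       % 36 values per block
descriptor_size = hist_size * (n_v_cells - block_size + desp) ...
                            * (n_h_cells - block_size + desp)   % = 3780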
If not found a window prompts for them.\n%\n% OUTPUT:\n% statistics: ok, ko, false_pos, false_neg, true_pos, true_neg\n% fppw and miss_rate metrics\n%\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: 2013/11/09 $ \n%$ Revision: 1.05 $\n\n %% svm testing parameters\n get_test_params();\n\n % path stuff\n if nargin < 2\n positive_images_path = uigetdir('images','Select positive image folder');\n negative_images_path = uigetdir('images','Select negative image folder');\n if safe\n images_path = uigetdir('images','Select base image path');\n end\n\n if isa(positive_images_path,'double') || ...\n isa(negative_images_path,'double')\n cprintf('Errors','Invalid paths...\\nexiting...\\n\\n')\n return \n end\n \n else\n positive_images_path = paths{1};\n negative_images_path = paths{2};\n if safe\n images_path = paths{3};\n end\n end\n \n \n %% getting images to test from the specified folders\n paths = {positive_images_path,negative_images_path};\n [positive_images, negative_images] = get_files(pos_instances,neg_instances, paths);\n\n \n \n \n % ====================================================================\n %% Reading all POSITIVE images & computing the descriptor \n % (64x128 images)\n % ====================================================================\n\n %% Computing HOG descriptor for all images (in chunks)\n pos_start_time = tic;\n false_negatives = 0;\n true_positives = 0;\n\n i = 0;\n while i < numel(positive_images)\n \n %% window obtainment\n this_chunk = min(pos_chunk_size,numel(positive_images)-i);\n windows = uint8(zeros(height,width,depth,this_chunk));\n hogs = zeros(this_chunk, descriptor_size);\n labels = ones(size(hogs,1),1);\n for l=1:this_chunk\n I = imread(positive_images(i+1).name);\n windows(:,:,:,l) = get_window(I,width,height, 'center');\n hogs(l,:) = compute_HOG(windows(:,:,:,l),cell_size,block_size,n_bins);\n i = i+1;\n end\n \n % just for fixing GUI freezing due to unic thread MatLab issue\n drawnow; \n\n %% prediction\n [predict_labels, ~, probs] = ...\n svmpredict(labels, hogs, model, '-b 1');\n \n %% counting and copying\n for l=1:size(predict_labels)\n predict_label = predict_labels(l);\n \n if probs(l,1) >= 0.1\n ok = ok + 1;\n true_positives = true_positives + 1;\n else\n ko = ko + 1;\n false_negatives = false_negatives + 1;\n\n % saving hard image for further retrain\n if safe\n [~, name, ext] = fileparts(positive_images(i).name);\n saving_path = [images_path,'/hard_examples/false_neg/',...\n name,...\n '_n_wind_',num2str(l), ext];\n \n % writting image \n imwrite(windows(:,:,:,l), saving_path); \n end\n end \n end\n end\n \n % hog extraction elapsed time\n pos_elapsed_time = toc(pos_start_time);\n fprintf('Elapsed time to classify positive images: %f seconds.\\n',pos_elapsed_time);\n \n\n \n \n \n % ====================================================================\n %% Reading all NEGATIVE images & computing the descriptor \n % Exhaustive search for hard examples\n % (space-scaled 64x128 windows)\n % ====================================================================\n \n num_neg_images = size(negative_images,1);\n if strcmp(neg_method, 'pyramid')\n num_neg_windows = ...\n get_negative_windows_count(negative_images);\n elseif strcmp(neg_method, 'windows')\n num_neg_windows = num_neg_images*neg_chunk_size;\n end\n fprintf('testing with %d negative images and %d negative windows\\n', num_neg_images,num_neg_windows);\n \n\n %% Computing HOG descriptor for all images (in chunks)\n neg_start_time = tic;\n false_positives = 0;\n true_negatives = 0;\n\n i = 
0;\n while i < numel(negative_images)\n \n %% window obtaintion\n % All pyramid HOGS\n if strcmp(neg_method, 'pyramid')\n I = imread(negative_images(i+1).name);\n \n %% temporal\n [h,w,~] = size(I);\n if max(h,w) >= 160\n ratio = max(96/w,160/h);\n I = imresize(I,ratio); \n end\n %% fin temporal\n [hogs, windows, wxl] = get_pyramid_hogs(I, descriptor_size, scale, stride);\n labels = ones(size(hogs,1),1).*(-1);\n i = i+1;\n \n % random window HOG\n elseif strcmp(neg_method,'windows')\n this_chunk = min(neg_chunk_size, numel(negative_images)-i);\n windows = uint8(zeros(height,width,depth,this_chunk));\n hogs = zeros(this_chunk, descriptor_size);\n labels = ones(size(hogs,1),1).*(-1);\n \n for l=1:this_chunk\n I = imread(negative_images(i+1).name);\n windows(:,:,:,l) = get_window(I,width,height, 'center');\n hogs(l,:) = compute_HOG(windows(:,:,:,l),cell_size,block_size,n_bins);\n i = i+1;\n end\n end\n \n % just for fixing GUI freezing due to unic thread MatLab issue\n drawnow; \n \n %% prediction\n [predict_labels, ~, probs] = ...\n svmpredict(labels, hogs, model, '-b 1');\n\n %% updating statistics\n for l=1:size(predict_labels)\n predict_label = predict_labels(l);\n\t\t \n if probs(l,1) < 0.1\n ok = ok + 1;\n true_negatives = true_negatives + 1;\n else\n ko = ko + 1;\n false_positives = false_positives + 1;\n\n if safe\n % saving hard image for further retrain\n [~, name, ext] = fileparts(negative_images(i).name);\n\n if strcmp(neg_method, 'pyramid')\n [level, num_image] = get_window_indices(wxl, l);\n saving_path = [images_path,'/hard_examples/false_pos/',...\n name,...\n '_l',num2str(level),...\n '_w',num2str(num_image),ext];\n else\n saving_path = [images_path,'/hard_examples/false_pos/',...\n name,...\n '_n_wind_',num2str(l), ext];\n end\n % writting image \n imwrite(windows(:,:,:,l), saving_path); \n end\n end \n end\n end\n \n % hog extraction elapsed time\n neg_elapsed_time = toc(neg_start_time);\n fprintf('Elapsed time to classify negative images: %f seconds.\\n',neg_elapsed_time);\n \n \n\n %% Printing gloabl results\n precision = true_positives/(true_positives+false_positives);\n recall = true_positives/(true_positives+false_negatives);\n \n fprintf('oks: %d \\n',ok)\n fprintf('kos: %d \\n',ko)\n fprintf('false positives: %d \\n',false_positives)\n fprintf('false negatives: %d \\n',false_negatives)\n fprintf('true positives: %d \\n',true_positives)\n fprintf('true negatives: %d \\n',true_negatives)\n fprintf('mis rate: %d \\n',false_negatives / (true_positives + false_negatives))\n fprintf('fppw: %d \\n',false_positives / (ok + ko))\n fprintf('Precision: %d \\n',precision)\n fprintf('Recall: %d \\n',recall)\n fprintf('F score: %d \\n',2*((precision*recall)/(precision+recall)))\n \n % preparing values to return\n statistics = containers.Map;\n statistics('oks') = ok;\n statistics('kos') = ok;\n statistics('fp') = false_positives;\n statistics('tp') = true_positives;\n statistics('fn') = false_negatives;\n statistics('tn') = true_negatives;\n statistics('miss_rate') = false_negatives / (true_positives + false_negatives);\n statistics('fppw') = false_positives / (ok + ko);\n statistics('precision') = precision;\n statistics('recall') = recall;\n statistics('fscore') = 2*((precision*recall)/(precision+recall));\n \n \n \n \n \n \n \n % ---------------------------------------------------------------------\n %% Aux function to obtain the test parameters\n % ---------------------------------------------------------------------\n function get_test_params()\n test_params = 
get_params('test_svm_params');\n pos_chunk_size = test_params.pos_chunk_size;\n neg_chunk_size = test_params.neg_chunk_size;\n scale = test_params.scale;\n stride = test_params.stride;\n threshold = test_params.threshold;\n neg_method = test_params.neg_window_method;\n safe = test_params.safe;\n neg_instances = test_params.neg_instances;\n pos_instances = test_params.pos_instances;\n\n w_params = get_params('window_params');\n depth = w_params.color_depth;\n width = w_params.width; \n height = w_params.height;\n \n desc_params = get_params('desc_params');\n cell_size = desc_params.cell_size;\n block_size = desc_params.block_size;\n n_bins = desc_params.n_bins;\n desp = 1;\n n_v_cells = floor(height/cell_size);\n n_h_cells = floor(width/cell_size);\n hist_size = block_size*block_size*n_bins;\n descriptor_size = hist_size*(n_v_cells-block_size+desp)*(n_h_cells-block_size+desp);\n \n\n ok = 0;\n ko = 0; \n end\n \nend\n \n\n\n%% Aux function to know how many windows we'll have...\nfunction count = get_negative_windows_count(negative_images)\n % computing number of levels in the pyramid\n count = 0;\n for i=1:numel(negative_images)\n I = imread(negative_images(i).name);\n %% temporal\n [h,w,~] = size(I);\n if max(h,w) >= 160\n ratio = max(96/w,160/h);\n I = imresize(I,ratio); \n end\n %% fin temporal\n [~, windows] = get_pyramid_dimensions(I);\n count = count + windows;\n end\nend\n\n\n\n\n%% Aux function to know how the windows indices...\nfunction [level, num_window] = get_window_indices(wxl, w_linear_index)\n accum_windows = 0;\n for i=1:size(wxl,2)\n accum_windows = accum_windows + wxl(i);\n if w_linear_index <= accum_windows\n level = i;\n num_window = accum_windows - w_linear_index;\n break \n end\n end\n\nend\n\n"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "test_svm_PCA.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/test_svm_PCA.m", "size": 11226, "source_encoding": "utf_8", "md5": "515c2df08059e5339874b04bb212cf82", "text": "function statistics = test_svm_PCA(model,Ureduce, paths)\n% TEST_SVM_PCA Tests a (lib)SVM classifier from the specified images paths\n% reducing first each hog matrix to a dimensionality reduced\n% version.\n%\n% INPUT:\n% model: SVMmodel to use\n% threshold: positive confidence threshold \n% paths: positive / negative images_path to test\n% //\n% windows, descriptor and test parameter configuration is read from their\n% corresponding paramteter files. 
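% --- Illustrative sketch (not part of the original test_svm_PCA.m) ---
% test_svm_PCA receives a precomputed projection matrix 'Ureduce' and
% applies it as hogs*Ureduce, but this file does not show how Ureduce is
% built. One construction consistent with dataProjection.m (earlier in
% this collection), which keeps the first r left singular vectors of the
% data, is sketched below; 'train_hogs' and 'r' are hypothetical names and
% values used only for illustration.
train_hogs = randn(1000, 3780);       % stand-in for Ntrain x D training HOGs
r = 500;                              % illustrative reduced dimensionality
[U, ~, ~] = svd(train_hogs', 0);      % columns of train_hogs' are samples
Ureduce = U(:, 1:r);                  % D x r projection basis
hogs_reduced = train_hogs * Ureduce;  % Ntrain x r, same form as hogs*Ureduce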
If not found a window prompts for them.\n%\n% OUTPUT:\n% statistics: ok, ko, false_pos, false_neg, true_pos, true_neg\n% fppw and miss_rate metrics\n%\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: 2013/11/09 $ \n%$ Revision: 1.05 $\n\n %% svm testing parameters\n get_test_params();\n\n % path stuff\n if nargin < 3\n positive_images_path = uigetdir('images','Select positive image folder');\n negative_images_path = uigetdir('images','Select negative image folder');\n if safe\n images_path = uigetdir('images','Select base image path');\n end\n\n if isa(positive_images_path,'double') || ...\n isa(negative_images_path,'double')\n cprintf('Errors','Invalid paths...\\nexiting...\\n\\n')\n return \n end\n \n else\n positive_images_path = paths{1};\n negative_images_path = paths{2};\n if safe\n images_path = paths{3};\n end\n end\n \n \n %% getting images to test from the specified folders\n paths = {positive_images_path,negative_images_path};\n [positive_images, negative_images] = get_files(pos_instances,neg_instances, paths);\n\n \n \n \n % ====================================================================\n %% Reading all POSITIVE images & computing the descriptor \n % (64x128 images)\n % ====================================================================\n\n %% Computing HOG descriptor for all images (in chunks)\n pos_start_time = tic;\n false_negatives = 0;\n true_positives = 0;\n\n i = 0;\n while i < numel(positive_images)\n \n %% window obtainment\n this_chunk = min(pos_chunk_size,numel(positive_images)-i);\n windows = uint8(zeros(height,width,depth,this_chunk));\n hogs = zeros(this_chunk, descriptor_size);\n labels = ones(size(hogs,1),1);\n for l=1:this_chunk\n I = imread(positive_images(i+1).name);\n windows(:,:,:,l) = get_window(I,width,height, 'center');\n hogs(l,:) = compute_HOG(windows(:,:,:,l),cell_size,block_size,n_bins);\n i = i+1;\n end\n \n % just for fixing GUI freezing due to unic thread MatLab issue\n drawnow; \n\n %% prediction\n hogs = hogs*Ureduce;\n [predict_labels, ~, probs] = ...\n svmpredict(labels, hogs, model, '-b 1');\n \n %% counting and copying\n for l=1:size(predict_labels)\n predict_label = predict_labels(l);\n \n if probs(l,1) >= 0.1\n ok = ok + 1;\n true_positives = true_positives + 1;\n else\n ko = ko + 1;\n false_negatives = false_negatives + 1;\n\n % saving hard image for further retrain\n if safe\n [~, name, ext] = fileparts(positive_images(i).name);\n saving_path = [images_path,'/hard_examples/false_neg/',...\n name,...\n '_n_wind_',num2str(l), ext];\n \n % writting image \n imwrite(windows(:,:,:,l), saving_path); \n end\n end \n end\n end\n \n % hog extraction elapsed time\n pos_elapsed_time = toc(pos_start_time);\n fprintf('Elapsed time to classify positive images: %f seconds.\\n',pos_elapsed_time);\n \n\n \n \n \n % ====================================================================\n %% Reading all NEGATIVE images & computing the descriptor \n % Exhaustive search for hard examples\n % (space-scaled 64x128 windows)\n % ====================================================================\n \n num_neg_images = size(negative_images,1);\n if strcmp(neg_method, 'pyramid')\n num_neg_windows = ...\n get_negative_windows_count(negative_images);\n elseif strcmp(neg_method, 'windows')\n num_neg_windows = num_neg_images*neg_chunk_size;\n end\n fprintf('testing with %d negative images and %d negative windows\\n', num_neg_images,num_neg_windows);\n \n\n %% Computing HOG descriptor for all images (in chunks)\n neg_start_time = tic;\n false_positives = 0;\n 
true_negatives = 0;\n\n i = 0;\n while i < numel(negative_images)\n \n %% window obtaintion\n % All pyramid HOGS\n if strcmp(neg_method, 'pyramid')\n I = imread(negative_images(i+1).name);\n \n %% temporal\n [h,w,~] = size(I);\n if max(h,w) >= 160\n ratio = max(96/w,160/h);\n I = imresize(I,ratio); \n end\n %% fin temporal\n [hogs, windows, wxl] = get_pyramid_hogs(I, descriptor_size, scale, stride);\n labels = ones(size(hogs,1),1).*(-1);\n i = i+1;\n \n % random window HOG\n elseif strcmp(neg_method,'windows')\n this_chunk = min(neg_chunk_size, numel(negative_images)-i);\n windows = uint8(zeros(height,width,depth,this_chunk));\n hogs = zeros(this_chunk, descriptor_size);\n labels = ones(size(hogs,1),1).*(-1);\n \n for l=1:this_chunk\n I = imread(negative_images(i+1).name);\n windows(:,:,:,l) = get_window(I,width,height, 'center');\n hogs(l,:) = compute_HOG(windows(:,:,:,l),cell_size,block_size,n_bins);\n i = i+1;\n end\n end\n \n % just for fixing GUI freezing due to unic thread MatLab issue\n drawnow; \n \n %% prediction\n hogs = hogs*Ureduce;\n [predict_labels, ~, probs] = ...\n svmpredict(labels, hogs, model, '-b 1');\n\n %% updating statistics\n for l=1:size(predict_labels)\n predict_label = predict_labels(l);\n\t\t \n if probs(l,1) < 0.1\n ok = ok + 1;\n true_negatives = true_negatives + 1;\n else\n ko = ko + 1;\n false_positives = false_positives + 1;\n\n if safe\n % saving hard image for further retrain\n [~, name, ext] = fileparts(negative_images(i).name);\n\n if strcmp(neg_method, 'pyramid')\n [level, num_image] = get_window_indices(wxl, l);\n saving_path = [images_path,'/hard_examples/false_pos/',...\n name,...\n '_l',num2str(level),...\n '_w',num2str(num_image),ext];\n else\n saving_path = [images_path,'/hard_examples/false_pos/',...\n name,...\n '_n_wind_',num2str(l), ext];\n end\n % writting image \n imwrite(windows(:,:,:,l), saving_path); \n end\n end \n end\n end\n \n % hog extraction elapsed time\n neg_elapsed_time = toc(neg_start_time);\n fprintf('Elapsed time to classify negative images: %f seconds.\\n',neg_elapsed_time);\n \n \n\n %% Printing gloabl results\n precision = true_positives/(true_positives+false_positives);\n recall = true_positives/(true_positives+false_negatives);\n \n fprintf('oks: %d \\n',ok)\n fprintf('kos: %d \\n',ko)\n fprintf('false positives: %d \\n',false_positives)\n fprintf('false negatives: %d \\n',false_negatives)\n fprintf('true positives: %d \\n',true_positives)\n fprintf('true negatives: %d \\n',true_negatives)\n fprintf('mis rate: %d \\n',false_negatives / (true_positives + false_negatives))\n fprintf('fppw: %d \\n',false_positives / (ok + ko))\n fprintf('Precision: %d \\n',precision)\n fprintf('Recall: %d \\n',recall)\n fprintf('F score: %d \\n',2*((precision*recall)/(precision+recall)))\n \n % preparing values to return\n statistics = containers.Map;\n statistics('oks') = ok;\n statistics('kos') = ok;\n statistics('fp') = false_positives;\n statistics('tp') = true_positives;\n statistics('fn') = false_negatives;\n statistics('tn') = true_negatives;\n statistics('miss_rate') = false_negatives / (true_positives + false_negatives);\n statistics('fppw') = false_positives / (ok + ko);\n statistics('precision') = precision;\n statistics('recall') = recall;\n statistics('fscore') = 2*((precision*recall)/(precision+recall));\n \n \n \n \n \n \n \n % ---------------------------------------------------------------------\n %% Aux function to obtain the test parameters\n % ---------------------------------------------------------------------\n 
function get_test_params()\n test_params = get_params('test_svm_params');\n pos_chunk_size = test_params.pos_chunk_size;\n neg_chunk_size = test_params.neg_chunk_size;\n scale = test_params.scale;\n stride = test_params.stride;\n threshold = test_params.threshold;\n neg_method = test_params.neg_window_method;\n safe = test_params.safe;\n neg_instances = test_params.neg_instances;\n pos_instances = test_params.pos_instances;\n\n w_params = get_params('window_params');\n depth = w_params.color_depth;\n width = w_params.width; \n height = w_params.height;\n \n desc_params = get_params('desc_params');\n cell_size = desc_params.cell_size;\n block_size = desc_params.block_size;\n n_bins = desc_params.n_bins;\n desp = 1;\n n_v_cells = floor(height/cell_size);\n n_h_cells = floor(width/cell_size);\n hist_size = block_size*block_size*n_bins;\n descriptor_size = hist_size*(n_v_cells-block_size+desp)*(n_h_cells-block_size+desp);\n \n\n ok = 0;\n ko = 0; \n end\n \nend\n \n\n\n%% Aux function to know how many windows we'll have...\nfunction count = get_negative_windows_count(negative_images)\n % computing number of levels in the pyramid\n count = 0;\n for i=1:numel(negative_images)\n I = imread(negative_images(i).name);\n %% temporal\n [h,w,~] = size(I);\n if max(h,w) >= 160\n ratio = max(96/w,160/h);\n I = imresize(I,ratio); \n end\n %% fin temporal\n [~, windows] = get_pyramid_dimensions(I);\n count = count + windows;\n end\nend\n\n\n\n\n%% Aux function to know how the windows indices...\nfunction [level, num_window] = get_window_indices(wxl, w_linear_index)\n accum_windows = 0;\n for i=1:size(wxl,2)\n accum_windows = accum_windows + wxl(i);\n if w_linear_index <= accum_windows\n level = i;\n num_window = accum_windows - w_linear_index;\n break \n end\n end\n\nend\n\n"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "non_max_suppression.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/non_max_suppression.m", "size": 1983, "source_encoding": "utf_8", "md5": "f929c2cfe27c04ea18291377d6a6c143", "text": "function max_indices = non_max_suppression(coords, probs, bb_sizes)\n% NON_MAX_SUPRESION applies non maximum suppression to get the \n% most confident detections over a proximity area.\n% Input: window coordiantes, window classification probabilities and \n% window size referenced to the level 0 pyramid layer.\n% Output: the most confident window indices\n\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: 23-Nov-2013 12:37:16 $ \n%$ Revision : 1.00 $ \n%% FILENAME : non_max_supresion.m \n\nMIN_DIST = 1024;\n MAX_AREA = 128*64/6;\n\nmax_indices = [];\nm = size(coords,1);\nindices = 1:m;\n\n% while we have nearby windows not suppressed...\nwhile size(indices, 2) > 1\n\n nearby_window_indices = indices(1);\n \n % for all remaining indices...\n for i=2:size(indices,2)\n \n % we search the nearby windows\n d = distance(coords(indices(1),:), coords(indices(i),:));\n if d < MIN_DIST\n nearby_window_indices = [nearby_window_indices, indices(i)];\n end\n\n area = overlap(coords(indices(1),:), coords(indices(i),:), bb_sizes(indices(i),:));\n if area > MAX_AREA\n nearby_window_indices = [nearby_window_indices, indices(i)];\n end\n end\n \n % from the nearby windows we only keep the most confident one\n nearby_probs = probs(nearby_window_indices,1);\n max_indx = nearby_window_indices(max(nearby_probs) == nearby_probs);\n max_indices = [max_indices, max_indx];\n \n % removing from indices all the treated ones\n for 
k=1:size(nearby_window_indices,2)\n indices = indices(indices ~= nearby_window_indices(k));\n end\n \nend\nend\n\n\n\n\nfunction d = distance(coords1, coords2)\n d = sum((coords1-coords2).^2);\nend\n\nfunction overlapping_area = overlap(coords1, coords2, bb_size2)\n delta = coords1-coords2;\n delta_x = delta(1);\n delta_y = delta(2);\n h = bb_size2(1);\n w = bb_size2(2);\n overlapping_area = w*h - abs(delta_x*w) - abs(delta_y*h) + abs(delta_x*delta_y);\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "static_detector.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/static_detector.m", "size": 5315, "source_encoding": "utf_8", "md5": "c2e656c452e2addd5dc511b90b999441", "text": "function static_detector(I,model)\n% STATIC_DETECTOR given a folder containing PNG or JPG images applies\n% the specified libSVM model to scan through every image \n% for pedestrians in a sliding window basis.\n% \n% All the parameters are hard coded to guaratee independence from\n% external files, assuming once this function in run the whole set of \n% parameters are well known and no further experimentation is needed.\n%\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: 05-Dec-2013 23:09:05 $ \n%$ Revision : 1.00 $ \n%% FILENAME : static_detector.m \n\n %% VARS\n hog_size = 3780;\n scale = 1.2;\n stride = 8;\n show_all = false;\n draw_all = false;\n \n %% color definitions\n green = uint8([0,255,0]);\n yellow = uint8([255,255,0]);\n \n %% shape inserters\n ok_shapeInserter = ...\n vision.ShapeInserter('BorderColor','Custom','CustomBorderColor',green);\n other_shapeInserter = ...\n vision.ShapeInserter('BorderColor','Custom','CustomBorderColor',yellow);\n\n %images_path = uigetdir('.\\..','Select image folder');\n \n %% image reading\n % jpgs = rdir(strcat(images_path,filesep,'*.jpg'));\n % pngs = rdir(strcat(images_path,filesep,'*.png'));\n % images = [jpgs, pngs];\n % num_images = size(images,1);\n\n %for i=1:num_images\n \n %fprintf('-------------------------------------------\\n')\n %disp(images(i).name);\n %I = imread(images(i).name);\n\n %% Reescale\n [h,w,~] = size(I);\n rscale = min(w/96, h/160);\n I = imresize(I, 1.2/rscale);\n\n %% HOG extraction for all image windows\n ti = tic;\n fprintf('\\nbegining the pyramid hog extraction...\\n')\n [hogs, windows, wxl, coordinates] = get_pyramid_hogs(I, hog_size, scale, stride);\n tf = toc(ti);\n fprintf('time to extract %d hogs: %d\\n', size(hogs,1), tf);\n\n %% SVM prediction for all windows... 
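% --- Illustrative usage sketch (not part of the original files) ---
% What non_max_suppression.m (defined above, and called further below by
% static_detector.m) does with a small, hypothetical set of detections:
% windows 1 and 2 lie within sqrt(MIN_DIST) = 32 px of each other, as do
% windows 3 and 4, so only the most confident window of each group is kept.
coords   = [100 80; 104 82; 600 80; 603 78];   % level-0 (x,y) per window
probs    = [0.91; 0.72; 0.88; 0.65];           % SVM confidence per window
bb_sizes = repmat([128 64], 4, 1);             % [height width] per window
keep = non_max_suppression(coords, probs, bb_sizes);
% keep is [1 3]: the higher-confidence window of each nearby pair.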
\n [predict_labels, ~, probs] = ...\n svmpredict(zeros(size(hogs,1),1), hogs, model, '-b 1');\n\n %% filtering only positives windows instances\n % index of positives windows\n range = 1:max(size(predict_labels));\n pos_indxs = range(predict_labels == 1);\n %pos_indxs = range(probs(1) >= 0.8);\n\n % positive match information\n coordinates = coordinates';\n coordinates = coordinates(pos_indxs,:);\n probs = probs(pos_indxs,:);\n\n\n %% Computing level 0 coordinates for drawing\n [bb_size, l0_coordinates] = compute_level0_coordinates(wxl, coordinates, pos_indxs, scale);\n \n %% Showing all positive windows in separate figures\n if show_all\n windows = windows(:,:,:,pos_indxs);\n\n for w=1:size(pos_indxs,2)\n figure('name',sprintf('x=%d, y=%d', l0_coordinates(w,1),l0_coordinates(w,2))); \n % figure('name',sprintf('x=%d, y=%d', bb_size(w,1),bb_size(w,2))); \n ii = insertText(windows(:,:,:,w), [1,1], probs(w), 'FontSize',9,'BoxColor', 'green');\n imshow(ii) \n end\n end\n\n %% Drawing detections over the original image\n %draw = I;\n shape_inserter = other_shapeInserter;\n if ~draw_all\n \n shape_inserter = ok_shapeInserter;\n \n %% non-max-suppression!\n max_indxs = non_max_suppression(l0_coordinates, probs, bb_size); \n pos_indxs = pos_indxs(max_indxs);\n l0_coordinates = l0_coordinates(max_indxs,:);\n bb_size = bb_size(max_indxs, :);\n probs = probs(max_indxs,:);\n end\n \n draw = I;\n for w=1:size(pos_indxs,2)\n %% Drawing the rectangle on the original image\n x = l0_coordinates(w,1);\n y = l0_coordinates(w,2);\n\n % Rectangle conf\n bb_height = bb_size(w,1);\n bb_width = bb_size(w,2);\n rectangle = int32([x,y,bb_width,bb_height]);\n\n draw = step(shape_inserter, draw, rectangle);\n draw = insertText(draw, [x,y+bb_height], probs(w), 'FontSize',9,'BoxColor', 'green');\n\n end\n % Showing image with all the detection boxes\n imshow(draw);\n figure(gcf);\n % pause;\n \n %end\nend\n\n\n\n%% Aux function to compute the windows coordiantes at level 0 pyramid image\nfunction [bb_size, new_cords] = compute_level0_coordinates(wxl, coordinates, inds, scale)\n\n % Consts\n bb_width = 64;\n bb_height = 128;\n \n % Vars\n new_cords = zeros(size(inds,2),2);\n bb_size = zeros(size(inds,2),2);\n \n % for each positive window index...\n for i=1:size(inds,2)\n \n % linear index of the window\n ind = inds(i);\n \n % find the positive window original level \n level = 0;\n\n while ind > sum(wxl(1:level))\n \tlevel = level + 1;\n end\n\n% fprintf('Match found at level %d\\n', level);\n \n % compute original coordinates in Level0 image \n factor = (scale^(level-1));\n new_cords(i,1) = floor(coordinates(i,1) * factor);\n new_cords(i,2) = floor(coordinates(i,2) * factor);\n \n % Bounding Box resizing?\n bb_size(i,1) = ceil(bb_height*factor);\n bb_size(i,2) = ceil(bb_width*factor);\n end\nend"} +{"plateform": "github", "repo_name": "voquocduy/Pedestrian-Detection-using-Hog-Svm-Matab-master", "name": "get_negative_windows.m", "ext": ".m", "path": "Pedestrian-Detection-using-Hog-Svm-Matab-master/get_negative_windows.m", "size": 1915, "source_encoding": "utf_8", "md5": "ecdfa7fe0e0ffad38158346f78fed842", "text": " \nfunction get_negative_windows(num_random_windows, num_images)\n% GET_NEGATIVE_WINDOWS retrieves random windows from the original negative\n% image set and saves the window in the specified\n% folder when prompted.\n% INPUT:\n% num_random_windows: random window samples per image\n% num_images: number of images from where to sample windows.\n%\n%$ Author: Jose Marcos Rodriguez $ \n%$ Date: N/D $ \n%$ Revision 
: 1.00 $ \n%% FILENAME : get_negative_windows.m \n \n % Paths\n negative_images_path = uigetdir('.\\images','Select original images path');\n windows_dst_path = uigetdir('.\\images','Select destination path');\n\n if isa(negative_images_path,'double') || isa(windows_dst_path,'double')\n cprintf('Errors','Invalid paths...\\nexiting...\\n\\n')\n return \n end\n \n negative_images = dir(negative_images_path);\n negative_images = negative_images(3:end);\n \n if num_images < 1\n fprintf('\\ngetting all available images\\n')\n num_images = numel(negative_images);\n elseif num_images > numel(negative_images)\n fprintf('not enought images...\\ngetting al available images\\n')\n num_images = numel(negative_images);\n end\n \n for i=1:num_images\n for nrw = 1:num_random_windows\n % getting random window from negative image\n file_name = ...\n strcat(negative_images_path,filesep,negative_images(i).name);\n I = imread(file_name);\n random_image_window = get_window(I,64,128, 'random');\n \n % making saving path\n [~, name, ext] = fileparts(file_name);\n file_saving_name = ...\n strcat(windows_dst_path, filesep,strcat(name,'_',sprintf('%02d',nrw)),ext);\n \n % saving image...\n imwrite(random_image_window, file_saving_name);\n end \n end\n \n \n \n \n"} +{"plateform": "github", "repo_name": "SkoltechRobotics/pcl-master", "name": "plot_camera_poses.m", "ext": ".m", "path": "pcl-master/gpu/kinfu/tools/plot_camera_poses.m", "size": 3407, "source_encoding": "utf_8", "md5": "d210c150da98c3f4667f2c1e8d4eb6d2", "text": "% Copyright (c) 2014-, Open Perception, Inc.\n% All rights reserved.\n%\n% Redistribution and use in source and binary forms, with or without\n% modification, are permitted provided that the following conditions\n% are met:\n%\n% * Redistributions of source code must retain the above copyright\n% notice, this list of conditions and the following disclaimer.\n% * Redistributions in binary form must reproduce the above\n% copyright notice, this list of conditions and the following\n% disclaimer in the documentation and/or other materials provided\n% with the distribution.\n% * Neither the name of the copyright holder(s) nor the names of its\n% contributors may be used to endorse or promote products derived\n% from this software without specific prior written permission.\n%\n% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n% \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n% LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n% FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n% COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n% INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n% BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n% LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n% LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n% ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n% POSSIBILITY OF SUCH DAMAGE.\n% Author: Marco Paladini \n\n% sample octave script to load camera poses from file and plot them\n% example usage: run 'pcl_kinfu_app -save_pose camera.csv' to save\n% camera poses in a 'camera.csv' file\n% run octave and cd into the directory where this script resides\n% and call plot_camera_poses('')\n\nfunction plot_camera_poses(filename)\nposes=load(filename);\n%% show data on a 2D graph\nh=figure();\nplot(poses,'*-');\nlegend('x','y','z','qw','qx','qy','qz');\n\n%% show data as 3D axis\nh=figure();\nfor n=1:size(poses,1)\n t=poses(n,1:3);\n q=poses(n,4:7);\n r=q2rot(q);\n coord(h,r,t);\nend\noctave_axis_equal(h);\n\n%% prevent Octave from quitting if called from the command line\ninput('Press enter to continue'); \nend\n\nfunction coord(h,r,t)\nfigure(h);\nhold on;\nc={'r','g','b'};\np=0.1*[1 0 0;0 1 0;0 0 1];\nfor n=1:3 \n a=r*p(n,:)';\n plot3([t(1),t(1)+a(1)], [t(2),t(2)+a(2)], [t(3),t(3)+a(3)], 'color', c{n});\nend\nend\n\nfunction R=q2rot(q)\n% conversion code from http://en.wikipedia.org/wiki/Rotation_matrix%Quaternion\t\nNq = q(1)^2 + q(2)^2 + q(3)^2 + q(4)^2;\nif Nq>0; s=2/Nq; else s=0; end\nX = q(2)*s; Y = q(3)*s; Z = q(4)*s;\nwX = q(1)*X; wY = q(1)*Y; wZ = q(1)*Z;\nxX = q(2)*X; xY = q(2)*Y; xZ = q(2)*Z;\nyY = q(3)*Y; yZ = q(3)*Z; zZ = q(4)*Z;\nR=[ 1.0-(yY+zZ) xY-wZ xZ+wY ;\n xY+wZ 1.0-(xX+zZ) yZ-wX ;\n xZ-wY yZ+wX 1.0-(xX+yY) ];\nend\n\nfunction octave_axis_equal(h)\n% workaround for axis auto not working in 3d\n% tanks http://octave.1599824.n4.nabble.com/axis-equal-help-tp1636701p1636702.html\nfigure(h);\nxl = get (gca, 'xlim');\nyl = get (gca, 'ylim');\nzl = get (gca, 'zlim');\nspan = max ([diff(xl), diff(yl), diff(zl)]);\nxlim (mean (xl) + span*[-0.5, 0.5])\nylim (mean (yl) + span*[-0.5, 0.5])\nzlim (mean (zl) + span*[-0.5, 0.5])\nend\n\n"} +{"plateform": "github", "repo_name": "LarsonLab/UTEMRI_Brain-master", "name": "precon_3dute_pfile_bartv300_allec.m", "ext": ".m", "path": "UTEMRI_Brain-master/ImageReconstruction/precon_3dute_pfile_bartv300_allec.m", "size": 13757, "source_encoding": "utf_8", "md5": "78e61bbb10ca3b8c81f5817eb027c508", "text": "function [im, header] = precon_3dute_pfile_bartv300_allec(pfile, ...\n coils, undersamp, ...\n skip, freq_shift, echoes,reg_coe, skip_calib_coil, cc_coil, rNecho,ind_echo_recon, espirit_recon);\n% [im, header, rhuser, data, data_grid] = recon_3dute_pfile(pfile,\n% coils, undersamp, skip, freq_shift, echoes)\n%\n% Reconstructs 3D UTE PR image acquired with half-projections and\n% ramp sampling from pfile only. Reads in scan parameters from pfile.\n% INPUTS:\n% pfile - points to a scanner raw data file\n% either use Pfile 'PXXXXX.7', or, for multiple files, can also be\n% 'filename01', where the number increases (can use rename_pfiles.x to appropriately rename)\n% use a cell array of the first Pfiles for averaging\n% coils (optional) - can select which coils to reconstruct\n% undersamp (optional) - [undersamp_dc, undersamp_imsize]. 
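% --- Illustrative check (not part of the original plot_camera_poses.m) ---
% A quick sanity check of the q2rot helper defined above in
% plot_camera_poses.m. q2rot is a local subfunction of that script, so this
% snippet assumes the helper has been copied into its own q2rot.m (or is
% run from inside the script). A unit quaternion [w x y z] =
% [cos(t/2) 0 0 sin(t/2)] should map to a rotation of t radians about z.
q = [cos(pi/4), 0, 0, sin(pi/4)];   % 90-degree rotation about the z axis
R = q2rot(q);
% Expected (up to rounding): R = [0 -1 0; 1 0 0; 0 0 1],
% and q = [1 0 0 0] gives the 3x3 identity matrix.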
Undersampling ratios for\n% density compensation and/or reconstructed image size\n% (default is [1.0 1.0])\n% skip (optional) - shifts readout by this number of points. Can\n% be positive or negative\n% freq_shift (optional) - demodulates by this frequency (Hz) to correct for bulk frequency miscalibrations\n% echoes (optional) - can select which echoes to reconstruct\n% rNecho - real number of echo\n% ind_echo_recon - recon each individual echo sequentially\n% espirit_recon - do espirit recon or just nufft\n% OUTPUTS:\n% im - 3d image\n% header - header data from pfile\n% data - raw projection data\n% data_grid - gridded data\n%\n% Peder Larson 7/28/2008, 6/24/2011\n% pcao has some name conflictions for rawloadX_cp (in the src dir) vs. rawloadX and\n% rawheadX_cp (in the src dir) vs. rawheadX\n\nif ~isdeployed\n addpath /home/pcao/src/bart-0.2.07/matlab/\n addpath /home/pcao/src/bart-0.3.01/matlab/\n addpath /home/plarson/matlab/3DUTE-recon\n% addpath /netopt/share/ese/ESE_DV26.0_R01/tools/matlab/read_MR/\nend\n\ndisp(pfile)\nheader = read_MR_headers(pfile); % <<< DCM >>> 20180312\n\nif (nargin < 2)\n coils = [];\nend\n\nif (nargin < 3) || (isempty(undersamp))\n undersamp = 1/header.rdb_hdr.user26;\n undersamp_dc = undersamp; undersamp_imsize = undersamp;\nelseif length(undersamp) == 1\n undersamp_dc = undersamp; undersamp_imsize = undersamp;\nelse\n undersamp_dc = undersamp(1); undersamp_imsize = undersamp(2);\nend \n\nif (nargin < 4) || (isempty(skip))\n skip = 0;\nend\n\nif (nargin < 5) || (isempty(freq_shift))\n freq_shift = 0;\nend\n\n\nif (nargin < 6) || (isempty(echoes))\n Necho = header.rdb_hdr.nechoes; % <<< DCM >>> 20180312\n echoes = 1:Necho;\nelse\n Necho = length(echoes(:));\nend\n\n\n% if isempty(coils)\n% Ncoils = (header.rdb_hdr.dab(2)-header.rdb_hdr.dab(1))+1; % <<< DCM >>> 20180312\n% coils = 1:Ncoils;\n% else\n Ncoils = length(coils(:));\n% end\n\n\nif (nargin < 7) || (isempty(reg_coe))\n reg_coe = '-r0.05';\nend\n\nif (nargin < 12) || (isempty(espirit_recon))\n\n espirit_recon = 1;\nend\n\n\nzeropad_factor = 1;\n\n% <<< DCM >>> 20180312 -- KLUDGE ... 
add all echoes and slices\n\n%frsize = header.rdb_hdr.da_xres; % <<< DCM >>> 20180312\n%nframes = header.rdb_hdr.da_yres -1 ; % <<< DCM >>> 20180312 -- possibly off by one because of \"baseline\" scan\n%nslices = header.rdb_hdr.nslices; % <<< DCM >>> 20180312\n\nfrsize = header.rdb_hdr.frame_size;%header2.frsize;\nnframes = header.rdb_hdr.nframes;%header2.nframes;\nnslices = header.rdb_hdr.nslices;%header2.nslices;\n\n\nnfphases = header.image.fphase;\n\ndata = read_MR_rawdata(pfile,'db', 1:nfphases, echoes, 1:nslices,coils); % <<< DCM >>> 20180312 -- replaced rawloadX_cp\ndata = squeeze(data);\ndisp('READ DATA SIZE: ')\ndisp(size(data))\n\nNramp = header.rdb_hdr.user11;%rhuser(12);\nspres = header.rdb_hdr.user1;%rhuser(2);\nresz_scale = header.rdb_hdr.user2; %rhuser(3);\nFOV = [header.rdb_hdr.user16, header.rdb_hdr.user17, header.rdb_hdr.user18]; %rhuser(17:19).';\nNprojections = header.rdb_hdr.user9;%rhuser(10);\nacqs = header.rdb_hdr.user19;%rhuser(20);\nshift = [header.rdb_hdr.user20, header.rdb_hdr.user21,0] / spres; %[rhuser(21:22);0].'/spres;\nimsize = FOV*10/spres * zeropad_factor / undersamp_imsize;\nfinal_imsize=round(imsize);\na = 1.375; W = 5; S = calc_kerneldensity(1e-4, a);\ngridsize = round(a*imsize);\n\n\n% For extracting iamges within FOV\nrnum = gridsize(1); cnum = gridsize(2); snum = gridsize(3);\nru_skip = ceil((rnum-final_imsize(1))/2); rd_skip = rnum - final_imsize(1)-ru_skip;\ncu_skip = ceil((cnum-final_imsize(2))/2); cd_skip = cnum - final_imsize(2)-cu_skip;\nsu_skip = ceil((snum-final_imsize(3))/2); sd_skip = snum - final_imsize(3)-su_skip;\n\n% frsize = size(data,1);\n% nframes = size(data,2); \n% Necho = size(data,3);\n% nslices = size(data,4);\n% Ncoils = size(data,5);\n\nif nframes*nslices >= Nprojections\n % data is now frsize, 2*ceil(Nprojections/(Nslices*2)) echoes,\n % Nslices, Ncoils\n if Nprojections*2 < nframes*nslices\n % storing extra echos in slices\n data = cat(3, data(:,:,:,1:nslices/2,:), data(:,:,:,nslices/2+[1:nslices/2],:));\n nslices = nslices/2;\n Necho = 2*Necho; % num_utes?\n echoes = 1:Necho;\n end\n \n % <<< DCM >>> 20180312 -- reshape and permute the data (interleaved readouts corrected)\n data = permute(data, [2 1 4 3 5]); %pcao changed coil and echo dimensions\n reordered_projections = [[1:2:nslices],[2:2:nslices]];\n fprintf('DEBUG STUFF: \\n');\n disp(size(data));\n %disp(reordered_projections);\n fprintf('frsize = %d, nframes = %d, nslices= %d, Necho = %d, Ncoils = %d; product = %d\\n', frsize, nframes, nslices, Necho, Ncoils, frsize*nframes*nslices*Necho*Ncoils);\n fprintf('END DEBUG STUFF\\n');\n data = data(:, :, reordered_projections, :);\n data = reshape(data, [frsize nframes*nslices Necho Ncoils]); \n data = data(:,1:Nprojections,:,:);\n \n if Nprojections*2 < nframes*nslices\n %determine and set the real Number of echo, pcao20170214\n if (nargin < 10) || (isempty(rNecho) || (rNecho < 1))\n sum_nonzero_echo = squeeze(sum(sum(sum(abs(data),1),2),4))> 0;\n Necho = sum(sum_nonzero_echo);\n else\n Necho = rNecho;\n end\n echoes = 1:Necho;\n data = data(:,:,1:Necho,:);\n end\nelse\n % legacy recon code\n data = read_multiple_pfiles(pfile);\nend\n\n% apply different frequency demodulation\nif freq_shift ~= 0\n dt = 1/(2*header.rdb_hdr.bw*1e3);\n t = [0:frsize-1].' 
* dt;\n Sdata = size(data);\n data = data .* repmat( exp(-i*2*pi*freq_shift*t), [1 Sdata(2:end)]);\nend\n\n% Determine trajectory\n[theta, phi, kmax, dcf] = calc_3dpr_ellipse(FOV*10, spres, spres*resz_scale);\n\nx = cos(phi) .* sin(theta) .* kmax;\ny = sin(phi) .* sin(theta) .* kmax;\nz = cos(theta) .* kmax;\n\nkscale = 0.5 / max(abs(kmax(:)));\nx = kscale * x;\ny = kscale * y;\nz = kscale * z;\n\n% skip samples?\nif skip > 0\n frsize = frsize - skip;\n data = data(1+skip:end,:,:,:);\n % elseif skip < 0\n % frsize = frsize - skip;\n % data = [repmat(data(1,:,:,:,:), [-skip 1 1 1 1]); data];\nend\n\n\n[ksp, dcf_all] = calc_pr_ksp_dcf([x(:),y(:),z(:)],Nramp,frsize,dcf,undersamp_dc);\nclear x; clear y; clear z;\nclear theta; clear phi; clear dcf;\nclear kmax;\n\n% ksp((frsize * Nprojections + 1):end,:) = [];\n% dcf_all(:,(Nprojections + 1):end) = [];\n\nif skip < 0\n data = data(1:end+skip,:,:,:);\n dcf_new = dcf_all(1-skip:end,:);\n dcf_new(1,:) = sum(dcf_all(1:1-skip,:),1); % add density of central skipped points\n dcf_all = dcf_new;\n clear dcf_new\n \n ksp_new = zeros((frsize+skip)*Nprojections,3);\n for n = 1:Nprojections\n ksp_new([1:frsize+skip] + (n-1)*(frsize+skip),:) = ...\n ksp([1-skip:frsize] + (n-1)*frsize,:);\n end\n ksp = ksp_new;\n clear ksp_new\n \n % frsize = frsize + skip; % not necessary to change\nend\n\nif (nargin < 8) || (isempty(skip_calib_coil))\n skip_calib_coil = 0;%skip the coil calibration\nend\n\nif (nargin < 9) || (isempty(cc_coil))\n cc_coil = 0; % the coil compression \nend\n\n\n\nif cc_coil && (length(coils) > cc_coil) %do coil compression\n disp('Coil compression')\n data(:,:,:,1:cc_coil) = bart(sprintf('cc -r12 -P%d -S',cc_coil), data);\n% clear data;\n% data = cc_data;\n% clear cc_data;\n coils = 1:cc_coil;\n skip_calib_coil = 0;%cannot skip the sensitivity measurement\n data(:,:,:,(cc_coil +1):end) = [];\nend\n\n\nktraj_all=reshape(ksp,[frsize Nprojections 3]);\n\nktraj(1,:,:)=ktraj_all(:,:,1)*imsize(1);\nktraj(2,:,:)=ktraj_all(:,:,2)*imsize(2);\nktraj(3,:,:)=ktraj_all(:,:,3)*imsize(3);\ntot_npts=frsize*Nprojections;\n\n% dcf_all2(1,:,:)=dcf_all;\n% dcf_all2(2,:,:)=dcf_all;\n% dcf_all2(3,:,:)=dcf_all;\n% clear dcf_all;\n% dcf_all = dcf_all2;\n\n\nfor e = 1:length(echoes)\n disp(['Preparing reconstructing echo ' int2str(e) '...'])\n for Ic = 1:length(coils)\n % disp([' Reconstructing coil ' int2str(coils(Ic)) '...'])\n % tic\n \n data_c = squeeze(data(:,:,e,Ic));\n data_pc(:,Ic) = data_c(:).*exp(j*2*pi*(ksp(:,1)*shift(1) + ksp(:,2)*shift(2) + ksp(:,3)*shift(3)));\n end\n clear data_c;\n if e == echoes\n clear data; clear ksp;\n end\n\n data_pc = conj(data_pc);\n \n tic\n\n if ~espirit_recon\n disp([' Reconstructing echo ' int2str(e) '...'])\n im(:,:,:,:,e)=squeeze(bart('nufft -a -p', reshape(dcf_all,[1 tot_npts]), reshape(ktraj, [3 tot_npts]), reshape(data_pc,[1 tot_npts 1 length(coils)])));\n else\n root_dir = pwd;\n list = exist([root_dir '/smap_m1.mat'], 'file');\n if e == 1\n if ~skip_calib_coil || ~list\n disp('unfft to generate calibration k-space')\n % name = ['/data/larson/brain_uT2/2016-04-27_7T-vounteer/tmpfftec' int2str(echoes)];\n im_under=bart('nufft -a -p', reshape(dcf_all,[1 tot_npts]), reshape(ktraj, [3 tot_npts]), reshape(data_pc,[1 tot_npts 1 length(coils)]));\n k_calb=bart('fft -u 7',im_under);\n k_calb = bart(sprintf('crop 0 %d', 2*round(size(k_calb,1)*0.2)),k_calb);\n k_calb = bart(sprintf('crop 1 %d', 2*round(size(k_calb,2)*0.2)),k_calb);\n k_calb = bart(sprintf('crop 2 %d', 2*round(size(k_calb,3)*0.2)),k_calb);\n k_calb_zerop = 
padarray(k_calb, round([size(im_under)/2-size(k_calb)/2]));\n clear im_under; clear k_calb;\n \n smap_m1=bart('ecalib -k4 -r12 -m1 -c0.80', k_calb_zerop); %two sets sensitivity maps are needed here, as tested on the /data/vig2/UTE_ZTE/3dute/brain/20150506_TEphase\n % figure, imshow3(abs(squeeze(smap_m1(:,:,2,:))));\n clear k_calb_zerop;\n % if ~isdeployed\n save('smap_m1.mat','smap_m1');\n % end\n else\n load smap_m1.mat\n end\n end\n \n smapall(:,:,:,:,1,e) = smap_m1;\n dataall(:,:,:,:,1,e) = reshape(data_pc,[1 tot_npts 1 length(coils)]);\n ktrjall(:,:,1,1,1,e) = reshape(ktraj, [3 tot_npts]);\n decfall(:,:,1,1,1,e) = reshape(dcf_all,[1 tot_npts]);\n end\n toc\nend\n\nif espirit_recon\n clear data\n disp('Reconstructing all echos ')\n %try add ' -o' scale *; add -n turn off random shift; add -I to choose\n %iterative thresholding; move -l1 to reg_coe\n % recon_l1 = bartv207(['nusense -I -o -n ' reg_coe ' -p'], reshape(dcf_all,[1 tot_npts]), reshape(ktraj, [3 tot_npts]), reshape(data_pc,[1 tot_npts 1 length(coils)]),smap_m1);\n bartcmd = ['pics ' reg_coe];\n bartaddcmd{1} = '-p ';\n bartaddcmd{2} = '-t ';\n bartaddcmd{3} = ' ';\n bartaddcmd{4} = ' ';\n \n if nargin < 11 || (isempty(cc_coil))\n ind_echo_recon = 0;\n end\n \n if ind_echo_recon\n for e = 1:length(echoes)\n disp([' Individual reconstructing echo ' int2str(e) '...'])\n recon_l1(:,:,:,1,1,e) = bartv301addcmd(bartcmd, bartaddcmd, decfall(:,:,1,1,1,e), ktrjall(:,:,1,1,1,e), dataall(:,:,:,:,1,e),smapall(:,:,:,:,1,e));\n end\n else\n recon_l1 = bartv301addcmd(bartcmd, bartaddcmd, decfall, ktrjall, dataall,smapall);\n end\n im = squeeze(recon_l1); %somehow recon has two sets of data, like corespond to two sets of sensitivity maps\n \n clear recon_l1\nend\nreturn\n\nfunction data = read_multiple_pfiles(pfile)\n% legacy code for reading multiple pfile data\n\nMAX_FRAMES = 16384;\n\nif ~iscell(pfile)\n temp = pfile;\n pfile = cell(1);\n pfile{1} = temp;\nend\n\nnex = length(pfile);\n\n[data1, header, rhuser] = rawloadX(pfile{1}, [0:MAX_FRAMES],1,1);\nNprojections = rhuser(10);\nacqs = rhuser(20);\n\nfrsize = size(data1,1);\nNcoils = size(data1,5);\n\ndata = zeros(frsize, Nprojections, Necho, Ncoils);\n\nfor n = 1:nex\n for a = 1:acqs\n pfile_a = parse_pfile(pfile{1}, a);\n \n disp(['Reading ' pfile_a '...'])\n tic\n [data1] = rawloadX(pfile_a, [0:MAX_FRAMES],1,1);\n toc\n \n data(:, [MAX_FRAMES*(a-1)+1:min(Nprojections, MAX_FRAMES*a)], :,:) = ...\n data(:, [MAX_FRAMES*(a-1)+1:min(Nprojections, MAX_FRAMES*a)], :,:) + ...\n squeeze(data1(:, 1:min(Nprojections - (a-1)*MAX_FRAMES,MAX_FRAMES),:,1,:));\n clear data1;\n \n end\nend\n\nreturn\n\n\nfunction pfile_name = parse_pfile(pfilestring, acq)\n\n% determine if PXXXXX.7 filename or other\nif strcmp(pfilestring(end-1:end), '.7')\n pfile_num = sscanf(pfilestring(end-6:end-2),'%d');\n pfile_path = pfilestring(1:end-8);\n pfile_name = sprintf('%sP%05d.7', pfile_path, pfile_num + acq-1);\nelse\n pfile_num = sscanf(pfilestring(end-1:end),'%d');\n if isempty(pfile_num) % just single pfile\n pfile_name = pfilestring;\n else\n pfile_path = pfilestring(1:end-2);\n pfile_name = sprintf('%s%02d', pfile_path, pfile_num + acq-1);\n end\nend\n\nreturn\n"} +{"plateform": "github", "repo_name": "LarsonLab/UTEMRI_Brain-master", "name": "ute_dicom.m", "ext": ".m", "path": "UTEMRI_Brain-master/ImageReconstruction/ute_dicom.m", "size": 3971, "source_encoding": "utf_8", "md5": "3ad8fd96e99a55aeb5c47fbaf7ba76f0", "text": "function ute_dicom(finalImage, pfile_name, output_image, image_option, scaleFactor, 
seriesNumberOffset)\n% Convert matlab 3D matrix to dicom for UTE sequences\n% resolution is fixed in the recon - FOV/readout(from scanner), isotropic\n% matrix size is determined in the recon\n% Inputs:\n% finalImage: 3D image matrix\n% pfile_name: original pfile name\n% output_image: output directory\n% image_option: 1 for both phase and magnitude, 0(offset) mag only\n% scaleFactor: scale image matrix\n% seriesNumber: output series number\n%\n% August, 2018, Xucheng Zhu, Nikhil Deveshwar\n\n\nif nargin<4\n image_option = 0;\nend\naddpath(genpath('../util'));\n\n\naddpath(genpath('../orchestra-sdk-1.7-1.matlab'));\nIsize = size(finalImage);\n\npfile = GERecon('Pfile.Load', pfile_name);\npfile.header = GERecon('Pfile.Header', pfile);\npfile.phases = numel(finalImage(1,1,1,1,:)); \npfile.xRes = size(finalImage,1);\npfile.yRes = size(finalImage,2);\npfile.slices = size(finalImage,3);\npfile.echoes = size(finalImage,4);\n\n% calc real res(isotropic,axial)\ncorners = GERecon('Pfile.Corners', 1);\norientation = GERecon('Pfile.Orientation', 1);\noutCorners = GERecon('Orient', corners, orientation);\n% res = abs(outCorners.UpperRight(2)-outCorners.UpperLeft(2))/Isize(3);\nres2 = 2;\nscale = Isize/Isize(3);\nscale2 = [1, 1e-6, 1];\ncorners.LowerLeft = corners.LowerLeft.*scale2;\ncorners.UpperLeft = corners.UpperLeft.*scale2;\ncorners.UpperRight = corners.UpperRight.*scale2;\ninfo = GERecon('Pfile.Info', 1);\n\n\n% % NEED TO CONFIRM orientation/corners based on slice number\n% % HERE IS HOW this is done without the \"corners\" adjustment shown\n% % above:\n% sliceInfo.pass = 1;\n% sliceInfo.sliceInPass = s;\n% info = GERecon('Pfile.Info', 1);\n% orientation = info.Orientation; corners = info.Corners;\n\n\n% X = repmat(int16(0), [96 86 1 94]);\nX = zeros(96, 86, 1, 94);\n\n\nseriesDescription = ['UTE T2 - ', output_image];\nseriesNumber = pfile.header.SeriesData.se_no * 100 + seriesNumberOffset;\n\n\nfor s = flip(1:pfile.slices)\n \n \n for e = 1:pfile.echoes\n for p = 1:pfile.phases\n \n mag_t =flip(double(finalImage(:,:,s,e,p) * scaleFactor));\n% figure;imshow(mag_t);title('mag_t');\n\n% mag_t2 = GERecon('Orient', mag_t, orientation);\n \n\n imageNumber = ImageNumber(s, e, p, pfile);\n filename = ['DICOMs_' output_image, '/image_',num2str(imageNumber) '.dcm'];\n GERecon('Dicom.Write', filename, mag_t, imageNumber, orientation, corners, seriesNumber, seriesDescription);\n \n if image_option~=0\n phase_t = flip(flip(single(angle(finalImage(:,:,s,e,p))).',1),2);\n %phase_t = GERecon('Orient', phase_t, orientation);\n filename = [output_dir,'DICOMS/phase_',num2str(imageNumber) '.dcm'];\n GERecon('Dicom.Write', filename, phase_t, imageNumber, orientation, corners);\n end\n \n [X(:,:,1,s),map] = dicomread(filename);\n end\n end\n \n % sliceInfo.pass = 1\n % sliceInfo.sliceInPass = s\n % info = GERecon('Pfile.Info', 1)\n \n % Get corners and orientation for next slice location?\n corners.LowerLeft(3) = corners.LowerLeft(3) + res2;\n corners.UpperLeft(3) = corners.UpperLeft(3) + res2;\n corners.UpperRight(3) = corners.UpperRight(3) + res2;\n \n % Check header settings in Horos to ensure pixel spacing value is\n % correct relative to slice thickness\n \nend\n\ndisp([output_image, ' generated.']);\n\n% figure;montage(X(:,:,1,:),map);title(output_image);\n\nend\n \n\nfunction number = ImageNumber(slice, echo, phase, pfile)\n% Image numbering scheme:\n% P0S0E0, P0S0E1, ... P0S0En, P0S1E0, P0S1E1, ... P0S1En, ... P0SnEn, ...\n% P1S0E0, P1S0E1, ... 
PnSnEn\n slicesPerPhase = pfile.slices * pfile.echoes;\n number = (phase-1) * slicesPerPhase + (slice-1) * pfile.echoes + (echo-1) + 1;\nend\n"} +{"plateform": "github", "repo_name": "LarsonLab/UTEMRI_Brain-master", "name": "get_TE.m", "ext": ".m", "path": "UTEMRI_Brain-master/ImageReconstruction/get_TE.m", "size": 1517, "source_encoding": "utf_8", "md5": "d8688efc7a911afd02528f7c5f87b3b3", "text": "%% Import data from text file.\n% Script for importing data from the following text file:\n%\n% /data/larson/brain_uT2/2017-09-29_3T-volunteer/multi_utes.dat\n%\n% To extend the code to different selected data or a different text file,\n% generate a function instead of a script.\n\n% Auto-generated by MATLAB on 2017/09/29 16:08:36\n\n%% Initialize variables.\nfunction TE = get_TE(filename)\n\n%filename = 'multi_utes.dat';\ndelimiter = ' ';\nstartRow = 3;\n\n%% Format string for each line of text:\n% column1: double (%f)\n% For more information, see the TEXTSCAN documentation.\nformatSpec = '%f%*s%*s%[^\\n\\r]';\n\n%% Open the text file.\nfileID = fopen(filename,'r');\n\n%% Read columns of data according to format string.\n% This call is based on the structure of the file used to generate this\n% code. If an error occurs for a different file, try regenerating the code\n% from the Import Tool.\ndataArray = textscan(fileID, formatSpec, 'Delimiter', delimiter, 'MultipleDelimsAsOne', true, 'HeaderLines' ,startRow-1, 'ReturnOnError', false);\n\n%% Close the text file.\nfclose(fileID);\n\n%% Post processing for unimportable data.\n% No unimportable data rules were applied during the import, so no post\n% processing code is included. To generate code which works for\n% unimportable data, select unimportable cells in a file and regenerate the\n% script.\n\n%% Allocate imported array to column variable names\nTE = dataArray{:, 1};\nTE = TE';\n\n%% Clear temporary variables\nclearvars filename delimiter startRow formatSpec fileID dataArray ans;"} +{"plateform": "github", "repo_name": "longcw/pytorch-faster-rcnn-master", "name": "voc_eval.m", "ext": ".m", "path": "pytorch-faster-rcnn-master/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m", "size": 1332, "source_encoding": "utf_8", "md5": "3ee1d5373b091ae4ab79d26ab657c962", "text": "function res = voc_eval(path, comp_id, test_set, output_dir)\n\nVOCopts = get_voc_opts(path);\nVOCopts.testset = test_set;\n\nfor i = 1:length(VOCopts.classes)\n cls = VOCopts.classes{i};\n res(i) = voc_eval_cls(cls, VOCopts, comp_id, output_dir);\nend\n\nfprintf('\\n~~~~~~~~~~~~~~~~~~~~\\n');\nfprintf('Results:\\n');\naps = [res(:).ap]';\nfprintf('%.1f\\n', aps * 100);\nfprintf('%.1f\\n', mean(aps) * 100);\nfprintf('~~~~~~~~~~~~~~~~~~~~\\n');\n\nfunction res = voc_eval_cls(cls, VOCopts, comp_id, output_dir)\n\ntest_set = VOCopts.testset;\nyear = VOCopts.dataset(4:end);\n\naddpath(fullfile(VOCopts.datadir, 'VOCcode'));\n\nres_fn = sprintf(VOCopts.detrespath, comp_id, cls);\n\nrecall = [];\nprec = [];\nap = 0;\nap_auc = 0;\n\ndo_eval = (str2num(year) <= 2007) | ~strcmp(test_set, 'test');\nif do_eval\n % Bug in VOCevaldet requires that tic has been called first\n tic;\n [recall, prec, ap] = VOCevaldet(VOCopts, comp_id, cls, true);\n ap_auc = xVOCap(recall, prec);\n\n % force plot limits\n ylim([0 1]);\n xlim([0 1]);\n\n print(gcf, '-djpeg', '-r0', ...\n [output_dir '/' cls '_pr.jpg']);\nend\nfprintf('!!! 
%s : %.4f %.4f\\n', cls, ap, ap_auc);\n\nres.recall = recall;\nres.prec = prec;\nres.ap = ap;\nres.ap_auc = ap_auc;\n\nsave([output_dir '/' cls '_pr.mat'], ...\n 'res', 'recall', 'prec', 'ap', 'ap_auc');\n\nrmpath(fullfile(VOCopts.datadir, 'VOCcode'));\n"} +{"plateform": "github", "repo_name": "dkouzoup/hanging-chain-acado-master", "name": "plot_partial_condensing.m", "ext": ".m", "path": "hanging-chain-acado-master/code/utils/plot_partial_condensing.m", "size": 1721, "source_encoding": "utf_8", "md5": "306d4177769297aab60fa87db66c8a40", "text": "function FHANDLE = plot_partial_condensing(logs)\n\n%% process data\n\nsolver = logs{1}.solver(1:strfind(logs{1}.solver,'_')-1);\nNMASS = size(logs, 1);\nBS = size(logs,2);\nFS = 24;\n\nCPUTIMES = zeros(NMASS, BS);\nBLOCKSIZE = zeros(NMASS, BS);\n\nfor ii = 1:NMASS\n for jj = 1:BS\n if ~contains(logs{ii, jj}.solver, solver)\n error('log of different solver detected')\n end\n CPUTIMES(ii, jj) = max(logs{ii, jj}.cputime - logs{ii, jj}.simtime);\n BLOCKSIZE(ii, jj) = str2double(logs{ii, jj}.solver(strfind(logs{ii, jj}.solver, '_B')+2:end));\n end\nend\n\nSPEEDUPS = 1./(CPUTIMES./repmat(CPUTIMES(:,1), 1, BS));\nBLOCKSIZE(BLOCKSIZE == 0) = 1;\n \n%% plot\n\nFHANDLE = figure;\n\nlegends = {};\nfor ii = 1:NMASS\n MARKER = set_up_marker(logs{ii, 1}.Nmass);\n plot(BLOCKSIZE(ii,:), SPEEDUPS(ii,:), 'Marker', MARKER, 'MarkerSize', 12, 'color', 'k', 'Linewidth',1.5, 'LineStyle', '-');\n hold on\n legends{end+1} = ['$n_{\\mathrm{m}} = ' num2str(logs{ii, 1}.Nmass) '$'];\nend\ngrid on\n\nset(gca, 'fontsize',FS);\nxlabel('Block size $M$', 'interpreter','latex', 'fontsize',FS);\nylabel('Speedup', 'interpreter','latex', 'fontsize',FS);\nset(gca,'TickLabelInterpreter','latex')\ntitle(['Partial condensing with \\texttt{' solver '}'],'interpreter','latex', 'fontsize',FS);\nl = legend(legends);\nl.Interpreter = 'latex';\nl.Location = 'northeast';\n\nxlim([BLOCKSIZE(1,1) BLOCKSIZE(1,end)]);\n\nWIDE = 0;\n\nif WIDE\n FHANDLE.Position = [100 300 1200 500];\nelse\n FHANDLE.Position = [100 300 600 500]; \nend\n\n\nend\n\n\nfunction marker = set_up_marker(nmasses)\n\nif nmasses == 3\n marker = 'o';\nelseif nmasses == 4\n marker = '>';\nelseif nmasses == 5\n marker = 'h';\nelse\n marker = '.';\nend\n\nend\n"} +{"plateform": "github", "repo_name": "dkouzoup/hanging-chain-acado-master", "name": "plot_logs.m", "ext": ".m", "path": "hanging-chain-acado-master/code/utils/plot_logs.m", "size": 4222, "source_encoding": "utf_8", "md5": "76a079fac6d034835eeb007d55e22c64", "text": "function [FHANDLE] = plot_logs(logs, FADED, LOGSCALE, FHANDLE, xlims, ylims)\n\n% PLOT_LOGS plot performance of QP solvers as a function of prediction\n% horizon N.\n%\n% INPUTS:\n%\n% logs logged data from simulation (cell array)\n% FADED set to true to plot solver curves faded (boolean)\n% FHANDLE pass existing figure handle to get multiple logs in on plot\n\nif nargin < 6\n ylims = [0 130];\nend\n\nif nargin < 5\n xlims = [10 100];\nend\n\nFS = 24;\n\n%% default values for inputs\n\nif nargin < 4 || isempty(FHANDLE)\n FHANDLE = figure;\nend\n\nif nargin < 3 || isempty(LOGSCALE)\n LOGSCALE = false;\nend\n\nif nargin < 2 || isempty(FADED)\n FADED = false;\nend\n\nif FADED\n alpha = 0.3;\n style = '--';\nelse\n alpha = 1.0;\n style = '-';\nend\n\ncolor = [0 0 0 alpha];\n\n%% process data\n\nsolver = 'undefined';\nnexp = length(logs);\nkk = 0; % will contain number of different solvers in log cell array\n\nfor ii = 1:nexp\n \n if ~strcmp(solver,logs{ii}.solver)\n kk = kk+1;\n solver = logs{ii}.solver;\n 
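% start a fresh series for this solver: x will collect the prediction horizon N, y the solver CPU time (cputime - simtime)\n            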
data(kk).x = [];\n data(kk).y = [];\n data(kk).marker = set_up_marker(solver);\n data(kk).solver = set_up_solver_name(solver);\n\n end\n \n data(kk).x = [data(kk).x logs{ii}.N];\n data(kk).y = [data(kk).y logs{ii}.cputime - logs{ii}.simtime];\n \nend\n\n%% plot timings\n\nfigure(FHANDLE);\n\nif ~LOGSCALE\n \n for kk = 1:length(data)\n plot(data(kk).x, 1e3*(max(data(kk).y)), ...\n 'Marker', data(kk).marker, 'MarkerSize', 12, 'MarkerEdgeColor', [1-alpha 1-alpha 1-alpha], ...\n 'Color', color, 'Linewidth',1.5, 'LineStyle', style);\n hold on\n end\n grid on\n \n set_up_plot(data, false, FS);\n xlim(xlims)\n ylim(ylims)\n\nelse\n \n for kk = 1:length(data)\n loglog(data(kk).x, 1e3*max(data(kk).y), ...\n 'Marker', data(kk).marker, 'MarkerSize', 12, 'MarkerEdgeColor', [1-alpha 1-alpha 1-alpha], ...\n 'Color', color, 'linewidth',1.5, 'LineStyle', style);\n hold on\n end\n grid on\n \n set_up_plot(data, true, FS);\n xlim(xlims)\n ylim(ylims)\n % title('Worst case CPU time in closed-loop','interpreter','latex', 'fontsize', FS)\nend\n\n\nFHANDLE.Position = [100 300 600 500];\n\nend\n\nfunction set_up_plot(data, LOGPLOT, FS)\n\nset(gca, 'fontsize',FS);\nxlabel('Prediction horizon $N$', 'interpreter','latex', 'fontsize',FS);\nylabel('CPU time $(\\mathrm{ms})$', 'interpreter','latex', 'fontsize',FS);\n\nset(gca,'TickLabelInterpreter','latex')\n\nif ~LOGPLOT\n hLegend = findobj(gcf, 'Type', 'Legend');\n \n if isempty(hLegend)\n l = legend(data.solver);\n l.Interpreter = 'latex';\n l.Location = 'northwest';\n else\n for ii = 1:length(data)\n hLegend.String{end-ii+1} = data(end-ii+1).solver; \n end\n end\nend\n\nif data(1).x(end) > data(1).x(1)\n xlim([data(1).x(1) data(1).x(end)])\nend\n\nend\n\n\nfunction marker = set_up_marker(solver)\n\nif strcmp(solver, 'qpOASES_N2')\n \n marker = '^';\n \nelseif strcmp(solver, 'qpOASES_N3')\n \n marker = 'v';\n\nelseif strcmp(solver, 'FORCES')\n \n marker = 's';\n \nelseif strcmp(solver, 'qpDUNES') || strcmp(solver, 'qpDUNES_B0')\n \n marker = 'p';\n \nelseif strcmp(solver, 'HPMPC') || strcmp(solver, 'HPMPC_B0')\n \n marker = '*';\n \nelseif contains(solver, 'HPMPC_B')\n \n marker = 'x';\n \nelseif contains(solver, 'qpDUNES_B')\n \n marker = 'd';\n \nelse \n marker = 'o';\nend\n\nend\n\n\nfunction solver_name_latex = set_up_solver_name(solver)\n\nsolver_name_latex = solver;\n\nsolver_name_latex(solver_name_latex == '_') = ' ';\nif contains(solver_name_latex, 'qpOASES')\n solver_name_latex = replace(solver_name_latex, 'N', 'C$N^');\n solver_name_latex(end+1) = '$';\nend\nif strcmp(solver_name_latex, 'HPMPC B0')\n solver_name_latex = 'HPMPC';\nend\nif strcmp(solver_name_latex, 'qpDUNES B0')\n solver_name_latex = 'qpDUNES';\nend\nif contains(solver_name_latex, 'HPMPC B') || contains(solver_name_latex, 'qpDUNES B')\n solver_name_latex = [solver_name_latex(1:strfind(solver_name_latex, 'B')-1) 'PC'];\n % solver_name_latex = replace(solver_name_latex, 'B', 'B$_{');\n % solver_name_latex(end+1:end+2) = '}$';\nend\n\nend"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "muellerData.m", "ext": ".m", "path": "smn-thesis-master/muellerData.m", "size": 52836, "source_encoding": "utf_8", "md5": "c342735994beb5434aef01012c5eb83e", "text": "classdef (InferiorClasses = {?matlab.graphics.axis.Axes}) muellerData\n \n properties\n Label % string\n Value % 4,4,M,N,... array of Mueller matrix values\n ErValue % 4,4,M,N,... 
array of Mueller matrix error values\n Size % size of Value\n Dims % cell array of length ndims(Value)-2 containing arrays of length M,N,...\n DimNames % cell array of strings with names of a dimensions M,N,...\n HV % M,N,... array of detector high voltage values (4PEM specific)\n DC % M,N,... array of waveform DC values (4PEM specific)\n reflection\n end\n \n methods\n function obj = muellerData(value) % Class Constructor\n obj.Size = size(value);\n obj.Value = value;\n obj.Label = '';\n end\n function varargout = subsref(obj,s) % overload subsref for custom indexing\n switch s(1).type\n case '()'\n if length(obj) == 1 % positional indexing of object properties\n if length(s(1).subs) ~= length(obj.Size)\n error('Error. Size of object and requested index are not equal');\n end\n if length(s) == 1\n varargout = {objSubset(obj,s)};\n else\n varargout = {builtin('subsref',objSubset(obj,s(1)),s(2:end))};\n end\n else\n if length(s) == 1\n varargout = {builtin('subsref',obj,s)}; % index object array\n else\n obj = builtin('subsref',obj,s(1));\n if numel(obj) == 1\n varargout = {builtin('subsref',obj,s(2:end))};\n else\n temp = builtin('subsref',obj(1),s(2:end));\n if isa(temp,'muellerData')\n for k=2:numel(obj)\n temp(k) = builtin('subsref',obj(k),s(2:end));\n end\n else\n temp = {temp};\n for k=2:numel(obj)\n temp{k} = builtin('subsref',obj(k),s(2:end));\n end\n end\n varargout = {temp};\n end\n end\n end\n \n case '{}'\n if length(obj) == 1\n if length(s(1).subs) ~= length(obj.Size)\n error('Error. Size of object and requested index are not equal');\n end\n if length(s) == 1\n s = dims2index(obj,s);\n varargout = {objSubset(obj,s)};\n else\n s(1) = dims2index(obj,s(1));\n varargout = {builtin('subsref',objSubset(obj,s(1)),s(2:end))};\n end\n else\n if any(arrayfun(@(x) length(s(1).subs) ~= length(x.Size),obj))\n error('Error. Size of object and requested index are not equal');\n end\n if length(s) == 1\n temp = obj;\n for k=1:numel(obj)\n subs = dims2index(obj(k),s);\n temp(k) = objSubset(obj(k),subs);\n varargout = {temp};\n end\n else\n subs = dims2index(obj(1),s(1));\n temp = builtin('subsref',objSubset(obj(1),subs),s(2:end));\n if isa(temp,'muellerData')\n for k=2:numel(obj)\n subs = dims2index(obj(k),s(1));\n temp(k) = builtin('subsref',objSubset(obj(k),subs),s(2:end));\n end\n else\n temp = {temp};\n for k=2:numel(obj)\n subs = dims2index(obj(k),s(1));\n temp{k} = builtin('subsref',objSubset(obj(k),subs),s(2:end));\n end\n end\n varargout = {temp};\n end\n end\n \n case '.'\n if length(obj) > 1\n temp = builtin('subsref',obj(1),s);\n if isa(temp,'muellerData')\n for k=2:numel(obj)\n temp(k) = builtin('subsref',obj(k),s);\n end\n else\n temp = {temp};\n for k=2:numel(obj)\n temp{k} = builtin('subsref',obj(k),s);\n end\n end\n varargout = {temp};\n else\n varargout = {builtin('subsref',obj,s)};\n end\n end\n end\n function n = numArgumentsFromSubscript(~,~,~)\n n = 1; % I don't like multiple outputs =P\n end\n function obj = merge(obj1,obj2) % merge two objects\n if ~(length(obj1.Size) == length(obj2.Size))\n error(['Objects not compatible with merge.'....\n ' Length of obj.Size must be equal for objects.'])\n end\n if isempty(obj1.Dims) || isempty(obj2.Dims)\n error('Objects not compatible with merge. Dims must be defined.')\n end\n idx = find(cell2mat(cellfun(@isequal,obj1.Dims,obj2.Dims,'uniformoutput',0))==0);\n if length(idx) > 1 || ~isempty(intersect(obj1.Dims{idx},obj2.Dims{idx}))\n error('Objects not compatible with merge. 
Dims must differ in 1 element only.')\n end\n idx2 = length(obj1.Size) - length(obj1.Dims) + idx;\n obj = muellerData(cat(idx2,obj1.Value,obj2.Value));\n if ~isempty(obj1.ErValue) && ~isempty(obj2.ErValue)\n obj.ErValue = cat(idx2,obj1.ErValue,obj2.ErValue);\n end\n if ~isempty(obj1.HV) && ~isempty(obj2.HV)\n obj.HV = cat(idx,obj1.HV,obj2.HV);\n end\n if ~isempty(obj1.DC) && ~isempty(obj2.DC)\n obj.DC = cat(idx,obj1.DC,obj2.DC);\n end\n obj.Dims = obj1.Dims;\n obj.Dims{idx} = [obj1.Dims{idx} , obj2.Dims{idx}];\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n end\n function obj = squeeze(obj)\n obj.Value = squeeze(obj.Value);\n obj.ErValue = squeeze(obj.ErValue);\n obj.Size = size(obj.Value);\n if ~isempty(obj.Dims)\n logicalIdx = cellfun(@(x) ~isscalar(x),obj.Dims);\n obj.Dims = obj.Dims(logicalIdx);\n if ~isempty(obj.DimNames)\n obj.DimNames = obj.DimNames(logicalIdx);\n end\n end\n obj.HV = squeeze(obj.HV);\n obj.DC = squeeze(obj.DC);\n end\n function obj = plus(obj1,obj2) % overloading of + for muellerData.\n % to call, use: obj1 + obj2\n % Dims and DimNames and reflection are copied from obj1\n % It doesn't make sense to define HV and DC\n if isa(obj1,'muellerData') && isa(obj2,'muellerData')\n if isequal(obj1.Size,obj2.Size)\n obj = muellerData(obj1.Value + obj2.Value);\n obj.Dims = obj1.Dims;\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n else\n error('Error in obj1 + obj2 for muellerData. obj.Size must be equal for objects.')\n end\n elseif isa(obj1,'muellerData') && isscalar(obj2)\n obj = obj1;\n obj.Value = obj.Value + obj2;\n elseif isa(obj2,'muellerData') && isscalar(obj1)\n obj = obj2;\n obj.Value = obj.Value + obj1;\n end\n end\n function obj = minus(obj1,obj2) % overloading of - for muellerData.\n if isequal(obj1.Size,obj2.Size)\n obj = muellerData(obj1.Value - obj2.Value);\n obj.Dims = obj1.Dims;\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n else\n error('Error in obj1 - obj2 for muellerData. obj.Size must be equal for objects.')\n end\n end\n function obj = times(obj1,obj2) % overloading of .* for muellerData.\n if isequal(obj1.Size,obj2.Size)\n obj = muellerData(obj1.Value .* obj2.Value);\n obj.Dims = obj1.Dims;\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n else\n error('Error in obj1 .* obj2 for muellerData. obj.Size must be equal for objects.')\n end\n end\n function obj = rdivide(obj1,obj2) % overloading of ./ for muellerData.\n if isequal(obj1.Size,obj2.Size)\n obj = muellerData(obj1.Value ./ obj2.Value);\n obj.Dims = obj1.Dims;\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n else\n error('Error in obj1 ./ obj2 for muellerData. 
obj.Size must be equal for objects.')\n end\n end\n function obj = mtimes(obj1,obj2) % overloading of * for muellerData.\n ck1 = isa(obj1, 'muellerData');\n ck2 = isa(obj2, 'muellerData');\n if ck1 && ck2\n if ndims(obj2.Value) > ndims(obj1.Value)\n obj = obj2;\n else\n obj = obj1;\n end\n obj.Value = multiprod(obj1.Value, obj2.Value, [1 2], [1 2]);\n elseif ck1\n obj = obj1;\n obj.Value = multiprod(obj1.Value, obj2, [1 2], [1 2]);\n else\n obj = obj2;\n obj.Value = multiprod(obj1, obj2.Value, [1 2], [1 2]);\n end\n% if isequal(obj1.Size,obj2.Size)\n% val1 = shapeDown(obj1.Value);\n% val2 = shapeDown(obj2.Value);\n% for i=1:size(val1,3); val1(:,:,i) = val1(:,:,i)*val2(:,:,i); end\n% obj = muellerData(shapeUp(val1,obj1.Size));\n% obj.Dims = obj1.Dims;\n% obj.DimNames = obj1.DimNames;\n% obj.reflection = obj1.reflection;\n% else\n% error('Error in obj1 ./ obj2 for muellerData. obj.Size must be equal for objects.')\n% end\n end\n function obj = mrdivide(obj1,obj2) % overloading of / for muellerData.\n if isequal(obj1.Size,obj2.Size)\n val1 = shapeDown(obj1.Value);\n val2 = shapeDown(obj2.Value);\n for i=1:size(val1,3); val1(:,:,i) = val1(:,:,i)/val2(:,:,i); end\n obj = muellerData(shapeUp(val1,obj1.Size));\n obj.Dims = obj1.Dims;\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n else\n error('Error in obj1 ./ obj2 for muellerData. obj.Size must be equal for objects.')\n end\n end\n function obj = mldivide(obj1,obj2) % overloading of \\ for muellerData.\n if isequal(obj1.Size,obj2.Size)\n val1 = shapeDown(obj1.Value);\n val2 = shapeDown(obj2.Value);\n for i=1:size(val1,3); val1(:,:,i) = val1(:,:,i) \\ val2(:,:,i); end\n obj = muellerData(shapeUp(val1,obj1.Size));\n obj.Dims = obj1.Dims;\n obj.DimNames = obj1.DimNames;\n obj.reflection = obj1.reflection;\n else\n error('Error in obj1 ./ obj2 for muellerData. 
obj.Size must be equal for objects.')\n end\n end\n function handles = plot(varargin)\n handles = prePlot(varargin{:});\n end\n function handles = subplot(varargin)\n % Example: % obj.subplot( {'lb','lbp','cb';'ld','ldp','cd'} , 'legend','none' )\n [obj,funcs] = varargin{:};\n figure\n M = size(funcs,1);\n N = size(funcs,2);\n funcs = funcs(:);\n handles = gobjects(1,M*N);\n for idx=1:M*N\n ax = subplot(M,N,idx);\n fn = str2func(funcs{idx});\n handles(idx) = plot(fn(obj),'handle',ax,varargin{3:end},...\n 'title',[', ',upper(funcs{idx})]);\n end\n end\n function handles = print(varargin)\n filePath = varargin{2}; % extract the filepath\n [pathStr,name] = fileparts(filePath);\n filePath = [pathStr,'/',varargin{1}.Label,name];\n handles = prePlot(varargin{[1,3:end]}); % make the figure\n print(gcf,filePath,'-depsc'); % print figure as .eps file\n end\n % Calls to static methods on obj.Value, returns new class instance %\n function obj = optProp(obj)\n obj.Value = obj.s_optProp(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lm(varargin)\n obj = varargin{1};\n if nargin == 1\n obj.Value = obj.s_lm(obj.Value);\n else\n obj.Value = obj.s_lm(obj.Value,varargin{2});\n end\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = logm(obj)\n obj.Value = obj.s_logm(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lu(obj)\n obj = obj.logm;\n g = diag([-1 1 1 1]);\n for n=1:size(obj.Value,3)\n obj.Value(:,:,n) = (obj.Value(:,:,n) + g*obj.Value(:,:,n).'*g)/2;\n end\n end\n function obj = lm2(obj)\n obj = obj.logm;\n g = diag([-1 1 1 1]);\n for n=1:size(obj.Value,3)\n obj.Value(:,:,n) = (obj.Value(:,:,n) - g*obj.Value(:,:,n).'*g)/2;\n end\n end\n function obj = expm(obj)\n obj.Value = obj.s_expm(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lb(obj)\n obj.Value = obj.s_lb(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = ld(obj)\n obj.Value = obj.s_ld(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lbp(obj)\n obj.Value = obj.s_lbp(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = ldp(obj)\n obj.Value = obj.s_ldp(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = cb(obj)\n obj.Value = obj.s_cb(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = cd(obj)\n obj.Value = obj.s_cd(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = a(obj)\n obj.Value = obj.s_a(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = a_aniso(obj)\n obj.Value = obj.s_a_aniso(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = a_iso(obj)\n obj.Value = obj.s_a_iso(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = ldmag(obj)\n obj.Value = obj.s_ldmag(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = ldang(obj)\n obj.Value = obj.s_ldang(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lbang(obj)\n obj.Value = obj.s_lbang(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lbmag(obj)\n obj.Value = obj.s_lbmag(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = di(obj)\n obj.Value = obj.s_di(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = 
jones(obj)\n obj.Value = obj.s_jones(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = nearestjones(obj)\n obj.Value = obj.s_nearestjones(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = mfilter(obj)\n obj.Value = obj.s_mfilter(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = covar(obj)\n obj.Value = obj.s_covar(obj.Value);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = mrotate(obj,angle_rad)\n obj.Value = obj.s_mrotate(obj.Value,angle_rad);\n obj.ErValue = [];\n obj.Size = size(obj.Value);\n end\n function obj = lm2optProp(obj)\n % [LB;LD;LBp;LDp;CB;CD;A]\n lm = obj.Value;\n sz = size(lm);\n lm = shapeDown(lm);\n val(1,:) = lm(4,3,:);\n val(2,:) = -lm(1,2,:);\n val(3,:) = lm(2,4,:);\n val(4,:) = -lm(1,3,:);\n val(5,:) = lm(2,3,:);\n val(6,:) = lm(1,4,:);\n val(7,:) = -lm(1,1,:);\n obj.Value = shapeUp(val, sz);\n end\n end\n \n methods(Static)\n % value = obj.Value\n function r = s_optProp(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2); % 2*T = sqrt(L.^2 + Lp.^2 + C.^2)\n O = (T.*K)./(sin(T));\n L=1i.*O.*( J(1,1,:) - J(2,2,:) );\n Lp=1i.*O.*( J(1,2,:) + J(2,1,:) );\n C=O.*( J(1,2,:) - J(2,1,:) );\n LB=real(L);\n LD=-imag(L);\n LBp=real(Lp);\n LDp=-imag(Lp);\n CB=real(C);\n CD=-imag(C);\n A = -2*real(log(1./K)); % mean absorption\n r = shapeUp(squeeze([LB;LD;LBp;LDp;CB;CD;A]),sz);\n end\n function value = s_lm(varargin)\n value = varargin{1};\n sz = size(value);\n if nargin == 1\n value = shapeDown(value);\n %J = nearestJones(value);\n J = MJ2J(value);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2);\n O = (T.*K)./(sin(T));\n L=1i.*O.*( J(1,1,:) - J(2,2,:) );\n Lp=1i.*O.*( J(1,2,:) + J(2,1,:) );\n C=O.*( J(1,2,:) - J(2,1,:) );\n LB=real(L);\n LD=-imag(L);\n LBp=real(Lp);\n LDp=-imag(Lp);\n CB=real(C);\n CD=-imag(C);\n A = 2*real(log(1./K)); % mean absorption\n value = shapeUp([A,-LD,-LDp,CD ; -LD,A,CB,LBp ; -LDp,-CB,A,-LB ; CD,-LBp,LB,A],sz);\n else\n n_int = varargin{2};\n value = reshape(value,4,4,size(value,3),[]);\n for j = 1:size(value,4)\n M = value(:,:,:,j);\n M = flip(M,3);\n J = nearestJones(M);\n K=(J(1,1,1).*J(2,2,1) - J(1,2,1)*J(2,1,1)).^(-1/2);\n T=2*acos((K.*(J(1,1,1) + J(2,2,1)))./2);\n O=(T+2*pi*n_int).*K./(sin(T/2)*2);\n \n N = size(J,3);\n L = zeros(1,N);\n Lp = zeros(1,N);\n C = zeros(1,N);\n A = zeros(1,N);\n \n L(1) = 1i.*O.*(J(1,1,1) - J(2,2,1));\n Lp(1) = 1i.*O.*(J(1,2,1) + J(2,1,1));\n C(1) = O.*(J(1,2,1) - J(2,1,1));\n A(1) = 2*real(log(1./K));\n \n n = n_int;\n \n for i = 2:N\n if n==0 || n==-1\n n_ar = [0,-1,1,-2,2];\n else\n n_ar = [n-1,-n,n,-(n+1),n+1];\n end\n K=(J(1,1,i).*J(2,2,i) - J(1,2,i)*J(2,1,i)).^(-1/2);\n T=2*acos((K.*(J(1,1,i) + J(2,2,i)))./2);\n O=(T+2*pi*n_ar).*K./(sin(T/2)*2);\n l = 1i.*O.*(J(1,1,i) - J(2,2,i));\n lp = 1i.*O.*(J(1,2,i) + J(2,1,i));\n c = O.*(J(1,2,i) - J(2,1,i));\n diffs = sum([L(i-1)-l;Lp(i-1)-lp;C(i-1)-c],1);\n [~,I] = min(diffs);\n L(i) = l(I);\n Lp(i) = lp(I);\n C(i) = c(I);\n n = n_ar(I);\n A(i) = 2*real(log(1./K));\n end\n \n LB=reshape(real(L),1,1,[]);\n LD=reshape(-imag(L),1,1,[]);\n LBp=reshape(real(Lp),1,1,[]);\n LDp=reshape(-imag(Lp),1,1,[]);\n CB=reshape(real(C),1,1,[]);\n CD=reshape(-imag(C),1,1,[]);\n A = reshape(A,1,1,[]);\n value(:,:,:,j) = ...\n flip([A,-LD,-LDp,CD ; -LD,A,CB,LBp ; -LDp,-CB,A,-LB ; 
CD,-LBp,LB,A],3);\n end\n value = reshape(value,sz);\n end\n end\n function r = s_logm(value) % log of Mueller matrix with filtering\n sz = size(value);\n value = shapeDown(value);\n Mfiltered = filterM(value);\n r = shapeUp(zeros(size(Mfiltered)),sz);\n for n=1:size(value,3); r(:,:,n) = logm(Mfiltered(:,:,n)); end\n end\n function r = s_expm(r) % log of Mueller matrix with filtering\n sz = size(r);\n r = shapeDown(r);\n for n=1:size(r,3); r(:,:,n) = expm(r(:,:,n)); end\n r = shapeUp(r,sz);\n end\n function r = s_lb(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = jonesAnisotropy(J);\n r = real(1i.*r.*( J(1,1,:) - J(2,2,:) ));\n r = shapeUp(r,sz);\n end % 0,90 linear retardance\n function r = s_ld(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = jonesAnisotropy(J);\n r = -imag(1i.*r.*( J(1,1,:) - J(2,2,:) ));\n r = shapeUp(r,sz);\n end % 0,90 linear extinction\n function r = s_lbp(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = jonesAnisotropy(J);\n r = real(1i.*r.*( J(1,2,:) + J(2,1,:) ));\n r = shapeUp(r,sz);\n end % 45,-45 linear retardance\n function r = s_ldp(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = jonesAnisotropy(J);\n r = -imag(1i.*r.*( J(1,2,:) + J(2,1,:) ));\n r = shapeUp(r,sz);\n end % 45,-45 linear extinction\n function r = s_cb(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = jonesAnisotropy(J);\n r = real(r.*( J(1,2,:) - J(2,1,:) ));\n r = shapeUp(r,sz);\n end % circular retardance\n function r = s_cd(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = jonesAnisotropy(J);\n r = -imag(r.*( J(1,2,:) - J(2,1,:) ));\n r = shapeUp(r,sz);\n end % circular extinction\n function r = s_a(value) % total mean extinction\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n r = -2*real(log( ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(1/2) ));\n r = shapeUp(r,sz);\n end\n function r = s_a_aniso(value) % anisotropic part of the mean extinction\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2); % 2*T = sqrt(L.^2 + Lp.^2 + C.^2)\n O = (T.*K)./(sin(T));\n LD = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n CD = -imag(O.*( J(1,2,:) - J(2,1,:) ));\n r = shapeUp(sqrt(LD.^2 + LDp.^2 + CD.^2),sz); % not same as imag(2*T) !\n end\n function r = s_a_iso(value) % isotropic part of the mean extinction\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2); % 2*T = sqrt(L.^2 + Lp.^2 + C.^2)\n O = (T.*K)./(sin(T));\n LD = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n CD = -imag(O.*( J(1,2,:) - J(2,1,:) ));\n r = shapeUp(-2*real(log(1./K)) - sqrt(LD.^2 + LDp.^2 + CD.^2),sz);\n end\n function r = s_ldmag(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n O = jonesAnisotropy(J);\n LD = imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n r = shapeUp(sqrt(LD.^2 + LDp.^2),sz);\n end\n function r = s_ldang(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n O = jonesAnisotropy(J);\n LD = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n 
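% LDp is the 45/-45 linear dichroism component; the orientation angle returned below is atan2(LDp, LD)/2\n            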
LDp = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n r = shapeUp(atan2(LDp , LD)./2,sz);\n %out = out + pi*(out < 0);\n end\n function r = s_lbang(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n O = jonesAnisotropy(J);\n LB = real(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LBp = real(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n r = atan2(LBp , LB)./2;\n r = shapeUp(r + pi*(r < 0),sz);\n end\n function r = s_lbmag(value)\n sz = size(value);\n value = shapeDown(value);\n J = nearestJones(value);\n O = jonesAnisotropy(J);\n LB = real(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LBp = real(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n r = shapeUp(sqrt(LB.^2 + LBp.^2),sz);\n end\n function r = s_di(value) % Depolarization Index\n sz = size(value);\n value = shapeDown(value);\n r = shapeUp((sqrt(squeeze(sum(sum(value.^2,1),2))./squeeze(value(1,1,:)).^2-1)./sqrt(3)).',sz);\n end\n function r = s_jones(value) % Jones matrix of a Mueller-Jones matrix\n sz = size(value);\n value = shapeDown(value);\n r = shapeUp(MJ2J(value),sz);\n end\n function r = s_nearestjones(value)\n sz = size(value);\n value = shapeDown(value);\n r = nearestJones(value); % Jones matrix\n % next line just phases the Jones matrix so that the\n % imaginary part of J(1,1) = 0. i.e., it matches case 'jones'\n for n=1:size(r,3); r(:,:,n) = exp( -1i*angle(r(1,1,n)) ) * r(:,:,n); end\n r = shapeUp(r,sz);\n end\n function r = s_mfilter(value) % closest physical Mueller matrix\n sz = size(value);\n value = shapeDown(value);\n r = shapeUp(filterM(value),sz);\n end\n function r = s_covar(value) % Mueller to Cloude covariance\n sz = size(value);\n value = shapeDown(value);\n r = shapeUp(M2Cov(value),sz);\n end\n function r = plotter(varargin)\n r = linePlot(varargin{:});\n end\n function r = s_mrotate(M,theta)\n % M is a Mueller matrix array of any dimension. The first two dimension\n % must be the Mueller matrix elements. 
MMout is a Mueller array with the\n % same dimension as the input array.\n \n % October 17, 2016: sign of theta changed so +LB transforms to +LB' with\n % theta = pi/4.\n sz = size(M);\n M = shapeDown(M);\n r = M;\n theta=-2*theta;\n C2=cos(theta);\n S2=sin(theta);\n r(1,2,:) = M(1,2,:)*C2 + M(1,3,:)*S2;\n r(1,3,:) = M(1,3,:)*C2 - M(1,2,:)*S2;\n r(2,1,:) = M(2,1,:)*C2 + M(3,1,:)*S2;\n r(3,1,:) = M(3,1,:)*C2 - M(2,1,:)*S2;\n r(2,4,:) = M(2,4,:)*C2 + M(3,4,:)*S2;\n r(3,4,:) = M(3,4,:)*C2 - M(2,4,:)*S2;\n r(4,2,:) = M(4,2,:)*C2 + M(4,3,:)*S2;\n r(4,3,:) = M(4,3,:)*C2 - M(4,2,:)*S2;\n r(2,2,:) = C2*(M(3,2,:)*S2 + M(2,2,:)*C2) + S2*(M(3,3,:)*S2 + M(2,3,:)*C2);\n r(2,3,:) = C2*(M(3,3,:)*S2 + M(2,3,:)*C2) - S2*(M(3,2,:)*S2 + M(2,2,:)*C2);\n r(3,2,:) = -C2*(M(2,2,:)*S2 - M(3,2,:)*C2) - S2*(M(2,3,:)*S2 - M(3,3,:)*C2);\n r(3,3,:) = S2*(M(2,2,:)*S2 - M(3,2,:)*C2) - C2*(M(2,3,:)*S2 - M(3,3,:)*C2);\n r = shapeUp(r,sz);\n end\n function fig = mergeAxes(h,sz)\n h = h(:);\n set(h,'Units','Pixels');\n p = get(h,'Position');\n ti = get(h,'TightInset');\n extents = ...\n cellfun(@(p,ti) [ti(1) + ti(3) + p(3) , ti(2) + ti(4) + p(4)],p,ti,'uniformoutput',0);\n extents = max(cell2mat(extents));\n [I,J] = ind2sub(sz,1:length(h));\n hspace = 10;\n vspace = 10;\n figSz = (flip(sz)).*[hspace,vspace] + flip(sz).*extents ;\n \n fig = figure('Units','Pixels','Position',[0, 0, figSz(1), figSz(2)] );\n for i=1:length(h)\n os1 = p{i}(1) - ti{i}(1);\n os2 = p{i}(2) - ti{i}(2);\n obj = h(i).Parent.Children;\n set(obj,'Units','Pixels');\n pos = get(obj,'Position');\n obj = copyobj(obj,fig);\n if length(obj) == 1\n pos = pos + [J(i) * hspace + (J(i) - 1) * extents(1) - os1 ,...\n (sz(1)-I(i)) * vspace + (sz(1)-I(i)) * extents(2) - os2 ,...\n 0,0];\n obj.Position = pos;\n else\n for j=1:length(obj)\n temp = pos{j} + ...\n [(J(i)-1) * hspace + (J(i) - 1) * extents(1) - os1 ,...\n (sz(1)-I(i)) * vspace + (sz(1)-I(i)) * extents(2) - os2 ,...\n 0,0];\n obj(j).Position = temp;\n end\n end\n end\n end\n end\nend\n\n% LOCAL FUNCTIONS\n% =========================================================================\nfunction s = dims2index(obj,s) % for indexing with Dims\nif isempty(obj.Dims)\n error('Error. 
obj.Dims not defined.');\nend\nsz = length(s.subs) - length(obj.Dims);\nfor i=1:length(obj.Dims)\n if s.subs{i+sz} ~= ':'\n [X,I] = sort(obj.Dims{i}); % added this to allow unsorted Dims\n indices = unique(round(fracIndex(X,s.subs{i+sz})),'first');\n s.subs{i+sz} = I(indices);\n end\nend\nend\nfunction obj = objSubset(obj,s) % obj parsing\nobj.Value = obj.Value(s.subs{:});\nobj.Size = size(obj.Value);\nif ~isempty(obj.ErValue)\n obj.ErValue = obj.ErValue(s.subs{:});\nend\nobj.DimNames = obj.DimNames;\nlsubs = length(s.subs) + 1;\nif ~isempty(obj.HV)\n obj.HV = obj.HV(s.subs{(lsubs-sum(size(obj.HV) ~= 1)):end});\nend\nif ~isempty(obj.DC)\n obj.DC = obj.DC(s.subs{(lsubs-sum(size(obj.DC) ~= 1)):end});\nend\nif ~isempty(obj.Dims)\n sz = lsubs - length(obj.Dims) - 1;\n for i=1:length(obj.Dims)\n obj.Dims{i} = obj.Dims{i}(s.subs{i+sz});\n end\nend\nend\nfunction out = shapeDown(out)\nif ndims(out) > 3 % reshape array into 4,4,N\n out = reshape(out,4,4,[]);\nend\nend % reshape\nfunction out = shapeUp(out,sz) % overly complicated reshaping\nsz2 = size(out);\nif length(sz)>=3 % reshape to match input dimensions\n out = reshape(out,[sz2(1:(length(sz2)-1)),sz(3:length(sz))]);\nend\nsz2 = size(out);\nif sz2(1) == 1 % remove leading singletons if necessary\n if sz2(2) == 1\n out = shiftdim(out,2); % out = reshape(out,sz2(3:end));\n else\n out = shiftdim(out,1); %out = reshape(out,sz2(2:end));\n end\nend\nend\nfunction J = MJ2J(M) % Mueller-Jones to Jones\nJ(1,1,:) = ((M(1,1,:)+M(1,2,:)+M(2,1,:)+M(2,2,:))/2).^(1/2);\nk = 1./(2.*J(1,1,:));\nJ(1,2,:) = k.*(M(1,3,:)+M(2,3,:)-1i.*(M(1,4,:)+M(2,4,:)));\nJ(2,1,:) = k.*(M(3,1,:)+M(3,2,:)+1i.*(M(4,1,:)+M(4,2,:)));\nJ(2,2,:) = k.*(M(3,3,:)+M(4,4,:)+1i.*(M(4,3,:)-M(3,4,:)));\nend\nfunction C = M2Cov(M) % Mueller to Cloude covariance\nC(1,1,:) = M(1,1,:) + M(1,2,:) + M(2,1,:) + M(2,2,:);\nC(1,2,:) = M(1,3,:) + M(1,4,:)*1i + M(2,3,:) + M(2,4,:)*1i;\nC(1,3,:) = M(3,1,:) + M(3,2,:) - M(4,1,:)*1i - M(4,2,:)*1i;\nC(1,4,:) = M(3,3,:) + M(3,4,:)*1i - M(4,3,:)*1i + M(4,4,:);\nC(2,1,:) = M(1,3,:) - M(1,4,:)*1i + M(2,3,:) - M(2,4,:)*1i;\nC(2,2,:) = M(1,1,:) - M(1,2,:) + M(2,1,:) - M(2,2,:);\nC(2,3,:) = M(3,3,:) - M(3,4,:)*1i - M(4,3,:)*1i - M(4,4,:);\nC(2,4,:) = M(3,1,:) - M(3,2,:) - M(4,1,:)*1i + M(4,2,:)*1i;\nC(3,1,:) = M(3,1,:) + M(3,2,:) + M(4,1,:)*1i + M(4,2,:)*1i;\nC(3,2,:) = M(3,3,:) + M(3,4,:)*1i + M(4,3,:)*1i - M(4,4,:);\nC(3,3,:) = M(1,1,:) + M(1,2,:) - M(2,1,:) - M(2,2,:);\nC(3,4,:) = M(1,3,:) + M(1,4,:)*1i - M(2,3,:) - M(2,4,:)*1i;\nC(4,1,:) = M(3,3,:) - M(3,4,:)*1i + M(4,3,:)*1i + M(4,4,:);\nC(4,2,:) = M(3,1,:) - M(3,2,:) + M(4,1,:)*1i - M(4,2,:)*1i;\nC(4,3,:) = M(1,3,:) - M(1,4,:)*1i - M(2,3,:) + M(2,4,:)*1i;\nC(4,4,:) = M(1,1,:) - M(1,2,:) - M(2,1,:) + M(2,2,:);\nC = C./2;\nend\nfunction M = Cov2M(C) % Cloude covariance to Mueller\nM(1,1,:) = C(1,1,:) + C(2,2,:) + C(3,3,:) + C(4,4,:);\nM(1,2,:) = C(1,1,:) - C(2,2,:) + C(3,3,:) - C(4,4,:);\nM(1,3,:) = C(1,2,:) + C(2,1,:) + C(3,4,:) + C(4,3,:);\nM(1,4,:) = ( -C(1,2,:) + C(2,1,:) - C(3,4,:) + C(4,3,:) )*1i;\nM(2,1,:) = C(1,1,:) + C(2,2,:) - C(3,3,:) - C(4,4,:);\nM(2,2,:) = C(1,1,:) - C(2,2,:) - C(3,3,:) + C(4,4,:);\nM(2,3,:) = C(1,2,:) + C(2,1,:) - C(3,4,:) - C(4,3,:);\nM(2,4,:) = ( -C(1,2,:) + C(2,1,:) + C(3,4,:) - C(4,3,:) )*1i;\nM(3,1,:) = C(1,3,:) + C(2,4,:) + C(3,1,:) + C(4,2,:);\nM(3,2,:) = C(1,3,:) - C(2,4,:) + C(3,1,:) - C(4,2,:);\nM(3,3,:) = C(1,4,:) + C(2,3,:) + C(3,2,:) + C(4,1,:);\nM(3,4,:) = ( -C(1,4,:) + C(2,3,:) - C(3,2,:) + C(4,1,:) )*1i;\nM(4,1,:) = ( C(1,3,:) + C(2,4,:) - C(3,1,:) - C(4,2,:) 
)*1i;\nM(4,2,:) = ( C(1,3,:) - C(2,4,:) - C(3,1,:) + C(4,2,:) )*1i;\nM(4,3,:) = ( C(1,4,:) + C(2,3,:) - C(3,2,:) - C(4,1,:) )*1i;\nM(4,4,:) = C(1,4,:) - C(2,3,:) - C(3,2,:) + C(4,1,:);\nM = real(M)./2;\nend\nfunction J = nearestJones(M)\nC = M2Cov(M);\nJ = zeros(2,2,size(C,3));\nfor n=1:size(C,3)\n [V,D] = eig(C(:,:,n),'vector');\n [~,mx] = max(D);\n J(:,:,n) = sqrt(D(mx))*reshape(V(:,mx),2,2).';\nend\nend\nfunction M = filterM(M) % M to nearest physical M\nC_raw = M2Cov(M);\nC = zeros(size(C_raw));\nfor n=1:size(C_raw,3)\n [V,D] = eig(C_raw(:,:,n),'vector');\n list = find(D > 0.00001).';\n idx = 0;\n temp = zeros(4,4,length(list));\n for j = list\n idx = idx + 1;\n temp(:,:,idx) = D(j)*V(:,j)*V(:,j)';\n end\n C(:,:,n) = sum(temp,3);\nend\nM = Cov2M(C);\nend\nfunction O = jonesAnisotropy(J)\nK = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\nT = acos( K.*( J(1,1,:) + J(2,2,:) )./2);\nO = (T.*K)./(sin(T));\nend\nfunction fracIndx = fracIndex(X,y) %fractional index\n% X: 1xN array of increasing values\n% y: array of values in the range of X\n% fracIndx is an array the length of y that contains the fractional\n% index of the y values in array X.\n% e.g., X = [2,4,6]; y = [4,5]; gives, fracIndx = [2,2.5];\nfracIndx = zeros(1,length(y));\nfor idx = 1:length(y)\n if y(idx) >= X(length(X))\n fracIndx(idx) = length(X);\n elseif y(idx) <= X(1)\n fracIndx(idx) = 1;\n else\n a = find(X <= y(idx));\n a = a(length(a));\n b = find(X > y(idx));\n b = b(1);\n fracIndx(idx) = a+(y(idx)-X(a))/(X(b)-X(a));\n end\nend\nend\nfunction handles = prePlot(varargin)\nobj = varargin{1};\nif all(obj.Size(1:2) == 4)\n plotTool = @MMplot;\nelse\n plotTool = @linePlot;\nend\nif ~isempty(obj.Label)\n if any(strcmpi('title',varargin))\n idx = find(strcmpi('title',varargin)) + 1;\n varargin{idx} = [obj.Label, ' ',varargin{idx}];\n else\n sz = length(varargin);\n varargin{sz+1} = 'title';\n varargin{sz+2} = obj.Label;\n end\nend\nif ~any(strcmpi('legend',varargin))\n if length(obj.Dims) >= 2 && ~isempty(obj.Dims{2})\n if length(obj.Dims) >= 3 && ~isempty(obj.Dims{3})\n idx = 1;\n Labels = cell(1,length(obj.Dims{2})*length(obj.Dims{3}));\n for i=1:length(obj.Dims{2})\n for j=1:length(obj.Dims{3})\n Labels{idx} = [num2str(obj.Dims{2}(i)),' ; ',num2str(obj.Dims{3}(j))];\n idx = idx + 1;\n end\n end\n LabelNames = [obj.DimNames{2},' ; ',obj.DimNames{3}];\n else\n Labels = obj.Dims{2};\n LabelNames = obj.DimNames{2};\n end\n sz = length(varargin);\n varargin{sz+1} = 'legend';\n varargin{sz+2} = {LabelNames,Labels};\n end\nend\nhandles = plotTool(obj.Dims{1},obj.Value,obj.ErValue,varargin{2:end});\nend\nfunction handles = MMplot(Lam,MMdata,MMerror,varargin)\n% Mueller matrix 2D plotting utility\n% Makes a 4 x 4 array of 2-D line plots with full control over line and\n% axes properties.\n% Outputs: [1 x 16] array of axis handles\n%\n% Required positional inputs:\n% Lam: [1 x n] array of wavelengths (X-axis)\n% MMdata: [4 x 4 x n x ...] Mueller matrix array\n% Optional positional inputs:\n% LineSpec: string containing a valid lineSpec. Type \"doc LineSpec\" in\n% command window for more info. Default is \"-\", a solid line.\n% Optional Name-Value pairs inputs:\n% ev: bool. converts X axis to eV. e.g., 'ev',true\n% handles: [1 x 16] array of plot handles. New handles are created if not given.\n% limY: scalar numeric. limits how small the range of the y-axes can be.\n% fontsize: sets font-size. Default is 12 pts. Changing the fontsize\n% of existing plots is not recommended. 
(Set on first call).\n% lineNV: a 1D cell array containing Name-Value pair arguments valid for\n% Chart Line Properties.\n% axNV: a 1D cell array containing Name-Value pairs arguments valid for\n% Axes Properties.\n% size: Size of the figure in pixels given as a two element vector [X Y].\n% A warning is issued if the requested size is larger than the screen\n% size minus the height of the OSX status bar (on my machine).\n% Default size is [1000 700].\n% title: string containing a title to place at the top of the figure.\n% legend: two-element cell array. First element is a string to use for\n% title of the legend. Second element is either a numeric array\n% containing values to use for labels of each plot, or a cell array\n% of strings to use as labels. Only set legend on last call, or just\n% write all plots at once (better).\n% vSpace: Adds extra space vertical between plots, in pixels\n% borderFactor: Increases white space around plots. This value is a\n% multiple of the largest line width on the plots.\n\np = inputParser;\n% input validation functions\nvalFun1 = @(x) ischar(x) && ...\n all(~strcmpi(x,{'ev','handles','lineNV','limY','fontsize','axNV','size',...\n 'title','legend','vSpace','borderFactor'}));\nvalFun2 = @(x) isscalar(x)&&isnumeric(x);\n% setup input scheme\naddRequired(p,'Lam',@isnumeric);\naddRequired(p,'MMdata',@isnumeric);\naddRequired(p,'MMerror',@isnumeric);\naddOptional(p,'LineSpec','-',valFun1)\naddParameter(p,'ev',false,@islogical)\naddParameter(p,'handles',gobjects(1,16), @(x) all(ishandle(x)))\naddParameter(p,'limY',0,valFun2)\naddParameter(p,'fontsize',12,valFun2)\naddParameter(p,'axNV',{},@iscell)\naddParameter(p,'lineNV',{},@iscell)\naddParameter(p,'size',[1000 700],@(x) length(x) == 2 && isnumeric(x))\naddParameter(p,'title','',@ischar)\naddParameter(p,'legend',{},@(x) iscell(x) || strcmp(x,'none'))\naddParameter(p,'vSpace',0,@isscalar)\naddParameter(p,'borderFactor',0,@isscalar)\nparse(p,Lam,MMdata,MMerror,varargin{:}) %parse inputs\n\n% create new figure if no valid handles were given\nhandles = p.Results.handles;\nif any(strcmpi('handles',p.UsingDefaults))\n % Determine how large to make the figure window, according to the screensize.\n scrsz = get(0,'screensize');\n figPos = [1 5 p.Results.size];\n if figPos(3) > scrsz(3)\n figPos(3) = scrsz(3);\n warning(['Figure horizontal dimension set to the maximum value of ',...\n num2str(figPos(3)),' pixels.'])\n end\n if figPos(4) > (scrsz(4) - 99) % 99 pixels is the height of the OSX status bar on my machine\n figPos(4) = (scrsz(4) - 99);\n warning(['Figure vertical dimension set to the maximum value of ',...\n num2str(figPos(4)),' pixels.'])\n end\n h_fig = figure('position',figPos,'units','pixels'); %create figure\n xLabel = uicontrol('style','text','BackgroundColor','w',...\n 'units','pixels','FontSize',p.Results.fontsize,...\n 'tag','xLabelObject'); % create x-label\n if p.Results.ev == true\n set(xLabel,'String','Energy (eV)');\n else\n set(xLabel,'String','Wavelength (nm)');\n end\n xLabel_sz = get(xLabel,'extent');\n set(xLabel,'Position',[(figPos(3) - xLabel_sz(3) )./2, 0, xLabel_sz(3), xLabel_sz(4)]);\n \n if ~isempty(p.Results.title) % create title if given\n figTitle = uicontrol('style','text','BackgroundColor','w',...\n 'units','pixels','FontSize',p.Results.fontsize,...\n 'tag','titleObject');\n set(figTitle,'String',p.Results.title)\n figTitle_sz = get(figTitle,'extent');\n set(figTitle,'Position',[( figPos(3) - figTitle_sz(3) )./2,...\n ( figPos(4) - figTitle_sz(4) ), figTitle_sz(3), 
figTitle_sz(4)]);\n end\n % determine the horizontal extent of y-axis marker labels\n dummy = uicontrol('style','text','fontsize',p.Results.fontsize,'units','pixels');\n set(dummy,'String','-0.000');\n yAxSz = get(dummy,'extent');\n delete(dummy)\n \n plotSzX = figPos(3)/4 - yAxSz(3) - yAxSz(3)./5; % X size of plot area in pixels\n plotSzY = ( figPos(4) - 4*yAxSz(4) )/4 - 6 - p.Results.vSpace; % Y size of plot area in pixels\n for i=1:4\n for j=1:4\n plotPos = [ ( (plotSzX + yAxSz(3) + 3)*(j-1) + yAxSz(3) +5)./figPos(3) ,...\n ((plotSzY + yAxSz(4)./2 + p.Results.vSpace)*(4-i)+yAxSz(4)*2 + 3)./figPos(4),...\n plotSzX./figPos(3), plotSzY./figPos(4)];\n hand = subplot('Position',plotPos);\n hold(hand,'on')\n box(hand,'on')\n if i ~= 4\n set(hand,'XTickLabel',[]) % keep X lables only for bottom row\n end\n handles(j+4*(i-1)) = hand;\n end\n end\nelse\n h_fig = get(handles(1),'parent');\n figPos = get(h_fig,'Position');\nend\n\n%plot data and set Line properties.\nif p.Results.ev == true; Lam = 1239.8./Lam; end\nif isempty(MMerror)\n for j = 1:4\n for k = 1:4\n plot(handles(k+4*(j-1)),Lam,squeeze(MMdata(j,k,:,:)),...\n p.Results.LineSpec,p.Results.lineNV{:})\n end\n end\nelse\n for j = 1:4\n for k = 1:4\n errorbar(handles(k+4*(j-1)),Lam,squeeze(MMdata(j,k,:,:)),...\n squeeze(MMerror(j,k,:,:)),...\n p.Results.LineSpec,'CapSize',0,p.Results.lineNV{:})\n end\n end\nend\n% set Axes properties\naxis(handles,'tight'); % first, axes are set to tight\nif ~isempty(p.Results.axNV)\n for j=1:16; set(handles(j),p.Results.axNV{:}); end\nend\nif p.Results.limY ~= 0 % modify axes bounds if limY is set\n lim = p.Results.limY;\n for j=1:16\n Ylim = get(handles(j),'YLim');\n if (Ylim(2) - Ylim(1)) < lim\n avg = (Ylim(2) + Ylim(1))./2;\n Ylim(2) = avg + lim/2;\n Ylim(1) = avg - lim/2;\n set(handles(j),'Ylim',Ylim);\n end\n end\nend\n% Adjust plot limits so that lines do not overlap axis borders.\n% *** If you like to use Markers, then perhaps change 'lineWidth' to 'MarkerSize'\nlineHandle = get(handles(1),'children');\nlineWidth = zeros(size(lineHandle));\nfor j = 1:length(lineHandle)\n lineWidth(j) = get(lineHandle(j),'lineWidth');\nend\nlineWidth = max(lineWidth)*p.Results.borderFactor;\nplotPos = get(handles(1),'Position');\nfor j=1:16\n xlim = get(handles(j),'xLim');\n ylim = get(handles(j),'yLim');\n xStep = (xlim(2) - xlim(1))/plotPos(3)/figPos(3)*lineWidth/2;\n yStep = (ylim(2) - ylim(1))/plotPos(4)/figPos(3)*lineWidth;\n set(handles(j),'XLim',[xlim(1)-xStep,xlim(2)+xStep]);\n set(handles(j),'YLim',[ylim(1)-yStep,ylim(2)+yStep]);\nend\n% set font size of all graphics objects if fontsize was passed\nif ~any(strcmpi('fontsize',p.UsingDefaults))\n set(get(gcf,'children'),'FontSize',p.Results.fontsize);\nend\n% optionally create legend (this will increase the width of the figure!)\nif ~any(strcmpi('legend',p.UsingDefaults))\n if iscell(p.Results.legend)\n Labels = p.Results.legend{2};\n if isnumeric(Labels)\n Labels = cellfun(@(x) num2str(x),num2cell(Labels),'uniformoutput',0);\n end\n pos = zeros(4,16);\n for i=1:16\n set(handles(i),'units','pixels');\n pos(:,i) = get(handles(i),'Position');\n end\n lgd = legend(handles(4),Labels,'location','northeastoutside');\n set(lgd,'units','pixels','fontsize',p.Results.fontsize);\n title(lgd,p.Results.legend{1},'FontSize',p.Results.fontsize);\n lgd_pos = get(lgd,'Position');\n h_fig.Position = h_fig.Position + [0 0 lgd_pos(3) 0];\n for i=1:16\n set(handles(i),'Position',pos(:,i));\n end\n end\nend\n\nend\nfunction handle = linePlot(X,Y,YEr,varargin)\n% this program just 
makes line-plots easier. Documentation is similar to\n% the MMplot program, except that this only makes 1 plot not a 4x4 plot array.\n% EXAMPLE:\n%\n% plotStuff = {...\n% 'size',[700,500],...\n% 'fontsize',16,...\n% 'title','Title of Graph',...\n% 'xLabel','X Axis',...\n% 'yLabel','Y Axis',...\n% 'limy',0.1,...\n% 'lineNV',{'lineWidth',2},...\n% 'axNV',{'XGrid','on','YGrid','on'}...\n% };\n%\n% h = plotter(Lam,MMgetp(MM1,'ld'),'b',plotStuff{:});\n% plotter(Lam,MMgetp(MM1,'ldp'),'r',plotStuff{:},'handle',h);\n%\n% or\n%\n% h = plotter(Lam,[MMgetp(MM1,'ld') ; MMgetp(MM1,'ldp')],plotStuff{:});\n\np = inputParser;\n% input validation functions\nvalFun1 = @(x) ischar(x) && ...\n all(~strcmpi(x,...\n {'handle','lineNV','limY','fontsize','axNV','size','title','xLabel',...\n 'yLabel','legend','legendLocation'}));\nvalFun2 = @(x) isscalar(x)&&isnumeric(x);\n% setup input scheme\naddRequired(p,'X',@isnumeric);\naddRequired(p,'Y',@isnumeric);\naddRequired(p,'YEr',@isnumeric);\naddOptional(p,'LineSpec','-',valFun1)\naddParameter(p,'handle',gobjects(1), @ishandle);\naddParameter(p,'limY',0,valFun2)\naddParameter(p,'fontsize',12,valFun2)\naddParameter(p,'axNV',{},@iscell)\naddParameter(p,'lineNV',{},@iscell)\naddParameter(p,'size',[700 500],@(x) length(x) == 2 && isnumeric(x))\naddParameter(p,'title','',@ischar)\naddParameter(p,'xLabel','',@ischar)\naddParameter(p,'yLabel','',@ischar)\naddParameter(p,'legend',{},@(x) iscell(x) || strcmp(x,'none'))\naddParameter(p,'legendLocation','northeastoutside',@ischar)\nparse(p,X,Y,YEr,varargin{:}) %parse inputs\n\n% create new figure if no valid handles were given\nif any(strcmpi('handle',p.UsingDefaults))\n % Determine how large to make the figure window, according to the screensize.\n scrsz = get(0,'screensize');\n figPos = [1 5 p.Results.size];\n if figPos(3) > scrsz(3)\n figPos(3) = scrsz(3);\n warning(['Figure horizontal dimension set to the maximum value of ',...\n num2str(figPos(3)),' pixels.'])\n end\n if figPos(4) > (scrsz(4) - 99) % 99 pixels is the height of the OSX status bar on my machine\n figPos(4) = (scrsz(4) - 99);\n warning(['Figure vertical dimension set to the maximum value of ',...\n num2str(figPos(4)),' pixels.'])\n end\n h_fig = figure('position',figPos,'units','pixels'); %create figure\n handle = axes;\n hold(handle,'on')\n box(handle,'on')\nelse\n handle = p.Results.handle;\n h_fig = get(handle,'parent');\n figPos = get(h_fig,'Position');\nend\n% plot line and set Line Properties\nplot(handle,X,Y(:,:),p.Results.LineSpec,p.Results.lineNV{:})\n% set Axes properties\naxis(handle,'tight'); % first, axes are set to tight\nif ~isempty(p.Results.axNV)\n set(handle,p.Results.axNV{:});\nend\nif p.Results.limY ~= 0 % modify axes bounds if limY is set\n lim = p.Results.limY;\n Ylim = get(handle,'YLim');\n if (Ylim(2) - Ylim(1)) < lim\n avg = (Ylim(2) + Ylim(1))./2;\n Ylim(2) = avg + lim/2;\n Ylim(1) = avg - lim/2;\n set(handle,'Ylim',Ylim);\n end\nend\n% Adjust plot limits so that lines do not overlap axis borders.\nlineHandle = get(handle,'children');\nlineWidth = zeros(size(lineHandle));\nfor j = 1:length(lineHandle)\n if strcmp(get(lineHandle(j),'Marker'),'none')\n lineWidth(j) = get(lineHandle(j),'LineWidth');\n else\n lineWidth(j) = get(lineHandle(j),'MarkerSize');\n end\nend\nlineWidth = max(lineWidth);\nplotPos = get(handle,'Position');\nxlim = get(handle,'xLim');\nylim = get(handle,'yLim');\nxStep = (xlim(2) - xlim(1))/plotPos(3)/figPos(3)*lineWidth/2;\nyStep = (ylim(2) - 
ylim(1))/plotPos(4)/figPos(3)*lineWidth;\nset(handle,'XLim',[xlim(1)-xStep,xlim(2)+xStep]);\nset(handle,'YLim',[ylim(1)-yStep,ylim(2)+yStep]);\n% add the labels if passed\nif ~any(strcmpi('title',p.UsingDefaults))\n title(p.Results.title,'FontSize',p.Results.fontsize,'FontWeight','normal');\nend\nif ~any(strcmpi('xLabel',p.UsingDefaults))\n xlabel(p.Results.xLabel,'FontSize',p.Results.fontsize);\nend\nif ~any(strcmpi('yLabel',p.UsingDefaults))\n ylabel(p.Results.yLabel,'FontSize',p.Results.fontsize);\nend\n% set font size of all graphics objects if fontsize was passed\nif ~any(strcmpi('fontsize',p.UsingDefaults))\n set(get(gcf,'children'),'FontSize',p.Results.fontsize);\nend\n% optionally create legend (this will increase the width of the figure!)\nif ~any(strcmpi('legend',p.UsingDefaults))\n if iscell(p.Results.legend)\n Labels = p.Results.legend{2};\n if isnumeric(Labels)\n Labels = cellfun(@(x) num2str(x),num2cell(Labels),'uniformoutput',0);\n end\n set(handle,'units','pixels');\n pos = get(handle,'Position');\n lgd = legend(handle,Labels,'location',p.Results.legendLocation);\n set(lgd,'units','pixels','fontsize',p.Results.fontsize);\n title(lgd,p.Results.legend{1},'FontSize',p.Results.fontsize);\n if ~isempty(regexp(p.Results.legendLocation,'.outside','ONCE'))\n lgd_pos = get(lgd,'Position');\n h_fig.Position = h_fig.Position + [0 0 lgd_pos(3) 0];\n set(handle,'Position',pos);\n end\n end\nend\nend\n% ========================================================================="} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "MPlot3D.m", "ext": ".m", "path": "smn-thesis-master/MPlot3D.m", "size": 15677, "source_encoding": "utf_8", "md5": "1a8b8381948165ef6054487ebde73297", "text": "classdef (InferiorClasses = {?matlab.graphics.axis.Axes}) MPlot3D < handle\n \n properties\n \n uniquezero = true\n palette = 'HotCold Bright'\n gs = 0\n width\n fontsize = 14\n limz = 1e-3\n norm = true\n hSpacing = 3; \n vSpacing = 3; \n cbw = 10;\n end\n \n properties (SetAccess = protected)\n figHandle\n axesHandles = gobjects(4);\n colorbarHandles = gobjects(4);\n end\n \n properties (Hidden)\n maskHandles\n end\n \n methods\n \n function obj = MPlot3D(varargin)\n obj.figHandle = figure;\n obj.width = getFigWidth;\n plot(obj,varargin{:});\n end\n \n function plot(obj,data,varargin)\n % \\\\ Required positional input:\n \n % data: [4,4,X,Y] array. X and Y are horizontal and vertical plot\n % dimensions.\n \n % \\\\ Optional Name-Value pairs that set object properties\n \n % 'uniquezero', logical: make zero white or black in colormaps\n % Default is true.\n % 'palette', string: name of a colormap, including custom\n % ones in local function colPalette\n % Default is 'Fireice'\n % 'gs', [min max]: GlobalScale. plot limits of all Z-scales between min, max.\n % If not given, each MM element maps to its own min and max value.\n % Only 1 colorbar is drawn with GlobalScale is set\n % 'fontsize', scalar: Size of font in colorbars\n % 'width', scalar: Width of figure in inches. Height is\n % computed automatically to ensure no streching of plots (figure can go\n % off page, in which case, reduce value of 'width'. 
Default is %60 of\n % the monitor or with dual displays of different size, who knowns...\n % 'limz', scalar: limits how small the range of the z-axes can be.\n % 'hSpacing', scalar: sets the horizontal space between plots in pixels\n % 'vSpacing', scalar: sets the vertical space between plots in pixels\n % 'cbw', scalar: Colorbar width in pixels.\n \n p = inputParser;\n % setup input scheme\n addRequired(p,'obj',@(x) isa(x,'MPlot3D'))\n addRequired(p,'data',@(x) isnumeric(x) && ndims(x) == 4)\n addParameter(p,'norm',obj.norm,@(x) x == 1 || x == 0)\n addParameter(p,'uniquezero',obj.uniquezero,@(x) x == 1 || x == 0)\n addParameter(p,'palette',obj.palette,@(x) ischar(x) )\n addParameter(p,'limz',obj.limz,@(x) isscalar(x)&&isnumeric(x))\n addParameter(p,'fontsize',obj.fontsize,@(x) isscalar(x)&&isnumeric(x))\n addParameter(p,'width',obj.width,@(x) isscalar(x) && isnumeric(x)) % inches\n addParameter(p,'gs',obj.gs,@(x) length(x) == 2 && isnumeric(x))\n addParameter(p,'hSpacing',obj.hSpacing,@isscalar)\n addParameter(p,'vSpacing',obj.vSpacing,@isscalar)\n addParameter(p,'cbw',obj.cbw,@isscalar)\n parse(p,obj,data,varargin{:}) %parse inputs\n sz = size(data);\n obj.norm = p.Results.norm;\n obj.uniquezero = p.Results.uniquezero;\n obj.palette = p.Results.palette;\n obj.gs = p.Results.gs;\n obj.limz = p.Results.limz;\n obj.fontsize = p.Results.fontsize;\n obj.width = p.Results.width;\n obj.hSpacing = p.Results.hSpacing;\n obj.vSpacing = p.Results.vSpacing;\n obj.cbw = p.Results.cbw;\n \n % normalize and replace NaN with 0 if obj.norm is set\n if obj.norm\n data = data ./ data(1,1,:,:);\n data(isnan(data)) = 0;\n end\n \n dummy = uicontrol('style', 'text', 'fontsize', obj.fontsize, 'units', 'pixels');\n set(dummy,'String', '-0.000');\n cblbextents = get(dummy, 'extent');\n cblbsz = cblbextents(3); % colorbar label size\n delete(dummy)\n figWidth = (obj.width) * obj.figHandle.Parent.ScreenPixelsPerInch;\n\n if obj.gs==0\n plotW = (figWidth - 9*obj.vSpacing-4*(obj.cbw + cblbsz))/4;\n plotH = sz(3)/sz(4)*plotW;\n figHeight = plotH*4+5*obj.hSpacing;\n \n totalPlotWidth = obj.vSpacing*2+obj.cbw+cblbsz+plotW;\n plotPosFun = @(j,k) [ (obj.vSpacing+(k-1)*totalPlotWidth)/figWidth...\n ,(obj.hSpacing+(4-j)*(plotH+obj.hSpacing))/figHeight,...\n plotW/figWidth,...\n plotH/figHeight];\n set(obj.figHandle,'Position',[0,0,figWidth,figHeight],'units','pixels');\n for j=1:4\n for k=1:4\n if isgraphics(obj.axesHandles(j,k))\n subplot(obj.axesHandles(j,k), ...\n 'position',plotPosFun(j,k),'units','pixels');\n else\n obj.axesHandles(j,k) = ...\n subplot('position',plotPosFun(j,k),'units','pixels');\n end\n clim = [min(min(data(j,k,:,:))),max(max(data(j,k,:,:)))];\n if obj.limz ~= 0 % modify axes bounds if limz is set\n if (clim(2) - clim(1)) < obj.limz\n avg = (clim(2) + clim(1))./2;\n clim(2) = avg + obj.limz/2;\n clim(1) = avg - obj.limz/2;\n end\n end\n pos = get(obj.axesHandles(j,k),'Position');\n imagesc(squeeze(data(j,k,:,:)),'Parent',obj.axesHandles(j,k),clim)\n axis(obj.axesHandles(j,k),'off')\n colormap(obj.axesHandles(j,k),makeColormap(clim,obj.uniquezero,obj.palette))\n obj.colorbarHandles(j,k) = colorbar(obj.axesHandles(j,k),'units','pixels',...\n 'Position',[pos(1)+pos(3)+obj.vSpacing,pos(2)+cblbextents(4)/4,...\n obj.cbw,pos(4)-cblbextents(4)/2],...\n 'fontsize',obj.fontsize);\n end\n end\n if any(strcmp('nonorm', p.UsingDefaults))\n obj.axesHandles(1,1).CLim = [0 1];\n end\n else\n plotW = (figWidth - 6*obj.vSpacing - 2*obj.cbw - cblbsz)/4;\n plotH = sz(3)/sz(4)*plotW;\n figHeight = 
plotH*4+5*obj.hSpacing;\n plotPosFun = @(j,k) [ (obj.vSpacing+(k-1)*(plotW+obj.vSpacing))/figWidth,...\n (obj.hSpacing+(4-j)*(plotH+obj.hSpacing))/figHeight,...\n plotW/figWidth,...\n plotH/figHeight];\n set(obj.figHandle,'Position',[0,0,figWidth,figHeight],'units','pixels');\n for j=1:4\n for k=1:4\n if isgraphics(obj.axesHandles(j,k))\n subplot(obj.axesHandles(j,k),...\n 'position',plotPosFun(j,k),'units','pixels');\n else\n obj.axesHandles(j,k) = ...\n subplot('position',plotPosFun(j,k),'units','pixels');\n end\n pos = get(obj.axesHandles(j,k),'Position');\n imagesc(squeeze(data(j,k,:,:)),'Parent',obj.axesHandles(j,k),obj.gs)\n colormap(obj.axesHandles(j,k),makeColormap(obj.gs,obj.uniquezero,obj.palette))\n axis(obj.axesHandles(j,k),'off')\n end\n end\n obj.colorbarHandles(1,4) = colorbar(obj.axesHandles(1,4),'units','pixels',...\n 'Position',[pos(1)+pos(3)+obj.vSpacing,cblbextents(4)/4+6,...\n obj.cbw,figHeight-cblbextents(4)/2-12],...\n 'fontsize',obj.fontsize);\n end\n end\n \n function mmdata = getPlotData(obj)\n % h: [4,4] array of axis handles\n mmdata = zeros([4, 4, size(obj.axesHandles(1,1).Children.CData)], ...\n class(obj.axesHandles(1,1).Children.CData));\n for j=1:4\n for k=1:4\n mmdata(j,k,:,:) = obj.axesHandles(j,k).Children.CData;\n end\n end\n end\n\n function replacePlotData(obj,mmdata)\n % MMreplace3DplotData replaces the data in 4x4 intensity plots.\n % h is a [4,4] array of axis handles\n % Data is a 4x4xNxM array. Data size should not be different than data in\n % plots.\n for j=1:4\n for k=1:4\n obj.axesHandles(j,k).Children.CData = squeeze(mmdata(j,k,:,:));\n end\n end\n end\n \n function update(obj,varargin)\n obj.figHandle.Visible = 'off';\n data = getPlotData(obj);\n delete(obj.colorbarHandles)\n obj.colorbarHandles = gobjects(4);\n plot(obj,data,varargin{:});\n obj.figHandle.Visible = 'on';\n end\n \n function drawMask(obj, i, j)\n sz = size(obj.axesHandles(1,1).Children.CData);\n h_im = obj.axesHandles(i,j).Children;\n e = imellipse(obj.axesHandles(i,j),...\n [sz(1)*0.1,sz(2)*0.1,0.8*sz(1),0.8*sz(1)]);\n obj.maskHandles = {e,h_im};\n end\n \n function setElipse(obj, position)\n setPosition(obj.maskHandles{1}, position);\n end\n \n function applyMask(obj)\n mask = createMask(obj.maskHandles{1}, obj.maskHandles{2});\n delete(obj.maskHandles{1})\n for j=1:4\n for k=1:4\n obj.axesHandles(j,k).Children.CData = ...\n obj.axesHandles(j,k).Children.CData.*mask;\n end\n end\n end\n \n function applyMaskWithTrim(obj)\n mask = createMask(obj.maskHandles{1},obj.maskHandles{2});\n pos = obj.maskHandles{1}.getPosition;\n pos = [floor(pos(1:2)),ceil(pos(3:4))];\n delete(obj.maskHandles{1})\n data = zeros(4,4,pos(4),pos(3),'single');\n idx = {pos(2):(pos(2)+pos(4)-1),pos(1):(pos(1)+pos(3)-1)};\n for j=1:4\n for k=1:4\n data(j,k,:,:) = ...\n obj.axesHandles(j,k).Children.CData(idx{:}).*mask(idx{:});\n end\n end\n plot(obj,data)\n end\n \n function print(obj,filepath)\n print(obj.figHandle,filepath,'-depsc');\n end\n \n function flipX(obj)\n replacePlotData(obj, flip(getPlotData(obj), 4));\n end\n end\nend\n\nfunction width = getFigWidth % sets default width to 60% of display width\n scrsz = get(0,'screensize');\n width = 0.6*scrsz(3)/get(0,'ScreenPixelsPerInch');\nend\n\nfunction colAr = colPalette(palette)\n% these are custom colormaps. A colormap is just a Nx4 matrix. The\n% first column are values between 0 and 256 that position a color marker.\n% The 2nd, 3rd, and 4th columns are RGB color values. 
Names of matlab\n% colormaps can also be handed to this function.\nswitch palette\n \n case 'HotCold Bright'\n colAr = ...\n [0\t0\t65\t220;...\n 36\t0\t90\t240;...\n 76\t0\t253\t253;...\n 128\t250\t250\t250;...\n 182\t255\t242\t0;...\n 224\t255\t127\t0;...\n 256\t255\t0\t0];\n \n case 'HotCold Dark'\n colAr = ...\n [0\t0\t253\t253;...\n 36\t1\t114\t239;...\n 76\t0\t90\t240;...\n 128\t0\t0\t0;...\n 182\t255\t0\t0;...\n 224\t255\t127\t0;...\n 256\t255\t242\t0];\n \n case 'TwoTone Bright'\n colAr = ...\n [0\t0\t0\t255;...\n 128\t255\t255\t255;...\n 256\t255\t0\t0];\n \n case 'TwoTone Dark'\n colAr = ...\n [0\t0\t0\t255;...\n 128\t0\t0\t0;...\n 256\t255\t0\t0];\n \n case 'Fireice' \n %Copyright (c) 2009, Joseph Kirk \n %All rights reserved.\n clrs = [0.75 1 1; 0 1 1; 0 0 1;...\n 0 0 0; 1 0 0; 1 1 0; 1 1 0.75];\n \n y = -3:3;\n m = 64;\n if mod(m,2)\n delta = min(1,6/(m-1));\n half = (m-1)/2;\n yi = delta*(-half:half)';\n else\n delta = min(1,6/m);\n half = m/2;\n yi = delta*nonzeros(-half:half);\n end\n colAr = cat(2,(0:4:255).',255*interp2(1:3,y,clrs,1:3,yi));\n \n case 'Spectral'\n colAr = cbrewer('div', 'Spectral', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'RdYlGn'\n colAr = cbrewer('div', 'RdYlGn', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'RdYlBu'\n colAr = cbrewer('div', 'RdYlBu', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'RdBu'\n colAr = cbrewer('div', 'RdBu', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'RdGy'\n colAr = cbrewer('div', 'RdGy', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'PuOr'\n colAr = cbrewer('div', 'PuOr', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'PRGn'\n colAr = cbrewer('div', 'PRGn', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'PiYG'\n colAr = cbrewer('div', 'PiYG', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n case 'BrBG'\n colAr = cbrewer('div', 'BrBG', 11) .* 255;\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\n \n otherwise\n colAr = colormap(palette) .* 255; % to use other colormaps\n t1 = linspace(0,256,size(colAr, 1)).';\n colAr = [t1, colAr];\nend\nend\n\nfunction fracIndx = fracIndex(array,x)\n\nfracIndx = zeros(1,length(x));\nfor idx = 1:length(x)\n if x >= array(end)\n fracIndx(idx) = length(array);\n elseif x(idx) <= array(1)\n fracIndx(idx) = 1;\n else\n a = find(array <= x(idx));\n a = a(length(a));\n b = find(array > x(idx));\n b = b(1);\n fracIndx(idx) = a+(x(idx)-array(a))/(array(b)-array(a));\n end\n \nend\nend\n\nfunction cm = makeColormap(clim,b_uniqueZero,palette)\ndmin=clim(1);\ndmax=clim(2);\nif dmax == dmin\n dmax=1;\n dmin=0;\nend\nif b_uniqueZero == true\n Zscale = zeros(1,256);\n if abs(dmin) < abs(dmax)\n didx = (dmax - dmin)/(2*dmax);\n for idx = 0:255\n Zscale(idx+1) = 256 - didx*idx;\n end\n else\n didx = (dmin-dmax)/(2*dmin);\n for idx = 0:255\n Zscale(idx+1) = idx*didx;\n end\n Zscale = flip(Zscale);\n end\nelse\n Zscale = flip(1:256);\nend\ncolAr = colPalette(palette);\ncm = zeros(256,3);\nfor n = 1:256\n x = fracIndex(colAr(:,1),Zscale(n));\n cm(n,1) = interp1(colAr(:,2),x);\n cm(n,2) = interp1(colAr(:,3),x);\n cm(n,3) = interp1(colAr(:,4),x);\nend\ncm = cm./255;\ncm = flip(cm,1);\nend\n"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "genop.m", 
"ext": ".m", "path": "smn-thesis-master/dependencies/Multiprod_2009/Testing/genop.m", "size": 3837, "source_encoding": "utf_8", "md5": "2c087f1f1c6d8843c6f5198716d04526", "text": "function z = genop(op,x,y)\r\n%GENOP Generalized array operations.\r\n% GENOP(OP, X, Y) applies the function OP to the arguments X and Y where\r\n% singleton dimensions of X and Y have been expanded so that X and Y are\r\n% the same size, but this is done without actually copying any data.\r\n%\r\n% OP must be a function handle to a function that computes an\r\n% element-by-element function of its two arguments.\r\n%\r\n% X and Y can be any numeric arrays where non-singleton dimensions in one\r\n% must correspond to the same or unity size in the other. In other\r\n% words, singleton dimensions in one can be expanded to the size of\r\n% the other, otherwise the size of the dimensions must match.\r\n%\r\n% For example, to subtract the mean from each column, you could use\r\n%\r\n% X2 = X - repmat(mean(X),size(X,1),1);\r\n%\r\n% or, using GENOP,\r\n%\r\n% X2 = genop(@minus,X,mean(X));\r\n%\r\n% where the single row of mean(x) has been logically expanded to match\r\n% the number of rows in X, but without actually copying any data.\r\n%\r\n% GENOP(OP) returns a function handle that can be used like above:\r\n%\r\n% f = genop(@minus);\r\n% X2 = f(X,mean(X));\r\n\r\n% written by Douglas M. Schwarz\r\n% email: dmschwarz (at) urgrad (dot) rochester (dot) edu\r\n% 13 March 2006\r\n\r\n% This function was inspired by an idea by Urs Schwarz (no relation) and\r\n% the idea for returning a function handle was shamelessly stolen from\r\n% Duane Hanselman.\r\n\r\n% Check inputs.\r\nif ~(nargin == 1 || nargin == 3)\r\n\terror('genop:zeroInputs','1 or 3 arguments required.')\r\nend\r\nif ~isa(op,'function_handle')\r\n\terror('genop:incorrectOperator','Operator must be a function handle.')\r\nend\r\nif nargin == 1\r\n\tz = @(x,y) genop(op,x,y);\r\n\treturn\r\nend\r\n\r\n% Compute sizes of x and y, possibly extended with ones so they match\r\n% in length.\r\nnd = max(ndims(x),ndims(y));\r\nsx = size(x);\r\nsx(end+1:nd) = 1;\r\nsy = size(y);\r\nsy(end+1:nd) = 1;\r\ndz = sx ~= sy;\r\ndims = find(dz);\r\nnum_dims = length(dims);\r\n\r\n% Eliminate some simple cases.\r\nif num_dims == 0 || numel(x) == 1 || numel(y) == 1\r\n\tz = op(x,y);\r\n\treturn\r\nend\r\n\r\n% Check for dimensional compatibility of inputs, compute size and class of\r\n% output array and allocate it.\r\nif ~(all(sx(dz) == 1 | sy(dz) == 1))\r\n\terror('genop:argSizeError','Argument dimensions are not compatible.')\r\nend\r\nsz = max([sx;sy]);\r\nz1 = op(x(1),y(1));\r\nif islogical(z1)\r\n\tz = repmat(logical(0),sz);\r\nelse\r\n\tz = zeros(sz,class(z1));\r\nend\r\n\r\n% The most efficient way to compute the result seems to require that we\r\n% loop through the unmatching dimensions (those where dz = 1), performing\r\n% the operation and assigning to the appropriately indexed output. Since\r\n% we don't know in advance which or how many dimensions don't match we have\r\n% to create the code as a string and then eval it. To see how this works,\r\n% uncomment the disp statement below to display the code before it is\r\n% evaluated. 
This could all be done with fixed code using subsref and\r\n% subsasgn, but that way seems to be much slower.\r\n\r\n% Compute code strings representing the subscripts of x, y and z.\r\nxsub = subgen(sy ~= sz);\r\nysub = subgen(sx ~= sz);\r\nzsub = subgen(dz);\r\n\r\n% Generate the code.\r\nindent = 2; % spaces per indent level\r\ncode_cells = cell(1,2*num_dims + 1);\r\nfor i = 1:num_dims\r\n\tcode_cells{i} = sprintf('%*sfor i%d = 1:sz(%d)\\n',indent*(i-1),'',...\r\n\t\tdims([i i]));\r\n\tcode_cells{end-i+1} = sprintf('%*send\\n',indent*(i-1),'');\r\nend\r\ncode_cells{num_dims+1} = sprintf('%*sz(%s) = op(x(%s),y(%s));\\n',...\r\n\tindent*num_dims,'',zsub,xsub,ysub);\r\ncode = [code_cells{:}];\r\n\r\n% Evaluate the code.\r\n% disp(code)\r\neval(code)\r\n\r\n\r\nfunction sub = subgen(select_flag)\r\nelements = {':,','i%d,'};\r\nselected_elements = elements(select_flag + 1);\r\nformat_str = [selected_elements{:}];\r\nsub = sprintf(format_str(1:end-1),find(select_flag));\r\n"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "arraylab133.m", "ext": ".m", "path": "smn-thesis-master/dependencies/Multiprod_2009/Testing/arraylab133.m", "size": 2056, "source_encoding": "utf_8", "md5": "46c91102f1666d2e8a3f0accd7d809ed", "text": "function c = arraylab133(a,b,d1,d2)\r\n% Several adjustments to ARRAYLAB13:\r\n% 1) Adjustment used in ARRAYLAB131 was not used here.\r\n% 2) Nested statement used in ARRAYLAB132 was used here.\r\n% 3) PERMUTE in subfunction MBYV was substituted with RESHAPE\r\n% (faster by one order of magnitude!).\r\n\r\n ndimsA = ndims(a); % NOTE - Since trailing singletons are removed,\r\n ndimsB = ndims(b); % not always NDIMSB = NDIMSA\r\n NsA = d2 - ndimsA; % Number of added trailing singletons\r\n NsB = d2 - ndimsB;\r\n sizA = [size(a) ones(1,NsA)];\r\n sizB = [size(b) ones(1,NsB)];\r\n p = sizA(d1);\r\n r = sizB(d1);\r\n s = sizB(d2); \r\n % Initializing C\r\n sizC = sizA; \r\n sizC(d2) = s;\r\n c = zeros(sizC);\r\n % Vectorized indices for B and C\r\n Nd = length(sizB);\r\n Bindices = cell(1,Nd); % preallocating (cell array)\r\n for d = 1 : Nd\r\n Bindices{d} = 1:sizB(d);\r\n end\r\n B2size = sizB; B2size([d1 d2]) = [1 r];\r\n B2indices = Bindices; \r\n % B2 will be cloned P times along its singleton dimension D1 (see MBYV).\r\n B2indices([d1 d2]) = [{ones(1, p)} Bindices(d1)]; % \"Cloned\" index\r\n\r\n if sizB(d2) == 1 % PxQ IN A - Rx1 IN B\r\n % A * B\r\n c = mbyv(a, b, B2indices,B2size,d1,d2,p);\r\n\r\n else % PxQ IN A - RxS IN B\r\n Cindices = Bindices;\r\n Cindices{d1} = 1:p; \r\n % Building C\r\n for Ncol = 1:s\r\n Bindices{d2} = Ncol; Cindices{d2} = Ncol;\r\n c(Cindices{:}) = mbyv(a, b(Bindices{:}), B2indices,B2size,d1,d2,p);\r\n end\r\n end\r\n\r\nfunction c = mbyv(a, b2, indices, newsize, d1, d2, p)\r\n% This is an adjustment to a subfunction used within MULTIPROD 1.3\r\n\r\n% 1 - Transposing: Qx1 matrices in B become 1xQ matrices\r\nb2 = reshape(b2, newsize);\r\n\r\n% 3 - Performing dot products along dimension DIM+1\r\n% % NOTE: b(indices{:}) has same size as A\r\n% % NOTE: This nested statement is much faster than two separate ones.\r\nc = sum(a .* b2(indices{:}), d2);"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "timing_MX.m", "ext": ".m", "path": "smn-thesis-master/dependencies/Multiprod_2009/Testing/timing_MX.m", "size": 1472, "source_encoding": "utf_8", "md5": "7db26cc2c4954f1026e93f2d0c44139a", "text": "function timing_MX\r\n% TIMING_MX Speed of MX as performed by MULTIPROD and by a nested 
loop.\r\n% TIMING_MX compares the speed of matrix expansion as performed by\r\n% MULTIPROD and an equivalent nested loop. The results are shown in the\r\n% manual (fig. 2).\r\n% Notice that MULTIPROD enables array expansion which generalizes matrix\r\n% expansion to arrays of any size, while the loop tested in this\r\n% function works only for this specific case, and would be much slower\r\n% if it were generalized to N-D arrays.\r\n\r\n\r\n% Checking whether needed software exists\r\nmessage = sysrequirements_for_testing('timeit');\r\nif message\r\n disp ' ', error('testing_memory_usage:Missing_subfuncs', message)\r\nend\r\n\r\n% Matrix expansion example (fig. 2)\r\ndisp ' '\r\ndisp 'Timing matrix expansion (see MULTIPROD manual, figure 2)'\r\ndisp ' '\r\n\r\na = rand(2, 5);\r\nb = rand(5, 3, 1000, 10);\r\n\r\nfprintf ('Size of A: %0.0fx%0.0f\\n', size(a))\r\nfprintf ('Size of B: (%0.0fx%0.0f)x%0.0fx%0.0f\\n', size(b))\r\ndisp ' ', disp 'Please wait...'\r\ndisp ' '\r\n\r\nf1 = @() loop(a,b);\r\nf2 = @() multiprod(a,b);\r\n\r\nt1 = timeit(f1)*1000;\r\nfprintf('LOOP(A, B): %10.4f milliseconds\\n', t1)\r\nt2 = timeit(f2)*1000;\r\nfprintf('MULTIPROD(A, B): %10.4f milliseconds\\n', t2)\r\n\r\ndisp ' '\r\nfprintf('MULTIPROD performed matrix expansion %6.0f times faster than a plain loop\\n', t1/t2)\r\ndisp ' '\r\n\r\nfunction C = loop(A,B)\r\nfor i = 1:1000\r\n for j = 1:10\r\n C(:,:,i,j) = A * B(:,:,i,j);\r\n end\r\nend"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "timing_matlab_commands.m", "ext": ".m", "path": "smn-thesis-master/dependencies/Multiprod_2009/Testing/timing_matlab_commands.m", "size": 7975, "source_encoding": "utf_8", "md5": "5384e23295d7b37d3318825a1d5c3dfe", "text": "function timing_matlab_commands\n% TIMING_MATLAB_COMMANDS Testing for speed different MATLAB commands.\n% \n% Main conclusion: RESHAPE and * (i.e. 
MTIMES) are very quick!\n\n% Paolo de Leva\n% University of Rome, Foro Italico, Rome, Italy\n% 2008 Dec 24\n\nclear all\n\n% Checking whether needed software exists\nif ~exist('bsxfun', 'builtin')\n message = sysrequirements_for_testing('bsxmex', 'timeit');\nelse\n message = sysrequirements_for_testing('timeit');\nend\nif message\n disp ' ', error('timing_matlab_commands:Missing_subfuncs', message)\nend\n\ndisp ' '\ndisp '---------------------------------- Experiment 1 ----------------------------------'\nN = 10000; P = 3; Q = 3; R = 1; \ntiming(N,P,Q,R);\n\ndisp '---------------------------------- Experiment 2 ----------------------------------'\nN = 1000; P = 3; Q = 30; R = 1; \ntiming(N,P,Q,R);\n\ndisp '---------------------------------- Experiment 3 ----------------------------------'\nN = 1000; P = 9; Q = 10; R = 3; \ntiming(N,P,Q,R);\n\ndisp '---------------------------------- Experiment 4 ----------------------------------'\nN = 100; P = 9; Q = 100; R = 3; \ntiming(N,P,Q,R);\n\ndisp '---------------------------------- Experiment 5 ----------------------------------'\ndisp ' '\ntiming2(4, 10000);\ntiming2(200, 200);\ntiming2(10000, 4);\n\ndisp '---------------------------- Experiment 6 ----------------------------'\ndisp ' '\na = rand(4096, 4096);\nfprintf ('Size of A: %0.0f x %0.0f\\n', size(a))\ndisp ' '\ndisp ' SUM(A,1) SUM(A,2)'\nf1 = @() sum(a, 1);\nf2 = @() sum(a, 2);\ndisp ([timeit(f1), timeit(f2)])\n\nclear all\nb = rand(256, 256, 256);\nfprintf ('Size of B: %0.0f x %0.0f x %0.0f\\n', size(b))\ndisp ' '\ndisp ' SUM(B,1) SUM(B,2) SUM(B,3)'\nf1 = @() sum(b, 1);\nf2 = @() sum(b, 2);\nf3 = @() sum(b, 3);\ndisp ([timeit(f1), timeit(f2), timeit(f3)])\n\ndisp '---------------------------- Experiment 7 ----------------------------'\ndisp ' '\na = rand(101,102,103);\nfprintf ('Size of A: %0.0f x %0.0f x %0.0f\\n', size(a))\ndisp ' '\ndisp 'Moving last dimension to first dimension:'\ndisp 'PERMUTE(A,[3 2 1]) PERMUTE(A,[3 1 2]) SHIFTDIM(A,2)'\ndisp '(SWAPPING) (SHIFTING) (SHIFTING)'\nf1 = @() permute(a, [3 2 1]);\nf2 = @() permute(a, [3 1 2]);\nf3 = @() shiftdim(a, 2);\nfprintf(1, '%8.2g ', [timeit(f1), timeit(f2), timeit(f3)])\ndisp ' ', disp ' '\na2 = f1(); s = size(a2);\na2 = f2(); s(2,:) = size(a2);\na2 = f3(); s(3,:) = size(a2);\ndisp (s)\n\ndisp 'Moving first dimension to last dimension:'\ndisp 'PERMUTE(A,[3 2 1]) PERMUTE(A,[2 3 1]) SHIFTDIM(A,1)'\ndisp '(SWAPPING) (SHIFTING) (SHIFTING)'\nf1 = @() permute(a, [3 2 1]);\nf2 = @() permute(a, [2 3 1]);\nf3 = @() shiftdim(a, 1);\nfprintf(1, '%8.2g ', [timeit(f1), timeit(f2), timeit(f3)])\ndisp ' ', disp ' '\na2 = f1(); s = size(a2);\na2 = f2(); s(2,:) = size(a2);\na2 = f3(); s(3,:) = size(a2);\ndisp (s)\n\ndisp ' '\na = rand(21,22,23,24,25);\nfprintf ('Size of A: %0.0f x %0.0f x %0.0f x %0.0f x %0.0f\\n', size(a))\ndisp ' '\ndisp 'Moving 4th dimension to 1st dimension:'\ndisp 'PERMUTE(A,[4 2 3 1 5]) PERMUTE(A,[4 1 2 3 5]) PERMUTE(A,[4 5 1 2 3])'\ndisp '(SWAPPING) (PARTIAL SHIFTING) (SHIFTING)'\nf1 = @() permute(a, [4 2 3 1 5]);\nf2 = @() permute(a, [4 1 2 3 5]);\nf3 = @() permute(a, [4 5 1 2 3]);\nfprintf(1, '%8.2g ', [timeit(f1), timeit(f2), timeit(f3)])\ndisp ' ', disp ' '\na2 = f1(); s = size(a2);\na2 = f2(); s(2,:) = size(a2);\na2 = f3(); s(3,:) = size(a2);\ndisp (s)\n\ndisp 'Moving 2nd dimension to 5th dimension:'\ndisp 'PERMUTE(A,[1 5 3 4 2]) PERMUTE(A,[1 3 4 5 2]) PERMUTE(A,[3 4 5 1 2])'\ndisp '(SWAPPING) (PARTIAL SHIFTING) (SHIFTING)'\nf1 = @() permute(a, [1 5 3 4 2]);\nf2 = @() permute(a, [1 3 4 5 2]);\nf3 = @() permute(a, [3 
4 5 1 2]);\nfprintf(1, '%8.2g ', [timeit(f1), timeit(f2), timeit(f3)])\ndisp ' ', disp ' '\na2 = f1(); s = size(a2);\na2 = f2(); s(2,:) = size(a2);\na2 = f3(); s(3,:) = size(a2);\ndisp (s)\n\ndisp '---------------------------- Experiment 8 ----------------------------'\ndisp ' '\na =rand(101,102,103);\norder = [1 2 3];\nshape = [101,102,103];\nf1 = @() perm(a,order);\nf2 = @() ifpermute(a,order);\nf3 = @() ifpermute2(a,order);\nf4 = @() resh(a,shape);\nf5 = @() ifreshape(a,shape);\nf6 = @() ifreshape2(a,shape);\ndisp 'COMPARING STATEMENTS THAT DO NOTHING!'\ndisp ' '\nfprintf ('Size of A: %0.0f x %0.0f x %0.0f\\n', size(a))\ndisp ' '\ndisp 'ORDER = [1 2 3] % (keeping same order)'\ndisp 'SHAPE = [101,102,103] % (keeping same shape)'\ndisp ' '\nfprintf (1,'PERMUTE(A,ORDER) .......................................... %0.4g\\n', timeit(f1))\nfprintf (1,'IF ~ISEQUAL(ORDER,1:LENGTH(ORDER)), A=PERMUTE(A,ORDER); END %0.4g\\n', timeit(f2))\nfprintf (1,'IF ~ISEQUAL(ORDER,1:3), A=PERMUTE(A,ORDER); END %0.4g\\n', timeit(f3))\ndisp ' '\nfprintf (1,'RESHAPE(A,SHAPE) .......................................... %0.4g\\n', timeit(f4))\nfprintf (1,'IF ~ISEQUAL(SHAPE,SIZE(A)), A=RESHAPE(A,SHAPE); END ....... %0.4g\\n', timeit(f5))\nfprintf (1,'IF ~ISEQUAL(SHAPE,SHAPE), A=RESHAPE(A,SHAPE); END ....... %0.4g\\n', timeit(f5))\ndisp ' '\n\n\nfunction a=perm(a, order)\na=permute(a, order);\nfunction a=resh(a,shape)\na=reshape(a,shape);\nfunction a=ifpermute(a, order)\nif ~isequal(order, 1:length(order)), a=permute(a,order); end\nfunction a=ifreshape(a, shape)\nif ~isequal(shape, size(a)), a=reshape(a,shape); end\nfunction a=ifpermute2(a, order)\nif ~isequal(order, 1:3), a=permute(a,order); end\nfunction a=ifreshape2(a, shape)\nif ~isequal(shape, shape), a=reshape(a,shape); end\n\n\nfunction timing(N,P,Q,R)\n\na0 = rand(1, P, Q); \nb0 = rand(1, Q, R);\na = a0(ones(1,N),:,:); % Cloning along first dimension\nb = b0(ones(1,N),:,:); % Cloning along first dimension\n[n1 p q1] = size(a); % reads third dim even if it is 1.\n[n2 q2 r] = size(b); % reads third dim even if it is 1.\ndisp ' '\ndisp 'Array Size Size Number of elements'\nfprintf (1, 'A Nx(PxQ) %0.0f x (%0.0f x %0.0f) %8.0f\\n', [n1 p q1 numel(a)])\nfprintf (1, 'B Nx(QxR) %0.0f x (%0.0f x %0.0f) %8.0f\\n', [n2 q2 r numel(b)])\nf1 = @() permute(a, [2 3 1]);\nf2 = @() permute(a, [1 3 2]);\nf3 = @() permute(a, [2 1 3]);\nf4 = @() permute(a, [1 2 3]);\nf5 = @() permute(b, [2 3 1]);\nf6 = @() permute(b, [1 3 2]);\nf7 = @() permute(b, [2 1 3]);\nf8 = @() permute(b, [1 2 3]);\ndisp ' '\ndisp ' PERMUTE(A,[2 3 1]) PERMUTE(A,[1 3 2]) PERMUTE(A,[2 1 3]) PERMUTE(A,[1 2 3])'\nfprintf(1, '%20.5f', [timeit(f1), timeit(f2), timeit(f3), timeit(f4)])\ndisp ' '\ndisp ' PERMUTE(B,[2 3 1]) PERMUTE(B,[1 3 2]) PERMUTE(B,[2 1 3]) PERMUTE(B,[1 2 3])'\nfprintf(1, '%20.5f', [timeit(f5), timeit(f6), timeit(f7), timeit(f8)])\ndisp ' '\ndisp ' '\ndisp ' RESHAPE(A,[N*P Q]) RESHAPE(B,[N R Q]) RESHAPE(B,[N 1 R Q])'\nf1 = @() reshape(a, [N*P Q]);\nf2 = @() reshape(b, [N R Q]);\nf3 = @() reshape(b, [N 1 R Q]);\nfprintf(1, '%20.5f', [timeit(f1), timeit(f2), timeit(f3)])\ndisp ' '\nf1 = @() a .* a;\nf2 = @() bsxfun(@times, a, a);\nf3 = @() b .* b;\nf4 = @() bsxfun(@times, b, b);\ndisp ' '\ndisp ' A .* A BSXFUN(@TIMES,A,A)'\nfprintf(1, '%20.5f%20.5f\\n', [timeit(f1), timeit(f2)])\ndisp ' B .* B BSXFUN(@TIMES,B,B)'\nfprintf(1, '%20.5f%20.5f\\n', [timeit(f3), timeit(f4)])\nif R==1\n disp ' '\n disp ' NOTE: If R=1 then RESHAPE(B,[N R Q]) is equivalent to'\n disp ' PERMUTE(B,[1 3 2]) but much faster!'\n 
disp ' (at least on my system)'\nend\ndisp ' '\n\n\nfunction timing2(P,Q)\n\na = rand(P, Q); \nb = rand(Q, 1);\nfprintf ('Size of A: %0.0f x %0.0f\\n', size(a))\nfprintf ('Size of B: %0.0f x %0.0f\\n', size(b))\ndisp ' '\ndisp ' A * B TONY''S TRICK BSXFUN'\nf1 = @() a * b;\nf2 = @() clone_multiply_sum(a, b', P);\nf3 = @() sum(bsxfun(@times, a, b'), 2);\nfprintf(1, '%13.5f', [timeit(f1), timeit(f2), timeit(f3)])\ndisp ' '\ndisp ' '\nc = f1() - f2(); \nd = max(c(:)); \nif d > eps*20\n disp 'There is an unexpected output difference:';\n disp (d);\nend\n\nfunction c = clone_multiply_sum(a,b,P)\nc = sum(a .* b(ones(1,P),:), 2);"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "arraylab13.m", "ext": ".m", "path": "smn-thesis-master/dependencies/Multiprod_2009/Testing/arraylab13.m", "size": 1913, "source_encoding": "utf_8", "md5": "942e4a25270936f264b83f4367d9b7fa", "text": "function c = arraylab13(a,b,d1,d2)\n% This is the engine used in MULTIPROD 1.3 for these cases:\n% PxQ IN A - Rx1 IN B\n% PxQ IN A - RxS IN B (slowest)\n\nndimsA = ndims(a); % NOTE - Since trailing singletons are removed,\nndimsB = ndims(b); % not always NDIMSB = NDIMSA\nNsA = d2 - ndimsA; % Number of added trailing singletons\nNsB = d2 - ndimsB;\nsizA = [size(a) ones(1,NsA)];\nsizB = [size(b) ones(1,NsB)];\n\n% Performing products\nif sizB(d2) == 1 % PxQ IN A - Rx1 IN B\n % A * B\n c = mbyv(a, b, d1);\nelse % PxQ IN A - RxS IN B (least efficient)\n p = sizA(d1);\n s = sizB(d2); \n % Initializing C\n sizC = sizA; \n sizC(d2) = s;\n c = zeros(sizC);\n % Vectorized indices for B and C\n Nd = length(sizB);\n Bindices = cell(1,Nd); % preallocating (cell array)\n for d = 1 : Nd\n Bindices{d} = 1:sizB(d);\n end\n Cindices = Bindices;\n Cindices{d1} = 1:p; \n % Building C\n for Ncol = 1:s\n Bindices{d2} = Ncol; Cindices{d2} = Ncol;\n c(Cindices{:}) = mbyv(a, b(Bindices{:}), d1);\n end\nend\n\n\nfunction c = mbyv(a, b, dim)\n% NOTE: This function is part of MULTIPROD 1.3\n\n% 1 - Transposing: Qx1 matrices in B become 1xQ matrices\norder = [1:dim-1, dim+1, dim, dim+2:ndims(b)];\nb = permute(b, order);\n\n% 2 - Cloning B P times along its singleton dimension DIM.\n% Optimized code for B2 = REPMAT(B, [ONES(1,DIM-1), P]).\n% INDICES is a cell array containing vectorized indices.\nP = size(a, dim);\nsiz = size(b);\nsiz = [siz ones(1,dim-length(siz))]; % Ones are added if DIM > NDIMS(B)\nNd = length(siz);\nindices = cell(1,Nd); % preallocating\nfor d = 1 : Nd\n indices{d} = 1:siz(d);\nend\nindices{dim} = ones(1, P); % \"Cloned\" index for dimension DIM\nb2 = b(indices{:}); % B2 has same size as A\n\n% 3 - Performing dot products along dimension DIM+1\nc = sum(a .* b2, dim+1);"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "materialLib.m", "ext": ".m", "path": "smn-thesis-master/materialLib/materialLib.m", "size": 6718, "source_encoding": "utf_8", "md5": "6395189afc14d401689fa1ea8dc486c6", "text": "function [epsilon,alpha,mu] = materialLib(material, wavelengths, varargin)\n% small library of optical functions for anisotropic materials\nNwl = length(wavelengths);\nepsilon = zeros(3,3,Nwl);\nmu = setDiag(ones(3,Nwl));\nalpha = 0;\n\nswitch material\n \n case 'rubrene'\n data = load('rubreneOptfun.mat');\n data = data.filetosave;\n eV = (1239.8)./wavelengths;\n epsilon = setDiag( ((interp1(data(:,1),data(:,2:4),eV)).^2)' );\n \n case '+EDS'\n alpha = zeros(3,3,Nwl);\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = 1.06482*lam2./(lam2 - 0.0103027)...\n +2.3712*lam2./(lam2 - 
92.3287) + 1.2728;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = 1.07588*lam2./(lam2 - 0.0101915)...\n +2.64847*lam2./(lam2 - 94.8497) + 1.2751;\n lam2 = wavelengths.^2;\n alpha(1,1,:) = wavelengths.^3*(0.0146441)./(lam2 - 100.202^2).^2;\n alpha(3,3,:) = wavelengths.^3*(-0.0301548)./(lam2 - 100^2).^2;\n alpha(2,2,:) = alpha(1,1,:);\n \n case '-EDS'\n alpha = zeros(3,3,Nwl);\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = 1.06482*lam2./(lam2 - 0.0103027)...\n +2.3712*lam2./(lam2 - 92.3287) + 1.2728;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = 1.07588*lam2./(lam2 - 0.0101915)...\n +2.64847*lam2./(lam2 - 94.8497) + 1.2751;\n lam2 = wavelengths.^2;\n alpha(1,1,:) = wavelengths.^3*(-0.0146441)./(lam2 - 100.202^2).^2;\n alpha(3,3,:) = wavelengths.^3*(0.0301548)./(lam2 - 100^2).^2;\n alpha(2,2,:) = alpha(1,1,:);\n \n case 'SYEDS'\n if nargin > 2\n c = varargin{1};\n else\n c = 3.1547;\n end\n alpha = zeros(3,3,Nwl);\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = 1.06482*lam2./(lam2 - 0.0103027)...\n +2.3712*lam2./(lam2 - 92.3287) + 1.2728;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = 1.07588*lam2./(lam2 - 0.0101915)...\n +2.64847*lam2./(lam2 - 94.8497) + 1.2751;\n data = load('SYEDS');\n data = data.SYEDS;\n data = ((interp1(real(data(1,:)).',[real(data(2,:));imag(data(2,:))].',wavelengths)))';\n epsilon(1,1,:) = squeeze(c*(data(1,:) + 1i*data(2,:))) + squeeze(epsilon(1,1,:)).';\n lam2 = wavelengths.^2;\n alpha(1,1,:) = -wavelengths.^3*(0.0146441)./(lam2 - 100.202^2).^2;\n alpha(3,3,:) = -wavelengths.^3*(-0.0301548)./(lam2 - 100^2).^2;\n alpha(2,2,:) = alpha(1,1,:);\n \n case '+quartz'\n alpha = zeros(3,3,Nwl);\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = 1.07044083*lam2./(lam2 - 0.0100585997)...\n +1.10202242*lam2./(lam2 - 100) + 1.28604141;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = 1.09509924*lam2./(lam2 - 0.0102101864)...\n +1.15662475*lam2./(lam2 - 100) + 1.28851804;\n lam2 = wavelengths.^2;\n alpha(1,1,:) = wavelengths.^3*(0.0198)./(lam2 - 93^2).^2;\n alpha(3,3,:) = wavelengths.^3*(-0.0408)./(lam2 - 87^2).^2;\n alpha(2,2,:) = alpha(1,1,:);\n \n case '-quartz'\n alpha = zeros(3,3,Nwl);\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = 1.07044083*lam2./(lam2 - 0.0100585997)...\n +1.10202242*lam2./(lam2 - 100) + 1.28604141;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = 1.09509924*lam2./(lam2 - 0.0102101864)...\n +1.15662475*lam2./(lam2 - 100) + 1.28851804;\n lam2 = wavelengths.^2;\n alpha(1,1,:) = -wavelengths.^3*(0.0198)./(lam2 - 93^2).^2;\n alpha(3,3,:) = -wavelengths.^3*(-0.0408)./(lam2 - 87^2).^2;\n alpha(2,2,:) = alpha(1,1,:);\n \n case 'sapphire'\n osc1A = [1.4313493,0.65054713,5.3414021];\n osc1E = [0.0726631,0.1193242,18.028251].^2;\n osc2A = [1.5039759,0.55069141,6.5927379];\n osc2E = [0.0740288,0.1216529,20.072248].^2;\n lam2 = (wavelengths/1000).^2;\n for n = 1:Nwl\n epsilon(1,1,n) = sum(lam2(n)*osc1A./(lam2(n) - osc1E))+1;\n epsilon(3,3,n) = sum(lam2(n)*osc2A./(lam2(n) - osc2E))+1;\n end\n epsilon(2,2,:) = epsilon(1,1,:);\n \n case 'aBBO'\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = -lam2*0.0155+0.0184./(lam2 - 0.0179)+2.7405;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = -lam2*0.0044+0.0128./(lam2 - 0.0156)+2.373;\n \n case 'KDPnoG' % potassium acid phthalate without gyration\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = lam2.*13.0052/(lam2 - 400)...\n +0.01008956./(lam2 - 0.0129426) + 2.259276;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = lam2.*3.2279924./(lam2 - 400)...\n +0.008637494./(lam2 
- 0.012281) + 2.132668;\n \n \n case 'LiNbO3'\n osc1A = [2.6734,1.229,12.614];\n osc1E = [0.01764,0.05914,474.6];\n osc2A = [2.9804,0.5981,8.9543];\n osc2E = [0.02047,0.0666,416.08];\n lam2 = (wavelengths/1000).^2;\n for n = 1:Nwl\n epsilon(1,1,n) = sum(lam2(n)*osc1A./(lam2(n) - osc1E))+1;\n epsilon(3,3,n) = sum(lam2(n)*osc2A./(lam2(n) - osc2E))+1;\n end\n epsilon(2,2,:) = epsilon(1,1,:);\n \n case 'KDP'\n %This includes epsilon from Zernike (1964) and alpha from\n %Konstantinova (2000)\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = (13.00522*lam2./(lam2 - 400))+(0.01008956./(lam2 - 0.0129426))+2.259276;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = (3.2279924*lam2./(lam2 - 400))+(0.008637494./(lam2 - 0.0122810))+2.132668;\n lam2 = wavelengths.^2;\n alpha = zeros(3,3,Nwl);\n alpha(1,2,:) = wavelengths.^3.*(.023)./(lam2 - 10^2).^2;\n alpha(2,1,:) = alpha(1,2,:);\n \n case 'KAP'\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = (-0.013296131.*lam2)+(0.037318762./(lam2 - (0.175493731^2)))+2.663434655;\n epsilon(2,2,:) = 2.670444937+(0.031617528./(lam2-(0.208293225^2)))-(0.004014395.*lam2);\n epsilon(3,3,:) = 2.196073191+(0.015025722./(lam2-(0.190981743^2)))-(0.006100780.*lam2);\n \n case 'TiO2'\n lam2 = (wavelengths/1000).^2;\n epsilon(1,1,:) = 0.2441./(lam2 - 0.0803)+5.913;\n epsilon(2,2,:) = epsilon(1,1,:);\n epsilon(3,3,:) = 0.3322./(lam2 - 0.0843)+7.197;\n \n case 'test'\n\n epsilon(1,1,:) = ones(Nwl,1)*(2.2);\n epsilon(2,2,:) = ones(Nwl,1)*(2.2);\n epsilon(3,3,:) = ones(Nwl,1)*(2.2);\n\n \nend\nend\n\nfunction out = setDiag(diag)\nout = zeros(size(diag, 1), size(diag, 1), size(diag, 2));\nfor i=1:size(diag, 1)\n out(i,i,:) = diag(i,:);\nend\nend"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "MPlot4D.m", "ext": ".m", "path": "smn-thesis-master/misc_utilities/MPlot4D.m", "size": 14618, "source_encoding": "utf_8", "md5": "8b22ef4fe9c79bd9e96465da950b8e1c", "text": "classdef (InferiorClasses = {?matlab.graphics.axis.Axes}) MPlot4D < handle\n % this is a more powerful but less polished version of MPlot3D. It can\n % accept arrays of dimension 5 and make videos that run over the 5th\n % dimension. The constructor requires an array xData, which is the physical\n % values ascribed to the 5th dimension of Data (usually wavelength).\n % These values are put into a text box above the MM plots. 
I never got\n % around to documenting this class...\n \n properties\n \n Data\n xData\n uniquezero = true\n palette = 'Fireice'\n gs = 0\n width\n fontsize = 14\n limz = 1e-3\n norm = true\n hSpacing = 3 %vertical spacing of subplots\n vSpacing = 3 % horizonal spacing of subplots\n cbw = 10\n \n end\n \n properties (SetAccess = protected)\n \n figHandle\n axesHandles = gobjects(4)\n colorbarHandles = gobjects(4)\n xDataTextBox\n \n end\n \n properties (Hidden)\n \n maskHandles\n \n end\n \n methods\n \n function obj = MPlot4D(data, xData, varargin)\n obj.figHandle = figure;\n obj.width = getFigWidth;\n obj.Data = data;\n obj.xData = xData;\n plot(obj, xData(1), varargin{:});\n end\n \n function parseProperties(obj, varargin)\n % Optional Name-Value pairs\n \n % 'uniquezero',logical: make 0 white or black in color palettes\n % Default is true.\n % 'palette','colPalette': string giving name of a case in colPalette.m\n % Default is 'Fireice'\n % 'gs',[min max]: GlobalScale, plot limits of all Z-scales between min, max.\n % If not given, each MM element maps to its own min and max value.\n % Only 1 colorbar is drawn with GlobalScale is set\n % 'fontsize',scalar: Size of font in colorbars\n % 'width',scalar: Width of figure in inches (but probably not inches). Height is\n % computed automatically to ensure no streching of plots (figure can go\n % off page, in which case, reduce value of 'width'. Default is %60 of\n % monitor on a Mac.\n % 'limz',scalar: limits how small the range of the z-axes can be.\n % 'hSpacing',scalar: horizontal space between axes, in pixels.\n % 'vSpacing',scalar: vertical space between axes, in pixels.\n % 'cbw',scalar: Width of the colorbars, in pixels. \n p = inputParser;\n % setup input scheme\n addRequired(p,'obj',@(x) isa(x,'MPlot4D'))\n addParameter(p,'norm',obj.norm,@(x) strcmp(x,'nonorm'))\n addParameter(p,'uniquezero',obj.uniquezero,@islogical)\n addParameter(p,'palette',obj.palette,@ischar)\n addParameter(p,'limz',obj.limz,@(x) isscalar(x)&&isnumeric(x))\n addParameter(p,'fontsize',obj.fontsize,@(x) isscalar(x)&&isnumeric(x))\n addParameter(p,'width',obj.width,@(x) isscalar(x) && isnumeric(x)) % inches\n addParameter(p,'gs',obj.gs,@(x) length(x) == 2 && isnumeric(x))\n addParameter(p,'hSpacing',obj.hSpacing,@isscalar)\n addParameter(p,'vSpacing',obj.vSpacing,@isscalar)\n addParameter(p,'cbw',obj.cbw,@isscalar)\n parse(p, obj, varargin{:}) %parse inputs\n obj.norm = p.Results.norm;\n obj.uniquezero = p.Results.uniquezero;\n obj.palette = p.Results.palette;\n obj.gs = p.Results.gs;\n obj.limz = p.Results.limz;\n obj.fontsize = p.Results.fontsize;\n obj.width = p.Results.width;\n obj.hSpacing = p.Results.hSpacing;\n obj.vSpacing = p.Results.vSpacing;\n obj.cbw = p.Results.cbw;\n \n end\n \n function normalize(obj)\n obj.Data = obj.Data ./ obj.Data(1,1,:,:,:);\n obj.Data(isnan(obj.Data)) = 0;\n end\n \n function plot(obj, xVal, varargin)\n if ~isempty(varargin)\n parseProperties(obj, varargin{:})\n end\n dataIndex = round(fracIndex(obj.xData,xVal));\n \n sz = size(obj.Data);\n dummy = uicontrol('style', 'text', 'fontsize', obj.fontsize, 'units', 'pixels');\n set(dummy,'String', '-0.000');\n cblbextents = get(dummy, 'extent');\n cblbsz = cblbextents(3); % colorbar label size\n delete(dummy)\n figWidth = (obj.width) * obj.figHandle.Parent.ScreenPixelsPerInch;\n\n if obj.gs==0\n plotW = (figWidth - 9*obj.hSpacing-4*(obj.cbw + cblbsz))/4;\n plotH = sz(3)/sz(4)*plotW;\n figHeight = plotH*4+5*obj.vSpacing + cblbextents(4);\n \n totalPlotWidth = 
obj.hSpacing*2+obj.cbw+cblbsz+plotW;\n plotPosFun = @(j,k) [ (obj.hSpacing+(k-1)*totalPlotWidth)/figWidth...\n ,(obj.vSpacing+(4-j)*(plotH+obj.vSpacing))/figHeight,...\n plotW/figWidth,...\n plotH/figHeight];\n set(obj.figHandle,'Position',[0,0,figWidth,figHeight],'units','pixels');\n for j=1:4\n for k=1:4\n obj.axesHandles(j,k) = ...\n subplot('position',plotPosFun(j,k),'units','pixels');\n clim = [min(min(obj.Data(j,k,:,:,dataIndex))),max(max(obj.Data(j,k,:,:,dataIndex)))];\n if obj.limz ~= 0 % modify axes bounds if limz is set\n if (clim(2) - clim(1)) < obj.limz\n avg = (clim(2) + clim(1))./2;\n clim(2) = avg + obj.limz/2;\n clim(1) = avg - obj.limz/2;\n end\n end\n pos = get(obj.axesHandles(j,k),'Position');\n imagesc(squeeze(obj.Data(j,k,:,:,dataIndex)),'Parent',obj.axesHandles(j,k),clim)\n axis(obj.axesHandles(j,k),'off')\n colormap(obj.axesHandles(j,k),makeColormap(clim,obj.uniquezero,obj.palette))\n obj.colorbarHandles(j,k) = colorbar(obj.axesHandles(j,k),'units','pixels',...\n 'Position',[pos(1)+pos(3)+obj.hSpacing,pos(2)+cblbextents(4)/4,...\n obj.cbw,pos(4)-cblbextents(4)/2],...\n 'fontsize',obj.fontsize);\n end\n end\n% if any(strcmp('nonorm', p.UsingDefaults))\n% obj.axesHandles(1,1).CLim = [0 1];\n% end\n else\n plotW = (figWidth - 6*obj.hSpacing - 2*obj.cbw - cblbsz)/4;\n plotH = sz(3)/sz(4)*plotW;\n figHeight = plotH*4+5*obj.vSpacing + cblbextents(4);\n plotPosFun = @(j,k) [ (obj.hSpacing+(k-1)*(plotW+obj.hSpacing))/figWidth,...\n (obj.vSpacing+(4-j)*(plotH+obj.vSpacing))/figHeight,...\n plotW/figWidth,...\n plotH/figHeight];\n set(obj.figHandle,'Position',[0,0,figWidth,figHeight],'units','pixels');\n for j=1:4\n for k=1:4\n obj.axesHandles(j,k) = ...\n subplot('position',plotPosFun(j,k),'units','pixels');\n pos = get(obj.axesHandles(j,k),'Position');\n imagesc(squeeze(obj.Data(j,k,:,:,dataIndex)),'Parent',obj.axesHandles(j,k),obj.gs)\n colormap(obj.axesHandles(j,k),makeColormap(obj.gs,obj.uniquezero,obj.palette))\n axis(obj.axesHandles(j,k),'off')\n end\n end\n obj.colorbarHandles(1,4) = colorbar(obj.axesHandles(1,4),'units','pixels',...\n 'Position',[pos(1)+pos(3)+obj.hSpacing, cblbextents(4)/4+6,...\n obj.cbw,figHeight-3*cblbextents(4)/2-12],...\n 'fontsize',obj.fontsize);\n end\n obj.xDataTextBox = ...\n uicontrol('style', 'text', 'fontsize', obj.fontsize, 'units', 'pixels', ...\n 'position', [figWidth/2-cblbextents(3), figHeight-cblbextents(4), cblbextents(3:4)]);\n set(obj.xDataTextBox, 'String', num2str(obj.xData(dataIndex)));\n end\n \n function mmdata = getPlotData(obj)\n % h: [4,4] array of axis handles\n mmdata = zeros([4, 4, size(obj.axesHandles(1,1).Children.CData)], ...\n class(obj.axesHandles(1,1).Children.CData));\n for j=1:4\n for k=1:4\n mmdata(j,k,:,:) = obj.axesHandles(j,k).Children.CData;\n end\n end\n end\n\n function replacePlotData(obj, idx)\n % MMreplace3DplotData replaces the data in 4x4 intensity plots.\n % h is a [4,4] array of axis handles\n % Data is a 4x4xNxM array. 
Data size should not be different than data in\n % plots.\n if obj.gs == 0\n for j=1:4\n for k=1:4\n obj.axesHandles(j,k).Children.CData = squeeze(obj.Data(j,k,:,:,idx));\n clim = [min(min(obj.Data(j,k,:,:,idx))),max(max(obj.Data(j,k,:,:,idx)))];\n if obj.limz ~= 0 % modify axes bounds if limz is set\n if (clim(2) - clim(1)) < obj.limz\n avg = (clim(2) + clim(1))./2;\n clim(2) = avg + obj.limz/2;\n clim(1) = avg - obj.limz/2;\n end\n end\n obj.axesHandles(j,k).CLim = clim;\n colormap(obj.axesHandles(j,k),makeColormap(clim,obj.uniquezero,obj.palette))\n end\n end\n else\n for j=1:4\n for k=1:4\n obj.axesHandles(j,k).Children.CData = squeeze(obj.Data(j,k,:,:,idx));\n end\n end\n end\n% \n end\n \n function makeAVI(obj, xRange, AVIfilename)\n if isempty(obj.xData)\n xVals = xRange;\n else\n [X,I] = sort(obj.xData); % added this to allow unsorted xData\n indices = unique(round(fracIndex(X,xRange)),'first');\n xVals = I(indices);\n end\n \n v = VideoWriter(AVIfilename);\n v.FrameRate = 10;\n open(v);\n for i=xVals\n replacePlotData(obj, i)\n set(obj.xDataTextBox, 'String', num2str(obj.xData(i)));\n writeVideo(v, getframe(obj.figHandle));\n end\n close(v);\n\n end\n \n function update(obj, varargin)\n obj.figHandle.Visible = 'off';\n data = getPlotData(obj);\n delete(obj.axesHandles);\n delete(obj.colorbarHandles)\n obj.axesHandles = gobjects(4);\n obj.colorbarHandles = gobjects(4);\n plot(obj,data,varargin{:});\n obj.figHandle.Visible = 'on';\n end\n \n end\nend\n\n\nfunction width = getFigWidth % sets default width to 60% of display width\n scrsz = get(0,'screensize');\n width = 0.6*scrsz(3)/get(0,'ScreenPixelsPerInch');\nend\n\nfunction colAr = colPalette(palette)\n% these are custom color palettes. A palette is just a Nx4 matrix. The\n% first column are values between 0 and 256 that position a color marker.\n% The 2nd, 3rd, and 4th columns are RGB color values.\nswitch palette\n case 'Rainbow'\n colAr = ...\n [0\t255\t0\t241;...\n 36\t0\t65\t220;...\n 86\t0\t253\t253;...\n 128\t0\t255\t15;...\n 171\t255\t242\t0;...\n 234\t255\t127\t0;...\n 256\t255\t0\t0];\n \n case 'HotCold Bright'\n colAr = ...\n [0\t0\t65\t220;...\n 36\t0\t90\t240;...\n 76\t0\t253\t253;...\n 128\t250\t250\t250;...\n 182\t255\t242\t0;...\n 224\t255\t127\t0;...\n 256\t255\t0\t0];\n \n case 'HotCold Dark'\n colAr = ...\n [0\t0\t253\t253;...\n 36\t1\t114\t239;...\n 76\t0\t90\t240;...\n 128\t0\t0\t0;...\n 182\t255\t0\t0;...\n 224\t255\t127\t0;...\n 256\t255\t242\t0];\n \n case 'TwoTone Bright'\n colAr = ...\n [0\t0\t0\t255;...\n 128\t255\t255\t255;...\n 256\t255\t0\t0];\n \n case 'TwoTone Dark'\n colAr = ...\n [0\t0\t0\t255;...\n 128\t0\t0\t0;...\n 256\t255\t0\t0];\n \n case 'Fireice'\n clrs = [0.75 1 1; 0 1 1; 0 0 1;...\n 0 0 0; 1 0 0; 1 1 0; 1 1 0.75];\n \n y = -3:3;\n m = 64;\n if mod(m,2)\n delta = min(1,6/(m-1));\n half = (m-1)/2;\n yi = delta*(-half:half)';\n else\n delta = min(1,6/m);\n half = m/2;\n yi = delta*nonzeros(-half:half);\n end\n colAr = cat(2,(0:4:255).',255*interp2(1:3,y,clrs,1:3,yi));\nend\nend\n\nfunction fracIndx = fracIndex(array,x)\n\nfracIndx = zeros(1,length(x));\nfor idx = 1:length(x)\n if x(idx) >= array(end)\n fracIndx(idx) = length(array);\n elseif x(idx) <= array(1)\n fracIndx(idx) = 1;\n else\n a = find(array <= x(idx));\n a = a(length(a));\n b = find(array > x(idx));\n b = b(1);\n fracIndx(idx) = a+(x(idx)-array(a))/(array(b)-array(a));\n end\n \nend\nend\n\nfunction cm = makeColormap(clim,b_uniqueZero,palette)\ndmin=clim(1);\ndmax=clim(2);\nif dmax == dmin\n dmax=1;\n dmin=0;\nend\nif 
b_uniqueZero == true\n Zscale = zeros(1,256);\n if abs(dmin) < abs(dmax)\n didx = (dmax - dmin)/(2*dmax);\n for idx = 0:255\n Zscale(idx+1) = 256 - didx*idx;\n end\n else\n didx = (dmin-dmax)/(2*dmin);\n for idx = 0:255\n Zscale(idx+1) = idx*didx;\n end\n Zscale = flip(Zscale);\n end\nelse\n Zscale = flip(1:256);\nend\ncolAr = colPalette(palette);\ncm = zeros(256,3);\nfor n = 1:256\n x = fracIndex(colAr(:,1),Zscale(n));\n cm(n,1) = interp1(colAr(:,2),x);\n cm(n,2) = interp1(colAr(:,3),x);\n cm(n,3) = interp1(colAr(:,4),x);\nend\ncm = cm./255;\ncm = flip(cm,1);\nend\n"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "MMgetp.m", "ext": ".m", "path": "smn-thesis-master/misc_utilities/MMgetp.m", "size": 8872, "source_encoding": "utf_8", "md5": "65bcc2600efa3ab7e9f30ba08f232327", "text": "function out = MMgetp(M,parameter)\n% This function contains many parameters that one can compute from a\n% Mueller matrix (M). In general, M is assumed to be an\n% experimental one. Hence, a Mueller-Jones matrix or even a physical M is \n% not assumed. For most parameters, M is first converted to its closest \n% Mueller-Jones matrix, or its Nearest Jones matrix. \n\nif ndims(M) > 3 % reshape array into 4,4,N\n sz = size(M);\n M = reshape(M,4,4,[]);\nelse\n sz = 0;\nend\n\nswitch lower(parameter)\n case 'opt prop'\n J = nearestJones(M);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2); % 2*T = sqrt(L.^2 + Lp.^2 + C.^2)\n O = (T.*K)./(sin(T));\n L=1i.*O.*( J(1,1,:) - J(2,2,:) );\n Lp=1i.*O.*( J(1,2,:) + J(2,1,:) );\n C=O.*( J(1,2,:) - J(2,1,:) );\n LB=real(L);\n LD=-imag(L);\n LBp=real(Lp);\n LDp=-imag(Lp);\n CB=real(C);\n CD=-imag(C);\n A = -2*real(log(1./K)); % mean absorption\n out = squeeze([LB;LD;LBp;LDp;CB;CD;A]);\n case 'lm'\n J = nearestJones(M);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2);\n O = (T.*K)./(sin(T));\n L=1i.*O.*( J(1,1,:) - J(2,2,:) );\n Lp=1i.*O.*( J(1,2,:) + J(2,1,:) );\n C=O.*( J(1,2,:) - J(2,1,:) );\n LB=real(L);\n LD=-imag(L);\n LBp=real(Lp);\n LDp=-imag(Lp);\n CB=real(C);\n CD=-imag(C);\n A = 2*real(log(1./K)); % mean absorption\n out = [A,-LD,-LDp,CD ; -LD,A,CB,LBp ; -LDp,-CB,A,-LB ; CD,-LBp,LB,A];\n case 'logm' %log of Mueller matrix with filtering\n Mfiltered = filterM(M);\n out = zeros(size(M));\n for n=1:size(M,3); out(:,:,n) = logm(Mfiltered(:,:,n)); end\n case 'expm' %log of Mueller matrix with filtering\n out = zeros(size(M));\n for n=1:size(M,3); out(:,:,n) = expm(M(:,:,n)); end\n case 'lb'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n out = real(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n case 'ld'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n out = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n case 'lbp'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n out = real(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n case 'ldp'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n out = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n case 'cb'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n out = real(O.*( J(1,2,:) - J(2,1,:) ));\n case 'cd'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n out = -imag(O.*( J(1,2,:) - J(2,1,:) ));\n case 'a' % total mean extinction\n J = nearestJones(M);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n out = -2*real(log(1./K));\n case 'a_aniso' % anisotropic part of the mean extinction\n J = nearestJones(M);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2); % 2*T = 
sqrt(L.^2 + Lp.^2 + C.^2)\n O = (T.*K)./(sin(T));\n LD = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n CD = -imag(O.*( J(1,2,:) - J(2,1,:) ));\n out = sqrt(LD.^2 + LDp.^2 + CD.^2); % not same as imag(2*T) !\n case 'a_iso' % isotropic part of the mean extinction\n J = nearestJones(M);\n K = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\n T = acos( K.*( J(1,1,:) + J(2,2,:) )./2); % 2*T = sqrt(L.^2 + Lp.^2 + C.^2)\n O = (T.*K)./(sin(T));\n LD = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n CD = -imag(O.*( J(1,2,:) - J(2,1,:) ));\n out = -2*real(log(1./K)) - sqrt(LD.^2 + LDp.^2 + CD.^2);\n case 'ldmag'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n LD = imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n out = sqrt(LD.^2 + LDp.^2);\n case 'ldang'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n LD = -imag(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LDp = -imag(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n out = atan2(LDp , LD)./2;\n %out = out + pi*(out < 0);\n case 'lbang'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n LB = real(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LBp = real(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n out = atan2(LBp , LB)./2;\n out = out + pi*(out < 0);\n case 'lbmag'\n J = nearestJones(M);\n O = jonesAnisotropy(J);\n LB = real(1i.*O.*( J(1,1,:) - J(2,2,:) ));\n LBp = real(1i.*O.*( J(1,2,:) + J(2,1,:) ));\n out = sqrt(LB.^2 + LBp.^2);\n case 'di' % Depolarization Index\n out = (sqrt(squeeze(sum(sum(M.^2,1),2))./squeeze(M(1,1,:)).^2-1)./sqrt(3)).';\n case 'jones' % Jones matrix of a Mueller-Jones matrix\n out = MJ2J(M);\n case 'nearestjones'\n out = nearestJones(M); % Jones matrix\n % next line just phases the Jones matrix so that the\n % imaginary part of J(1,1) = 0. 
i.e., it matches case 'jones'\n for n=1:size(out,3); out(:,:,n) = exp( -1i*angle(out(1,1,n)) ) * out(:,:,n); end\n case 'covar' % Cloude carvariance matrix\n out = M2Cov(M);\n case 'covar2m' % Cloude carvariance matrix\n out = Cov2M(M);\n case 'mfiltered' % closest physical Mueller matrix\n out = filterM(M);\nend\n\nif size(out,1) == 1 && size(out,2) == 1 %remove extra singletons\n out = squeeze(out).';\nend\nif sz ~= 0 % reshape to match input dimensions\n sz2 = size(out);\n out = reshape(out,[sz2(1:(length(sz2)-1)),sz(3:length(sz))]);\nend\n\nend % end parent function\n\n% \\\\ LOCAL FUNCTIONS \\\\\n\nfunction J = MJ2J(M) % Mueller-Jones to Jones\nJ(1,1,:) = ((M(1,1,:)+M(1,2,:)+M(2,1,:)+M(2,2,:))/2).^(1/2);\nk = 1./(2.*J(1,1,:));\nJ(1,2,:) = k.*(M(1,3,:)+M(2,3,:)-1i.*(M(1,4,:)+M(2,4,:)));\nJ(2,1,:) = k.*(M(3,1,:)+M(3,2,:)+1i.*(M(4,1,:)+M(4,2,:)));\nJ(2,2,:) = k.*(M(3,3,:)+M(4,4,:)+1i.*(M(4,3,:)-M(3,4,:)));\nend\n\n\nfunction C = M2Cov(M) % Mueller to Cloude covariance\nC(1,1,:) = M(1,1,:) + M(1,2,:) + M(2,1,:) + M(2,2,:);\nC(1,2,:) = M(1,3,:) + M(1,4,:)*1i + M(2,3,:) + M(2,4,:)*1i;\nC(1,3,:) = M(3,1,:) + M(3,2,:) - M(4,1,:)*1i - M(4,2,:)*1i;\nC(1,4,:) = M(3,3,:) + M(3,4,:)*1i - M(4,3,:)*1i + M(4,4,:);\nC(2,1,:) = M(1,3,:) - M(1,4,:)*1i + M(2,3,:) - M(2,4,:)*1i;\nC(2,2,:) = M(1,1,:) - M(1,2,:) + M(2,1,:) - M(2,2,:);\nC(2,3,:) = M(3,3,:) - M(3,4,:)*1i - M(4,3,:)*1i - M(4,4,:);\nC(2,4,:) = M(3,1,:) - M(3,2,:) - M(4,1,:)*1i + M(4,2,:)*1i;\nC(3,1,:) = M(3,1,:) + M(3,2,:) + M(4,1,:)*1i + M(4,2,:)*1i;\nC(3,2,:) = M(3,3,:) + M(3,4,:)*1i + M(4,3,:)*1i - M(4,4,:);\nC(3,3,:) = M(1,1,:) + M(1,2,:) - M(2,1,:) - M(2,2,:);\nC(3,4,:) = M(1,3,:) + M(1,4,:)*1i - M(2,3,:) - M(2,4,:)*1i;\nC(4,1,:) = M(3,3,:) - M(3,4,:)*1i + M(4,3,:)*1i + M(4,4,:);\nC(4,2,:) = M(3,1,:) - M(3,2,:) + M(4,1,:)*1i - M(4,2,:)*1i;\nC(4,3,:) = M(1,3,:) - M(1,4,:)*1i - M(2,3,:) + M(2,4,:)*1i;\nC(4,4,:) = M(1,1,:) - M(1,2,:) - M(2,1,:) + M(2,2,:);\nC = C./2;\nend\n\nfunction M = Cov2M(C) % Cloude covariance to Mueller\nM(1,1,:) = C(1,1,:) + C(2,2,:) + C(3,3,:) + C(4,4,:);\nM(1,2,:) = C(1,1,:) - C(2,2,:) + C(3,3,:) - C(4,4,:);\nM(1,3,:) = C(1,2,:) + C(2,1,:) + C(3,4,:) + C(4,3,:);\nM(1,4,:) = ( -C(1,2,:) + C(2,1,:) - C(3,4,:) + C(4,3,:) )*1i;\nM(2,1,:) = C(1,1,:) + C(2,2,:) - C(3,3,:) - C(4,4,:);\nM(2,2,:) = C(1,1,:) - C(2,2,:) - C(3,3,:) + C(4,4,:);\nM(2,3,:) = C(1,2,:) + C(2,1,:) - C(3,4,:) - C(4,3,:);\nM(2,4,:) = ( -C(1,2,:) + C(2,1,:) + C(3,4,:) - C(4,3,:) )*1i;\nM(3,1,:) = C(1,3,:) + C(2,4,:) + C(3,1,:) + C(4,2,:);\nM(3,2,:) = C(1,3,:) - C(2,4,:) + C(3,1,:) - C(4,2,:);\nM(3,3,:) = C(1,4,:) + C(2,3,:) + C(3,2,:) + C(4,1,:);\nM(3,4,:) = ( -C(1,4,:) + C(2,3,:) - C(3,2,:) + C(4,1,:) )*1i;\nM(4,1,:) = ( C(1,3,:) + C(2,4,:) - C(3,1,:) - C(4,2,:) )*1i;\nM(4,2,:) = ( C(1,3,:) - C(2,4,:) - C(3,1,:) + C(4,2,:) )*1i;\nM(4,3,:) = ( C(1,4,:) + C(2,3,:) - C(3,2,:) - C(4,1,:) )*1i;\nM(4,4,:) = C(1,4,:) - C(2,3,:) - C(3,2,:) + C(4,1,:);\nM = real(M)./2;\nend\n\nfunction J = nearestJones(M)\nC = M2Cov(M);\nJ = zeros(2,2,size(C,3));\nfor n=1:size(C,3)\n [V,D] = eig(C(:,:,n),'vector');\n [~,mx] = max(D);\n J(:,:,n) = sqrt(D(mx))*reshape(V(:,mx),2,2).';\nend\nend\n\nfunction Mfiltered = filterM(M) % M to nearest physical M\nC_raw = M2Cov(M);\nC = zeros(size(C_raw));\nfor n=1:size(C_raw,3)\n [V,D] = eig(C_raw(:,:,n),'vector');\n list = find(D > 0.00001).';\n idx = 0;\n temp = zeros(4,4,length(list));\n for j = list\n idx = idx + 1;\n temp(:,:,idx) = D(j)*V(:,j)*V(:,j)';\n end\n C(:,:,n) = sum(temp,3);\nend\nMfiltered = Cov2M(C);\nend\n\nfunction O = 
jonesAnisotropy(J)\nK = ( J(1,1,:).*J(2,2,:) - J(1,2,:).*J(2,1,:)).^(-1/2);\nT = acos( K.*( J(1,1,:) + J(2,2,:) )./2);\nO = (T.*K)./(sin(T));\nend"} +{"plateform": "github", "repo_name": "shane-nichols/smn-thesis-master", "name": "PEMphaseVoltCali.m", "ext": ".m", "path": "smn-thesis-master/misc_utilities/4PEM/PEMphaseVoltCali.m", "size": 1767, "source_encoding": "utf_8", "md5": "f8a6fe7fc71e93e6c0a55a006b32a851", "text": "function [p_out,phase_out] = PEMphaseVoltCali(t,f,p)\n% p_out = [m,b,s] array of fitting values.\n% phase_out = phase of the PEM\n% this function demostrates how to find a linear relation relating the\n% PEM voltage to the amplitude of modulation. \nvolts = 0:0.01:2; % create an array of voltages to apply to the PEM\nAmps = 0.045 + 2.1*volts; % convert volts to amps using a linear equation.\n % the values b = 0.045 and m = 2.1 are what we are\n % trying to find.\n \nfor i = 1:length(Amps)\nI = 100*(1 + 0.95*sin( Amps(i)*sin(2*pi*t*f(1)+p(1)) )); % simulaiton of the waveform with scale factor\n% c = 100;\nC1 = sum(exp( 1i*2*pi*t*f(1)).*I)./length(t); % get amplitude of C_v1\nC2 = sum(exp( 1i*2*pi*t*3*f(1)).*I)./length(t); % get amplitude of C_v2\n[phase1(i),mag1(i)] = cart2pol(real(C1),imag(C1)); % convert to mag and phase\n[phase2(i),mag2(i)] = cart2pol(real(C2),imag(C2)); % convert to mag and phase\nend\n\n% add pi to any phases less than zero, average over the phases, then subtract from \n% pi/2.\nphase_out = pi/2 - sum(phase1+pi*(phase1<0))./length(phase1)\n \n\nfigure\nplot(volts,mag1,volts,mag2) % plot the magnitudes\n\np0 = [1,0,100]; % define initial parameters vector with slope of 1 and offset of 0\n% and scale factor 100 that one can estimate by looking at the plotted data.\n\n% perform non-linear least-square regression to determine parameters. 
\np_out = lsqcurvefit(@(param,volts)fitMags(param,volts),p0,volts,[mag1;mag2]);\nend\n\nfunction mags_out = fitMags(param,volts) % model function\n Amps = volts*param(1)+param(2); % convert volts to amps\n mags_out = param(3)*abs([besselj(1,Amps) ; besselj(3,Amps)]); %array to compare to data\nend\n"} +{"plateform": "github", "repo_name": "Saswati18/projectile_motion_matlab-master", "name": "quadDiff.m", "ext": ".m", "path": "projectile_motion_matlab-master/quadDiff.m", "size": 111, "source_encoding": "utf_8", "md5": "237d26155a5fb105383e2b0461272448", "text": "%% Equation of motion\r\nfunction xdot = mo(t, x, u) \r\n% xdotdot = a\r\nxdot = [0 1; 0 0]*x + [0 ; 1]*u ;\r\nend"} +{"plateform": "github", "repo_name": "Saswati18/projectile_motion_matlab-master", "name": "mo.m", "ext": ".m", "path": "projectile_motion_matlab-master/mo.m", "size": 117, "source_encoding": "utf_8", "md5": "8378ff91202eb369f2cf3029b7a10bea", "text": "%% Equation of motion\r\nfunction xdot = motion(t, x, u) \r\n% xdotdot = a\r\nxdot = [0 1; 0 0].*x + [0 ; 1].*u ;\r\nend"} +{"plateform": "github", "repo_name": "emsr/maths_burkhardt-master", "name": "bivnor.m", "ext": ".m", "path": "maths_burkhardt-master/bivnor.m", "size": 4663, "source_encoding": "utf_8", "md5": "aeeb07fb4759e7959064b8129dc6a95f", "text": "function value = bivnor ( ah, ak, r )\n\n%*****************************************************************************80\n%\n%% BIVNOR computes the bivariate normal CDF.\n%\n% Discussion:\n%\n% BIVNOR computes the probability for two normal variates X and Y\n% whose correlation is R, that AH <= X and AK <= Y.\n%\n% Licensing:\n%\n% This code is distributed under the GNU LGPL license.\n%\n% Modified:\n%\n% 13 April 2012\n%\n% Author:\n%\n% Original FORTRAN77 version by Thomas Donnelly.\n% MATLAB version by John Burkardt.\n%\n% Reference:\n%\n% Thomas Donnelly,\n% Algorithm 462: Bivariate Normal Distribution,\n% Communications of the ACM,\n% October 1973, Volume 16, Number 10, page 638.\n%\n% Parameters:\n%\n% Input, real AH, AK, the lower limits of integration.\n%\n% Input, real R, the correlation between X and Y.\n%\n% Output, real VALUE, the bivariate normal CDF.\n%\n% Local Parameters:\n%\n% Local, integer IDIG, the number of significant digits\n% to the right of the decimal point desired in the answer.\n%\n idig = 15;\n b = 0.0;\n\n gh = gauss ( - ah ) / 2.0;\n gk = gauss ( - ak ) / 2.0;\n\n if ( r == 0.0 )\n b = 4.00 * gh * gk;\n b = max ( b, 0.0 );\n b = min ( b, 1.0 );\n value = b;\n return\n end\n\n rr = ( 1.0 + r ) * ( 1.0 - r );\n\n if ( rr < 0.0 )\n fprintf ( 1, '\\n' );\n fprintf ( 1, 'BIVNOR - Fatal error!\\n' );\n fprintf ( 1, ' 1 < |R|.\\n' );\n error ( 'BIVNOR - Fatal error!' 
);\n end\n\n if ( rr == 0.0 )\n\n if ( r < 0.0 )\n\n if ( ah + ak < 0.0 )\n b = 2.0 * ( gh + gk ) - 1.0;\n end\n\n else\n\n if ( ah - ak < 0.0 )\n b = 2.0 * gk;\n else\n b = 2.0 * gh;\n end\n\n end\n\n b = max ( b, 0.0 );\n b = min ( b, 1.0 );\n value = b;\n return\n\n end\n\n sqr = sqrt ( rr );\n\n if ( idig == 15 )\n con = 2.0 * pi * 1.0E-15 / 2.0;\n else\n con = pi;\n for i = 1 : idig\n con = con / 10.0;\n end\n end\n%\n% (0,0)\n%\n if ( ah == 0.0 && ak == 0.0 )\n b = 0.25 + 0.5 * asin ( r ) / pi;\n b = max ( b, 0.0 );\n b = min ( b, 1.0 );\n value = b;\n return\n end\n%\n% (0,nonzero)\n%\n if ( ah == 0.0 && ak ~= 0.0 )\n\n b = gk;\n wh = -ak;\n wk = ( ah / ak - r ) / sqr;\n gw = 2.0 * gk;\n is = 1;\n%\n% (nonzero,0)\n%\n elseif ( ah ~= 0.0 && ak == 0.0 )\n\n b = gh;\n wh = -ah;\n wk = ( ak / ah - r ) / sqr;\n gw = 2.0 * gh;\n is = -1;\n%\n% (nonzero,nonzero)\n%\n elseif ( ah ~= 0.0 && ak ~= 0.0 )\n\n b = gh + gk;\n if ( ah * ak < 0.0 )\n b = b - 0.5;\n end\n wh = - ah;\n wk = ( ak / ah - r ) / sqr;\n gw = 2.0 * gh;\n is = -1;\n\n end\n\n while ( 1 )\n\n sgn = -1.0;\n t = 0.0;\n\n if ( wk ~= 0.0 )\n\n if ( abs ( wk ) == 1.0 )\n\n t = wk * gw * ( 1.0 - gw ) / 2.0;\n b = b + sgn * t;\n\n else\n\n if ( 1.0 < abs ( wk ) )\n\n sgn = -sgn;\n wh = wh * wk;\n g2 = gauss ( wh );\n wk = 1.0 / wk;\n\n if ( wk < 0.0 )\n b = b + 0.5;\n end\n\n b = b - ( gw + g2 ) / 2.0 + gw * g2;\n\n end\n\n h2 = wh * wh;\n a2 = wk * wk;\n h4 = h2 / 2.0;\n ex = exp ( - h4 );\n w2 = h4 * ex;\n ap = 1.0;\n s2 = ap - ex;\n sp = ap;\n s1 = 0.0;\n sn = s1;\n conex = abs ( con / wk );\n\n while ( 1 )\n\n cn = ap * s2 / ( sn + sp );\n s1 = s1 + cn;\n\n if ( abs ( cn ) <= conex )\n break\n end\n\n sn = sp;\n sp = sp + 1.0;\n s2 = s2 - w2;\n w2 = w2 * h4 / sp;\n ap = - ap * a2;\n\n end\n\n t = 0.5 * ( atan ( wk ) - wk * s1 ) / pi;\n b = b + sgn * t;\n\n end\n\n end\n\n if ( 0 <= is )\n break\n end\n\n if ( ak == 0.0 )\n break\n end\n\n wh = -ak;\n wk = ( ah / ak - r ) / sqr;\n gw = 2.0 * gk;\n is = 1;\n\n end\n\n b = max ( b, 0.0 );\n b = min ( b, 1.0 );\n value = b;\n\n return\nend\nfunction value = gauss ( t )\n\n%*****************************************************************************80\n%\n%% GAUSS is a univariate lower normal tail area.\n%\n% Licensing:\n%\n% This code is distributed under the GNU LGPL license.\n%\n% Modified:\n%\n% 13 April 2012\n%\n% Author:\n%\n% Original FORTRAN77 version by Thomas Donnelly.\n% MATLAB version by John Burkardt.\n%\n% Reference:\n%\n% Thomas Donnelly,\n% Algorithm 462: Bivariate Normal Distribution,\n% Communications of the ACM,\n% October 1973, Volume 16, Number 10, page 638.\n%\n% Parameters:\n%\n% Input, real T, the evaluation point.\n%\n% Output, real VALUE, the area of the lower tail of the normal PDF\n% from -oo to T.\n%\n value = ( 1.0 + erf ( t / sqrt ( 2.0 ) ) ) / 2.0;\n\n return\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_MXE.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_MXE.m", "size": 2114, "source_encoding": "utf_8", "md5": "829ff4b78c816bad28ac1dd5db3afbb8", "text": "function [y,back] = SGME_MXE(A,B,D,As,Bs,labels,logPrior)\n\n if nargin==0\n test_this();\n return;\n end\n\n\n dA = zeros(size(A));\n dB = zeros(size(B));\n dD = zeros(size(D));\n dAs = zeros(size(As));\n dBs = zeros(size(Bs));\n\n \n \n [LEc,back1] = SGME_logexpectation(A,B,D);\n [LEs,back2] = SGME_logexpectation(As,Bs,D);\n\n dLEc = zeros(size(LEc));\n dLEs = zeros(size(LEs));\n \n\n m = length(LEs); % #speakers\n 
n = length(LEc); % #recordings\n\n scal = 1/(n*log(m+1));\n\n \n \n logPost = zeros(m+1,1);\n logPost(m+1) = logPrior(m+1);\n y = 0;\n for j=1:n\n AA = bsxfun(@plus,As,A(:,j)); \n BB = bsxfun(@plus,Bs,B(:,j)); \n [LEboth,back3] = SGME_logexpectation(AA,BB,D); \n logPost(1:m) = logPrior(1:m) + LEboth.' - LEs.' - LEc(j); \n [yj,back4] = sumlogsoftmax(logPost,labels(j));\n y = y - yj;\n \n \n dlogPost = back4(-1);\n dLEs = dLEs - dlogPost(1:m).';\n dLEc(j) = dLEc(j) - sum(dlogPost(1:m));\n dLEboth = dlogPost(1:m).';\n [dAA,dBB,dDj] = back3(dLEboth);\n dD = dD + dDj;\n dAs = dAs + dAA;\n dBs = dBs + dBB;\n dA(:,j) = sum(dAA,2);\n dB(:,j) = sum(dBB,2);\n end\n\n y = y*scal;\n\n back = @(dy) back_this(dy,dA,dB,dD,dAs,dBs);\n \n function [dA,dB,dD,dAs,dBs] = back_this(dy,dA,dB,dD,dAs,dBs)\n\n %[LEc,back1] = SGME_logexpectation(A,B,D);\n %[LEs,back2] = SGME_logexpectation(As,Bs,D).';\n [dA1,dB1,dD1] = back1(dLEc);\n [dAs2,dBs2,dD2] = back2(dLEs);\n dA = (dy*scal) * (dA + dA1);\n dB = (dy*scal) * (dB + dB1);\n dD = (dy*scal) * (dD + dD1 + dD2);\n dAs = (dy*scal) * (dAs + dAs2);\n dBs = (dy*scal) * (dBs + dBs2);\n \n end\n \n \n\nend\n\nfunction test_this()\n\n m = 3;\n n = 5;\n dim = 2;\n \n A = randn(dim,n);\n As = randn(dim,m);\n B = rand(1,n);\n Bs = rand(1,m);\n D = rand(dim,1);\n logPrior = randn(m+1,1);\n labels = randi(m,1,n);\n\n \n f = @(A,B,D,As,Bs) SGME_MXE(A,B,D,As,Bs,labels,logPrior);\n testBackprop(f,{A,B,D,As,Bs});\n\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_train.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_train.m", "size": 2349, "source_encoding": "utf_8", "md5": "875c864d98e47717be58a0d88a2550ab", "text": "function model = SGME_train(R,labels,nu,zdim,niters,test)\n\n\n\n if nargin==0\n test_this();\n return;\n end\n\n\n [rdim,n] = size(R);\n m = max(labels);\n blocks = sparse(labels,1:n,true,m+1,n); \n num = find(blocks(:)); \n \n %Can we choose maximum likelihood prior parameters, given labels?\n %For now: prior expected number of speakers = m\n prior = create_PYCRP([],0,m,n); \n logPrior = prior.GibbsMatrix(labels);\n \n \n \n delta = rdim - zdim;\n assert(delta>0);\n \n %initialize\n P0 = randn(zdim,rdim);\n H0 = randn(delta,rdim);\n sqrtd0 = rand(zdim,1);\n \n szP = numel(P0);\n szH = numel(H0);\n \n \n w0 = pack(P0,H0,sqrtd0);\n\n if exist('test','var') && test\n testBackprop(@objective,w0);\n return;\n end\n \n mem = 20;\n stpsz0 = 1e-3;\n timeout = 5*60;\n \n \n w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);\n \n [P,H,sqrtd] = unpack(w);\n d = sqrtd.^2;\n \n model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);\n model.extract = @(R) SGME_extract(P,H,nu,R);\n model.objective = @(P,H,d) objective(pack(P,H,d));\n model.d = d;\n \n \n function w = pack(P,H,d)\n w = [P(:);H(:);d(:)];\n end\n\n function [P,H,d] = unpack(w)\n at = 1:szP;\n P = reshape(w(at),zdim,rdim);\n at = szP + (1:szH);\n H = reshape(w(at),delta,rdim);\n at = szP + szH + (1:zdim);\n d = w(at);\n \n end\n \n \n \n \n function [y,back] = objective(w)\n \n [P,H,sqrtd] = unpack(w);\n \n [A,b,back1] = SGME_extract(P,H,nu,R);\n \n d = sqrtd.^2;\n \n [PsL,back2] = SGME_logPsL(A,b,d,blocks,labels,num,logPrior);\n y = -PsL;\n \n \n back = @back_this;\n \n function [dw] = back_this(dy)\n %dPsL = -dy;\n [dA,db,dd] = back2(-dy);\n dsqrtd = 2*sqrtd.*dd;\n [dP,dH] = back1(dA,db);\n dw = pack(dP,dH,dsqrtd);\n \n end\n \n \n end\n\n\n\n\n\n\nend\n\nfunction test_this()\n\n zdim = 2;\n rdim = 4;\n n = 5;\n m = 3;\n \n 
prior = create_PYCRP([],0,m,n); \n labels = prior.sample(n);\n\n nu = pi;\n R = randn(rdim,n);\n\n test = true;\n niters = [];\n SGME_train(R,labels,nu,zdim,niters,test);\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scaled_GME_precision.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/scaled_GME_precision.m", "size": 2566, "source_encoding": "utf_8", "md5": "59c037444c1e57e933d5346bc36263b6", "text": "function [SGMEP,meand] = scaled_GME_precision(B)\n\n if nargin==0\n test_this();\n return;\n end\n\n dim = size(B,1);\n\n [V,D] = eig(B); % B = VDV'\n d = diag(D);\n meand = mean(d);\n %D = sparse(D);\n %I = speye(dim);\n \n SGMEP.logdet = @logdet;\n SGMEP.solve = @solve;\n \n function [y,back] = logdet(beta)\n betad = bsxfun(@times,beta,d);\n y = sum(log1p(betad),1);\n back = @(dy) dy*sum(d./(1+betad),1);\n end\n\n\n function [Y,back] = solve(RHS,beta)\n betad = beta*d;\n Y = V*bsxfun(@ldivide,betad+1,V.'*RHS);\n back = @(dY) back_solve(dY,Y,beta); \n end\n\n function [dRHS,dbeta] = back_solve(dY,Y,beta)\n dRHS = solve(dY,beta);\n if nargout >= 2\n %dA = (-dRHS)*Y.';\n %dbeta = trace(dA*B.');\n dbeta = -trace(Y.'*B.'*dRHS);\n end\n end\n\n\n\nend\n\nfunction [y,back] = logdettestfun(SGMEP,gamma)\n beta = gamma^2;\n [y,back1] = SGMEP.logdet(beta);\n back =@(dy) 2*gamma*back1(dy);\nend\n\nfunction [Y,back] = solvetestfun(SGMEP,RHS,gamma)\n\n beta = gamma^2;\n [Y,back1] = SGMEP.solve(RHS,beta);\n \n back =@(dY) back_solvetestfun(dY);\n \n function [dRHS,dgamma] = back_solvetestfun(dY)\n [dRHS,dbeta] = back1(dY);\n dgamma = 2*gamma*dbeta;\n end\nend\n\n\n\n\nfunction test_this()\n\n close all;\n\n fprintf('Test function values:\\n');\n dim = 5;\n RHS = rand(dim,1);\n \n %R = randn(dim,floor(1.1*dim));B = R*R.';B = B/trace(B);\n R = randn(dim,dim);B = R*R.';B = B/trace(B);\n I = eye(dim);\n \n [SGMEP,meand] = scaled_GME_precision(B);\n \n beta = rand/rand;\n [log(det(I+beta*B)),SGMEP.logdet(beta)]\n \n [(I+beta*B)\\RHS,SGMEP.solve(RHS,beta)]\n\n doplot = false;\n if doplot\n beta = 0.01:0.01:200;\n y = zeros(size(beta));\n for i=1:length(beta)\n y(i) = SGMEP.logdet(beta(i));\n end\n 1/meand\n plot(log(1/meand+beta),y);\n end\n \n gamma = rand/rand;\n fprintf('\\n\\n\\nTest logdet backprop (complex step) :\\n');\n testBackprop(@(gamma) logdettestfun(SGMEP,gamma),gamma); \n\n fprintf('\\n\\n\\nTest logdet backprop (real step) :\\n');\n testBackprop_rs(@(gamma) logdettestfun(SGMEP,gamma),gamma,1e-4); \n\n fprintf('\\n\\n\\nTest solve backprop (complex step) :\\n');\n testBackprop(@(RHS,gamma) solvetestfun(SGMEP,RHS,gamma),{RHS,gamma},{1,1}); \n \n fprintf('\\n\\n\\nTest solve backprop (real step) :\\n');\n testBackprop_rs(@(RHS,gamma) solvetestfun(SGMEP,RHS,gamma),{RHS,gamma},1e-4,{1,1}); \n \nend\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "dsolve.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/dsolve.m", "size": 980, "source_encoding": "utf_8", "md5": "8734dea4d3f28af88579fef7b106d892", "text": "function [Y,back] = dsolve(RHS,A)\n% SOLVE: Y= A\\RHS, with backpropagation into both arguments \n%\n% This is mostly for debugging purposes. It can be done more efficiently \n% by caching a matrix factorization to re-use for derivative (and also for \n% the determinant if needed). 
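% Backpropagation identities used below (standard matrix calculus for A*Y = RHS):
%   dRHS = A.'\dY         (adjoint of the solve)
%   dA   = -dRHS*Y.'      (from d(A*Y) = dA*Y + A*dY = dRHS)
% For symmetric A the transposed solve can re-use the same factorization, as noted
% in the in-line comment further down.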
\n\n\n if nargin==0\n test_this();\n return;\n end\n\n Y = A\\RHS;\n \n back = @back_this;\n \n function [dRHS,dA] = back_this(dY)\n dRHS = A.'\\dY; % A\\dY = dsolve(dY,A) can be re-used for symmetric A\n if nargout>=2\n dA = -dRHS*Y.';\n end\n end\n\nend\n\n\n% function [Y,back] = IbetaB(beta,B)\n% dim = size(B,1);\n% Y = speye(dim)+beta*B;\n% back = @(dY) trace(dY*B.');\n% end\n\n\nfunction test_this()\n\n m = 5;\n n = 2;\n A = randn(m,m);\n RHS = randn(m,n);\n \n testBackprop(@dsolve,{RHS,A});\n testBackprop_rs(@dsolve,{RHS,A},1e-4);\n \n% beta = rand/rand;\n% testBackprop(@(beta) IbetaB(beta,A),{beta});\n \n \n\nend"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "labels2blocks.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/labels2blocks.m", "size": 1058, "source_encoding": "utf_8", "md5": "4c8472730d7214ee98dda298830f8849", "text": "function [subsets,counts] = labels2blocks(labels)\n% Inputs:\n% labels: n-vector with elements in 1..m, maps each of n customers to a\n% table number. There are m tables. Empty tables not allowed. \n%\n% Ouputs:\n% subsets: n-by-m logical, with one-hot rows\n% counts: m-vector, maps table number to customer count\n\n if nargin==0\n test_this();\n return;\n end\n\n m = max(labels); %m tables\n n = length(labels); %n customers\n assert(min(labels)==1,'illegal argument ''labels'': tables must be consecutively numbered from 1');\n assert(m <= n,'illegal argument ''labels'': there are more tables than customers');\n \n subsets = bsxfun(@eq,1:m,labels(:));\n %subsets = sparse(1:n,labels,true,n,m,n);\n counts = sum(subsets,1);\n\n assert(sum(counts)==n,'illegal argument ''labels'': table counts must add up to length(labels)');\n assert(all(counts),'illegal argument ''labels'': empty tables not allowed');\n\nend\n\nfunction test_this()\n\n labels = [2,3,3,3,4];\n [subsets,counts] = labels2blocks(labels)\nend"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_BXE_calculator.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/create_BXE_calculator.m", "size": 2055, "source_encoding": "utf_8", "md5": "494fcd9ff939f75d131309b403080ae5", "text": "function calc = create_BXE_calculator(log_expectations,prior,poi)\n\n calc.BXE = @BXE;\n calc.get_tar_non = @get_tar_non;\n\n n = length(poi);\n spoi = sparse(poi);\n tar = bsxfun(@eq,spoi,spoi.');\n\n ntar = 0;\n nnon = 0;\n for k=1:n-1\n jj = k+1:n;\n tari = full(tar(k,jj));\n ntari = sum(tari);\n ntar = ntar + ntari;\n nnon = nnon + length(jj) - ntari;\n end\n \n if isempty(prior)\n prior = ntar/(ntar+nnon);\n end\n \n plo = log(prior) - log1p(-prior);\n\n \n function y = BXE(A,B)\n LEc = log_expectations(A,B);\n yt = 0;\n yn = 0;\n for i=1:n-1\n jj = i+1:n;\n AA = bsxfun(@plus,A(:,i),A(:,jj));\n BB = bsxfun(@plus,B(:,i),B(:,jj));\n tari = full(tar(i,jj));\n LE2 = log_expectations(AA,BB);\n llr = LE2 - LEc(i) - LEc(jj);\n log_post = plo + llr;\n yt = yt + sum(softplus(-log_post(tari)));\n yn = yn + sum(softplus(log_post(~tari)));\n end\n \n y = prior*yt/ntar + (1-prior)*yn/(nnon);\n \n \n end\n \n function [tars,nons] = get_tar_non(A,B)\n LEc = log_expectations(A,B);\n tars = zeros(1,ntar);\n nons = zeros(1,nnon);\n tcount = 0;\n ncount = 0;\n for i=1:n-1\n jj = i+1:n;\n AA = bsxfun(@plus,A(:,i),A(:,jj));\n BB = bsxfun(@plus,B(:,i),B(:,jj));\n tari = full(tar(i,jj));\n LE2 = log_expectations(AA,BB);\n llr = LE2 - LEc(i) - LEc(jj);\n \n llr_tar = llr(tari);\n count = length(llr_tar);\n 
tars(tcount+(1:count)) = llr_tar;\n tcount = tcount + count;\n\n llr_non = llr(~tari);\n count = length(llr_non);\n nons(ncount+(1:count)) = llr_non;\n ncount = ncount + count;\n \n end\n \n \n end\n \nend\n\nfunction y = softplus(x)\n% y = log(1+exp(x));\n y = x;\n f = find(x<30);\n y(f) = log1p(exp(x(f)));\nend"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "PLDA_mixture_responsibilities.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/PLDA_mixture_responsibilities.m", "size": 1346, "source_encoding": "utf_8", "md5": "78dfbb4de92f575f08845cbc7e0010fb", "text": "function P = PLDA_mixture_responsibilities(w,F,W,R)\n\n if nargin==0\n P = test_this();\n return\n end\n\n K = length(w);\n \n if iscell(F)\n [D,d] = size(F{1});\n else\n [D,d] = size(F);\n end\n N = size(R,2);\n\n P = zeros(K,N);\n \n Id = eye(d);\n\n for k=1:K\n if iscell(F)\n Fk = F{k};\n else\n Fk = F;\n end\n Wk = W{k};\n Bk = Fk.'*Wk*Fk;\n Gk = Wk - Wk*Fk*((Id+Bk)\\Fk.'*Wk);\n \n RGR = sum(R.*(Gk*R),1);\n logdetW = 2*sum(log(diag(chol(Wk))));\n logdetIB = 2*sum(log(diag(chol(Id+Bk))));\n \n P(k,:) = log(w(k)) + (logdetW - logdetIB - RGR)/2;\n \n end \n\n \n P = exp(bsxfun(@minus,P,max(P,[],1)));\n P = bsxfun(@rdivide,P,sum(P,1));\n \n\n\nend\n\nfunction P = test_this()\n\n close all;\n\n d = 100;\n D = 400;\n N = 1000;\n \n K = 5;\n w = ones(1,K)/K;\n W = cell(1,K);\n W{1} = eye(D);\n for k=2:K\n W{k} = 2*W{k-1};\n end\n \n %F = randn(D,d);\n F = cell(1,K);\n for k=1:K\n F{k} = randn(D,d);\n end\n\n \n Z = randn(d,N*K);\n R = randn(D,N*K);\n jj = 1:N;\n for k=1:K\n R(:,jj) = F{k}*Z(:,jj) + chol(W{k})\\randn(D,N);\n jj = jj + N;\n end\n \n P = PLDA_mixture_responsibilities(w,F,W,R);\n plot(P');\n\n\n\n\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_partition_posterior_calculator.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/create_partition_posterior_calculator.m", "size": 4076, "source_encoding": "utf_8", "md5": "32fda68f00bdccc246e56e3db2e0babe", "text": "function calc = create_partition_posterior_calculator(log_expectations,prior,poi)\n% Inputs:\n% log_expectations: function handle, maps matrices of additive natural \n% parameters to log-expectations\n% prior: Exchangeable prior over partitions, for example CRP. It needs to\n% implement prior.logprob(counts), where counts are the number of \n% customers per table (partition block sizes).\n% poi: partition of interest, given as an n-vector of table assignments,\n% where there are n customers. The tables are numbered 1 to m. 
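% Typical usage (illustrative sketch only; the log-expectation handle and the CRP
% prior are assumed to come from elsewhere in this code base, e.g.
% create_SGME_calculator and create_PYCRP):
%   calc = create_partition_posterior_calculator(SGME.log_expectations, prior, poi);
%   y    = calc.logPostPoi(A,B);   % log-posterior of the partition of interest
%   f    = calc.logPost(A,B);      % handle: f(labels) gives the log-posterior of any partition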
\n \n if nargin==0\n test_this();\n return;\n end\n\n n = length(poi); %number of customers\n \n %Generate flags for all possible (non-empty) subsets\n ns = 2^n-1; %number of non-empty customer subsets\n subsets = logical(mod(fix(bsxfun(@rdivide,0:ns,2.^(0:n-1)')),2));\n subsets = subsets(:,2:end); % dump empty subset\n %subsets = sparse(subsets);\n \n %maps partition to flags indicating subsets (blocks)\n % also returns table counts\n function [flags,counts] = labels2weights(labels)\n [blocks,counts] = labels2blocks(labels);\n %blocks = sparse(blocks);\n [tf,loc] = ismember(blocks',subsets','rows'); %seems faster with full matrices\n assert(all(tf));\n flags = false(ns,1);\n flags(loc) = true; \n end\n \n [poi_weights,counts] = labels2weights(poi);\n log_prior_poi = prior.logprob(counts);\n \n \n %precompute weights and prior for every partition\n Bn = Bell(n);\n PI = create_partition_iterator(n);\n Weights = false(ns,Bn);\n log_prior = zeros(1,Bn);\n for j=1:Bn\n labels = PI.next();\n [Weights(:,j),counts] = labels2weights(labels);\n log_prior(j) = prior.logprob(counts);\n end\n \n Weights = sparse(Weights);\n subsets = sparse(subsets);\n poi_weights = sparse(poi_weights);\n \n calc.logPost = @logPost;\n calc.logPostPoi = @logPostPoi;\n \n \n function y = logPostPoi(A,B)\n % Inputs:\n % A,B: n-column matrices of natural parameters for n meta-embeddings\n % Output:\n % y: log P(poi | A,B, prior)\n \n assert(size(B,2)==n && size(A,2)==n);\n\n\n %compute subset likelihoods\n log_ex = log_expectations(A*subsets,B*subsets); \n\n %compute posterior\n num = log_prior_poi + log_ex*poi_weights;\n dens = log_prior + log_ex*Weights;\n maxden = max(dens);\n den = maxden+log(sum(exp(dens-maxden)));\n y = num - den;\n \n end\n\n function f = logPost(A,B)\n % Inputs:\n % A,B: n-column matrices of natural parameters for n meta-embeddings\n % Output:\n % y: log P(poi | A,B, prior)\n \n assert(size(B,2)==n && size(A,2)==n);\n\n\n %compute subset likelihoods\n log_ex = log_expectations(A*subsets,B*subsets); \n \n llh = log_ex*Weights;\n den = log_prior + llh; \n maxden = max(den);\n den = maxden+log(sum(exp(den-maxden)));\n \n function y = logpost_this(poi)\n [poi_weights,counts] = labels2weights(poi);\n log_prior_poi = prior.logprob(counts);\n num = log_prior_poi + log_ex*poi_weights;\n y = num - den;\n end\n \n f = @logpost_this;\n \n end\n\n\n\nend\n\nfunction test_this\n\n \n Mu = [-1 0 -1.1; 0 -3 0];\n C = [3 1 3; 1 1 1];\n A = Mu./C;\n B = zeros(4,3);\n B(1,:) = 1./C(1,:);\n B(4,:) = 1./C(2,:);\n scale = 3;\n B = B * scale;\n C = C / scale;\n \n \n close all;\n figure;hold;\n plotGaussian(Mu(:,1),diag(C(:,1)),'blue','b');\n plotGaussian(Mu(:,2),diag(C(:,2)),'red','r');\n plotGaussian(Mu(:,3),diag(C(:,3)),'green','g');\n axis('square');\n axis('equal');\n\n \n poi = [1 1 2];\n %prior = create_PYCRP(0,[],2,3);\n %prior = create_PYCRP([],0,2,3);\n \n create_flat_partition_prior(length(poi));\n \n calc = create_partition_posterior_calculator(prior,poi);\n f = calc.logPost(A,B);\n exp([f([1 1 2]), f([1 1 1]), f([1 2 3]), f([1 2 2]), f([1 2 1])])\n \n \nend\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_train_BXE.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_train_BXE.m", "size": 2434, "source_encoding": "utf_8", "md5": "4fb4ed77b580dc09d69346bc07a2cd16", "text": "function model = SGME_train_BXE(R,labels,nu,zdim,niters,timeout,test)\n\n\n\n if nargin==0\n test_this();\n return;\n end\n\n\n [rdim,n] = size(R);\n spoi = 
sparse(labels);\n tar = bsxfun(@eq,spoi,spoi.');\n\n ntar = 0;\n nnon = 0;\n for k=1:n-1\n jj = k+1:n;\n tari = full(tar(k,jj));\n ntari = sum(tari);\n ntar = ntar + ntari;\n nnon = nnon + length(jj) - ntari;\n end\n \n prior = ntar/(ntar+nnon);\n plo = log(prior) - log1p(-prior);\n \n wt = prior/ntar;\n wn = (1-prior)/nnon;\n \n delta = rdim - zdim;\n assert(delta>0);\n \n %initialize\n P0 = randn(zdim,rdim);\n H0 = randn(delta,rdim);\n sqrtd0 = rand(zdim,1);\n \n szP = numel(P0);\n szH = numel(H0);\n \n \n w0 = pack(P0,H0,sqrtd0);\n\n if exist('test','var') && test\n testBackprop(@objective,w0);\n return;\n end\n \n mem = 20;\n stpsz0 = 1e-3;\n %timeout = 5*60;\n \n \n w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);\n \n [P,H,sqrtd] = unpack(w);\n d = sqrtd.^2;\n \n model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);\n model.extract = @(R) SGME_extract(P,H,nu,R);\n model.d = d;\n \n \n function w = pack(P,H,d)\n w = [P(:);H(:);d(:)];\n end\n\n function [P,H,d] = unpack(w)\n at = 1:szP;\n P = reshape(w(at),zdim,rdim);\n at = szP + (1:szH);\n H = reshape(w(at),delta,rdim);\n at = szP + szH + (1:zdim);\n d = w(at);\n \n end\n \n \n \n \n function [y,back] = objective(w)\n \n [P,H,sqrtd] = unpack(w);\n \n [A,b,back1] = SGME_extract(P,H,nu,R);\n \n d = sqrtd.^2;\n \n [y,back2] = SGME_BXE(A,b,d,plo,wt,wn,tar);\n \n \n back = @back_this;\n \n function [dw] = back_this(dy)\n [dA,db,dd] = back2(dy);\n dsqrtd = 2*sqrtd.*dd;\n [dP,dH] = back1(dA,db);\n dw = pack(dP,dH,dsqrtd);\n \n end\n \n \n end\n\n\n\n\n\n\nend\n\nfunction test_this()\n\n zdim = 2;\n rdim = 4;\n n = 10;\n m = 3;\n \n prior = create_PYCRP([],0,m,n); \n while true\n labels = prior.sample(n);\n if max(labels) > 1\n break;\n end\n end\n\n nu = pi;\n R = randn(rdim,n);\n\n test = true;\n niters = [];\n timeout = [];\n SGME_train_BXE(R,labels,nu,zdim,niters,timeout,test);\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_extract.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_extract.m", "size": 1065, "source_encoding": "utf_8", "md5": "b9106e80e9a78235222680c566b510fd", "text": "function [A,b,back] = SGME_extract(P,H,nu,R)\n\n if nargin==0\n test_this();\n return;\n end\n\n [zdim,rdim] = size(P);\n nuprime = nu + rdim - zdim;\n\n HR = H*R;\n q = sum(HR.^2,1);\n den = nu + q;\n b = nuprime./den;\n\n M = P*R;\n A = bsxfun(@times,b,M);\n \n \n back = @back_this;\n \n \n function [dP,dH] = back_this(dA,db)\n \n %A = bsxfun(@times,b,M);\n db = db + sum(dA.*M,1);\n dM = bsxfun(@times,b,dA);\n \n %M = P*R;\n dP = dM*R.';\n \n %b = nuprime./den;\n dden = -db.*b./den;\n \n %den = nu + q;\n dq = dden;\n \n %q = sum(HR.^2,1);\n dHR = bsxfun(@times,2*dq,HR);\n \n %HR = H*R;\n dH = dHR*R.';\n \n \n \n end\n \n \n\nend\n\n\nfunction test_this()\n\n zdim = 2;\n rdim = 4;\n n = 5;\n P = randn(zdim,rdim);\n H = randn(rdim-zdim,rdim);\n \n nu = pi;\n R = randn(rdim,n);\n \n f = @(P,H) SGME_extract(P,H,nu,R);\n \n testBackprop_multi(f,2,{P,H});\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sumlogsumexp.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/sumlogsumexp.m", "size": 455, "source_encoding": "utf_8", "md5": "cccd5f3ae0b7894b95682910eba4a060", "text": "function [y,back] = sumlogsumexp(X)\n\n if nargin==0\n test_this();\n return;\n end\n\n mx = max(real(X),[],1);\n yy = mx + log(sum(exp(bsxfun(@minus,X,mx)),1));\n y = sum(yy,2); \n \n back = @back_this;\n \n function dX = 
back_this(dy)\n dX = dy*exp(bsxfun(@minus,X,yy));\n \n end\n\n\nend\n\n\nfunction test_this()\n\n m = 3;\n n = 5;\n X = randn(m,n);\n testBackprop(@(X)sumlogsumexp(X),X);\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_logexpectation.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_logexpectation.m", "size": 1796, "source_encoding": "utf_8", "md5": "46f79c08a985ae0a1833cad86fb74983", "text": "function [y,back] = SGME_logexpectation(A,b,d)\n% log expected values (w.r.t. standard normal) of diagonalized SGMEs\n% Inputs:\n% A: dim-by-n, natural parameters (precision *mean) for n SGMEs \n% b: 1-by-n, precision scale factors for these SGMEs\n% d: dim-by-1, common diagonal precision \n%\n% Note:\n% bsxfun(@times,b,d) is dim-by-n precision diagonals for the n SGMEs \n%\n% Outputs:\n% y: 1-by-n, log expectations\n% back: backpropagation handle, [dA,db,dd] = back(dy)\n\n\n if nargin==0\n test_this();\n return;\n end\n\n \n\n bd = bsxfun(@times,b,d);\n logdets = sum(log1p(bd),1);\n den = 1 + bd;\n Aden = A./den;\n Q = sum(A.*Aden,1); %Q = sum((A.^2)./den,1);\n y = (Q-logdets)/2;\n\n back = @back_this;\n\n\n function [dA,db,dd] = back_this(dy)\n dQ = dy/2;\n %dlogdets = - dQ;\n\n dAden = bsxfun(@times,dQ,A); \n dA = bsxfun(@times,dQ,Aden); \n\n dA2 = dAden./den;\n dA = dA + dA2; \n dden = -Aden.*dA2;\n\n dbd = dden - bsxfun(@rdivide,dQ,den); %dlogdets = -dQ\n\n db = d.' * dbd;\n dd = dbd * b.';\n \n end\n\n\nend\n\nfunction test_this0()\n\n m = 3;\n n = 5;\n A = randn(m,n);\n b = rand(1,n);\n d = rand(m,1);\n \n \n testBackprop(@SGME_logexpectation,{A,b,d},{1,1,1});\n\nend\n\n\n\n\n\nfunction test_this()\n\n %em = 4;\n n = 7;\n dim = 2;\n \n %prior = create_PYCRP([],0,em,n);\n %poi = prior.sample(n);\n %m = max(poi);\n %blocks = sparse(poi,1:n,true,m+1,n); \n %num = find(blocks(:)); \n \n %logPrior = prior.GibbsMatrix(poi); \n\n d = rand(dim,1);\n A = randn(dim,n);\n b = rand(1,n);\n \n \n f = @(A,b,d) SGME_logexpectation(A,b,d);\n testBackprop(f,{A,b,d},{1,1,1});\n\n \n \n \n \n \nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_train_MXE.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_train_MXE.m", "size": 2514, "source_encoding": "utf_8", "md5": "939eef34cb61a4493dfe9c98a11d633c", "text": "function model = SGME_train_MXE(R,labels,nu,zdim,niters,timeout,test)\n\n\n\n if nargin==0\n test_this();\n return;\n end\n\n\n [rdim,n] = size(R);\n m = max(labels);\n blocks = sparse(labels,1:n,true,m,n); \n counts = sum(blocks,2);\n logPrior = [log(counts);-inf];\n \n \n \n delta = rdim - zdim;\n assert(delta>0);\n \n %initialize\n P0 = randn(zdim,rdim);\n H0 = randn(delta,rdim);\n sqrtd0 = rand(zdim,1);\n As0 = randn(zdim,m);\n sqrtBs0 = randn(1,m);\n \n szP = numel(P0);\n szH = numel(H0);\n szd = numel(sqrtd0);\n szAs = numel(As0);\n szBs = numel(sqrtBs0);\n \n \n w0 = pack(P0,H0,sqrtd0,As0,sqrtBs0);\n\n if exist('test','var') && test\n testBackprop(@objective,w0);\n return;\n end\n \n mem = 20;\n stpsz0 = 1e-3;\n %timeout = 5*60;\n \n \n w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);\n \n [P,H,sqrtd,As,sqrtBs] = unpack(w);\n d = sqrtd.^2;\n \n model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);\n model.extract = @(R) SGME_extract(P,H,nu,R);\n model.d = d;\n \n \n function w = pack(P,H,d,As,Bs)\n w = [P(:);H(:);d(:);As(:);Bs(:)];\n end\n\n function [P,H,d,As,Bs] = unpack(w)\n at = 1:szP;\n P = reshape(w(at),zdim,rdim);\n at = szP + 
(1:szH);\n H = reshape(w(at),delta,rdim);\n at = szP + szH + (1:szd);\n d = w(at);\n at = szP + szH + szd + (1:szAs);\n As = reshape(w(at),zdim,m);\n at = szP + szH + szd + szAs + (1:szBs);\n Bs = w(at).';\n \n end\n \n \n \n \n function [y,back] = objective(w)\n \n [P,H,sqrtd,As,sqrtBs] = unpack(w);\n \n [A,b,back1] = SGME_extract(P,H,nu,R);\n \n d = sqrtd.^2;\n Bs = sqrtBs.^2;\n \n [y,back2] = SGME_MXE(A,b,d,As,Bs,labels,logPrior);\n \n \n back = @back_this;\n \n function [dw] = back_this(dy)\n [dA,db,dd,dAs,dBs] = back2(dy);\n dsqrtd = 2*sqrtd.*dd;\n dsqrtBs = 2*sqrtBs.*dBs;\n [dP,dH] = back1(dA,db);\n dw = pack(dP,dH,dsqrtd,dAs,dsqrtBs);\n \n end\n \n \n end\n\n\n\n\n\n\nend\n\nfunction test_this()\n\n zdim = 2;\n rdim = 4;\n n = 5;\n m = 3;\n \n prior = create_PYCRP([],0,m,n); \n labels = prior.sample(n);\n\n nu = pi;\n R = randn(rdim,n);\n\n test = true;\n niters = [];\n timeout = [];\n SGME_train_MXE(R,labels,nu,zdim,niters,timeout,test);\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_BXE.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_BXE.m", "size": 1927, "source_encoding": "utf_8", "md5": "43f8a07c46e1df00ef02abdfbbc38dde", "text": "function [y,back] = SGME_BXE(A,B,D,plo,wt,wn,tar)\n\n\n if nargin==0\n test_this();\n return;\n end\n \n n = size(A,2); \n\n [LEc,back1] = SGME_logexpectation(A,B,D);\n y = 0;\n dA = zeros(size(A));\n dB = zeros(size(B));\n dLEc = zeros(size(LEc));\n dD = zeros(size(D));\n for i=1:n-1\n jj = i+1:n;\n AA = bsxfun(@plus,A(:,i),A(:,jj));\n BB = bsxfun(@plus,B(:,i),B(:,jj));\n tari = full(tar(i,jj));\n [LE2,back2] = SGME_logexpectation(AA,BB,D);\n llr = LE2 - LEc(i) - LEc(jj);\n \n arg_tar = -plo - llr(tari);\n noni = ~tari;\n arg_non = plo + llr(noni);\n \n y = y + wt*sum(softplus(arg_tar));\n y = y + wn*sum(softplus(arg_non));\n \n dllr = zeros(size(llr));\n dllr(tari) = (-wt)*sigmoid(arg_tar);\n dllr(noni) = wn*sigmoid(arg_non);\n \n dLE2 = dllr;\n dLEc(i) = dLEc(i) - sum(dllr);\n dLEc(jj) = dLEc(jj) - dllr;\n \n [dAA,dBB,dD2] = back2(dLE2);\n dD = dD + dD2;\n dA(:,i) = dA(:,i) + sum(dAA,2);\n dB(:,i) = dB(:,i) + sum(dBB,2);\n dA(:,jj) = dA(:,jj) + dAA;\n dB(:,jj) = dB(:,jj) + dBB;\n \n \n \n end\n\n back = @(dy) back_this(dy,dA,dB,dLEc,dD);\n \n function [dA,dB,dD] = back_this(dy,dA,dB,dLEc,dD)\n [dA1,dB1,dD1] = back1(dLEc);\n dA = dy*(dA + dA1);\n dB = dy*(dB + dB1);\n dD = dy*(dD + dD1);\n end\n \n \n \n\n\n\n\nend\n\n\nfunction y = sigmoid(x)\n y = 1./(1+exp(-x));\nend\n\nfunction y = softplus(x)\n% y = log(1+exp(x));\n y = x;\n f = find(x<30);\n y(f) = log1p(exp(x(f)));\nend\n\n\n\nfunction test_this()\n\n zdim = 2;\n n = 5;\n A = randn(zdim,n);\n B = rand(1,n);\n plo = randn;\n wt = rand;\n wn = rand;\n tar = sparse(randn(n)>0);\n D = rand(zdim,1);\n\n f = @(A,B,D) SGME_BXE(A,B,D,plo,wt,wn,tar);\n \n testBackprop(f,{A,B,D});\n \n \nend\n\n\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "plotGaussian.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/plotGaussian.m", "size": 1323, "source_encoding": "utf_8", "md5": "16ea9cd804af31a79f3ccd3cf5687a49", "text": "function tikz = plotGaussian(mu,C,colr,c)\n \n if nargin==0\n test_this();\n return;\n end\n\n if isempty(C) %assume mu is a GME\n [mu,C] = mu.get_mu_cov();\n end\n \n [V,D] = eig(C);\n \n v1 = V(:,1);\n v2 = V(:,2);\n if all(v1>=0)\n r1 = sqrt(D(1,1));\n r2 = sqrt(D(2,2));\n rotate = acos(v1(1))*180/pi;\n elseif all(-v1>=0)\n r1 = 
sqrt(D(1,1));\n r2 = sqrt(D(2,2));\n rotate = acos(-v1(1))*180/pi;\n elseif all(v2>=0)\n r1 = sqrt(D(2,2));\n r2 = sqrt(D(1,1));\n rotate = acos(v2(1))*180/pi;\n else\n r1 = sqrt(D(2,2));\n r2 = sqrt(D(1,1));\n rotate = acos(-v2(1))*180/pi;\n end\n \n if ~isempty(colr)\n tikz = sprintf('\\\\draw[rotate around ={%4.3g:(%4.3g,%4.3g)},%s] (%4.3g,%4.3g) ellipse [x radius=%4.3g, y radius=%4.3g];\\n',rotate,mu(1),mu(2),colr,mu(1),mu(2),r1,r2);\n fprintf('%s',tikz);\n end\n \n theta = (0:100)*2*pi/100;\n circle = [cos(theta);sin(theta)];\n ellipse = bsxfun(@plus,mu,V*sqrt(D)*circle);\n plot(ellipse(1,:),ellipse(2,:),c);\n \n \n \n\n\nend\n\nfunction test_this\n close all;\n\n %B = 2*eye(2) + ones(2);\n B = 2*eye(2) + [1,-1;-1,1];\n mu = [1;2];\n \n figure;hold;\n axis('equal');\n axis('square');\n plotGaussian(mu,B,'blue','b')\nend"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_HTPLDA_extractor.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/create_HTPLDA_extractor.m", "size": 5955, "source_encoding": "utf_8", "md5": "1304b09dbdcd66e16a53851e8e270761", "text": "function HTPLDA = create_HTPLDA_extractor(F,nu,W)\n\n if nargin==0\n test_PsL();\n %test_this();\n return;\n end\n\n [rdim,zdim] = size(F);\n assert(rdim>zdim);\n nu_prime = nu + rdim - zdim;\n \n if ~exist('W','var') || isempty(W)\n W = speye(rdim);\n end\n \n E = F.'*W*F;\n G = W - W*F*(E\\F.')*W;\n\n SGME = create_SGME_calculator(E);\n \n V = SGME.V; % E = VDV'\n VFW = V.'*F.'*W;\n \n HTPLDA.extractSGMEs = @extractSGMEs;\n HTPLDA.SGME = SGME;\n HTPLDA.plot_database = @plot_database;\n HTPLDA.getPHd = @getPHd;\n \n function [P,H,d] = getPHd()\n P = VFW;\n H = G;\n %HH = H'*H;\n d = SGME.d;\n end\n \n \n function [A,b] = extractSGMEs(R)\n q = sum(R.*(G*R),1);\n b = nu_prime./(nu+q);\n A = bsxfun(@times,b,VFW*R);\n end\n \n matlab_colours = {'r','g','b','m','c','k',':r',':g',':b',':m',':c',':k'}; \n tikz_colours = {'red','green','blue','magenta','cyan','black','red, dotted','green, dotted','blue, dotted','magenta, dotted','cyan, dotted','black, dotted'}; \n\n\n function plot_database(R,labels,Z)\n assert(max(labels) <= length(matlab_colours),'not enough colours to plot all speakers');\n [A,b] = extractSGMEs(R);\n %SGME.plotAll(A,b,matlab_colours(labels), tikz_colours(labels));\n SGME.plotAll(A,b,matlab_colours(labels), []);\n if exist('Z','var') && ~isempty(Z)\n for i=1:size(Z,2)\n plot(Z(1,i),Z(2,i),[matlab_colours{i},'*']);\n end\n end\n \n end\n \n\nend\n\nfunction test_this()\n\n zdim = 2;\n xdim = 20; %required: xdim > zdim\n nu = 3; %required: nu >= 1, integer, DF\n fscal = 3; %increase fscal to move speakers apart\n \n F = randn(xdim,zdim)*fscal;\n\n \n HTPLDA = create_HTPLDA_extractor(F,nu);\n SGME = HTPLDA.SGME;\n \n %labels = [1,2,2];\n %[R,Z,precisions] = sample_HTPLDA_database(nu,F,labels);\n \n \n n = 8;\n m = 5;\n %prior = create_PYCRP(0,[],m,n);\n prior = create_PYCRP([],0,m,n);\n [R,Z,precisions,labels] = sample_HTPLDA_database(nu,F,prior,n);\n fprintf('there are %i speakers\\n',max(labels));\n \n [A,b] = HTPLDA.extractSGMEs(R);\n \n rotate = true;\n [Ap,Bp] = SGME.SGME2GME(A,b,rotate);\n\n close all;\n figure;hold;\n plotGaussian(zeros(zdim,1),eye(zdim),'black, dashed','k--');\n \n %matlab_colours = {'b','r','r'};\n %tikz_colours = {'blue','red','red'};\n %SGME.plotAll(A,b,matlab_colours, tikz_colours, rotate);\n \n \n HTPLDA.plot_database(R,labels,Z);\n axis('square');axis('equal');\n \n calc1 = 
create_partition_posterior_calculator(SGME.log_expectations,prior,labels);\n calc2 = create_pseudolikelihood_calculator(SGME.log_expectations,prior,labels);\n calc3 = create_BXE_calculator(SGME.log_expectations,[],labels);\n \n scale = exp(-5:0.1:5);\n MCL = zeros(size(scale));\n PsL = zeros(size(scale));\n slowPsL = zeros(size(scale));\n BXE = zeros(size(scale));\n tic;\n for i=1:length(scale)\n MCL(i) = - calc1.logPostPoi(scale(i)*A,scale(i)*b);\n end\n toc\n\n tic;\n for i=1:length(scale)\n BXE(i) = calc3.BXE(scale(i)*A,scale(i)*b);\n end\n toc\n \n tic;\n for i=1:length(scale)\n slowPsL(i) = - calc2.slow_log_pseudo_likelihood(scale(i)*A,scale(i)*b);\n end\n toc\n\n tic;\n for i=1:length(scale)\n PsL(i) = - calc2.log_pseudo_likelihood(scale(i)*A,scale(i)*b);\n end\n toc\n\n \n \n \n figure;\n %subplot(2,1,1);semilogx(scale,MCL);title('MCL')\n %subplot(2,1,2);semilogx(scale,PsL);title('PsL');\n subplot(2,1,1);semilogx(scale,MCL,scale,slowPsL,scale,PsL,'--');legend('MCL','slowPsL','PsL');\n subplot(2,1,2);semilogx(scale,BXE);legend('BXE');\n \n %[precisions;b]\n \n %[plain_GME_log_expectations(Ap,Bp);SGME.log_expectations(A,b)]\n \n \n \nend\n\nfunction test_PsL()\n\n zdim = 2;\n xdim = 20; %required: xdim > zdim\n nu = 3; %required: nu >= 1, integer, DF\n fscal = 3; %increase fscal to move speakers apart\n \n F = randn(xdim,zdim)*fscal;\n\n \n HTPLDA = create_HTPLDA_extractor(F,nu);\n SGME = HTPLDA.SGME;\n \n \n n = 1000;\n m = 100;\n %prior = create_PYCRP(0,[],m,n);\n prior = create_PYCRP([],0,m,n);\n [R,Z,precisions,labels] = sample_HTPLDA_database(nu,F,prior,n);\n fprintf('there are %i speakers\\n',max(labels));\n \n [A,b] = HTPLDA.extractSGMEs(R);\n \n rotate = true;\n [Ap,Bp] = SGME.SGME2GME(A,b,rotate);\n\n close all;\n \n if zdim==2 && max(labels)<=12\n figure;hold;\n plotGaussian(zeros(zdim,1),eye(zdim),'black, dashed','k--');\n\n HTPLDA.plot_database(R,labels,Z);\n axis('square');axis('equal');\n end\n \n tic;calc0 = create_pseudolikelihood_calculator_old(SGME.log_expectations,prior,labels);toc\n tic;calc1 = create_pseudolikelihood_calculator(SGME.log_expectations,prior,labels);toc;\n tic;calc2 = create_BXE_calculator(SGME.log_expectations,[],labels);toc\n \n scale = exp(-5:0.1:5);\n oldPsL = zeros(size(scale));\n PsL = zeros(size(scale));\n BXE = zeros(size(scale));\n\n% tic;\n% for i=1:length(scale)\n% slowPsL(i) = - calc1.slow_log_pseudo_likelihood(scale(i)*A,scale(i)*b);\n% end\n% toc\n \n tic;\n for i=1:length(scale)\n oldPsL(i) = - calc0.log_pseudo_likelihood(scale(i)*A,scale(i)*b);\n end\n toc\n\n tic;\n for i=1:length(scale)\n PsL(i) = - calc1.log_pseudo_likelihood(scale(i)*A,scale(i)*b);\n end\n toc\n \n \n% tic;\n% for i=1:length(scale)\n% BXE(i) = calc2.BXE(scale(i)*A,scale(i)*b);\n% end\n% toc\n\n figure;\n subplot(2,1,1);semilogx(scale,oldPsL,scale,PsL,'r--');legend('oldPsL','PsL');\n subplot(2,1,2);semilogx(scale,BXE);title('BXE');\n \n %[precisions;b]\n \n %[plain_GME_log_expectations(Ap,Bp);SGME.log_expectations(A,b)]\n \n \n \nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_MXE2.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_MXE2.m", "size": 1787, "source_encoding": "utf_8", "md5": "353320c477be13a9cd785ec811fdd210", "text": "function [y,back] = SGME_MXE2(A,B,D,As,Bs,labels,logPrior)\n\n if nargin==0\n test_this();\n return;\n end\n\n\n dA = zeros(size(A));\n dB = zeros(size(B));\n dD = zeros(size(D));\n dAs = zeros(size(As));\n dBs = zeros(size(Bs));\n\n \n \n [LEs,back2] = 
SGME_logexpectation(As,Bs,D);\n\n dLEs = zeros(size(LEs));\n \n\n m = length(LEs); % #speakers\n n = size(A,2); % #recordings\n\n scal = 1/(n*log(m));\n\n \n \n y = 0;\n for j=1:n\n AA = bsxfun(@plus,As,A(:,j)); \n BB = bsxfun(@plus,Bs,B(:,j)); \n [LEboth,back3] = SGME_logexpectation(AA,BB,D); \n logPost = logPrior + LEboth.' - LEs.'; \n [yj,back4] = sumlogsoftmax(logPost,labels(j));\n y = y - yj;\n \n \n dlogPost = back4(-1);\n dLEs = dLEs - dlogPost.';\n dLEboth = dlogPost.';\n [dAA,dBB,dDj] = back3(dLEboth);\n dD = dD + dDj;\n dAs = dAs + dAA;\n dBs = dBs + dBB;\n dA(:,j) = sum(dAA,2);\n dB(:,j) = sum(dBB,2);\n end\n\n y = y*scal;\n\n back = @(dy) back_this(dy,dA,dB,dD,dAs,dBs);\n \n function [dA,dB,dD,dAs,dBs] = back_this(dy,dA,dB,dD,dAs,dBs)\n\n %[LEs,back2] = SGME_logexpectation(As,Bs,D).';\n [dAs2,dBs2,dD2] = back2(dLEs);\n dA = (dy*scal) * dA;\n dB = (dy*scal) * dB;\n dD = (dy*scal) * (dD + dD2);\n dAs = (dy*scal) * (dAs + dAs2);\n dBs = (dy*scal) * (dBs + dBs2);\n \n end\n \n \n\nend\n\nfunction test_this()\n\n m = 3;\n n = 5;\n dim = 2;\n \n A = randn(dim,n);\n As = randn(dim,m);\n B = rand(1,n);\n Bs = rand(1,m);\n D = rand(dim,1);\n logPrior = randn(m,1);\n labels = randi(m,1,n);\n\n \n f = @(A,B,D,As,Bs) SGME_MXE2(A,B,D,As,Bs,labels,logPrior);\n testBackprop(f,{A,B,D,As,Bs});\n\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_train_MXE2.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_train_MXE2.m", "size": 2510, "source_encoding": "utf_8", "md5": "b71a75273c325f1e45edf8af7e971f30", "text": "function model = SGME_train_MXE2(R,labels,nu,zdim,niters,timeout,test)\n\n\n\n if nargin==0\n test_this();\n return;\n end\n\n\n [rdim,n] = size(R);\n m = max(labels);\n blocks = sparse(labels,1:n,true,m,n); \n counts = sum(blocks,2);\n logPrior = log(counts);\n \n \n \n delta = rdim - zdim;\n assert(delta>0);\n \n %initialize\n P0 = randn(zdim,rdim);\n H0 = randn(delta,rdim);\n sqrtd0 = rand(zdim,1);\n As0 = randn(zdim,m);\n sqrtBs0 = randn(1,m);\n \n szP = numel(P0);\n szH = numel(H0);\n szd = numel(sqrtd0);\n szAs = numel(As0);\n szBs = numel(sqrtBs0);\n \n \n w0 = pack(P0,H0,sqrtd0,As0,sqrtBs0);\n\n if exist('test','var') && test\n testBackprop(@objective,w0);\n return;\n end\n \n mem = 20;\n stpsz0 = 1e-3;\n %timeout = 5*60;\n \n \n w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);\n \n [P,H,sqrtd,As,sqrtBs] = unpack(w);\n d = sqrtd.^2;\n \n model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);\n model.extract = @(R) SGME_extract(P,H,nu,R);\n model.d = d;\n \n \n function w = pack(P,H,d,As,Bs)\n w = [P(:);H(:);d(:);As(:);Bs(:)];\n end\n\n function [P,H,d,As,Bs] = unpack(w)\n at = 1:szP;\n P = reshape(w(at),zdim,rdim);\n at = szP + (1:szH);\n H = reshape(w(at),delta,rdim);\n at = szP + szH + (1:szd);\n d = w(at);\n at = szP + szH + szd + (1:szAs);\n As = reshape(w(at),zdim,m);\n at = szP + szH + szd + szAs + (1:szBs);\n Bs = w(at).';\n \n end\n \n \n \n \n function [y,back] = objective(w)\n \n [P,H,sqrtd,As,sqrtBs] = unpack(w);\n \n [A,b,back1] = SGME_extract(P,H,nu,R);\n \n d = sqrtd.^2;\n Bs = sqrtBs.^2;\n \n [y,back2] = SGME_MXE2(A,b,d,As,Bs,labels,logPrior);\n \n \n back = @back_this;\n \n function [dw] = back_this(dy)\n [dA,db,dd,dAs,dBs] = back2(dy);\n dsqrtd = 2*sqrtd.*dd;\n dsqrtBs = 2*sqrtBs.*dBs;\n [dP,dH] = back1(dA,db);\n dw = pack(dP,dH,dsqrtd,dAs,dsqrtBs);\n \n end\n \n \n end\n\n\n\n\n\n\nend\n\nfunction test_this()\n\n zdim = 2;\n rdim = 4;\n n = 5;\n m = 3;\n \n prior = 
create_PYCRP([],0,m,n); \n labels = prior.sample(n);\n\n nu = pi;\n R = randn(rdim,n);\n\n test = true;\n niters = [];\n timeout = [];\n SGME_train_MXE2(R,labels,nu,zdim,niters,timeout,test);\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "asChol.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/asChol.m", "size": 2365, "source_encoding": "utf_8", "md5": "ea86b12ae1d2edfe698ac2881861b35f", "text": "function CA = asChol(A)\n\n if nargin==0\n test_this();\n return;\n end\n\n if isreal(A)\n C = chol(A); %C'C = A\n r = true;\n else\n [L,U] = lu(A); % LU = A\n r = false;\n end\n \n dim = size(A,1);\n \n CA.logdet = @logdet;\n CA.solve = @solve;\n \n \n function [y,back] = logdet()\n if r\n y = 2*sum(log(diag(C)));\n else\n y = sum(log(diag(U).^2))/2;\n end\n back = @(dy) solve(dy*speye(dim));\n end\n\n\n\n\n function [Y,back] = solve(RHS)\n if r\n Y = C\\(C'\\RHS);\n else\n Y = U\\(L\\RHS);\n end\n back = @(dY) back_solve(dY,Y);\n end\n\n function Y = solveT(RHS) %A'\\RHS, for LU case\n Y = L.'\\(U.'\\RHS);\n end\n\n function [dRHS,dA] = back_solve(dY,Y)\n if r\n dRHS = solve(dY); \n if nargout >= 2\n dA = (-dRHS)*Y.';\n end\n else \n dRHS = solveT(dY);\n if nargout >= 2\n dA = (-dRHS)*Y.';\n end\n end\n end\n\n\n\n\nend\n\n\nfunction [y,back] = logdettestfun(A)\n CA = asChol(A*A.');\n [y,back1] = CA.logdet();\n sym = @(DY) DY + DY.';\n back =@(dy) sym(back1(dy))*A;\nend\n\nfunction [Y,back] = solvetestfun(RHS,A)\n CA = asChol(A*A.');\n [Y,back1] = CA.solve(RHS);\n \n back =@(dY) back_solvetestfun(dY);\n \n function [dRHS,dA] = back_solvetestfun(dY)\n [dRHS,dAA] = back1(dY);\n dA = (dAA+dAA.')*A;\n end\nend\n\n\nfunction test_this()\n\n fprintf('Test function values:\\n');\n dim = 5;\n RHS = rand(dim,1);\n \n A = randn(dim);A = A*A';\n \n \n CA = asChol(A);\n \n [log(det(A)),CA.logdet()]\n [A\\RHS,CA.solve(RHS)]\n\n\n A = complex(randn(dim),zeros(dim));\n CA = asChol(A);\n \n [log(abs(det(A))),CA.logdet()]\n [A\\RHS,CA.solve(RHS)]\n\n \n \n A = randn(dim,2*dim);A = A*A';\n fprintf('\\n\\n\\nTest logdet backprop (complex step) :\\n');\n testBackprop(@logdettestfun,A); \n fprintf('\\n\\n\\nTest logdet backprop (real step) :\\n');\n testBackprop_rs(@logdettestfun,A,1e-4); \n \n \n fprintf('\\n\\n\\nTest solve backprop (complex step) :\\n');\n testBackprop(@solvetestfun,{RHS,A},{1,1});\n\n fprintf('\\n\\n\\nTest solve backprop (real step) :\\n');\n testBackprop_rs(@solvetestfun,{RHS,A},1e-4,{1,1});\n \nend\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "SGME_logPsL.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/SGME_logPsL.m", "size": 3902, "source_encoding": "utf_8", "md5": "2459f9858e466eb1e4b939681dce8f05", "text": "function [y,back] = SGME_logPsL(A,B,d,blocks,poi,num,logPrior)\n \n if nargin==0\n test_this();\n return;\n end\n\n \n if isempty(blocks)\n m = max(poi);\n n = length(poi);\n blocks = sparse(poi,1:n,true,m+1,n);\n num = find(blocks(:));\n else\n m = size(blocks,1) - 1;\n end\n\n if isstruct(logPrior) % then it is prior\n prior = logPrior;\n logPrior = prior.GibbsMatrix(poi);\n end\n \n \n At = A*blocks.';\n Bt = B*blocks.';\n [LEt,back1] = SGME_logexpectation(At,Bt,d);\n \n \n [LEc,back2] = SGME_logexpectation(A,B,d);\n \n Amin = At(:,poi) - A;\n Bmin = Bt(:,poi) - B;\n [LEmin,back3] = SGME_logexpectation(Amin,Bmin,d);\n \n LLR = zeros(size(blocks));\n for i=1:m\n\n tar = full(blocks(i,:));\n LLR(i,tar) = LEt(i) - LEmin(tar) - LEc(tar);\n 
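        % Target columns (recordings already in block i): leave-one-out LLR,
        %   LLR(i,r) = logE[block] - logE[block \ {r}] - logE[{r}].
        % Non-target columns (handled just below): the block's pooled meta-embedding
        % is added to the single recording instead,
        %   LLR(i,r) = logE[block U {r}] - logE[block] - logE[{r}].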
\n non = ~tar;\n Aplus = bsxfun(@plus,A(:,non),At(:,i));\n Bplus = bsxfun(@plus,B(:,non),Bt(:,i));\n LLR(i,non) = SGME_logexpectation(Aplus,Bplus,d) - LEt(i) - LEc(non);\n \n \n end\n \n \n %y = LLR;\n [y,back5] = sumlogsoftmax(LLR + logPrior,num);\n \n \n \n back = @back_this;\n function [dA,dB,dd] = back_this(dy)\n dA = zeros(size(A));\n dB = zeros(size(B));\n dd = zeros(size(d));\n dLEt = zeros(size(LEt));\n dLEmin = zeros(size(LEmin));\n dLEc = zeros(size(LEmin));\n dAt = zeros(size(At));\n dBt = zeros(size(Bt));\n \n %[y,back5] = sumlogsoftmax(LLR + logPrior,num);\n dLLR = back5(dy);\n\n\n for k=1:m\n\n tar = full(blocks(k,:));\n %LLR(k,tar) = LEt(k) - LEmin(tar) - LEc(tar);\n row = dLLR(k,tar);\n dLEt(k) = dLEt(k) + sum(row);\n dLEmin(tar) = dLEmin(tar) - row;\n dLEc(tar) = dLEc(tar) - row;\n\n non = ~tar;\n Aplus = bsxfun(@plus,A(:,non),At(:,k));\n Bplus = bsxfun(@plus,B(:,non),Bt(:,k));\n %LLR(k,non) = SGME_logexpectation(Aplus,Bplus,d) - LEt(k) - LEc(non);\n [~,back4] = SGME_logexpectation(Aplus,Bplus,d);\n row = dLLR(k,non);\n [dAplus,dBplus,dd4] = back4(row);\n dLEt(k) = dLEt(k) - sum(row);\n dLEc(non) = dLEc(non) - row;\n dd = dd + dd4;\n dA(:,non) = dA(:,non) + dAplus;\n dB(:,non) = dB(:,non) + dBplus;\n dAt(:,k) = dAt(:,k) + sum(dAplus,2);\n dBt(:,k) = dBt(:,k) + sum(dBplus,2);\n \n end\n \n \n \n \n %[LEmin,back3] = SGME_logexpectation(Amin,Bmin,d);\n [dAmin,dBmin,dd3] = back3(dLEmin);\n dd = dd + dd3;\n \n %Amin = At(:,poi) - A;\n %Bmin = Bt(:,poi) - B;\n dA = dA - dAmin;\n dB = dB - dBmin;\n dAt = dAt + dAmin*blocks.';\n dBt = dBt + dBmin*blocks.';\n \n %[LEc,back2] = SGME_logexpectation(A,B,d);\n [dA2,dB2,dd2] = back2(dLEc);\n dA = dA + dA2;\n dB = dB + dB2;\n dd = dd + dd2;\n \n %[LEt,back1] = SGME_logexpectation(At,Bt,d);\n [dAt1,dBt1,dd1] = back1(dLEt);\n dAt = dAt + dAt1;\n dBt = dBt + dBt1;\n dd = dd + dd1;\n\n \n %At = A*blocks.';\n %Bt = B*blocks.';\n dA = dA + dAt*blocks;\n dB = dB + dBt*blocks;\n end\n\n\n\nend\n\nfunction test_this()\n\n em = 4;\n n = 7;\n dim = 2;\n \n prior = create_PYCRP([],0,em,n);\n poi = prior.sample(n);\n m = max(poi);\n blocks = sparse(poi,1:n,true,m+1,n); \n num = find(blocks(:)); \n \n logPrior = prior.GibbsMatrix(poi); \n\n d = rand(dim,1);\n A = randn(dim,n);\n b = rand(1,n);\n \n \n %f = @(A,b,d) SGME_logexpectation(A,b,d);\n %testBackprop(f,{A,b,d},{1,1,1});\n\n \n g = @(A,b,d) SGME_logPsL(A,b,d,blocks,poi,num,logPrior);\n testBackprop(g,{A,b,d},{1,1,1});\n\n\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sumlogsoftmax.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/sumlogsoftmax.m", "size": 517, "source_encoding": "utf_8", "md5": "5591b4f9a440f97900ac26aefd1faf62", "text": "function [y,back] = sumlogsoftmax(X,num)\n\n if nargin==0\n test_this();\n return;\n end\n\n [den,back1] = sumlogsumexp(X);\n y = sum(X(num)) - den; \n \n \n back = @back_this;\n \n function dX = back_this(dy)\n dX = back1(-dy); \n dX(num) = dX(num) + dy; \n end\n\n\nend\n\n\nfunction test_this()\n\n m = 3;\n n = 5;\n X = randn(m,n);\n labels = randi(m,1,n);\n num = sub2ind(size(X),labels,1:n);\n testBackprop(@(X)sumlogsoftmax(X,num),X);\n\n\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_SGME_calculator.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/create_SGME_calculator.m", "size": 3098, "source_encoding": "utf_8", "md5": "22c43d447699e600cb1e2c8a1f4c4a2d", "text": "function [SGME,LEfun] = 
create_SGME_calculator(E)\n\n if nargin==0\n test_this();\n return;\n end\n\n \n [V,D] = eig(E); % E = VDV'\n d = diag(D); % eigenvalues\n dd = zeros(size(d)); %gradient w.r.t. d backpropagated from log_expectations\n zdim = length(d);\n ii = reshape(logical(eye(zdim)),[],1);\n\n\n SGME.SGME2GME = @SGME2GME;\n SGME.log_expectations = @log_expectations;\n SGME.logLR = @logLR;\n SGME.plotAll = @plotAll;\n SGME.V = V;\n SGME.d = d;\n LEfun = @LE;\n SGME.reset_parameter_gradient = @reset_parameter_gradient;\n SGME.get_parameter_gradient = @get_parameter_gradient;\n \n function reset_parameter_gradient()\n dd(:) = 0;\n end\n \n function dd1 = get_parameter_gradient() \n dd1 = dd;\n end\n\n\n function plotAll(A,b,matlab_colours, tikz_colours, rotate)\n if ~exist('rotate','var') || isempty(rotate)\n rotate = true;\n end\n if ~exist('tikz_colours','var')\n tikz_colours = [];\n end\n [A,B] = SGME2GME(A,b,rotate);\n n = length(b);\n for i=1:n\n Bi = reshape(B(:,i),zdim,zdim);\n mu = Bi\\A(:,i);\n if ~isempty(tikz_colours)\n plotGaussian(mu,inv(Bi),tikz_colours{i},matlab_colours{i});\n else\n plotGaussian(mu,inv(Bi),[],matlab_colours{i});\n end\n end\n \n end\n \n \n function [A,B] = SGME2GME(A,b,rotate)\n B = zeros(zdim*zdim,length(b));\n B(ii,:) = bsxfun(@times,b,d);\n if ~exist('rotate','var') || isempty(rotate) || rotate %rotate by default\n A = V*A;\n for j = 1:size(B,2)\n BR = V*reshape(B(:,j),zdim,zdim)*V.';\n B(:,j) = BR(:);\n end\n end\n end\n\n function [y,back] = log_expectations(A,b)\n [y,back0] = LE(A,b,d);\n back = @back_this;\n function [dA,db] = back_this(dy)\n [dA,db,dd0] = back0(dy);\n dd = dd + dd0;\n end\n end\n\n\n function Y = logLR(left,right)\n B = bsxfun(@plus,left.b.',right.b);\n [m,n] = size(B);\n Y = zeros(m,n);\n for i=1:m\n AA = bsxfun(@plus,left.A(:,i),right.A);\n Y(i,:) = log_expectations(AA,B(i,:));\n end\n end\n \nend\n\n\nfunction [y,back] = LE(A,b,d)\n bd = bsxfun(@times,b,d);\n logdets = sum(log1p(bd),1);\n den = 1 + bd;\n Aden = A./den;\n Q = sum(A.*Aden,1); %Q = sum((A.^2)./den,1);\n y = (Q-logdets)/2;\n\n back = @back_LE;\n\n\n function [dA,db,dd] = back_LE(dy)\n dQ = dy/2;\n %dlogdets = - dQ;\n\n dAden = bsxfun(@times,dQ,A); \n dA = bsxfun(@times,dQ,Aden); \n\n dA2 = dAden./den;\n dA = dA + dA2; \n dden = -Aden.*dA2;\n\n dbd = dden - bsxfun(@rdivide,dQ,den); %dlogdets = -dQ\n\n db = d.' 
* dbd;\n dd = dbd * b.';\n end\n\n\nend\n\n\n\n\n\n\nfunction test_this()\n\n m = 3;\n n = 5;\n A = randn(m,n);\n b = rand(1,n);\n d = rand(m,1);\n \n testBackprop(@LE,{A,b,d},{1,1,1});\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsumexp.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/logsumexp.m", "size": 456, "source_encoding": "utf_8", "md5": "ba0f6dd080d4fa7a7cd270a5055c5980", "text": "function [y,back] = logsumexp(X)\n\n if nargin==0\n test_this();\n return;\n end\n\n mx = max(X,[],1);\n y = bsxfun(@plus,log(sum(exp(bsxfun(@minus,X,mx)),1)),mx);\n \n \n back = @back_this;\n \n function dX = back_this(dy)\n dX = bsxfun(@times,dy,exp(bsxfun(@minus,X,y)));\n \n end\n\n\nend\n\n\nfunction test_this()\n\n m = 3;\n n = 5;\n X = randn(m,n);\n testBackprop(@(X)logsumexp(X),X);\n\n\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sample_speaker.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/synthdata/sample_speaker.m", "size": 1520, "source_encoding": "utf_8", "md5": "f0f62cb9af06dc368f90cf9c9d6c92d3", "text": "function [X,precisions] = sample_speaker(z,F,k,n,chi_sq)\n% Sample n heavy-tailed observations of speaker with identity variable z.\n% Inputs:\n% z: d-by-1 speaker identity variable\n% F: D-by-d factor loading matrix\n% k: integer, k>=1, where nu=2k is degrees of freedom of resulting\n% t-distribution\n% n: number of samples\n% chi_sq: [optional] If given and true, then precisions are sampled from\n% chi^2 with DF: nu = k*2. In this case, k*2 must be an integer,\n% so for example k=0.5 is valid and gives Cauchy samples. \n%\n% Output:\n% X: D-by-n samples\n% precisions: 1-by-n, the hidden precisions\n\n if nargin==0\n test_this();\n return;\n end\n \n if ~exist('n','var') || isempty(n)\n n = size(z,2);\n end\n \n if exist('chi_sq','var') && ~isempty(chi_sq) && chi_sq\n % sample Chi^2, with DF = nu=2k, scaled by 1/nu, so that mean = 1. 
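        % mean(randn(nu,n).^2,1) is chi2_nu/nu, so each precision has mean 1. A Gaussian
        % whose precision is drawn this way (equivalently Gamma(k,k) with nu = 2k) has a
        % Student's t marginal with nu degrees of freedom, consistent with the help text above.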
\n nu = 2*k;\n precisions = mean(randn(nu,n).^2,1); \n else %Gamma\n % Sample n precisions independently from Gamma(k,k), which has mean = 1\n % mode = (k-1)/k.\n precisions = -mean(log(rand(k,n)),1); \n end\n \n std = 1./sqrt(precisions);\n \n dim = size(F,1);\n Y = bsxfun(@times,std,randn(dim,n));\n X = bsxfun(@plus,F*z,Y);\n\n\n\n\nend\n\nfunction test_this()\n\n close all;\n \n z = 0;\n F = zeros(100,1);\n k = 5;\n [X,precisions] = sample_speaker(z,F,k,1000);\n \n figure;\n plot(X(1,:),X(2,:),'.');\n\n figure;\n plot(sum(X.^2,1),1./precisions,'.');\n \n \n \nend"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sample_HTnoise.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/synthdata/sample_HTnoise.m", "size": 695, "source_encoding": "utf_8", "md5": "9ffb422905007acca5d9b5c71ee828a9", "text": "function [X,precisions] = sample_HTnoise(nu,dim,n)\n% Sample n heavy-tailed observations of speaker with identity variable z.\n% Inputs:\n% nu: integer nu >=1, degrees of freedom of resulting t-distribution\n% n: number of samples\n%\n% Output:\n% X: dim-by-n samples\n% precisions: 1-by-n, the hidden precisions\n\n if nargin==0\n test_this();\n return;\n end\n \n precisions = mean(randn(nu,n).^2,1); \n std = 1./sqrt(precisions);\n \n X = bsxfun(@times,std,randn(dim,n));\n\n\n\n\nend\n\nfunction test_this()\n\n close all;\n \n [X,precisions] = sample_HTnoise(2,2,1000);\n \n figure;\n plot(X(1,:),X(2,:),'.');\n\n figure;\n plot(sum(X.^2,1),1./precisions,'.');\n \n \n \nend"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_linear.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/fusion/funcs/qfuser_linear.m", "size": 2337, "source_encoding": "utf_8", "md5": "0fe31df563db3c6f4f08ea791e83c340", "text": "function [fusion,w0] = qfuser_linear(w,scores,scrQ,ndx,w_init)\n% This function does the actual quality fusion (and is passed to\n% the training function when training the quality fusion weights).\n% The scores from the linear fusion are added to the combined\n% quality measure for each trial to produce the final score.\n% Inputs:\n% w: The trained quality fusion weights. If empty, this function\n% returns a function handle.\n% scores: A matrix of scores where the number of rows is the\n% number of systems to be fused and the number of columns\n% is the number of scores.\n% scrQ: An object of type Quality containing the quality measures\n% for models and segments.\n% ndx: A Key or Ndx object indicating trials.\n% w_init: The trained weights from the linear fusion (without\n% quality measures) training.\n% Outputs:\n% fusion: If w is supplied, fusion is a vector of fused scores.\n% If w is not supplied, fusion is a function handle to a\n% function that takes w as input and produces a vector of fused\n% scores as output. This function wraps the scores and quality\n% measures. 
\n% w0: Initial weights for starting the quality fusion training.\n\nif nargin==0\n test_this();\n return\nend\n\nassert(isa(scrQ,'Quality'))\nassert(isa(ndx,'Ndx')||isa(ndx,'Key'))\n\nif ~exist('w_init','var')\n assert(~isempty(w),'If w=[], then w_init must be supplied.');\n w_init = w;\nend\n\n[m,n] = size(scores);\nwlin_sz = m+1;\n\n% linear fuser\nf1 = linear_fuser([],scores);\nw1 = w_init(1:wlin_sz);\n[wlin,wq] = splitvec_fh(wlin_sz);\nf1 = f1(wlin);\n\n[q,n1] = size(scrQ.modelQ);\n[q2,n2] = size(scrQ.segQ);\nassert(q==q2);\n\nscrQ.modelQ = [scrQ.modelQ;ones(1,n1)];\nscrQ.segQ = [scrQ.segQ;ones(1,n2)];\nq = q + 1;\n\nf2 = AWB_sparse(scrQ,ndx,tril_to_symm_fh(q));\nf2 = f2(wq);\n\nwq_sz = q*(q+1)/2;\nw3 = zeros(wq_sz,1);\n\n% assemble\nfusion = sum_of_functions(w,[1,1],f1,f2);\nw0 = [w1;w3];\n\nend\n\n\n\nfunction test_this()\n\nk = 2;\n\nm = 3;\nn = 4;\nq = 2;\nqual = Quality();\nqual.modelQ = randn(q,m);\nqual.segQ = randn(q,n);\nndx = Ndx();\nndx.trialmask = false(m,n);\nndx.trialmask(1,1:2) = true;\nndx.trialmask(2:3,3:4) = true;\n\nscores = randn(k,sum(ndx.trialmask(:)));\n\nw_init = [randn(k+1,1)]; % linear init\n\n[fusion,w0] = qfuser_linear([],scores,qual,ndx,w_init);\n\ntest_MV2DF(fusion,w0);\n\n[fusion(w0),linear_fuser(w_init,scores)]\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "AWB_sparse.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/fusion/funcs/AWB_sparse.m", "size": 2062, "source_encoding": "utf_8", "md5": "dcb6e85fdcca1dfb1b5cdee3eb6ab112", "text": "function fh = AWB_sparse(qual,ndx,w)\n% Produces trial quality measures from segment quality measures\n% using the weighting matrix 'w'.\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n%\n% Algorithm: Y = A*reshape(w,..)*B\n% Inputs:\n% qual: A Quality object containing quality measures in modelQ\n% and segQ fields.\n% ndx: A Key or Ndx object indicating trials.\n% w: The combination weights for making trial quality measures.\n% Outputs:\n% fh: If 'w' is given, vector of quality scores --- one for each\n% trial. 
If 'w' is empty, a function handle that produces\n% these scores given a 'w'.\n\nif nargin==0\n test_this();\n return\nend\n\nassert(isa(qual,'Quality'))\nassert(isa(ndx,'Ndx')||isa(ndx,'Key'))\n\n[q,m] = size(qual.modelQ);\n[q1,n] = size(qual.segQ);\nassert(q==q1);\nif isa(ndx,'Ndx') \n trials = ndx.trialmask;\nelse\n trials = ndx.tar | ndx.non;\nend\nftrials = find(trials(:));\nk = length(ftrials);\nassert(m==size(trials,1)&n==size(trials,2));\n[ii,jj] = ind2sub([m,n],ftrials);\n\n function y = map_this(w) \n WR = reshape(w,q,q)*qual.segQ;\n y = zeros(1,k);\n done = 0;\n for j=1:n\n right = WR(:,j);\n col = right.'*qual.modelQ(:,trials(:,j));\n len = length(col);\n y(done+1:done+len) = col;\n done = done + len;\n end\n assert(done==k);\n end\n\n function w = transmap_this(y)\n Y = sparse(ii,jj,y,m,n);\n w = qual.modelQ*Y*qual.segQ.';\n end\n\n\nmap = @(y) map_this(y);\ntransmap = @(y) transmap_this(y);\n\n\nfh = linTrans([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\nfunction test_this()\nm = 3;\nn = 4;\nq = 2;\nqual = Quality();\nqual.modelQ = randn(q,m);\nqual.segQ = randn(q,n);\nndx = Ndx();\nndx.trialmask = false(m,n);\nndx.trialmask(1,1:2) = true;\nndx.trialmask(2:3,3:4) = true;\nndx.trialmask\nf = AWB_sparse(qual,ndx);\n\nw = randn(q*q,1);\ntest_MV2DF(f,w);\n\nW = reshape(w,q,q)\nAWB = qual.modelQ'*W*qual.segQ\n[f(w),AWB(ndx.trialmask(:))]\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "dcfplot.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/plotting/dcfplot.m", "size": 1889, "source_encoding": "utf_8", "md5": "9fbbba6b08ba70f285386536481e29d5", "text": "function dcfplot(devkeyname,evalkeyname,devscrfilename,evalscrfilename,outfilename,plot_title,xmin,xmax,ymin,ymax,prior)\n% Makes a Norm_DCF plot of the dev and eval scores for a system. \n% Inputs:\n% devkeyname: The name of the file containing the Key for\n% the dev scores.\n% evalkeyname: The name of the file containing the Key for\n% the eval scores.\n% devscrfilename: The name of the file containing the Scores\n% for the dev trials.\n% evalscrfilename: The name of the file containing the\n% Scores the eval trials.\n% outfilename: The name for the PDF file that the plot will be\n% written in.\n% plot_title: A string for the plot title. (optional)\n% xmin, xmax, ymin, ymax: The boundaries of the plot. (optional)\n% prior: The effective target prior. 
(optional)\n\nassert(isa(devkeyname,'char'))\nassert(isa(evalkeyname,'char'))\nassert(isa(devscrfilename,'char'))\nassert(isa(evalscrfilename,'char'))\nassert(isa(outfilename,'char'))\n\nif ~exist('plot_title','var') || isempty(plot_title)\n plot_title = '';\nend\n\nif ~exist('xmin','var')\n xmin = -10;\n xmax = 0;\n ymin = 0;\n ymax = 1.2;\n prior = 0.001;\nend\n\n[dev_tar,dev_non] = get_tar_non_scores(devscrfilename,devkeyname);\n[eval_tar,eval_non] = get_tar_non_scores(evalscrfilename,evalkeyname);\n\nclose all\nplot_obj = Norm_DCF_Plot([xmin,xmax,ymin,ymax],plot_title);\nplot_obj.set_system(dev_tar,dev_non,'dev')\nplot_obj.plot_operating_point(logit(prior),'m--','new DCF point')\nplot_obj.plot_curves([0 0 0 1 1 1 0 0],{{'b--'},{'g--'},{'r--'}})\nplot_obj.set_system(eval_tar,eval_non,'eval')\nplot_obj.plot_curves([0 0 1 1 1 1 0 1],{{'r','LineWidth',2},{'b'},{'g'},{'r'},{'k*'}})\nplot_obj.display_legend()\nplot_obj.save_as_pdf(outfilename)\nend\n\nfunction [tar,non] = get_tar_non_scores(scrfilename,keyname)\nkey = Key.read(keyname);\nscr = Scores.read(scrfilename);\n[tar,non] = scr.get_tar_non(key);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "fast_actDCF.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/stats/fast_actDCF.m", "size": 3032, "source_encoding": "utf_8", "md5": "5e62c5e1058f0ba3f5a59149249da2a9", "text": "function [dcf,Pmiss,Pfa] = fast_actDCF(tar,non,plo,normalize)\n% Computes the actual average cost of making Bayes decisions with scores\n% calibrated to act as log-likelihood-ratios. The average cost (DCF) is \n% computed for a given range of target priors and for unity cost of error.\n% If un-normalized, DCF is just the Bayes error-rate.\n%\n% Usage examples: dcf = fast_actDCF(tar,non,-10:0.01:0)\n% norm_dcf = fast_actDCF(tar,non,-10:0.01:0,true)\n% [dcf,pmiss,pfa] = fast_actDCF(tar,non,-10:0.01:0)\n%\n% Inputs:\n% tar: a vector of T calibrated target scores\n% non: a vector of N calibrated non-target scores\n% Both are assumed to be of the form \n%\n% log P(data | target)\n% llr = -----------------------\n% log P(data | non-target)\n%\n% where log is the natural logarithm.\n%\n% plo: an ascending vector of log-prior-odds, plo = logit(Ptar) \n% = log(Ptar) - log(1-Ptar)\n%\n% normalize: (optional, default false) return normalized dcf if true.\n%\n%\n% Outputs: \n% dcf: a vector of DCF values, one for every value of plo.\n% \n% dcf(plo) = Ptar(plo)*Pmiss(plo) + (1-Ptar(plo))*Pfa(plo)\n%\n% where Ptar(plo) = sigmoid(plo) = 1./(1+exp(-plo)) and\n% where Pmiss and Pfa are computed by counting miss and false-alarm\n% rates, when comparing 'tar' and 'non' scores to the Bayes decision\n% threshold, which is just -plo. If 'normalize' is true, then dcf is\n% normalized by dividing by min(Ptar,1-Ptar).\n%\n% Pmiss: empirical actual miss rate, one value per element of plo.\n% Pmiss is not altered by parameter 'normalize'.\n%\n% Pfa: empirical actual false-alarm rate, one value per element of plo.\n% Pfa is not altered by parameter 'normalize'.\n%\n% Note, the decision rule applied here is to accept if \n%\n% llr >= Bayes threshold. \n%\n% or reject otherwise. 
The >= is a consequence of the stability of the \n% sort algorithm , where equal values remain in the original order.\n%\n%\nif nargin==0\n test_this();\n return\nend\n\nassert(isvector(tar))\nassert(isvector(non))\nassert(isvector(plo))\n\nassert(issorted(plo),'Parameter plo must be in ascending order.');\n\ntar = tar(:)';\nnon = non(:)';\nplo = plo(:)';\n\nif ~exist('normalize','var') || isempty(normalize)\n normalize = false;\nend\n\nD = length(plo);\nT = length(tar);\nN = length(non);\n\n[s,ii] = sort([-plo,tar]); % -plo are thresholds\nr = zeros(1,T+D);\nr(ii) = 1:T+D; \nr = r(1:D); % rank of thresholds\nPmiss = r-(D:-1:1);\n\n[s,ii] = sort([-plo,non]); % -plo are thresholds\nr = zeros(1,N+D);\nr(ii) = 1:N+D; \nr = r(1:D); % rank of thresholds\nPfa = N - r + (D:-1:1);\n\nPmiss = Pmiss / T;\nPfa = Pfa / N;\n\n\nPtar = sigmoid(plo);\nPnon = sigmoid(-plo);\ndcf = Ptar.*Pmiss + Pnon.*Pfa;\n\nif normalize\n dcf = dcf ./ min(Ptar,Pnon);\nend\n\nend\n\nfunction test_this()\n\ntar = [1 2 5 7];\nnon = [-7 -5 -2 -1];\nplo = -6:6;\n\n[dcf,Pmiss,Pfa] = fast_actDCF(tar,non,plo)\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "fast_minDCF.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/stats/fast_minDCF.m", "size": 2585, "source_encoding": "utf_8", "md5": "6a709a2b121037d7919f57c87d835531", "text": "function [minDCF,Pmiss,Pfa,prbep,eer] = fast_minDCF(tar,non,plo,normalize)\n% Inputs:\n%\n% tar: vector of target scores\n% non: vector of non-target scores\n% plo: vector of prior-log-odds: plo = logit(Ptar) \n% = log(Ptar) - log(1-Ptar)\n%\n% normalize: if true, return normalized minDCF, else un-normalized.\n% (optional, default = false)\n%\n% Output:\n% minDCF: a vector with one value for every element of plo\n% Note that minDCF is parametrized by plo:\n% \n% minDCF(Ptar) = min_t Ptar * Pmiss(t) + (1-Ptar) * Pfa(t) \n%\n% where t is the adjustable decision threshold and\n% Ptar = sigmoid(plo) = 1./(1+exp(-plo))\n% If normalize == true, then the returned value is\n% minDCF(Ptar) / min(Ptar,1-Ptar).\n%\n%\n% Pmiss: a vector with one value for every element of plo.\n% This is Pmiss(tmin), where tmin is the minimizing threshold\n% for minDCF, at every value of plo. Pmiss is not altered by\n% parameter 'normalize'.\n%\n% Pfa: a vector with one value for every element of plo.\n% This is Pfa(tmin), where tmin is the minimizing threshold for\n% minDCF, at every value of plo. 
Pfa is not altered by\n% parameter 'normalize'.\n%\n% prbep: precision-recall break-even point: Where #FA == #miss\n%\n% eer: the equal error rate.\n%\n% Note, for the un-normalized case:\n% minDCF(plo) = sigmoid(plo).*Pfa(plo) + sigmoid(-plo).*Pmiss(plo)\n\nif nargin==0\n test_this();\n return\nend\n\nassert(isvector(tar))\nassert(isvector(non))\nassert(isvector(plo))\n\nif ~exist('normalize','var') || isempty(normalize)\n normalize = false;\nend\n\nplo = plo(:);\n[Pmiss,Pfa] = rocch(tar,non);\nif nargout > 3\n Nmiss = Pmiss * length(tar);\n Nfa = Pfa * length(non);\n prbep = rocch2eer(Nmiss,Nfa);\nend\nif nargout > 4\n eer = rocch2eer(Pmiss,Pfa);\nend\nPtar = sigmoid(plo);\nPnon = sigmoid(-plo);\ncdet = [Ptar,Pnon]*[Pmiss(:)';Pfa(:)'];\n[minDCF,ii] = min(cdet,[],2);\nif nargout>1\n Pmiss = Pmiss(ii);\n Pfa = Pfa(ii);\nend\n\nif normalize\n minDCF = minDCF ./ min(Ptar,Pnon);\nend\n\nend\n\nfunction test_this\n\nclose all;\nplo = -20:0.01:20;\n\ntar = randn(1,1e4)+4;\nnon = randn(1,1e4);\nminDCF = fast_minDCF(tar,non,plo,true);\n%sminDCF = slow_minDCF(tar,non,plo,true);\n%plot(plo,minDCF,'r',plo,sminDCF,'k');\nplot(plo,minDCF,'r');\nhold on;\n\ntar = randn(1,1e5)+4;\nnon = randn(1,1e5);\nminDCF = fast_minDCF(tar,non,plo,true);\nplot(plo,minDCF,'g')\n\ntar = randn(1,1e6)+4;\nnon = randn(1,1e6);\nminDCF = fast_minDCF(tar,non,plo,true);\nplot(plo,minDCF,'b')\nhold off;\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "rocch.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/det/rocch.m", "size": 2725, "source_encoding": "utf_8", "md5": "68aaac9f8a1f40d0d5eac901abc533d5", "text": "function [pmiss,pfa] = rocch(tar_scores,nontar_scores)\n% ROCCH: ROC Convex Hull.\n% Usage: [pmiss,pfa] = rocch(tar_scores,nontar_scores)\n% (This function has the same interface as compute_roc.)\n%\n% Note: pmiss and pfa contain the coordinates of the vertices of the\n% ROC Convex Hull.\n%\n% For a demonstration that plots ROCCH against ROC for a few cases, just\n% type 'rocch' at the MATLAB command line.\n%\n% Inputs:\n% tar_scores: scores for target trials\n% nontar_scores: scores for non-target trials\n\nif nargin==0\n test_this();\n return\nend\n\nassert(nargin==2)\nassert(isvector(tar_scores))\nassert(isvector(nontar_scores))\n\nNt = length(tar_scores);\nNn = length(nontar_scores);\nN = Nt+Nn;\nscores = [tar_scores(:)',nontar_scores(:)'];\nPideal = [ones(1,Nt),zeros(1,Nn)]; %ideal, but non-monotonic posterior\n\n%It is important here that scores that are the same (i.e. 
already in order) should NOT be swapped.\n%MATLAB's sort algorithm has this property.\n[scores,perturb] = sort(scores);\n\nPideal = Pideal(perturb);\n[Popt,width] = pavx(Pideal); \n\nnbins = length(width);\npmiss = zeros(1,nbins+1);\npfa = zeros(1,nbins+1);\n\n%threshold leftmost: accept eveything, miss nothing\nleft = 0; %0 scores to left of threshold\nfa = Nn;\nmiss = 0;\n\nfor i=1:nbins\n pmiss(i) = miss/Nt;\n pfa(i) = fa/Nn;\n left = left + width(i);\n miss = sum(Pideal(1:left));\n fa = N - left - sum(Pideal(left+1:end));\nend\npmiss(nbins+1) = miss/Nt;\npfa(nbins+1) = fa/Nn;\n\nend\n\n\nfunction test_this()\n\nfigure();\n\nsubplot(2,3,1);\ntar = [1]; non = [0];\n[pmiss,pfa] = rocch(tar,non);\n[pm,pf] = compute_roc(tar,non);\nplot(pfa,pmiss,'r-^',pf,pm,'g--v');\naxis('square');grid;legend('ROCCH','ROC');\ntitle('2 scores: non < tar');\n\nsubplot(2,3,2);\ntar = [0]; non = [1];\n[pmiss,pfa] = rocch(tar,non);\n[pm,pf] = compute_roc(tar,non);\nplot(pfa,pmiss,'r-^',pf,pm,'g-v');\naxis('square');grid;\ntitle('2 scores: tar < non');\n\nsubplot(2,3,3);\ntar = [0]; non = [-1,1];\n[pmiss,pfa] = rocch(tar,non);\n[pm,pf] = compute_roc(tar,non);\nplot(pfa,pmiss,'r-^',pf,pm,'g--v');\naxis('square');grid;\ntitle('3 scores: non < tar < non');\n\nsubplot(2,3,4);\ntar = [-1,1]; non = [0];\n[pmiss,pfa] = rocch(tar,non);\n[pm,pf] = compute_roc(tar,non);\nplot(pfa,pmiss,'r-^',pf,pm,'g--v');\naxis('square');grid;\ntitle('3 scores: tar < non < tar');\nxlabel('P_{fa}');\nylabel('P_{miss}');\n\nsubplot(2,3,5);\ntar = randn(1,100)+1; non = randn(1,100);\n[pmiss,pfa] = rocch(tar,non);\n[pm,pf] = compute_roc(tar,non);\nplot(pfa,pmiss,'r-^',pf,pm,'g');\naxis('square');grid;\ntitle('45^{\\circ} DET');\n\nsubplot(2,3,6);\ntar = randn(1,100)*2+1; non = randn(1,100);\n[pmiss,pfa] = rocch(tar,non);\n[pm,pf] = compute_roc(tar,non);\nplot(pfa,pmiss,'r-^',pf,pm,'g');\naxis('square');grid;\ntitle('flatter DET');\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "compute_roc.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/det/compute_roc.m", "size": 1956, "source_encoding": "utf_8", "md5": "16907ef9816ee330ac64b4eeb708366b", "text": "function [Pmiss, Pfa] = compute_roc(true_scores, false_scores)\n% compute_roc computes the (observed) miss/false_alarm probabilities\n% for a set of detection output scores.\n%\n% true_scores (false_scores) are detection output scores for a set of\n% detection trials, given that the target hypothesis is true (false).\n% (By convention, the more positive the score,\n% the more likely is the target hypothesis.)\n%\n% this code is matlab-tized for speed.\n% speedup: Old routine 54 secs -> new routine 5.71 secs.\n% for 109776 points.\n\n%-------------------------\n%Compute the miss/false_alarm error probabilities\n\nassert(nargin==2)\nassert(isvector(true_scores))\nassert(isvector(false_scores))\n\nnum_true = length(true_scores);\nnum_false = length(false_scores);\nassert(num_true>0)\nassert(num_false>0)\n\ntotal=num_true+num_false;\n\nPmiss = zeros(num_true+num_false+1, 1); %preallocate for speed\nPfa = zeros(num_true+num_false+1, 1); %preallocate for speed\n\nscores(1:num_false,1) = false_scores;\nscores(1:num_false,2) = 0;\nscores(num_false+1:total,1) = true_scores;\nscores(num_false+1:total,2) = 1;\n\nscores=DETsort(scores);\n\nsumtrue=cumsum(scores(:,2),1);\nsumfalse=num_false - ([1:total]'-sumtrue);\n\nPmiss(1) = 0;\nPfa(1) = 1.0;\nPmiss(2:total+1) = sumtrue ./ num_true;\nPfa(2:total+1) = 
sumfalse ./ num_false;\n\nend\n\n\nfunction [y,ndx] = DETsort(x,col)\n% DETsort Sort rows, the first in ascending, the remaining in decending\n% thereby postponing the false alarms on like scores.\n% based on SORTROWS\n\nif nargin<1, error('Not enough input arguments.'); end\nif ndims(x)>2, error('X must be a 2-D matrix.'); end\n\nif nargin<2, col = 1:size(x,2); end\nif isempty(x), y = x; ndx = []; return, end\n\nndx = (1:size(x,1))';\n\n% sort 2nd column ascending\n[v,ind] = sort(x(ndx,2));\nndx = ndx(ind);\n\n% reverse to decending order\nndx(1:size(x,1)) = ndx(size(x,1):-1:1);\n\n% now sort first column ascending\n[v,ind] = sort(x(ndx,1));\nndx = ndx(ind);\ny = x(ndx,:);\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "rocchdet.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/det/rocchdet.m", "size": 5471, "source_encoding": "utf_8", "md5": "2452dd1f98aad313c79879d410214cb2", "text": "function [x,y,eer,mindcf] = rocchdet(tar,non,dcfweights,pfa_min,pfa_max,pmiss_min,pmiss_max,dps)\n% ROCCHDET: Computes ROC Convex Hull and then maps that to the DET axes.\n%\n% (For demo, type 'rocchdet' on command line.)\n%\n% Inputs:\n%\n% tar: vector of target scores\n% non: vector of non-target scores\n%\n% dcfweights: 2-vector, such that: DCF = [pmiss,pfa]*dcfweights(:).\n% (Optional, provide only if mindcf is desired, otherwise \n% omit or use [].) \n%\n% pfa_min,pfa_max,pmiss_min,pmiss_max: limits of DET-curve rectangle.\n% The DET-curve is infinite, non-trivial limits (away from 0 and 1) \n% are mandatory.\n% (Uses min = 0.0005 and max = 0.5 if omitted.)\n%\n% dps: number of returned (x,y) dots (arranged in a curve) in DET space, \n% for every straight line-segment (edge) of the ROC Convex Hull.\n% (Uses dps = 100 if omitted.)\n% \n% Outputs:\n%\n% x: probit(Pfa)\n% y: probit(Pmiss)\n% eer: ROCCH EER = max_p mindcf(dcfweights=[p,1-p]), which is also \n% equal to the intersection of the ROCCH with the line pfa = pmiss.\n% \n% mindcf: Identical to result using traditional ROC, but\n% computed by mimimizing over the ROCCH vertices, rather than \n% over all the ROC points.\n\nif nargin==0\n test_this();\n return\nend\n\nassert(isvector(tar))\nassert(isvector(non))\n\nif ~exist('pmiss_max','var') || isempty(pmiss_max)\n pfa_min = 0.0005;\n pfa_max = 0.5;\n pmiss_min = 0.0005;\n pmiss_max = 0.5;\nend\n\nif ~exist('dps','var') || isempty(dps)\n dps = 100;\nend\n\n\nassert(pfa_min>0 && pfa_max<1 && pmiss_min>0 && pmiss_max<1,'limits must be strictly inside (0,1)');\nassert(pfa_min3\n dcf = dcfweights(:)'*[pmiss(:)';pfa(:)'];\n mindcf = min(dcf);\nend\n\n%pfa is decreasing\n%pmiss is increasing\n\n\nbox.left = pfa_min;\nbox.right = pfa_max;\nbox.top = pmiss_max;\nbox.bottom = pmiss_min;\n\nx = [];\ny = [];\neer = 0;\nfor i=1:length(pfa)-1\n xx = pfa(i:i+1);\n yy = pmiss(i:i+1);\n [xdots,ydots,eerseg] = plotseg(xx,yy,box,dps);\n x = [x,xdots];\n y = [y,ydots];\n eer = max(eer,eerseg);\nend\n\nend\n\n\nfunction [x,y,eer] = plotseg(xx,yy,box,dps)\n \n%xx and yy should be sorted:\nassert(xx(2)<=xx(1)&&yy(1)<=yy(2));\n\nXY = [xx(:),yy(:)];\ndd = [1,-1]*XY;\nif min(abs(dd))==0\n eer = 0;\nelse \n %find line coefficieents seg s.t. 
seg'[xx(i);yy(i)] = 1, \n %when xx(i),yy(i) is on the line.\n seg = XY\\[1;1];\n eer = 1/(sum(seg)); %candidate for EER, eer is highest candidate\nend\n\n%segment completely outside of box\nif xx(1)box.right || yy(2)box.top\n x = [];\n y = [];\n return\nend\n\nif xx(2)box.right\n xx(1) = box.right;\n yy(1) = (1-seg(1)*box.right)/seg(2);\nend\nif yy(1)box.top\n yy(2) = box.top;\n xx(2) = (1-seg(2)*box.top)/seg(1);\nend\ndx = xx(2)-xx(1); \nxdots = xx(1)+dx*(0:dps)/dps;\nydots = (1-seg(1)*xdots)/seg(2);\nx = probit(xdots);\ny = probit(ydots);\nend\n\n\n\nfunction test_this\n\nsubplot(2,3,1);\nhold on;\nmake_det_axes();\n\ntar = randn(1,100)+2;\nnon = randn(1,100);\n[x,y,eer] = rocchdet(tar,non);\n[pmiss,pfa] = compute_roc(tar,non);\nplot(x,y,'g',probit(pfa),probit(pmiss),'r');\nlegend(sprintf('ROCCH-DET (EER = %3.1f%%)',eer*100),'classical DET',...\n 'Location','SouthWest');\ntitle('EER read off ROCCH-DET');\n\n \nsubplot(2,3,2);\nshow_eer(pmiss,pfa,eer);\n\nsubplot(2,3,3);\n[pmiss,pfa] = rocch(tar,non);\nshow_eer(pmiss,pfa,eer);\n \n\nsubplot(2,3,4);\nhold on;\nmake_det_axes();\n\ntar = randn(1,100)*2+3;\nnon = randn(1,100);\n[x,y,eer] = rocchdet(tar,non);\n[pmiss,pfa] = compute_roc(tar,non);\nplot(x,y,'b',probit(pfa),probit(pmiss),'k');\nlegend(sprintf('ROCCH-DET (EER = %3.1f%%)',eer*100),'classical DET',...\n 'Location','SouthWest');\ntitle('EER read off ROCCH-DET');\n \n \nsubplot(2,3,5);\nshow_eer(pmiss,pfa,eer);\n\nsubplot(2,3,6);\n[pmiss,pfa] = rocch(tar,non);\nshow_eer(pmiss,pfa,eer);\n\nend\n\n\nfunction show_eer(pmiss,pfa,eer)\np = 0:0.001:1;\nx = p;\ny = zeros(size(p));\nfor i=1:length(p);\n %y(i) = mincdet @ ptar = p(i), cmiss = cfa = 1 \n y(i) = min(p(i)*pmiss+(1-p(i))*pfa); \nend\nplot([min(x),max(x)],[eer,eer],x,y);\ngrid;\nlegend('EER','minDCF(P_{tar},C_{miss}=C_{fa}=1)','Location','South');\nxlabel('P_{tar}');\ntitle('EER via minDCF on classical DET');\nend\n\n\nfunction make_det_axes()\n% make_det_axes creates a plot for displaying detection performance\n% with the axes scaled and labeled so that a normal Gaussian\n% distribution will plot as a straight line.\n%\n% The y axis represents the miss probability.\n% The x axis represents the false alarm probability.\n%\n% Creates a new figure, switches hold on, embellishes and returns handle.\n\npROC_limits = [0.0005 0.5];\n\npticks = [0.001 0.002 0.005 0.01 0.02 0.05 0.1 0.2 0.3 0.4];\nticklabels = ['0.1';'0.2';'0.5';' 1 ';' 2 ';' 5 ';'10 ';'20 ';'30 ';'40 '];\n\naxis('square');\n\nset (gca, 'xlim', probit(pROC_limits));\nset (gca, 'xtick', probit(pticks));\nset (gca, 'xticklabel', ticklabels);\nset (gca, 'xgrid', 'on');\nxlabel ('False Alarm probability (in %)');\n\n\nset (gca, 'ylim', probit(pROC_limits));\nset (gca, 'ytick', probit(pticks));\nset (gca, 'yticklabel', ticklabels);\nset (gca, 'ygrid', 'on')\nylabel ('Miss probability (in %)')\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "map_mod_names.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/manip/map_mod_names.m", "size": 3127, "source_encoding": "utf_8", "md5": "6aa97cdf9b5df6095e803bd14f612e52", "text": "function ndx = map_mod_names(ndx,src_map,dst_map)\n% Changes the model names in an index using two maps. The one map\n% lists the training segment for each model name and the other map\n% lists the new model name for each training segment. Existing\n% model names are replaced by new model names that are mapped to\n% the same training segment. 
If a model name is not present in the\n% src_map, it is left unchanged in the output ndx. If a train seg\n% is not present in the dst_map, the source model is dropped from\n% the output ndx (along with all its trials).\n% Inputs:\n% ndx: the Key or Ndx for which the model names must be changed\n% scr_map: the map from current model names to trn seg names\n% dst_map: the map from trn seg names to new model names\n% Outputs:\n% ndx: the Key or Ndx with a modified modelset field\n\nif nargin == 0\n test_this()\n return\nend\n\nassert(nargin==3)\nassert(isa(ndx,'Ndx')||isa(ndx,'Key'))\nassert(isstruct(src_map))\nassert(isstruct(dst_map))\nassert(isfield(src_map,'keySet'))\nassert(isfield(dst_map,'keySet'))\nassert(isfield(src_map,'values'))\nassert(isfield(dst_map,'values'))\n\n[trnsegs,is_present1] = maplookup(src_map,ndx.modelset);\nnum_unchanged = length(is_present1) - sum(is_present1);\nif num_unchanged ~= 0\n log_warning('Keeping %d model name(s) unchanged.\\n',num_unchanged);\nend\n\n[newnames,is_present2] = maplookup(dst_map,trnsegs);\nnum_dropped = length(is_present2) - sum(is_present2);\nif num_dropped ~= 0\n log_warning('Discarding %d row(s) in score matrix.\\n',num_dropped);\nend\n\nkeepndxs = true(length(ndx.modelset),1);\nkeepndxs(is_present1) = is_present2;\nnewmodnames = cell(length(is_present2),1);\nnewmodnames(is_present2) = newnames;\nndx.modelset(is_present1) = newmodnames;\n\nndx.modelset = ndx.modelset(keepndxs);\nif isa(ndx,'Ndx')\n ndx.trialmask = ndx.trialmask(keepndxs,:);\nelse\n ndx.tar = ndx.tar(keepndxs,:);\n ndx.non = ndx.non(keepndxs,:);\nend\n\nfunction test_this()\n\nsrc_map.keySet = {'mod1','mod2','mod3','mod4','mod8'};\nsrc_map.values = {'seg1','seg2','seg3','seg5','seg8'};\ndst_map.keySet = {'seg1','seg2','seg3','seg4','seg5','seg6'};\ndst_map.values = {'new1','new2','new3','new4','new5','new6'};\nndx = Ndx();\n\nfprintf('Test1\\n');\nndx.modelset = {'mod2','mod3','mod4'};\nndx.trialmask = true(3,4);\nfprintf('Input:\\n');\ndisp(ndx.modelset)\nfprintf('Output should be:\\n');\nout = {'new2','new3','new5'};\ndisp(out)\nfprintf('Output is:\\n');\nnewndx = map_mod_names(ndx,src_map,dst_map);\ndisp(newndx.modelset)\n\nfprintf('Test2\\n');\nndx.modelset = {'mod2','mod3','mod10','mod4','mod6'};\nndx.trialmask = true(5,4);\nfprintf('Input:\\n');\ndisp(ndx.modelset)\nfprintf('Output should be:\\n');\nout = {'new2','new3','mod10','new5','mod6'};\ndisp(out)\nfprintf('Output is:\\n');\nnewndx = map_mod_names(ndx,src_map,dst_map);\ndisp(newndx.modelset)\n\nfprintf('Test3\\n');\nndx.modelset = {'mod2','mod3','mod10','mod4','mod8','mod6'};\nndx.trialmask = true(6,4);\nfprintf('Input:\\n');\ndisp(ndx.modelset)\nfprintf('Output should be:\\n');\nout = {'new2','new3','mod10','new5','mod6'};\ndisp(out)\nfprintf('Output is:\\n');\nnewndx = map_mod_names(ndx,src_map,dst_map);\ndisp(newndx.modelset)\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "maplookup.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/manip/maplookup.m", "size": 3084, "source_encoding": "utf_8", "md5": "9e8a55e6a2201b6a0e975469dfe9c299", "text": "function [values,is_present] = maplookup(map,keys)\n% Does a map lookup, to map mutliple keys to multiple values in one call.\n% The parameter 'map' represents a function, where each key maps to a\n% unique value. 
Each value may be mapped to by one or more keys.\n%\n% Inputs:\n% map.keySet: a one-dimensional cell array; \n% or one-dimensional numeric array;\n% or a two dimensional char array, where each row is an \n% element.\n% The elements should be unique. If there are repeated elements, \n% the last one of each will be used.\n%\n% map.values: The values that each member of keySet maps to, in the same\n% order.\n%\n% keys: The array of keys to look up in the map. The class should agree\n% with map.keySet.\n%\n% Outputs:\n% values: a one-dimensional cell array; or one dimensional numeric array;\n% or a two dimensional char array, where rows are string values.\n% Each value corresponds to one of the elements in keys.\n% \n% is_present: logical array of same size as keys, indicating which keys\n% are in map.keySet.\n% Optional: if not asked, then maplookup crashes if one or\n% more keys are not in the map. If is_present is asked,\n% then maplookup does not crash for missing keys. The keys\n% that are in the map are: keys(is_present). \n\nif nargin==0\n test_this();\n return;\nend\n\nassert(nargin==2)\nassert(isstruct(map))\nassert(isfield(map,'keySet'))\nassert(isfield(map,'values'))\n\nif ischar(map.keySet)\n keySetSize = size(map.keySet,1);\nelse\n keySetSize = length(map.keySet);\nend\n\n\nif ischar(map.values)\n valueSize = size(map.values,1);\nelse\n valueSize = length(map.keySet);\nend\n\n\nif ~valueSize==keySetSize\n error('bad map: sizes of keySet and values are different')\nend\n\nif ~strcmp(class(map.keySet),class(keys))\n error('class(keys) = ''%s'', should be class(map.keySet) = ''%s''',class(keys),class(map.keySet));\nend\n\nif ischar(keys)\n [is_present,at] = ismember(keys,map.keySet,'rows');\nelse\n [is_present,at] = ismember(keys,map.keySet);\nend\n\nmissing = length(is_present) - sum(is_present);\nif missing>0\n if nargout<2\n error('%i of keys not in map',missing);\n else\n if ischar(map.values)\n values = map.values(at(is_present),:);\n else\n values = map.values(at(is_present));\n end\n end\nelse\n if ischar(map.values)\n values = map.values(at,:);\n else\n values = map.values(at);\n end\nend\nend\n\nfunction test_this()\nmap.keySet = ['one ';'two ';'three'];\nmap.values = ['a';'b';'c'];\nmaplookup(map,['one ';'one ';'three'])\n\n\n\nmap.keySet = {'one','two','three'};\nmap.values = [1,2,3];\nmaplookup(map,{'one','one','three'})\n\nmap.values = {'a','b','c'};\nmaplookup(map,{'one','one','three'})\n\nmap.keySet = [1 2 3];\nmaplookup(map,[1 1 3])\n%maplookup(map,{1 2 3})\n\n\n[values,is_present] = maplookup(map,[1 1 3 4 5])\n\n\nfprintf('Now testing error message:\\n');\nmaplookup(map,[1 1 3 4 5])\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "test_binary_classifier.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/test_binary_classifier.m", "size": 1332, "source_encoding": "utf_8", "md5": "9683ce2757d7eb67c8a8ec37954cbab4", "text": "function obj_val = test_binary_classifier(objective_function,classf, ...\n\t\t\t\t prior,system,input_data)\n% Returns the result of the objective function evaluated on the\n% scores.\n%\n% Inputs:\n% objective_function: a function handle to the objective function\n% to feed the scores into\n% classf: length T vector where T is the number of trials with entries +1 for target scores; -1 \n% for non-target scores\n% prior: the prior (given to the system that produced the scores)\n% system: a function handle 
to the system to be run\n% input_data: the data to run the system on (to produce scores)\n%\n% Outputs\n% obj_val: the value returned by the objective function\n\nif nargin==0\n test_this();\n return;\nend\n\nscores = system(input_data);\nobj_val = evaluate_objective(objective_function,scores,classf,prior);\n\nend\n\nfunction test_this()\n\nnum_trials = 100;\ninput_data = randn(20,num_trials);\n\nprior = 0.5;\nmaxiters = 1000;\nclassf = [ones(1,num_trials/2),-ones(1,num_trials/2)];\ntar = input_data(:,1:num_trials/2);\nnon = input_data(:,num_trials/2+1:end);\n[sys,run_sys,w0] = linear_fusion_factory(tar,non);\n\nw = train_binary_classifier(@cllr_obj,classf,sys,[],w0,[],maxiters,[],prior,[],true);\n\nsystem = @(data) run_sys(w,data);\ntest_binary_classifier(@cllr_obj,classf,prior,system,input_data)\n\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "evaluate_objective.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/evaluate_objective.m", "size": 1417, "source_encoding": "utf_8", "md5": "70262971965caac5629612bd125dd0a2", "text": "function obj_val = evaluate_objective(objective_function,scores,classf, ...\n\t\t\t\t prior)\n% Returns the result of the objective function evaluated on the\n% scores.\n%\n% Inputs:\n% objective_function: a function handle to the objective function\n% to feed the scores into\n% scores: length T vector of scores to be evaluated where T is\n% the number of trials\n% classf: length T vector with entries +1 for target scores; -1 \n% for non-target scores\n% prior: the prior (given to the system that produced the scores)\n%\n% Outputs\n% obj_val: the value returned by the objective function\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif ~exist('objective_function','var') || isempty(objective_function)\n objective_function = @(w,T,weights,logit_prior) cllr_obj(w,T,weights,logit_prior);\nend\n\n\nlogit_prior = logit(prior);\nprior_entropy = objective_function([0;0],[1,-1],[prior,1-prior],logit_prior);\n\nntar = length(find(classf>0));\nnnon = length(find(classf<0));\nN = nnon+ntar;\n\nweights = zeros(1,N);\n\nweights(classf>0) = prior/(ntar*prior_entropy);\nweights(classf<0) = (1-prior)/(nnon*prior_entropy);\n\n\nobj_val = objective_function(scores,classf,weights,logit_prior);\n\nend\n\nfunction test_this()\nnum_trials = 20;\nscores = randn(1,num_trials);\nclassf = [ones(1,num_trials/2),-ones(1,num_trials/2)];\nprior = 0.5;\nres = evaluate_objective(@cllr_obj,scores,classf,prior)\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "train_binary_classifier.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/train_binary_classifier.m", "size": 3938, "source_encoding": "utf_8", "md5": "de96b98d88aa8e3d0c36785a2f9a3a94", "text": "function [w,cxe,w_pen,optimizerState,converged] = ...\n train_binary_classifier(classifier,classf,w0,objective_function,prior,...\n penalizer,lambda,maxiters,maxCG,optimizerState,...\n quiet,cstepHessian)\n% \n% Supervised training of a regularized fusion.\n%\n%\n% Inputs:\n%\n% classifier: MV2DF function handle that maps parameters to llr-scores.\n% Note: The training data is already wrapped in this handle.\n%\n% classf: 1-by-N row of class labels: \n% -1 for non_target, \n% +1 for target, \n% 0 for ignore\n%\n% w0: initial parameters. 
This is NOT optional. \n%\n% objective_function: A function handle to an Mv2DF function that\n% maps the output (llr-scores) of classifier, to \n% the to-be-minimized objective (called cxe). \n% optional, use [] to invoke 'cllr_obj'.\n% \n% prior: a prior probability for target to set the 'operating point' \n% of the objective function.\n% optional: use [] to invoke default of 0.5\n%\n% penalizer: MV2DF function handle that maps parameters to a positive \n% regularization penalty.\n%\n% lambda: a weighting for the penalizer\n% \n% maxiters: the maximum number of Newton Trust Region optimization\n% iterations to perform. Note, the user can make maxiters\n% small, examine the solution and then continue training:\n% -- see w0 and optimizerState.\n%\n%\n%\n% optimizerState: In this implementation, it is the trust region radius.\n% optional: \n% omit or use []\n% If not supplied when resuming iteration,\n% this may cost some extra iterations. \n% Resume further iteration thus:\n% [w1,...,optimizerState] = train_binary_classifier(...);\n% ... examine solution w1 ...\n% [w2,...,optimizerState] = train_binary_classifier(...,w1,...,optimizerState);\n% \n%\n% quiet: if false, outputs more info during training\n%\n%\n% Outputs:\n% w: the solution. \n% cxe: normalized multiclass cross-entropy of the solution. \n% The range is 0 (good) to 1(useless).\n%\n% optimizerState: see above, can be used to resume iteration.\n% \n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif ~exist('maxCG','var') || isempty(maxCG)\n maxCG = 100;\nend\n\n\nif ~exist('optimizerState','var')\n optimizerState=[];\nend\n\nif ~exist('prior','var') || isempty(prior)\n prior = 0.5;\nend\n\nif ~exist('objective_function','var') || isempty(objective_function)\n objective_function = @(w,T,weights,logit_prior) cllr_obj(w,T,weights,logit_prior);\nend\n\n%prior_entropy = -prior*log(prior)-(1-prior)*log(1-prior);\nprior_entropy = objective_function([0;0],[1,-1],[prior,1-prior],logit(prior));\n\nclassf = classf(:)';\n\nntar = length(find(classf>0));\nnnon = length(find(classf<0));\nN = nnon+ntar;\n\nweights = zeros(size(classf));\nweights(classf>0) = prior/(ntar*prior_entropy);\nweights(classf<0) = (1-prior)/(nnon*prior_entropy);\n%weights remain 0, where classf==0\n\n\nw=[]; \n\nif exist('penalizer','var') && ~isempty(penalizer)\n obj1 = objective_function(classifier,classf,weights,logit(prior));\n obj2 = penalizer(w);\n obj = sum_of_functions(w,[1,lambda],obj1,obj2);\nelse\n obj = objective_function(classifier,classf,weights,logit(prior));\nend\n\nw0 = w0(:);\n\nif exist('cstepHessian','var') &&~ isempty(cstepHessian)\n obj = replace_hessian([],obj,cstepHessian);\nend\n\n[w,y,optimizerState,converged] = trustregion_newton_cg(obj,w0,maxiters,maxCG,optimizerState,[],1,quiet);\n\nif exist('penalizer','var') && ~isempty(penalizer)\n w_pen = lambda*obj2(w);\nelse\n w_pen = 0;\nend\n\n\ncxe = y-w_pen;\nif ~quiet\n fprintf('cxe = %g, pen = %g\\n',cxe,w_pen);\nend\n\n\n\n\nfunction test_this()\n\n%invoke test for linear_fuser, which calls train_binary_classifier\nlinear_fuser();\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v5.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v5.m", "size": 921, "source_encoding": "utf_8", "md5": "f82cbe0c178dae2a667496466b612770", "text": "function [fusion,w0] = qfuser_v5(w,scores,wfuse)\n\nif nargin==0\n test_this();\n return;\nend\n\n\n% 
block 1\nf1 = linear_fuser([],scores.scores);\nw1 = wfuse;\n[whead,wtail] = splitvec_fh(length(w1));\nf1 = f1(whead);\n\n% block 2\nmodelQ = scores.modelQ;\n[q,n1] = size(modelQ);\nmodelQ = [modelQ;ones(1,n1)];\nsegQ = scores.segQ;\n[q2,n2] = size(segQ);\nsegQ = [segQ;ones(1,n2)];\nassert(q==q2);\nq = q + 1;\n\nwq = q*(q+1)/2;\nf2 = AWB_fh(modelQ',segQ,tril_to_symm_fh(q,wtail));\nw2 = zeros(wq,1);\n\n% assemble\nfusion = sum_of_functions(w,[1,1],f1,f2);\nw0 = [w1;w2];\n\n\n\n\nend\n\n\nfunction test_this()\n\nm = 5;\nk = 2;\nn1 = 4;\nn2 = 5;\n\nscores.sindx = [1,2,3];\nscores.qindx = [4,5];\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\nwfuse = [1,2,3,4]';\n\n[fusion,w0] = qfuser_v4([],scores,wfuse);\n\n%test_MV2DF(fusion,w0);\n\n[fusion(w0),linear_fuser(wfuse,scores.scores(scores.sindx,:))]\n\n%fusion(w0)\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v2.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v2.m", "size": 1166, "source_encoding": "utf_8", "md5": "e10bf159cbd2dacaf85be8d4a90554f6", "text": "function [fusion,params] = qfuser_v2(w,scores)\n% \n% Inputs:\n%\n% scores: the primary detection scores, for training\n% D-by-T matrix of T scores for D input systems\n%\n% quality_input: K-by-T matrix of quality measures\n%\n% Output: \n% fusion: is numeric if w is numeric, or a handle to an MV2DF, representing:\n%\n% y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta)\n%\n\nif nargin==0\n test_this();\n return;\nend\n\n\n% Create building blocks\n[Cal,params1] = parallel_cal_augm([],scores.scores);\nm = size(scores.scores,1)+1;\n[P,params2] = QQtoP(params1.tail,scores.modelQ,scores.segQ,m);\n\n%params.get_w0 = @(wfuse) [params1.get_w0() ;params2.get_w0()];\nparams.get_w0 = @(wfuse) [params1.get_w0(wfuse) ;params2.get_w0()];\nparams.tail = params2.tail;\n\n\n\n% Assemble building blocks\n\n% modulate linear fusion with quality\nfusion = sumcolumns_fh(m,dottimes_of_functions(w,P,Cal));\n\n\n\nend\n\n\nfunction test_this()\n\nm = 3;\nk = 2;\nn1 = 4;\nn2 = 5;\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\n[fusion,params] = qfuser_v2([],scores);\n\nw0 = params.get_w0();\ntest_MV2DF(fusion,w0);\n\nfusion(w0)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "linear_fuser.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/linear_fuser.m", "size": 2654, "source_encoding": "utf_8", "md5": "627fab3e121d1d87d9fad2a3234d26f8", "text": "function [fusion,params] = linear_fuser(w,scores)\n% \n% Does affine fusion of scores: It does a weighted sum of scores and adds\n% an offset.\n%\n% Inputs:\n% scores: M-by-N matrix of N scores for each of M input systems.\n% w: Optional: \n% - when supplied, the output 'fusion' is the vector of fused scores.\n% - when w=[], the output 'fusion' is a function handle, to be used \n% for training the fuser.\n% w is a (K+1)-vector, with one weight per system, followed by the\n% offset.\n%\n% fusion: if w is given, fusion is a vector of N fused scores.\n% if w is not given, fusion is a function handle, so that\n% fusion(w) = @(w) linear_fusion(scores,w).\n% w0: default values for w, to initialize training.\n%\n% For training use: \n% 
[fuser,params] = linear_fuser(train_scores);\n% w0 = get_w0();\n% w = train_binary_classifier(fuser,...,w0,...);\n%\n% For test use:\n% fused_scores = linear_fuser(test_scores,w);\n%\n\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif ~exist('scores','var') || isempty(scores)\n fusion = sprintf(['linear fuser:',repmat(' %g',1,length(w))],w);\n return;\nend\n\nwsz = size(scores,1)+1;\n[whead,wtail] = splitvec_fh(wsz,w);\nparams.get_w0 = @() zeros(wsz,1);\n%params.get_w0 = @() randn(wsz,1);\nparams.tail = wtail;\n\n\nfusion = fusion_mv2df(whead,scores);\n\nend\n\nfunction test_this()\n\n\nN = 100;\ndim = 2; % number of used systems\n\n% ----------------synthesize training data -------------------\nrandn('state',0);\nmeans = randn(dim,2)*8; %signal\n[tar,non] = make_data(N,means);\n\n% ------------- create system ------------------------------\n\n[fuser,params] = linear_fuser([],[tar,non]);\n\n% ------------- train it ------------------------------\n\nntar = size(tar,2);\nnnon = size(non,2);\nclassf = [ones(1,ntar),-ones(1,nnon)];\n\nprior = 0.1;\nmaxiters = 50;\nquiet = true;\nobjfun = [];\nw0 = params.get_w0();\n[w,cxe] = train_binary_classifier(fuser,classf,w0,objfun,prior,[],0,maxiters,[],[],quiet);\nfprintf('train Cxe = %g\\n',cxe);\n\n% ------------- test it ------------------------------\n\n[tar,non] = make_data(N,means);\n\n\nscores = [tar,non];\ntail = [1;2;3];\nwbig = [w;tail];\n[fused_scores,params] = linear_fuser(wbig,scores);\ncheck_tails = [tail,params.tail],\ncxe = evaluate_objective(objfun,fused_scores,classf,prior);\nfprintf('test Cxe = %g\\n',cxe);\n\nplot(fused_scores);\n\nend\n\nfunction [tar,non] = make_data(N,means)\n[dim,K] = size(means);\nX = 5*randn(dim,K*N); % noise\nii = 1:N;\nfor k=1:K\n X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii));\n ii = ii+N;\nend\nN = K*N;\ntar = X(:,1:N/2);\nnon = X(:,N/2+(1:N/2));\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v3.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v3.m", "size": 1290, "source_encoding": "utf_8", "md5": "a2245f6284afa9f203096fc932e8cf07", "text": "function [fusion,params] = qfuser_v3(w,scores)\n% \n% Inputs:\n%\n% scores: the primary detection scores, for training\n% D-by-T matrix of T scores for D input systems\n%\n% quality_input: K-by-T matrix of quality measures\n%\n% Output: \n% fusion: is numeric if w is numeric, or a handle to an MV2DF, representing:\n%\n% y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta)\n%\n\nif nargin==0\n test_this();\n return;\nend\n\n\n% Create building blocks\n[Cal,params1] = parallel_cal([],scores.scores);\nm = size(scores.scores,1);\n[LLH,params2] = QQtoLLH(params1.tail,scores.modelQ,scores.segQ,m);\nP = LLH;\n%P = exp_mv2df(logsoftmax_trunc_mv2df(LLH,m));\n\nW = reshape(params2.get_w0(),[],m);\nW(:) = 0;\nW(end,:) = 0.5/(m+1);\n%params.get_w0 = @(wfuse) [params1.get_w0(wfuse) ;params2.get_w0()];\nparams.get_w0 = @(wfuse) [params1.get_w0(wfuse) ;W(:)];\nparams.tail = params2.tail;\n\n\n\n% Assemble building blocks\n\n% modulate linear fusion with quality\nfusion = sumcolumns_fh(m,dottimes_of_functions(w,P,Cal));\n\n\n\nend\n\n\nfunction test_this()\n\nm = 3;\nk = 2;\nn1 = 4;\nn2 = 5;\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\n[fusion,params] = qfuser_v3([],scores);\n\nw0 = params.get_w0([1 2 3 
4]');\ntest_MV2DF(fusion,w0);\n\nfusion(w0)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v6.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v6.m", "size": 1013, "source_encoding": "utf_8", "md5": "0bcb6e5fbd79494afd1c1c36eff1e95c", "text": "function [fusion,w0] = qfuser_v6(w,scores,wfuse)\n\nif nargin==0\n test_this();\n return;\nend\n\n\n% block 1\nf1 = linear_fuser([],scores.scores);\nw1 = wfuse;\n[whead,wtail] = splitvec_fh(length(w1));\nf1 = f1(whead);\n\n% block 2\nmodelQ = scores.modelQ;\n[q,n1] = size(modelQ);\nmodelQ = [modelQ;ones(1,n1)];\nsegQ = scores.segQ;\n[q2,n2] = size(segQ);\nsegQ = [segQ;ones(1,n2)];\nassert(q==q2);\nq = q + 1;\n\nwq = q*(q+1)/2;\nr = AWB_fh(modelQ',segQ,tril_to_symm_fh(q));\n[whead,wtail] = splitvec_fh(wq,wtail);\nr = r(whead);\nw2 = zeros(wq,1);w2(end) = -5;\n\n\n% block 3\ns = AWB_fh(modelQ',segQ,tril_to_symm_fh(q,wtail));\nw3 = w2;\n\n\n\n% assemble\nrs = stack([],r,s);\nfusion = scalibration_fh(stack(w,f1,rs));\nw0 = [w1;w2;w3];\n\n\n\n\nend\n\n\nfunction test_this()\n\nm = 3;\nk = 2;\nn1 = 4;\nn2 = 5;\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\nwfuse = [1,2,3,4]';\n\n[fusion,w0] = qfuser_v6([],scores,wfuse);\n\ntest_MV2DF(fusion,w0);\n\n[fusion(w0),linear_fuser(wfuse,scores.scores)]\n\n%fusion(w0)\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v1.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v1.m", "size": 1137, "source_encoding": "utf_8", "md5": "8dcda09e63d0f7e6a3f1fc2298b84d7e", "text": "function [fusion,params] = qfuser_v1(w,scores)\n% \n% Inputs:\n%\n% scores: the primary detection scores, for training\n% D-by-T matrix of T scores for D input systems\n%\n% quality_input: K-by-T matrix of quality measures\n%\n% Output: \n% fusion: is numeric if w is numeric, or a handle to an MV2DF, representing:\n%\n% y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta)\n%\n\nif nargin==0\n test_this();\n return;\nend\n\n\n% Create building blocks\n\n[linfusion,params1] = linear_fuser([],scores.scores);\n[Q,params2] = outerprod_of_sigmoids(params1.tail,scores.modelQ,scores.segQ);\n\nparams.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)];\nparams.tail = params2.tail;\n\n\n\n% Assemble building blocks\n\n% modulate linear fusion with quality\nfusion = dottimes_of_functions([],Q,linfusion);\n\n\nif ~isempty(w)\n fusion = fusion(w);\nend\n\nend\n\n\nfunction test_this()\n\nm = 3;\nk = 2;\nn1 = 4;\nn2 = 5;\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\nssat = 0.99;\n[fusion,params] = qfuser_v1([],scores);\n\nw0 = params.get_w0(ssat);\ntest_MV2DF(fusion,w0);\n\nfusion(w0)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v7.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v7.m", "size": 1107, "source_encoding": "utf_8", "md5": "8d156ad2d97a7aa1b90d702cb2f0a195", "text": "function [fusion,w0] = qfuser_v7(w,scores,wfuse)\n\nif nargin==0\n test_this();\n return;\nend\n\n\n% block 1\nf1 = linear_fuser([],scores.scores);\nw1 = 
wfuse;\n[whead,wtail] = splitvec_fh(length(w1));\nf1 = f1(whead);\n\n% block 2\nmodelQ = scores.modelQ;\n[q,n1] = size(modelQ);\nmodelQ = [modelQ;ones(1,n1)];\nsegQ = scores.segQ;\n[q2,n2] = size(segQ);\nsegQ = [segQ;ones(1,n2)];\nassert(q==q2);\nq = q + 1;\n\nwq = q*(q+1)/2;\nf2 = AWB_fh(modelQ',segQ,tril_to_symm_fh(q));\nw2 = zeros(wq,1);\n[whead,rs] = splitvec_fh(wq,wtail);\nf2 = f2(whead);\n\n% block 3\nn = size(scores.scores,2);\nmap = @(rs) repmat(rs,n,1);\ntransmap =@(RS) sum(reshape(RS,2,[]),2);\nRS = linTrans(rs,map,transmap);\nw3 = [-10;-10];\n\n\n\n% assemble\nf12 = sum_of_functions([],[1,1],f1,f2);\nXRS = stack(w,f12,RS);\nfusion = scalibration_fh(XRS);\n\n\n\nw0 = [w1;w2;w3];\n\n\n\n\nend\n\n\nfunction test_this()\n\nm = 3;\nk = 2;\nn1 = 4;\nn2 = 5;\n\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\nwfuse = [1,2,3,4]';\n\n[fusion,w0] = qfuser_v7([],scores,wfuse);\n\ntest_MV2DF(fusion,w0);\n\n[fusion(w0),linear_fuser(wfuse,scores.scores)]\n\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "qfuser_v4.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v4.m", "size": 1388, "source_encoding": "utf_8", "md5": "cd65aea99057c92c142fc7e024dc1d53", "text": "function [fusion,w0] = qfuser_v4(w,scores,wfuse)\n% qindx: index set for rows of scores.scores which are per-trial quality\n% measures.\n%\n% sindx: index set for rows of scores.scores which are normal discriminative \n% scores.\n\nif nargin==0\n test_this();\n return;\nend\n\n\nsindx = scores.sindx;\nqindx = scores.qindx;\nm =length(sindx);\n\n\n% Create building blocks\n[Cal,w1] = parallel_cal([],scores.scores(sindx,:),wfuse);\n[whead,wtail] = splitvec_fh(length(w1));\nCal = Cal(whead);\n\n\n[LLH1,w2] = QQtoLLH([],scores.modelQ,scores.segQ,m);\n[whead,wtail] = splitvec_fh(length(w2),wtail);\nLLH1 = LLH1(whead);\nW2 = reshape(w2,[],m);\nW2(:) = 0;\nW2(end,:) = 0.5/(m+1);\nw2 = W2(:);\n\n\n[LLH2,w3] = QtoLLH([],scores.scores(qindx,:),m);\nLLH2 = LLH2(wtail);\n\n\nLLH = sum_of_functions([],[1,1],LLH1,LLH2);\n%LLH = LLH1;\n\nP = LLH;\n%P = exp_mv2df(logsoftmax_trunc_mv2df(LLH,m));\n\n\nw0 = [w1;w2;w3];\n\n\n\n% Assemble building blocks\n\n% modulate linear fusion with quality\nfusion = sumcolumns_fh(m,dottimes_of_functions(w,P,Cal));\n\n\n\nend\n\n\nfunction test_this()\n\nm = 5;\nk = 2;\nn1 = 4;\nn2 = 5;\n\nscores.sindx = [1,2,3];\nscores.qindx = [4,5];\n\nscores.scores = randn(m,n1*n2);\nscores.modelQ = randn(k,n1);\nscores.segQ = randn(k,n2);\n\nwfuse = [1,2,3,4]';\n\n[fusion,w0] = qfuser_v4([],scores,wfuse);\n\n%test_MV2DF(fusion,w0);\n\n[fusion(w0),linear_fuser(wfuse,scores.scores(scores.sindx,:))]\n\n%fusion(w0)\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scal_fuser.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scal_fuser.m", "size": 2918, "source_encoding": "utf_8", "md5": "7e49185b74a064be721d9c243a08c07f", "text": "function [fusion,params] = scal_fuser(w,scores)\n% \n% Does scal calibration\n%\n% Inputs:\n% scores: M-by-N matrix of N scores for each of M input systems.\n% w: Optional: \n% - when supplied, the output 'fusion' is the vector of fused scores.\n% - when w=[], the output 'fusion' is a function handle, to be used \n% for training the 
fuser.\n% w is a (K+1)-vector, with one weight per system, followed by the\n% offset.\n%\n% fusion: if w is given, fusion is a vector of N fused scores.\n% if w is not given, fusion is a function handle, so that\n% fusion(w) = @(w) linear_fusion(scores,w).\n% w0: default values for w, to initialize training.\n%\n% For training use: \n% [fuser,params] = scal_fuser(train_scores);\n% w0 = params.get_w0();\n% w = train_binary_classifier(fuser,...,w0,...);\n%\n% For test use:\n% fused_scores = scal_fuser(test_scores,w);\n%\n\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif ~exist('scores','var') || isempty(scores)\n fusion = sprintf(['scal fuser:',repmat(' %g',1,length(w))],w);\n return;\nend\n\n\n[m,n] = size(scores);\nwsz = size(scores,1)+1;\n[wlin,wtail] = splitvec_fh(wsz);\n[rs,wtail] = splitvec_fh(2,wtail);\n\nx = fusion_mv2df(wlin,scores);\nxrs = stack([],x,rs);\nfusion = scal_simple_fh(xrs);\n\nif ~isempty(w)\n fusion = fusion(w);\n wtail = wtail(w);\nend\n\nparams.get_w0 = @() [zeros(wsz,1);-10;-10];\nparams.tail = wtail;\n\n\n\nend\n\nfunction test_this()\n\n\nN = 10;\ndim = 2; % number of used systems\n\n% ----------------synthesize training data -------------------\nrandn('state',0);\nmeans = randn(dim,2)*8; %signal\n[tar,non] = make_data(N,means);\ntar = [tar,[min(non(1,:));min(non(2,:))]];\nnon = [non,[max(tar(1,:));max(tar(2,:))]];\n\n% ------------- create system ------------------------------\n\n[fuser,params] = scal_fuser([],[tar,non]);\nw0 = params.get_w0();\ntest_mv2df(fuser,w0);\n\n\nreturn;\n\n% ------------- train it ------------------------------\n\nntar = size(tar,2);\nnnon = size(non,2);\nclassf = [ones(1,ntar),-ones(1,nnon)];\n\nprior = 0.1;\nmaxiters = 50;\nquiet = false;\nobjfun = [];\nw0 = params.get_w0();\n[w,cxe] = train_binary_classifier(fuser,classf,w0,objfun,prior,[],0,maxiters,[],[],quiet);\nfprintf('train Cxe = %g\\n',cxe);\n\n% ------------- test it ------------------------------\n\n[tar,non] = make_data(N,means);\nntar = size(tar,2);\nnnon = size(non,2);\nclassf = [ones(1,ntar),-ones(1,nnon)];\n\n\nscores = [tar,non];\ntail = [1;2;3];\nwbig = [w;tail];\n[fused_scores,params] = scal_fuser(wbig,scores);\ncheck_tails = [tail,params.tail],\ncxe = evaluate_objective(objfun,fused_scores,classf,prior);\nfprintf('test Cxe = %g\\n',cxe);\n\nplot(fused_scores);\n\nend\n\nfunction [tar,non] = make_data(N,means)\n[dim,K] = size(means);\nX = 5*randn(dim,K*N); % noise\nii = 1:N;\nfor k=1:K\n X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii));\n ii = ii+N;\nend\nN = K*N;\ntar = X(:,1:N/2);\nnon = X(:,N/2+(1:N/2));\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scal_fuser_slow.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scal_fuser_slow.m", "size": 2972, "source_encoding": "utf_8", "md5": "abc2a78dc2b6cf08cfdd508f4dabdb71", "text": "function [fusion,params] = scal_fuser_slow(w,scores)\n% \n% Does scal calibration\n%\n% Inputs:\n% scores: M-by-N matrix of N scores for each of M input systems.\n% w: Optional: \n% - when supplied, the output 'fusion' is the vector of fused scores.\n% - when w=[], the output 'fusion' is a function handle, to be used \n% for training the fuser.\n% w is a (K+1)-vector, with one weight per system, followed by the\n% offset.\n%\n% fusion: if w is given, fusion is a vector of N fused scores.\n% if w is not given, fusion is a function handle, so that\n% fusion(w) = @(w) 
linear_fusion(scores,w).\n% w0: default values for w, to initialize training.\n%\n% For training use: \n% [fuser,params] = scal_fuser(train_scores);\n% w0 = params.get_w0();\n% w = train_binary_classifier(fuser,...,w0,...);\n%\n% For test use:\n% fused_scores = scal_fuser(test_scores,w);\n%\n\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif ~exist('scores','var') || isempty(scores)\n fusion = sprintf(['scal fuser:',repmat(' %g',1,length(w))],w);\n return;\nend\n\n\n[m,n] = size(scores);\nwsz = size(scores,1)+1;\n[wlin,wtail] = splitvec_fh(wsz);\n[rs,wtail] = splitvec_fh(2,wtail);\n\nmap = @(rs) repmat(rs,n,1);\ntransmap =@(RS) sum(reshape(RS,2,[]),2);\nRS = linTrans(rs,map,transmap);\n\nX = fusion_mv2df(wlin,scores);\nXRS = stack([],X,RS);\nfusion = scalibration_fh(XRS);\n\nif ~isempty(w)\n fusion = fusion(w);\n wtail = wtail(w);\nend\n\nparams.get_w0 = @() [zeros(wsz,1);-5;-5];\nparams.tail = wtail;\n\n\n\nend\n\nfunction test_this()\n\n\nN = 1000;\ndim = 2; % number of used systems\n\n% ----------------synthesize training data -------------------\nrandn('state',0);\nmeans = randn(dim,2)*8; %signal\n[tar,non] = make_data(N,means);\ntar = [tar,[min(non(1,:));min(non(2,:))]];\nnon = [non,[max(tar(1,:));max(tar(2,:))]];\n\n% ------------- create system ------------------------------\n\n[fuser,params] = scal_fuser([],[tar,non]);\n\n% ------------- train it ------------------------------\n\nntar = size(tar,2);\nnnon = size(non,2);\nclassf = [ones(1,ntar),-ones(1,nnon)];\n\nprior = 0.1;\nmaxiters = 50;\nquiet = false;\nobjfun = [];\nw0 = params.get_w0();\n[w,cxe] = train_binary_classifier(fuser,classf,w0,objfun,prior,[],0,maxiters,[],[],quiet);\nfprintf('train Cxe = %g\\n',cxe);\n\n% ------------- test it ------------------------------\n\n[tar,non] = make_data(N,means);\nntar = size(tar,2);\nnnon = size(non,2);\nclassf = [ones(1,ntar),-ones(1,nnon)];\n\n\nscores = [tar,non];\ntail = [1;2;3];\nwbig = [w;tail];\n[fused_scores,params] = scal_fuser(wbig,scores);\ncheck_tails = [tail,params.tail],\ncxe = evaluate_objective(objfun,fused_scores,classf,prior);\nfprintf('test Cxe = %g\\n',cxe);\n\nplot(fused_scores);\n\nend\n\nfunction [tar,non] = make_data(N,means)\n[dim,K] = size(means);\nX = 5*randn(dim,K*N); % noise\nii = 1:N;\nfor k=1:K\n X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii));\n ii = ii+N;\nend\nN = K*N;\ntar = X(:,1:N/2);\nnon = X(:,N/2+(1:N/2));\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsumexp_special.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/logsumexp_special.m", "size": 1102, "source_encoding": "utf_8", "md5": "a15ffa60b181fdc8b0a1e3fb4bcfd403", "text": "function [y,deriv] = logsumexp_special(w)\n% This is a MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% If w = [x;r], where r is scalar and x vector, then\n% y = log(exp(x)+exp(r))\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)logsumexp_special(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = logsumexp_special([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n[r,x] = get_rx(w);\nrmax = (r>x);\nrnotmax = ~rmax;\ny = zeros(size(x));\ny(rmax) = log(exp(x(rmax)-r)+1)+r;\ny(rnotmax) = log(exp(r-x(rnotmax))+1)+x(rnotmax);\n\n\n\nif nargout>1\n deriv = @(Dy) deriv_this(Dy,r,x,y);\nend\n\n\nend\n\nfunction [r,x] = get_rx(w)\nw = w(:);\nr = w(end);\nx = w(1:end-1);\nend\n\n\nfunction [g,hess,linear] = deriv_this(dy,r,x,y)\ngr = exp(r-y);\ngx = exp(x-y);\ng = [gx.*dy(:);gr.'*dy(:)];\nlinear = false;\nhess = @(dw) hess_this(dw,dy,gr,gx);\nend\n\nfunction [h,Jv] = hess_this(dw,dy,gr,gx)\n[dr,dx] = get_rx(dw);\np = gr.*gx.*dy;\nh = [p.*(dx-dr);dr*sum(p)-dx.'*p];\n\nif nargout>1\n Jv = gx.*dx+dr*gr;\nend\nend\n\n\nfunction test_this()\nf = logsumexp_special([]);\n\ntest_MV2DF(f,randn(5,1));\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scalibration_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scalibration_fh.m", "size": 1735, "source_encoding": "utf_8", "md5": "b9918a8e2a9fa07dfcef33933013931b", "text": "function f = scalibration_fh(w)\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the s-calibration function. The whole mapping works like\n% this, in MATLAB-style pseudocode:\n%\n% If y = f([x;r;s]), where x,r,s are column vectors of size m, then y\n% is a column vector of size m and\n%\n% y = log( exp(x) + exp(r) ) + log( exp(-s) + 1 ) \n% - log( exp(x) + exp(-s) ) - log( exp(r) + 1 ) \n%\n% Viewed as a data-dependent calibration transform from x to y, with \n% parameters r and s, then: \n%\n% r: is the log-odds that x is a typical non-target score, given that \n% there really is a target.\n%\n% s: is the log-odds that x is a typical target score, given that \n% there really is a non-target.\n%\n% Ideally r and s should be large negative, in which case this is almost \n% an identity transform from x to y, but with saturation at large \n% positive and negative values. Increasing r increases the lower\n% saturation level. 
Increasing s decreases the upper saturation level.\n\nif nargin==0\n test_this();\n return;\nend\n\n\nx = columnJofN_fh(1,3);\nr = columnJofN_fh(2,3);\ns = columnJofN_fh(3,3);\n\nneg = @(x)-x;\nnegr = linTrans(r,neg,neg);\nnegs = linTrans(s,neg,neg);\n\nnum1 = logsumexp_fh(2,2,stack([],x,r));\nnum2 = neglogsigmoid_fh(s);\nden1 = neglogsigmoid_fh(negr);\nden2 = logsumexp_fh(2,2,stack([],x,negs));\n\nf = sum_of_functions([],[1 1],num1,num2);\nf = sum_of_functions([],[1 -1],f,den1);\nf = sum_of_functions([],[1 -1],f,den2);\n\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\n\nfunction test_this()\nn = 3;\nx = randn(n,1);\nr = randn(n,1);\ns = randn(n,1);\nX = [x;r;s];\nf = scalibration_fh([]);\ntest_MV2DF(f,X(:));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scalibration_fragile_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scalibration_fragile_fh.m", "size": 2389, "source_encoding": "utf_8", "md5": "8eec3ccf6bcd5f130a3d399194acd676", "text": "function f = scalibration_fragile_fh(direction,w)\n%\n% Don't use this function, it is just for reference. It will break for\n% large argument values.\n%\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the logsumexp function. The whole mapping works like\n% this, in MATLAB-style psuedocode:\n%\n% F: R^(m*n) --> R^n, where y = F(x) is computed thus:\n%\n% n = length(x)/m\n% If direction=1, X = reshape(x,m,n), or \n% if direction=1, X = reshape(x,n,m). \n% y = log(sum(exp(X),direction))\n%\n% Inputs: \n% m: the number of inputs to each individual logsumexp calculation.\n% direction: 1 sums down columns, or 2 sums accross rows.\n% w: optional, if ssupplied \n%\n% Outputs:\n% f: a function handle to the MV2DF described above.\n%\n% see: MV2DF_API_DEFINITION.readme\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nf = vectorized_function([],@(X)F0(X,direction),3,direction);\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\nfunction [y,f1] = F0(X,dr)\nif dr==1\n x = X(1,:);\n p = X(2,:);\n q = X(3,:);\nelse\n x = X(:,1);\n p = X(:,2);\n q = X(:,3);\nend\nexpx = exp(x);\nnum = (expx-1).*p+1;\nden = (expx-1).*q+1;\ny = log(num)-log(den);\nf1 = @() F1(expx,p,q,num,den,dr);\nend\n\nfunction [J,f2,linear] = F1(expx,p,q,num,den,dr)\nlinear = false;\nif dr==1\n J = [expx.*(p-q)./(num.*den);(expx-1)./num;-(expx-1)./den];\nelse\n J = [expx.*(p-q)./(num.*den),(expx-1)./num,-(expx-1)./den];\nend\nf2 = @(dX) F2(dX,expx,p,q,num,den,dr);\nend\n\nfunction H = F2(dX,expx,p,q,num,den,dr)\nd2dx2 = -expx.*(p-q).*(p+q+p.*q.*(expx.^2-1)-1)./(num.^2.*den.^2);\nd2dxdp = expx./num.^2;\nd2dxdq = -expx./den.^2;\nd2dp2 = -(expx-1).^2./num.^2;\nd2dq2 = (expx-1).^2./den.^2;\nif dr==1\n dx = dX(1,:);\n dp = dX(2,:);\n dq = dX(3,:);\n H = [\n dx.*d2dx2+dp.*d2dxdp+dq.*d2dxdq; ...\n dx.*d2dxdp+dp.*d2dp2; ...\n dx.*d2dxdq+dq.*d2dq2...\n ];\nelse\n dx = dX(:,1);\n dp = dX(:,2);\n dq = dX(:,3);\n H = [\n dx.*d2dx2+dp.*d2dxdp+dq.*d2dxdq, ...\n dx.*d2dxdp+dp.*d2dp2, ...\n dx.*d2dxdq+dq.*d2dq2...\n ];\nend\nend\n\n\n\nfunction test_this()\nn = 10;\nx = randn(1,n);\np = rand(1,n);\nq = rand(1,n);\nX = [x;p;q];\n\nfprintf('testing dir==1:\\n');\nf = scalibration_fragile_fh(1);\ntest_MV2DF(f,X(:));\n\nfprintf('\\n\\n\\ntesting dir==2:\\n');\nf = scalibration_fragile_fh(2);\nX = X';\ntest_MV2DF(f,X(:));\n\nend\n"} +{"plateform": "github", 
"repo_name": "bsxfan/meta-embeddings-master", "name": "scal_simple_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scal_simple_fh.m", "size": 1903, "source_encoding": "utf_8", "md5": "b6e3992c13b4424d2129302a3c51424c", "text": "function f = scal_simple_fh(w)\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the s-calibration function. The whole mapping works like\n% this, in MATLAB-style pseudocode:\n%\n% If y = f([x;r;s]), where r,s are scalar, x is column vector of size m, \n% then y is a column vector of size m and\n%\n% y_i = log( exp(x_i) + exp(r) ) + log( exp(-s) + 1 ) \n% - log( exp(x_i) + exp(-s) ) - log( exp(r) + 1 ) \n%\n% Viewed as a data-dependent calibration transform from x to y, with \n% parameters r and s, then: \n%\n% r: is the log-odds that x is a typical non-target score, given that \n% there really is a target.\n%\n% s: is the log-odds that x is a typical target score, given that \n% there really is a non-target.\n%\n% Ideally r and s should be large negative, in which case this is almost \n% an identity transform from x to y, but with saturation at large \n% positive and negative values. Increasing r increases the lower\n% saturation level. Increasing s decreases the upper saturation level.\n\nif nargin==0\n test_this();\n return;\nend\n\n[x,rs] = splitvec_fh(-2); \n[r,s] = splitvec_fh(-1,rs); \n\nneg = @(t)-t;\nnegr = linTrans(r,neg,neg);\nnegs = linTrans(s,neg,neg);\n\nlinmap = linTrans([],@(x)map(x),@(y)transmap(y)); %add last element to others\n\nnum1 = logsumexp_special(stack([],x,r));\nnum2 = neglogsigmoid_fh(s);\nnum = linmap(stack([],num1,num2));\nden1 = neglogsigmoid_fh(negr);\nden2 = logsumexp_special(stack([],x,negs));\nden = linmap(stack([],den2,den1));\n\nf = sum_of_functions([],[1 -1],num,den);\n\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\n\nfunction y = map(x)\ny = x(1:end-1)+x(end);\nend\nfunction x = transmap(y)\nx = [y(:);sum(y)];\nend\n\n\nfunction test_this()\nn = 3;\nx = randn(n,1);\nr = randn(1,1);\ns = randn(1,1);\nX = [x;r;s];\nf = scal_simple_fh([]);\ntest_MV2DF(f,X(:));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "quality_fuser_v3.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v3.m", "size": 1843, "source_encoding": "utf_8", "md5": "1be42594eb854e9b0b4d89daa27c0759", "text": "function [fusion,params] = quality_fuser_v3(w,scores,train_vecs,test_vecs,train_ndx,test_ndx,ddim)\n% \n% Inputs:\n%\n% scores: the primary detection scores, for training\n% D-by-T matrix of T scores for D input systems\n%\n% train_vecs: K1-by-M matrix, one column-vector for each of M training\n% segemnts\n%\n% test_vecs: K2-by-N matrix, one column-vector for each of N training\n% segemnts\n%\n% train_ndx: 1-by-T index where train_ndx(t) is the index into train_vecs\n% for trial t. \n%\n% test_ndx: 1-by-T index where test_ndx(t) is the index into test_vecs\n% for trial t. 
\n% ddim: dimension of subspace for quality distandce calculation,\n% where ddim <= min(K1,K2)\n%\n% Outputs:\n%\n\nif nargin==0\n test_this();\n return;\nend\n\n% Check data dimensions\n[K1,M] = size(train_vecs);\n[K2,N] = size(test_vecs);\nassert(ddim1, w0 = zeros(wsz,1); end\n\nLLH = linTrans(w,@(w)map_this(w),@(w)transmap_this(w));\n\n\n\n function y = map_this(w)\n w = reshape(w,n,m);\n y = w*Q;\n end\n\n function w = transmap_this(y)\n y = reshape(y,n,k);\n w = y*Q.';\n end\n\n\n\n\nend\n\nfunction test_this()\n\n Q = randn(2,10);\n [sys,w0] = QtoLLH([],Q,3);\n test_MV2DF(sys,w0);\n\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "fused_sigmoid.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/fused_sigmoid.m", "size": 1293, "source_encoding": "utf_8", "md5": "1f35e45a3c945008307dd1222a281bb8", "text": "function [ps,params] = fused_sigmoid(w,input_data)\n% \n% Algorithm: ps = sigmoid( alpha'*input_data +beta)\n% \n%\n% Inputs:\n% w: is [alpha; beta], where alpha is D-by-1 and beta is scalar.\n% Use w=[] to let output ps be an MV2DF function handle.\n% If w is a function handle to an MV2DF then ps is the function handle\n% to the composition of w and this function. \n%\n% input_data: D-by-T matrix\n%\n%\n% Outputs:\n% ps: function handle (if w=[], or w is handle), or numeric T-by-1\n% params.get_w0(ssat): returns w0 for optimization initialization, \n% 01, w0 = zeros(wsz,1); end\n\nlh = cell(1,n);\ntail = w;\nfor i=1:n\n [wi,tail] = splitvec_fh(q2,tail);\n lh{i} = AWB_fh(qleft',qright,tril_to_symm_fh(qdim,wi));\nend\n\nLLH = interleave(w,lh);\n\n\n\nend\n\n\nfunction test_this()\n\nqleft = randn(3,3);\nqright = randn(3,2);\n[sys,w0] = QQtoLLH([],qleft,qright,2);\n\ntest_MV2DF(sys,w0);\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "QQtoP.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QQtoP.m", "size": 771, "source_encoding": "utf_8", "md5": "0a940f8a8a56510a32ad6a45accddc02", "text": "function [P,params] = QQtoP(w,qleft,qright,n)\n% \n\nif nargin==0\n test_this();\n return;\nend\n\n\nqleft = [qleft;ones(1,size(qleft,2))];\nqright = [qright;ones(1,size(qright,2))];\n\n\n[qdim,nleft] = size(qleft);\n[qdim2,nright] = size(qright);\nassert(qdim==qdim2);\nq2 = qdim*(qdim+1)/2;\nwsz = n*q2;\n[whead,wtail] = splitvec_fh(wsz);\nparams.get_w0 = @() zeros(wsz,1); \nparams.tail = wtail;\n\nlh = cell(1,n);\nfor i=1:n\n [wi,whead] = splitvec_fh(q2,whead);\n lh{i} = AWB_fh(qleft',qright,tril_to_symm_fh(qdim,wi));\nend\n\nP = exp_mv2df(logsoftmax_mv2df(interleave(w,lh),n));\n%P = interleave(w,lh);\n\n\n\n\nend\n\n\nfunction test_this()\n\nqleft = randn(3,3);\nqright = randn(3,2);\n[sys,params] = QQtoP([],qleft,qright,2);\n\nw0 = params.get_w0();\ntest_MV2DF(sys,w0);\n\nP = sys(w0),\n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "prod_sigmoid_logdist.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prod_sigmoid_logdist.m", "size": 2506, "source_encoding": "utf_8", "md5": "f31a256c5434fca4b6c0641d23a2ebc1", "text": "function [sig,params] = prod_sigmoid_logdist(w,data1,data2,ndx1,ndx2,ddim)\n% \n% Algorithm: 
sig = distribute(ndx1,sigmoid( \n% log( \n% sum(bsxfun(@minus,M*data_1,c).^2,1)\n% )))\n% *\n% distribute(ndx2,sigmoid( \n% log( \n% sum(bsxfun(@minus,M*data_2,c).^2,1)\n% )))\n% \n%\n% Inputs:\n% w: is vec([M,c]), where M is ddim-by-D and c is ddim-by-1\n% Use w=[] to let output sld be an MV2DF function handle.\n%\n% data_1: D-by-T1 matrix\n% data_2: D-by-T2 matrix\n% ndx1,ndx2: indices of size 1 by T to distribute T1 and T2 segs over T\n% trials\n%\n% ddim: the first dimension of the M matrix\n%\n% Outputs:\n% sig: function handle (if w=[]), or numeric \n% params.get_w0(ssat): returns w0 for optimization initialization, \n% 01, w0 = init_w0(wfuse); end\n\ncalscores = linTrans(w,@(w)map_this(w),@(w)transmap_this(w));\n\n\n function w0 = init_w0(wfuse)\n assert(length(wfuse)-1==m);\n scal = wfuse(1:end-1);\n offs = wfuse(end);\n W = [scal*(m+1);((m+1)/m)*offs*ones(m,1)];\n w0 = W(:);\n end\n\n\n function y = map_this(w) \n w = reshape(w,m,2);\n y = bsxfun(@times,scores,w(:,1));\n y = bsxfun(@plus,y,w(:,2));\n end\n\n function w = transmap_this(y)\n y = reshape(y,m,n);\n w = [sum(y.*scores,2),sum(y,2)];\n end\n\n\n\n\nend\n\nfunction test_this()\n\n scores = randn(4,10);\n [sys,w0] = parallel_cal([],scores,(1:5)');\n test_MV2DF(sys,w0);\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "parallel_cal_augm.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/parallel_cal_augm.m", "size": 1115, "source_encoding": "utf_8", "md5": "f5a8bba6d164ab5577c8429ce5835305", "text": "function [calscores,params] = parallel_cal_augm(w,scores)\n% \n\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif ~exist('scores','var') || isempty(scores)\n calscores = sprintf(['parallel calibration:',repmat(' %g',1,length(w))],w);\n return;\nend\n\n[m,n] = size(scores);\nscores = [scores;zeros(1,n)];\nwsz = 2*m;\n\n[whead,wtail] = splitvec_fh(wsz,w);\nparams.get_w0 = @(wfuse) init_w0(wfuse);\nparams.tail = wtail;\n\nwaugm = augmentmatrix_fh(m,0,whead);\ncalscores = linTrans(waugm,@(w)map_this(w),@(w)transmap_this(w));\n\n\n function w0 = init_w0(wfuse)\n scal = wfuse(1:end-1);\n offs = wfuse(end);\n W = [scal*(m+1);((m+1)/m)*offs*ones(m,1)];\n w0 = W(:);\n end\n\n function y = map_this(w)\n w = reshape(w,m+1,2);\n y = bsxfun(@times,scores,w(:,1));\n y = bsxfun(@plus,y,w(:,2));\n end\n\n function w = transmap_this(y)\n y = reshape(y,m+1,n);\n w = [sum(y.*scores,2),sum(y,2)];\n end\n\n\n\n\nend\n\nfunction test_this()\n\n scores = randn(4,10);\n [sys,params] = parallel_cal_augm([],scores);\n w0 = params.get_w0();\n test_MV2DF(sys,w0);\n\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "prod_of_prmtrzd_sigmoids.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prod_of_prmtrzd_sigmoids.m", "size": 1537, "source_encoding": "utf_8", "md5": "6c18de0879f128ada38d483ace60b57f", "text": "function [ps,params] = prod_of_prmtrzd_sigmoids(w,input_data)\n% \n% Algorithm: ps = prod_i sigmoid( alpha_i*input_data(i,:) + beta_i)\n% \n%\n% Inputs:\n% w: is vec([alpha; beta]), where alpha and beta are 1-by-D.\n% Use w=[] to let output ps be an MV2DF function handle.\n% If w is a function handle to an MV2DF then ps is the function handle\n% to the composition of w and this function. 
\n%\n% input_data: D-by-T matrix\n%\n%\n% Outputs:\n% ps: function handle (if w=[], or w is handle), or numeric T-by-1\n% params.get_w0(ssat): returns w0 for optimization initialization, \n% 00\n map_head = @(w) w(1:head_size);\n map_tail = @(w) w(head_size+1:end);\n head = linTrans_adaptive([],map_head,@(y,sz)transmap_head(y,sz));\n tail = linTrans_adaptive([],map_tail,@(y,sz)transmap_tail(y,sz));\nelseif head_size<0\n map_head = @(w) w(1:end-tail_size);\n map_tail = @(w) w(1+end-tail_size:end);\n head = linTrans_adaptive([],map_head,@(y,sz)transmap_head2(y,sz));\n tail = linTrans_adaptive([],map_tail,@(y,sz)transmap_tail2(y,sz));\nelse\n error('head size cannot be 0')\nend\n\n\n\nif exist('w','var') && ~isempty(w)\n head = head(w);\n tail = tail(w);\nend\n\nend\n\nfunction test_this()\n[head,tail] = splitvec_fh(2);\nfprintf('testing head:\\n');\ntest_MV2DF(head,[1 2 3 4 5]);\n\nfprintf('\\n\\n\\ntesting tail:\\n');\ntest_MV2DF(tail,[1 2 3 4 5]);\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "log_distance_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/log_distance_mv2df.m", "size": 1968, "source_encoding": "utf_8", "md5": "ab190182251a8ee9a8cce755c6615e99", "text": "function [y,deriv] = log_distance_mv2df(w,input_data,new_dim)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% The function projects each column of input_data to a subspace and then \n% computes log distance from a centroid. The input_data is fixed, but \n% the projection and centroid parameters are variable.\n%\n% W = reshape(w);\n% y.' = log sum((W(:,1:end-1).'*input_data - W(:,end)).^2,1)\n%\n% W is the augmented matrix [M c] where M maps an input vector\n% to a lower dimensional space and c is the centroid in\n% the lower dimensional space.\n%\n% Parameters:\n% w: the vectorized version of the W matrix\n% input_data: is an K-by-T matrix of input vectors of length K, for \n% each of T trials.\n% new_dim: the dimension of vectors in the lower dimensional space.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n [dim, num_trials] = size(input_data);\n map = @(w) map_this(w,input_data,dim,new_dim);\n transmap = @(w) transmap_this(w,input_data,num_trials,new_dim);\n delta = linTrans(w,map,transmap);\n y = logsumsquares_fh(new_dim,1,delta);\n return;\nend\n\nif isa(w,'function_handle')\n f = log_distance_mv2df([],input_data,new_dim);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = log_distance_mv2df([],input_data,new_dim);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction y = map_this(w,input_data,dim,new_dim)\n% V = [input_data; ones(1,num_trials)];\nW = reshape(w,new_dim,dim+1);\ny = bsxfun(@minus,W(:,1:end-1)*input_data,W(:,end));\ny = y(:);\n\nfunction dx = transmap_this(dy,input_data,num_trials,new_dim)\ndY = reshape(dy,new_dim,num_trials);\n% V = [input_data; ones(1,num_trials)];\n% Vt = V.';\n% dX = dY*Vt;\ndYt = dY.';\ndYtSum = sum(dYt,1);\ndX = [input_data*dYt;-dYtSum].';\ndx = dX(:);\n\n\nfunction test_this()\nK = 5;\nN = 10;\nP = 3;\nM = randn(P,N);\nc = randn(P,1);\nW = [M c];\nw = W(:);\ninput_data = randn(N,K);\n\nf = log_distance_mv2df([],input_data,P);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "AWB_fh.m", "ext": ".m", "path": 
"meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/AWB_fh.m", "size": 675, "source_encoding": "utf_8", "md5": "3ab5ec4ad82fe2f901f95abf30fb3193", "text": "function fh = AWB_fh(A,B,w)\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n%\n% Algorithm: Y = A*reshape(w,..)*B\n\n\nif nargin==0\n test_this();\n return;\nend\n\n[m,n] = size(A);\n[r,s] = size(B);\n\n function y = map_this(w) \n w = reshape(w,n,r);\n y = A*w*B;\n end\n\n function w = transmap_this(y) \n y = reshape(y,m,s);\n w = A.'*y*B.';\n end\n\n\n\nmap = @(y) map_this(y);\ntransmap = @(y) transmap_this(y);\n\n\nfh = linTrans([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\nfunction test_this()\nA = randn(2,3);\nB = randn(4,5);\nf = AWB_fh(A,B);\ntest_MV2DF(f,randn(3*4,1));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "xoverxplusalpha.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/xoverxplusalpha.m", "size": 792, "source_encoding": "utf_8", "md5": "9fbd612d42a50cee70f2b05dce2bf16c", "text": "function [y,deriv] = xoverxplusalpha(w,x)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% alpha --> x./(x+alpha) \n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n y = @(w)xoverxplusalpha(w,x);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = xoverxplusalpha([],x);\n y = compose_mv(f,w,[]);\n return;\nend\n\nx = x(:);\nassert(numel(w)==1);\ny = x./(x+w);\n\nderiv = @(Dy) deriv_this(Dy,x,w);\n\nend\n\nfunction [g,hess,linear] = deriv_this(Dy,x,w)\ng0 = -x./(x+w).^2;\ng = Dy.'*g0;\nlinear = false;\nhess = @(Dw) hess_this(Dw,Dy,x,w,g0);\nend\n\nfunction [h,Jv] = hess_this(Dw,Dy,x,w,g0)\n h = 2*Dw * Dy.'*(x./(x+w).^3);\n if nargin>1\n Jv = Dw*g0;\n end\nend\n\n\n\nfunction test_this()\n\nx = randn(1,100);\nw = randn(1);\nf = xoverxplusalpha([],x);\ntest_MV2DF(f,w);\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "tril_to_symm_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/tril_to_symm_fh.m", "size": 786, "source_encoding": "utf_8", "md5": "9ea53a1f6c15720e67c1c446d7dfad43", "text": "function fh = tril_to_symm_fh(m,w)\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n%\n% Algorithm: w is vector of sizem*(m+1)/2\n% w -> m-by-m lower triangular matrix Y\n% Y -> Y + Y' \n\nif nargin==0\n test_this();\n return;\nend\n\n\nindx = tril(true(m));\n\n function y = map_this(w) \n y = zeros(m);\n y(indx(:)) = w;\n y = y + y.';\n end\n\n function w = transmap_this(y) \n y = reshape(y,m,m);\n y = y + y.';\n w = y(indx(:));\n end\n\nmap = @(w) map_this(w);\ntransmap = @(y) transmap_this(y);\n\n\nfh = linTrans([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\nfunction test_this()\nm=3;\nn = m*(m+1)/2;\nf = tril_to_symm_fh(m);\ntest_MV2DF(f,randn(n,1));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "square_distance_mv2df.m", "ext": ".m", "path": 
"meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/square_distance_mv2df.m", "size": 1835, "source_encoding": "utf_8", "md5": "a5d544c6956f70a3c3afdec634a2c891", "text": "function [y,deriv] = square_distance_mv2df(w,input_data,new_dim)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% The function computes the square distance of the vectors for each trial.\n% y.' = sum((W(:,1:end-1).'*input_data + W(:,end)).^2,1)\n%\n% W is the augmented matrix [M c] where M maps a score vector\n% to a lower dimensional space and c is an offset vector in\n% the lower dimensional space.\n%\n% Parameters:\n% w: the vectorized version of the W matrix\n% input_data: is an M-by-T matrix of input vectors of length M, for each of T\n% trials.\n% new_dim: the dimension of vectors in the lower dimensional space.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n [dim, num_trials] = size(input_data);\n map = @(w) map_this(w,input_data,dim,new_dim);\n transmap = @(w) transmap_this(w,input_data,num_trials,new_dim);\n delta = linTrans(w,map,transmap);\n y = sums_of_squares(delta,new_dim);\n return;\nend\n\nif isa(w,'function_handle')\n f = square_distance_mv2df([],input_data,new_dim);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = square_distance_mv2df([],input_data,new_dim);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction y = map_this(w,input_data,dim,new_dim)\n% V = [input_data; ones(1,num_trials)];\nW = reshape(w,new_dim,dim+1);\ny = bsxfun(@minus,W(:,1:end-1)*input_data,W(:,end));\ny = y(:);\n\nfunction dx = transmap_this(dy,input_data,num_trials,new_dim)\ndY = reshape(dy,new_dim,num_trials);\n% V = [input_data; ones(1,num_trials)];\n% Vt = V.';\n% dX = dY*Vt;\ndYt = dY.';\ndYtSum = sum(dYt,1);\ndX = [input_data*dYt;-dYtSum].';\ndx = dX(:);\n\n\nfunction test_this()\nK = 5;\nN = 10;\nP = 3;\nM = randn(P,N);\nc = randn(P,1);\nW = [M c];\nw = W(:);\ninput_data = randn(N,K);\n\nf = square_distance_mv2df([],input_data,P);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "addtotranspose_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/addtotranspose_fh.m", "size": 493, "source_encoding": "utf_8", "md5": "c009e482a302e2825fb3f59940bcc79e", "text": "function fh = addtotranspose_fh(m,w)\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\n\n function y = map_this(w) \n w = reshape(w,m,m);\n y = w+w.';\n end\n\nmap = @(y) map_this(y);\ntransmap = @(y) map_this(y);\n\n\nfh = linTrans([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\nfunction test_this()\nm=3;\nf = addtotranspose_fh(3);\ntest_MV2DF(f,randn(m*m,1));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "subvec_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/subvec_fh.m", "size": 544, "source_encoding": "utf_8", "md5": "a8942d310965ca178a123eb3f4a78f21", "text": "function fh = subvec_fh(first,len,w)\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n\n\nif nargin==0\n test_this();\n 
return;\nend\n\n\nmap = @(w) w(first:first+len-1);\n\n function w = transmap_this(y,sz) \n w=zeros(sz,1); \n w(first:first+len-1)=y; \n end\ntransmap = @(y,sz) transmap_this(y,sz);\n\n\nfh = linTrans_adaptive([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\nfunction test_this()\nfirst = 2;\nlen = 3;\nf = subvec_fh(first,len);\ntest_MV2DF(f,randn(5,1));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "linTrans_adaptive.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/templates/linTrans_adaptive.m", "size": 1173, "source_encoding": "utf_8", "md5": "66276c8cd337da71a4e14efc67112765", "text": "function [y,deriv] = linTrans_adaptive(w,map,transmap)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Applies linear transform y = map(w). It needs the transpose of map, \n% transmap for computing the gradient. map and transmap are function\n% handles.\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)linTrans_adaptive(w,map,transmap);\n return;\nend\n\nif isa(w,'function_handle')\n outer = linTrans_adaptive([],map,transmap);\n y = compose_mv(outer,w,[]);\n return;\nend\n\ny = map(w);\ny = y(:);\n\nderiv = @(g2) deriv_this(g2,map,transmap,numel(w));\nend\n\nfunction [g,hess,linear] = deriv_this(g2,map,transmap,wlen)\ng = transmap(g2,wlen);\ng = g(:);\n%linear = false; % use this to test linearity of map, if in doubt\nlinear = true;\nhess = @(d) hess_this(map,d);\nend\n\nfunction [h,Jd] = hess_this(map,d)\nh = [];\nif nargout>1 \n Jd = map(d);\n Jd = Jd(:);\nend\n\nend\n\nfunction test_this()\nfirst = 2;\nlen = 3;\nmap = @(w) w(first:first+len-1);\nfunction w = transmap_test(y,sz) \n w=zeros(sz,1); \n w(first:first+len-1)=y; \nend\ntransmap = @(y,sz) transmap_test(y,sz);\nf = linTrans_adaptive([],map,transmap);\ntest_MV2DF(f,randn(5,1));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsumexp_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsumexp_fh.m", "size": 1287, "source_encoding": "utf_8", "md5": "764511ba624a62ac12e572a26a5e7aa2", "text": "function f = logsumexp_fh(m,direction,w)\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the logsumexp function. The whole mapping works like\n% this, in MATLAB-style psuedocode:\n%\n% F: R^(m*n) --> R^n, where y = F(x) is computed thus:\n%\n% n = length(x)/m\n% If direction=1, X = reshape(x,m,n), or \n% if direction=1, X = reshape(x,n,m). 
\n% y = log(sum(exp(X),direction))\n%\n% Inputs: \n% m: the number of inputs to each individual logsumexp calculation.\n% direction: 1 sums down columns, or 2 sums accross rows.\n% w: optional, if ssupplied \n%\n% Outputs:\n% f: a function handle to the MV2DF described above.\n%\n% see: MV2DF_API_DEFINITION.readme\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nf = vectorized_function([],@(X)F0(X,direction),m,direction);\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\nfunction [y,f1] = F0(X,dr)\n M = max(X,[],dr);\n y = log(sum(exp(bsxfun(@minus,X,M)),dr))+M;\n f1 = @() F1(X,y,dr);\nend\n\nfunction [J,f2,linear] = F1(X,y,dr)\nlinear = false;\nJ = exp(bsxfun(@minus,X,y));\nf2 = @(dX) F2(dX,J,dr);\nend\n\nfunction H = F2(dX,J,dr)\n H = J.*bsxfun(@minus,dX,sum(dX.*J,dr));\nend\n\n\n\nfunction test_this()\nm = 4;n = 10;\nf = logsumexp_fh(m,1);\nX = randn(n,m);\ntest_MV2DF(f,X(:));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "one_over_one_plus_w_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/one_over_one_plus_w_mv2df.m", "size": 717, "source_encoding": "utf_8", "md5": "d735233c52193c323d03cdb85d0948f5", "text": "function [y,deriv] = one_over_one_plus_w_mv2df(w)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% y = 1 ./ (1 + w)\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)one_over_one_plus_w_mv2df(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = one_over_one_plus_w_mv2df([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\ny = 1 ./ (1 + w);\nderiv = @(dy) deriv_this(dy,y);\n\nfunction [g,hess,linear] = deriv_this(dy,y)\n\nlinear = false;\ng = -dy.*(y.^2);\nhess = @(d) hess_this(d,dy,y);\n\nfunction [h,Jv] = hess_this(d,dy,y)\n\nh = 2*dy.*d.*(y.^3);\nif nargout>1\n Jv = -d.*(y.^2);\nend\n\n\nfunction test_this()\nf = one_over_one_plus_w_mv2df([]);\ntest_MV2DF(f,randn(3,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sigmoid_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/sigmoid_mv2df.m", "size": 758, "source_encoding": "utf_8", "md5": "e0591c88d68032fcf2a300fe7f2e8df0", "text": "function [y,deriv] = sigmoid_mv2df(w)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n% y = sigmoid(w) = 1./(1+exp(-w)), vectorized as MATLAB usually does.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)sigmoid_mv2df(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = sigmoid_mv2df([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\ny = sigmoid(w);\ny1 = sigmoid(-w);\nderiv = @(dy) deriv_this(dy,y,y1);\n\nfunction [g,hess,linear] = deriv_this(dy,y,y1)\n\nlinear = false;\ng = dy.*y.*y1;\nhess = @(d) hess_this(d,dy,y,y1);\n\nfunction [h,Jv] = hess_this(d,dy,y,y1)\n\nh = dy.*d.*(y.*y1.^2 - y.^2.*y1);\nif nargout>1\n Jv = d.*y.*y1;\nend\n\n\nfunction test_this()\nf = sigmoid_mv2df([]);\ntest_MV2DF(f,randn(3,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "neglogsigmoid_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/neglogsigmoid_fh.m", "size": 1075, "source_encoding": "utf_8", "md5": "dc180d133fc039197aa99a5e4186c6a7", "text": "function f = neglogsigmoid_fh(w)\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the logsigmoid function. The mapping is, in \n% MATLAB-style code:\n%\n% y = log(sigmoid(w)) = log(1./1+exp(-w)) = -log(1+exp(-w))\n%\n% Inputs: \n% m: the number of inputs to each individual logsumexp calculation.\n% direction: 1 sums down columns, or 2 sums accross rows.\n% w: optional, if ssupplied \n%\n% Outputs:\n% f: a function handle to the MV2DF described above.\n%\n% see: MV2DF_API_DEFINITION.readme\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nf = vectorized_function([],@(x)F0(x));\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\nfunction [y,f1] = F0(x)\nlogp1 = -neglogsigmoid(x);\nlogp2 = -neglogsigmoid(-x);\ny = -logp1;\nf1 = @() F1(logp1,logp2);\nend\n\nfunction [J,f2,linear] = F1(logp1,logp2)\nlinear = false;\nJ = -exp(logp2);\nf2 = @(dx) F2(dx,logp1,logp2);\nend\n\nfunction h = F2(dx,logp1,logp2)\nh = dx.*exp(logp1+logp2);\nend\n\n\n\nfunction test_this()\nn = 10;\nf = neglogsigmoid_fh([]);\nx = randn(n,1);\ntest_MV2DF(f,x);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsumsquares_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsumsquares_fh.m", "size": 1275, "source_encoding": "utf_8", "md5": "c1e543f6680e7257b1f55ff61d967598", "text": "function f = logsumsquares_fh(m,direction,w)\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the logsumsquares function. The whole mapping works like\n% this, in MATLAB-style psuedocode:\n%\n% F: R^(m*n) --> R^n, where y = F(x) is computed thus:\n%\n% n = length(x)/m\n% If direction=1, X = reshape(x,m,n), or \n% if direction=1, X = reshape(x,n,m). 
\n% y = log(sum(X.^2,direction))\n%\n% Inputs: \n% m: the number of inputs to each individual logsumexp calculation.\n% direction: 1 sums down columns, or 2 sums accross rows.\n%\n%\n% Outputs:\n% f: a function handle to the MV2DF described above.\n%\n% see: MV2DF_API_DEFINITION.readme\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nf = vectorized_function([],@(X)F0(X,direction),m,direction);\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\nfunction [y,f1] = F0(X,dr)\n ssq = sum(X.^2,dr);\n y = log(ssq);\n f1 = @() F1(X,ssq,dr);\nend\n\nfunction [J,f2,linear] = F1(X,s,dr)\nlinear = false;\nJ = bsxfun(@times,X,2./s);\nf2 = @(dX) F2(dX,X,s,dr);\nend\n\nfunction H = F2(dX,X,s,dr)\n H = bsxfun(@times,dX,2./s) - bsxfun(@times,X,4*sum(X.*dX,dr)./(s.^2));\nend\n\n\n\nfunction test_this()\nm = 4;n = 10;\nf = logsumsquares_fh(m,1);\nX = randn(n,m);\ntest_MV2DF(f,X(:));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "expneg_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/expneg_mv2df.m", "size": 675, "source_encoding": "utf_8", "md5": "f12485f16e7f66d9deb530df461bdcdc", "text": "function [y,deriv] = expneg_mv2df(w)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% y = exp(-w), vectorized as MATLAB usually does.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)expneg_mv2df(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = expneg_mv2df([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\ny = exp(-w);\nderiv = @(dy) deriv_this(dy,y);\n\nfunction [g,hess,linear] = deriv_this(dy,y)\n\nlinear = false;\ng = -dy.*y;\nhess = @(d) hess_this(d,dy,y);\n\nfunction [h,Jv] = hess_this(d,dy,y)\n\nh = dy.*y.*d;\nif nargout>1\n Jv = -d.*y;\nend\n\n\nfunction test_this()\nf = expneg_mv2df([]);\ntest_MV2DF(f,randn(3,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "square_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/square_mv2df.m", "size": 634, "source_encoding": "utf_8", "md5": "f7604570a85ea6be67d98ae414127642", "text": "function [y,deriv] = square_mv2df(w)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% y = w.^2\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)square_mv2df(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = square_mv2df([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\ny = w.^2;\nderiv = @(dy) deriv_this(dy,w);\n\nfunction [g,hess,linear] = deriv_this(dy,w)\n\nlinear = false;\ng = 2*dy.*w;\nhess = @(d) hess_this(d,dy,w);\n\nfunction [h,Jv] = hess_this(d,dy,w)\n\nh = 2*dy.*d;\nif nargout>1\n Jv = 2*w.*d;\nend\n\n\nfunction test_this()\nf = square_mv2df([]);\ntest_MV2DF(f,randn(3,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsigmoid_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsigmoid_fh.m", "size": 1068, "source_encoding": "utf_8", "md5": "65bf6e2f03af50449d9492d02f7e3c98", "text": "function f = logsigmoid_fh(w)\n% This is a factory for a function handle to an MV2DF, which represents\n% the vectorization of the logsigmoid function. 
The mapping is, in \n% MATLAB-style code:\n%\n% y = log(sigmoid(w)) = log(1./1+exp(-w)) = -log(1+exp(-w))\n%\n% Inputs: \n% m: the number of inputs to each individual logsumexp calculation.\n% direction: 1 sums down columns, or 2 sums accross rows.\n% w: optional, if ssupplied \n%\n% Outputs:\n% f: a function handle to the MV2DF described above.\n%\n% see: MV2DF_API_DEFINITION.readme\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nf = vectorized_function([],@(x)F0(x));\n\nif exist('w','var') && ~isempty(w)\n f = f(w);\nend\n\nend\n\nfunction [y,f1] = F0(x)\nlogp1 = -neglogsigmoid(x);\nlogp2 = -neglogsigmoid(-x);\ny = logp1;\nf1 = @() F1(logp1,logp2);\nend\n\nfunction [J,f2,linear] = F1(logp1,logp2)\nlinear = false;\nJ = exp(logp2);\nf2 = @(dx) F2(dx,logp1,logp2);\nend\n\nfunction h = F2(dx,logp1,logp2)\nh = -dx.*exp(logp1+logp2);\nend\n\n\n\nfunction test_this()\nn = 10;\nf = logsigmoid_fh([]);\nx = randn(n,1);\ntest_MV2DF(f,x);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "exp_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/exp_mv2df.m", "size": 659, "source_encoding": "utf_8", "md5": "410b48565ed23cbda996866e44dfb2fa", "text": "function [y,deriv] = exp_mv2df(w)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% y = exp(w), vectorized as MATLAB usually does.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)exp_mv2df(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = exp_mv2df([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\ny = exp(w);\nderiv = @(dy) deriv_this(dy,y);\n\nfunction [g,hess,linear] = deriv_this(dy,y)\n\nlinear = false;\ng = dy.*y;\nhess = @(d) hess_this(d,dy,y);\n\nfunction [h,Jv] = hess_this(d,dy,y)\n\nh = dy.*y.*d;\nif nargout>1\n Jv = d.*y;\nend\n\n\nfunction test_this()\nf = exp_mv2df([]);\ntest_MV2DF(f,randn(3,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "vectorized_function.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/templates/vectorized_function.m", "size": 4600, "source_encoding": "utf_8", "md5": "9c5431b821aa6587c3849945d31dd1fd", "text": "function [y,deriv] = vectorized_function(w,f,m,direction)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% This template vectorizes the given function F: R^m -> R as follows:\n% k = length(w)/m;\n% If direction=1, X = reshape(w,m,k), y(j) = F(X(:,j)), or\n% if direction=2, X = reshape(w,k,m), y(i) = F(X(i,:)),\n% so that length(y) = k.\n% \n% Input parameters:\n% w: As with every MV2DF, w can be [], a vector, or a function handle to\n% another MV2DF.\n% f: is a function handle to an m-file that represents the function \n% F: R^m -> R, as well as its first and second derivatives.\n%\n% m: The input dimension to F. \n% (optional, default m = 1)\n%\n% direction: is used as explained above to determine whether columns,\n% or rows of X are processed by F. \n% (optional, default direction = 2)\n%\n% Function f works as follows: \n% (Note that f, f1 and f2 have to know the required direction, it is \n% not passed to them.)\n% [y,f1] = f(X), where X and y are as defined above.\n%\n% Function f1 works as follows:\n% [J,f2] = f1(), where size(J) = size(X). \n% Column/row i of J is the gradient of y(i) w.r.t. 
\n% column/row i of W.\n% f2 is a function handle to 2nd order derivatives. \n% If 2nd order derivatives are 0, then f2 should be [].\n%\n% Function f2 works as follows:\n% H = f2(dX), where size(dX) = size(X). \n% If direction=1, H(:,j) = H_i * dX(:,j), or\n% if direction=2, H(i,:) = dX(i,:)* H_i, where \n% H_i is Hessian of y(i), w.r.t. colum/row i of X.\n%\n%\n\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif ~exist('m','var')\n m = 1;\n direction = 2;\nend\n\n\nif isempty(w)\n y = @(w)vectorized_function(w,f,m,direction);\n return;\nend\n\nif isa(w,'function_handle')\n outer = vectorized_function([],f,m,direction);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nif direction==1\n W = reshape(w,m,[]);\nelseif direction==2\n W = reshape(w,[],m);\nelse\n error('illegal direction %i',direction);\nend\n\nif nargout==1\n y = f(W);\nelse \n [y,f1] = f(W);\n deriv = @(dy) deriv_this(dy,f1,direction);\nend\ny = y(:);\n\nend\n\nfunction [g,hess,linear] = deriv_this(dy,f1,direction)\nif direction==1\n dy = dy(:).';\nelse\n dy = dy(:);\nend\nif nargout==1\n J = f1();\n g = reshape(bsxfun(@times,J,dy),[],1);\nelse\n [J,f2] = f1();\n linear = isempty(f2);\n g = reshape(bsxfun(@times,J,dy),[],1);\n hess = @(d) hess_this(d,f2,J,dy,direction);\nend\n\n\nend\n\nfunction [h,Jv] = hess_this(dx,f2,J,dy,direction)\n\ndX = reshape(dx,size(J));\nif isempty(f2)\n h = [];\nelse\n h = reshape(bsxfun(@times,dy,f2(dX)),[],1);\nend\nif nargout>1\n Jv = sum(dX.*J,direction);\n Jv = Jv(:);\nend\n\nend\n\n%%%%%%%%%%%%%%%%%%%% Example function: z = x^2 + y^3 %%%%%%%%%%%%%%%%%%%%\n\n% example function: z = x^2 + y^3\nfunction [z,f1] = x2y3(X,direction)\n if direction==1\n x = X(1,:);\n y = X(2,:);\n else\n x = X(:,1);\n y = X(:,2);\n end\n z = x.^2+y.^3;\n f1 = @() f1_x2y3(x,y,direction);\nend\n\n% example function 1st derivative: z = x^2 + y^2\nfunction [J,f2] = f1_x2y3(x,y,direction)\nif direction==1\n J = [2*x;3*y.^2];\nelse\n J = [2*x,3*y.^2];\nend\nf2 = @(dxy) f2_x2y3(dxy,y,direction);\nend\n\n\n% example function 2nd derivative: z = x^2 + y^2\nfunction H = f2_x2y3(dxy,y,direction)\nif direction==1\n H = dxy.*[2*ones(size(y));6*y];\nelse\n H = dxy.*[2*ones(size(y)),6*y];\nend\nend\n\n\n%%%%%%%%%%%%%%%%%%%% Example function: z = x*y^2 %%%%%%%%%%%%%%%%%%%%\n\n% example function: z = x*y^2\nfunction [z,f1] = xy2(X,direction)\n if direction==1\n x = X(1,:);\n y = X(2,:);\n else\n x = X(:,1);\n y = X(:,2);\n end\n y2 = y.^2;\n z = x.*+y2;\n f1 = @() f1_xy2(x,y,y2,direction);\nend\n\n% example function 1st derivative: z = x*y^2\nfunction [J,f2] = f1_xy2(x,y,y2,direction)\nif direction==1\n J = [y2;2*x.*y];\nelse\n J = [y2,2*x.*y];\nend\nf2 = @(dxy) f2_xy2(dxy,x,y,direction);\nend\n\n\n% example function 2nd derivative: z = x*y^2\nfunction H = f2_xy2(dxy,x,y,direction)\nif direction==1\n dx = dxy(1,:);\n dy = dxy(2,:);\n H = [2*y.*dy;2*y.*dx+2*x.*dy];\nelse\n dx = dxy(:,1);\n dy = dxy(:,2);\n H = [2*y.*dy,2*y.*dx+2*x.*dy];\nend\nend\n\n\n\n\n\nfunction test_this()\n\nk = 5;\nm = 2;\n\ndr = 1;\nfprintf('Testing x^2+y^2 in direction %i:\\n\\n',dr);\nf = vectorized_function([],@(X)x2y3(X,dr),2,dr);\ntest_MV2DF(f,randn(k*m,1));\n\ndr = 2;\nfprintf('\\n\\n\\n\\nTesting x*y^2 in direction %i:\\n\\n',dr);\nf = vectorized_function([],@(X)xy2(X,dr),2,dr);\ntest_MV2DF(f,randn(k*m,1));\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logdet_chol.m", "ext": ".m", "path": 
"meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/logdet_chol.m", "size": 1185, "source_encoding": "utf_8", "md5": "706e5c1e5b5b660da50408bd221522a0", "text": "function [y,deriv] = logdet_chol(w)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% y = log(det(W)), where W is positive definite and W = reshape(w,...)\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)logdet_chol(w);\n return;\nend\n\nif isa(w,'function_handle')\n outer = logdet_chol([]);\n y = compose_mv(outer,w,[]);\n return;\nend\n\ndim = sqrt(length(w));\nW = reshape(w,dim,dim);\n\nif nargout>1\n %[inv_map,bi_inv_map,logdet,iW] = invchol2(W);\n [inv_map,bi_inv_map,logdet,iW] = invchol_or_lu(W);\n y = logdet;\n deriv = @(dy) deriv_this(dy,bi_inv_map,iW);\nelse\n %[inv_map,bi_inv_map,logdet] = invchol2(W);\n [inv_map,bi_inv_map,logdet] = invchol_or_lu(W);\n y = logdet;\nend\n\nfunction [g,hess,linear] = deriv_this(dy,bi_inv_map,iW)\nG = iW.';\ngrad = G(:);\ng = dy*grad;\nlinear = false;\nhess = @(d) hess_this(grad,bi_inv_map,dy,d);\n\nfunction [h,Jd] = hess_this(grad,bi_inv_map,dy,d)\ndim = sqrt(length(d));\nD = reshape(d,dim,dim);\nH = - dy*bi_inv_map(D).';\nh = H(:);\nif nargout>1 \n Jd = grad.'*d(:);\nend\n\n\n\n\nfunction test_this()\nm = 3;\nn = 10;\n\nw = [];\nA = UtU(w,n,m);\n\nf = logdet_chol(A);\nw = randn(m*n,1);\n\ntest_MV2DF(f,w,true);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sumsquares_penalty.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/sumsquares_penalty.m", "size": 916, "source_encoding": "utf_8", "md5": "8f40fb9f94c7424808c89e165ec9960c", "text": "function [y,deriv] = sumsquares_penalty(w,lambda)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% See code for details.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)sumsquares_penalty(w,lambda);\n return;\nend\n\nif isa(w,'function_handle')\n outer = sumsquares_penalty([],lambda);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\nif isscalar(lambda)\n lambda = lambda*ones(size(w));\nelse\n lambda = lambda(:);\nend\n \n\n\ny = 0.5*w.'*(lambda.*w);\nderiv = @(dy) deriv_this(dy,lambda,lambda.*w);\n\nfunction [g,hess,linear] = deriv_this(dy,lambda,lambda_w)\n\nlinear = false;\ng = dy*lambda_w;\n\nhess = @(d) hess_this(d,dy,lambda,lambda_w);\n\nfunction [h,Jv] = hess_this(d,dy,lambda,lambda_w)\n\nh = dy*lambda.*d;\nif nargout>1\n Jv = d(:).'*lambda_w;\nend\n\n\nfunction test_this()\nlambda = randn(10,1);\nf = sumsquares_penalty([],lambda);\ntest_MV2DF(f,randn(size(lambda)));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "wmlr_obj.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/wmlr_obj.m", "size": 2299, "source_encoding": "utf_8", "md5": "f450d5fdd89f4854a123b7d7947d32c3", "text": "function [y,deriv] = wmlr_obj(w,X,T,weights,logprior);\n% This is a SCAL2DF. 
See SCAL2DF_API_DEFINITION.readme.\n% Weighted multiclass linear logistic regression objective function.\n% w is vectorized D-by-K parameter matrix W (to be optimized)\n% X is D-by-N data matrix, for N trials\n% T is K-by-N, 0/1 class label matrix, with exactly one 1 per column.\n% weights is N-vector of objective function weights, one per trial.\n% logprior is logarithm of prior,\n%\n% The K-by-N log-likelihood matrix is\n% bsxfun(@plus,W'*X,logprior(:));\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)wmlr_obj(w,X,T,weights,logprior);\n return;\nend\n\nif isa(w,'function_handle')\n outer = wmlr_obj([],X,T,weights,logprior);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\n\n[K,N] = size(T);\n[dim,N2] = size(X);\nif N ~=N2\n error('sizes of X and T incompatible');\nend\n\nW = reshape(w,dim,K); % dim*K\n% make W double so that it works if X is sparse\nscores = double(W.')*X; % K*N\nscores = bsxfun(@plus,scores,logprior(:));\n\nlsm = logsoftmax(scores); % K*N\ny = -sum(lsm.*T)*weights(:);\nderiv = @(dy) deriv_this(dy,lsm,X,T,weights);\n\n\nfunction [g,hess,linear] = deriv_this(dy,lsm,X,T,weights)\nsigma = exp(lsm); %posterior % K*N\ng0 = gradient(sigma,X,T,weights);\ng = g0*dy;\nhess = @(d) hess_this(d,dy,g0,sigma,X,weights);\nlinear = false;\n\nfunction g = gradient(sigma,X,T,weights)\nE = sigma-T; %K*N\nG = X*double(bsxfun(@times,weights(:),E.')); %dim*K\ng = G(:);\n\n\nfunction [h,Jv] = hess_this(d,dy,g,sigma,X,weights)\n\nK = size(sigma,1);\ndim = length(d)/K;\nD = reshape(d,dim,K);\n\nP = double(D.')*X; % K*N\nsigmaP = sigma.*P;\nssP = sum(sigmaP,1); % 1*N\nsssP = bsxfun(@times,sigma,ssP); %K*N\n\nh = X*double(bsxfun(@times,weights(:),(sigmaP-sssP).')); % dim*K\nh = dy*h(:);\n\nif nargout>1\n Jv = d(:).'*g;\nend\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nfunction test_this()\n\n\nK = 3;\nN = 100;\ndim = 2;\nrandn('state',0);\nmeans = randn(dim,K)*10; %signal\nX0 = randn(dim,K*N); % noise\nclassf = zeros(1,K*N);\nii = 1:N;\nT = zeros(K,N*K);\nfor k=1:K\n X0(:,ii) = bsxfun(@plus,means(:,k),X0(:,ii));\n classf(ii) = k;\n T(k,ii) = 1;\n ii = ii+N;\nend\n\nN = K*N;\nX = [X0;ones(1,N)];\n\nweights = rand(1,N);\nobj = wmlr_obj([],X,T,weights,2);\n\ntest_MV2DF(obj,randn((dim+1)*K,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "boost_obj.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/boost_obj.m", "size": 1556, "source_encoding": "utf_8", "md5": "eaa722fa0cd7b1b492401c4e6adf807b", "text": "function [y,deriv] = boost_obj(w,T,weights,logit_prior)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Weighted binary classifier cross-entropy objective, based on 'boosting'\n% proper scoring rule. 
This rule places more emphasis on extreme scores,\n% than the logariothmic scoring rule.\n%\n% Differentiable inputs:\n% w: is vector of N detection scores (in log-likelihood-ratio format) \n%\n% Fixed parameters:\n% T: is vector of N labels: 1 for target and -1 for non-target.\n% weights: is N-vector of objective function weights, one per trial.\n% logit_prior: is logit(prior), this controls the region of interest\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)boost_obj(w,T,weights,logit_prior);\n return;\nend\n\nif isa(w,'function_handle')\n outer = boost_obj([],T,weights,logit_prior);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\nscores = w.';\narg = bsxfun(@plus,scores,logit_prior).*T;\nwobj = exp(-arg/2).*weights; % 1*N\ny = sum(wobj);\n\n\n\nif nargout>1\n deriv = @(dy) deriv_this(dy,wobj(:),T);\nend\n\n\nfunction [g,hess,linear] = deriv_this(dy,wobj,T)\ng0 = -0.5*wobj.*T(:);\ng = dy*g0;\nlinear = false;\nhess = @(d) hessianprod(d,dy,g0,wobj);\n\n\n\n\nfunction [h,Jv] = hessianprod(d,dy,g0,wobj)\n\nh = dy*(0.25*wobj(:).*d(:));\n\n\nif nargout>1\n Jv = d.'*g0;\nend\n\n\nfunction test_this()\nN = 30;\nT = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)];\nscores = randn(1,N);\nweights = [rand(1,2*N/3),zeros(1,N/3)];\nf = @(w) brier_obj(w,T,weights,-2.23);\nf = @(w) boost_obj(w,T,weights,-2.23);\ntest_MV2DF(f,scores(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "neg_gaussll_taylor.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/neg_gaussll_taylor.m", "size": 1332, "source_encoding": "utf_8", "md5": "4efafbe09f47ca5947e223ca80f063c6", "text": "function [y,deriv] = neg_gaussll_taylor(w,x)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% This function represents the part of log N(x|0,W) that is dependent on \n% W = reshape(w,...), where w is variable and x is given. 
\n%\n% y = -0.5*x'*inv(W)*x - 0.5*log(det(W)), where W is positive definite and W = reshape(w,...)\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)neg_gaussll_taylor(w,x);\n return;\nend\n\nif isa(w,'function_handle')\n outer = neg_gaussll_taylor([],x);\n y = compose_mv(outer,w,[]);\n return;\nend\n\ndim = length(x);\nW = reshape(w,dim,dim);\n\n[inv_map,logdet] = invchol_taylor(W);\nz = inv_map(x);\ny = 0.5*x'*z + 0.5*logdet;\nderiv = @(dy) deriv_this(dy,z,inv_map);\n\nend\n\nfunction [g,hess,linear] = deriv_this(dy,z,inv_map)\nG1 = z*z.';\nG2 = inv_map(eye(length(z)));\ngrad = 0.5*(G2(:)-G1(:));\ng = dy*grad;\nlinear = false;\nhess = @(d) hess_this(grad,z,inv_map,dy,d);\nend\n\n\nfunction [h,Jd] = hess_this(grad,z,inv_map,dy,d)\ndim = sqrt(length(d));\nD = reshape(d,dim,dim);\nH1 = inv_map(D*z)*z' + z*inv_map(D'*z)';\nH2 = inv_map(inv_map(D)');\nh = 0.5*dy*(H1(:)-H2(:));\nif nargout>1 \n Jd = grad.'*d(:);\nend\nend\n\n\n\nfunction test_this()\nm = 3;\nn = 10;\n\nw = [];\nA = UtU(w,n,m); %A is m-by-m\nx = randn(m,1);\n\nf = neg_gaussll_taylor(A,x);\nw = randn(m*n,1);\n\ntest_MV2DF(f,w,true);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "brier_obj.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/brier_obj.m", "size": 1722, "source_encoding": "utf_8", "md5": "f68fae1776aa1a970b6f329e4c0d1027", "text": "function [y,deriv] = brier_obj(w,T,weights,logit_prior)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Weighted binary classifier cross-entropy objective, based on 'Brier'\n% quadratic proper scoring rule. This rule places less emphasis on extreme scores,\n% than the logariothmic scoring rule.\n%\n% Differentiable inputs:\n% w: is vector of N detection scores (in log-likelihood-ratio format) \n%\n% Fixed parameters:\n% T: is vector of N labels: 1 for target and -1 for non-target.\n% weights: is N-vector of objective function weights, one per trial.\n% logit_prior: is logit(prior), this controls the region of interest\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)brier_obj(w,T,weights,logit_prior);\n return;\nend\n\nif isa(w,'function_handle')\n outer = brier_obj([],T,weights,logit_prior);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\nscores = w.';\n\narg = bsxfun(@plus,scores,logit_prior).*T;\nlogp2 = -neglogsigmoid(-arg);\nwobj = 0.5*exp(2*logp2).*weights; % 1*N\ny = sum(wobj);\n\n\n\nif nargout>1\n logp1 = -neglogsigmoid(arg);\n deriv = @(dy) deriv_this(dy,weights(:),T(:),logp1(:),logp2(:));\nend\n\n\nfunction [g,hess,linear] = deriv_this(dy,weights,T,logp1,logp2)\ng0 = -exp(logp1+2*logp2).*weights.*T;\ng = dy*g0;\nlinear = false;\nhess = @(d) hessianprod(d,dy,g0,weights,logp1,logp2);\n\n\n\n\nfunction [h,Jv] = hessianprod(d,dy,g0,weights,logp1,logp2)\n\nddx = -exp(logp1+2*logp2);\nh = dy*(ddx.*(1-3*exp(logp1))).*weights.*d(:);\n\nif nargout>1\n Jv = d.'*g0;\nend\n\n\nfunction test_this()\nN = 30;\nT = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)];\nscores = randn(1,N);\nweights = [rand(1,2*N/3),zeros(1,N/3)];\nf = @(w) brier_obj(w,T,weights,-2.23);\ntest_MV2DF(f,scores(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "gauss_ll.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/gauss_ll.m", "size": 1461, 
"source_encoding": "utf_8", "md5": "76707fbe20f1dae43305e2542e9644ce", "text": "function [y,deriv] = gauss_ll(w,x)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n% This function represents the part of log N(x|0,W) that is dependent on \n% W = reshape(w,...), where w is variable and x is given. \n%\n% y = -0.5*x'*inv(W)*x - 0.5*log(det(W)), where W is positive definite and W = reshape(w,...)\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)gauss_ll(w,x);\n return;\nend\n\nif isa(w,'function_handle')\n outer = gauss_ll([],x);\n y = compose_mv(outer,w,[]);\n return;\nend\n\ndim = length(x);\nW = reshape(w,dim,dim);\n\nif nargout>1\n [inv_map,bi_inv_map,logdet,iW] = invchol_or_lu(W);\n z = inv_map(x);\n y = -0.5*x'*z - 0.5*logdet;\n deriv = @(dy) deriv_this(dy,z,inv_map,bi_inv_map,iW);\nelse\n [inv_map,bi_inv_map,logdet] = invchol_or_lu(W);\n z = inv_map(x);\n y = -0.5*x'*z - 0.5*logdet;\nend\n\nfunction [g,hess,linear] = deriv_this(dy,z,inv_map,bi_inv_map,iW)\nG1 = z*z.';\nG2 = iW.';\ngrad = 0.5*(G1(:)-G2(:));\ng = dy*grad;\nlinear = false;\nhess = @(d) hess_this(grad,z,inv_map,bi_inv_map,dy,d);\n\nfunction [h,Jd] = hess_this(grad,z,inv_map,bi_inv_map,dy,d)\ndim = sqrt(length(d));\nD = reshape(d,dim,dim);\nH1 = inv_map(D*z)*z.' + z*inv_map(D.'*z).';\nH2 = bi_inv_map(D).';\nh = -0.5*dy*(H1(:)-H2(:));\nif nargout>1 \n Jd = grad.'*d(:);\nend\n\n\n\n\nfunction test_this()\nm = 3;\nn = 10;\n\nw = [];\nA = UtU(w,n,m); %A is m-by-m\nx = randn(m,1);\n\nf = gauss_ll(A,x);\nw = randn(m*n,1);\n\ntest_MV2DF(f,w,true);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "cllr_obj.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/cllr_obj.m", "size": 1611, "source_encoding": "utf_8", "md5": "374952d66aa4641a000a48cc12baebad", "text": "function [y,deriv] = cllr_obj(w,T,weights,logit_prior)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% Weighted binary classifier cross-entropy objective, based on logarithmic\n% cost function.\n%\n% Differentiable inputs:\n% w: is vector of N detection scores (in log-likelihood-ratio format) \n%\n% Fixed parameters:\n% T: is vector of N labels: 1 for target and -1 for non-target.\n% weights: is N-vector of objective function weights, one per trial.\n% logit_prior: is logit(prior), this controls the region of interest\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)cllr_obj(w,T,weights,logit_prior);\n return;\nend\n\nif isa(w,'function_handle')\n outer = cllr_obj([],T,weights,logit_prior);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\nscores = w.';\narg = bsxfun(@plus,scores,logit_prior).*T;\nneglogp1 = neglogsigmoid(arg); % 1*N p1 = p(tar)\ny = neglogp1*weights(:);\n\n\n\nif nargout>1\n neglogp2 = neglogsigmoid(-arg); % 1*N p2 = 1-p1 = p(non)\n deriv = @(dy) deriv_this(dy,-neglogp1(:),-neglogp2(:),T(:),weights(:));\nend\n\n\nfunction [g,hess,linear] = deriv_this(dy,logp1,logp2,T,weights)\ng0 = -exp(logp2).*weights.*T;\ng = dy*g0;\nlinear = false;\nhess = @(d) hessianprod(d,dy,g0,logp1,logp2,weights);\n\n\n\n\nfunction [h,Jv] = hessianprod(d,dy,g0,logp1,logp2,weights)\n\nh = dy*(exp(logp1+logp2).*weights(:).*d(:));\n\n\nif nargout>1\n Jv = d.'*g0;\nend\n\n\nfunction test_this()\nN = 30;\nT = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)];\nW = randn(1,N);\nweights = [rand(1,2*N/3),zeros(1,N/3)];\nf = @(w) cllr_obj(w,T,weights,-2.23);\ntest_MV2DF(f,W(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "mce_obj.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/mce_obj.m", "size": 1711, "source_encoding": "utf_8", "md5": "93cfa59b8a57d279ebbdb02376bd696c", "text": "function [y,deriv] = mce_obj(w,T,weights,logprior)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% Weighted multiclass cross-entropy objective.\n% w is vectorized K-by-N score matrix W (to be optimized)\n% T is K-by-N, 0/1 class label matrix, with exactly one 1 per column.\n% weights is N-vector of objective function weights, one per trial.\n% logprior is logarithm of prior,\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)mce_obj(w,T,weights,logprior);\n return;\nend\n\nif isa(w,'function_handle')\n outer = mce_obj([],T,weights,logprior);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\n\n[K,N] = size(T);\n\n\nscores = reshape(w,K,N);\nscores = bsxfun(@plus,scores,logprior(:));\n\nlsm = logsoftmax(scores); % K*N\ny = -sum(lsm.*T)*weights(:);\n\n\n\nderiv = @(dy) deriv_this(dy,lsm,T,weights);\n\n\nfunction [g,hess,linear] = deriv_this(dy,lsm,T,weights)\nsigma = exp(lsm); %posterior % K*N\ng0 = gradient(sigma,T,weights);\ng = dy*g0;\nlinear = false;\nhess = @(d) hessianprod(d,dy,g0,sigma,weights);\n\n\nfunction g = gradient(sigma,T,weights)\nE = sigma-T; %K*N\nG = bsxfun(@times,E,weights(:).'); %dim*K\ng = G(:);\n\n\nfunction [h,Jv] = hessianprod(d,dy,g0,sigma,weights)\n\nK = size(sigma,1);\ndim = length(d)/K;\nP = reshape(d,K,dim);\n\n\nsigmaP = sigma.*P;\nssP = sum(sigmaP,1); % 1*N\nsssP = bsxfun(@times,sigma,ssP); %K*N\n\nh = bsxfun(@times,(sigmaP-sssP),weights(:).'); % dim*K\nh = dy*h(:);\n\nif nargout>1\n Jv = d.'*g0;\nend\n\n\nfunction test_this()\nK = 3;\nN = 30;\n%T = [repmat([1;0;0],1,10),repmat([0;1;0],1,10),repmat([0;0;1],1,10)];\nT = rand(K,N); T = bsxfun(@times,T,1./sum(T,1));\nW = randn(K,N);\nweights = rand(1,N);\nf = @(w) mce_obj(w,T,weights,-1);\ntest_MV2DF(f,W(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sum_ai_f_of_w_i.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/templates/sum_ai_f_of_w_i.m", "size": 1367, "source_encoding": "utf_8", "md5": "af9137c86c4b6c7456dbd1688c9ba0bb", "text": "function [y,deriv] = sum_ai_f_of_w_i(w,a,f,b)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% Does y = sum_i a_i f(w_i) + b, where f is non-linear.\n%\n%Notes: \n%\n% f is a function handle, with behaviour as demonstrated in the test code\n% of this function.\n%\n% b is optional, defaults to 0 if omitted\n\nif nargin==0\n test_this();\n return;\nend\n\nif ~exist('b','var')\n b = 0;\nend\n\n\nif isempty(w)\n y = @(w)sum_ai_f_of_w_i(w,a,f,b);\n return;\nend\n\nif isa(w,'function_handle')\n outer = sum_ai_f_of_w_i([],a,f,b);\n y = compose_mv(outer,w,[]);\n return;\nend\n\nntot = length(a);\nnz = find(a~=0);\na = a(nz);\n\nif nargin==1\n y = f(w(nz));\nelse\n [y,dfdw,f2] = f(w(nz));\n deriv = @(Dy) deriv_this(Dy,dfdw.*a,f2,a,nz,ntot);\nend\ny = y(:);\ny = a.'*y + b;\n\n\nfunction [g,hess,linear] = deriv_this(Dy,g0,f2,a,nz,ntot)\ng = zeros(ntot,1);\ng(nz) = Dy*g0(:);\nhess = @(d) hess_this(d,g0,f2,Dy,a,nz,ntot);\nlinear = false;\n\n\nfunction [h,Jd] = hess_this(d,g0,f2,Dy,a,nz,ntot)\nd = d(nz);\nhnz = f2();\nhnz = hnz(:).*d(:);\nh = zeros(ntot,1);\nh(nz) = Dy*(hnz.*a);\nif nargout>1 \n Jd = g0.'*d(:);\nend\n\n\n\nfunction [y,ddx,f2] = test_f(x)\ny = log(x);\nif nargout>1\n ddx = 1./x;\n f2 = @() -1./(x.^2);\nend\n\n\nfunction test_this()\nn = 10;\na = randn(n,1);\na = bsxfun(@max,a,0);\nb = 5;\nf = sum_ai_f_of_w_i([],a,@(x)test_f(x),b);\n\nw = 1+rand(n,1);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "KtimesW.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/KtimesW.m", "size": 726, "source_encoding": "utf_8", "md5": "d53d40345ce7668d43a1efa9eb621335", "text": "function [y,deriv] = KtimesW(w,K)\n% This is an MV2DF . See MV2DF_API_DEFINITION.readme.\n% \n% \n%\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w) \n map = @(w) map_this(w,K);\n transmap = @(y) transmap_this(y,K);\n y = linTrans(w,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = KtimesW([],K);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = KtimesW([],K);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction y = map_this(w,K)\n[m,n] = size(K);\ny = K*reshape(w,n,[]);\ny = y(:);\n\nfunction w = transmap_this(y,K)\n[m,n] = size(K);\nw = K.'*reshape(y,m,[]);\n\nfunction test_this()\nm = 3;\nn = 4;\nK = randn(m,n);\nr = 2;\nW = randn(n,r);\nf = KtimesW([],K);\ntest_MV2DF(f,W(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scaleRows.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/scaleRows.m", "size": 798, "source_encoding": "utf_8", "md5": "c848225d200f35d733b8bb76c2495127", "text": "function [y,deriv] = scaleRows(w,scales)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% w --> bsxfun(@times,reshape(w,m,[]),scales(:))\n%\n% where m = length(scales);\n%\n% Note: this is a symmetric linear transform.\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n map = @(w)map_this(w,scales);\n y = linTrans(w,map,map);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = scaleRows([],scales);\n y = compose_mv(f,w,[]);\n return;\nend\n \n\n\nf = scaleRows([],scales);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction w = map_this(w,scales)\nn = length(scales);\nw = reshape(w,[],n);\nw = bsxfun(@times,w,scales(:)');\n\n\n\nfunction test_this()\nK = 5;\nN = 10;\nM = randn(K,N);\nscales = randn(1,N);\n\nf = scaleRows([],scales);\ntest_MV2DF(f,M(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sumcolumns_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/sumcolumns_fh.m", "size": 602, "source_encoding": "utf_8", "md5": "7a2cd01c3b7076cda20fa6a96cae0069", "text": "function fh = sumcolumns_fh(m,w)\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n%\n% w -> W = reshape(w,m,[]) -> sum(W,1)'\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nmap = @(w) map_this(w,m);\ntransmap = @(y) transmap_this(y,m);\n\n\nfh = linTrans([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\n\nfunction w = transmap_this(y,m) \n w = repmat(y(:).',m,1);\nend\n\nfunction s = map_this(w,m) \nW = reshape(w,m,[]);\ns = sum(W,1);\nend\n\n\nfunction test_this()\nm = 3; \nn = 4;\nf = sumcolumns_fh(m);\nW = randn(m,n);\ntest_MV2DF(f,W(:));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "columnJofN_fh.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/columnJofN_fh.m", "size": 634, "source_encoding": "utf_8", "md5": "23448c0cc436ac53b95d5e4ec48c7b35", "text": "function fh = columnJofN_fh(j,n,w)\n% This is almost an MV2DF, but it does not return derivatives on numeric\n% input, w.\n%\n% w -> W = reshape(w,[],n) -> W(:,j)\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nmap = @(w) map_this(w,j,n);\ntransmap = @(y) transmap_this(y,j,n);\n\n\nfh = linTrans([],map,transmap);\n\nif exist('w','var') && ~isempty(w)\n fh = fh(w);\nend\n\n\nend\n\n\nfunction w = transmap_this(y,j,n) \nW = zeros(length(y),n); \nW(:,j) = y;\nw = W(:);\nend\n\nfunction col = map_this(w,j,n) \nW = reshape(w,[],n);\ncol = W(:,j);\nend\n\n\nfunction test_this()\nm = 3; \nn = 4;\nf = columnJofN_fh(2,4);\nW = randn(m,n);\ntest_MV2DF(f,W(:));\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scaleColumns.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/scaleColumns.m", "size": 811, "source_encoding": "utf_8", "md5": "cacb0b80cb3f3871595674e741382d26", "text": "function [y,deriv] = scaleColumns(w,scales)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% w --> bsxfun(@times,reshape(w,[],n),scales(:)')\n%\n% where n = length(scales);\n%\n% Note: this is a symmetric linear transform.\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n map = @(w)map_this(w,scales);\n y = linTrans(w,map,map);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = scaleColumns([],scales);\n y = compose_mv(f,w,[]);\n return;\nend\n \n\n\nf = scaleColumns([],scales);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction w = map_this(w,scales)\nn = length(scales);\nw = reshape(w,[],n);\nw = bsxfun(@times,w,scales(:)');\n\n\n\nfunction test_this()\nK = 5;\nN = 10;\nM = randn(K,N);\nscales = randn(1,N);\n\nf = scaleColumns([],scales);\ntest_MV2DF(f,M(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "subvec.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/subvec.m", "size": 733, "source_encoding": "utf_8", "md5": "ed189df10ecad63eca1130710c559631", "text": "function [y,deriv] = subvec(w,size,first,length)\n% This is an MV2DF . See MV2DF_API_DEFINITION.readme.\n% \n% w --> w(first:first+length-1)\n%\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\n\nlast = first+length-1;\n\nif isempty(w) \n map = @(w) w(first:last);\n transmap = @(w) transmap_this(w,size,first,last);\n y = linTrans(w,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = subvec([],size,first,length);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = subvec([],size,first,length);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction g = transmap_this(w,size,first,last)\ng = zeros(size,1);\ng(first:last) = w;\n\n\nfunction test_this()\n\nf = subvec([],10,2,4);\ntest_MV2DF(f,randn(10,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "identity_trans.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/identity_trans.m", "size": 495, "source_encoding": "utf_8", "md5": "aec19df7ff1e1fa5079b22973d9122fc", "text": "function [y,deriv] = identity_trans(w)\n% This is an MV2DF . See MV2DF_API_DEFINITION.readme.\n% \n% w --> w\n%\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w) \n map = @(w) w;\n y = linTrans(w,map,map);\n return;\nend\n\nif isa(w,'function_handle')\n f = identity_trans([]);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = identity_trans([]);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\nfunction test_this()\n\nf = identity_trans([]);\ntest_MV2DF(f,randn(5,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "WtimesK.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/WtimesK.m", "size": 726, "source_encoding": "utf_8", "md5": "20d6a4715d3fb8e2c51fc17f1a45e865", "text": "function [y,deriv] = WtimesK(w,K)\n% This is an MV2DF . 
See MV2DF_API_DEFINITION.readme.\n% \n% \n%\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w) \n map = @(w) map_this(w,K);\n transmap = @(y) transmap_this(y,K);\n y = linTrans(w,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = WtimesK([],K);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = WtimesK([],K);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction y = map_this(w,K)\n[m,n] = size(K);\ny = reshape(w,[],m)*K;\ny = y(:);\n\nfunction w = transmap_this(y,K)\n[m,n] = size(K);\nw = reshape(y,[],n)*K.';\n\nfunction test_this()\nm = 3;\nn = 4;\nK = randn(m,n);\nr = 2;\nW = randn(r,m);\nf = WtimesK([],K);\ntest_MV2DF(f,W(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "transpose_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/transpose_mv2df.m", "size": 700, "source_encoding": "utf_8", "md5": "58016f72134e4ccf6256f2ea1f952a43", "text": "function [y,deriv] = transpose_mv2df(w,M,N)\n% This is an MV2DF . See MV2DF_API_DEFINITION.readme.\n% \n% vec(A) --> vec(A'), \n%\n% where A is M by N\n%\n% Note: this is an orthogonal linear transform.\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w) \n map = @(w) reshape(reshape(w,M,N).',[],1);\n transmap = @(w) reshape(reshape(w,N,M).',[],1);\n y = linTrans(w,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = transpose_mv2df([],M,N);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = transpose_mv2df([],M,N);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\nfunction test_this()\n\nM = 4;\nN = 5;\nf = transpose_mv2df([],M,N);\ntest_MV2DF(f,randn(M*N,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "fusion_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/fusion_mv2df.m", "size": 1182, "source_encoding": "utf_8", "md5": "df0b186cde5dcc42aea6490f13d6d479", "text": "function [y,deriv] = fusion_mv2df(w,scores)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% The function is a 'score fusion' computed thus:\n% y.' 
= w(1:end-1).'*scores + w(end)\n%\n% Here w is the vector of fusion weights, one weight per system and \n% an offset.\n%\n% Parameters:\n% scores: is an M-by-T matrix of scores from M systems, for each of T\n% trials.\n%\n% Note (even though the fusion is affine from input scores to output\n% scores) this MV2DF is a linear transform from w to y.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n map = @(w) map_this(w,scores);\n transmap = @(w) transmap_this(w,scores);\n y = linTrans(w,map,transmap);\n return;\nend\n\nif isa(w,'function_handle')\n f = fusion_mv2df([],scores);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = fusion_mv2df([],scores);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction y = map_this(w,scores)\ny = w(1:end-1).'*scores + w(end);\ny = y(:);\n\nfunction y = transmap_this(x,scores)\ny = [scores*x;sum(x)];\n\n\nfunction test_this()\nK = 5;\nN = 10;\nw = randn(N+1,1);\nscores = randn(N,K);\n\nf = fusion_mv2df([],scores);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "addSigmaI.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/addSigmaI.m", "size": 771, "source_encoding": "utf_8", "md5": "78b1a5b40a699c613a11ce64085abe6e", "text": "function [y,deriv] = addSigmaI(w)\n% This is an MV2DF . See MV2DF_API_DEFINITION.readme.\n% \n% \n%\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w) \n map = @(w) map_this(w);\n transmap = @(w) transmap_this(w);\n y = linTrans(w,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = addSigmaI([]);\n y = compose_mv(f,w,[]);\n return;\nend\n\nf = addSigmaI([]);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction y = map_this(w)\nw = w(:);\ny = w(1:end-1);\nsigma = w(end);\ndim = sqrt(length(y));\nii = 1:dim+1:dim*dim;\ny(ii) = w(ii)+sigma;\n\n\nfunction w = transmap_this(y)\ndim = sqrt(length(y));\nii = 1:dim+1:dim*dim;\nw = [y;sum(y(ii))];\n\nfunction test_this()\ndim = 5;\nf = addSigmaI([]);\ntest_MV2DF(f,randn(dim*dim+1,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "addOffset.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/addOffset.m", "size": 1057, "source_encoding": "utf_8", "md5": "38390e8a3f92c5a6b760571e3ba340e3", "text": "function [y,deriv] = addOffset(w,K,N)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n% w = [vec(A);b] --> vec(bsxfun(@plus,A,b))\n%\n% This function retrieves a K by N matrix as well as a K-vector from w, \n% adds the K-vector to every column of the matrix\n% and outputs the vectorized result.\n% Note this is a linear transform.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n map = @(w) map_this(w,K,N);\n transmap = @(w) transmap_this(w,K,N);\n y = linTrans(w,map,transmap);\n return;\nend\n\nif isa(w,'function_handle')\n f = addOffset([],K,N);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = addOffset([],K,N);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\n\n\nfunction y = map_this(w,K,N)\ny = w(1:K*N);\ny = reshape(y,K,N);\noffs = w((K*N+1):end);\ny = bsxfun(@plus,y,offs(:));\ny = y(:);\n\n\nfunction y = transmap_this(x,K,N)\nM = reshape(x,K,N);\ny = [x(1:K*N);sum(M,2)];\n\n\nfunction test_this()\nK = 5;\nN = 10;\nM = randn(K,N);\noffs = randn(K,1);\nw = [M(:);offs];\n\nf = addOffset([],K,N);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "const_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/const_mv2df.m", "size": 856, "source_encoding": "utf_8", "md5": "541e86c2041370727a8705935c4d575e", "text": "function [y,deriv] = const_mv2df(w,const)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% y = const(:);\n%\n% This wraps the given constant into an MV2DF. The output, y, is \n% independent of input w. The derivatives are sparse zero vectors of the \n% appropriate size.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)const_mv2df(w,const);\n return;\nend\n\nif isa(w,'function_handle')\n outer = const_mv2df([],const);\n y = compose_mv(outer,w,[]);\n return;\nend\n\nw = w(:);\ny = const(:);\n\n\nderiv = @(g2) deriv_this(length(w),length(y));\n\nfunction [g,hess,linear] = deriv_this(wsz,ysz)\ng = sparse(wsz,1);\nlinear = true;\nhess = @(d) hess_this(ysz);\n\nfunction [h,Jd] = hess_this(ysz)\nh = [];\nif nargout>1 \n Jd = sparse(ysz,1);\nend\n\nfunction test_this()\nA = randn(4,5);\nf = const_mv2df([],A);\ntest_MV2DF(f,randn(5,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "linTrans.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/linTrans.m", "size": 1012, "source_encoding": "utf_8", "md5": "5c26cd329441fa971c05127c464dfae5", "text": "function [y,deriv] = linTrans(w,map,transmap)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Applies linear transform y = map(w). It needs the transpose of map, \n% transmap for computing the gradient. 
map and transmap are function\n% handles.\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w)\n y = @(w)linTrans(w,map,transmap);\n return;\nend\n\nif isa(w,'function_handle')\n outer = linTrans([],map,transmap);\n y = compose_mv(outer,w,[]);\n return;\nend\n\ny = map(w);\ny = y(:);\n\nderiv = @(g2) deriv_this(g2,map,transmap);\n\nfunction [g,hess,linear] = deriv_this(g2,map,transmap)\ng = transmap(g2);\ng = g(:);\n%linear = false; % use this to test linearity of map, if in doubt\nlinear = true;\nhess = @(d) hess_this(map,d);\n\nfunction [h,Jd] = hess_this(map,d)\nh = [];\nif nargout>1 \n Jd = map(d);\n Jd = Jd(:);\nend\n\nfunction test_this()\nA = randn(4,5);\nmap = @(w) A*w;\ntransmap = @(y) (y.'*A).'; % faster than A'*y, if A is big\nf = linTrans([],map,transmap);\ntest_MV2DF(f,randn(5,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "affineTrans.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/affineTrans.m", "size": 1378, "source_encoding": "utf_8", "md5": "f1c4abd92c1dca63db5b0ccf3915a631", "text": "function [y,deriv] = affineTrans(w,affineMap,linMap,transMap)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Applies affine transform y = affineMap(w). It needs also needs\n% linMap, the linear part of the mapping, as well as transMap, the \n% transpose of linMap. All of affineMap, linMap and transMap are function\n% handles.\n%\n% Note, linMap(x) = J*x where J is the Jacobian of affineMap; and\n% transMap(y) = J'y.\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)affineTrans(w,affineMap,linMap,transMap);\n return;\nend\n\nif isa(w,'function_handle')\n outer = affineTrans([],affineMap,linMap,transMap);\n y = compose_mv(outer,w,[]);\n return;\nend\n\ny = affineMap(w);\ny = y(:);\n\nderiv = @(g2) deriv_this(g2,linMap,transMap);\n\nfunction [g,hess,linear] = deriv_this(g2,linMap,transMap)\ng = transMap(g2);\ng = g(:);\n%linear = false; % use this to test linearity of affineMap, if in doubt\nlinear = true;\nhess = @(d) hess_this(linMap,d);\n\nfunction [h,Jd] = hess_this(linMap,d)\n%h=zeros(size(d)); % use this to test linearity of affineMap, if in doubt\nh = [];\nif nargout>1\n Jd = linMap(d);\n Jd = Jd(:);\nend\n\n\nfunction test_this()\nA = randn(4,5);\nk = randn(4,1);\naffineMap = @(w) A*w+k;\nlinMap = @(w) A*w;\ntransMap = @(y) (y.'*A).'; % faster than A'*y, if A is big\nf = affineTrans([],affineMap,linMap,transMap);\ntest_MV2DF(f,randn(5,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsoftmax_trunc_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/logsoftmax_trunc_mv2df.m", "size": 1380, "source_encoding": "utf_8", "md5": "7933852f24348cedbc4c8750142e51de", "text": "function [y,deriv] = logsoftmax_trunc_mv2df(w,m)\n% This is a MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Does:\n% (i) Reshapes w to m-by-n. \n% (ii) effectively (not physically) append a bottom row of zeros\n% (iii) Computes logsoftmax of each of n columns. 
\n% (iv) Omits last row (effectively)\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)logsoftmax_trunc_mv2df(w,m);\n return;\nend\n\nif isa(w,'function_handle')\n outer = logsoftmax_trunc_mv2df([],m);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = reshape(w,m,[]);\ny = logsoftmax_trunc(w);\n\nif nargout>1\n deriv = @(Dy) deriv_this(Dy,exp(y));\nend\n\ny = y(:);\n\n\nfunction [g,hess,linear] = deriv_this(Dy,smax)\n[m,n] = size(smax);\nDy = reshape(Dy,m,n);\nsumDy = sum(Dy,1);\ng = Dy - bsxfun(@times,smax,sumDy);\ng = g(:); \n\nlinear = false; \nhess = @(v) hess_this(v,sumDy,smax);\n\n\nfunction [h,Jv] = hess_this(V,sumDy,smax)\n[m,n] = size(smax);\nV = reshape(V,m,n);\nVsmax = V.*smax;\nsumVsmax = sum(Vsmax,1);\nh = bsxfun(@times,smax,sumVsmax) - Vsmax;\nh = bsxfun(@times,h,sumDy);\nh = h(:);\nif nargout>1\n Jv = bsxfun(@minus,V,sumVsmax);\n Jv = Jv(:);\nend\n\n\nfunction test_this()\nm = 10;\nn = 3;\n\n%A = randn(m);\n%map = @(x) reshape(A*reshape(x,m,[]),[],1);\n%transmap = @(y) reshape(A'*reshape(y,m,[]),[],1);\n%f = linTrans([],map,transmap);\n\n\nf = logsoftmax_trunc_mv2df([],m);\ntest_MV2DF(f,randn(m*n,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "mm_special.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/mm_special.m", "size": 1465, "source_encoding": "utf_8", "md5": "735b9c605bad33588197fcc0c0d59eb5", "text": "function [prod,deriv] = mm_special(w,extractA,extractB)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% [vec(A);vec(B)] --> vec(A*B)\n%\n% where \n% A is extractA(w) \n% B is extractB(w)\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n prod = @(w)mm_special(w,extractA,extractB);\n return;\nend\n\nif isa(w,'function_handle')\n outer = mm_special([],extractA,extractB);\n prod = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\nA = extractA(w);\n[m,k] = size(A);\n\nB = extractB(w);\n[k2,n] = size(B);\nassert(k==k2,'inner matrix dimensions must agree');\n\nM = A*B;\nprod = M(:);\n\nderiv = @(g2) deriv_this(g2);\n\nfunction [g,hess,linear] = deriv_this(g2)\ng = vJ_this(g2,A,B);\nlinear = false;\nhess = @(w) hess_this(g2,w);\nend\n\nfunction [h,Jv] = hess_this(g2,w)\nh = vJ_this(g2,extractA(w),extractB(w));\nif nargout>=2\n Jv = Jv_this(w);\nend\nend\n\nfunction prod = Jv_this(w)\nAw = extractA(w);\nBw = extractB(w);\nM = Aw*B + A*Bw;\nprod = M(:);\nend\n\nfunction w = vJ_this(prod,A,B)\nM = reshape(prod,m,n);\nBp = A.'*M;\nAp = M*B.';\nw = [Ap(:);Bp(:)];\nend\n\nend\n\nfunction A = extractA_this(w,m,k)\nA = w(1:m*k);\nA = reshape(A,m,k); \nend\n\nfunction B = extractB_this(w,m,k,n)\nB = w(m*k+(1:k*n));\nB = reshape(B,k,n);\nend\n\nfunction test_this()\n\nm = 4;\nk = 5;\nn = 6;\n\nA = randn(m,k);\nB = randn(k,n);\n\nw = [A(:);B(:)];\n\nextractA = @(w) extractA_this(w,m,k);\nextractB = @(w) extractB_this(w,m,k,n);\n\nf = mm_special([],extractA,extractB);\ntest_MV2DF(f,w);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sums_of_squares.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/sums_of_squares.m", "size": 898, "source_encoding": "utf_8", "md5": "1fa8d45eea9355807d8ef47606407b36", "text": "function [y,deriv] = sums_of_squares(w,m)\n% This is a MV2DF. 
See MV2DF_API_DEFINITION.readme.\n% Does:\n% (i) Reshapes w to m-by-n. \n% (ii) Computes sum of squares of each of n columns. \n% (iii) Transposes to output n-vector.\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)sums_of_squares(w,m);\n return;\nend\n\nif isa(w,'function_handle')\n outer = sums_of_squares([],m);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nM = reshape(w,m,[]);\ny = sum(M.^2,1);\ny = y(:);\n\nderiv = @(g2) deriv_this(g2,M);\n\n\nfunction [g,hess,linear] = deriv_this(g2,M)\n\ng = 2*bsxfun(@times,M,g2.');\ng = g(:);\nlinear = false;\nhess = @(d) hess_this(d,g2,M);\n\n\nfunction [h,Jv] = hess_this(d,g2,M)\nh = deriv_this(g2,reshape(d,size(M)));\nif nargout>1\n Jv = 2*sum(reshape(d,size(M)).*M,1);\n Jv = Jv(:);\nend\n\n\nfunction test_this()\nf = sums_of_squares([],10);\ntest_MV2DF(f,randn(10*4,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "gemm.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/gemm.m", "size": 1283, "source_encoding": "utf_8", "md5": "b9245303ab8248f450ad033cde69bf29", "text": "function [prod,deriv] = gemm(w,m,k,n)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% [vec(A);vec(B)] --> vec(A*B)\n%\n% where \n% A is m-by-k \n% B is k-by-n \n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n prod = @(w)gemm(w,m,k,n);\n return;\nend\n\nif isa(w,'function_handle')\n outer = gemm([],m,k,n);\n prod = compose_mv(outer,w,[]);\n return;\nend\n\n\nw = w(:);\nA = extractA(w,m,k);\nB = extractB(w,m,k,n);\n\n\nM = A*B;\nprod = M(:);\n\nderiv = @(g2) deriv_this(g2,A,B,m,k,n);\n\nfunction [g,hess,linear] = deriv_this(g2,A,B,m,k,n)\ng = vJ_this(g2,A,B,m,n);\nlinear = false;\nhess = @(w) hess_this(m,k,n,g2,A,B,w);\n\nfunction [h,Jv] = hess_this(m,k,n,g2,A,B,w)\nh = vJ_this(g2,...\n extractA(w,m,k),...\n extractB(w,m,k,n),...\n m,n);\nif nargout>=2\n Jv = Jv_this(w,A,B,m,k,n);\nend\n\n\nfunction prod = Jv_this(w,A,B,m,k,n)\nAw = extractA(w,m,k);\nBw = extractB(w,m,k,n);\nM = Aw*B + A*Bw;\nprod = M(:);\n\nfunction w = vJ_this(prod,A,B,m,n)\nM = reshape(prod,m,n);\nBp = A.'*M;\nAp = M*B.';\nw = [Ap(:);Bp(:)];\n\n\nfunction A = extractA(w,m,k)\nA = w(1:m*k);\nA = reshape(A,m,k); \n\n\n\nfunction B = extractB(w,m,k,n)\nB = w(m*k+(1:k*n));\nB = reshape(B,k,n);\n\n\nfunction test_this()\nA = randn(4,5);\nB = randn(5,4);\n\nw = [A(:);B(:)];\n\nf = gemm([],4,5,4);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "XtKX.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/XtKX.m", "size": 849, "source_encoding": "utf_8", "md5": "0298041dbd9ce1171c7cf66e0edb8a09", "text": "function [y,deriv] = XtKX(w,K)\n%This is an MV2DF.\n%\n% vec(X) --> vec(X'KX)\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nm = size(K,1);\n\n\nif isempty(w)\n y = @(w) XtKX(w,K);\n return;\nend\n\n\nif isa(w,'function_handle')\n outer = XtKX([],K);\n y = compose_mv(outer,w,[]);\n return;\nend\n\nX = reshape(w,m,[]);\nn = size(X,2);\ny = X.'*K*X;\n\ny = y(:);\n\nderiv = @(dy) deriv_this(dy,K,X,n);\n\nfunction [g,hess,linear] = deriv_this(DY,K,X,n)\nlinear = false; \nDY = reshape(DY,n,n).';\ng = DY.'*X.'*K.' 
+ DY*X.'*K;\ng = g.';\ng = g(:);\n\nhess = @(dx) hess_this(dx,K,X,DY);\n\n\n\n\nfunction [h,Jv] = hess_this(DX,K,X,DY)\nm = size(K,1);\nDX = reshape(DX,m,[]);\nh = K*DX*DY + K.'*DX*DY.';\nh = h(:);\n\n\nif nargin<2\n return;\nend\n\nJv = DX.'*K*X + X.'*K*DX;\nJv = Jv(:);\n\n\n\nfunction test_this()\nK = randn(4);\nX = randn(4,3);\nf = XtKX([],K);\ntest_MV2DF(f,X(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "UtU.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/UtU.m", "size": 945, "source_encoding": "utf_8", "md5": "086256cb24f7c7b69d69614ceff1519b", "text": "function [prod,deriv] = UtU(w,m,n)\n% This is a MV2DF. See MV2DF_API_DEFINITION.readme.\n% U = reshape(w,m,n), M = U'*U, prod = M(:).\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n prod = @(w)UtU(w,m,n);\n return;\nend\n\nif isa(w,'function_handle')\n outer = UtU([],m,n);\n prod = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = w(:);\nU = reshape(w,m,n);\n\nM = U.'*U;\nprod = M(:);\n\nderiv = @(g2) deriv_this(g2,U,m,n);\n\nfunction [g,hess,linear] = deriv_this(g2,U,m,n)\ng = vJ_this(g2,U,n);\nlinear = false;\nhess = @(w) hess_this(w,g2,U,m,n);\n\nfunction [h,Jv] = hess_this(w,g2,U,m,n)\nh = vJ_this(g2,reshape(w,m,n),n);\nif nargout>=2\n Jv = Jv_this(w,U,m,n);\nend\n\nfunction dy = Jv_this(dw,U,m,n)\ndU = reshape(dw,m,n);\ndM = U.'*dU;\ndM = dM+dM.';\ndy = dM(:);\n\nfunction w = vJ_this(dy,U,n)\ndY = reshape(dy,n,n);\ndU = U*(dY+dY.');\nw = dU(:);\n\n\nfunction test_this()\nm = 5;\nn = 3;\nf = UtU([],m,n);\nU = randn(m,n);\ntest_MV2DF(f,U(:));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "bsxtimes.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/bsxtimes.m", "size": 1144, "source_encoding": "utf_8", "md5": "599b65f85120f5dec9d1a62d06393c35", "text": "function [y,deriv] = bsxtimes(w,m,n)\n% This is an MV2DF\n% \n% w = [vec(A); vec(b) ] --> vec(bsxfun(@times,A,b)), \n% \n% where A is an m-by-n matrix and \n% b is a 1-by-n row.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n y = @(w) bsxtimes(w,m,n);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = bsxtimes([],m,n);\n y = compose_mv(f,w,[]);\n return;\nend\n \n[A,b] = extract(w,m,n);\ny = bsxfun(@times,A,b);\ny = y(:);\n\nderiv = @(Dy) deriv_this(Dy,A,b);\n\n\nfunction [g,hess,linear] = deriv_this(Dy,A,b)\ng = gradient(Dy,A,b);\nlinear = false;\n\nhess = @(v) hess_this(v,Dy,A,b);\n\n\nfunction [h,Jv] = hess_this(v,Dy,A,b)\n[m,n] = size(A);\n[vA,vb] = extract(v,m,n);\nh = gradient(Dy,vA,vb);\nif nargout>1\n Jv = bsxfun(@times,vA,b);\n Jv = Jv + bsxfun(@times,A,vb);\n Jv = Jv(:);\nend\n\nfunction [A,b] = extract(w,m,n)\nA = reshape(w(1:m*n),m,n);\nb = w(m*n+1:end).';\n\nfunction g = gradient(Dy,A,b)\nDy = reshape(Dy,size(A));\ngA = bsxfun(@times,Dy,b);\ngb = sum(Dy.*A,1);\ng = [gA(:);gb(:)];\n\nfunction test_this()\nm = 5;\nn = 10;\nA = randn(m,n);\nb = randn(1,n);\nw = [A(:);b(:)];\n\nf = bsxtimes([],m,n);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "calibrateScores.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/calibrateScores.m", "size": 1095, 
"source_encoding": "utf_8", "md5": "36a554ff63a06324896dbea86ca33308", "text": "function [y,deriv] = calibrateScores(w,m,n)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% [vec(A);scal;offs] --> vec(bsxfun(@plus,scal*A,b))\n%\n% This function retrieves from w:\n% (i) an m-by-n matrix, 'scores'\n% (ii) a scalar 'scal', and\n% (iii) an m-vector, 'offset'\n%\n% Then it scales the scores and adds the offset vector to every column. \n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n scoreSz = m*n;\n wSz = scoreSz+m+1;\n at = 1;\n\n scores = subvec(w,wSz,at,scoreSz);\n at = at + scoreSz;\n \n scal = subvec(w,wSz,at,1);\n at = at + 1;\n \n offs = subvec(w,wSz,at,m);\n \n scores = gemm(stack(w,scores,scal),scoreSz,1,1);\n scores = addOffset(stack(w,scores,offs),m,n);\n y = scores;\n return;\nend\n\n\nif isa(w,'function_handle')\n f = calibrateScores([],m,n);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = calibrateScores([],m,n);\n[y,deriv] = f(w);\n\nfunction test_this()\nm = 5;\nn = 10;\nscores = randn(m,n);\noffs = randn(m,1);\nscal = 3;\n\nf = calibrateScores([],m,n);\ntest_MV2DF(f,[scores(:);scal;offs]);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "solve_AXeqB.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/solve_AXeqB.m", "size": 1054, "source_encoding": "utf_8", "md5": "cff7830e92caa23fabdd038a4e53750d", "text": "function [y,deriv] = solve_AXeqB(w,m)\n% This is an MV2DF.\n%\n% [A(:);B(:)] --> inv(A) * B\n%\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)solve_AXeqB(w,m);\n return;\nend\n\nif isa(w,'function_handle')\n outer = solve_AXeqB([],m);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\n\n[A,B,n] = extract(w,m);\ny = A\\B;\nderiv = @(dy) deriv_this(dy,m,n,A,A.',y);\ny = y(:);\n\n\nfunction [g,hess,linear] = deriv_this(dy,m,n,A,At,X)\nDXt = reshape(dy,m,n);\nDBt = At\\DXt;\nDAt = -DBt*X.';\ng = [DAt(:);DBt(:)];\n\nlinear = false; \nhess = @(dw) hess_this(dw,m,A,At,X,DBt);\n\n\n\n\nfunction [h,Jv] = hess_this(dw,m,A,At,X,DBt)\n[dA,dB] = extract(dw,m);\n\nD_DBt = -(At\\dA.')*DBt;\nDX = A\\(dB-dA*X);\nD_DAt = -(D_DBt*X.'+DBt*DX.');\nh = [D_DAt(:);D_DBt(:)];\n\nif nargout>1\n Jv = A\\(dB-dA*X);\n Jv = Jv(:);\nend\n\n\n\nfunction [A,B,n] = extract(w,m)\nmm = m^2;\nA = w(1:mm);\nA = reshape(A,m,m);\nB = w(mm+1:end);\nB = reshape(B,m,[]);\nn = size(B,2);\n\n\n\nfunction test_this()\nA = randn(5); \nB = randn(5,1);\nf = solve_AXeqB([],5);\ntest_MV2DF(f,[A(:);B(:)]);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "logsoftmax_mv2df.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/logsoftmax_mv2df.m", "size": 1248, "source_encoding": "utf_8", "md5": "1c29a9da21772e72c800bb7be4025fe6", "text": "function [y,deriv] = logsoftmax_mv2df(w,m)\n% This is a MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Does:\n% (i) Reshapes w to m-by-n. \n% (ii) Computes logsoftmax of each of n columns. 
\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)logsoftmax_mv2df(w,m);\n return;\nend\n\nif isa(w,'function_handle')\n outer = logsoftmax_mv2df([],m);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\nw = reshape(w,m,[]);\ny = logsoftmax(w);\n\nif nargout>1\n deriv = @(Dy) deriv_this(Dy,exp(y));\nend\n\ny = y(:);\n\n\nfunction [g,hess,linear] = deriv_this(Dy,smax)\n[m,n] = size(smax);\nDy = reshape(Dy,m,n);\nsumDy = sum(Dy,1);\ng = Dy - bsxfun(@times,smax,sumDy);\ng = g(:); \n\nlinear = false; \nhess = @(v) hess_this(v,sumDy,smax);\n\n\nfunction [h,Jv] = hess_this(V,sumDy,smax)\n[m,n] = size(smax);\nV = reshape(V,m,n);\nVsmax = V.*smax;\nsumVsmax = sum(Vsmax,1);\nh = bsxfun(@times,smax,sumVsmax) - Vsmax;\nh = bsxfun(@times,h,sumDy);\nh = h(:);\nif nargout>1\n Jv = bsxfun(@minus,V,sumVsmax);\n Jv = Jv(:);\nend\n\n\nfunction test_this()\nm = 10;\nn = 3;\n\n%A = randn(m);\n%map = @(x) reshape(A*reshape(x,m,[]),[],1);\n%transmap = @(y) reshape(A'*reshape(y,m,[]),[],1);\n%f = linTrans([],map,transmap);\n\n\nf = logsoftmax_mv2df([],m);\ntest_MV2DF(f,randn(m*n,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sqdist.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/sqdist.m", "size": 1170, "source_encoding": "utf_8", "md5": "bfa9c639ce948848c20501fec93af59c", "text": "function [y,deriv] = sqdist(w,dim)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% If W = reshape(w,dim,n), then Y = vec of symmetric n-by-n matrix of \n% 1/2 squared euclidian distances between all columns of W.\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)sqdist(w,dim);\n return;\nend\n\nif isa(w,'function_handle')\n outer = sqdist([],dim);\n y = compose_mv(outer,w,[]);\n return;\nend\n\nX = reshape(w,dim,[]);\nN = size(X,2);\nXX = 0.5*sum(X.^2,1);\ny = bsxfun(@minus,XX.',X.'*X);\ny = bsxfun(@plus,y,XX);\ny = y(:);\n\n\nderiv = @(dy) deriv_this(dy,X,N);\n\nfunction [G,hess,linear] = deriv_this(DY,X,N)\nDY = reshape(DY,N,N);\nsumDY = sum(DY,1)+sum(DY,2).';\nDYDY = DY+DY.';\nG = bsxfun(@times,X,sumDY)-X*DYDY;\nG = G(:);\nlinear = false;\nhess = @(d) hess_this(d,DYDY,sumDY,X);\n\nfunction [H,Jv] = hess_this(D,DYDY,sumDY,X)\nD = reshape(D,size(X));\nH = bsxfun(@times,D,sumDY)-D*DYDY;\nH = H(:);\nif nargout>=2\n DtX = D.'*X;\n xd = sum(X.*D,1);\n Jv = bsxfun(@minus,xd,DtX + DtX.');\n Jv = bsxfun(@plus,Jv,xd.');\n Jv = Jv(:);\nend\n\n\n\nfunction test_this()\ndim = 4;\nX = randn(dim,5);\n\nw = X(:);\n\nf = sqdist([],dim);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "dottimes.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/dottimes.m", "size": 884, "source_encoding": "utf_8", "md5": "7e8e3dedc670c1f93364db61f3d2b41d", "text": "function [y,deriv] = dottimes(w)\n% This is an MV2DF\n% \n% [a; b ] --> a.*b\n% \n% where length(a) == length(b)\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n y = @(w) dottimes(w);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = dottimes([]);\n y = compose_mv(f,w,[]);\n return;\nend\n \nw = w(:);\n[a,b] = extract(w);\ny = a.*b;\n\nderiv = @(Dy) deriv_this(Dy,a,b);\n\n\nfunction [g,hess,linear] = deriv_this(Dy,a,b)\ng = gradient(Dy,a,b);\nlinear = false;\n\nhess = @(v) 
hess_this(v,Dy,a,b);\n\n\nfunction [h,Jv] = hess_this(v,Dy,a,b)\n[va,vb] = extract(v);\nh = gradient(Dy,va,vb);\nif nargout>1\n Jv = va.*b + a.*vb;\nend\n\nfunction [a,b] = extract(w)\nh = length(w)/2;\na = w(1:h);\nb = w(h+1:end);\n\n\nfunction g = gradient(Dy,a,b)\ng = [Dy.*b;Dy.*a];\n\nfunction test_this()\nn = 10;\na = randn(1,n);\nb = randn(1,n);\nw = [a(:);b(:)];\n\nf = dottimes([]);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "solveChol_AXeqB.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/solveChol_AXeqB.m", "size": 1391, "source_encoding": "utf_8", "md5": "f2ded36f846a5e9904fd8299ba4a5ed1", "text": "function [y,deriv] = solveChol_AXeqB(w,m)\n% This is an MV2DF.\n%\n% [A(:);B(:)] --> inv(A) * B\n%\n% We assume A is positive definite and we solve using Choleski\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)solveChol_AXeqB(w,m);\n return;\nend\n\nif isa(w,'function_handle')\n outer = solveChol_AXeqB([],m);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\n\n\n[A,B,n] = extract(w,m);\nif isreal(A)\n R = chol(A);\n solve = @(B) R\\(R.'\\B);\nelse %complex\n solve = @(B) A\\B;\nend\ny = solve(B);\nderiv = @(dy) deriv_this(dy,m,n,solve,y);\ny = y(:);\n\n\nfunction [g,hess,linear] = deriv_this(dy,m,n,solve,X)\nDXt = reshape(dy,m,n);\nDBt = solve(DXt);\nDAt = -DBt*X.';\ng = [DAt(:);DBt(:)];\n\nlinear = false; \nhess = @(dw) hess_this(dw,m,solve,X,DBt);\n\n\n\n\nfunction [h,Jv] = hess_this(dw,m,solve,X,DBt)\n[dA,dB] = extract(dw,m);\n\nD_DBt = -solve(dA.'*DBt);\nDX = solve(dB-dA*X);\nD_DAt = -(D_DBt*X.'+DBt*DX.');\nh = [D_DAt(:);D_DBt(:)];\n\nif nargout>1\n Jv = solve(dB-dA*X);\n Jv = Jv(:);\nend\n\n\n\nfunction [A,B,n] = extract(w,m)\nmm = m^2;\nA = w(1:mm);\nA = reshape(A,m,m);\nB = w(mm+1:end);\nB = reshape(B,m,[]);\nn = size(B,2);\n\n\n\nfunction test_this()\nm = 3;\nn = 10;\nk = 8;\nUsz = m*n;\nBsz = m*k;\nWsz = Usz+Bsz;\n\nw = [];\nU = subvec(w,Wsz,1,Usz);\nB = subvec(w,Wsz,Usz+1,Bsz);\nA = UtU(U,n,m);\nAB = stack(w,A,B);\n\nf = solveChol_AXeqB(AB,m);\nw = randn(Wsz,1);\n\ntest_MV2DF(f,w,true);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "test_MV2DF.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/test_MV2DF.m", "size": 2104, "source_encoding": "utf_8", "md5": "1f7eea1823322c4c0741c86792fc73c4", "text": "function test_MV2DF(f,x0,do_cstep)\n\n%id_in = identity_trans([]);\n%id_out = identity_trans([]);\n%f = f(id_in);\n%f = id_out(f);\n\nx0 = x0(:);\n\nif ~exist('do_cstep','var')\n do_cstep = 1;\nend\n\nif do_cstep\n Jc = cstepJacobian(f,x0);\nend\nJr = rstepJacobian(f,x0);\n\n[y0,deriv] = f(x0);\nm = length(y0);\nn = length(x0);\n\nJ2 = zeros(size(Jr));\nfor i=1:m;\n y = zeros(m,1);\n y(i) = 1;\n J2(i,:) = deriv(y)';\nend\nif do_cstep\n c_err = max(max(abs(Jc-J2)));\nelse\n c_err = nan;\nend\nr_err = max(max(abs(Jr-J2)));\nfprintf('test gradient : cstep err = %g, rstep err = %g\\n',c_err,r_err);\n\ng2 = randn(m,1);\n[dummy,hess,linear] = deriv(g2);\n\n\nif true %~linear\n rHess = @(dx) rstep_approxHess(dx,g2,f,x0);\n if do_cstep\n cHess = @(dx) cstep_approxHess(dx,g2,f,x0);\n else\n cHess = @(dx) nan(size(dx));\n end\nend\n\n\nJ1 = zeros(size(Jr));\nif true %~linear\n H1 = zeros(n,n);\n H2 = zeros(n,n);\n Hr = zeros(n,n);\n Hc = 
zeros(n,n);\nend\nfor j=1:n;\n x = zeros(n,1);\n x(j) = 1;\n [h1,jx] = hess(x); \n h2 = hess(x); \n J1(:,j) = jx;\n if ~linear\n H1(:,j) = h1;\n H2(:,j) = h2;\n end\n Hr(:,j) = rHess(x);\n Hc(:,j) = cHess(x);\nend\n\nif do_cstep\n c_err = max(max(abs(Jc-J1)));\nelse\n c_err = nan;\nend\nr_err = max(max(abs(Jr-J1)));\nfprintf('test Jacobian : cstep err = %g, rstep err = %g\\n',c_err,r_err);\n\n\n\nfprintf('test Jacobian-gradient'': %g\\n',max(max(abs(J1-J2))));\n\nif false %linear\n fprintf('function claims to be linear, not testing Hessians\\n');\n return;\nend\n\n\n\nr_err = max(max(abs(H1-Hr)));\nc_err = max(max(abs(H1-Hc)));\nrc_err = max(max(abs(Hr-Hc)));\n\nfprintf('test Hess prod: cstep err = %g, rstep err = %g, cstep-rstep = %g\\n',c_err,r_err,rc_err);\nfprintf('test H1-H2: %g\\n',max(max(abs(H1-H2))));\n\nfunction x = rstep_approxHess(dx,dy,f,x0)\nalpha = sqrt(eps); \nx2 = x0 + alpha*dx;\n[dummy,deriv2] = f(x2);\nx1 = x0 - alpha*dx;\n[dummy,deriv1] = f(x1);\ng2 = deriv2(dy);\ng1 = deriv1(dy);\nx = (g2-g1)/(2*alpha);\n\nfunction p = cstep_approxHess(dx,dy,f,x0)\nx = x0 + 1e-20i*dx;\n[dummy,deriv] = f(x);\ng = deriv(dy);\np = 1e20*imag(g);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "tracer.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/tracer.m", "size": 1081, "source_encoding": "utf_8", "md5": "5e8d7ea9aefc9d1c1cc8161546bd9483", "text": "function [w,deriv] = tracer(w,vstring,gstring,jstring)\n% This is an MV2DF. See MV2DF_API_DEFINITION.readme.\n%\n% Applies linear transform y = map(w). It needs the transpose of map, \n% transmap for computing the gradient. map and transmap are function\n% handles.\n\nif nargin==0\n test_this();\n return;\nend\n\nif nargin<2\n vstring=[];\nend\n\nif nargin<3\n gstring=[];\nend\n\nif nargin<4\n jstring=[];\nend\n\nif isempty(w)\n w = @(x)tracer(x,vstring,gstring,jstring);\n return;\nend\n\nif isa(w,'function_handle')\n outer = tracer([],vstring,gstring,jstring);\n w = compose_mv(outer,w,[]);\n return;\nend\n\nif ~isempty(vstring)\n fprintf('%s\\n',vstring);\nend\n\nderiv = @(g2) deriv_this(g2,gstring,jstring);\n\nfunction [g,hess,linear] = deriv_this(g,gstring,jstring)\nif ~isempty(gstring)\n fprintf('%s\\n',gstring);\nend\nlinear = true;\nhess = @(d) hess_this(d,jstring);\n\nfunction [h,Jd] = hess_this(Jd,jstring)\nh = [];\nif nargout>1\n if ~isempty(jstring)\n fprintf('%s\\n',jstring);\n end\nend\n\nfunction test_this()\nf = tracer([],'V','G','J');\ntest_MV2DF(f,randn(5,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "test_MV2DF_noHess.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/test_MV2DF_noHess.m", "size": 1125, "source_encoding": "utf_8", "md5": "2af48174c2441c011dffbf316b93612d", "text": "function test_MV2DF_noHess(f,x0)\n\n%id_in = identity_trans([]);\n%id_out = identity_trans([]);\n%f = f(id_in);\n%f = id_out(f);\n\nx0 = x0(:);\n\nJc = cstepJacobian(f,x0);\nJr = rstepJacobian(f,x0);\n\n[y0,deriv] = f(x0);\nm = length(y0);\nn = length(x0);\n\nJ2 = zeros(size(Jr));\nfor i=1:m;\n y = zeros(m,1);\n y(i) = 1;\n J2(i,:) = deriv(y)';\nend\nc_err = max(max(abs(Jc-J2)));\nr_err = max(max(abs(Jr-J2)));\nfprintf('test gradient : cstep err = %g, rstep err = %g\\n',c_err,r_err);\n\ng2 = randn(m,1);\n\nrHess = @(dx) rstep_approxHess(dx,g2,f,x0);\ncHess = 
@(dx) cstep_approxHess(dx,g2,f,x0);\n\nHr = zeros(n,n);\nHc = zeros(n,n);\nfor j=1:n;\n x = zeros(n,1);\n x(j) = 1;\n Hr(:,j) = rHess(x);\n Hc(:,j) = cHess(x);\nend\n\nrc_err = max(max(abs(Hr-Hc)));\nfprintf('test Hess prod: cstep-rstep = %g\\n',rc_err);\n\nfunction x = rstep_approxHess(dx,dy,f,x0)\nalpha = sqrt(eps); \nx2 = x0 + alpha*dx;\n[dummy,deriv2] = f(x2);\nx1 = x0 - alpha*dx;\n[dummy,deriv1] = f(x1);\ng2 = deriv2(dy);\ng1 = deriv1(dy);\nx = (g2-g1)/(2*alpha);\n\nfunction p = cstep_approxHess(dx,dy,f,x0)\nx = x0 + 1e-20i*dx;\n[dummy,deriv] = f(x);\ng = deriv(dy);\np = 1e20*imag(g);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "inv_lu2.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/inv_lu2.m", "size": 1044, "source_encoding": "utf_8", "md5": "8fa16d13c5b1ad8e681b3f2ba0f9b2c9", "text": "function [inv_map,bi_inv_map,logdet,invA] = inv_lu2(A)\n% INV_LU2\n% Does a LU decomposition on A and returns logdet, inverse and\n% two function handles that respectively map X to A\\X and A\\X/A.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\n[L,T,p] = lu(A,'vector');\nP = sparse(p,1:length(p),1);\n % P*A = L*U\n % L is lower triangular, with unit diagonal and unit determinant\n % T is upper triangular, det(T) = prod(diag(T), may have negative values on diagonal\n % P is a permuation matrix: P' = inv(P) and det(P) is +1 or -1\n % \n\ninv_map = @(X) T\\(L\\(P*X));\n\n% inv(A)*X*inv(A)\nbi_inv_map = @(X) ((inv_map(X)/T)/L)*P;\n\n\nif nargout>2\n logdet = sum(log(diag(T)))-log(det(P));\n if nargout>3\n invA = T\\(L\\P);\n end\nend\n\nfunction test_this()\ndim = 3;\nA = randn(dim)+sqrt(-1)*randn(dim);\n[inv_map,bi_inv_map,logdet,iA] = inv_lu2(A);\n[logdet,log(det(A))]\nX = randn(dim,3);\nY1 = A\\X,\nY2 = inv_map(X)\n\nZ1 = (A\\X)/A\nZ2 = bi_inv_map(X)\n\n\niA,\ninv(A)\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "invchol2.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/invchol2.m", "size": 968, "source_encoding": "utf_8", "md5": "936256e3c3a28ed65ad0c15d9fbb04cd", "text": "function [inv_map,bi_inv_map,logdet,invA] = invchol2(A)\n% INVCHOL2\n% Does a Cholesky decomposition on A and returns logdet, inverse and\n% two function handles that respectively map X to A\\X and A\\X/A.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isreal(A)\n\n R = chol(A); %R'*R = A\n inv_map = @(X) R\\(R'\\X); \n\n % inv(A)*X*inv(A)\n bi_inv_map = @(X) (inv_map(X)/R)/(R');\n\n\n if nargout>2\n logdet = 2*sum(log(diag(R)));\n if nargout>3\n invA = inv_map(eye(size(A)));\n end\n end\nelse\n inv_map = @(X) A\\X; \n\n % inv(A)*X*inv(A)\n bi_inv_map = @(X) (A\\X)/A;\n\n\n if nargout>2\n logdet = log(det(A));\n if nargout>3\n invA = inv_map(eye(size(A)));\n end\n end\nend\n\nfunction test_this()\ndim = 3;\nr = randn(dim,2*dim);\nA = r*r';\n[inv_map,bi_inv_map,logdet,iA] = invchol2(A);\n[logdet,log(det(A))]\nX = randn(dim,3);\nY1 = A\\X,\nY2 = inv_map(X)\n\nZ1 = (A\\X)/A\nZ2 = bi_inv_map(X)\n\n\niA,\ninv(A)\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "invchol_or_lu.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/invchol_or_lu.m", "size": 1418, "source_encoding": "utf_8", "md5": "468d08dd52dd54bb77a471a5e2d0a856", "text": "function 
[inv_map,bi_inv_map,logdet,invA] = invchol_or_lu(A)\n% INVCHOL_OR_LU\n% Does a Cholesky decomposition on A and returns logdet, inverse and\n% two function handles that respectively map X to A\\X and A\\X/A.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\nif isreal(A)\n\n R = chol(A); %R'*R = A\n inv_map = @(X) R\\(R'\\X); \n\n % inv(A)*X*inv(A)\n bi_inv_map = @(X) (inv_map(X)/R)/(R');\n\n\n if nargout>2\n logdet = 2*sum(log(diag(R)));\n if nargout>3\n invA = inv_map(eye(size(A)));\n end\n end\nelse\n [L,T,p] = lu(A,'vector');\n P = sparse(p,1:length(p),1);\n % P*A = L*U\n % L is lower triangular, with unit diagonal and unit determinant\n % T is upper triangular, det(T) = prod(diag(T), may have negative values on diagonal\n % P is a permuation matrix: P' = inv(P) and det(P) is +1 or -1\n % \n\n inv_map = @(X) T\\(L\\(P*X));\n\n % inv(A)*X*inv(A)\n bi_inv_map = @(X) ((inv_map(X)/T)/L)*P;\n\n\n if nargout>2\n logdet = sum(log(diag(T)))-log(det(P));\n if nargout>3\n invA = T\\(L\\P);\n end\n end\nend\n\nfunction test_this()\ndim = 3;\nA = randn(dim)+sqrt(-1)*randn(dim);\n[inv_map,bi_inv_map,logdet,iA] = invchol_or_lu(A);\n[logdet,log(det(A))]\nX = randn(dim,3);\nY1 = A\\X,\nY2 = inv_map(X)\n\nZ1 = (A\\X)/A\nZ2 = bi_inv_map(X)\n\n\niA,\ninv(A)\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "invchol_taylor.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/invchol_taylor.m", "size": 1241, "source_encoding": "utf_8", "md5": "0f52f57c84dc1ae326e32c031541c496", "text": "function [inv_map,logdet] = invchol_taylor(A)\n% Does a Cholesky decomposition on A and returns: \n% inv_map: a function handle to solve for X in AX = B\n% logdet (of A)\n%\n% This code is designed to work correctly if A has a small complex\n% perturbation, such as used in complex step differentiation, even though\n% the complex A is not positive definite.\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isreal(A)\n\n R = chol(A); %R'*R = A\n inv_map = @(X) R\\(R'\\X); \n\n if nargout>1\n logdet = 2*sum(log(diag(R)));\n end\n if nargout>2\n invA = inv_map(eye(size(A)));\n end\n \nelse\n R = chol(real(A));\n rmap = @(X) R\\(R'\\X); \n P = rmap(imag(A));\n inv_map = @(X) inv_map_complex(X,rmap,P);\n\n if nargout>1\n logdet = 2*sum(log(diag(R))) + i*trace(P);\n end\nend\n\nend\n\nfunction Y = inv_map_complex(X,rmap,P)\n Z = rmap(X);\n Y = Z - i*P*Z;\nend\n\n\nfunction test_this()\n \n dim = 20;\n R = randn(dim,dim+1);\n C = R*R';\n C = C + 1.0e-20i*randn(dim); \n [map,logdet] = invchol_taylor(C);\n \n x = randn(dim,1);\n maps = imag([map(x),C\\x]),\n logdets = imag([logdet,log(det(C))])\n\n maps = real([map(x),C\\x]),\n logdets = real([logdet,log(det(C))])\n \n \nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "train_system.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/discrim_training/train_system.m", "size": 4969, "source_encoding": "utf_8", "md5": "69b1726b853595599d4a79414b256c8b", "text": "function [w,mce,divergence,w_pen,c_pen,optimizerState,converged] = train_system(classf,system,penalizer,W0,lambda,confusion,maxiters,maxCG,prior,optimizerState)\n% \n% Supervised training of a regularized K-class linear logistic\n% regression. 
Allows regularization via weight penalties and via\n% label confusion probabilities.\n%\n%\n% Inputs:\n% classf: 1-by-N row of class labels, in the range 1..K\n%\n% system: MV2DF function handle that maps parameters to score matrix.\n% Note: The system input data is already wrapped in this handle.\n% \n% penalizer: MV2DF function handle that maps parameters to a positive regularization penalty.\n%\n%\n% W0: initial parameters. This is NOT optional. \n%\n%\n% confusion: a scalar or a matrix of label confusion probabilities\n% -- if this is a K-by-K matrix, then \n% entry_ij denotes P(label_j | class i)\n%\n% -- if scalar: confusion = q, then \n% P(label_i | class_i) = 1-q, and\n% P(label_j | class_i) = q/(K-1)\n% \n% maxiters: the maximum number of Newton Trust Region optimization\n% iterations to perform. Note, the user can make maxiters\n% small, examine the solution and then continue training:\n% -- see W0 and optimizerState.\n%\n%\n% prior: a prior probability distribution over the K classes to\n% modify the optimization operating point.\n% optional: \n% omit or use []\n% default is prior = ones(1,K)/K\n%\n% optimizerState: In this implementation, it is the trust region radius.\n% optional: \n% omit or use []\n% If not supplied when resuming iteration,\n% this may cost some extra iterations. \n% Resume further iteration thus:\n% [W1,...,optimizerState] = train_..._logregr(...);\n% ... examine solution W1 ...\n% [W2,...,optimizerState] = train_..._logregr(...,W1,...,optimizerState);\n% \n%\n% \n%\n%\n% Outputs:\n% W: the solution. \n% mce: normalized multiclass cross-entropy of the solution. \n% The range is 0 (good) to 1(useless).\n%\n% optimizerState: see above, can be used to resume iteration.\n% \n\n\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\n\nclassf = classf(:)';\nK = max(classf);\nN = length(classf);\n\n\n\nif ~exist('maxCG','var') || isempty(maxCG)\n maxCG = 100;\nend\n\n\nif ~exist('optimizerState','var')\n optimizerState=[];\nend\n\nif ~exist('prior','var') || isempty(prior)\n prior = ones(K,1)/K;\nelse\n prior = prior(:);\nend\n\n\nweights = zeros(1,N);\nfor k = 1:K\n fk = find(classf==k);\n count = length(fk);\n weights(fk) = prior(k)/(count*log(2));\nend\n\n\nif ~exist('confusion','var') || isempty(confusion)\n confusion = 0;\nend\n\n\nif isscalar(confusion)\n q = confusion;\n confusion = (1-q)*eye(K) + (q/(K-1))*(ones(K)-eye(K));\nend\n\n\npost = bsxfun(@times,confusion,prior(:));\npost = bsxfun(@times,post,1./sum(post,1));\n\n\nlogpost = post;\nnz = logpost>0;\nlogpost(nz) = log(post(nz));\nconfusion_entropy = -mean(sum(post.*logpost,1),2);\nprior_entropy = -log(prior)'*prior;\nc_pen = confusion_entropy/prior_entropy;\nfprintf('normalized confusion entropy = %g\\n',c_pen);\n\n\nT = zeros(K,N);\nfor i=1:N\n T(:,i) = post(:,classf(i));\nend\n\n\nw=[]; \n\nobj1 = mce_obj(system,T,weights,log(prior));\nobj2 = penalizer(w);\nobj = sum_of_functions(w,[1,lambda],obj1,obj2);\n\nw0 = W0(:);\n\n\n[w,y,optimizerState,converged] = trustregion_newton_cg(obj,w0,maxiters,maxCG,optimizerState,[],1);\n\nw_pen = lambda*obj2(w)/prior_entropy;\n\n\nmce = y/prior_entropy-w_pen;\ndivergence = mce-c_pen;\nfprintf('mce = %g, divergence = %g, conf entr = %g, weight pen = %g\\n',mce,divergence,c_pen,w_pen);\n\n\n\n\nfunction y = score_map(W,X)\n[dim,N] = size(X);\nW = reshape(W,[],dim+1);\noffs = W(:,end);\nW(:,end)=[];\ny = bsxfun(@plus,W*X,offs);\ny = y(:);\n\nfunction W = score_transmap(y,X)\n[dim,N] = size(X);\ny = reshape(y,[],N).';\nW = [X*y;sum(y)]; \nW = W.';\nW = 
W(:);\n\nfunction test_this()\n\n\nK = 3;\nN = 100;\ndim = 2;\n\n\n% ----------------syntesize data -------------------\nrandn('state',0);\nmeans = randn(dim,K)*10; %signal\nX = randn(dim,K*N); % noise\nclassf = zeros(1,K*N);\nii = 1:N;\nfor k=1:K\n X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii));\n classf(ii) = k;\n ii = ii+N;\nend\n\nN = K*N;\n\n% ---------------- define system -------------------\n\n\nw=[]; \nmap = @(W) score_map(W,X);\ntransmap = @(Y) score_transmap(Y,X);\nsystem = linTrans(w,map,transmap);\npenalizer = sumsquares_penalty(w,1);\n\n% ------------- train it ------------------------------\n\nconfusion = 0.01;\nlambda = 0.01;\n\nW0 = zeros(K,dim+1);\nW = train_system(classf,system,penalizer,W0,lambda,confusion,20);\n\n% ------------ plot log posterior on training data --------------------\n\nscores = score_system(W,system,K);\nscores = logsoftmax(scores);\nsubplot(1,2,1);plot(scores');\n\n\nscores = score_system(W,system,K,true);\nscores = [scores;zeros(1,N)];\nscores = logsoftmax(scores);\nsubplot(1,2,2);plot(scores');\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "sum_of_functions.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/sum_of_functions.m", "size": 1094, "source_encoding": "utf_8", "md5": "af1885792c3ce587c098ffc61a10cc06", "text": "function [y,deriv] = sum_of_functions(w,weights,f,g)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, s(w), obtained by summing the \n% weighted outputs of the given functions:\n% s(w) = sum_i weights(i)*functions{i}(w)\n%\n% Usage examples:\n%\n% s = @(w) sum_of_functions(w,[1,-1],f,g)\n%\n% Here f,g are function handles to MV2DF's. \n\n\nif nargin==0\n test_this();\n return;\nend\n\nweights = weights(:);\n\nif isempty(w) \n\n s = stack(w,f,g,true);\n n = length(weights);\n\n map = @(s) reshape(s,[],n)*weights; \n transmap = @(y) reshape(y(:)*weights.',[],1);\n\n y = linTrans(s,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = sum_of_functions([],weights,f,g);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = sum_of_functions([],weights,f,g);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction test_this()\n\nA = randn(4,4);\nB = randn(4,4);\n\n\nw = [];\nf = gemm(w,4,4,4);\ng = transpose_mv2df(f,4,4);\n\n%f = A*B;\n%g = B'*A';\n\ns = sum_of_functions(w,[-1,1],f,g);\n%s = stack(w,f,g);\n\ntest_MV2DF(s,[A(:);B(:)]);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scale_function.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/scale_function.m", "size": 856, "source_encoding": "utf_8", "md5": "fae26a24cbea0fcc7ae35cf1642b18e4", "text": "function [y,deriv] = scale_function(w,scale,f)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, \n%\n% g(w) = scale(w)*f(w), \n%\n% where scale is scalar-valued and f is matrix-valued.\n%\n%\n% Here scale and f are function handles to MV2DF's. 
\n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n\n s = stack(w,f,scale);\n y = mm_special(s,@(w)reshape(w(1:end-1),[],1),@(w)w(end));\n return;\nend\n\n\nif isa(w,'function_handle')\n f = scale_function([],scale,f);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = scale_function([],scale,f);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction test_this()\n\nm = 5;\nn = 10;\ndata = randn(m,n);\nscal = 3;\nw = [data(:);scal];\n\ng = subvec([],m*n+1,1,m*n);\nscal = subvec([],m*n+1,m*n+1,1);\n\n\nf = scale_function([],scal,g);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "outerprod_of_functions.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/outerprod_of_functions.m", "size": 1085, "source_encoding": "utf_8", "md5": "731782f761b675bb6d3567ddb560c950", "text": "function [y,deriv] = outerprod_of_functions(w,f,g,m,n)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, \n%\n% g(w) = f(w)g(w)' \n%\n% where f(w) and g(w) are column vectors of sizes m and n respectively.\n%\n% Here f,g are function handles to MV2DF's. \n\n\nif nargin==0\n test_this();\n return;\nend\n\nif ~exist('n','var'), n=[]; end\n\n\nfunction A = extractA(w)\n if isempty(m), m = length(w)-n; end\n A = w(1:m);\n A = A(:);\nend\n\n\nfunction B = extractB(w)\n if isempty(m), m = length(w)-n; end\n B = w(1+m:end);\n B = B(:).';\nend\n\n\n\nif isempty(w) \n\n s = stack(w,f,g);\n y = mm_special(s,@(w)extractA(w),@(w)extractB(w));\n return;\nend\n\n\nif isa(w,'function_handle')\n f = outerprod_of_functions([],f,g,m,n);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = outerprod_of_functions([],f,g,m,n);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\nend\n\nfunction test_this()\n\nm = 5;\nn = 3;\nw = randn(m+n,1);\nf = subvec([],m+n,1,m);\ng = subvec([],m+n,m+1,n);\n\nh = outerprod_of_functions([],f,g,m,n);\ntest_MV2DF(h,w);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "interleave.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/interleave.m", "size": 2028, "source_encoding": "utf_8", "md5": "0cdd5849311559d9813888914f7530cd", "text": "function [y,deriv] = interleave(w,functions)\n% interleave is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, s(w), obtained by interleaving the outputs of \n% f() and g() thus:\n%\n% S(w) = [f(w)';g(w)'];\n% s(w) = S(:); \n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)interleave(w,functions);\n return;\nend\n\nif isa(w,'function_handle')\n outer = interleave([],functions);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n% if ~isa(f,'function_handle')\n% f = const_mv2df([],f);\n% end\n% if ~isa(g,'function_handle')\n% g = const_mv2df([],g);\n% end\n\nw = w(:);\n\nm = length(functions);\nk = length(w);\n\nif nargout==1\n y1 = functions{1}(w);\n n = length(y1);\n y = zeros(m,n);\n y(1,:) = y1;\n for i=2:m\n y(i,:) = functions{i}(w);\n end\n y = y(:);\n return;\nend\n\nderiv = cell(1,m);\n[y1,deriv{1}] = functions{1}(w);\nn = length(y1);\ny = zeros(m,n);\ny(1,:) = y1;\nfor i=2:m\n [y(i,:),deriv{i}] = functions{i}(w);\nend\ny = y(:);\n\nderiv = @(g2) deriv_this(g2,deriv,m,n,k);\n\n\nfunction [g,hess,linear] = deriv_this(y,deriv,m,n,k)\n\ny = 
reshape(y,m,n);\nif nargout==1\n g = deriv{1}(y(1,:).');\n for i=2:m\n g = g+ deriv{i}(y(i,:).');\n end\n return;\nend\n\nhess = cell(1,m);\nlin = false(1,m);\n[g,hess{1},lin(1)] = deriv{1}(y(1,:).');\nlinear = lin(1);\nfor i=2:m\n [gi,hess{i},lin(i)] = deriv{i}(y(i,:).');\n g = g + gi;\n linear = linear && lin(i);\nend\nhess = @(d) hess_this(d,hess,lin,m,n,k);\n\n\nfunction [h,Jv] = hess_this(d,hess,lin,m,n,k)\n\nif all(lin)\n h = [];\nelse\n h = zeros(k,1);\nend\n\nif nargout>1\n Jv = zeros(m,n);\n for i=1:m\n [hi,Jv(i,:)] = hess{i}(d);\n if ~lin(i)\n h = h + hi;\n end\n end\n Jv = Jv(:);\nelse\n for i=1:m\n hi = hess{i}(d);\n if ~lin(i)\n h = h + hi;\n end\n end\nend\n\nfunction test_this()\n\nw = [];\nf = exp_mv2df(w);\ng = square_mv2df(w);\nh = identity_trans(w);\n\ns = interleave(w,{f,g,h});\n\n\nw = randn(5,1);\ntest_MV2DF(s,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "shift_function.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/shift_function.m", "size": 896, "source_encoding": "utf_8", "md5": "82abefaa89d6e02403f0b543a3c69a0b", "text": "function [y,deriv] = shift_function(w,shift,f)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, \n%\n% g(w) = shift(w)+f(w), \n%\n% where shift is scalar-valued and f is matrix-valued.\n%\n%\n% Here shift and f are function handles to MV2DF's. \n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n\n s = stack(w,shift,f);\n map = @(s) s(2:end)+s(1); \n transmap = @(y) [sum(y);y];\n y = linTrans(s,map,transmap);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = shift_function([],shift,f);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = shift_function([],shift,f);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction test_this()\n\nm = 5;\nn = 10;\ndata = randn(m,n);\nshift = 3;\nw = [data(:);shift];\n\ng = subvec([],m*n+1,1,m*n);\nshift = subvec([],m*n+1,m*n+1,1);\n\n\nf = shift_function([],shift,g);\ntest_MV2DF(f,w);\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "dotprod_of_functions.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/dotprod_of_functions.m", "size": 952, "source_encoding": "utf_8", "md5": "2999899143500736ecbc06d5afc09df0", "text": "function [y,deriv] = dotprod_of_functions(w,f,g)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, \n%\n% g(w) = f(w)'g(w) \n%\n% where f(w) and g(w) are column vectors of the same size.\n%\n% Here f,g are function handles to MV2DF's. 
\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nfunction A = extractA(w)\n A = w(1:length(w)/2);\n A = A(:).';\nend\n\n\nfunction B = extractB(w)\n B = w(1+length(w)/2:end);\n B = B(:) ;\nend\n\n\n\nif isempty(w) \n\n s = stack(w,f,g);\n y = mm_special(s,@(w)extractA(w),@(w)extractB(w));\n return;\nend\n\n\nif isa(w,'function_handle')\n f = dotprod_of_functions([],f,g);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = dotprod_of_functions([],f,g);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\nend\n\nfunction test_this()\n\nm = 5;\nw = randn(2*m,1);\nf = subvec([],2*m,1,m);\ng = subvec([],2*m,m+1,m);\n\nh = dotprod_of_functions([],f,g);\ntest_MV2DF(h,w);\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "dottimes_of_functions.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/dottimes_of_functions.m", "size": 641, "source_encoding": "utf_8", "md5": "45195e5dddb789f3431e2f84495b06c7", "text": "function [y,deriv] = dottimes_of_functions(w,A,B)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme)\n%\n% w --> A(w) .* B(w)\n%\n% Here A and B are function handles to MV2DF's.\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\n\nif isempty(w) \n s = stack(w,A,B);\n y = dottimes(s);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = dottimes_of_functions([],A,B);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = dottimes_of_functions([],A,B);\n[y,deriv] = f(w);\n\n\nfunction test_this()\n\nM = 4;\n\nX = [];\nXt = transpose_mv2df(X,M,M);\n\nA = UtU(X,M,M);\nB = UtU(Xt,M,M);\n\n\nf = dottimes_of_functions(X,A,B);\n\n\ntest_MV2DF(f,randn(16,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "replace_hessian.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/replace_hessian.m", "size": 1399, "source_encoding": "utf_8", "md5": "1e948d50795df5102876278fd5022da8", "text": "function [y,deriv] = replace_hessian(w,f,cstep)\n% This is an MV2DF. 
See MV2DF_API_DEFINITION.readme.\n%\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w)\n y = @(w)replace_hessian(w,f,cstep);\n return;\nend\n\nif isa(w,'function_handle')\n outer = replace_hessian([],f,cstep);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n\nif nargout==1\n y = f(w);\nelse \n [y,derivf] = f(w);\n deriv = @(dy) deriv_this(dy,derivf,f,w,cstep);\nend\n\nend\n\nfunction [g,hess,linear] = deriv_this(dy,derivf,f,w,cstep)\ng = derivf(dy);\nif nargout>1\n linear = false;\n hess = @(dx) hess_this(dx,dy,f,w,cstep);\nend\nend\n\nfunction [h,Jv] = hess_this(dx,dy,f,w,cstep)\nif cstep\n h = cstep_approxHess(dx,dy,f,w);\nelse\n h = rstep_approxHess(dx,dy,f,w);\nend\nif nargout>1\n error('replace_hessian cannot compute Jv');\n %Jv = zeros(size(dy));\nend\n\nend\n\n\nfunction x = rstep_approxHess(dx,dy,f,x0)\nalpha = sqrt(eps); \nx2 = x0 + alpha*dx;\n[dummy,deriv2] = f(x2);\nx1 = x0 - alpha*dx;\n[dummy,deriv1] = f(x1);\ng2 = deriv2(dy);\ng1 = deriv1(dy);\nx = (g2-g1)/(2*alpha);\nend\n\nfunction p = cstep_approxHess(dx,dy,f,x0)\nx = x0 + 1e-20i*dx;\n[dummy,deriv] = f(x);\ng = deriv(dy);\np = 1e20*imag(g);\nend\n\n\n\n\n\nfunction test_this()\n\n\nA = randn(5); \nB = randn(5,1);\n\nmap = @(w) 5*w;\nh = linTrans([],map,map);\n\nf = solve_AXeqB([],5);\ng = replace_hessian([],f,true);\ng = g(h);\n\nw = [A(:);B(:)];\ntest_MV2DF(g,w);\n\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "product_of_functions.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/product_of_functions.m", "size": 745, "source_encoding": "utf_8", "md5": "ae86bc0a8429bacd6044704a6a8a0e06", "text": "function [y,deriv] = product_of_functions(w,A,B,m,k,n)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme)\n%\n% w --> vec ( reshape(A(w),m,k) * reshape(B(w),k,n) )\n%\n% Here A and B are function handles to MV2DF's.\n\n\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(w) \n s = stack(w,A,B);\n y = gemm(s,m,k,n);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = product_of_functions([],A,B,m,k,n);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = product_of_functions([],A,B,m,k,n);\n[y,deriv] = f(w);\n\n\nfunction test_this()\n\nM = 4;\nN = 4;\n\nX = [];\nXt = transpose_mv2df(X,M,N);\n\nA = UtU(X,M,N);\nB = UtU(Xt,N,M);\n\n\n%A = A(randn(16,1));\n%B = B(randn(16,1));\n\nf = product_of_functions(X,A,B,4,4,4);\n\n\ntest_MV2DF(f,randn(16,1));\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "stack.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/stack.m", "size": 3136, "source_encoding": "utf_8", "md5": "cbfe5ccd3255b5021692c3eb13e1798f", "text": "function [y,deriv] = stack(w,f,g,eqlen)\n% STACK is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, s(w), obtained by stacking the outputs of \n% f() and g() thus:\n% s(w) = [f(w);g(w)] \n\n\nif nargin==0\n test_this();\n return;\nend\n\nif ~exist('eqlen','var')\n eqlen = false;\nend\n\nif isempty(w)\n y = @(w)stack(w,f,g,eqlen);\n return;\nend\n\nif isa(w,'function_handle')\n outer = stack([],f,g,eqlen);\n y = compose_mv(outer,w,[]);\n return;\nend\n\n% if ~isa(f,'function_handle')\n% f = const_mv2df([],f);\n% end\n% if ~isa(g,'function_handle')\n% g = const_mv2df([],g);\n% end\n\nw = w(:);\n\nif nargout==1\n y1 = f(w);\n y2 = g(w);\n 
n1 = length(y1);\n n2 = length(y2);\n if eqlen, assert(n1==n2,'length(f(w))=%i must equal length(g(w))=%i.',n1,n2); end\n y = [y1;y2];\n return;\nend\n\n[y1,deriv1] = f(w);\n[y2,deriv2] = g(w);\ny = [y1;y2];\nn1 = length(y1);\nn2 = length(y2);\nif eqlen, assert(n1==n2,'length(f(w))=%i must equal length(g(w))=%i.',n1,n2); end\nderiv = @(g2) deriv_this(g2,deriv1,deriv2,n1);\n\n\nfunction [g,hess,linear] = deriv_this(y,deriv1,deriv2,n1)\n\nif nargout==1\n g1 = deriv1(y(1:n1));\n g2 = deriv2(y(n1+1:end));\n g = g1 + g2;\n return;\nend\n\n[g1,hess1,lin1] = deriv1(y(1:n1));\n[g2,hess2,lin2] = deriv2(y(n1+1:end));\n\ng = g1+g2;\nlinear = lin1 && lin2;\nhess = @(d) hess_this(d,hess1,hess2,lin1,lin2);\n\n\nfunction [h,Jv] = hess_this(d,hess1,hess2,lin1,lin2)\n\nif nargout>1\n [h1,Jv1] = hess1(d);\n [h2,Jv2] = hess2(d);\n Jv = [Jv1;Jv2];\nelse\n [h1] = hess1(d);\n [h2] = hess2(d);\nend\n\nif lin1 && lin2\n h = [];\nelseif ~lin1 && ~lin2\n h = h1 + h2;\nelseif lin2\n h = h1;\nelse\n h = h2;\nend\n\n\nfunction test_this()\nfprintf('-------------- Test 1 ------------------------\\n');\nfprintf('Stack [f(w);g(w)]: f() is non-linear and g() is non-linear:\\n');\n\n\nA = randn(4,5);\nB = randn(5,4);\n\n\nw = [];\nf = gemm(w,4,5,4);\ng = gemm(subvec(w,40,1,20),2,5,2);\n\ns = stack(w,f,g);\n\n\nw = [A(:);B(:)];\ntest_MV2DF(s,w);\n\nfprintf('--------------------------------------\\n\\n');\n\n\nfprintf('-------------- Test 2 ------------------------\\n');\nfprintf('Stack [f(w);g(w)]: f() is linear and g() is non-linear:\\n');\nA = randn(4,5);\nB = randn(5,4);\n\nw = [A(:);B(:)];\n\n\nT = randn(40);\nf = @(w) linTrans(w,@(x)T*x,@(y)T.'*y);\ng = @(w) gemm(w,4,5,4);\n\ns = @(w) stack(w,f,g);\n\n\ntest_MV2DF(s,w);\nfprintf('--------------------------------------\\n\\n');\n\n\nfprintf('-------------- Test 3 ------------------------\\n');\nfprintf('Stack [f(w);g(w)]: f() is non-linear and g() is linear:\\n');\nA = randn(4,5);\nB = randn(5,4);\n\nw = [A(:);B(:)];\n\n\nT = randn(40);\nf = @(w) linTrans(w,@(x)T*x,@(y)T.'*y);\ng = @(w) gemm(w,4,5,4);\n\ns = @(w) stack(w,g,f);\n\n\ntest_MV2DF(s,w);\nfprintf('--------------------------------------\\n\\n');\n\n\n\n\nfprintf('-------------- Test 4 ------------------------\\n');\nfprintf('Stack [f(w);g(w)]: f() is linear and g() is linear:\\n');\nw = randn(10,1);\nT1 = randn(11,10);\nf = @(w) linTrans(w,@(x)T1*x,@(y)T1.'*y);\n\nT2 = randn(12,10);\ng = @(w) linTrans(w,@(x)T2*x,@(y)T2.'*y);\ns = @(w) stack(w,g,f);\n\n\ntest_MV2DF(s,w);\nfprintf('--------------------------------------\\n\\n');\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "scale_and_translate.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/scale_and_translate.m", "size": 1341, "source_encoding": "utf_8", "md5": "121b1cd2e23a3d7111f310db8e3b6a05", "text": "function [y,deriv] = scale_and_translate(w,vectors,params,m,n)\n% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which \n% represents the new function, obtained by scaling and translating the \n% column vectors of the output matrix of the function vectors(w). 
The\n% scaling and translation parameters, params(w) is also a function of w.\n%\n% The output, y is calulated as follows:\n%\n% V = reshape(vectors(w),m,n);\n% [scal;offs] = params(w); where scal is scalar and offs is m-by-1\n% y = bsxfun(@plus,scal*V,offs);\n% y = y(:);\n%\n% Usage examples:\n%\n% s = @(w) sum_of_functions(w,[1,-1],f,g)\n%\n% Here f,g are function handles to MV2DF's. \n\n\nif nargin==0\n test_this();\n return;\nend\n\nif isempty(w) \n\n s = stack(w,vectors,params);\n y = calibrateScores(s,m,n);\n return;\nend\n\n\nif isa(w,'function_handle')\n f = scale_and_translate([],vectors,params,m,n);\n y = compose_mv(f,w,[]);\n return;\nend\n\n\nf = scale_and_translate([],vectors,params,m,n);\nif nargout==1\n y = f(w);\nelse\n [y,deriv] = f(w);\nend\n\n\nfunction test_this()\n\nm = 5;\nn = 10;\ndata = randn(m,n);\noffs = randn(m,1);\nscal = 3;\nw = [data(:);scal;offs];\n\nvectors = subvec([],m*n+m+1,1,m*n);\n%vectors = randn(size(data));\nparams = subvec([],m*n+m+1,m*n+1,m+1);\n%params = [scal;offs];\n\nf = scale_and_translate([],vectors,params,m,n);\ntest_MV2DF(f,w);\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "compose_mv.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/compose_mv.m", "size": 2958, "source_encoding": "utf_8", "md5": "108f7eb78b4ff77e907d369fc9ae14db", "text": "function [y,deriv] = compose_mv(outer,inner,x)\n% COMPOSE_MV is an MV2DF (see MV2DF_API_DEFINITION.readme) which represents\n% the combination of two functions. If 'outer' is an MV2DF for a function\n% g() and 'inner' for a function f(), then this MV2DF represents g(f(x)).\n\n%feature scopedaccelenablement off\n\nif nargin==0\n test_this();\n return;\nend\n\n\nif isempty(x)\n y = @(w)compose_mv(outer,inner,w);\n return;\nend\n\nif isa(x,'function_handle')\n fh = compose_mv(outer,inner,[]); % fh =@(x) outer(inner(x))\n y = compose_mv(fh,x,[]); % y =@(w) outer(inner(x(w)))\n return;\nend\n\n% if ~isa(outer,'function_handle')\n% outer = const_mv2df([],outer);\n% end\n% if ~isa(inner,'function_handle')\n% inner = const_mv2df([],inner);\n% end\n\n\n\nif nargout==1\n y = outer(inner(x));\n return;\nend\n\n[y1,deriv1] = inner(x);\n[y,deriv2] = outer(y1);\nderiv = @(g3) deriv_this(deriv1,deriv2,g3);\n\n\n\nfunction [g,hess,linear] = deriv_this(deriv1,deriv2,g3)\n\nif nargout==1\n g = deriv1(deriv2(g3));\n return;\nend\n\n[g2,hess2,lin2] = deriv2(g3);\n[g,hess1,lin1] = deriv1(g2);\n\nhess =@(d) hess_this(deriv1,hess1,hess2,lin1,lin2,d);\n\nlinear = lin1 && lin2;\n\n\nfunction [h,Jv] = hess_this(deriv1,hess1,hess2,lin1,lin2,d)\n\n\nif nargout==1\n if ~lin2\n [h1,Jv1] = hess1(d);\n h2 = hess2(Jv1);\n h2 = deriv1(h2);\n elseif ~lin1\n h1 = hess1(d);\n end\nelse\n [h1,Jv1] = hess1(d);\n [h2,Jv] = hess2(Jv1);\n if ~lin2\n h2 = deriv1(h2);\n end\nend\n\nif lin1 && lin2\n h=[];\nelseif (~lin1) && (~lin2)\n h = h1+h2;\nelseif lin1\n h = h2;\nelse % if lin2\n h = h1;\nend\n\nfunction test_this()\n\nfprintf('-------------- Test 1 ------------------------\\n');\nfprintf('Composition g(f(w)): f() is non-linear and g() is non-linear:\\n');\nA = randn(4,5);\nB = randn(5,4);\n\nw = [A(:);B(:)];\n\nf = @(w) gemm(w,4,5,4);\ng1 = gemm(f,2,4,2);\n\ntest_MV2DF(g1,w);\nfprintf('--------------------------------------\\n\\n');\n\n\nfprintf('-------------- Test 2 ------------------------\\n');\nfprintf('Composition g(f(w)): f() is linear and g() is non-linear:\\n');\nA = randn(4,5);\nB = 
randn(5,4);\n\nw = [A(:);B(:)];\n\n\nT = randn(40);\nf = @(w) linTrans(w,@(x)T*x,@(y)T.'*y);\ng2 = gemm(f,4,5,4);\n\ntest_MV2DF(g2,w);\nfprintf('--------------------------------------\\n\\n');\n\n\nfprintf('-------------- Test 3 ------------------------\\n');\nfprintf('Composition g(f(w)): f() is non-linear and g() is linear:\\n');\nA = randn(4,5);\nB = randn(5,4);\n\nw = [A(:);B(:)];\nf = @(w) gemm(w,4,5,4);\n\nT = randn(16);\ng3 = linTrans(f,@(x)T*x,@(y)T.'*y);\n\ntest_MV2DF(g3,w);\nfprintf('--------------------------------------\\n\\n');\n\n\n\n\nfprintf('-------------- Test 4 ------------------------\\n');\nfprintf('Composition g(f(w)): f() is linear and g() is linear:\\n');\nw = randn(10,1);\nT1 = randn(11,10);\nf = @(w) linTrans(w,@(x)T1*x,@(y)T1.'*y);\n\nT2 = randn(5,11);\ng4 = linTrans(f,@(x)T2*x,@(y)T2.'*y);\n\n\ntest_MV2DF(g4,w);\nfprintf('--------------------------------------\\n\\n');\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "pav_calibration.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/calibration/pav_calibration.m", "size": 2716, "source_encoding": "utf_8", "md5": "2a9298835be5d7757fb4d660a5a2d7b3", "text": "function [pav_trans,score_bounds,llr_bounds] = pav_calibration(tar,non,small_val)\n% Creates a calibration transformation function using the PAV algorithm.\n% Inputs:\n% tar: A vector of target scores.\n% non: A vector of non-target scores.\n% small_val: An offset to make the transformation function\n% invertible. small_val is subtracted from the left-hand side\n% of the bin and added to the right-hand side (and the bin\n% height is linear between its left and right ends).\n% Outputs:\n% pav_trans: The transformation function. It takes in scores and\n% outputs (calibrated) log-likelihood ratios.\n% score_bounds: The left and right ends of the line segments\n% that make up the transformation.\n% llr_bounds: The lower and upper ends of the line segments that\n% make up the transformation.\n\nif nargin==0\n test_this()\n return\nelse\n assert(nargin==3)\n assert(size(tar,1)==1)\n assert(size(non,1)==1)\n assert(length(tar)>0)\n assert(length(non)>0)\nend\n\nlargeval = 1e6;\n\nscores = [-largeval tar non largeval];\nPideal = [ones(1,length(tar)+1),zeros(1,length(non)+1)];\n[scores,perturb] = sort(scores);\nPideal = Pideal(perturb);\n[Popt,width,height] = pavx(Pideal);\ndata_prior = (length(tar)+1)/length(Pideal);\nllr = logit(Popt) - logit(data_prior);\nbnd_ndx = make_bnd_ndx(width);\nscore_bounds = scores(bnd_ndx);\nllr_bounds = llr(bnd_ndx);\nllr_bounds(1:2:end) = llr_bounds(1:2:end) - small_val;\nllr_bounds(2:2:end) = llr_bounds(2:2:end) + small_val;\npav_trans = @(s) pav_transform(s,score_bounds,llr_bounds);\nend\n\nfunction scr_out = pav_transform(scr_in,score_bounds,llr_bounds)\nscr_out = zeros(1,length(scr_in));\nfor ii=1:length(scr_in)\n x = scr_in(ii);\n [x1,x2,v1,v2] = get_line_segment_vals(x,score_bounds,llr_bounds);\n scr_out(ii) = (v2 - v1) * (x - x1) / (x2 - x1) + v1;\nend\nend\n\nfunction bnd_ndx = make_bnd_ndx(width)\nlen = length(width)*2;\nc = cumsum(width);\nbnd_ndx = zeros(1,len);\nbnd_ndx(1:2:len) = [1 c(1:end-1)+1];\nbnd_ndx(2:2:len) = c;\nend\n\nfunction [x1,x2,v1,v2] = get_line_segment_vals(x,score_bounds,llr_bounds)\np = find(x>=score_bounds,1,'last');\nx1 = score_bounds(p);\nx2 = score_bounds(p+1);\nv1 = llr_bounds(p);\nv2 = llr_bounds(p+1);\nend\n\nfunction test_this()\nntar = 10;\nnnon = 12;\ntar = 2*randn(1,ntar)+2;\nnon = 2*randn(1,nnon)-2;\ntarnon = [tar 
non];\n\nscores = [-inf tarnon inf];\nPideal = [ones(1,length(tar)+1),zeros(1,length(non)+1)];\n[scores,perturb] = sort(scores);\nPideal = Pideal(perturb);\n[Popt,width,height] = pavx(Pideal);\ndata_prior = (length(tar)+1)/length(Pideal);\nllr = logit(Popt) - logit(data_prior);\n[dummy,pinv] = sort(perturb);\ntmp = llr(pinv);\nllr = tmp(2:end-1)\n\npav_trans = pav_calibration(tar,non,0);\npav_trans(tarnon)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "align_with_ndx.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Scores/align_with_ndx.m", "size": 2628, "source_encoding": "utf_8", "md5": "5899b5e5bd43dea8280d84cea8fdf0ec", "text": "function aligned_scr = align_with_ndx(scr,ndx)\n% The ordering in the output Scores object corresponds to ndx, so\n% aligning several Scores objects with the same ndx will result in\n% them being comparable with each other.\n% Inputs:\n% scr: a Scores object\n% ndx: a Key or Ndx object\n% Outputs:\n% aligned_scr: scr resized to size of 'ndx' and reordered\n% according to the ordering of modelset and segset in 'ndx'.\n\nif nargin==1\n test_this();\n return\nend\n\nassert(nargin==2)\nassert(isa(scr,'Scores'))\nassert(isa(ndx,'Key')||isa(ndx,'Ndx'))\nassert(scr.validate())\nassert(ndx.validate())\n\naligned_scr = Scores();\n\naligned_scr.modelset = ndx.modelset;\naligned_scr.segset = ndx.segset;\nm = length(ndx.modelset);\nn = length(ndx.segset);\n\n\n[hasmodel,rindx] = ismember(ndx.modelset,scr.modelset);\nrindx = rindx(hasmodel);\n[hasseg,cindx] = ismember(ndx.segset,scr.segset);\ncindx = cindx(hasseg);\n\naligned_scr.scoremat = zeros(m,n);\naligned_scr.scoremat(hasmodel,hasseg) = double(scr.scoremat(rindx,cindx)); \n\naligned_scr.scoremask = false(m,n);\naligned_scr.scoremask(hasmodel,hasseg) = scr.scoremask(rindx,cindx);\n\nassert(sum(aligned_scr.scoremask(:)) <= sum(hasmodel)*sum(hasseg));\n\nif isa(ndx,'Ndx')\n aligned_scr.scoremask = aligned_scr.scoremask & ndx.trialmask;\nelse\n aligned_scr.scoremask = aligned_scr.scoremask & (ndx.tar | ndx.non);\nend\n\nif sum(hasmodel) 0\n\tlog_warning('%i of %i targets missing.\\n',missing,sum(ndx.tar(:)));\n end\n missing = sum(ndx.non(:)) - sum(non(:));\n if missing > 0\n\tlog_warning('%i of %i non-targets missing.\\n',missing,sum(ndx.non(:)));\n end\nelse\n mask = ndx.trialmask & aligned_scr.scoremask;\n \n missing = sum(ndx.trialmask(:)) - sum(mask(:));\n if missing > 0\n\tlog_warning('%i of %i trials missing\\n',missing,sum(ndx.trialmask(:)));\n end\n\nend\n\nassert(all(isfinite(aligned_scr.scoremat(aligned_scr.scoremask(:)))))\n\nassert(aligned_scr.validate())\n\nend\n\nfunction test_this()\n\nkey = Key();\nkey.modelset = {'1','2','3'};\nkey.segset = {'a','b','c'};\nkey.tar = logical(eye(3));\nkey.non = ~key.tar;\n\nscr = Scores();\nscr.scoremat = [1 2 3; 4 5 6; 7 8 9];\nscr.scoremask = true(3);\nscr.modelset = {'3','2','1'};\nscr.segset = {'c','b','a'};\n\nscores = scr.scoremat,\nscr = scr.align_with_ndx(key);\nscores = scr.scoremat,\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "filter.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Scores/filter.m", "size": 2622, "source_encoding": "utf_8", "md5": "ebaa2297b42e23384ffa07c12bdcc005", "text": "function outscr = filter(inscr,modlist,seglist,keep)\n% Removes some of the information in a Scores object. 
Useful for\n% creating a gender specific score set from a pooled gender score\n% set. Depending on the value of 'keep', the two input lists\n% indicate the models and test segments (and their associated\n% scores) to retain or discard. \n% Inputs:\n% inscr: A Scores object.\n% modlist: A cell array of strings which will be compared with\n% the modelset of 'inscr'.\n% seglist: A cell array of strings which will be compared with\n% the segset of 'inscr'.\n% keep: A boolean indicating whether modlist and seglist are the\n% models to keep or discard. \n% Outputs:\n% outscr: A filtered version of 'inscr'.\n\nif nargin == 0\n test_this();\n return\nend\n\nassert(nargin==4)\nassert(isa(inscr,'Scores'))\nassert(iscell(modlist))\nassert(iscell(seglist))\nassert(inscr.validate())\n\nif keep\n keepmods = modlist;\n keepsegs = seglist;\nelse\n keepmods = setdiff(inscr.modelset,modlist);\n keepsegs = setdiff(inscr.segset,seglist);\nend\n\nkeepmodidx = ismember(inscr.modelset,keepmods);\nkeepsegidx = ismember(inscr.segset,keepsegs);\n\noutscr = Scores();\noutscr.modelset = inscr.modelset(keepmodidx);\noutscr.segset = inscr.segset(keepsegidx);\noutscr.scoremat = inscr.scoremat(keepmodidx,keepsegidx);\noutscr.scoremask = inscr.scoremask(keepmodidx,keepsegidx);\n\nassert(outscr.validate())\n\nif length(inscr.modelset) > length(outscr.modelset)\n log_info('Number of models reduced from %d to %d.\\n',length(inscr.modelset),length(outscr.modelset));\nend\nif length(inscr.segset) > length(outscr.segset)\n log_info('Number of test segments reduced from %d to %d.\\n',length(inscr.segset),length(outscr.segset));\nend\n\nend\n\nfunction test_this()\n\nscr = Scores();\nscr.modelset = {'aaa','bbb','ccc','ddd'};\nscr.segset = {'11','22','33','44','55'};\nscr.scoremat = [1,2,3,4,5;6,7,8,9,10;11,12,13,14,15;16,17,18,19,20];\nscr.scoremask = true(4,5);\n\nfprintf('scr.modelset\\n');\ndisp(scr.modelset)\nfprintf('scr.segset\\n');\ndisp(scr.segset)\nfprintf('scr.scoremat\\n');\ndisp(scr.scoremat)\n\nmodlist = {'bbb','ddd'}\nseglist = {'11','55'}\n\nkeep = true\n\nout = Scores.filter(scr,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.scoremat\\n');\ndisp(out.scoremat)\nfprintf('out.scoremask\\n');\ndisp(out.scoremask)\n\nkeep = false\n\nout = Scores.filter(scr,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.scoremat\\n');\ndisp(out.scoremat)\nfprintf('out.scoremask\\n');\ndisp(out.scoremask)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "filter.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Key/filter.m", "size": 3047, "source_encoding": "utf_8", "md5": "9274e13ab0bf80ca9a90fd6f46da8ff0", "text": "function outkey = filter(inkey,modlist,seglist,keep)\n% Removes some of the information in a key. Useful for creating a\n% gender specific key from a pooled gender key. Depending on the\n% value of 'keep', the two input lists indicate the strings to\n% retain or the strings to discard.\n% Inputs:\n% inkey: A Key object.\n% modlist: A cell array of strings which will be compared with\n% the modelset of 'inkey'.\n% seglist: A cell array of strings which will be compared with\n% the segset of 'inkey'.\n% keep: A boolean indicating whether modlist and seglist are the\n% models to keep or discard. 
\n% Outputs:\n% outkey: A filtered version of 'inkey'.\n\nif nargin == 0\n test_this();\n return\nelse\n assert(nargin==4)\nend\n\nassert(isa(inkey,'Key'))\nassert(iscell(modlist))\nassert(iscell(seglist))\nassert(inkey.validate())\n\nif keep\n keepmods = modlist;\n keepsegs = seglist;\nelse\n keepmods = setdiff(inkey.modelset,modlist);\n keepsegs = setdiff(inkey.segset,seglist);\nend\n\nkeepmodidx = ismember(inkey.modelset,keepmods);\nkeepsegidx = ismember(inkey.segset,keepsegs);\n\noutkey = Key();\noutkey.modelset = inkey.modelset(keepmodidx);\noutkey.segset = inkey.segset(keepsegidx);\noutkey.tar = inkey.tar(keepmodidx,keepsegidx);\noutkey.non = inkey.non(keepmodidx,keepsegidx);\n\nassert(outkey.validate())\n\nif length(inkey.modelset) > length(outkey.modelset)\n log_info('Number of models reduced from %d to %d.\\n',length(inkey.modelset),length(outkey.modelset));\nend\nif length(inkey.segset) > length(outkey.segset)\n log_info('Number of test segments reduced from %d to %d.\\n',length(inkey.segset),length(outkey.segset));\nend\n\nend\n\nfunction test_this()\n\nkey = Key();\nkey.modelset = {'aaa','bbb','ccc','ddd'};\nkey.segset = {'11','22','33','44','55'};\nkey.tar = logical([1,0,0,1,0;0,1,0,1,1;0,0,0,1,0;1,1,0,0,0]);\nkey.non = logical([0,1,0,0,0;1,0,0,0,0;1,1,1,0,0;0,0,1,1,1]);\n\nfprintf('key.modelset\\n');\ndisp(key.modelset)\nfprintf('key.segset\\n');\ndisp(key.segset)\nfprintf('key.tar\\n');\ndisp(key.tar)\nfprintf('key.non\\n');\ndisp(key.non)\n\nmodlist = {'bbb','ddd'}\nseglist = {'11','55'}\n\nkeep = true\n\nout = Key.filter(key,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.tar\\n');\ndisp(out.tar)\nfprintf('out.non\\n');\ndisp(out.non)\n\nkeep = false\n\nout = Key.filter(key,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.tar\\n');\ndisp(out.tar)\nfprintf('out.non\\n');\ndisp(out.non)\n\n\nmodlist = {'bbb','ddd','eee'}\nseglist = {'11','66','77','55'}\n\nkeep = true\n\nout = Key.filter(key,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.tar\\n');\ndisp(out.tar)\nfprintf('out.non\\n');\ndisp(out.non)\n\nkeep = false\n\nout = Key.filter(key,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.tar\\n');\ndisp(out.tar)\nfprintf('out.non\\n');\ndisp(out.non)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "read_hdf5.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Key/read_hdf5.m", "size": 1196, "source_encoding": "utf_8", "md5": "4057278a996259de22fed6ee29c5d3b2", "text": "function key = read_hdf5(infilename)\n% Reads a Key object from an hdf5 file.\n% Inputs:\n% infilename: The name for the hdf5 file to read.\n% Outputs:\n% key: A Key object created from the information in the hdf5\n% file.\n\nassert(nargin==1)\nassert(isa(infilename,'char'))\n\nkey = Key();\n\nkey.modelset = h5strings_to_cell(infilename,'/ID/row_ids');\nkey.segset = h5strings_to_cell(infilename,'/ID/column_ids');\n\noldformat = false;\ninfo = hdf5info(infilename);\ndatasets = info.GroupHierarchy.Datasets;\nfor ii=1:length(datasets)\n if strcmp(datasets(ii).Name,'/target_mask')\n\toldformat = true;\n end\nend\n\nif oldformat\n key.tar = 
logical(hdf5read(infilename,'/target_mask','V71Dimensions',true));\n key.non = logical(hdf5read(infilename,'/nontarget_mask','V71Dimensions',true));\nelse\n trialmask = hdf5read(infilename,'/trial_mask','V71Dimensions',true);\n key.tar = trialmask > 0.5;\n key.non = trialmask < -0.5;\nend\n\nassert(key.validate())\n\nfunction cellstrarr = h5strings_to_cell(infilename,attribname)\ntmp = hdf5read(infilename,attribname,'V71Dimensions',true);\nnumentries = length(tmp);\ncellstrarr = cell(numentries,1);\nfor ii=1:numentries\n cellstrarr{ii} = tmp(ii).Data;\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "filter.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Ndx/filter.m", "size": 2788, "source_encoding": "utf_8", "md5": "6d39760ecafc786f43259d1adb98a810", "text": "function outndx = filter(inndx,modlist,seglist,keep)\n% Removes some of the information in an Ndx. Useful for creating a\n% gender specific Ndx from a pooled gender Ndx. Depending on the\n% value of 'keep', the two input lists indicate the strings to\n% retain or the strings to discard.\n% Inputs:\n% inndx: An Ndx object.\n% modlist: A cell array of strings which will be compared with\n% the modelset of 'inndx'.\n% seglist: A cell array of strings which will be compared with\n% the segset of 'inndx'.\n% keep: A boolean indicating whether modlist and seglist are the\n% models to keep or discard. \n% Outputs:\n% outndx: A filtered version of 'inndx'.\n\nif nargin == 0\n test_this();\n return\nend\n\nassert(nargin==4)\nassert(isa(inndx,'Ndx'))\nassert(inndx.validate())\nassert(iscell(modlist))\nassert(iscell(seglist))\n\nif keep\n keepmods = modlist;\n keepsegs = seglist;\nelse\n keepmods = setdiff(inndx.modelset,modlist);\n keepsegs = setdiff(inndx.segset,seglist);\nend\n\nkeepmodidx = ismember(inndx.modelset,keepmods);\nkeepsegidx = ismember(inndx.segset,keepsegs);\n\noutndx = Ndx();\noutndx.modelset = inndx.modelset(keepmodidx);\noutndx.segset = inndx.segset(keepsegidx);\noutndx.trialmask = inndx.trialmask(keepmodidx,keepsegidx);\n\nassert(outndx.validate())\n\nif length(inndx.modelset) > length(outndx.modelset)\n log_info('Number of models reduced from %d to %d.\\n',length(inndx.modelset),length(outndx.modelset));\nend\nif length(inndx.segset) > length(outndx.segset)\n log_info('Number of test segments reduced from %d to %d.\\n',length(inndx.segset),length(outndx.segset));\nend\n\nend\n\nfunction test_this()\n\nndx = Ndx();\nndx.modelset = {'aaa','bbb','ccc','ddd'};\nndx.segset = {'11','22','33','44','55'};\nndx.trialmask = true(4,5);\n\nfprintf('ndx.modelset\\n');\ndisp(ndx.modelset)\nfprintf('ndx.segset\\n');\ndisp(ndx.segset)\nfprintf('ndx.trialmask\\n');\ndisp(ndx.trialmask)\n\nmodlist = {'bbb','ddd'}\nseglist = {'11','55'}\n\nkeep = true\n\nout = Ndx.filter(ndx,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.trialmask\\n');\ndisp(out.trialmask)\n\nkeep = false\n\nout = Ndx.filter(ndx,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.trialmask\\n');\ndisp(out.trialmask)\n\n\nmodlist = {'bbb','ddd','eee'}\nseglist = {'11','66','77','55'}\n\nkeep = true\n\nout = Ndx.filter(ndx,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.trialmask\\n');\ndisp(out.trialmask)\n\nkeep = false\n\nout = 
Ndx.filter(ndx,modlist,seglist,keep);\n\nfprintf('out.modelset\\n');\ndisp(out.modelset)\nfprintf('out.segset\\n');\ndisp(out.segset)\nfprintf('out.trialmask\\n');\ndisp(out.trialmask)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "read_hdf5.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Ndx/read_hdf5.m", "size": 838, "source_encoding": "utf_8", "md5": "424ae971c22eb22cf8c27af6130b9698", "text": "function ndx = read_hdf5(infilename)\n% Creates an Ndx object from the information in an hdf5 file.\n% Inputs:\n% infilename: The name of the hdf5 file contain the information\n% necessary to construct an Ndx object.\n% Outputs:\n% ndx: An Ndx object containing the information in the input\n% file.\n\nassert(nargin==1)\nassert(isa(infilename,'char'))\n\nndx = Ndx();\nndx.modelset = h5strings_to_cell(infilename,'/ID/row_ids');\nndx.segset = h5strings_to_cell(infilename,'/ID/column_ids');\nndx.trialmask = logical(hdf5read(infilename,'/trial_mask','V71Dimensions',true));\n\nassert(ndx.validate())\n\nfunction cellstrarr = h5strings_to_cell(infilename,attribname)\ntmp = hdf5read(infilename,attribname,'V71Dimensions',true);\nnumentries = length(tmp);\ncellstrarr = cell(numentries,1);\nfor ii=1:numentries\n cellstrarr{ii} = tmp(ii).Data;\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "filter_on_right.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Id_Map/filter_on_right.m", "size": 1885, "source_encoding": "utf_8", "md5": "deb124220c828ae065475bc93957d53f", "text": "function out_idmap = filter_on_right(in_idmap,idlist,keep)\n% Removes some of the information in an idmap. Depending on the\n% value of 'keep', the idlist indicates the strings to retain or\n% the strings to discard.\n% Inputs:\n% in_idmap: An Id_Map object to be pruned.\n% idlist: A cell array of strings which will be compared with\n% the rightids of 'in_idmap'.\n% keep: A boolean indicating whether idlist contains the ids to\n% keep or to discard. 
\n% Outputs:\n% out_idmap: A filtered version of 'in_idmap'.\n\nif nargin == 0\n test_this();\n return\nend\n\nassert(nargin==3)\nassert(isa(in_idmap,'Id_Map'))\nassert(in_idmap.validate())\nassert(iscell(idlist))\n\nif keep\n keepids = idlist;\nelse\n keepids = setdiff(in_idmap.rightids,idlist);\nend\n\nkeep_idx = ismember(in_idmap.rightids,keepids);\n\nout_idmap = Id_Map();\nout_idmap.leftids = in_idmap.leftids(keep_idx);\nout_idmap.rightids = in_idmap.rightids(keep_idx);\nassert(out_idmap.validate(false))\n\nend\n\nfunction test_this()\n\nidmap = Id_Map();\nidmap.leftids = {'aaa','bbb','ccc','bbb','ddd','eee'};\nidmap.rightids = {'11','22','33','44','55','22'};\n\nfprintf('idmap.leftids\\n');\ndisp(idmap.leftids)\nfprintf('idmap.rightids\\n');\ndisp(idmap.rightids)\n\nidlist = {'22','44'}\n\nkeep = true\n\nout = Id_Map.filter_on_right(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nkeep = false\n\nout = Id_Map.filter_on_right(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nidlist = {'11','33','66'}\n\nkeep = true\n\nout = Id_Map.filter_on_right(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nkeep = false\n\nout = Id_Map.filter_on_right(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "read_hdf5.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Id_Map/read_hdf5.m", "size": 777, "source_encoding": "utf_8", "md5": "47581f23817e49ffc325aed95a088106", "text": "function idmap = read_hdf5(infilename)\n% Creates an Id_Map object from the information in an hdf5 file.\n% Inputs:\n% infilename: The name of the hdf5 file containing the information\n% necessary to construct an Id_Map object.\n% Outputs:\n% idmap: An Id_Map object containing the information in the input\n% file.\n\nassert(nargin==1)\nassert(isa(infilename,'char'))\n\nidmap = Id_Map();\nidmap.leftids = h5strings_to_cell(infilename,'/left_ids');\nidmap.rightids = h5strings_to_cell(infilename,'/right_ids');\nassert(idmap.validate())\n\nfunction cellstrarr = h5strings_to_cell(infilename,attribname)\ntmp = hdf5read(infilename,attribname,'V71Dimensions',true);\nnumentries = length(tmp);\ncellstrarr = cell(numentries,1);\nfor ii=1:numentries\n cellstrarr{ii} = tmp(ii).Data;\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "filter_on_left.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Id_Map/filter_on_left.m", "size": 1871, "source_encoding": "utf_8", "md5": "27abe0c92b6ff488892e389fee1fb5e9", "text": "function out_idmap = filter_on_left(in_idmap,idlist,keep)\n% Removes some of the information in an idmap. Depending on the\n% value of 'keep', the idlist indicates the strings to retain or\n% the strings to discard.\n% Inputs:\n% in_idmap: An Id_Map object to be pruned.\n% idlist: A cell array of strings which will be compared with\n% the leftids of 'in_idmap'.\n% keep: A boolean indicating whether idlist contains the ids to\n% keep or to discard. 
\n% Outputs:\n% out_idmap: A filtered version of 'in_idmap'.\n\nif nargin == 0\n test_this();\n return\nend\n\nassert(nargin==3)\nassert(isa(in_idmap,'Id_Map'))\nassert(in_idmap.validate())\nassert(iscell(idlist))\n\nif keep\n keepids = idlist;\nelse\n keepids = setdiff(in_idmap.leftids,idlist);\nend\n\nkeep_idx = ismember(in_idmap.leftids,keepids);\n\nout_idmap = Id_Map();\nout_idmap.leftids = in_idmap.leftids(keep_idx);\nout_idmap.rightids = in_idmap.rightids(keep_idx);\nassert(out_idmap.validate(false))\n\nend\n\nfunction test_this()\n\nidmap = Id_Map();\nidmap.leftids = {'aaa','bbb','ccc','bbb','ddd'};\nidmap.rightids = {'11','22','33','44','55'};\n\nfprintf('idmap.leftids\\n');\ndisp(idmap.leftids)\nfprintf('idmap.rightids\\n');\ndisp(idmap.rightids)\n\nidlist = {'bbb','ddd'}\n\nkeep = true\n\nout = Id_Map.filter_on_left(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nkeep = false\n\nout = Id_Map.filter_on_left(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nidlist = {'bbb','ddd','eee'}\n\nkeep = true\n\nout = Id_Map.filter_on_left(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nkeep = false\n\nout = Id_Map.filter_on_left(idmap,idlist,keep);\n\nfprintf('out.leftids\\n');\ndisp(out.leftids)\nfprintf('out.rightids\\n');\ndisp(out.rightids)\n\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "L_BFGS.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/optimization/L_BFGS.m", "size": 5021, "source_encoding": "utf_8", "md5": "ccfcc8e580c4dfa191a42bfcbaf055cb", "text": "function [w,y,mem,logs] = L_BFGS(obj,w,maxiters,timeout,mem,stpsz0,callback)\n% L-BFGS Quasi-Newton unconstrained optimizer.\n% -- This has a small interface change from LBFGS.m --\n%\n% Inputs:\n% obj: optimization objective, with interface: [y,grad] = obj(w),\n% where w is the parameter vector, y is the scalar objective value \n% and grad is a function handle, so that grad(1) gives the gradient \n% (same size as w).\n% w: the initial parameter vector\n% maxiters: max number of LBFGS iterations (line search iterations do not \n% count towards this limit).\n% timeout: LBFGS will stop when timeout (in seconds) is reached.\n% mem is either: (i) A struct with previously computed LBFGS data, to\n% allow resumption of iteration.\n% (ii) An integer: the size of the LBFGS memory. 
A good\n% default is 20.\n% \n\n\n %some linesearch magic numbers\n maxfev = 20; %max number of function evaluations\n stpmin = 1e-15; %same as Poblano default\n stpmax = 1e15; %same as Poblano default\n ftol = 1e-5; % as recommended by Nocedal (c1 in his book)\n gtol = 0.9; % as recommended by Nocedal (c2 in his book)\n xtol = 1e-15; %same as Poblano default\n quiet = false;\n\n %termination parameters\n %stopTol = 1e-5; %same as Poblano\n relFuncTol = 1e-6; %same as Poblano\n \n if ~exist('stpsz0','var') || isempty(stpsz0)\n stpsz0 = 1;\n end\n stpsz = stpsz0;\n \n\n if ~exist('timeout','var') || isempty(timeout)\n timeout = 15*60;\n fprintf('timeout defaulted to 15 minutes');\n end;\n\n if ~exist('callback','var') || isempty(callback)\n ncbLogs = 0;\n else\n ncbLogs = length( callback(w) );\n end;\n\n tic;\n dim = length(w);\n\n if ~isstruct(mem)\n m = mem;\n mem = [];\n mem.m = m;\n mem.sz = 0;\n mem.rho = zeros(1,m);\n mem.S = zeros(dim,m);\n mem.Y = zeros(dim,m);\n else\n m = mem.m;\n end\n\n if ~exist('y','var') || isempty(y)\n [y,grad] = obj(w);\n g = grad(1);\n fprintf('LBFGS 0: obj = %g, ||g||=%g\\n',y,sqrt(g'*g));\n end\n \n \n initial_y = y;\n \n logs = zeros(3+ncbLogs, maxiters);\n nlogs = 0;\n \n gmag = sqrt(g'*g);\n k = 0;\n while true\n\n if gmag< eps\n fprintf('LBFGS converged with tiny gradient\\n');\n break;\n end\n \n \n % choose direction\n p = -Hprod(g,mem);\n assert(g'*p<0,'p is not downhill');\n \n % line search\n \n g0 = g;\n y0 = y;\n w0 = w;\n [w,y,grad,g,alpha,info,nfev] = minpack_cvsrch(obj,w,y,g,p,stpsz,...\n ftol,gtol,xtol, ...\n stpmin,stpmax,maxfev,quiet); \n\n stpsz = 1;\n \n delta_total = abs(initial_y-y);\n delta = abs(y0-y);\n if delta_total>eps\n relfunc = delta/delta_total;\n else\n relfunc = delta;\n end\n \n gmag = sqrt(g'*g);\n \n \n if info==1 %Wolfe is happy\n sk = w-w0;\n yk = g-g0;\n dot = sk'*yk;\n assert(dot>0);\n if mem.sz==m\n mem.S(:,1:m-1) = mem.S(:,2:m);\n mem.Y(:,1:m-1) = mem.Y(:,2:m);\n mem.rho(:,1:m-1) = mem.rho(:,2:m);\n else\n mem.sz = mem.sz + 1;\n end\n sz = mem.sz;\n mem.S(:,sz) = sk;\n mem.Y(:,sz) = yk;\n mem.rho(sz) = 1/dot;\n fprintf('LBFGS %i: ||g||/n = %g, rel = %g\\n',k+1,gmag/length(g),relfunc);\n else\n fprintf('LBFGS %i: NO UPDATE, info = %i, ||g||/n = %g, rel = %g\\n',k+1,info,gmag/length(g),relfunc);\n end\n \n time = toc;\n nlogs = nlogs+1;\n if ncbLogs > 0\n logs(:,nlogs) = [time; y; nfev; callback(w)'];\n disp(logs(4:end,nlogs)');\n else\n logs(:,nlogs) = [time;y;nfev];\n end\n \n k = k + 1;\n if k>=maxiters\n fprintf('LBFGS stopped: maxiters exceeded\\n');\n break;\n end\n \n if time>timeout\n fprintf('LBFGS stopped: timeout\\n');\n break;\n end\n\n if relfunc < relFuncTol\n fprintf('\\nTDN: stopped with minimal function change\\n');\n break;\n end\n \n end\n logs = logs(:,1:nlogs); \n\nend\n\n\n\nfunction r = Hprod(q,mem)\n if mem.sz==0\n r = q;\n return;\n end\n sz = mem.sz;\n S = mem.S;\n Y = mem.Y;\n rho = mem.rho;\n alpha = zeros(1,sz);\n for i=sz:-1:1\n alpha(i) = rho(i)*S(:,i)'*q;\n q = q - alpha(i)*Y(:,i);\n end\n yy = sum(Y(:,sz).^2,1);\n r = q/(rho(sz)*yy);\n for i=1:sz\n beta = rho(i)*Y(:,i)'*r;\n r = r + S(:,i)*(alpha(i)-beta);\n end\nend\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_PYCRP.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/CRP/create_PYCRP.m", "size": 10787, "source_encoding": "utf_8", "md5": "482772d72ab94176d598b555bd28ec19", "text": "function PYCRP = create_PYCRP(alpha,beta,e,n)\n% alpha: alpha>=0, 
concentration\n% beta: 0<= beta <=1, discount\n\n\n \n\n if nargin==0\n %test_this2();\n test_Gibbsmatrix()\n return;\n end\n\n \n if nargin==4\n PYCRP = create_PYCRP(1,0);\n PYCRP.set_expected_number_tables(e,n,alpha,beta);\n return;\n else\n assert(nargin==2);\n end\n \n \n\n assert(alpha>=0);\n assert(beta>=0 && beta<=1);\n\n \n PYCRP.logprob = @logprob;\n PYCRP.logprob3 = @logprob3;\n PYCRP.sample = @sample;\n PYCRP.expected_number_tables = @expected_number_tables;\n PYCRP.set_expected_number_tables = @set_expected_number_tables;\n PYCRP.ent = @ent;\n PYCRP.getParams = @getParams;\n PYCRP.GibbsMatrix = @GibbsMatrix;\n PYCRP.slowGibbsMatrix = @slowGibbsMatrix;\n \n function [concentration,discount] = getParams()\n concentration = alpha;\n discount = beta;\n end\n \n\n\n\n \n function e = expected_number_tables(n)\n e = ent(alpha,beta,n);\n end\n \n\n function e = ent(alpha,beta,n)\n if alpha==0 && beta==0\n e = 1;\n elseif isinf(alpha)\n e = n;\n elseif alpha>0 && beta>0 \n A = gammaln(alpha + beta + n) + gammaln(alpha + 1) ...\n - log(beta) - gammaln(alpha+n) - gammaln(alpha+beta);\n B = alpha/beta;\n e = B*expm1(A-log(B)); %exp(A)-B\n elseif alpha>0 && beta==0\n e = alpha.*( psi(n+alpha) - psi(alpha) );\n elseif alpha==0 && beta>0\n A = gammaln(beta + n) - log(beta) - gammaln(n) - gammaln(beta);\n e = exp(A);\n end\n end\n \n\n\n function [flag,concentration,discount] = set_expected_number_tables(e,n,concentration,discount)\n if ~isempty(concentration) && ~isempty(discount)\n error('you can''t specify both parameters');\n end\n if isempty(concentration) && isempty(discount)\n error('you must specify one parameter');\n end\n if e<1 || e>n\n error('expectation must be between 1 and %i',n);\n end\n \n if isempty(concentration)\n assert(discount>=0 && discount<1);\n beta = discount;\n if beta==0 && e==1\n alpha = 0;\n concentration = alpha;\n flag = 1;\n return;\n elseif e==n\n alpha = inf;\n concentration = alpha;\n flag = 1;\n return;\n end\n \n min_e = ent(0,beta,n);\n if e < min_e\n error('e=%g is impossible at discount=%g, minimum is e=%g',e,beta,min_e);\n end\n \n f = @(logalpha) ent(exp(logalpha),beta,n) - e;\n [logalpha,~,flag] = fzero(f,0);\n alpha = exp(logalpha);\n concentration = alpha;\n\n elseif isempty(discount)\n \n assert(concentration>=0);\n alpha = concentration;\n \n if alpha==0 && e==1\n beta = 0;\n discount = beta;\n flag = 1;\n return;\n elseif e==n\n beta = 1;\n discount = beta;\n flag = 1;\n return;\n end\n \n min_e = ent(alpha,0,n);\n if e < min_e\n error('e=%g is impossible at concentration=%g, minimum is e=%min_e',e,alpha,min_e);\n end\n \n f = @(logitbeta) ent(alpha,sigmoid(logitbeta),n) - e;\n [logitbeta,~,flag] = fzero(f,0);\n beta = sigmoid(logitbeta);\n discount = beta;\n \n end\n end\n\n function y = sigmoid(x)\n y = 1./(1+exp(-x));\n end\n\n \n function logP = logprob(counts) %Wikipedia\n\n\n \n K = length(counts);\n T = sum(counts);\n \n if isinf(alpha) && beta==1 %singleton tables\n if all(counts==1)\n logP = 0;\n else\n logP = -inf;\n end\n return;\n end\n \n if alpha==0 && beta==0 %single table\n if K==1\n logP = 0;\n else\n logP = -inf;\n end\n return;\n end\n \n if alpha>0 && beta>0 % 2-param Pitman-Yor generalization\n logP = gammaln(alpha) - gammaln(alpha+T) + K*log(beta) ...\n + gammaln(alpha/beta + K) - gammaln(alpha/beta) ...\n + sum(gammaln(counts-beta)) ...\n - K*gammaln(1-beta);\n elseif beta==0 && alpha>0 % classical CRP\n logP = gammaln(alpha) + K*log(alpha) - gammaln(alpha+T) + sum(gammaln(counts));\n \n elseif beta>0 && alpha==0\n logP 
= (K-1)*log(beta) + gammaln(K) - gammaln(T) ...\n - K*gammaln(1-beta) + sum(gammaln(counts-beta));\n end\n \n end\n\n\n % Seems wrong\n% function logP = logprob2(counts) % Goldwater\n% \n% \n% \n% K = length(counts);\n% T = sum(counts);\n% if beta>0 %Pitman-Yor generalization\n% logP = gammaln(1+alpha) - gammaln(alpha+T) ...\n% + sum(beta*(1:K-1)+alpha) ...\n% + sum(gammaln(counts-beta)) ...\n% - K*gammaln(1-beta);\n% else %1 parameter CRP\n% logP = gammaln(1+alpha) + (K-1)*log(alpha) - gammaln(alpha+T) + sum(gammaln(counts));\n% end\n% \n% end\n\n\n % Agrees with Wikipedia version (faster for small counts)\n function logP = logprob3(counts)\n logP = 0;\n n = 0;\n for k=1:length(counts)\n % seat first customer at new table\n if k>1\n logP = logP +log((alpha+(k-1)*beta)/(n+alpha)); \n end\n n = n + 1;\n % seat the rest at this table\n for i = 2:counts(k)\n logP = logP + log((i-1-beta)/(n+alpha));\n n = n + 1;\n end\n end\n end\n\n\n % GibbsMatrix. Computes matrix of conditional log-probabilities \n % suitable for Gibbs sampling, or pseudolikelihood calculation.\n % Input: \n % labels: n-vector, maps each of n customers to a table in 1..m \n % Output:\n % logP: (m+1)-by-n matrix of **unnormalized** log-probabilities\n % logP(i,j) = log P(customer j at table i | seating of all others) + const\n % table m+1 is a new table\n function [logP,empties] = GibbsMatrix(labels)\n m = max(labels);\n n = length(labels);\n blocks = sparse(labels,1:n,true);\n counts = full(sum(blocks,2)); %original table sizes\n logP = repmat(log([counts-beta;alpha+m*beta]),1,n); %most common values for every row\n \n %return;\n \n empties = false(1,n); %new empty table when customer j removed\n for i=1:m\n cmin = counts(i) - 1;\n tar = blocks(i,:);\n if cmin==0 %table empty \n logP(i,tar) = log(alpha + (m-1)*beta); \n empties(tar) = true;\n else\n logP(i,tar) = log(cmin-beta);\n end\n end\n logP(m+1,empties) = -inf; \n end\n\n\n function logP = slowGibbsMatrix(labels)\n m = max(labels);\n n = length(labels);\n blocks = sparse(labels,1:n,true,m+1,n);\n counts = full(sum(blocks,2)); %original table sizes\n logP = zeros(m+1,n);\n for j=1:n\n cj = counts;\n tj = labels(j);\n cj(tj) = cj(tj) - 1;\n nz = cj>0;\n k = sum(nz);\n if k==m\n logP(nz,j) = log(cj(nz) - beta);\n logP(m+1,j) = log(alpha + m*beta);\n else %new empty table\n logP(nz,j) = log(cj(nz) - beta);\n logP(tj,j) = log(alpha + k*beta);\n logP(m+1,j) = -inf;\n end\n end\n end\n\n\n function [labels,counts] = sample(T)\n labels = zeros(1,T);\n counts = zeros(1,T);\n labels(1) = 1;\n K = 1; %number of classes\n counts(1) = 1;\n for i=2:T\n p = zeros(K+1,1);\n p(1:K) = counts(1:K) - beta; \n p(K+1) = alpha + K*beta;\n [~,k] = max(randgumbel(K+1,1) + log(p)); \n labels(i) = k;\n if k>K\n K = K + 1;\n assert(k==K);\n counts(k) = 1;\n else\n counts(k) = counts(k) + 1; \n end\n end\n counts = counts(1:K);\n labels = labels(randperm(T));\n end\n \n \nend\n\n\n\nfunction test_this2()\n \n T = 20;\n e = 10;\n N = 1000;\n\n crp1 = create_PYCRP(0,[],e,T);\n [concentration,discount] = crp1.getParams()\n \n crp2 = create_PYCRP([],0,e,T);\n [concentration,discount] = crp2.getParams()\n \n\n K1 = zeros(1,T);\n K2 = zeros(1,T);\n for i=1:N\n [~,counts] = crp1.sample(T);\n K = length(counts);\n K1(K) = K1(K) + 1;\n\n [~,counts] = crp2.sample(T);\n K = length(counts);\n K2(K) = K2(K) + 1;\n end\n e1 = sum((1:T).*K1)/N\n e2 = sum((1:T).*K2)/N\n \n close all;\n \n subplot(2,1,1);bar(1:T,K1);\n subplot(2,1,2);bar(1:T,K2);\n \n K1 = K1/sum(K1);\n K2 = K2/sum(K2);\n 
%dlmwrite('K1.table',[(1:T)',K1'],' ');\n %dlmwrite('K2.table',[(1:T)',K2'],' ');\n \n for i=1:T\n fprintf('(%i,%6.4f) ',2*i-1,K1(i))\n end\n fprintf('\\n');\n for i=1:T\n fprintf('(%i,%6.4f) ',2*i,K2(i))\n end\n fprintf('\\n');\n \n \nend\n\n\nfunction test_Gibbsmatrix()\n\n alpha = randn^2;\n beta = rand;\n PYCRP = create_PYCRP(alpha,beta);\n labels = [1 1 1 2 1 3 4 4]\n logP = exp(PYCRP.slowGibbsMatrix(labels))\n \n logP = exp(PYCRP.GibbsMatrix(labels))\n \n \nend\n\n\nfunction test_this()\n\n alpha1 = 0.0;\n beta1 = 0.6;\n crp1 = create_PYCRP(alpha1,beta1);\n \n \n alpha2 = 0.1;\n beta2 = 0.6;\n crp2 = create_PYCRP(alpha2,beta2);\n \n \n close all;\n figure;\n hold;\n \n for i=1:100;\n L1 = crp1.sample(100);\n L2 = crp2.sample(100);\n C1=full(sum(int2onehot(L1),2));\n C2=full(sum(int2onehot(L2),2));\n x11 = crp1.logprob(C1);\n x12 = crp2.logprob3(C1);\n x22 = crp2.logprob3(C2);\n x21 = crp1.logprob(C2);\n \n plot(x11,x12,'.g');\n plot(x21,x22,'.b');\n \n end\n\n figure;hold;\n crp = crp1;\n for i=1:100;\n L1 = crp.sample(100);\n C1=full(sum(int2onehot(L1),2));\n x = crp.logprob(C1);\n y = crp.logprob3(C1);\n \n plot(x,y);\n \n end\n \n \n \n \n\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "rand_fake_Dirichlet.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/synth_data/rand_fake_Dirichlet.m", "size": 810, "source_encoding": "utf_8", "md5": "3070cfe2d8487d8ae48c7740fdc24c2d", "text": "function R = rand_fake_Dirichlet(alpha,m,n)\n% This is no longer Dirichlet. I replaced it with a faster ad-hoc\n% distribution.\n%\n% Generates m-by-n matrix of n samples from m-category Dirichlet, with\n% concentration parameter: alpha > 0.\n\n if nargin==0\n test_this();\n return;\n end\n\n %R = reshape(randgamma(alpha,1,m*n),m,n);\n R = exp(alpha*randn(m,n).^2);\n R = bsxfun(@rdivide,R,sum(R,1));\n\n\nend\n\n\nfunction E = app_exp(X)\n XX = X.^2/2;\n XXX = XX.*X/3;\n XXXX = XXX.*X/4;\n E = XXXX+ XXX + XX + X+1;\n \nend\n\nfunction test_this()\n\n close all;\n m = 400;\n %alpha = 1/(2*m);\n alpha = 2;\n \n n = 5000;\n R = rand_fake_Dirichlet(alpha,m,n);\n maxR = max(R,[],1);\n hist(maxR,100);\n\n% n = 50;\n% R = randDirichlet(alpha,m,n);\n% hist(R(:),100);\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_T_backend.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/T_backend/create_T_backend.m", "size": 6482, "source_encoding": "utf_8", "md5": "ee013ab7facb848049ba586dfb7b6f33", "text": "function TBE = create_T_backend(nu,dim,K)\n% Create a (multivariate) T-distribution generative backend for multiclass classification.\n% The classes have different means, but the scatter matrix and degrees of\n% freedom are common to all clases.\n%\n% This object provides a method for supervised ML training (EM algorithm),\n% as well as a method for scoring at runtime (class log-likelihoods).\n%\n% Inputs:\n% nu: scalar >0, degrees of freedom\n% dim: data dimensionality\n% K: number of classes\n%\n% Typical usage:\n% > TBE = create_T-backend(nu,dim,K); %nu is fixed by user and not learnt during training\n% > TBE.train(TrainData,L,10); % TrainData: dim-by-N, L: K-by-N, (sparse) one-hot labels\n% > LLH = TBE.logLH(TestData) \n%\n% For EM algorithm, see: \n% Geoffrey J. McClachlan and Thriyambakam Krishnan, The EM Algorithm and Extensions, \n% 2nd Ed. John Wiley & Sons, 2008. 
Section 2.6 EXAMPLE 2.6: MULTIVARIATE t-DISTRIBUTION WITH KNOWN\n% DEGREES OF FREEDOM\n\n\n if nargin==0\n test_this();\n return;\n end\n\n assert(nu>0);\n\n Mu = zeros(dim,K);\n C = eye(dim);\n R = [];\n RMu = [];\n muWmu = [];\n logdetC = 0;\n prepare();\n \n TBE.logLH = @logLH;\n TBE.getParams = @getParams;\n TBE.setParams = @setParams;\n TBE.train = @train;\n TBE.simulate = @simulate;\n TBE.randParams = @randParams;\n TBE.test_error_rate = @test_error_rate;\n TBE.cross_entropy = @cross_entropy;\n \n function [Mu1,C1] = getParams()\n Mu1 = Mu;\n C1 = C;\n end\n\n function setParams(Mu0,C0)\n Mu = Mu0;\n C = C0;\n prepare();\n end\n \n function [obj,XE] = train(X,L,niters)\n [d,N] = size(X); assert(d==dim);\n [k,n] = size(L); assert(k==K && n==N);\n \n\n obj = zeros(1,niters+1);\n obj_i = EM_objective(X,L);\n obj(1) = obj_i;\n \n doXE = nargout>=2;\n if doXE\n XE = zeros(1,niters+1);\n XE_i = cross_entropy(X,L);\n XE(1) = XE_i;\n fprintf('%i: %g, %g\\n',0,obj_i,XE_i);\n else\n fprintf('%i: %g\\n',0,obj_i);\n end\n \n for i=1:niters\n EM_iteration(X,L);\n obj_i = EM_objective(X,L);\n obj(i+1) = obj_i;\n if doXE\n XE_i = cross_entropy(X,L);\n XE(i+1) = XE_i;\n fprintf('%i: %g, %g\\n',i,obj_i,XE_i);\n else\n fprintf('%i: %g\\n',i,obj_i);\n end\n end\n end\n\n\n\n %Class log-likelihood scores, with all irrelevant constants omitted\n function LLH = logLH(X,df) \n %inputs: \n % X: dim-by-N, data \n % df: [optional default df = nu], scalar, df>0, degrees of freedom parameter\n %\n %output:\n % LLH: K-by-N, class log-likelihoods\n \n if ~exist('df','var') || isempty(df)\n df = nu;\n else\n assert(df>0);\n end\n Delta = delta(X);\n LLH = (-0.5*(df+dim))*log1p(Delta/df);\n end\n\n\n function prepare()\n R = chol(C); % R'R = C and W = inv(C) = inv(R)*inv(R')\n RMu = R.'\\Mu; % dim-dy-K\n muWmu = sum(RMu.^2,1); % 1-by-K\n logdetC = 2*sum(log(diag(R)));\n end\n\n\n function Delta = delta(X) \n %input X: dim-by-N, data \n %output Delta: K-by-N, squared Mahalanobis distances between data and means \n RX = R.'\\X; % dim-by-N\n Delta = bsxfun(@minus,sum(RX.^2,1),(2*RMu).'*RX); %K-by-N\n Delta = bsxfun(@plus,Delta,muWmu.');\n end\n\n\n function EM_iteration(X,L)\n Delta = sum(L.*delta(X),1); %1-by-N\n u = (nu+dim)./(nu+Delta); %1-by-N posterior expectations of hiddden precision scaling factors\n Lu = bsxfun(@times,L,u); %K-by-N\n normLu = bsxfun(@rdivide,Lu,sum(Lu,2)); \n newMu = X*normLu.'; %dim-by-K\n diff = X - newMu*L;\n newC = (bsxfun(@times,diff,u)*diff.')/sum(u);\n setParams(newMu,newC);\n end\n\n\n function obj = EM_objective(X,L)\n % X: dim-by-N, data\n % K: K-by-N, one-hot labels\n % obj: scalar\n \n LLH = logLH(X);\n \n N = size(X,2);\n obj = L(:).'*LLH(:) - (N/2)*logdetC ;\n \n end\n\n\n function randParams(ncov,muscale)\n assert(ncov>=dim);\n D = randn(dim,ncov);\n C = D*D.';\n setParams(zeros(dim,K),C);\n Mu = muscale*simulate(K);\n setParams(Mu,C);\n end\n\n function [X,L] = simulate(N,df,L)\n if ~exist('L','var') || isempty(L)\n L = sparse(randi(K,1,N),1:N,1,K,N);\n end\n if ~exist('df','var') || isempty(df)\n df = ceil(nu);\n end\n u = sum(randn(df,N).^2,1)/df; % chi^2 with df dregrees of freedom, scaled so that =1\n X = Mu*L + bsxfun(@rdivide,R.'*randn(dim,N),sqrt(u));\n end\n\n %assuming flat prior for now\n function e = test_error_rate(X,L)\n N = size(X,2);\n LLH = TBE.logLH(X);\n [~,labels] = max(LLH,[],1);\n Lhat = sparse(labels,1:N,1,K,N);\n e = 1-(L(:).'*Lhat(:))/N;\n end\n\n %assuming flat prior for now\n function e = cross_entropy(X,L,df)\n if ~exist('df','var') || isempty(df)\n 
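% no df supplied: fall back to the backend's own fixed degrees of freedom nu\n            % (test_this below sweeps this argument to profile cross-entropy against df)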
df = nu;\n else\n assert(df>0);\n end\n LLH = TBE.logLH(X,df);\n P = exp(bsxfun(@minus,LLH,max(LLH,[],1)));\n P = bsxfun(@rdivide,P,sum(P,1));\n e = -mean(log(full(sum(L.*P,1))),2)/log(K);\n end\n\n\n\nend\n\n\nfunction test_this()\n\n close all;\n\n dim = 100; % data dimensionality\n K = 10; % numer of classes\n nu = 3; % degrees of freedom (t-distribition parameter)\n N = K*1000;\n \n %create test and train data\n TBE0 = create_T_backend(nu,dim,K);\n TBE0.randParams(dim,5/sqrt(dim));\n [X,L] = TBE0.simulate(N);\n [Xtest,Ltest] = TBE0.simulate(N);\n \n \n TBE = create_T_backend(nu,dim,K);\n [obj,XE] = TBE.train(X,L,20);\n subplot(1,2,1);plot(obj);title('error-rate');\n subplot(1,2,2);plot(XE);title('cross-entropy');\n\n \n train_error_rate = TBE.test_error_rate(X,L),\n\n test_error_rate = TBE.test_error_rate(Xtest,Ltest),\n \n \n train_XE = TBE.cross_entropy(X,L),\n test_XE = TBE.cross_entropy(Xtest,Ltest),\n \n df = [0.1:0.1:10];\n XE = zeros(2,length(df));\n for i=1:length(df)\n XE(1,i) = TBE.cross_entropy(X,L,df(i));\n XE(2,i) = TBE.cross_entropy(Xtest,Ltest,df(i));\n end\n figure;plot(df,XE(1,:),df,XE(2,:));\n grid;xlabel('df');ylabel('XE');\n legend('train','test');\n \n \n \nend\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "train_TLDIvector.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/TLDIvector/train_TLDIvector.m", "size": 4374, "source_encoding": "utf_8", "md5": "6029b83917586ea3e2977c8fad616093", "text": "function [W,Mu,TT] = train_TLDIvector(stats_or_ivectors,N,T,TT,nu,labels,niters,W,Mu)\n% Inputs:\n% stats_or_ivectors: can be either F, or ivectors\n% F: dm-by-n first-order stats (m: UBM size; d: feature dim; n: no segments)\n% ivectors: k-by-n, classical i-vector point-estimates\n% N: m-by-n zero order stats\n% T: dm-by-k factor loading matrix\n% TT: [optional] k^2-by-m, vectorized precomputed T_i'T_i, i=1:m\n% nu: scalar, nu>0, degrees of freedom\n% labels: 1-by-n, label vector, with label values in 1:L, where L is\n% number of languages.\n% niters: number of VBEM iterations to do \n%\n% W: k-by-k within class precision [optional, for initialization]\n% Mu: k-by-L language means [optional, for initialization] \n%\n%\n% Outputs:\n% W: k-by-k within-class precision estimate\n% Mu: k-by-L class mean estimates\n\n\n if nargin==0\n test_this();\n return;\n end\n\n [A,B,k,n] = getPosteriorNatParams(stats_or_ivectors,N,T,TT);\n\n\n \n L = max(labels);\n if ~exist('Mu','var') || isempty(Mu)\n W = eye(k);\n Mu = zeros(k,L);\n else\n assert(all(size(W)==k));\n [k2,L2] = size(Mu);\n assert(k2==k && L==L2);\n end\n \n for iter=1:niters\n WMu = W*Mu;\n \n C = zeros(size(W));\n Pmeans = zeros(k,n);\n for ell=1:L\n tt = find(labels==ell);\n % E-step\n for t=tt\n Pt = W + reshape(B(:,t),k,k); %posterior precision\n Pmean = Pt\\(WMu(:,ell)+A(:,t)); %posterior mean\n Pmeans(:,t) = Pmean;\n C = C + inv(Pt);\n end\n \n %M-step\n D = Pmeans(:,tt);\n Mu(:,ell) = mean(D,2);\n D = bsxfun(@minus,D,Mu(:,ell));\n C = C + D*D.';\n end\n C = C/n;\n W = inv(C),\n Mu = Mu,\n \n end \n \n\n\nend\n\n\n\nfunction test_this\n\n close all;\n\n %dimensions\n d = 10; %feature dimension\n m = 10; %no components\n k = 3; %ivector dimension\n n = 1000; %number of segments\n L = 2; %number of languages\n mindur = 2; \n maxdur = 100;\n niters = 3;\n \n T = randn(d*m,k);\n \n W = randn(k,k*2);\n W = W*W.'/k;\n\n \n UBM.logweights = randn(m,1)/5;\n UBM.Means = 5*randn(d,m);\n \n \n \n Mu = randn(k,L);\n \n [F,N,labels] = 
make_data(UBM,Mu,W,T,m,d,n,mindur,maxdur);\n dur = sum(N,1);\n L1 = labels==1;\n L2 = labels==2;\n\n \n TT = precomputeTT(T,d,k,m);\n \n ivectors = stats2ivectors(F,N,T,TT);\n \n \n LR1 = [1,-1]* score_LDIvector(F,N,T,TT,W,Mu);\n %LR2 = [1,-1]* score_LDIvector(ivectors,N,T,TT,W,Mu);\n\n subplot(4,1,1);plot(dur(L1),LR1(L1),'.r',dur(L2),LR1(L2),'.g');\n \n \n [W2,Mu2] = train_LDIvector(F,N,T,[],labels,niters);\n [W3,Mu3] = train_LDIvector(ivectors,N,T,[],labels,niters);\n [W4,Mu4,map] = train_standaloneLGBE(ivectors,labels);\n \n LR2 = [1,-1]* score_LDIvector(F,N,T,[],W2,Mu2);\n LR3 = [1,-1]* score_CPF(ivectors,N,T,TT,W3,Mu3);\n\n LR4 = [1,-1]*map(ivectors);\n subplot(4,1,2);plot(dur(L1),LR2(L1),'.r',dur(L2),LR2(L2),'.g');\n subplot(4,1,3);plot(dur(L1),LR3(L1),'.r',dur(L2),LR3(L2),'.g');\n subplot(4,1,4);plot(dur(L1),LR4(L1),'.r',dur(L2),LR4(L2),'.g');\n \nend\n\n\nfunction [F,N,labels,relConf] = make_data(UBM,Mu,W,T,m,d,n,mindur,maxdur)\n\n [k,L] = size(Mu);\n labels = randi(L,1,n); \n Labels = sparse(labels,1:n,1,L,n); %L-by-n one-hot class labels\n x = Mu*Labels+chol(W)\\randn(k,n); %i-vectors \n Tx = T*x; \n\n \n dur = randi(1+maxdur-mindur,1,n) + mindur -1;\n dm = d*m;\n F = zeros(dm,n);\n N = zeros(m,n);\n \n logweights = UBM.logweights;\n prior = exp(logweights-max(logweights));\n prior = prior/sum(prior);\n priorConfusion = exp(-prior.'*log(prior))-1;\n relConf = zeros(1,n);\n \n \n for i=1:n\n D = dur(i);\n states = randcatgumbel(UBM.logweights,D);\n States = sparse(states,1:D,1,m,D);\n X = (reshape(Tx(:,i),d,m)+UBM.Means)*States + randn(d,D);\n Q = bsxfun(@minus,UBM.Means.'*X,0.5*sum(X.^2,1));\n Q = bsxfun(@plus,Q,UBM.logweights-0.5*sum(UBM.Means.^2,1).');\n \n Q = exp(bsxfun(@minus,Q,max(Q,[],1)));\n Q = bsxfun(@rdivide,Q,sum(Q,1));\n CE = -(States(:).'*log(Q(:)))/D; %cross entropy\n relConf(i) = (exp(CE)-1)/priorConfusion;\n \n \n Ni = sum(Q,2);\n Fi = X*Q.';\n N(:,i) = Ni;\n F(:,i) = Fi(:);\n \n end\n \n\n\n\nend\n\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_diagonalized_C.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/TLDIvector/create_diagonalized_C.m", "size": 4986, "source_encoding": "utf_8", "md5": "c1a642f817fa817097bf89b070af4c04", "text": "function C = create_diagonalized_C(B,R,RM,Ra,W,M,a)\n% Creates object to represent: C = inv(lambda W + B), \n%\n% Inputs:\n% B: positive definite matrix (i-vector dimension)\n% R: chol(W), so that R'R=W (i-vector dimension)\n% RM: R*M, where M has language means in columns\n% Ra: (R')\\a, vector (i-vector dimension)\n% W,M,a: optional for verification with slow version\n\n if nargin==0\n test_this();\n return;\n end\n\n dim = length(Ra);\n \n\n K = (R.'\\B)/R;\n [V,D] = eig(K); %K = V*D*V'\n e = diag(D); %eigenvalues\n \n mWm = sum(RM.^2,1); %vector of m'Wm, for every language\n VRM = V.'*RM;\n VRa = V.'*Ra;\n \n VRaVRa = VRa.^2;\n VRMVRM = VRM.^2;\n VRMVRa = bsxfun(@times,VRa,VRM);\n \n \n \n C.traceCW = @traceCW;\n C.logdetCW = @logdetCW;\n C.quad = @quad;\n\n C.slowQuad = @slowQuad;\n C.slow_traceCW = @slow_traceCW;\n C.slow_logdetCW = @slow_logdetCW;\n \n C.lambda_by_root = @lambda_by_root;\n C.lambda_by_min = @lambda_by_min;\n C.lambda_by_fixed_point = @lambda_by_fixed_point;\n \n %C.slow_xCWCx = @slow_xCWCx;\n %C.xCWCx = @xCWCx;\n %C.xCx = @xCx;\n %C.slow_xCx = @slow_xCx;\n \n \n function log_lambda = lambda_by_root(nu,log_lambda,ell)\n f = @(log_lambda) log_lambda - log((nu+dim)/(nu+energy(log_lambda,ell)));\n log_lambda = 
fzero(f,log_lambda);\n end\n \n function log_lambda = lambda_by_fixed_point(nu,log_lambda,ell,niters)\n f = @(log_lambda) log((nu+dim)/(nu+energy(log_lambda,ell)));\n for i=1:niters\n log_lambda = f(log_lambda);\n end\n end\n \n function log_lambda = lambda_by_min(nu,log_lambda)\n f = @(log_lambda) (log_lambda - log((nu+dim)/(nu+energy(log_lambda))))^2;\n log_lambda = fminsearch(f,log_lambda); \n end\n\n \n function y = energy(log_lambda,ell)\n lambda = exp(log_lambda);\n y = quad(lambda,ell) + traceCW(lambda);\n %fprintf('%i: energy = %g\\n%',ell,y);\n end\n\n \n function y = quad(lambda,ell)\n s = lambda + e;\n ss = s.^2;\n mWmu = sum(bsxfun(@rdivide, lambda*VRMVRM(:,ell) + VRMVRa(:,ell), s),1);\n muWmu = sum(bsxfun(@rdivide,bsxfun(@plus,...\n lambda^2*VRMVRM(:,ell) + ...\n (2*lambda)*VRMVRa(:,ell), ...\n VRaVRa), ss),1);\n y = mWm(ell) + muWmu -2*mWmu;\n end\n \n\n\n function y = slowQuad(lambda)\n P = lambda*W + B;\n cholP = chol(P);\n Mu = cholP\\(cholP'\\bsxfun(@plus,lambda*W*M,a));\n delta = R*(Mu-M);\n y = sum(delta.^2,1);\n %y = sum(Mu.*(W*M),1);\n end\n\n% function y = xCx(lambda,x)\n% z = V'*((R.')\\x);\n% s = lambda + e;\n% y = sum(z.^2./s,1);\n% end\n% \n% function y = xCWCx(lambda,x)\n% z = V'*((R.')\\x);\n% s = lambda + e;\n% y = sum(z.^2./s.^2,1);\n% end\n% \n% function y = slow_xCx(lambda,x)\n% P = lambda*W+B;\n% y = x'*(P\\x);\n% end\n% \n% function y = slow_xCWCx(lambda,x)\n% P = lambda*W+B;\n% z = P\\x;\n% y = z.'*W*z;\n% end\n\n \n function [y,back] = traceCW(lambda)\n s = lambda + e;\n r = 1./s;\n y = sum(r,1);\n back = @back_this;\n function dlambda = back_this(dy)\n dr = dy;\n ds = (-dr)*r./s;\n dlambda = sum(ds,1);\n end\n end\n\n function y = slow_traceCW(lambda)\n P = lambda*W + B;\n cholP = chol(P);\n X = cholP.'\\R.';\n y = X(:).'*X(:);\n end\n\n function [y,back] = logdetCW(lambda)\n s = log(lambda) + log1p(e/lambda);\n y = -sum(s,1);\n back = @(dy) (-dy)*sum(1./(lambda+e));\n end\n\n function y = slow_logdetCW(lambda)\n P = lambda*W + B;\n cholP = chol(P);\n y = 2*( sum(log(diag(R))) - sum(log(diag(cholP))) );\n end\n\nend\n\nfunction test_this()\n\n dim = 400;\n L = 1;\n RR = randn(dim,dim);W = RR*RR';\n RR = randn(dim,dim);B = RR*RR';\n a = randn(dim,1);\n M = randn(dim,L);\n R = chol(W);\n \n C = create_diagonalized_C(B,R,R*M,(R')\\a,W,M,a);\n \n lambda = rand/rand;\n \n %x = randn(dim,1);\n %[C.xCx(lambda,x),C.slow_xCx(lambda,x)]\n \n %tic;C.quad(lambda);toc\n %tic;C.slowQuad(lambda);toc\n\n %C.quad(lambda)\n %C.slowQuad(lambda)\n \n %[C.traceCW(lambda),C.slow_traceCW(lambda)]\n %[C.logdetCW(lambda),C.slow_logdetCW(lambda)]\n \n %[C.xCWCx(lambda,x),C.slow_xCWCx(lambda,x)]\n \n\n C.lambda_by_root(1,1)\n C.lambda_by_root(1,10)\n C.lambda_by_root(1,0.1)\n \n C.lambda_by_min(1,1)\n C.lambda_by_min(1,10)\n C.lambda_by_min(1,0.1)\n \n a = a*0;\n B = B*0;\n C = create_diagonalized_C(B,R,R*M,(R')\\a,W,M,a);\n \n C.lambda_by_root(0.1,0.01)\n C.lambda_by_root(1,10)\n C.lambda_by_root(10,0.1)\n \n C.lambda_by_min(1,10)\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_augmenting_backend.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/augmentation/create_augmenting_backend.m", "size": 3599, "source_encoding": "utf_8", "md5": "852abc76e3f7dfa8796f7015880bc788", "text": "function ABE = create_augmenting_backend(nu,dim,T,K,L)\n% Inputs:\n% nu: scalar nu>0, t-distribution degrees of freedom\n% dim: ivector dimension\n% T: i-vector extr\\zctor T-matrix\n% K: UBM size\n% L: 
number of languages\n\n\n if nargin==0\n test_this();\n return;\n end\n\n assert(dim==size(T,2));\n \n TBE = create_T_backend(nu,dim,L);\n\n augment = [];\n \n \n \n ABE.train = @train;\n ABE.logLH = @logLH;\n ABE.test_error_rate = @test_error_rate;\n ABE.cross_entropy = @cross_entropy;\n \n function [obj,AX] = train(X,Z,Labels,niters,ntiters)\n % X: ivectors\n % Z: zero-order stats\n % Labels: sparse one-hot label matrix\n if ~exist('niters','var') || isempty(niters)\n niters = 1;\n end\n if ~exist('ntiters','var') || isempty(ntiters)\n ntiters = 10;\n end\n \n assert(size(Labels,1)==L);\n assert(size(Labels,2)==size(X,2));\n assert(size(Labels,2)==size(Z,2));\n assert(size(Z,1)==K);\n assert(size(X,1)==dim);\n \n AX = X;\n obj = [];\n for i=1:niters\n obj_i = TBE.train(AX,Labels,ntiters); % starts with parameters from prev. iteration\n obj = [obj(:);obj_i(:)];\n [Mu,C] = TBE.getParams();\n augment = augment_i_vectors(T,K,Mu,C);\n if i=2\n AX = augment(X,Z);\n end\n end\n \n end\n\n\n function [LLH,X] = logLH(X,Z)\n if exist('Z','var') && ~isempty(Z)\n X = augment(X,Z);\n end\n LLH = TBE.logLH(X); \n end\n\n %assuming flat prior for now\n function e = test_error_rate(X,Z,Labels)\n N = size(X,2);\n LLH = logLH(X,Z);\n [~,labels] = max(LLH,[],1);\n Lhat = sparse(labels,1:N,1,L,N);\n e = 1-(Labels(:).'*Lhat(:))/N;\n end\n\n %assuming flat prior for now\n function e = cross_entropy(X,Z,Labels)\n LLH = logLH(X,Z);\n P = exp(bsxfun(@minus,LLH,max(LLH,[],1)));\n P = bsxfun(@rdivide,P,sum(P,1));\n e = -mean(log(full(sum(Labels.*P,1))),2)/log(L);\n end\n\n\n\nend\n\n\nfunction test_this()\n\n big = true;\n\n if big\n L = 10; %languages\n K = 1024; %UBM size\n nu = 2; %df\n dim = 400; %ivector dim\n fdim = 40; % feature dim\n minDur = 3*100; %3 sec\n maxDur = 30*100; %30 sec\n\n M = randn(dim,L);\n T = randn(K*fdim,dim);\n RR = randn(dim,2*dim);W = RR*RR';\n\n Ntrain = 100;\n Ntest = 100;\n else\n L = 3; %languages\n K = 10; %UBM size\n nu = 2; %df\n dim = 40; %ivector dim\n fdim = 5; % feature dim\n minDur = 3*100; %3 sec\n maxDur = 30*100; %30 sec\n\n M = randn(dim,L);\n T = randn(K*fdim,dim);\n RR = randn(dim,2*dim);W = RR*RR'/100;\n\n Ntrain = 100;\n Ntest = 100;\n end\n \n fprintf('generating data\\n');\n [F,trainZ,trainLabels] = rand_ivector(M,nu,W,2,K,T,minDur,maxDur,Ntrain);\n [trainX,TT] = stats2ivectors(F,trainZ,T);\n [F,testZ,testLabels] = rand_ivector(M,nu,W,2,K,T,minDur,maxDur,Ntest);\n testX = stats2ivectors(F,testZ,T,TT);\n F = [];\n \n fprintf('training\\n');\n ABE = create_augmenting_backend(nu,dim,T,K,L);\n ABE.train(trainX,trainZ,trainLabels,2);\n \n train_error_rate = ABE.test_error_rate(trainX,trainZ,trainLabels),\n test_error_rate = ABE.test_error_rate(testX,testZ,testLabels),\n \n train_XE = ABE.cross_entropy(trainX,trainZ,trainLabels),\n test_XE = ABE.cross_entropy(testX,testZ,testLabels),\n \n \nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "testBackprop_rs.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/test/testBackprop_rs.m", "size": 2198, "source_encoding": "utf_8", "md5": "9d88acaf06002d97bf8eb2fdc07bf7b8", "text": "function total = testBackprop_rs(block,X,delta,mask)\n%same as testFBblock, but with real step\n\n if ~iscell(X)\n X = {X};\n end\n\n\n dX = cellrndn(X); \n if exist('mask','var')\n assert(length(mask)==length(X));\n dX = cellmask(dX,mask);\n end\n \n cX1 = cellstep(X,dX,delta);\n cX2 = cellstep(X,dX,-delta);\n DX = cell(size(X)); \n \n [Y,back] = block(X{:});\n \n \n DY = 
randn(size(Y));\n\n [DX{:}] = back(DY); %DX = J'*DY\n \n\n dot1 = celldot(DX,dX); %dX' * J' * DY\n \n \n cY1 = block(cX1{:});\n cY2 = block(cX2{:});\n\n [Y2,dY2] = recover(cY1,cY2,delta); %dY2 = J*dX\n dot2 = DY(:).'*dY2(:); %DY' * J* DX\n \n \n Y_diff = max(abs(Y(:)-Y2(:))),\n jacobian_diff = abs(dot1-dot2),\n \n \n total = Y_diff + jacobian_diff;\n if total < 1e-6\n fprintf('\\ntotal error=%g\\n',total);\n else\n fprintf(2,'\\ntotal error=%g\\n',total);\n end\n \n\nend\n\n\nfunction R = cellrndn(X)\n if ~iscell(X)\n R = randn(size(X));\n else\n R = cell(size(X));\n for i=1:numel(X)\n R{i} = cellrndn(X{i});\n end\n end\nend\n\n\nfunction C = cellstep(X,dX,delta)\n assert(all(size(X)==size(dX)));\n if ~iscell(X)\n C = X + delta*dX;\n else\n C = cell(size(X));\n for i=1:numel(X)\n C{i} = cellstep(X{i},dX{i},delta);\n end\n end\nend\n\nfunction [R,D] = recover(cX1,cX2,delta)\n assert(all(size(cX1)==size(cX2)));\n if ~iscell(cX1)\n R = (cX1+cX2)/2;\n D = (cX1-cX2)/(2*delta);\n else \n R = cell(size(cX1));\n D = cell(size(cX1));\n for i=1:numel(cX1)\n [R{i},D{i}] = recover(cX1{i},cX2{i}); \n end\n end\nend\n\nfunction X = cellmask(X,mask)\n if ~iscell(X)\n assert(length(mask)==1);\n X = X*mask;\n else\n for i=1:numel(X)\n X{i} = cellmask(X{i},mask{i});\n end\n end\nend\n\n\nfunction dot = celldot(X,Y)\n assert(all(size(X)==size(Y)));\n if ~iscell(X)\n dot = X(:).' * Y(:);\n else\n dot = 0;\n for i=1:numel(X)\n dot = dot + celldot(X{i},Y{i});\n end\n end\nend\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "testBackprop_multi.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/test/testBackprop_multi.m", "size": 2361, "source_encoding": "utf_8", "md5": "ced4a5b7e925d5c00a2d991fb34d012c", "text": "function total = testBackprop_multi(block,nout,X,mask)\n% same as testBackprop, but handles multiple outputs\n if ~iscell(X)\n X = {X};\n end\n\n\n dX = cellrndn(X); \n if exist('mask','var')\n assert(length(mask)==length(X));\n dX = cellmask(dX,mask);\n end\n \n cX = cellcomplex(X,dX);\n DX = cell(size(X)); \n \n \n Y = cell(1,nout);\n [Y{:},back] = block(X{:});\n \n \n DY = cell(size(Y));\n for i=1:numel(DY)\n DY{i} = randn(size(Y{i}));\n end\n [DX{:}] = back(DY{:}); %DX = J'*DY\n \n\n dot1 = celldot(DX,dX); %dX' * J' * DY\n \n \n cY = cell(1,nout);\n Y2 = cell(1,nout);\n dY2 = cell(1,nout);\n [cY{:}] = block(cX{:});\n for i=1:numel(cY)\n [Y2{i},dY2{i}] = recover(cY{i}); %dY2 = J*dX\n end\n dot2 = celldot(DY,dY2); %DY' * J* DX\n \n \n Y_diff = 0;\n for i=1:nout\n Y_diff = Y_diff + max(abs(Y{i}(:)-Y2{i}(:)));\n end\n Y_diff,\n jacobian_diff = abs(dot1-dot2),\n \n\n \n total = Y_diff + jacobian_diff;\n if total < 1e-6\n fprintf('\\ntotal error=%g\\n',total);\n else\n fprintf(2,'\\ntotal error=%g\\n',total);\n end\n \n\nend\n\n\nfunction R = cellrndn(X)\n if ~iscell(X)\n R = randn(size(X));\n else\n R = cell(size(X));\n for i=1:numel(X)\n R{i} = cellrndn(X{i});\n end\n end\nend\n\n\nfunction X = cellmask(X,mask)\n if ~iscell(X)\n assert(length(mask)==1);\n X = X*mask;\n else\n for i=1:numel(X)\n X{i} = cellmask(X{i},mask{i});\n end\n end\nend\n\n\nfunction C = cellcomplex(X,dX)\n assert(all(size(X)==size(dX)));\n if ~iscell(X)\n C = complex(X,1e-20*dX);\n else\n C = cell(size(X));\n for i=1:numel(X)\n C{i} = cellcomplex(X{i},dX{i});\n end\n end\nend\n\nfunction [R,D] = recover(cX)\n if ~iscell(cX)\n R = real(cX);\n D = 1e20*imag(cX);\n else \n R = cell(size(cX));\n D = cell(size(cX));\n for i=1:numel(cX)\n [R{i},D{i}] = recover(cX{i}); \n 
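% recover() undoes the complex-step construction in cellcomplex(): the block was\n            % fed X + 1i*1e-20*dX, so for an analytic block real(cX) is the function value\n            % and 1e20*imag(cX) is the directional derivative J*dX, with none of the\n            % cancellation error of a finite-difference step (contrast testBackprop_rs,\n            % which uses real +/- delta steps and a central difference).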
end\n end\nend\n\n\nfunction dot = celldot(X,Y)\n assert(all(size(X)==size(Y)));\n if ~iscell(X)\n dot = X(:).' * Y(:);\n else\n dot = 0;\n for i=1:numel(X)\n dot = dot + celldot(X{i},Y{i});\n end\n end\nend\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "testBackprop.m", "ext": ".m", "path": "meta-embeddings-master/code/snapshot_for_anya/matlab/test/testBackprop.m", "size": 2015, "source_encoding": "utf_8", "md5": "fc73fa404f5441c097eb63f249106078", "text": "function total = testBackprop(block,X,mask)\n\n if ~iscell(X)\n X = {X};\n end\n\n\n dX = cellrndn(X); \n if exist('mask','var')\n assert(length(mask)==length(X));\n dX = cellmask(dX,mask);\n end\n \n cX = cellcomplex(X,dX);\n DX = cell(size(X)); \n \n \n [Y,back] = block(X{:});\n \n \n DY = randn(size(Y));\n [DX{:}] = back(DY); %DX = J'*DY\n \n\n dot1 = celldot(DX,dX); %dX' * J' * DY\n \n \n cY = block(cX{:});\n [Y2,dY2] = recover(cY); %dY2 = J*dX\n dot2 = DY(:).'*dY2(:); %DY' * J* DX\n \n \n Y_diff = max(abs(Y(:)-Y2(:))),\n jacobian_diff = abs(dot1-dot2),\n \n\n \n total = Y_diff + jacobian_diff;\n if total < 1e-6\n fprintf('\\ntotal error=%g\\n',total);\n else\n fprintf(2,'\\ntotal error=%g\\n',total);\n end\n \n\nend\n\n\nfunction R = cellrndn(X)\n if ~iscell(X)\n R = randn(size(X));\n else\n R = cell(size(X));\n for i=1:numel(X)\n R{i} = cellrndn(X{i});\n end\n end\nend\n\n\nfunction X = cellmask(X,mask)\n if ~iscell(X)\n assert(length(mask)==1);\n X = X*mask;\n else\n for i=1:numel(X)\n X{i} = cellmask(X{i},mask{i});\n end\n end\nend\n\n\nfunction C = cellcomplex(X,dX)\n assert(all(size(X)==size(dX)));\n if ~iscell(X)\n C = complex(X,1e-20*dX);\n else\n C = cell(size(X));\n for i=1:numel(X)\n C{i} = cellcomplex(X{i},dX{i});\n end\n end\nend\n\nfunction [R,D] = recover(cX)\n if ~iscell(cX)\n R = real(cX);\n D = 1e20*imag(cX);\n else \n R = cell(size(cX));\n D = cell(size(cX));\n for i=1:numel(cX)\n [R{i},D{i}] = recover(cX{i}); \n end\n end\nend\n\n\nfunction dot = celldot(X,Y)\n assert(all(size(X)==size(Y)));\n if ~iscell(X)\n dot = X(:).' * Y(:);\n else\n dot = 0;\n for i=1:numel(X)\n dot = dot + celldot(X{i},Y{i});\n end\n end\nend\n\n\n"} +{"plateform": "github", "repo_name": "bsxfan/meta-embeddings-master", "name": "create_truncGMM.m", "ext": ".m", "path": "meta-embeddings-master/code/Niko/matlab/stochastic_clustering/create_truncGMM.m", "size": 11422, "source_encoding": "utf_8", "md5": "9bf418136c7994e16d5fb77486fd9bac", "text": "function model = create_truncGMM(W,F,alpha,m)\n% This is a truncated version of DP micture model, with a specified maximum number of\n% components. The observations are realted to the hidden cluster variables\n% like in an SPLDA model. The hidden variable for cluster i is z_i in R^d.\n% The observations, x_j are in R^D, where D>= d. The prior for the z_i is \n% IID: N(z_i | 0, I). The observations that belong to cluster i are \n% conditionally IID: N(x_j | F z_i, W^{-1} ). 
The SPLDA model parameters \n% are: \n% F: D-by-d, factor loading matrix\n% W: D-by-D, within cluster precision (inverse covariance)\n%\n% The other parameters are for the symmetric Dirichlet prior on mixture \n% weights, which has parameters alpha and m, where m is the maximum number\n% of mixture components: weights ~ Dir(alpha,m).\n% More generally, alpha may be an m-by-1 vector, for a non-symmetric Dirichlet\n% weight prior.\n\n if nargin==0\n test_this();\n return;\n end\n\n cholW = chol(W);\n dim = size(W,1);\n \n alpha = alpha(:);\n\n E = F'*W*F; %meta-embedding precision (before diagonalization)\n [V,Lambda] = eig(E); %E = V*Lambda*V';\n P = V.'*(F.'*W); % projection to extract 1st-order meta-embedding stats\n Lambda = diag(Lambda);\n % The diagonal Lambda is the meta-embedding precision after\n % diagonalization.\n % We now have the likelihood, or meta-embedding:\n % P(x | z) \\propto exp[ z'Px - 1/2 z' Lambda z ], where z is the\n % hidden variable after diagonalization.\n %\n % The (normal) posterior for z, given n observations {x_j} has natural\n % parameters:\n % sum_j P x_j and I + n Lambda\n \n FV = F*V; %projects from diagonalized Z to cluster means\n \n A = [];\n logThresh = log(1e-10);\n \n\n model.sampleData = @sampleData;\n model.sampleWeights = @sampleWeights;\n model.sampleZ = @sampleZ;\n model.sampleLabels = @sampleLabels;\n model.setData = @setData;\n model.label_log_posterior = @label_log_posterior;\n\n model.Means_given_labels = @Means_given_labels;\n model.fullGibbs_iteration = @fullGibbs_iteration;\n model.collapsedGibbs_iteration = @collapsedGibbs_iteration;\n model.mfvb_iteration = @mfvb_iteration;\n \n \n \n function setData(X)\n A = P*X;\n end\n \n% function Means = Z2Means(Z)\n% Means = FV*Z;\n% end\n \n\n %hlabels: m-by-n\n %Mu: D-by-m, posterior means for m cluster centroids\n %counts: 1-by-m, cluster occupancy counts (soft if hlabels is soft)\n %Q: posterior covariance for cluster i is: F*V*inv(diag(Q(:,i)))*V'*F.'\n function [Mu,Q,counts] = Means_given_labels(hlabels)\n if ~islogical(hlabels)\n [m,n] = size(hlabels);\n [~,L] = max(log(hlabels)-log(-log(rand(m,n))),[],1);\n hlabels = sparse(L,1:n,true,m,n);\n end\n counts = full(sum(hlabels,2)); %m-by-1\n Q = 1 + Lambda*counts.'; % d-by-m\n Zhat = (A*hlabels.') ./ Q; %d-by-m\n Mu = FV*Zhat;\n end\n \n \n % hlabels (one hot columns), m-by-n\n % A = P*X, d-by-n\n function Z = sampleZ(hlabels,counts)\n %counts = full(sum(hlabels,2)); %m-by-1\n Q = 1 + Lambda*counts.'; % d-by-m\n Zhat = (A*hlabels.') ./ Q; %d-by-m\n d = size(A,1);\n Z = Zhat + randn(d,m) ./ sqrt(Q);\n end\n\n\n function [weights,counts] = sampleWeights(hlabels)\n counts = sum(hlabels,2);\n weights = randDirichlet(alpha+counts,m,1);\n end\n\n\n\n % Z: d-by-m\n % A: d-by-n\n % weights: m-by-1\n function hlabels = sampleLabels(Z,weights)\n n = size(A,2);\n Gumbel = -log(-log(rand(m,n)));\n %ZLambdaZ = sum(Z.*bsxfun(@times,Lambda,Z),1); % m-by-1\n ZLambdaZ = Lambda.'*Z.^2; % m-by-1\n Score = bsxfun(@plus,log(weights)-ZLambdaZ.'/2,Z.'*A); %m-by-n\n [~,L] = max(Gumbel+Score,[],1);\n hlabels = sparse(L,1:n,true,m,n);\n end\n\n function hlabels = fullGibbs_iteration(hlabels)\n [weights,counts] = sampleWeights(hlabels);\n Z = sampleZ(hlabels,counts);\n hlabels = sampleLabels(Z,weights);\n \n end\n\n\n % hlabels (one hot columns), m-by-n\n % A = P*X, d-by-n\n function [Zhat,Q] = mfvb_Z(respbilties,counts)\n %counts = sum(respbilties,2); %m-by-1\n Q = 1 + Lambda*counts.'; % d-by-m\n Zhat = (A*respbilties.') ./ Q; %d-by-m\n end\n\n function 
[post_alpha,counts] = mfvb_Weights(respbilties)\n counts = sum(respbilties,2);\n post_alpha = alpha+counts;\n end\n\n % Z: d-by-m\n % A: d-by-n\n % weights: m-by-1\n function respbilties = mfvb_Labels(Zhat,Q,post_alpha)\n ZLambdaZ = Lambda.'*Zhat.^2 + sum(bsxfun(@rdivide,Lambda,Q),1); % expected value\n log_weights = psi(post_alpha) - psi(sum(post_alpha)); % expected value\n R = bsxfun(@plus,log_weights-ZLambdaZ.'/2,Zhat.'*A); %m-by-n\n mx = max(R,[],1);\n R = bsxfun(@minus,R,mx);\n R(R