diff --git "a/20230447_matlab.jsonl" "b/20230447_matlab.jsonl"
new file mode 100644
--- /dev/null
+++ "b/20230447_matlab.jsonl"
@@ -0,0 +1,695 @@
+{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "lanbpro.m", "ext": ".m", "path": "coalescent_embedding-master/coemb_svds_eigs/lanbpro.m", "size": 19514, "source_encoding": "utf_8", "md5": "897b157335c2a5c269845380328709c4", "text": "function [U,B_k,V,p,ierr,work] = lanbpro(varargin)\n\n%LANBPRO Lanczos bidiagonalization with partial reorthogonalization.\n% LANBPRO computes the Lanczos bidiagonalization of a real\n% matrix using the Lanczos bidiagonalization algorithm with partial\n% reorthogonalization.\n%\n% [U_k,B_k,V_k,R,ierr,work] = LANBPRO(A,K,R0,OPTIONS,U_old,B_old,V_old)\n% [U_k,B_k,V_k,R,ierr,work] = LANBPRO('Afun','Atransfun',M,N,K,R0, ...\n% OPTIONS,U_old,B_old,V_old)\n%\n% Computes K steps of the Lanczos bidiagonalization algorithm with partial\n% reorthogonalization (BPRO) with M-by-1 starting vector R0, producing a\n% lower bidiagonal K-by-K matrix B_k, an N-by-K matrix V_k, an M-by-K\n% matrix U_k and an M-by-1 vector R such that\n% A*V_k = U_k*B_k + R\n% Partial reorthogonalization is used to keep the columns of V_K and U_k\n% semiorthogonal:\n% MAX(DIAG((EYE(K) - V_K'*V_K))) <= OPTIONS.delta\n% and\n% MAX(DIAG((EYE(K) - U_K'*U_K))) <= OPTIONS.delta.\n%\n% B_k = LANBPRO(...) returns the bidiagonal matrix only.\n%\n% The first input argument is either a real matrix, or a string\n% containing the name of an M-file which applies a linear operator\n% to the columns of a given matrix. In the latter case, the second\n% input must be the name of an M-file which applies the transpose of\n% the same linear operator to the columns of a given matrix,\n% and the third and fourth arguments must be M and N, the dimensions\n% of the problem.\n%\n% The OPTIONS structure is used to control the reorthogonalization:\n% OPTIONS.delta: Desired level of orthogonality\n% (default = sqrt(eps/K)).\n% OPTIONS.eta : Level of orthogonality after reorthogonalization\n% (default = eps^(3/4)/sqrt(K)).\n% OPTIONS.cgs : Flag for switching between different reorthogonalization\n% algorithms:\n% 0 = iterated modified Gram-Schmidt (default)\n% 1 = iterated classical Gram-Schmidt\n% OPTIONS.elr : If OPTIONS.elr = 1 (default) then extended local\n% reorthogonalization is enforced.\n% OPTIONS.onesided\n% : If OPTIONS.onesided = 0 (default) then both the left\n% (U) and right (V) Lanczos vectors are kept\n% semiorthogonal.\n% OPTIONS.onesided = 1 then only the columns of U\n% are reorthogonalized.\n% OPTIONS.onesided = -1 then only the columns of V\n% are reorthogonalized.\n% OPTIONS.waitbar\n% : The progress of the algorithm is displayed graphically.\n%\n% If R0, U_old, B_old, and V_old are all provided, they must\n% contain a partial Lanczos bidiagonalization of A on the form\n%\n% A V_old = U_old B_old + R0 .\n%\n% In this case the factorization is extended to dimension K x K by\n% continuing the Lanczos bidiagonalization algorithm with R0 as a\n% starting vector.\n%\n% The output array work contains information about the work used in\n% reorthogonalizing the u- and v-vectors.\n% work = [ RU PU ]\n% [ RV PV ]\n% where\n% RU = Number of reorthogonalizations of U.\n% PU = Number of inner products used in reorthogonalizing U.\n% RV = Number of reorthogonalizations of V.\n% PV = Number of inner products used in reorthogonalizing V.\n\n% References: \n% R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998.\n%\n% G. H. Golub & C. F. 
Van Loan, \"Matrix Computations\",\n% 3. Ed., Johns Hopkins, 1996. Section 9.3.4.\n%\n% B. N. Parlett, ``The Symmetric Eigenvalue Problem'', \n% Prentice-Hall, Englewood Cliffs, NJ, 1980.\n%\n% H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'',\n% Math. Comp. 42 (1984), no. 165, 115--142.\n%\n\n% Rasmus Munk Larsen, DAIMI, 1998.\n\n% Check input arguments.\n\nglobal LANBPRO_TRUTH\nLANBPRO_TRUTH=0;\n\nif LANBPRO_TRUTH==1\n global MU NU MUTRUE NUTRUE\n global MU_AFTER NU_AFTER MUTRUE_AFTER NUTRUE_AFTER\nend\n\nif nargin<1 | length(varargin)<2\n error('Not enough input arguments.');\nend\nnarg=length(varargin);\n\nA = varargin{1};\nif isnumeric(A) | isstruct(A)\n if isnumeric(A)\n if ~isreal(A)\n error('A must be real')\n end \n [m n] = size(A);\n elseif isstruct(A)\n [m n] = size(A.R);\n end\n k=varargin{2};\n if narg >= 3 & ~isempty(varargin{3});\n p = varargin{3};\n else\n p = rand(m,1)-0.5;\n end\n if narg < 4, options = []; else options=varargin{4}; end\n if narg > 4 \n if narg<7\n error('All or none of U_old, B_old and V_old must be provided.')\n else\n U = varargin{5}; B_k = varargin{6}; V = varargin{7};\n end\n else\n U = []; B_k = []; V = [];\n end\n if narg > 7, anorm=varargin{8}; else anorm = []; end\nelse\n if narg<5\n error('Not enough input arguments.');\n end\n Atrans = varargin{2};\n if ~isstr(Atrans)\n error('Afunc and Atransfunc must be names of m-files')\n end\n m = varargin{3};\n n = varargin{4};\n if ~isreal(n) | abs(fix(n)) ~= n | ~isreal(m) | abs(fix(m)) ~= m\n error('M and N must be positive integers.')\n end\n k=varargin{5};\n if narg < 6, p = rand(m,1)-0.5; else p=varargin{6}; end \n if narg < 7, options = []; else options=varargin{7}; end \n if narg > 7\n if narg < 10\n error('All or none of U_old, B_old and V_old must be provided.')\n else\n U = varargin{8}; B_k = varargin{9}; V = varargin{10};\n end\n else\n U = []; B_k = []; V=[];\n end\n if narg > 10, anorm=varargin{11}; else anorm = []; end\nend\n\n% Quick return for min(m,n) equal to 0 or 1.\nif min(m,n) == 0\n U = []; B_k = []; V = []; p = []; ierr = 0; work = zeros(2,2);\n return\nelseif min(m,n) == 1\n if isnumeric(A)\n U = 1; B_k = A; V = 1; p = 0; ierr = 0; work = zeros(2,2);\n else\n U = 1; B_k = feval(A,1); V = 1; p = 0; ierr = 0; work = zeros(2,2);\n end\n if nargout<3\n U = B_k;\n end\n return\nend\n\n% Set options. \n%m2 = 3/2*(sqrt(m)+1);\n%n2 = 3/2*(sqrt(n)+1);\nm2 = 3/2;\nn2 = 3/2;\ndelta = sqrt(eps/k); % Desired level of orthogonality.\neta = eps^(3/4)/sqrt(k); % Level of orth. 
after reorthogonalization.\ncgs = 0; % Flag for switching between iterated MGS and CGS.\nelr = 2; % Flag for switching extended local \n % reorthogonalization on and off.\ngamma = 1/sqrt(2); % Tolerance for iterated Gram-Schmidt.\nonesided = 0; t = 0; waitb = 0;\n\n% Parse options struct\nif ~isempty(options) & isstruct(options)\n c = fieldnames(options);\n for i=1:length(c)\n if strmatch(c(i),'delta'), delta = getfield(options,'delta'); end\n if strmatch(c(i),'eta'), eta = getfield(options,'eta'); end\n if strmatch(c(i),'cgs'), cgs = getfield(options,'cgs'); end\n if strmatch(c(i),'elr'), elr = getfield(options,'elr'); end\n if strmatch(c(i),'gamma'), gamma = getfield(options,'gamma'); end\n if strmatch(c(i),'onesided'), onesided = getfield(options,'onesided'); end\n if strmatch(c(i),'waitbar'), waitb=1; end\n end\nend\n\nif waitb\n waitbarh = waitbar(0,'Lanczos bidiagonalization in progress...');\nend\n\nif isempty(anorm)\n anorm = []; est_anorm=1; \nelse\n est_anorm=0; \nend\n\n% Conservative statistical estimate on the size of round-off terms. \n% Notice that {\\bf u} == eps/2.\nFUDGE = 1.01; % Fudge factor for ||A||_2 estimate.\n\nnpu = 0; npv = 0; ierr = 0;\np = p(:);\n% Prepare for Lanczos iteration.\nif isempty(U)\n V = zeros(n,k); U = zeros(m,k);\n beta = zeros(k+1,1); alpha = zeros(k,1);\n beta(1) = norm(p);\n % Initialize MU/NU-recurrences for monitoring loss of orthogonality.\n nu = zeros(k,1); mu = zeros(k+1,1);\n mu(1)=1; nu(1)=1;\n \n numax = zeros(k,1); mumax = zeros(k,1);\n force_reorth = 0; nreorthu = 0; nreorthv = 0;\n j0 = 1;\nelse\n j = size(U,2); % Size of existing factorization\n % Allocate space for Lanczos vectors\n U = [U, zeros(m,k-j)];\n V = [V, zeros(n,k-j)];\n alpha = zeros(k+1,1); beta = zeros(k+1,1);\n alpha(1:j) = diag(B_k); if j>1 beta(2:j) = diag(B_k,-1); end\n beta(j+1) = norm(p);\n % Reorthogonalize p.\n if j=gamma*normold\n\t stop = 1;\n\telse\n\t normold = alpha(j);\n\tend\n end\n end\n\n if est_anorm\n if j==2\n\tanorm = max(anorm,FUDGE*sqrt(alpha(1)^2+beta(2)^2+alpha(2)*beta(2)));\n else\t\n\tanorm = max(anorm,FUDGE*sqrt(alpha(j-1)^2+beta(j)^2+alpha(j-1)* ...\n\t beta(j-1) + alpha(j)*beta(j)));\n end\t\t\t \n end\n \n if ~fro & alpha(j) ~= 0\n % Update estimates of the level of orthogonality for the\n % columns 1 through j-1 in V.\n nu = update_nu(nu,mu,j,alpha,beta,anorm);\n numax(j) = max(abs(nu(1:j-1)));\n end\n\n if j>1 & LANBPRO_TRUTH\n NU(1:j-1,j-1) = nu(1:j-1);\n NUTRUE(1:j-1,j-1) = V(:,1:j-1)'*r/alpha(j);\n end\n \n if elr>0\n nu(j-1) = n2*eps;\n end\n \n % IF level of orthogonality is worse than delta THEN \n % Reorthogonalize v_j against some previous v_i's, 0<=i delta | force_reorth ) & alpha(j)~=0\n % Decide which vectors to orthogonalize against:\n if fro | eta==0\n\tint = [1:j-1]';\n elseif force_reorth==0\n\tint = compute_int(nu,j-1,delta,eta,0,0,0);\n end\n % Else use int from last reorth. to avoid spillover from mu_{j-1} \n % to nu_j.\n \n % Reorthogonalize v_j \n [r,alpha(j),rr] = reorth(V,r,alpha(j),int,gamma,cgs);\n npv = npv + rr*length(int); % number of inner products.\n nu(int) = n2*eps; % Reset nu for orthogonalized vectors.\n\n % If necessary force reorthogonalization of u_{j+1} \n % to avoid spillover\n if force_reorth==0 \n\tforce_reorth = 1; \n else\n\tforce_reorth = 0; \n end\n nreorthv = nreorthv + 1;\n end\n end\n\n \n % Check for convergence or failure to maintain semiorthogonality\n if alpha(j) < max(n,m)*anorm*eps & j 0\n\t% A vector numerically orthogonal to span(Q_k(:,1:j)) was found. 
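Editor's aside for readers of this record: the calling sequence documented in the lanbpro header can be exercised as below. This is a minimal sketch, not repository code; it assumes lanbpro.m and its helpers (reorth, compute_int) are on the MATLAB path, and the test matrix and rank are arbitrary illustrations.

% --- illustrative usage sketch (editor addition, not repository code) ---
A = randn(200, 120);                 % arbitrary dense test matrix
k = 10;                              % number of Lanczos steps
opts.delta = sqrt(eps/k);            % desired semiorthogonality (the default)
opts.eta   = eps^(3/4)/sqrt(k);      % level after reorthogonalization (the default)
opts.cgs   = 0;                      % iterated modified Gram-Schmidt
[U, B, V, r] = lanbpro(A, k, [], opts);
% For the lower-bidiagonal B, the residual r only enters the last
% column, i.e. A*V = U*B + r*e_k', so the check subtracts it there.
E = A*V - U*B;
E(:, end) = E(:, end) - r;
fprintf('factorization residual: %g\n', norm(E, 'fro'));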
\n\t% Continue iteration.\n\tbailout=0;\n\tbreak;\n end\n end\n if bailout\n j = j-1;\n ierr = -j;\n break;\n else\n r=r/nrmnew; % Continue with new normalized r as starting vector.\n force_reorth = 1;\n if delta>0\n\tfro = 0; % Turn off full reorthogonalization.\n end\n end \n elseif j delta*alpha(j)\n% fro = 1;\n ierr = j;\n end\n\n if j>1 & LANBPRO_TRUTH\n NU_AFTER(1:j-1,j-1) = nu(1:j-1);\n NUTRUE_AFTER(1:j-1,j-1) = V(:,1:j-1)'*r/alpha(j);\n end\n\n \n if alpha(j) ~= 0\n V(:,j) = r/alpha(j);\n else\n V(:,j) = r;\n end\n\n %%%%%%%%%% Lanczos step to generate u_{j+1}. %%%%%%%%%%%%%\n if waitb\n waitbar((2*j+1)/(2*k),waitbarh)\n end\n \n if isnumeric(A)\n p = A*V(:,j) - alpha(j)*U(:,j);\n elseif isstruct(A)\n p = A.Rt\\V(:,j) - alpha(j)*U(:,j);\n else\n p = feval(A,V(:,j)) - alpha(j)*U(:,j);\n end\n beta(j+1) = norm(p);\n % Extended local reorthogonalization\n if beta(j+1)= gamma*normold\n\tstop = 1;\n else\n\tnormold = beta(j+1);\n end\n end\n end\n\n if est_anorm\n % We should update estimate of ||A|| before updating mu - especially \n % important in the first step for problems with large norm since alpha(1) \n % may be a severe underestimate! \n if j==1\n anorm = max(anorm,FUDGE*pythag(alpha(1),beta(2))); \n else\n anorm = max(anorm,FUDGE*sqrt(alpha(j)^2+beta(j+1)^2 + alpha(j)*beta(j)));\n end\n end\n \n \n if ~fro & beta(j+1) ~= 0\n % Update estimates of the level of orthogonality for the columns of V.\n mu = update_mu(mu,nu,j,alpha,beta,anorm);\n mumax(j) = max(abs(mu(1:j))); \n end\n\n if LANBPRO_TRUTH==1\n MU(1:j,j) = mu(1:j);\n MUTRUE(1:j,j) = U(:,1:j)'*p/beta(j+1);\n end\n \n if elr>0\n mu(j) = m2*eps;\n end\n \n % IF level of orthogonality is worse than delta THEN \n % Reorthogonalize u_{j+1} against some previous u_i's, 0<=i<=j.\n if onesided~=1 & (fro | mumax(j) > delta | force_reorth) & beta(j+1)~=0\n % Decide which vectors to orthogonalize against.\n if fro | eta==0\n int = [1:j]';\n elseif force_reorth==0\n int = compute_int(mu,j,delta,eta,0,0,0); \n else\n int = [int; max(int)+1];\n end\n % Else use int from last reorth. to avoid spillover from nu to mu.\n\n% if onesided~=0\n% fprintf('i = %i, nr = %i, fro = %i\\n',j,size(int(:),1),fro)\n% end\n % Reorthogonalize u_{j+1} \n [p,beta(j+1),rr] = reorth(U,p,beta(j+1),int,gamma,cgs); \n npu = npu + rr*length(int); nreorthu = nreorthu + 1; \n\n % Reset mu to epsilon.\n mu(int) = m2*eps; \n \n if force_reorth==0 \n force_reorth = 1; % Force reorthogonalization of v_{j+1}.\n else\n force_reorth = 0; \n end\n end\n \n % Check for convergence or failure to maintain semiorthogonality\n if beta(j+1) < max(m,n)*anorm*eps & j 0\n\t% A vector numerically orthogonal to span(Q_k(:,1:j)) was found. 
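Editor's note: the extended-local-reorthogonalization code in this record was partially lost when the file was scraped (compare the dangling "if beta(j+1)= gamma*normold" fragment above, where text between a "<" and the next ">" was dropped). The idea behind OPTIONS.elr follows a standard pattern; the sketch below illustrates it and is not the original implementation.

% --- sketch of extended local reorthogonalization (editor addition) ---
% Repeatedly remove the component of the new residual p along the previous
% Lanczos vector u_j while the norm keeps shrinking appreciably
% (gamma = 1/sqrt(2), as in the defaults above). The full algorithm also
% folds the removed component t back into alpha(j).
function [p, beta_new] = elr_sketch(p, u_prev, beta_old, gamma)
stop = false;
while ~stop
    t = u_prev' * p;               % component along the previous vector
    p = p - u_prev * t;            % remove it
    beta_new = norm(p);
    if beta_new >= gamma * beta_old
        stop = true;               % norm stabilized; orthogonality restored
    else
        beta_old = beta_new;       % heavy cancellation; iterate again
    end
end
end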
\n\t% Continue iteration.\n\tbailout=0;\n\tbreak;\n end\n end\n if bailout\n ierr = -j;\n break;\n else\n p=p/nrmnew; % Continue with new normalized p as starting vector.\n force_reorth = 1;\n if delta>0\n\tfro = 0; % Turn off full reorthogonalization.\n end\n end \n elseif j delta*beta(j+1) \n% fro = 1;\n ierr = j;\n end \n \n if LANBPRO_TRUTH==1\n MU_AFTER(1:j,j) = mu(1:j);\n MUTRUE_AFTER(1:j,j) = U(:,1:j)'*p/beta(j+1);\n end \nend\nif waitb\n close(waitbarh)\nend\n\nif j5\n work = [[nreorthu,npu];[nreorthv,npv]];\nend\n\n\n\nfunction mu = update_mu(muold,nu,j,alpha,beta,anorm)\n\n% UPDATE_MU: Update the mu-recurrence for the u-vectors.\n%\n% mu_new = update_mu(mu,nu,j,alpha,beta,anorm)\n\n% Rasmus Munk Larsen, DAIMI, 1998.\n\nbinv = 1/beta(j+1);\nmu = muold;\neps1 = 100*eps/2;\nif j==1\n T = eps1*(pythag(alpha(1),beta(2)) + pythag(alpha(1),beta(1)));\n T = T + eps1*anorm;\n mu(1) = T / beta(2);\nelse\n mu(1) = alpha(1)*nu(1) - alpha(j)*mu(1);\n% T = eps1*(pythag(alpha(j),beta(j+1)) + pythag(alpha(1),beta(1)));\n T = eps1*(sqrt(alpha(j).^2+beta(j+1).^2) + sqrt(alpha(1).^2+beta(1).^2));\n T = T + eps1*anorm;\n mu(1) = (mu(1) + sign(mu(1))*T) / beta(j+1);\n % Vectorized version of loop:\n if j>2\n k=2:j-1;\n mu(k) = alpha(k).*nu(k) + beta(k).*nu(k-1) - alpha(j)*mu(k);\n %T = eps1*(pythag(alpha(j),beta(j+1)) + pythag(alpha(k),beta(k)));\n T = eps1*(sqrt(alpha(j).^2+beta(j+1).^2) + sqrt(alpha(k).^2+beta(k).^2));\n T = T + eps1*anorm;\n mu(k) = binv*(mu(k) + sign(mu(k)).*T);\n end\n% T = eps1*(pythag(alpha(j),beta(j+1)) + pythag(alpha(j),beta(j)));\n T = eps1*(sqrt(alpha(j).^2+beta(j+1).^2) + sqrt(alpha(j).^2+beta(j).^2));\n T = T + eps1*anorm;\n mu(j) = beta(j)*nu(j-1);\n mu(j) = (mu(j) + sign(mu(j))*T) / beta(j+1);\nend \nmu(j+1) = 1;\n\n\nfunction nu = update_nu(nuold,mu,j,alpha,beta,anorm)\n\n% UPDATE_MU: Update the nu-recurrence for the v-vectors.\n%\n% nu_new = update_nu(nu,mu,j,alpha,beta,anorm)\n\n% Rasmus Munk Larsen, DAIMI, 1998.\n\nnu = nuold;\nainv = 1/alpha(j);\neps1 = 100*eps/2;\nif j>1\n k = 1:(j-1);\n% T = eps1*(pythag(alpha(k),beta(k+1)) + pythag(alpha(j),beta(j)));\n T = eps1*(sqrt(alpha(k).^2+beta(k+1).^2) + sqrt(alpha(j).^2+beta(j).^2));\n T = T + eps1*anorm;\n nu(k) = beta(k+1).*mu(k+1) + alpha(k).*mu(k) - beta(j)*nu(k);\n nu(k) = ainv*(nu(k) + sign(nu(k)).*T);\nend\nnu(j) = 1;\n\nfunction x = pythag(y,z)\n%PYTHAG Computes sqrt( y^2 + z^2 ).\n%\n% x = pythag(y,z)\n%\n% Returns sqrt(y^2 + z^2) but is careful to scale to avoid overflow.\n\n% Christian H. Bischof, Argonne National Laboratory, 03/31/89.\n\n[m n] = size(y);\nif m>1 | n>1\n y = y(:); z=z(:);\n rmax = max(abs([y z]'))';\n id=find(rmax==0);\n if length(id)>0\n rmax(id) = 1;\n x = rmax.*sqrt((y./rmax).^2 + (z./rmax).^2);\n x(id)=0;\n else\n x = rmax.*sqrt((y./rmax).^2 + (z./rmax).^2);\n end\n x = reshape(x,m,n);\nelse\n rmax = max(abs([y;z]));\n if (rmax==0)\n x = 0;\n else\n x = rmax*sqrt((y/rmax)^2 + (z/rmax)^2);\n end\nend\n \n"} +{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "coalescent_embedding.m", "ext": ".m", "path": "coalescent_embedding-master/coemb_svds_eigs/coalescent_embedding.m", "size": 25870, "source_encoding": "utf_8", "md5": "486c75e222bfe5b52daa60a6d09d78cb", "text": "function coords = coalescent_embedding(x, pre_weighting, dim_red, angular_adjustment, dims)\n\n% Authors:\n% - main code: Alessandro Muscoloni, 2017-09-21\n% - support functions: indicated at the beginning of the function\n\n% Released under MIT License\n% Copyright (c) 2017 A. 
Muscoloni, J. M. Thomas, C. V. Cannistraci\n\n% Reference:\n% A. Muscoloni, J. M. Thomas, S. Ciucci, G. Bianconi, and C. V. Cannistraci,\n% \"Machine learning meets complex networks via coalescent embedding in the hyperbolic space\",\n% Nature Communications 8, 1615 (2017). doi:10.1038/s41467-017-01825-5\n\n% The time complexity of the algorithms is O(N^2) or O(E*N) depending on the pre-weighting\n% and dimension reduction technique used, see the references for details.\n\n%%% INPUT %%%\n% x - adjacency matrix of the network, which must be:\n% symmetric, zero-diagonal, one connected component, not fully connected;\n% the network can be weighted\n%\n% pre_weighting - rule for pre-weighting the matrix, the alternatives are:\n% 'original' -> the original weights are considered;\n% NB: they should suggest distances and not similarities\n% 'reverse' -> the original weights reversed are considered;\n% NB: to use when they suggest similarities\n% 'RA1' -> Repulsion-Attraction v1\n% 'RA2' -> Repulsion-Attraction v2\n% 'EBC' -> Edge-Betweenness-Centrality\n%\n% dim_red - dimension reduction technique, the alternatives are:\n% 'ISO' -> Isomap (valid for 2D and 3D)\n% 'ncISO' -> noncentered Isomap (valid for 2D and 3D)\n% 'LE' -> Laplacian Eigenmaps (valid for 2D and 3D)\n% 'MCE' -> Minimum Curvilinear Embedding (only valid for 2D)\n% 'ncMCE' -> noncentered Minimum Curvilinear Embedding (only valid for 2D)\n%\n% angular_adjustment - method for the angular adjustment, the alternatives are:\n% 'original' -> original angular distances are preserved (valid for 2D and 3D)\n% 'EA' -> equidistant adjustment (only valid for 2D)\n% \n% dims - dimensions of the hyperbolic embedding space, the alternatives are:\n% 2 -> hyperbolic disk\n% 3 -> hyperbolic sphere\n\n%%% OUTPUT %%%\n% coords - polar or spherical hyperbolic coordinates of the nodes\n% in the hyperbolic disk they are in the form: [theta,r]\n% in the hyperbolic sphere they are in the form: [azimuth,elevation,r]\n% for details see the documentation of the MATLAB functions\n% \"cart2pol\" and \"cart2sph\"\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n% check input\nvalidateattributes(x, {'numeric'}, {'square','finite','nonnegative'});\nif ~issymmetric(x)\n error('The input matrix must be symmetric.')\nend\nif any(x(speye(size(x))==1))\n error('The input matrix must be zero-diagonal.')\nend\nvalidateattributes(pre_weighting, {'char'}, {});\nvalidateattributes(dim_red, {'char'}, {});\nvalidateattributes(angular_adjustment, {'char'}, {});\nvalidateattributes(dims, {'numeric'}, {'scalar','integer','>=',2,'<=',3});\nif ~any(strcmp(pre_weighting,{'original','reverse','RA1','RA2','EBC'}))\n error('Possible pre-weighting rules: ''original'',''reverse'',''RA1'',''RA2'',''EBC''.');\nend\nif dims == 2\n if ~any(strcmp(dim_red,{'ISO','ncISO','MCE','ncMCE','LE'}))\n error('Possible dimension reduction techniques in 2D: ''ISO'', ''ncISO'', ''MCE'', ''ncMCE'', ''LE''.');\n end\n if ~any(strcmp(angular_adjustment,{'original','EA'}))\n error('Possible angular adjustment methods in 2D: ''original'', ''EA''.');\n end\nelseif dims == 3\n if ~any(strcmp(dim_red,{'ISO','ncISO','LE'}))\n error('Possible dimension reduction techniques in 3D: ''ISO'', ''ncISO'', ''LE''.');\n end\n if ~any(strcmp(angular_adjustment,{'original'}))\n error('Possible angular adjustment methods in 3D: ''original''.');\n end \nend\n\n% pre-weighting\nif strcmp(pre_weighting,'original')\n xw = x;\nelseif strcmp(pre_weighting,'reverse')\n xw = 
reverse_weights(x);\nelseif strcmp(pre_weighting,'RA1')\n xw = RA1_weighting(double(x>0));\nelseif strcmp(pre_weighting,'RA2')\n xw = RA2_weighting(double(x>0));\nelseif strcmp(pre_weighting,'EBC')\n xw = EBC_weighting(double(x>0));\nend\n\n% dimension reduction and set of hyperbolic coordinates\nif dims == 2\n coords = zeros(size(x,1),2);\n if strcmp(dim_red,'ISO')\n coords(:,1) = set_angular_coordinates_ISO_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'ncISO')\n coords(:,1) = set_angular_coordinates_ncISO_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'MCE')\n coords(:,1) = set_angular_coordinates_MCE_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'ncMCE')\n coords(:,1) = set_angular_coordinates_ncMCE_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'LE')\n coords(:,1) = set_angular_coordinates_LE_2D(xw, angular_adjustment);\n end\n coords(:,2) = set_radial_coordinates(x);\nelseif dims == 3\n coords = zeros(size(x,1),3);\n if strcmp(dim_red,'ISO')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_ISO_3D(xw);\n elseif strcmp(dim_red,'ncISO')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_ncISO_3D(xw);\n elseif strcmp(dim_red,'LE')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_LE_3D(xw);\n end\n coords(:,3) = set_radial_coordinates(x);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Support Functions %%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction xrev = reverse_weights(x)\n\nxrev = x;\nxrev(xrev>0) = abs(x(x>0) - min(x(x>0)) - max(x(x>0)));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_RA1 = RA1_weighting(x)\n\nn = size(x,1);\ncn = x*x;\ndeg = full(sum(x,1));\nx_RA1 = x .* (repmat(deg,n,1) + repmat(deg',1,n) + (repmat(deg,n,1) .* repmat(deg',1,n))) ./ (1 + cn);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_RA2 = RA2_weighting(x)\n\nn = size(x,1);\ncn = x*x;\next = repmat(sum(x,2),1,n) - cn - 1;\nx_RA2 = x .* (1 + ext + ext' + ext.*ext') ./ (1 + cn);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_EBC = EBC_weighting(x)\n\n[~,x_EBC] = betweenness_centrality(sparse(x));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ISO_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = ISOMAP_propack(xw, 2, 'yes');\n\n% from cartesian to polar coordinates\n% using dimensions 1 and 2 of embedding\n[ang_coords,~] = cart2pol(dr_coords(:,1),dr_coords(:,2));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ncISO_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = ISOMAP_propack(xw, 3, 'no');\n\n% from cartesian to polar coordinates\n% using dimensions 2 and 3 of embedding\n[ang_coords,~] = cart2pol(dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = 
set_angular_coordinates_MCE_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = MCE_propack(xw, 1, 'yes');\n\nif strcmp(angular_adjustment,'original')\n % circular adjustment of dimension 1\n ang_coords = circular_adjustment(dr_coords(:,1));\nelseif strcmp(angular_adjustment,'EA')\n % equidistant adjustment of dimension 1\n ang_coords = equidistant_adjustment(dr_coords(:,1));\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ncMCE_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = MCE_propack(xw, 2, 'no');\n\nif strcmp(angular_adjustment,'original')\n % circular adjustment of dimension 2\n ang_coords = circular_adjustment(dr_coords(:,2));\nelseif strcmp(angular_adjustment,'EA')\n % equidistant adjustment of dimension 2\n ang_coords = equidistant_adjustment(dr_coords(:,2));\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_LE_2D(xw, angular_adjustment)\n\n% dimension reduction\nst = triu(full(xw),1);\nst = mean(st(st>0));\nheat_kernel = zeros(size(xw));\nheat_kernel(xw>0) = exp(-((xw(xw>0)./st).^2));\ndr_coords = LE_eigs(heat_kernel, 2);\n\n% from cartesian to polar coordinates\n% using dimensions 2 and 3 of embedding\n% (dimensions 1 and 2 in the code since the first is skipped by the function)\n[ang_coords,~] = cart2pol(dr_coords(:,1),dr_coords(:,2));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = equidistant_adjustment(coords)\n\n% sort input coordinates\n[~,idx] = sort(coords);\n% assign equidistant angular coordinates in [0,2pi[ according to the sorting\nangles = linspace(0, 2*pi, length(coords)+1);\nang_coords(idx) = angles(1:end-1);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = circular_adjustment(coords)\n\n% scale the input coordinates into the range [0,2pi]\nn = length(coords);\nm = 2*pi*(n-1)/n;\nang_coords = ((coords - min(coords)) ./ (max(coords) - min(coords))) * m;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_ISO_3D(xw)\n\n% dimension reduction\ndr_coords = ISOMAP_propack(xw, 3, 'yes');\n\n% from cartesian to spherical coordinates\n% using dimensions 1-3 of embedding\n[azimuth,elevation,~] = cart2sph(dr_coords(:,1),dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_ncISO_3D(xw)\n\n% dimension reduction\ndr_coords = ISOMAP_propack(xw, 4, 'no');\n\n% from cartesian to spherical coordinates\n% using dimensions 2-4 of embedding\n[azimuth,elevation,~] = cart2sph(dr_coords(:,2),dr_coords(:,3),dr_coords(:,4));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_LE_3D(xw)\n\n% dimension reduction\nst = triu(full(xw),1);\nst = mean(st(st>0));\nheat_kernel = zeros(size(xw));\nheat_kernel(xw>0) = exp(-((xw(xw>0)./st).^2));\ndr_coords = 
LE_eigs(heat_kernel, 3);\n\n% from cartesian to spherical coordinates\n% using dimensions 2-4 of embedding (the first is skipped by the LE function)\n[azimuth,elevation,~] = cart2sph(dr_coords(:,1),dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction radial_coordinates = set_radial_coordinates(x)\n\nn = size(x,1);\ndeg = full(sum(x>0,1));\nif all(deg == deg(1))\n error('All the nodes have the same degree, the degree distribution cannot fit a power-law.'); \nend\n\n% fit power-law degree distribution\ngamma_range = 1.01:0.01:10.00;\nsmall_size_limit = 100;\nif length(deg) < small_size_limit\n gamma = plfit(deg, 'finite', 'range', gamma_range);\nelse\n gamma = plfit(deg, 'range', gamma_range);\nend\nbeta = 1 / (gamma - 1);\n\n% sort nodes by decreasing degree\n[~,idx] = sort(deg, 'descend');\n\n% for beta > 1 (gamma < 2) some radial coordinates are negative\nradial_coordinates = zeros(1, n);\nradial_coordinates(idx) = max(0, 2*beta*log(1:n) + 2*(1-beta)*log(n));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [V, time] = LE_eigs(x, m)\n\n% Laplacian Eigenmaps for network embedding in a low dimensional space.\n% 2013-01-27 - Gregorio Alanis-Lobato\n% 2017-02-02 - Alessandro Muscoloni: introduced the usage of eigs\n\n%%% INPUT %%%\n% x - adjacency matrix\n% m - dimensions of embedding\n\n%%% OUTPUT %%%\n% V - coordinates of embedding\n% time - computational time (in seconds)\n\n% suppress eigs warnings (recurring for small matrices)\nwarning('off','MATLAB:nearlySingularMatrix')\nwarning('off','MATLAB:eigs:SigmaNearExactEig')\n\nt = tic;\n\nx = sparse(max(x,x'));\nD = sum(x,2);\nD = diag(D);\n\n% graph laplacian\nL = D - x;\n\ntime = toc(t);\n\n% solve the generalised eigenvalue problem L*V = lambda*D*V\n% and use the eigenvectors related to the smallest eigenvalues\n% discarding the first, since it is zero.\ntry\n t = tic;\n [V,E] = eigs(L, D, m+1, 'sm');\n [~,idx] = sort(E(speye(size(E))==1));\n V = V(:,idx);\n V = V(:,2:m+1);\n time = time + toc(t);\ncatch exc %#ok\n % for small matrices the following error could occur:\n % \"The shifted operator is singular. The shift is an eigenvalue. 
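The set_radial_coordinates routine above places node i (ranked by decreasing degree) at r_i = max(0, 2*beta*log(i) + 2*(1-beta)*log(n)) with beta = 1/(gamma-1). A small self-contained check of that rule follows; the degree sequence is a made-up illustration, and plfit is the routine bundled later in this record (its integer branch calls zeta, as in the record itself).

% --- illustrative check of the radial placement (editor addition) ---
deg  = [9 7 5 4 3 3 2 2 2 1];        % hypothetical degree sequence
n    = numel(deg);
gam  = plfit(deg, 'finite', 'range', 1.01:0.01:10.00);  % same call as above
beta = 1/(gam - 1);
[~, idx] = sort(deg, 'descend');
r = zeros(1, n);
r(idx) = max(0, 2*beta*log(1:n) + 2*(1 - beta)*log(n));
% The highest-degree node ends up nearest the centre of the disk.
[~, hub] = max(deg);
fprintf('hub radius %.3f, max radius %.3f\n', r(hub), max(r));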
Try to use some other shift please\".\n % in this case the function eig is used.\n % warning('Error using the function EIGS:\\n%s\\nThe function EIG has been used.', exc.message)\n t = tic;\n [V,~] = eig(full(L), full(D));\n V = V(:,2:m+1);\n time = time + toc(t);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [V, time] = ISOMAP_propack(x, m, centering)\n\n% ISOMAP for network embedding in a low dimensional space.\n% 2011-09-27 - Carlo Vittorio Cannistraci\n% 2017-02-02 - Alessandro Muscoloni: introduced the PROPACK version of SVD \n\n%%% INPUT %%%\n% x - adjacency matrix\n% m - dimensions of embedding\n% centering - 'yes' or 'no' for centering the kernel\n\n%%% OUTPUT %%%\n% V - coordinates of embedding\n% time - computational time (in seconds)\n\nt = tic;\n\nx = max(x, x');\n\n% shortest paths kernel\nkernel = graphallshortestpaths(sparse(x),'directed','false'); \nclear x;\n\n% kernel centering\nif strcmp(centering, 'yes')\n kernel = kernel_centering(kernel);\nend\n\n% singular value decomposition\n[~,S,V] = lansvd(kernel, m, 'L');\nV = (sqrt(S) * V')';\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [V, time] = MCE_propack(x, m, centering)\n\n% Minimum Curvilinear Embedding for network embedding in a low dimensional space.\n% 2011-09-27 - Carlo Vittorio Cannistraci\n% 2017-02-02 - Alessandro Muscoloni: introduced the PROPACK version of SVD \n\n%%% INPUT %%%\n% x - adjacency matrix\n% m - dimensions of embedding\n% centering - 'yes' or 'no' for centering the kernel\n\n%%% OUTPUT %%%\n% V - coordinates of embedding\n% time - computational time (in seconds)\n\nt = tic;\n\nx = max(x, x');\n\n% MC-kernel\nkernel = graphallshortestpaths(graphminspantree(sparse(x),'method','kruskal'),'directed','false'); \nclear x;\n\n% kernel centering\nif strcmp(centering, 'yes')\n kernel = kernel_centering(kernel);\nend\n\n% singular value decomposition\n[~,S,V] = lansvd(kernel, m, 'L');\nV = (sqrt(S) * V')';\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction D = kernel_centering(D)\n\n% 2011-09-27 - Carlo Vittorio Cannistraci\n\n%%% INPUT %%%\n% D - Distance matrix\n\n%%% OUTPUT %%%\n% D - Centered distance matrix\n\n% Centering\nN = size(D,1);\nJ = eye(N) - (1/N)*ones(N);\nD = -0.5*(J*(D.^2)*J);\n\n% Housekeeping\nD(isnan(D)) = 0;\nD(isinf(D)) = 0;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [alpha, xmin, L]=plfit(x, varargin)\n% PLFIT fits a power-law distributional model to data.\n% Source: http://www.santafe.edu/~aaronc/powerlaws/\n% \n% PLFIT(x) estimates x_min and alpha according to the goodness-of-fit\n% based method described in Clauset, Shalizi, Newman (2007). x is a \n% vector of observations of some quantity to which we wish to fit the \n% power-law distribution p(x) ~ x^-alpha for x >= xmin.\n% PLFIT automatically detects whether x is composed of real or integer\n% values, and applies the appropriate method. 
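Stepping back to the kernel_centering helper shown above: it is the classical MDS double-centering D -> -0.5*J*(D.^2)*J with J = eye(N) - ones(N)/N, which is what makes the subsequent truncated SVD behave like classical multidimensional scaling. A quick numeric property check (editor's sketch; the distance matrix is arbitrary):

% --- double-centering sanity check (editor addition) ---
N = 6;
D = abs(randn(N)); D = max(D, D');   % arbitrary symmetric "distance" matrix
D(1:N+1:end) = 0;                    % zero diagonal
J = eye(N) - ones(N)/N;
K = -0.5 * (J * (D.^2) * J);         % same formula as kernel_centering
% Every row (and column) of the centered kernel sums to ~0,
% because J annihilates constant vectors on both sides.
fprintf('max |row sum| = %g\n', max(abs(sum(K, 2))));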
For discrete data, if\n% min(x) > 1000, PLFIT uses the continuous approximation, which is \n% a reliable in this regime.\n% \n% The fitting procedure works as follows:\n% 1) For each possible choice of x_min, we estimate alpha via the \n% method of maximum likelihood, and calculate the Kolmogorov-Smirnov\n% goodness-of-fit statistic D.\n% 2) We then select as our estimate of x_min, the value that gives the\n% minimum value D over all values of x_min.\n%\n% Note that this procedure gives no estimate of the uncertainty of the \n% fitted parameters, nor of the validity of the fit.\n%\n% Example:\n% x = (1-rand(10000,1)).^(-1/(2.5-1));\n% [alpha, xmin, L] = plfit(x);\n%\n% The output 'alpha' is the maximum likelihood estimate of the scaling\n% exponent, 'xmin' is the estimate of the lower bound of the power-law\n% behavior, and L is the log-likelihood of the data x>=xmin under the\n% fitted power law.\n% \n% For more information, try 'type plfit'\n%\n% See also PLVAR, PLPVA\n\n% Version 1.0 (2007 May)\n% Version 1.0.2 (2007 September)\n% Version 1.0.3 (2007 September)\n% Version 1.0.4 (2008 January)\n% Version 1.0.5 (2008 March)\n% Version 1.0.6 (2008 July)\n% Version 1.0.7 (2008 October)\n% Version 1.0.8 (2009 February)\n% Version 1.0.9 (2009 October)\n% Version 1.0.10 (2010 January)\n% Version 1.0.11 (2012 January)\n% Copyright (C) 2008-2012 Aaron Clauset (Santa Fe Institute)\n% Distributed under GPL 2.0\n% http://www.gnu.org/copyleft/gpl.html\n% PLFIT comes with ABSOLUTELY NO WARRANTY\n% \n% Notes:\n% \n% 1. In order to implement the integer-based methods in Matlab, the numeric\n% maximization of the log-likelihood function was used. This requires\n% that we specify the range of scaling parameters considered. We set\n% this range to be [1.50 : 0.01 : 3.50] by default. This vector can be\n% set by the user like so,\n% \n% a = plfit(x,'range',[1.001:0.001:5.001]);\n% \n% 2. PLFIT can be told to limit the range of values considered as estimates\n% for xmin in three ways. First, it can be instructed to sample these\n% possible values like so,\n% \n% a = plfit(x,'sample',100);\n% \n% which uses 100 uniformly distributed values on the sorted list of\n% unique values in the data set. Second, it can simply omit all\n% candidates above a hard limit, like so\n% \n% a = plfit(x,'limit',3.4);\n% \n% Finally, it can be forced to use a fixed value, like so\n% \n% a = plfit(x,'xmin',3.4);\n% \n% In the case of discrete data, it rounds the limit to the nearest\n% integer.\n% \n% 3. When the input sample size is small (e.g., < 100), the continuous \n% estimator is slightly biased (toward larger values of alpha). To\n% explicitly use an experimental finite-size correction, call PLFIT like\n% so\n% \n% a = plfit(x,'finite');\n% \n% which does a small-size correction to alpha.\n%\n% 4. For continuous data, PLFIT can return erroneously large estimates of \n% alpha when xmin is so large that the number of obs x >= xmin is very \n% small. 
To prevent this, we can truncate the search over xmin values \n% before the finite-size bias becomes significant by calling PLFIT as\n% \n% a = plfit(x,'nosmall');\n% \n% which skips values xmin with finite size bias > 0.1.\n\nvec = [];\nsample = [];\nxminx = [];\nlimit = [];\nfinite = false;\nnosmall = false;\nnowarn = false;\n\n% parse command-line parameters; trap for bad input\ni=1; \nwhile i<=length(varargin), \n argok = 1; \n if ischar(varargin{i}), \n switch varargin{i},\n case 'range', vec = varargin{i+1}; i = i + 1;\n case 'sample', sample = varargin{i+1}; i = i + 1;\n case 'limit', limit = varargin{i+1}; i = i + 1;\n case 'xmin', xminx = varargin{i+1}; i = i + 1;\n case 'finite', finite = true;\n case 'nowarn', nowarn = true;\n case 'nosmall', nosmall = true;\n otherwise, argok=0; \n end\n end\n if ~argok, \n disp(['(PLFIT) Ignoring invalid argument #' num2str(i+1)]); \n end\n i = i+1; \nend\nif ~isempty(vec) && (~isvector(vec) || min(vec)<=1),\n\tfprintf('(PLFIT) Error: ''range'' argument must contain a vector; using default.\\n');\n vec = [];\nend;\nif ~isempty(sample) && (~isscalar(sample) || sample<2),\n\tfprintf('(PLFIT) Error: ''sample'' argument must be a positive integer > 1; using default.\\n');\n sample = [];\nend;\nif ~isempty(limit) && (~isscalar(limit) || limit= 1; using default.\\n');\n limit = [];\nend;\nif ~isempty(xminx) && (~isscalar(xminx) || xminx>=max(x)),\n\tfprintf('(PLFIT) Error: ''xmin'' argument must be a positive value < max(x); using default behavior.\\n');\n xminx = [];\nend;\n\n% reshape input vector\nx = reshape(x,numel(x),1);\n\n% select method (discrete or continuous) for fitting\nif isempty(setdiff(x,floor(x))), f_dattype = 'INTS';\nelseif isreal(x), f_dattype = 'REAL';\nelse f_dattype = 'UNKN';\nend;\nif strcmp(f_dattype,'INTS') && min(x) > 1000 && length(x)>100,\n f_dattype = 'REAL';\nend;\n\n% estimate xmin and alpha, accordingly\nswitch f_dattype,\n \n case 'REAL',\n xmins = unique(x);\n xmins = xmins(1:end-1);\n if ~isempty(xminx),\n xmins = xmins(find(xmins>=xminx,1,'first'));\n end;\n if ~isempty(limit),\n xmins(xmins>limit) = [];\n end;\n if ~isempty(sample),\n xmins = xmins(unique(round(linspace(1,length(xmins),sample))));\n end;\n dat = zeros(size(xmins));\n z = sort(x);\n for xm=1:length(xmins)\n xmin = xmins(xm);\n z = z(z>=xmin); \n n = length(z);\n % estimate alpha using direct MLE\n a = n ./ sum( log(z./xmin) );\n if nosmall,\n if (a-1)/sqrt(n) > 0.1\n dat(xm:end) = [];\n xm = length(xmins)+1; %#ok\n break;\n end;\n end;\n % compute KS statistic\n cx = (0:n-1)'./n;\n cf = 1-(xmin./z).^a;\n dat(xm) = max( abs(cf-cx) );\n end;\n D = min(dat);\n xmin = xmins(find(dat<=D,1,'first'));\n z = x(x>=xmin);\n n = length(z); \n alpha = 1 + n ./ sum( log(z./xmin) );\n if finite, alpha = alpha*(n-1)/n+1/n; end; % finite-size correction\n if n < 50 && ~finite && ~nowarn,\n% fprintf('(PLFIT) Warning: finite-size bias may be present.\\n');\n end;\n L = n*log((alpha-1)/xmin) - alpha.*sum(log(z./xmin));\n\n case 'INTS',\n \n if isempty(vec),\n vec = (1.50:0.01:3.50); % covers range of most practical \n end; % scaling parameters\n zvec = zeta(vec);\n\n xmins = unique(x);\n xmins = xmins(1:end-1);\n if ~isempty(xminx),\n xmins = xmins(find(xmins>=xminx,1,'first'));\n end;\n if ~isempty(limit),\n limit = round(limit);\n xmins(xmins>limit) = [];\n end;\n if ~isempty(sample),\n xmins = xmins(unique(round(linspace(1,length(xmins),sample))));\n end;\n if isempty(xmins)\n fprintf('(PLFIT) Error: x must contain at least two unique values.\\n');\n alpha = 
NaN; xmin = x(1); D = NaN; %#ok\n return;\n end;\n xmax = max(x);\n dat = zeros(length(xmins),2);\n z = x;\n fcatch = 0;\n\n for xm=1:length(xmins)\n xmin = xmins(xm);\n z = z(z>=xmin);\n n = length(z);\n % estimate alpha via direct maximization of likelihood function\n if fcatch==0\n try\n % vectorized version of numerical calculation\n zdiff = sum( repmat((1:xmin-1)',1,length(vec)).^-repmat(vec,xmin-1,1) ,1);\n L = -vec.*sum(log(z)) - n.*log(zvec - zdiff);\n catch\n % catch: force loop to default to iterative version for\n % remainder of the search\n fcatch = 1;\n end;\n end;\n if fcatch==1\n % force iterative calculation (more memory efficient, but \n % can be slower)\n L = -Inf*ones(size(vec));\n slogz = sum(log(z));\n xminvec = (1:xmin-1);\n for k=1:length(vec)\n L(k) = -vec(k)*slogz - n*log(zvec(k) - sum(xminvec.^-vec(k)));\n end\n end;\n [Y,I] = max(L); %#ok\n % compute KS statistic\n fit = cumsum((((xmin:xmax).^-vec(I)))./ (zvec(I) - sum((1:xmin-1).^-vec(I))));\n cdi = cumsum(hist(z,xmin:xmax)./n);\n dat(xm,:) = [max(abs( fit - cdi )) vec(I)];\n end\n % select the index for the minimum value of D\n [D,I] = min(dat(:,1)); %#ok\n xmin = xmins(I);\n z = x(x>=xmin);\n n = length(z);\n alpha = dat(I,2);\n if finite, alpha = alpha*(n-1)/n+1/n; end; % finite-size correction\n if n < 50 && ~finite && ~nowarn,\n% fprintf('(PLFIT) Warning: finite-size bias may be present.\\n');\n end;\n L = -alpha*sum(log(z)) - n*log(zvec(find(vec<=alpha,1,'last')) - sum((1:xmin-1).^-alpha));\n\n otherwise,\n fprintf('(PLFIT) Error: x must contain only reals or only integers.\\n');\n alpha = [];\n xmin = [];\n L = [];\n return;\nend;"} +{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "coalescent_embedding.m", "ext": ".m", "path": "coalescent_embedding-master/coemb_svd_eig/coalescent_embedding.m", "size": 25081, "source_encoding": "utf_8", "md5": "0f84d1345d19f28fe588f1bc8da8aeec", "text": "function coords = coalescent_embedding(x, pre_weighting, dim_red, angular_adjustment, dims)\n\n% Authors:\n% - main code: Alessandro Muscoloni, 2017-09-21\n% - support functions: indicated at the beginning of the function\n\n% Released under MIT License\n% Copyright (c) 2017 A. Muscoloni, J. M. Thomas, C. V. Cannistraci\n\n% Reference:\n% A. Muscoloni, J. M. Thomas, S. Ciucci, G. Bianconi, and C. V. Cannistraci,\n% \"Machine learning meets complex networks via coalescent embedding in the hyperbolic space\",\n% Nature Communications 8, 1615 (2017). 
doi:10.1038/s41467-017-01825-5\n\n% The time complexity of the algorithms is O(N^3).\n\n%%% INPUT %%%\n% x - adjacency matrix of the network, which must be:\n% symmetric, zero-diagonal, one connected component, not fully connected;\n% the network can be weighted\n%\n% pre_weighting - rule for pre-weighting the matrix, the alternatives are:\n% 'original' -> the original weights are considered;\n% NB: they should suggest distances and not similarities\n% 'reverse' -> the original weights reversed are considered;\n% NB: to use when they suggest similarities\n% 'RA1' -> Repulsion-Attraction v1\n% 'RA2' -> Repulsion-Attraction v2\n% 'EBC' -> Edge-Betweenness-Centrality\n%\n% dim_red - dimension reduction technique, the alternatives are:\n% 'ISO' -> Isomap (valid for 2D and 3D)\n% 'ncISO' -> noncentered Isomap (valid for 2D and 3D)\n% 'LE' -> Laplacian Eigenmaps (valid for 2D and 3D)\n% 'MCE' -> Minimum Curvilinear Embedding (only valid for 2D)\n% 'ncMCE' -> noncentered Minimum Curvilinear Embedding (only valid for 2D)\n%\n% angular_adjustment - method for the angular adjustment, the alternatives are:\n% 'original' -> original angular distances are preserved (valid for 2D and 3D)\n% 'EA' -> equidistant adjustment (only valid for 2D)\n% \n% dims - dimensions of the hyperbolic embedding space, the alternatives are:\n% 2 -> hyperbolic disk\n% 3 -> hyperbolic sphere\n\n%%% OUTPUT %%%\n% coords - polar or spherical hyperbolic coordinates of the nodes\n% in the hyperbolic disk they are in the form: [theta,r]\n% in the hyperbolic sphere they are in the form: [azimuth,elevation,r]\n% for details see the documentation of the MATLAB functions\n% \"cart2pol\" and \"cart2sph\"\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n% check input\nvalidateattributes(x, {'numeric'}, {'square','finite','nonnegative'});\nif ~issymmetric(x)\n error('The input matrix must be symmetric.')\nend\nif any(x(speye(size(x))==1))\n error('The input matrix must be zero-diagonal.')\nend\nvalidateattributes(pre_weighting, {'char'}, {});\nvalidateattributes(dim_red, {'char'}, {});\nvalidateattributes(angular_adjustment, {'char'}, {});\nvalidateattributes(dims, {'numeric'}, {'scalar','integer','>=',2,'<=',3});\nif ~any(strcmp(pre_weighting,{'original','reverse','RA1','RA2','EBC'}))\n error('Possible pre-weighting rules: ''original'',''reverse'',''RA1'',''RA2'',''EBC''.');\nend\nif dims == 2\n if ~any(strcmp(dim_red,{'ISO','ncISO','MCE','ncMCE','LE'}))\n error('Possible dimension reduction techniques in 2D: ''ISO'', ''ncISO'', ''MCE'', ''ncMCE'', ''LE''.');\n end\n if ~any(strcmp(angular_adjustment,{'original','EA'}))\n error('Possible angular adjustment methods in 2D: ''original'', ''EA''.');\n end\nelseif dims == 3\n if ~any(strcmp(dim_red,{'ISO','ncISO','LE'}))\n error('Possible dimension reduction techniques in 3D: ''ISO'', ''ncISO'', ''LE''.');\n end\n if ~any(strcmp(angular_adjustment,{'original'}))\n error('Possible angular adjustment methods in 3D: ''original''.');\n end \nend\n\n% pre-weighting\nif strcmp(pre_weighting,'original')\n xw = x;\nelseif strcmp(pre_weighting,'reverse')\n xw = reverse_weights(x);\nelseif strcmp(pre_weighting,'RA1')\n xw = RA1_weighting(double(x>0));\nelseif strcmp(pre_weighting,'RA2')\n xw = RA2_weighting(double(x>0));\nelseif strcmp(pre_weighting,'EBC')\n xw = EBC_weighting(double(x>0));\nend\n\n% dimension reduction and set of hyperbolic coordinates\nif dims == 2\n coords = zeros(size(x,1),2);\n if strcmp(dim_red,'ISO')\n coords(:,1) = 
set_angular_coordinates_ISO_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'ncISO')\n coords(:,1) = set_angular_coordinates_ncISO_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'MCE')\n coords(:,1) = set_angular_coordinates_MCE_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'ncMCE')\n coords(:,1) = set_angular_coordinates_ncMCE_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'LE')\n coords(:,1) = set_angular_coordinates_LE_2D(xw, angular_adjustment);\n end\n coords(:,2) = set_radial_coordinates(x);\nelseif dims == 3\n coords = zeros(size(x,1),3);\n if strcmp(dim_red,'ISO')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_ISO_3D(xw);\n elseif strcmp(dim_red,'ncISO')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_ncISO_3D(xw);\n elseif strcmp(dim_red,'LE')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_LE_3D(xw);\n end\n coords(:,3) = set_radial_coordinates(x);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Support Functions %%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction xrev = reverse_weights(x)\n\nxrev = x;\nxrev(xrev>0) = abs(x(x>0) - min(x(x>0)) - max(x(x>0)));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_RA1 = RA1_weighting(x)\n\nn = size(x,1);\ncn = x*x;\ndeg = full(sum(x,1));\nx_RA1 = x .* (repmat(deg,n,1) + repmat(deg',1,n) + (repmat(deg,n,1) .* repmat(deg',1,n))) ./ (1 + cn);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_RA2 = RA2_weighting(x)\n\nn = size(x,1);\ncn = x*x;\next = repmat(sum(x,2),1,n) - cn - 1;\nx_RA2 = x .* (1 + ext + ext' + ext.*ext') ./ (1 + cn);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_EBC = EBC_weighting(x)\n\n[~,x_EBC] = betweenness_centrality(sparse(x));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ISO_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 2, 'yes');\n\n% from cartesian to polar coordinates\n% using dimensions 1 and 2 of embedding\n[ang_coords,~] = cart2pol(dr_coords(:,1),dr_coords(:,2));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ncISO_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 3, 'no');\n\n% from cartesian to polar coordinates\n% using dimensions 2 and 3 of embedding\n[ang_coords,~] = cart2pol(dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_MCE_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = mce(xw, 1, 'yes');\n\nif strcmp(angular_adjustment,'original')\n % circular adjustment of dimension 1\n ang_coords = circular_adjustment(dr_coords(:,1));\nelseif strcmp(angular_adjustment,'EA')\n % equidistant adjustment of dimension 1\n ang_coords = 
equidistant_adjustment(dr_coords(:,1));\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ncMCE_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = mce(xw, 2, 'no');\n\nif strcmp(angular_adjustment,'original')\n % circular adjustment of dimension 2\n ang_coords = circular_adjustment(dr_coords(:,2));\nelseif strcmp(angular_adjustment,'EA')\n % equidistant adjustment of dimension 2\n ang_coords = equidistant_adjustment(dr_coords(:,2));\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_LE_2D(xw, angular_adjustment)\n\n% dimension reduction\nst = triu(full(xw),1);\nst = mean(st(st>0));\nheat_kernel = zeros(size(xw));\nheat_kernel(xw>0) = exp(-((xw(xw>0)./st).^2));\ndr_coords = leig_graph_carlo_classical(heat_kernel, 2, 'no');\n\n% from cartesian to polar coordinates\n% using dimensions 2 and 3 of embedding\n% (dimensions 1 and 2 in the code since the first is skipped by the function)\n[ang_coords,~] = cart2pol(dr_coords(:,1),dr_coords(:,2));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = equidistant_adjustment(coords)\n\n% sort input coordinates\n[~,idx] = sort(coords);\n% assign equidistant angular coordinates in [0,2pi[ according to the sorting\nangles = linspace(0, 2*pi, length(coords)+1);\nang_coords(idx) = angles(1:end-1);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = circular_adjustment(coords)\n\n% scale the input coordinates into the range [0,2pi]\nn = length(coords);\nm = 2*pi*(n-1)/n;\nang_coords = ((coords - min(coords)) ./ (max(coords) - min(coords))) * m;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_ISO_3D(xw)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 3, 'yes');\n\n% from cartesian to spherical coordinates\n% using dimensions 1-3 of embedding\n[azimuth,elevation,~] = cart2sph(dr_coords(:,1),dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_ncISO_3D(xw)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 4, 'no');\n\n% from cartesian to spherical coordinates\n% using dimensions 2-4 of embedding\n[azimuth,elevation,~] = cart2sph(dr_coords(:,2),dr_coords(:,3),dr_coords(:,4));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_LE_3D(xw)\n\n% dimension reduction\nst = triu(full(xw),1);\nst = mean(st(st>0));\nheat_kernel = zeros(size(xw));\nheat_kernel(xw>0) = exp(-((xw(xw>0)./st).^2));\ndr_coords = leig_graph_carlo_classical(heat_kernel, 3, 'no');\n\n% from cartesian to spherical coordinates\n% using dimensions 2-4 of embedding (the first is skipped by the LE function)\n[azimuth,elevation,~] = cart2sph(dr_coords(:,1),dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 
2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction radial_coordinates = set_radial_coordinates(x)\n\nn = size(x,1);\ndeg = full(sum(x>0,1));\nif all(deg == deg(1))\n error('All the nodes have the same degree, the degree distribution cannot fit a power-law.'); \nend\n\n% fit power-law degree distribution\ngamma_range = 1.01:0.01:10.00;\nsmall_size_limit = 100;\nif length(deg) < small_size_limit\n gamma = plfit(deg, 'finite', 'range', gamma_range);\nelse\n gamma = plfit(deg, 'range', gamma_range);\nend\nbeta = 1 / (gamma - 1);\n\n% sort nodes by decreasing degree\n[~,idx] = sort(deg, 'descend');\n\n% for beta > 1 (gamma < 2) some radial coordinates are negative\nradial_coordinates = zeros(1, n);\nradial_coordinates(idx) = max(0, 2*beta*log(1:n) + 2*(1-beta)*log(n));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [f, time] = leig_graph_carlo_classical(x, d, centring)\n% Maps the high-dimensional samples in 'x' to a low dimensional space using\n% Laplacian Eigenmaps (coded 27-JANUARY-2013 by Gregorio Alanis-Lobato)\n\nt = tic;\n\ngraph = max(x, x');\n\n% Kernel centering\nif strcmp(centring, 'yes')\n graph=kernel_centering(graph); %Compute the centred MC-kernel\nend\n\nD = sum(graph, 2); %Degree values\nD = diag(D); %Degree matrix\n\n% Graph laplacian\nL = D - graph;\n\n% Solving the generalised eigenvalue problem L*f = lambda*D*f\n[f, ~] = eig(L, D); \nf = real(f(:, 2:d+1));\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [s,time] = isomap_graph_carlo(x, n, centring)\n\n%INPUT\n% x => Distance or correlation matrix x\n% n => Dimension into which the data is to be projected\n% centring => 'yes' is x should be centred or 'no' if not\n%OUTPUT\n% s => Sample configuration in the space of n dimensions\n\nt = tic;\n\n% initialization\nx = max(x, x');\n\n% Iso-kernel computation\nkernel=graphallshortestpaths(sparse(x),'directed','false'); \n\nclear x\nkernel=max(kernel,kernel'); \n\n% Kernel centering\nif strcmp(centring, 'yes')\n kernel=kernel_centering(kernel); %Compute the centred Iso-kernel\nend\n\n% Embedding \n[~,L,V] = svd(kernel, 'econ');\nsqrtL = sqrt(L(1:n,1:n)); clear L\nV = V(:,1:n);\ns = real((sqrtL * V')');\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [s, time] = mce(x, n, centring)\n%Given a distance or correlation matrix x, it performs Minimum Curvilinear \n%Embedding (MCE) or non-centred MCE (ncMCE) (coded 27-SEPTEMBER-2011 by \n%Carlo Cannistraci)\n\n%INPUT\n% x => Distance or correlation matrix x\n% n => Dimension into which the data is to be projected\n% centring => 'yes' is x should be centred or 'no' if not\n%OUTPUT\n% s => Sample configuration in the space of n dimensions\n\nt = tic;\n\n% initialization\nx = max(x, x');\n\n% MC-kernel computation\nkernel=graphallshortestpaths(graphminspantree(sparse(x),'method','kruskal'),'directed','false'); \n\nclear x\nkernel=max(kernel,kernel'); \n\n% Kernel centering\nif strcmp(centring, 'yes')\n kernel=kernel_centering(kernel); %Compute the centred MC-kernel\nend\n\n% Embedding \n[~,L,V] = svd(kernel, 'econ');\nsqrtL = sqrt(L(1:n,1:n)); clear L\nV = V(:,1:n);\ns = (sqrtL * V')';\ns=real(s(:,1:n));\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction D = kernel_centering(D)\n\n% 2011-09-27 - Carlo Vittorio Cannistraci\n\n%%% INPUT %%%\n% D - Distance matrix\n\n%%% 
OUTPUT %%%\n% D - Centered distance matrix\n\n% Centering\nN = size(D,1);\nJ = eye(N) - (1/N)*ones(N);\nD = -0.5*(J*(D.^2)*J);\n\n% Housekeeping\nD(isnan(D)) = 0;\nD(isinf(D)) = 0;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [alpha, xmin, L]=plfit(x, varargin)\n% PLFIT fits a power-law distributional model to data.\n% Source: http://www.santafe.edu/~aaronc/powerlaws/\n% \n% PLFIT(x) estimates x_min and alpha according to the goodness-of-fit\n% based method described in Clauset, Shalizi, Newman (2007). x is a \n% vector of observations of some quantity to which we wish to fit the \n% power-law distribution p(x) ~ x^-alpha for x >= xmin.\n% PLFIT automatically detects whether x is composed of real or integer\n% values, and applies the appropriate method. For discrete data, if\n% min(x) > 1000, PLFIT uses the continuous approximation, which is \n% a reliable in this regime.\n% \n% The fitting procedure works as follows:\n% 1) For each possible choice of x_min, we estimate alpha via the \n% method of maximum likelihood, and calculate the Kolmogorov-Smirnov\n% goodness-of-fit statistic D.\n% 2) We then select as our estimate of x_min, the value that gives the\n% minimum value D over all values of x_min.\n%\n% Note that this procedure gives no estimate of the uncertainty of the \n% fitted parameters, nor of the validity of the fit.\n%\n% Example:\n% x = (1-rand(10000,1)).^(-1/(2.5-1));\n% [alpha, xmin, L] = plfit(x);\n%\n% The output 'alpha' is the maximum likelihood estimate of the scaling\n% exponent, 'xmin' is the estimate of the lower bound of the power-law\n% behavior, and L is the log-likelihood of the data x>=xmin under the\n% fitted power law.\n% \n% For more information, try 'type plfit'\n%\n% See also PLVAR, PLPVA\n\n% Version 1.0 (2007 May)\n% Version 1.0.2 (2007 September)\n% Version 1.0.3 (2007 September)\n% Version 1.0.4 (2008 January)\n% Version 1.0.5 (2008 March)\n% Version 1.0.6 (2008 July)\n% Version 1.0.7 (2008 October)\n% Version 1.0.8 (2009 February)\n% Version 1.0.9 (2009 October)\n% Version 1.0.10 (2010 January)\n% Version 1.0.11 (2012 January)\n% Copyright (C) 2008-2012 Aaron Clauset (Santa Fe Institute)\n% Distributed under GPL 2.0\n% http://www.gnu.org/copyleft/gpl.html\n% PLFIT comes with ABSOLUTELY NO WARRANTY\n% \n% Notes:\n% \n% 1. In order to implement the integer-based methods in Matlab, the numeric\n% maximization of the log-likelihood function was used. This requires\n% that we specify the range of scaling parameters considered. We set\n% this range to be [1.50 : 0.01 : 3.50] by default. This vector can be\n% set by the user like so,\n% \n% a = plfit(x,'range',[1.001:0.001:5.001]);\n% \n% 2. PLFIT can be told to limit the range of values considered as estimates\n% for xmin in three ways. First, it can be instructed to sample these\n% possible values like so,\n% \n% a = plfit(x,'sample',100);\n% \n% which uses 100 uniformly distributed values on the sorted list of\n% unique values in the data set. Second, it can simply omit all\n% candidates above a hard limit, like so\n% \n% a = plfit(x,'limit',3.4);\n% \n% Finally, it can be forced to use a fixed value, like so\n% \n% a = plfit(x,'xmin',3.4);\n% \n% In the case of discrete data, it rounds the limit to the nearest\n% integer.\n% \n% 3. When the input sample size is small (e.g., < 100), the continuous \n% estimator is slightly biased (toward larger values of alpha). 
To\n% explicitly use an experimental finite-size correction, call PLFIT like\n% so\n% \n% a = plfit(x,'finite');\n% \n% which does a small-size correction to alpha.\n%\n% 4. For continuous data, PLFIT can return erroneously large estimates of \n% alpha when xmin is so large that the number of obs x >= xmin is very \n% small. To prevent this, we can truncate the search over xmin values \n% before the finite-size bias becomes significant by calling PLFIT as\n% \n% a = plfit(x,'nosmall');\n% \n% which skips values xmin with finite size bias > 0.1.\n\nvec = [];\nsample = [];\nxminx = [];\nlimit = [];\nfinite = false;\nnosmall = false;\nnowarn = false;\n\n% parse command-line parameters; trap for bad input\ni=1; \nwhile i<=length(varargin), \n argok = 1; \n if ischar(varargin{i}), \n switch varargin{i},\n case 'range', vec = varargin{i+1}; i = i + 1;\n case 'sample', sample = varargin{i+1}; i = i + 1;\n case 'limit', limit = varargin{i+1}; i = i + 1;\n case 'xmin', xminx = varargin{i+1}; i = i + 1;\n case 'finite', finite = true;\n case 'nowarn', nowarn = true;\n case 'nosmall', nosmall = true;\n otherwise, argok=0; \n end\n end\n if ~argok, \n disp(['(PLFIT) Ignoring invalid argument #' num2str(i+1)]); \n end\n i = i+1; \nend\nif ~isempty(vec) && (~isvector(vec) || min(vec)<=1),\n\tfprintf('(PLFIT) Error: ''range'' argument must contain a vector; using default.\\n');\n vec = [];\nend;\nif ~isempty(sample) && (~isscalar(sample) || sample<2),\n\tfprintf('(PLFIT) Error: ''sample'' argument must be a positive integer > 1; using default.\\n');\n sample = [];\nend;\nif ~isempty(limit) && (~isscalar(limit) || limit= 1; using default.\\n');\n limit = [];\nend;\nif ~isempty(xminx) && (~isscalar(xminx) || xminx>=max(x)),\n\tfprintf('(PLFIT) Error: ''xmin'' argument must be a positive value < max(x); using default behavior.\\n');\n xminx = [];\nend;\n\n% reshape input vector\nx = reshape(x,numel(x),1);\n\n% select method (discrete or continuous) for fitting\nif isempty(setdiff(x,floor(x))), f_dattype = 'INTS';\nelseif isreal(x), f_dattype = 'REAL';\nelse f_dattype = 'UNKN';\nend;\nif strcmp(f_dattype,'INTS') && min(x) > 1000 && length(x)>100,\n f_dattype = 'REAL';\nend;\n\n% estimate xmin and alpha, accordingly\nswitch f_dattype,\n \n case 'REAL',\n xmins = unique(x);\n xmins = xmins(1:end-1);\n if ~isempty(xminx),\n xmins = xmins(find(xmins>=xminx,1,'first'));\n end;\n if ~isempty(limit),\n xmins(xmins>limit) = [];\n end;\n if ~isempty(sample),\n xmins = xmins(unique(round(linspace(1,length(xmins),sample))));\n end;\n dat = zeros(size(xmins));\n z = sort(x);\n for xm=1:length(xmins)\n xmin = xmins(xm);\n z = z(z>=xmin); \n n = length(z);\n % estimate alpha using direct MLE\n a = n ./ sum( log(z./xmin) );\n if nosmall,\n if (a-1)/sqrt(n) > 0.1\n dat(xm:end) = [];\n xm = length(xmins)+1; %#ok\n break;\n end;\n end;\n % compute KS statistic\n cx = (0:n-1)'./n;\n cf = 1-(xmin./z).^a;\n dat(xm) = max( abs(cf-cx) );\n end;\n D = min(dat);\n xmin = xmins(find(dat<=D,1,'first'));\n z = x(x>=xmin);\n n = length(z); \n alpha = 1 + n ./ sum( log(z./xmin) );\n if finite, alpha = alpha*(n-1)/n+1/n; end; % finite-size correction\n if n < 50 && ~finite && ~nowarn,\n% fprintf('(PLFIT) Warning: finite-size bias may be present.\\n');\n end;\n L = n*log((alpha-1)/xmin) - alpha.*sum(log(z./xmin));\n\n case 'INTS',\n \n if isempty(vec),\n vec = (1.50:0.01:3.50); % covers range of most practical \n end; % scaling parameters\n zvec = zeta(vec);\n\n xmins = unique(x);\n xmins = xmins(1:end-1);\n if ~isempty(xminx),\n xmins = 
xmins(find(xmins>=xminx,1,'first'));\n end;\n if ~isempty(limit),\n limit = round(limit);\n xmins(xmins>limit) = [];\n end;\n if ~isempty(sample),\n xmins = xmins(unique(round(linspace(1,length(xmins),sample))));\n end;\n if isempty(xmins)\n fprintf('(PLFIT) Error: x must contain at least two unique values.\\n');\n alpha = NaN; xmin = x(1); D = NaN; %#ok\n return;\n end;\n xmax = max(x);\n dat = zeros(length(xmins),2);\n z = x;\n fcatch = 0;\n\n for xm=1:length(xmins)\n xmin = xmins(xm);\n z = z(z>=xmin);\n n = length(z);\n % estimate alpha via direct maximization of likelihood function\n if fcatch==0\n try\n % vectorized version of numerical calculation\n zdiff = sum( repmat((1:xmin-1)',1,length(vec)).^-repmat(vec,xmin-1,1) ,1);\n L = -vec.*sum(log(z)) - n.*log(zvec - zdiff);\n catch\n % catch: force loop to default to iterative version for\n % remainder of the search\n fcatch = 1;\n end;\n end;\n if fcatch==1\n % force iterative calculation (more memory efficient, but \n % can be slower)\n L = -Inf*ones(size(vec));\n slogz = sum(log(z));\n xminvec = (1:xmin-1);\n for k=1:length(vec)\n L(k) = -vec(k)*slogz - n*log(zvec(k) - sum(xminvec.^-vec(k)));\n end\n end;\n [Y,I] = max(L); %#ok\n % compute KS statistic\n fit = cumsum((((xmin:xmax).^-vec(I)))./ (zvec(I) - sum((1:xmin-1).^-vec(I))));\n cdi = cumsum(hist(z,xmin:xmax)./n);\n dat(xm,:) = [max(abs( fit - cdi )) vec(I)];\n end\n % select the index for the minimum value of D\n [D,I] = min(dat(:,1)); %#ok\n xmin = xmins(I);\n z = x(x>=xmin);\n n = length(z);\n alpha = dat(I,2);\n if finite, alpha = alpha*(n-1)/n+1/n; end; % finite-size correction\n if n < 50 && ~finite && ~nowarn,\n% fprintf('(PLFIT) Warning: finite-size bias may be present.\\n');\n end;\n L = -alpha*sum(log(z)) - n*log(zvec(find(vec<=alpha,1,'last')) - sum((1:xmin-1).^-alpha));\n\n otherwise,\n fprintf('(PLFIT) Error: x must contain only reals or only integers.\\n');\n alpha = [];\n xmin = [];\n L = [];\n return;\nend;"} +{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "plot_embedding.m", "ext": ".m", "path": "coalescent_embedding-master/usage_example/plot_embedding.m", "size": 4230, "source_encoding": "utf_8", "md5": "2f9d8f22d3ab6070f6eeaf9070e1ad04", "text": "function plot_embedding(x, coords, coloring, labels)\n\n% Authors:\n% - main code: Alessandro Muscoloni, 2017-09-21\n% - support functions: indicated at the beginning of the function\n\n% Released under MIT License\n% Copyright (c) 2017 A. Muscoloni, J. M. Thomas, C. V. Cannistraci\n\n% Reference:\n% A. Muscoloni, J. M. Thomas, S. Ciucci, G. Bianconi, and C. V. Cannistraci,\n% \"Machine learning meets complex networks via coalescent embedding in the hyperbolic space\",\n% Nature Communications 8, 1615 (2017). 
doi:10.1038/s41467-017-01825-5\n\n%%% INPUT %%%\n% x - adjacency matrix (NxN) of the network\n%\n% coords - polar (Nx2) or spherical (Nx3) hyperbolic coordinates of the nodes\n% in the hyperbolic disk they are in the form: [theta,r]\n% in the hyperbolic sphere they are in the form: [azimuth,elevation,r]\n%\n% coloring - string indicating how to color the nodes:\n% 'popularity' - nodes colored by degree with a blue-to-red colormap\n% (valid for 2D and 3D)\n% 'similarity' - nodes colored by angular coordinate with a HSV colormap\n% (valid only for 2D)\n% 'labels' - nodes colored by labels, which can be all unique\n% (for example to indicate an ordering of the nodes)\n% or not (for example to indicate community memberships)\n% (valid for 2D and 3D)\n%\n% labels - numerical labels for the nodes (only needed if coloring = 'labels')\n\n% check input\nnarginchk(3,4);\nvalidateattributes(x, {'numeric'}, {'square','finite','nonnegative'});\nif ~issymmetric(x)\n error('The input matrix must be symmetric.')\nend\nif any(x(speye(size(x))==1))\n error('The input matrix must be zero-diagonal.')\nend\nvalidateattributes(coords, {'numeric'}, {'2d','nrows',length(x)})\ndims = size(coords,2);\nvalidateattributes(dims, {'numeric'}, {'>=',2,'<=',3});\nvalidateattributes(coloring, {'char'}, {});\nif dims == 2 && ~any(strcmp(coloring,{'popularity','similarity','labels'}))\n error('Possible coloring options in 2D: ''popularity'',''similarity'',''labels''.');\nend\nif dims == 3 && ~any(strcmp(coloring,{'popularity','labels'}))\n error('Possible coloring options in 3D: ''popularity'',''labels''.');\nend\nif strcmp(coloring,'labels')\n validateattributes(labels, {'numeric'}, {'vector','numel',length(x)})\nend\n\n% set plot options\nedge_width = 1;\nedge_color = [0.85 0.85 0.85];\nnode_size = 150;\n\n% set the node colors\nif strcmp(coloring,'popularity')\n deg = full(sum(x>0,1));\n deg = round((max(deg)-1) * (deg-min(deg))/(max(deg)-min(deg)) + 1);\n colors = colormap_blue_to_red(max(deg));\n colors = colors(deg,:);\nelseif strcmp(coloring,'similarity')\n colormap('hsv')\n colors = coords(:,1);\nelseif strcmp(coloring,'labels')\n uniq_lab = unique(labels);\n temp = zeros(size(labels));\n for i = 1:length(uniq_lab)\n temp(labels==uniq_lab(i)) = i;\n end\n labels = temp; clear uniq_lab temp;\n colors = hsv(length(unique(labels)));\n colors = colors(labels,:);\nend\n\n% plot the network\nhold on\nradius = 2*log(length(x));\nif dims == 2\n [coords(:,1),coords(:,2)] = pol2cart(coords(:,1),coords(:,2));\n [h1,h2] = gplot(x, coords, 'k'); plot(h1, h2, 'Color', edge_color, 'LineWidth', edge_width);\n scatter(coords(:,1), coords(:,2), node_size, colors, 'filled', 'MarkerEdgeColor', 'k');\n xlim([-radius, radius]); ylim([-radius, radius])\nelseif dims == 3\n [coords(:,1),coords(:,2),coords(:,3)] = sph2cart(coords(:,1),coords(:,2),coords(:,3));\n [r,c] = find(triu(x>0,1));\n for i = 1:length(r)\n plot3([coords(r(i),1) coords(c(i),1)],[coords(r(i),2) coords(c(i),2)], [coords(r(i),3) coords(c(i),3)], ...\n 'Color', edge_color, 'LineWidth', edge_width)\n end\n scatter3(coords(:,1), coords(:,2), coords(:,3), node_size, colors, 'filled', 'MarkerEdgeColor', 'k');\n xlim([-radius, radius]); ylim([-radius, radius]); zlim([-radius, radius])\nend\naxis square\naxis off\n\nfunction colors = colormap_blue_to_red(n)\n\ncolors = zeros(n,3);\nm = round(linspace(1,n,4));\ncolors(1:m(2),2) = linspace(0,1,m(2));\ncolors(1:m(2),3) = 1;\ncolors(m(2):m(3),1) = linspace(0,1,m(3)-m(2)+1);\ncolors(m(2):m(3),2) = 1;\ncolors(m(2):m(3),3) = 
linspace(1,0,m(3)-m(2)+1);\ncolors(m(3):n,1) = 1;\ncolors(m(3):n,2) = linspace(1,0,n-m(3)+1);\n"} +{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "coalescent_embedding.m", "ext": ".m", "path": "coalescent_embedding-master/usage_example/coalescent_embedding.m", "size": 25081, "source_encoding": "utf_8", "md5": "0f84d1345d19f28fe588f1bc8da8aeec", "text": "function coords = coalescent_embedding(x, pre_weighting, dim_red, angular_adjustment, dims)\n\n% Authors:\n% - main code: Alessandro Muscoloni, 2017-09-21\n% - support functions: indicated at the beginning of the function\n\n% Released under MIT License\n% Copyright (c) 2017 A. Muscoloni, J. M. Thomas, C. V. Cannistraci\n\n% Reference:\n% A. Muscoloni, J. M. Thomas, S. Ciucci, G. Bianconi, and C. V. Cannistraci,\n% \"Machine learning meets complex networks via coalescent embedding in the hyperbolic space\",\n% Nature Communications 8, 1615 (2017). doi:10.1038/s41467-017-01825-5\n\n% The time complexity of the algorithms is O(N^3).\n\n%%% INPUT %%%\n% x - adjacency matrix of the network, which must be:\n% symmetric, zero-diagonal, one connected component, not fully connected;\n% the network can be weighted\n%\n% pre_weighting - rule for pre-weighting the matrix, the alternatives are:\n% 'original' -> the original weights are considered;\n% NB: they should suggest distances and not similarities\n% 'reverse' -> the original weights reversed are considered;\n% NB: to use when they suggest similarities\n% 'RA1' -> Repulsion-Attraction v1\n% 'RA2' -> Repulsion-Attraction v2\n% 'EBC' -> Edge-Betweenness-Centrality\n%\n% dim_red - dimension reduction technique, the alternatives are:\n% 'ISO' -> Isomap (valid for 2D and 3D)\n% 'ncISO' -> noncentered Isomap (valid for 2D and 3D)\n% 'LE' -> Laplacian Eigenmaps (valid for 2D and 3D)\n% 'MCE' -> Minimum Curvilinear Embedding (only valid for 2D)\n% 'ncMCE' -> noncentered Minimum Curvilinear Embedding (only valid for 2D)\n%\n% angular_adjustment - method for the angular adjustment, the alternatives are:\n% 'original' -> original angular distances are preserved (valid for 2D and 3D)\n% 'EA' -> equidistant adjustment (only valid for 2D)\n% \n% dims - dimensions of the hyperbolic embedding space, the alternatives are:\n% 2 -> hyperbolic disk\n% 3 -> hyperbolic sphere\n\n%%% OUTPUT %%%\n% coords - polar or spherical hyperbolic coordinates of the nodes\n% in the hyperbolic disk they are in the form: [theta,r]\n% in the hyperbolic sphere they are in the form: [azimuth,elevation,r]\n% for details see the documentation of the MATLAB functions\n% \"cart2pol\" and \"cart2sph\"\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n% check input\nvalidateattributes(x, {'numeric'}, {'square','finite','nonnegative'});\nif ~issymmetric(x)\n error('The input matrix must be symmetric.')\nend\nif any(x(speye(size(x))==1))\n error('The input matrix must be zero-diagonal.')\nend\nvalidateattributes(pre_weighting, {'char'}, {});\nvalidateattributes(dim_red, {'char'}, {});\nvalidateattributes(angular_adjustment, {'char'}, {});\nvalidateattributes(dims, {'numeric'}, {'scalar','integer','>=',2,'<=',3});\nif ~any(strcmp(pre_weighting,{'original','reverse','RA1','RA2','EBC'}))\n error('Possible pre-weighting rules: ''original'',''reverse'',''RA1'',''RA2'',''EBC''.');\nend\nif dims == 2\n if ~any(strcmp(dim_red,{'ISO','ncISO','MCE','ncMCE','LE'}))\n error('Possible dimension reduction techniques in 2D: ''ISO'', ''ncISO'', ''MCE'', ''ncMCE'', 
''LE''.');\n end\n if ~any(strcmp(angular_adjustment,{'original','EA'}))\n error('Possible angular adjustment methods in 2D: ''original'', ''EA''.');\n end\nelseif dims == 3\n if ~any(strcmp(dim_red,{'ISO','ncISO','LE'}))\n error('Possible dimension reduction techniques in 3D: ''ISO'', ''ncISO'', ''LE''.');\n end\n if ~any(strcmp(angular_adjustment,{'original'}))\n error('Possible angular adjustment methods in 3D: ''original''.');\n end \nend\n\n% pre-weighting\nif strcmp(pre_weighting,'original')\n xw = x;\nelseif strcmp(pre_weighting,'reverse')\n xw = reverse_weights(x);\nelseif strcmp(pre_weighting,'RA1')\n xw = RA1_weighting(double(x>0));\nelseif strcmp(pre_weighting,'RA2')\n xw = RA2_weighting(double(x>0));\nelseif strcmp(pre_weighting,'EBC')\n xw = EBC_weighting(double(x>0));\nend\n\n% dimension reduction and set of hyperbolic coordinates\nif dims == 2\n coords = zeros(size(x,1),2);\n if strcmp(dim_red,'ISO')\n coords(:,1) = set_angular_coordinates_ISO_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'ncISO')\n coords(:,1) = set_angular_coordinates_ncISO_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'MCE')\n coords(:,1) = set_angular_coordinates_MCE_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'ncMCE')\n coords(:,1) = set_angular_coordinates_ncMCE_2D(xw, angular_adjustment);\n elseif strcmp(dim_red,'LE')\n coords(:,1) = set_angular_coordinates_LE_2D(xw, angular_adjustment);\n end\n coords(:,2) = set_radial_coordinates(x);\nelseif dims == 3\n coords = zeros(size(x,1),3);\n if strcmp(dim_red,'ISO')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_ISO_3D(xw);\n elseif strcmp(dim_red,'ncISO')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_ncISO_3D(xw);\n elseif strcmp(dim_red,'LE')\n [coords(:,1),coords(:,2)] = set_angular_coordinates_LE_3D(xw);\n end\n coords(:,3) = set_radial_coordinates(x);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Support Functions %%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction xrev = reverse_weights(x)\n\nxrev = x;\nxrev(xrev>0) = abs(x(x>0) - min(x(x>0)) - max(x(x>0)));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_RA1 = RA1_weighting(x)\n\nn = size(x,1);\ncn = x*x;\ndeg = full(sum(x,1));\nx_RA1 = x .* (repmat(deg,n,1) + repmat(deg',1,n) + (repmat(deg,n,1) .* repmat(deg',1,n))) ./ (1 + cn);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_RA2 = RA2_weighting(x)\n\nn = size(x,1);\ncn = x*x;\next = repmat(sum(x,2),1,n) - cn - 1;\nx_RA2 = x .* (1 + ext + ext' + ext.*ext') ./ (1 + cn);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction x_EBC = EBC_weighting(x)\n\n[~,x_EBC] = betweenness_centrality(sparse(x));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ISO_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 2, 'yes');\n\n% from cartesian to polar coordinates\n% using dimensions 1 and 2 of embedding\n[ang_coords,~] = cart2pol(dr_coords(:,1),dr_coords(:,2));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = 
set_angular_coordinates_ncISO_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 3, 'no');\n\n% from cartesian to polar coordinates\n% using dimensions 2 and 3 of embedding\n[ang_coords,~] = cart2pol(dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_MCE_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = mce(xw, 1, 'yes');\n\nif strcmp(angular_adjustment,'original')\n % circular adjustment of dimension 1\n ang_coords = circular_adjustment(dr_coords(:,1));\nelseif strcmp(angular_adjustment,'EA')\n % equidistant adjustment of dimension 1\n ang_coords = equidistant_adjustment(dr_coords(:,1));\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_ncMCE_2D(xw, angular_adjustment)\n\n% dimension reduction\ndr_coords = mce(xw, 2, 'no');\n\nif strcmp(angular_adjustment,'original')\n % circular adjustment of dimension 2\n ang_coords = circular_adjustment(dr_coords(:,2));\nelseif strcmp(angular_adjustment,'EA')\n % equidistant adjustment of dimension 2\n ang_coords = equidistant_adjustment(dr_coords(:,2));\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = set_angular_coordinates_LE_2D(xw, angular_adjustment)\n\n% dimension reduction\nst = triu(full(xw),1);\nst = mean(st(st>0));\nheat_kernel = zeros(size(xw));\nheat_kernel(xw>0) = exp(-((xw(xw>0)./st).^2));\ndr_coords = leig_graph_carlo_classical(heat_kernel, 2, 'no');\n\n% from cartesian to polar coordinates\n% using dimensions 2 and 3 of embedding\n% (dimensions 1 and 2 in the code since the first is skipped by the function)\n[ang_coords,~] = cart2pol(dr_coords(:,1),dr_coords(:,2));\n% change angular range from [-pi,pi] to [0,2pi]\nang_coords = mod(ang_coords + 2*pi, 2*pi);\n\nif strcmp(angular_adjustment,'EA')\n ang_coords = equidistant_adjustment(ang_coords);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = equidistant_adjustment(coords)\n\n% sort input coordinates\n[~,idx] = sort(coords);\n% assign equidistant angular coordinates in [0,2pi[ according to the sorting\nangles = linspace(0, 2*pi, length(coords)+1);\nang_coords(idx) = angles(1:end-1);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction ang_coords = circular_adjustment(coords)\n\n% scale the input coordinates into the range [0,2pi]\nn = length(coords);\nm = 2*pi*(n-1)/n;\nang_coords = ((coords - min(coords)) ./ (max(coords) - min(coords))) * m;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_ISO_3D(xw)\n\n% dimension reduction\ndr_coords = isomap_graph_carlo(xw, 3, 'yes');\n\n% from cartesian to spherical coordinates\n% using dimensions 1-3 of embedding\n[azimuth,elevation,~] = cart2sph(dr_coords(:,1),dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_ncISO_3D(xw)\n\n% dimension reduction\ndr_coords = 
isomap_graph_carlo(xw, 4, 'no');\n\n% from cartesian to spherical coordinates\n% using dimensions 2-4 of embedding\n[azimuth,elevation,~] = cart2sph(dr_coords(:,2),dr_coords(:,3),dr_coords(:,4));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [azimuth, elevation] = set_angular_coordinates_LE_3D(xw)\n\n% dimension reduction\nst = triu(full(xw),1);\nst = mean(st(st>0));\nheat_kernel = zeros(size(xw));\nheat_kernel(xw>0) = exp(-((xw(xw>0)./st).^2));\ndr_coords = leig_graph_carlo_classical(heat_kernel, 3, 'no');\n\n% from cartesian to spherical coordinates\n% using dimensions 2-4 of embedding (the first is skipped by the LE function)\n[azimuth,elevation,~] = cart2sph(dr_coords(:,1),dr_coords(:,2),dr_coords(:,3));\n% change angular range from [-pi,pi] to [0,2pi]\nazimuth = mod(azimuth + 2*pi, 2*pi);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction radial_coordinates = set_radial_coordinates(x)\n\nn = size(x,1);\ndeg = full(sum(x>0,1));\nif all(deg == deg(1))\n error('All the nodes have the same degree, the degree distribution cannot fit a power-law.'); \nend\n\n% fit power-law degree distribution\ngamma_range = 1.01:0.01:10.00;\nsmall_size_limit = 100;\nif length(deg) < small_size_limit\n gamma = plfit(deg, 'finite', 'range', gamma_range);\nelse\n gamma = plfit(deg, 'range', gamma_range);\nend\nbeta = 1 / (gamma - 1);\n\n% sort nodes by decreasing degree\n[~,idx] = sort(deg, 'descend');\n\n% for beta > 1 (gamma < 2) some radial coordinates are negative\nradial_coordinates = zeros(1, n);\nradial_coordinates(idx) = max(0, 2*beta*log(1:n) + 2*(1-beta)*log(n));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [f, time] = leig_graph_carlo_classical(x, d, centring)\n% Maps the high-dimensional samples in 'x' to a low dimensional space using\n% Laplacian Eigenmaps (coded 27-JANUARY-2013 by Gregorio Alanis-Lobato)\n\nt = tic;\n\ngraph = max(x, x');\n\n% Kernel centering\nif strcmp(centring, 'yes')\n graph=kernel_centering(graph); %Compute the centred MC-kernel\nend\n\nD = sum(graph, 2); %Degree values\nD = diag(D); %Degree matrix\n\n% Graph laplacian\nL = D - graph;\n\n% Solving the generalised eigenvalue problem L*f = lambda*D*f\n[f, ~] = eig(L, D); \nf = real(f(:, 2:d+1));\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [s,time] = isomap_graph_carlo(x, n, centring)\n\n%INPUT\n% x => Distance or correlation matrix x\n% n => Dimension into which the data is to be projected\n% centring => 'yes' is x should be centred or 'no' if not\n%OUTPUT\n% s => Sample configuration in the space of n dimensions\n\nt = tic;\n\n% initialization\nx = max(x, x');\n\n% Iso-kernel computation\nkernel=graphallshortestpaths(sparse(x),'directed','false'); \n\nclear x\nkernel=max(kernel,kernel'); \n\n% Kernel centering\nif strcmp(centring, 'yes')\n kernel=kernel_centering(kernel); %Compute the centred Iso-kernel\nend\n\n% Embedding \n[~,L,V] = svd(kernel, 'econ');\nsqrtL = sqrt(L(1:n,1:n)); clear L\nV = V(:,1:n);\ns = real((sqrtL * V')');\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [s, time] = mce(x, n, centring)\n%Given a distance or correlation matrix x, it performs Minimum Curvilinear \n%Embedding (MCE) or non-centred MCE (ncMCE) (coded 27-SEPTEMBER-2011 by \n%Carlo 
Cannistraci)\n\n%INPUT\n% x => Distance or correlation matrix x\n% n => Dimension into which the data is to be projected\n% centring => 'yes' is x should be centred or 'no' if not\n%OUTPUT\n% s => Sample configuration in the space of n dimensions\n\nt = tic;\n\n% initialization\nx = max(x, x');\n\n% MC-kernel computation\nkernel=graphallshortestpaths(graphminspantree(sparse(x),'method','kruskal'),'directed','false'); \n\nclear x\nkernel=max(kernel,kernel'); \n\n% Kernel centering\nif strcmp(centring, 'yes')\n kernel=kernel_centering(kernel); %Compute the centred MC-kernel\nend\n\n% Embedding \n[~,L,V] = svd(kernel, 'econ');\nsqrtL = sqrt(L(1:n,1:n)); clear L\nV = V(:,1:n);\ns = (sqrtL * V')';\ns=real(s(:,1:n));\n\ntime = toc(t);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction D = kernel_centering(D)\n\n% 2011-09-27 - Carlo Vittorio Cannistraci\n\n%%% INPUT %%%\n% D - Distance matrix\n\n%%% OUTPUT %%%\n% D - Centered distance matrix\n\n% Centering\nN = size(D,1);\nJ = eye(N) - (1/N)*ones(N);\nD = -0.5*(J*(D.^2)*J);\n\n% Housekeeping\nD(isnan(D)) = 0;\nD(isinf(D)) = 0;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [alpha, xmin, L]=plfit(x, varargin)\n% PLFIT fits a power-law distributional model to data.\n% Source: http://www.santafe.edu/~aaronc/powerlaws/\n% \n% PLFIT(x) estimates x_min and alpha according to the goodness-of-fit\n% based method described in Clauset, Shalizi, Newman (2007). x is a \n% vector of observations of some quantity to which we wish to fit the \n% power-law distribution p(x) ~ x^-alpha for x >= xmin.\n% PLFIT automatically detects whether x is composed of real or integer\n% values, and applies the appropriate method. For discrete data, if\n% min(x) > 1000, PLFIT uses the continuous approximation, which is \n% a reliable in this regime.\n% \n% The fitting procedure works as follows:\n% 1) For each possible choice of x_min, we estimate alpha via the \n% method of maximum likelihood, and calculate the Kolmogorov-Smirnov\n% goodness-of-fit statistic D.\n% 2) We then select as our estimate of x_min, the value that gives the\n% minimum value D over all values of x_min.\n%\n% Note that this procedure gives no estimate of the uncertainty of the \n% fitted parameters, nor of the validity of the fit.\n%\n% Example:\n% x = (1-rand(10000,1)).^(-1/(2.5-1));\n% [alpha, xmin, L] = plfit(x);\n%\n% The output 'alpha' is the maximum likelihood estimate of the scaling\n% exponent, 'xmin' is the estimate of the lower bound of the power-law\n% behavior, and L is the log-likelihood of the data x>=xmin under the\n% fitted power law.\n% \n% For more information, try 'type plfit'\n%\n% See also PLVAR, PLPVA\n\n% Version 1.0 (2007 May)\n% Version 1.0.2 (2007 September)\n% Version 1.0.3 (2007 September)\n% Version 1.0.4 (2008 January)\n% Version 1.0.5 (2008 March)\n% Version 1.0.6 (2008 July)\n% Version 1.0.7 (2008 October)\n% Version 1.0.8 (2009 February)\n% Version 1.0.9 (2009 October)\n% Version 1.0.10 (2010 January)\n% Version 1.0.11 (2012 January)\n% Copyright (C) 2008-2012 Aaron Clauset (Santa Fe Institute)\n% Distributed under GPL 2.0\n% http://www.gnu.org/copyleft/gpl.html\n% PLFIT comes with ABSOLUTELY NO WARRANTY\n% \n% Notes:\n% \n% 1. In order to implement the integer-based methods in Matlab, the numeric\n% maximization of the log-likelihood function was used. This requires\n% that we specify the range of scaling parameters considered. We set\n% this range to be [1.50 : 0.01 : 3.50] by default. 
This vector can be\n% set by the user like so,\n% \n% a = plfit(x,'range',[1.001:0.001:5.001]);\n% \n% 2. PLFIT can be told to limit the range of values considered as estimates\n% for xmin in three ways. First, it can be instructed to sample these\n% possible values like so,\n% \n% a = plfit(x,'sample',100);\n% \n% which uses 100 uniformly distributed values on the sorted list of\n% unique values in the data set. Second, it can simply omit all\n% candidates above a hard limit, like so\n% \n% a = plfit(x,'limit',3.4);\n% \n% Finally, it can be forced to use a fixed value, like so\n% \n% a = plfit(x,'xmin',3.4);\n% \n% In the case of discrete data, it rounds the limit to the nearest\n% integer.\n% \n% 3. When the input sample size is small (e.g., < 100), the continuous \n% estimator is slightly biased (toward larger values of alpha). To\n% explicitly use an experimental finite-size correction, call PLFIT like\n% so\n% \n% a = plfit(x,'finite');\n% \n% which does a small-size correction to alpha.\n%\n% 4. For continuous data, PLFIT can return erroneously large estimates of \n% alpha when xmin is so large that the number of obs x >= xmin is very \n% small. To prevent this, we can truncate the search over xmin values \n% before the finite-size bias becomes significant by calling PLFIT as\n% \n% a = plfit(x,'nosmall');\n% \n% which skips values xmin with finite size bias > 0.1.\n\nvec = [];\nsample = [];\nxminx = [];\nlimit = [];\nfinite = false;\nnosmall = false;\nnowarn = false;\n\n% parse command-line parameters; trap for bad input\ni=1; \nwhile i<=length(varargin), \n argok = 1; \n if ischar(varargin{i}), \n switch varargin{i},\n case 'range', vec = varargin{i+1}; i = i + 1;\n case 'sample', sample = varargin{i+1}; i = i + 1;\n case 'limit', limit = varargin{i+1}; i = i + 1;\n case 'xmin', xminx = varargin{i+1}; i = i + 1;\n case 'finite', finite = true;\n case 'nowarn', nowarn = true;\n case 'nosmall', nosmall = true;\n otherwise, argok=0; \n end\n end\n if ~argok, \n disp(['(PLFIT) Ignoring invalid argument #' num2str(i+1)]); \n end\n i = i+1; \nend\nif ~isempty(vec) && (~isvector(vec) || min(vec)<=1),\n\tfprintf('(PLFIT) Error: ''range'' argument must contain a vector; using default.\\n');\n vec = [];\nend;\nif ~isempty(sample) && (~isscalar(sample) || sample<2),\n\tfprintf('(PLFIT) Error: ''sample'' argument must be a positive integer > 1; using default.\\n');\n sample = [];\nend;\nif ~isempty(limit) && (~isscalar(limit) || limit= 1; using default.\\n');\n limit = [];\nend;\nif ~isempty(xminx) && (~isscalar(xminx) || xminx>=max(x)),\n\tfprintf('(PLFIT) Error: ''xmin'' argument must be a positive value < max(x); using default behavior.\\n');\n xminx = [];\nend;\n\n% reshape input vector\nx = reshape(x,numel(x),1);\n\n% select method (discrete or continuous) for fitting\nif isempty(setdiff(x,floor(x))), f_dattype = 'INTS';\nelseif isreal(x), f_dattype = 'REAL';\nelse f_dattype = 'UNKN';\nend;\nif strcmp(f_dattype,'INTS') && min(x) > 1000 && length(x)>100,\n f_dattype = 'REAL';\nend;\n\n% estimate xmin and alpha, accordingly\nswitch f_dattype,\n \n case 'REAL',\n xmins = unique(x);\n xmins = xmins(1:end-1);\n if ~isempty(xminx),\n xmins = xmins(find(xmins>=xminx,1,'first'));\n end;\n if ~isempty(limit),\n xmins(xmins>limit) = [];\n end;\n if ~isempty(sample),\n xmins = xmins(unique(round(linspace(1,length(xmins),sample))));\n end;\n dat = zeros(size(xmins));\n z = sort(x);\n for xm=1:length(xmins)\n xmin = xmins(xm);\n z = z(z>=xmin); \n n = length(z);\n % estimate alpha using direct MLE\n 
a = n ./ sum( log(z./xmin) );\n if nosmall,\n if (a-1)/sqrt(n) > 0.1\n dat(xm:end) = [];\n xm = length(xmins)+1; %#ok\n break;\n end;\n end;\n % compute KS statistic\n cx = (0:n-1)'./n;\n cf = 1-(xmin./z).^a;\n dat(xm) = max( abs(cf-cx) );\n end;\n D = min(dat);\n xmin = xmins(find(dat<=D,1,'first'));\n z = x(x>=xmin);\n n = length(z); \n alpha = 1 + n ./ sum( log(z./xmin) );\n if finite, alpha = alpha*(n-1)/n+1/n; end; % finite-size correction\n if n < 50 && ~finite && ~nowarn,\n% fprintf('(PLFIT) Warning: finite-size bias may be present.\\n');\n end;\n L = n*log((alpha-1)/xmin) - alpha.*sum(log(z./xmin));\n\n case 'INTS',\n \n if isempty(vec),\n vec = (1.50:0.01:3.50); % covers range of most practical \n end; % scaling parameters\n zvec = zeta(vec);\n\n xmins = unique(x);\n xmins = xmins(1:end-1);\n if ~isempty(xminx),\n xmins = xmins(find(xmins>=xminx,1,'first'));\n end;\n if ~isempty(limit),\n limit = round(limit);\n xmins(xmins>limit) = [];\n end;\n if ~isempty(sample),\n xmins = xmins(unique(round(linspace(1,length(xmins),sample))));\n end;\n if isempty(xmins)\n fprintf('(PLFIT) Error: x must contain at least two unique values.\\n');\n alpha = NaN; xmin = x(1); D = NaN; %#ok\n return;\n end;\n xmax = max(x);\n dat = zeros(length(xmins),2);\n z = x;\n fcatch = 0;\n\n for xm=1:length(xmins)\n xmin = xmins(xm);\n z = z(z>=xmin);\n n = length(z);\n % estimate alpha via direct maximization of likelihood function\n if fcatch==0\n try\n % vectorized version of numerical calculation\n zdiff = sum( repmat((1:xmin-1)',1,length(vec)).^-repmat(vec,xmin-1,1) ,1);\n L = -vec.*sum(log(z)) - n.*log(zvec - zdiff);\n catch\n % catch: force loop to default to iterative version for\n % remainder of the search\n fcatch = 1;\n end;\n end;\n if fcatch==1\n % force iterative calculation (more memory efficient, but \n % can be slower)\n L = -Inf*ones(size(vec));\n slogz = sum(log(z));\n xminvec = (1:xmin-1);\n for k=1:length(vec)\n L(k) = -vec(k)*slogz - n*log(zvec(k) - sum(xminvec.^-vec(k)));\n end\n end;\n [Y,I] = max(L); %#ok\n % compute KS statistic\n fit = cumsum((((xmin:xmax).^-vec(I)))./ (zvec(I) - sum((1:xmin-1).^-vec(I))));\n cdi = cumsum(hist(z,xmin:xmax)./n);\n dat(xm,:) = [max(abs( fit - cdi )) vec(I)];\n end\n % select the index for the minimum value of D\n [D,I] = min(dat(:,1)); %#ok\n xmin = xmins(I);\n z = x(x>=xmin);\n n = length(z);\n alpha = dat(I,2);\n if finite, alpha = alpha*(n-1)/n+1/n; end; % finite-size correction\n if n < 50 && ~finite && ~nowarn,\n% fprintf('(PLFIT) Warning: finite-size bias may be present.\\n');\n end;\n L = -alpha*sum(log(z)) - n*log(zvec(find(vec<=alpha,1,'last')) - sum((1:xmin-1).^-alpha));\n\n otherwise,\n fprintf('(PLFIT) Error: x must contain only reals or only integers.\\n');\n alpha = [];\n xmin = [];\n L = [];\n return;\nend;"} +{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "plot_embedding.m", "ext": ".m", "path": "coalescent_embedding-master/visualization_and_evaluation/plot_embedding.m", "size": 4230, "source_encoding": "utf_8", "md5": "2f9d8f22d3ab6070f6eeaf9070e1ad04", "text": "function plot_embedding(x, coords, coloring, labels)\n\n% Authors:\n% - main code: Alessandro Muscoloni, 2017-09-21\n% - support functions: indicated at the beginning of the function\n\n% Released under MIT License\n% Copyright (c) 2017 A. Muscoloni, J. M. Thomas, C. V. Cannistraci\n\n% Reference:\n% A. Muscoloni, J. M. Thomas, S. Ciucci, G. Bianconi, and C. V. 
Cannistraci,\n% \"Machine learning meets complex networks via coalescent embedding in the hyperbolic space\",\n% Nature Communications 8, 1615 (2017). doi:10.1038/s41467-017-01825-5\n\n%%% INPUT %%%\n% x - adjacency matrix (NxN) of the network\n%\n% coords - polar (Nx2) or spherical (Nx3) hyperbolic coordinates of the nodes\n% in the hyperbolic disk they are in the form: [theta,r]\n% in the hyperbolic sphere they are in the form: [azimuth,elevation,r]\n%\n% coloring - string indicating how to color the nodes:\n% 'popularity' - nodes colored by degree with a blue-to-red colormap\n% (valid for 2D and 3D)\n% 'similarity' - nodes colored by angular coordinate with a HSV colormap\n% (valid only for 2D)\n% 'labels' - nodes colored by labels, which can be all unique\n% (for example to indicate an ordering of the nodes)\n% or not (for example to indicate community memberships)\n% (valid for 2D and 3D)\n%\n% labels - numerical labels for the nodes (only needed if coloring = 'labels')\n\n% check input\nnarginchk(3,4);\nvalidateattributes(x, {'numeric'}, {'square','finite','nonnegative'});\nif ~issymmetric(x)\n error('The input matrix must be symmetric.')\nend\nif any(x(speye(size(x))==1))\n error('The input matrix must be zero-diagonal.')\nend\nvalidateattributes(coords, {'numeric'}, {'2d','nrows',length(x)})\ndims = size(coords,2);\nvalidateattributes(dims, {'numeric'}, {'>=',2,'<=',3});\nvalidateattributes(coloring, {'char'}, {});\nif dims == 2 && ~any(strcmp(coloring,{'popularity','similarity','labels'}))\n error('Possible coloring options in 2D: ''popularity'',''similarity'',''labels''.');\nend\nif dims == 3 && ~any(strcmp(coloring,{'popularity','labels'}))\n error('Possible coloring options in 3D: ''popularity'',''labels''.');\nend\nif strcmp(coloring,'labels')\n validateattributes(labels, {'numeric'}, {'vector','numel',length(x)})\nend\n\n% set plot options\nedge_width = 1;\nedge_color = [0.85 0.85 0.85];\nnode_size = 150;\n\n% set the node colors\nif strcmp(coloring,'popularity')\n deg = full(sum(x>0,1));\n deg = round((max(deg)-1) * (deg-min(deg))/(max(deg)-min(deg)) + 1);\n colors = colormap_blue_to_red(max(deg));\n colors = colors(deg,:);\nelseif strcmp(coloring,'similarity')\n colormap('hsv')\n colors = coords(:,1);\nelseif strcmp(coloring,'labels')\n uniq_lab = unique(labels);\n temp = zeros(size(labels));\n for i = 1:length(uniq_lab)\n temp(labels==uniq_lab(i)) = i;\n end\n labels = temp; clear uniq_lab temp;\n colors = hsv(length(unique(labels)));\n colors = colors(labels,:);\nend\n\n% plot the network\nhold on\nradius = 2*log(length(x));\nif dims == 2\n [coords(:,1),coords(:,2)] = pol2cart(coords(:,1),coords(:,2));\n [h1,h2] = gplot(x, coords, 'k'); plot(h1, h2, 'Color', edge_color, 'LineWidth', edge_width);\n scatter(coords(:,1), coords(:,2), node_size, colors, 'filled', 'MarkerEdgeColor', 'k');\n xlim([-radius, radius]); ylim([-radius, radius])\nelseif dims == 3\n [coords(:,1),coords(:,2),coords(:,3)] = sph2cart(coords(:,1),coords(:,2),coords(:,3));\n [r,c] = find(triu(x>0,1));\n for i = 1:length(r)\n plot3([coords(r(i),1) coords(c(i),1)],[coords(r(i),2) coords(c(i),2)], [coords(r(i),3) coords(c(i),3)], ...\n 'Color', edge_color, 'LineWidth', edge_width)\n end\n scatter3(coords(:,1), coords(:,2), coords(:,3), node_size, colors, 'filled', 'MarkerEdgeColor', 'k');\n xlim([-radius, radius]); ylim([-radius, radius]); zlim([-radius, radius])\nend\naxis square\naxis off\n\nfunction colors = colormap_blue_to_red(n)\n\ncolors = zeros(n,3);\nm = round(linspace(1,n,4));\ncolors(1:m(2),2) = 
linspace(0,1,m(2));\ncolors(1:m(2),3) = 1;\ncolors(m(2):m(3),1) = linspace(0,1,m(3)-m(2)+1);\ncolors(m(2):m(3),2) = 1;\ncolors(m(2):m(3),3) = linspace(1,0,m(3)-m(2)+1);\ncolors(m(3):n,1) = 1;\ncolors(m(3):n,2) = linspace(1,0,n-m(3)+1);\n"} +{"plateform": "github", "repo_name": "biomedical-cybernetics/coalescent_embedding-master", "name": "compute_angular_separation.m", "ext": ".m", "path": "coalescent_embedding-master/visualization_and_evaluation/angular_separation_index/compute_angular_separation.m", "size": 10618, "source_encoding": "utf_8", "md5": "ccbc95531e7b891f35adce0aed6455b5", "text": "function [index, group_index, pvalue] = compute_angular_separation(coords, labels, show_plot, rand_reps, rand_seed, worst_comp)\n\n% MATLAB implementation of the angular separation index (ASI):\n% a quantitative measure to evaluate the separation of groups\n% over the circle circumference (2D) or sphere surface (3D).\n\n% Reference:\n% A. Muscoloni and C. V. Cannistraci (2019), \"Angular separability of data clusters or network communities\n% in geometrical space and its relevance to hyperbolic embedding\", arXiv:1907.00025\n\n% Released under MIT License\n% Copyright (c) 2019 A. Muscoloni, C. V. Cannistraci\n\n%%% INPUT %%%\n% coords - 2D case: Nx1 vector containing for each sample the angular coordinates\n% 3D case: Nx2 matrix containing for each sample the coordinates (azimut,elevation)\n% angular and azimuth coordinates must be in [0,2pi]\n% elevation coordinates must be in [-pi/2,pi/2]\n% the code automatically selects the 2D or 3D case depending on the size of the \"coords\" variable\n% labels - N labels for the samples indicating the group membership (numeric vector or cell of strings)\n% show_plot - [optional] 1 or 0 to indicate whether the plot of the results has to be shown or not (default = 1)\n% rand_reps - [optional] repetitions for evaluating random coordinates (default = 1000)\n% rand_seed - [optional] nonnegative integer seed for random number generator (by default a seed is created based on the current time)\n% worst_comp - [optional] 1 or 0 to indicate if the worst case should be approximated computationally or theoretically (default = 1)\n% note that for the 3D case only the value 1 is valid\n% (NB: optional inputs not given or empty assume the default value)\n\n%%% OUTPUT %%%\n% index - overall index in [0,1], a value 1 indicates that all the groups\n% are perfectly separated over the circle circumference (2D) or sphere surface (3D),\n% the more the groups are mixed the more the index tends to 0, representing a worst-case scenario.\n% group_index - vector containing an index in [0,1] for each group,\n% to assess its separation with respect to the other groups\n% pvalue - empirical p-value computed comparing the observed index with a null distribution\n% of indexes obtained from random permutations of the coordinates\n\n% check input\nnarginchk(2,6)\nvalidateattributes(coords, {'numeric'}, {})\nN = size(coords,1);\nD = size(coords,2) + 1; \nif D == 2\n if any(coords(:,1)<0 | coords(:,1)>2*pi)\n error('Angular coordinates must be in [0,2pi]')\n end\n if N < 4\n error('The index in 2D cannot be assessed for less than 4 samples')\n end\nelseif D == 3\n if any(coords(:,1)<0 | coords(:,1)>2*pi)\n error('Azimuth coordinates must be in [0,2pi]')\n end\n if any(coords(:,2)<-pi/2 | coords(:,2)>pi/2)\n error('Elevation coordinates must be in [-pi/2,pi/2]')\n end\n if N < 6\n error('The index in 3D cannot be assessed for less than 6 samples')\n end\nelse\n error('Input coordinates must be a 
one-column vector (2D case) or two-columns matrix (3D case)')\nend\nvalidateattributes(labels, {'numeric','cell'}, {'vector','numel',N})\nif ~exist('show_plot','var') || isempty(show_plot)\n show_plot = 1;\nelse\n validateattributes(show_plot, {'numeric'}, {'scalar','binary'})\nend\nif ~exist('rand_reps','var') || isempty(rand_reps)\n rand_reps = 1000;\nelse\n validateattributes(rand_reps, {'numeric'}, {'scalar','integer','positive'})\nend\nif ~exist('rand_seed','var') || isempty(rand_seed)\n rand_str = RandStream('mt19937ar','Seed','shuffle');\nelse\n validateattributes(rand_seed, {'numeric'}, {'scalar','integer','nonnegative'})\n rand_str = RandStream('mt19937ar','Seed',rand_seed);\nend\nif ~exist('worst_comp','var') || isempty(worst_comp)\n worst_comp = 1;\nelse\n validateattributes(worst_comp, {'numeric'}, {'scalar','binary'})\n if D == 3 && worst_comp == 0\n error('In 3D the worst case can be approximated only computationally (worst_comp = 1)')\n end\nend\n\n% convert labels\nunique_labels = unique(labels);\nM = length(unique(labels));\nif M==1 || M==N\n error('The number of groups must be greater than 1 and lower than the number of samples')\nend\ntemp = zeros(N,1);\nNk = zeros(M,1);\nfor k = 1:M\n if isnumeric(labels)\n temp(labels==unique_labels(k)) = k;\n else\n temp(strcmp(labels,unique_labels{k})) = k;\n end\n Nk(k) = sum(temp == k);\nend\nlabels = temp; clear temp;\n\n% compute index\n[index, group_index, pvalue, index_rand] = compute_index(D, coords, labels, N, Nk, M, rand_reps, rand_str, worst_comp);\n\n% restore original labels in group index\nif isnumeric(unique_labels)\n group_index(:,1) = unique_labels;\nelse\n group_index = num2cell(group_index);\n group_index(:,1) = unique_labels(:);\nend\n\n% plot results\nif show_plot\n plot_results(index, index_rand, pvalue)\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [index, group_index, pvalue, index_rand] = compute_index(D, coords, labels, N, Nk, M, rand_reps, rand_str, worst_comp)\n\nif D == 2\n compute_mistakes = @compute_mistakes_2D;\nelse\n compute_mistakes = @compute_mistakes_3D;\nend\n\n% compute mistakes in input coordinates\nmistakes = compute_mistakes(coords, labels, N, Nk, M);\n\n% compute mistakes in random coordinates\nmistakes_rand = zeros(M,rand_reps);\nfor i = 1:rand_reps\n idx_rand = randperm(rand_str, size(coords,1));\n mistakes_rand(:,i) = compute_mistakes(coords(idx_rand,:), labels, N, Nk, M);\nend\n\nif all(isnan(mistakes)) || all(isnan(mistakes_rand(:)))\n error('The index could not be computed for any group.')\nend\n\n% find the worst case\nif worst_comp == 1\n [~,idx] = max(nansum(mistakes_rand,1));\n mistakes_worst = mistakes_rand(:,idx);\nelse\n mistakes_worst = ceil((N-Nk).*(Nk-1)./Nk);\n mistakes_worst(Nk == 1) = NaN;\nend\n\n% compute group index\ngroup_index = zeros(M,2);\ngroup_index(:,2) = 1 - mistakes./mistakes_worst;\n\n% compute overall index\nindex = 1 - nansum(mistakes)/nansum(mistakes_worst);\nindex = max(index,0);\n\n% compute pvalue\nindex_rand = 1 - nansum(mistakes_rand,1)./repmat(nansum(mistakes_worst),1,rand_reps);\nindex_rand = max(index_rand,0);\npvalue = (sum(index_rand >= index) + 1) / (rand_reps + 1);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction plot_results(index, index_rand, pvalue)\n\n% plot figure\nfigure('color', 'white')\n[fy,fx] = ksdensity(index_rand);\nplot(fx, fy, 'k', 'LineWidth', 2)\nhold on\nplot([index,index], [0,max(fy)*1.1], 'r', 'LineWidth', 
2)\nset(gca,'YLim',[0,max(fy)*1.1],'XTick',0:0.1:1,'XLim',[0,max(max(fx),index)*1.1])\nbox on\nxlabel('index')\nylabel('probability density')\ntext(1, 1.05, ['pvalue = ' num2str(pvalue)], 'Units', 'normalized', 'HorizontalAlignment', 'right')\nlegend({'null distribution','observed value'},'Location','northoutside','Orientation','vertical')\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%% 2D separation (circle circumference) %%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction mistakes = compute_mistakes_2D(coords, labels, N, Nk, M)\n\n% ranking of the samples\nx = zeros(N,1);\n[~,idx] = sort(coords);\nx(idx) = 1:N;\n\n% for each group\nmistakes = zeros(M,1);\nfor k = 1:M\n\n if Nk(k) == 1\n mistakes(k) = NaN;\n continue;\n end\n \n % compute number of wrong samples within the extremes of the group,\n % where the extremes are the adjacent samples at the maximum distance:\n % - find the number of samples of other groups between adjacent samples\n % of the current group\n % - sum them excluding the maximum value, since it will be related to\n % wrong samples outside the extremes\n\n x_k = x(labels == k);\n x_list = sort(x_k);\n wr = zeros(Nk(k),1);\n for l = 1:Nk(k)-1\n wr(l) = x_list(l+1)-x_list(l)-1;\n end\n wr(end) = N-x_list(end)+x_list(1)-1;\n [~,max_id] = max(wr);\n wr(max_id) = [];\n mistakes(k) = sum(wr);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%% 3D separation (sphere surface) %%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction mistakes = compute_mistakes_3D(coords, labels, N, Nk, M)\n\n% rank samples for azimuth\nazim_rank = zeros(N,1);\n[~,idx] = sort(coords(:,1));\nazim_rank(idx) = 1:N;\n\nmistakes = zeros(M,1);\nfor k = 1:M\n \n if Nk(k) < 3\n mistakes(k) = NaN;\n continue;\n end\n \n try\n % map the samples between the group extremes to a rectangular 2D area\n [xy_group, xy_other] = map_samples_between_group_extremes_3D(labels==k, azim_rank, coords(:,1), coords(:,2), N, Nk(k));\n \n if isempty(xy_other)\n mistakes(k) = 0;\n else\n % compute mistakes within the polygonal area delimited by the group samples\n pol_idx = convhull(xy_group(:,1),xy_group(:,2));\n [in_pol, on_pol] = inpolygon(xy_other(:,1),xy_other(:,2),xy_group(pol_idx,1),xy_group(pol_idx,2));\n mistakes(k) = sum(in_pol) - sum(on_pol);\n end\n catch\n mistakes(k) = NaN;\n end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [xy_group, xy_other] = map_samples_between_group_extremes_3D(labels_k, azim_rank, azim, elev, N, Nk)\n\n% find group extremes: azimuth\nazim_k_order = sort(azim_rank(labels_k));\nwr = zeros(Nk,1);\nfor i = 1:Nk-1\n wr(i) = azim_k_order(i+1) - azim_k_order(i) - 1;\nend\nwr(Nk) = N - azim_k_order(end) + azim_k_order(1) - 1;\n[~,max_idx] = max(wr);\nif max_idx == Nk\n azim_ext = [azim(azim_rank==azim_k_order(1)) azim(azim_rank==azim_k_order(Nk))];\n disc = 0;\nelse\n azim_ext = [azim(azim_rank==azim_k_order(max_idx)) azim(azim_rank==azim_k_order(max_idx+1))];\n disc = 1;\nend\n\n% find group extremes: elevation\nelev_ext = [min(elev(labels_k)) max(elev(labels_k))];\n\n% detect samples between the group extremes\ndetected = (elev>=elev_ext(1) & elev<=elev_ext(2)) & ...\n ((~disc & (azim>=azim_ext(1) & azim<=azim_ext(2))) | (disc & (azim>=azim_ext(2) | azim<=azim_ext(1))));\n\n% map the coordinates to a rectangular 2D area\nif 
~disc\n x_detected = azim(detected) - azim_ext(1);\nelse\n x_detected = mod(azim(detected) + (2*pi-azim_ext(2)), 2*pi);\nend\ny_detected = elev(detected) - elev_ext(1);\n\n% divide samples belonging to the group and not\nxy_group = [x_detected(labels_k(detected)==1) y_detected(labels_k(detected)==1)];\nxy_other = [x_detected(labels_k(detected)==0) y_detected(labels_k(detected)==0)];\n"} +{"plateform": "github", "repo_name": "kareem1925/coursera-Neural-Networks-for-Machine-Learning-master", "name": "train.m", "ext": ".m", "path": "coursera-Neural-Networks-for-Machine-Learning-master/week05/Assignment2/train.m", "size": 8724, "source_encoding": "utf_8", "md5": "f1ced206e6c895129b06f256ffe18f88", "text": "% This function trains a neural network language model.\nfunction [model] = train(epochs)\n% Inputs:\n% epochs: Number of epochs to run.\n% Output:\n% model: A struct containing the learned weights and biases and vocabulary.\n\nif size(ver('Octave'),1)\n OctaveMode = 1;\n warning('error', 'Octave:broadcast');\n start_time = time;\nelse\n OctaveMode = 0;\n start_time = clock;\nend\n\n% SET HYPERPARAMETERS HERE.\nbatchsize = 100; % Mini-batch size.\nlearning_rate = 0.1; % Learning rate; default = 0.1.\nmomentum = 0.9; % Momentum; default = 0.9.\nnumhid1 = 50; % Dimensionality of embedding space; default = 50.\nnumhid2 = 200; % Number of units in hidden layer; default = 200.\ninit_wt = 0.01; % Standard deviation of the normal distribution\n % which is sampled to get the initial weights; default = 0.01\n\n% VARIABLES FOR TRACKING TRAINING PROGRESS.\nshow_training_CE_after = 100;\nshow_validation_CE_after = 1000;\n\n% LOAD DATA.\n[train_input, train_target, valid_input, valid_target, ...\n test_input, test_target, vocab] = load_data(batchsize);\n[numwords, batchsize, numbatches] = size(train_input); \nvocab_size = size(vocab, 2);\n\n% INITIALIZE WEIGHTS AND BIASES.\nword_embedding_weights = init_wt * randn(vocab_size, numhid1);\nembed_to_hid_weights = init_wt * randn(numwords * numhid1, numhid2);\nhid_to_output_weights = init_wt * randn(numhid2, vocab_size);\nhid_bias = zeros(numhid2, 1);\noutput_bias = zeros(vocab_size, 1);\n\nword_embedding_weights_delta = zeros(vocab_size, numhid1);\nword_embedding_weights_gradient = zeros(vocab_size, numhid1);\nembed_to_hid_weights_delta = zeros(numwords * numhid1, numhid2);\nhid_to_output_weights_delta = zeros(numhid2, vocab_size);\nhid_bias_delta = zeros(numhid2, 1);\noutput_bias_delta = zeros(vocab_size, 1);\nexpansion_matrix = eye(vocab_size);\ncount = 0;\ntiny = exp(-30);\n\n% TRAIN.\nfor epoch = 1:epochs\n fprintf(1, 'Epoch %d\\n', epoch);\n this_chunk_CE = 0;\n trainset_CE = 0;\n % LOOP OVER MINI-BATCHES.\n for m = 1:numbatches\n input_batch = train_input(:, :, m);\n target_batch = train_target(:, :, m);\n\n % FORWARD PROPAGATE.\n % Compute the state of each layer in the network given the input batch\n % and all weights and biases\n [embedding_layer_state, hidden_layer_state, output_layer_state] = ...\n fprop(input_batch, ...\n word_embedding_weights, embed_to_hid_weights, ...\n hid_to_output_weights, hid_bias, output_bias);\n\n % COMPUTE DERIVATIVE.\n %% Expand the target to a sparse 1-of-K vector.\n expanded_target_batch = expansion_matrix(:, target_batch);\n %% Compute derivative of cross-entropy loss function.\n error_deriv = output_layer_state - expanded_target_batch;\n\n % MEASURE LOSS FUNCTION.\n CE = -sum(sum(...\n expanded_target_batch .* log(output_layer_state + tiny))) / batchsize;\n count = count + 1;\n this_chunk_CE = this_chunk_CE + (CE - 
this_chunk_CE) / count;\n trainset_CE = trainset_CE + (CE - trainset_CE) / m;\n fprintf(1, '\\rBatch %d Train CE %.3f', m, this_chunk_CE);\n if mod(m, show_training_CE_after) == 0\n fprintf(1, '\\n');\n count = 0;\n this_chunk_CE = 0;\n end\n if OctaveMode\n fflush(1);\n end\n\n % BACK PROPAGATE.\n %% OUTPUT LAYER.\n hid_to_output_weights_gradient = hidden_layer_state * error_deriv';\n output_bias_gradient = sum(error_deriv, 2);\n back_propagated_deriv_1 = (hid_to_output_weights * error_deriv) ...\n .* hidden_layer_state .* (1 - hidden_layer_state);\n\n %% HIDDEN LAYER.\n % FILL IN CODE. Replace the line below by one of the options.\n embed_to_hid_weights_gradient = embedding_layer_state * back_propagated_deriv_1';\n % Options:\n % (a) embed_to_hid_weights_gradient = back_propagated_deriv_1' * embedding_layer_state;\n % (b) embed_to_hid_weights_gradient = embedding_layer_state * back_propagated_deriv_1';\n % (c) embed_to_hid_weights_gradient = back_propagated_deriv_1;\n % (d) embed_to_hid_weights_gradient = embedding_layer_state;\n\n % FILL IN CODE. Replace the line below by one of the options.\n hid_bias_gradient = sum(back_propagated_deriv_1, 2);\n % Options\n % (a) hid_bias_gradient = sum(back_propagated_deriv_1, 2);\n % (b) hid_bias_gradient = sum(back_propagated_deriv_1, 1);\n % (c) hid_bias_gradient = back_propagated_deriv_1;\n % (d) hid_bias_gradient = back_propagated_deriv_1';\n\n % FILL IN CODE. Replace the line below by one of the options.\n back_propagated_deriv_2 = embed_to_hid_weights * back_propagated_deriv_1;\n % Options\n % (a) back_propagated_deriv_2 = embed_to_hid_weights * back_propagated_deriv_1;\n % (b) back_propagated_deriv_2 = back_propagated_deriv_1 * embed_to_hid_weights;\n % (c) back_propagated_deriv_2 = back_propagated_deriv_1' * embed_to_hid_weights;\n % (d) back_propagated_deriv_2 = back_propagated_deriv_1 * embed_to_hid_weights';\n\n word_embedding_weights_gradient(:) = 0;\n %% EMBEDDING LAYER.\n for w = 1:numwords\n word_embedding_weights_gradient = word_embedding_weights_gradient + ...\n expansion_matrix(:, input_batch(w, :)) * ...\n (back_propagated_deriv_2(1 + (w - 1) * numhid1 : w * numhid1, :)');\n end\n \n % UPDATE WEIGHTS AND BIASES.\n word_embedding_weights_delta = ...\n momentum .* word_embedding_weights_delta + ...\n word_embedding_weights_gradient ./ batchsize;\n word_embedding_weights = word_embedding_weights...\n - learning_rate * word_embedding_weights_delta;\n\n embed_to_hid_weights_delta = ...\n momentum .* embed_to_hid_weights_delta + ...\n embed_to_hid_weights_gradient ./ batchsize;\n embed_to_hid_weights = embed_to_hid_weights...\n - learning_rate * embed_to_hid_weights_delta;\n\n hid_to_output_weights_delta = ...\n momentum .* hid_to_output_weights_delta + ...\n hid_to_output_weights_gradient ./ batchsize;\n hid_to_output_weights = hid_to_output_weights...\n - learning_rate * hid_to_output_weights_delta;\n\n hid_bias_delta = momentum .* hid_bias_delta + ...\n hid_bias_gradient ./ batchsize;\n hid_bias = hid_bias - learning_rate * hid_bias_delta;\n\n output_bias_delta = momentum .* output_bias_delta + ...\n output_bias_gradient ./ batchsize;\n output_bias = output_bias - learning_rate * output_bias_delta;\n\n % VALIDATE.\n if mod(m, show_validation_CE_after) == 0\n fprintf(1, '\\rRunning validation ...');\n if OctaveMode\n fflush(1);\n end\n [embedding_layer_state, hidden_layer_state, output_layer_state] = ...\n fprop(valid_input, word_embedding_weights, embed_to_hid_weights,...\n hid_to_output_weights, hid_bias, output_bias);\n datasetsize 
= size(valid_input, 2);\n expanded_valid_target = expansion_matrix(:, valid_target);\n CE = -sum(sum(...\n expanded_valid_target .* log(output_layer_state + tiny))) /datasetsize;\n fprintf(1, ' Validation CE %.3f\\n', CE);\n if OctaveMode\n fflush(1);\n end\n end\n end\n fprintf(1, '\\rAverage Training CE %.3f\\n', trainset_CE);\nend\nfprintf(1, 'Finished Training.\\n');\nif OctaveMode\n fflush(1);\nend\nfprintf(1, 'Final Training CE %.3f\\n', trainset_CE);\n\n% EVALUATE ON VALIDATION SET.\nfprintf(1, '\\rRunning validation ...');\nif OctaveMode\n fflush(1);\nend\n[embedding_layer_state, hidden_layer_state, output_layer_state] = ...\n fprop(valid_input, word_embedding_weights, embed_to_hid_weights,...\n hid_to_output_weights, hid_bias, output_bias);\ndatasetsize = size(valid_input, 2);\nexpanded_valid_target = expansion_matrix(:, valid_target);\nCE = -sum(sum(...\n expanded_valid_target .* log(output_layer_state + tiny))) / datasetsize;\nfprintf(1, '\\rFinal Validation CE %.3f\\n', CE);\nif OctaveMode\n fflush(1);\nend\n\n% EVALUATE ON TEST SET.\nfprintf(1, '\\rRunning test ...');\nif OctaveMode\n fflush(1);\nend\n[embedding_layer_state, hidden_layer_state, output_layer_state] = ...\n fprop(test_input, word_embedding_weights, embed_to_hid_weights,...\n hid_to_output_weights, hid_bias, output_bias);\ndatasetsize = size(test_input, 2);\nexpanded_test_target = expansion_matrix(:, test_target);\nCE = -sum(sum(...\n expanded_test_target .* log(output_layer_state + tiny))) / datasetsize;\nfprintf(1, '\\rFinal Test CE %.3f\\n', CE);\nif OctaveMode\n fflush(1);\nend\n\nmodel.word_embedding_weights = word_embedding_weights;\nmodel.embed_to_hid_weights = embed_to_hid_weights;\nmodel.hid_to_output_weights = hid_to_output_weights;\nmodel.hid_bias = hid_bias;\nmodel.output_bias = output_bias;\nmodel.vocab = vocab;\n\n% In MATLAB replace line below with 'end_time = clock;'\nif OctaveMode\n end_time = time;\n diff = end_time - start_time;\nelse\n end_time = clock;\n diff = etime(end_time, start_time);\nend\nfprintf(1, 'Training took %.2f seconds\\n', diff);\nend\n"} +{"plateform": "github", "repo_name": "kareem1925/coursera-Neural-Networks-for-Machine-Learning-master", "name": "a4_main.m", "ext": ".m", "path": "coursera-Neural-Networks-for-Machine-Learning-master/week13/Assignment4/a4_main.m", "size": 4551, "source_encoding": "utf_8", "md5": "a36e706a0a625e7ca1eeadc45f05145f", "text": "% This file was published on Wed Nov 14 20:48:30 2012, UTC.\n\nfunction a4_main(n_hid, lr_rbm, lr_classification, n_iterations)\n% first, train the rbm\n global report_calls_to_sample_bernoulli\n report_calls_to_sample_bernoulli = false;\n global data_sets\n if prod(size(data_sets)) ~= 1,\n error('You must run a4_init before you do anything else.');\n end\n rbm_w = optimize([n_hid, 256], ...\n @(rbm_w, data) cd1(rbm_w, data.inputs), ... 
% discard labels\n data_sets.training, ...\n lr_rbm, ...\n n_iterations);\n % rbm_w is now a weight matrix of by \n show_rbm(rbm_w);\n input_to_hid = rbm_w;\n % calculate the hidden layer representation of the labeled data\n hidden_representation = logistic(input_to_hid * data_sets.training.inputs);\n % train hid_to_class\n data_2.inputs = hidden_representation;\n data_2.targets = data_sets.training.targets;\n hid_to_class = optimize([10, n_hid], @(model, data) classification_phi_gradient(model, data), data_2, lr_classification, n_iterations);\n % report results\n for data_details = reshape({'training', data_sets.training, 'validation', data_sets.validation, 'test', data_sets.test}, [2, 3]),\n data_name = data_details{1};\n data = data_details{2};\n hid_input = input_to_hid * data.inputs; % size: